---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[5], line 204
    202 # Run all experiments
    203 for exp in experiments:
--> 204     run_experiment(exp)

Cell In[5], line 154, in run_experiment(exp)
    152 outputs, _ = model(inputs)
    153 loss = criterion(outputs, labels_scaled)
--> 154 loss.backward()
    155 torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_VALUE)
    156 optimizer.step()

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/_tensor.py:581, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    571 if has_torch_function_unary(self):
    572     return handle_torch_function(
    573         Tensor.backward,
    574         (self,),
   (...)
    579         inputs=inputs,
    580     )
--> 581 torch.autograd.backward(
    582     self, gradient, retain_graph, create_graph, inputs=inputs
    583 )

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/autograd/__init__.py:347, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    342     retain_graph = create_graph
    344 # The reason we repeat the same comment below is that
    345 # some Python versions print out the first line of a multi-line function
    346 # calls in the traceback and some print out the last line
--> 347 _engine_run_backward(
    348     tensors,
    349     grad_tensors_,
    350     retain_graph,
    351     create_graph,
    352     inputs,
    353     allow_unreachable=True,
    354     accumulate_grad=True,
    355 )

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/autograd/graph.py:825, in _engine_run_backward(t_outputs, *args, **kwargs)
    823     unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)
    824 try:
--> 825     return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    826         t_outputs, *args, **kwargs
    827     )  # Calls into the C++ engine to run the backward pass
    828 finally:
    829     if attach_logging_hooks:

KeyboardInterrupt:
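
For reference, below is a minimal, self-contained sketch of the training step that this traceback passes through. The model, criterion, optimizer, data shapes, and CLIP_VALUE used here are placeholder assumptions reconstructed from the traceback, not the notebook's actual definitions; the point is only to show the path from the forward pass through loss.backward() (where the interrupt landed) to gradient clipping and the optimizer step.

import torch
import torch.nn as nn

CLIP_VALUE = 1.0  # assumed gradient-clipping threshold

class TinyModel(nn.Module):
    # Placeholder stand-in for the notebook's model: it returns (outputs, hidden)
    # so that "outputs, _ = model(inputs)" from the traceback works unchanged.
    def __init__(self):
        super().__init__()
        self.rnn = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
        self.fc = nn.Linear(16, 1)

    def forward(self, x):
        out, hidden = self.rnn(x)
        return self.fc(out), hidden

model = TinyModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())

inputs = torch.randn(4, 10, 8)         # (batch, seq_len, features); assumed shapes
labels_scaled = torch.randn(4, 10, 1)  # scaled targets; assumed shape

optimizer.zero_grad()
outputs, _ = model(inputs)                                      # cell line 152
loss = criterion(outputs, labels_scaled)                        # cell line 153
loss.backward()                                                 # cell line 154: interrupted here
torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_VALUE)  # cell line 155
optimizer.step()                                                # cell line 156

The KeyboardInterrupt itself is not a bug in this code: it only means the kernel was interrupted manually (for example via the stop button) while the autograd engine was running the backward pass inside PyTorch's C++ engine.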