Code
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm.notebook import trange, tqdm
import os
from pathlib import Path

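# GRU baseline for the adding problem: a single GRU layer followed by a linear
# readout applied to the hidden state of the last time step; the forward pass
# also returns the full hidden-state sequence. Recurrent weights use orthogonal
# initialization, biases are zeroed, and the readout uses Xavier initialization.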
class AddingProblemGRU(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(AddingProblemGRU, self).__init__()
        self.gru = nn.GRU(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)
        self.init_weights()

    def init_weights(self):
        for name, param in self.gru.named_parameters():
            if "weight" in name:
                nn.init.orthogonal_(param)
            elif "bias" in name:
                nn.init.constant_(param, 0)
        nn.init.xavier_uniform_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, x):
        out, _ = self.gru(x)
        return self.linear(out[:, -1, :]), out

# Hyperparameters
RANDOM_SEED = 37
N_SAMPLES = 10000
TRAIN_SPLIT = 0.8
BATCH_SIZE = 256
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 1e-5
CLIP_VALUE = 2.0
NUM_EPOCHS = 3000
HIDDEN_SIZE = 1
OUTPUT_SIZE = 1
INPUT_SIZE = 2
HIGH = 100
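# Seed NumPy and PyTorch so that data generation and training are reproducible.
# (RANDOM_SEED is otherwise unused above; applying it here is an assumption
# about the intended behavior.)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)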

# Experiment configurations
experiments = [
    # Fixed delta=0 with varying seq_len
    {'delta': 0, 'seq_len': 2, 'norm': 'high'},
    {'delta': 0, 'seq_len': 2, 'norm': 'high_seq'},
    {'delta': 0, 'seq_len': 3, 'norm': 'high'},
    {'delta': 0, 'seq_len': 3, 'norm': 'high_seq'},
    {'delta': 0, 'seq_len': 5, 'norm': 'high'},
    {'delta': 0, 'seq_len': 5, 'norm': 'high_seq'},
    {'delta': 0, 'seq_len': 7, 'norm': 'high'},
    {'delta': 0, 'seq_len': 7, 'norm': 'high_seq'},
    {'delta': 0, 'seq_len': 9, 'norm': 'high'},
    {'delta': 0, 'seq_len': 9, 'norm': 'high_seq'},
    
    # Fixed seq_len=9 with varying delta
    {'delta': 0.2, 'seq_len': 9, 'norm': 'high'},
    {'delta': 0.2, 'seq_len': 9, 'norm': 'high_seq'},
    {'delta': 0.4, 'seq_len': 9, 'norm': 'high'},
    {'delta': 0.4, 'seq_len': 9, 'norm': 'high_seq'},
    {'delta': 0.6, 'seq_len': 9, 'norm': 'high'},
    {'delta': 0.6, 'seq_len': 9, 'norm': 'high_seq'},
    {'delta': 0.8, 'seq_len': 9, 'norm': 'high'},
    {'delta': 0.8, 'seq_len': 9, 'norm': 'high_seq'},
]

def adding_problem_generator(N, seq_len=6, high=1, delta=0.6):
    # One sequence length and one number of marked positions are drawn per call
    # and shared by all N samples.
    actual_seq_len = np.random.randint(
        int(seq_len * (1 - delta)), int(seq_len * (1 + delta))
    ) if delta > 0 else seq_len
    # Number of positions to sum: between 2 and min(actual_seq_len - 1, 4).
    max_ones = max(2, min(actual_seq_len - 1, 4))
    num_ones = max_ones if max_ones == 2 else np.random.randint(2, max_ones)
    # Channel 0 holds the values in [0, high), channel 1 the 0/1 mask.
    X_num = np.random.randint(0, high, (N, actual_seq_len, 1))
    X_mask = np.zeros((N, actual_seq_len, 1))
    Y = np.zeros((N, 1))
    for i in range(N):
        positions = np.random.choice(actual_seq_len, num_ones, replace=False)
        X_mask[i, positions] = 1
        Y[i] = X_num[i, positions].sum()
    return np.concatenate([X_num, X_mask], axis=2), Y

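# Run a single configuration end to end: generate the dataset, train the GRU
# with per-batch normalization, evaluate and snapshot the weights every 49
# epochs, and save the losses, weight snapshots, and final model to a
# per-experiment directory.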
def run_experiment(exp):
    # Set hyperparameters
    delta = exp['delta']
    seq_len = exp['seq_len']
    norm_method = exp['norm']
    
    # Create output directory
    dir_name = f"delta_{delta}_seqlen_{seq_len}_norm_{norm_method}"
    os.makedirs(dir_name, exist_ok=True)
    
    # Normalization factor: 'high' rescales inputs and targets by HIGH only,
    # 'high_seq' rescales by HIGH * seq_len.
    norm_factor = HIGH * (seq_len if norm_method == 'high_seq' else 1)

    # Generate data
    X, Y = adding_problem_generator(N_SAMPLES, seq_len, HIGH, delta)
    
    # Split dataset
    train_len = int(N_SAMPLES * TRAIN_SPLIT)
    train_X, test_X = X[:train_len], X[train_len:]
    train_Y, test_Y = Y[:train_len], Y[train_len:]

    # Create DataLoaders (normalization is applied per batch in the loops below)
    def create_loader(data_X, data_Y, shuffle=False):
        dataset = TensorDataset(
            torch.tensor(data_X).float(),
            torch.tensor(data_Y).float()
        )
        return DataLoader(dataset, BATCH_SIZE, shuffle=shuffle)

    train_loader = create_loader(train_X, train_Y, shuffle=True)
    test_loader = create_loader(test_X, test_Y)

    # Model setup
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = AddingProblemGRU(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE).to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.5, patience=5, min_lr=1e-6, verbose=False)

    # Set up paths
    weights_path = Path(dir_name) / 'weights.json'

    # Initialize weight storage
    all_weights = []

    # Training loop
    train_losses, test_losses = [], []
    for epoch in trange(NUM_EPOCHS, desc=f"Training {dir_name}"):
        model.train()
        epoch_loss = 0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
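            # Normalize the value channel in place (the 0/1 mask channel is
            # left untouched) and scale the targets by the same factor.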
            inputs[:, :, 0] /= norm_factor
            labels_scaled = labels / norm_factor
            
            optimizer.zero_grad()
            outputs, _ = model(inputs)
            loss = criterion(outputs, labels_scaled)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_VALUE)
            optimizer.step()
            epoch_loss += loss.item() * inputs.size(0)
        
        train_losses.append(epoch_loss / len(train_loader.dataset))
        
        # Validation: evaluate and snapshot the weights every 49 epochs
        if epoch % 49 == 0:
            epoch_weights = {}
            
            model.eval()
            test_loss = 0
            with torch.no_grad():
                for name, param in model.named_parameters():
                    epoch_weights[name] = param.data.cpu().numpy().tolist()
                all_weights.append(epoch_weights)

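                # The test loss is computed in the original scale: predictions
                # are rescaled by norm_factor before comparing with raw labels.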
                for inputs, labels in test_loader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    inputs[:, :, 0] /= norm_factor
                    outputs, _ = model(inputs)
                    outputs = outputs * norm_factor
                    test_loss += criterion(outputs, labels).item() * inputs.size(0)
            test_loss = test_loss / len(test_loader.dataset)
            test_losses.append(test_loss)
            scheduler.step(test_loss)
        else:
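            # Keep test_losses aligned with epochs; None marks epochs without evaluation.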
            test_losses.append(None)

    # Save results
    results = {
        'train_losses': train_losses,
        'test_losses': test_losses,
        'config': exp
    }
    torch.save(model.state_dict(), Path(dir_name) / 'model.pth')
    with open(Path(dir_name) / 'losses.json', 'w') as f:
        json.dump(results, f)
    
    # Save weights in efficient format
    with open(weights_path, 'w') as f:
        json.dump({
            'epoch_weights': all_weights,
            'param_names': list(model.state_dict().keys())
        }, f)

# Run all experiments
for exp in experiments:
    run_experiment(exp)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[5], line 204
    202 # Run all experiments
    203 for exp in experiments:
--> 204     run_experiment(exp)

Cell In[5], line 154, in run_experiment(exp)
    152 outputs, _ = model(inputs)
    153 loss = criterion(outputs, labels_scaled)
--> 154 loss.backward()
    155 torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_VALUE)
    156 optimizer.step()

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/_tensor.py:581, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    571 if has_torch_function_unary(self):
    572     return handle_torch_function(
    573         Tensor.backward,
    574         (self,),
   (...)
    579         inputs=inputs,
    580     )
--> 581 torch.autograd.backward(
    582     self, gradient, retain_graph, create_graph, inputs=inputs
    583 )

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/autograd/__init__.py:347, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    342     retain_graph = create_graph
    344 # The reason we repeat the same comment below is that
    345 # some Python versions print out the first line of a multi-line function
    346 # calls in the traceback and some print out the last line
--> 347 _engine_run_backward(
    348     tensors,
    349     grad_tensors_,
    350     retain_graph,
    351     create_graph,
    352     inputs,
    353     allow_unreachable=True,
    354     accumulate_grad=True,
    355 )

File ~/miniconda3/envs/tesi/lib/python3.11/site-packages/torch/autograd/graph.py:825, in _engine_run_backward(t_outputs, *args, **kwargs)
    823     unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)
    824 try:
--> 825     return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    826         t_outputs, *args, **kwargs
    827     )  # Calls into the C++ engine to run the backward pass
    828 finally:
    829     if attach_logging_hooks:

KeyboardInterrupt: 
 
