Optuna PyTorch integration error

Hello,
I’ve been trying to integrate Optuna into my PyTorch workflow for hyperparameter optimization with the code below:

import time

import torch
import torch.nn as nn
import torch.optim as optim

import optuna
from optuna.trial import TrialState

def define_model(trial): 
    in_features = torch.tensor(1024, requires_grad = False)
    #Define the number of input features
    n_layers = trial.suggest_int('n_layers', 1, 8)
    #Suggest a number of layers from 1 to 8
    net = nn.Sequential()
    #Initialize an empty sequential container
    for i in range(n_layers): 
        #Loop through the number of proposed layers 
        out_features = torch.tensor(trial.suggest_int('n_units_l{}'.format(i), 1, in_features))
        #Suggest the number of output features for this layer
        net.append(nn.Linear(in_features, out_features, dtype = torch.float64))
        #Add a linear layer to the NN 
        net.append(nn.ReLU())
        #Add the activation function 
        net.append(nn.Dropout(trial.suggest_float('dropout_l{}'.format(i), 0.1, 0.5)))
        #Suggest the dropout rate 
        in_features = out_features.clone().detach()
        #Redefine the number of input features for the next layer 
        return in_features
    net.append(nn.Linear(in_features, 1, dtype = torch.float64))
    #Define the output layer
    return net

def objective(trial): 
    model = define_model(trial) 
    #Define the model 
    optimizer_name = trial.suggest_categorical('optimizer', ['Adam', 'Adamax', 'ASGD', 'LBFGS', 'SGD'])
    #Suggest the optimizer
    lr = trial.suggest_float('lr', 1e-5, 1e-1, log = True)
    #Suggest the learning rate
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr = lr)
    #Instantiate the chosen optimizer 
    criterion = nn.MSELoss()
    #Define the criterion
    epochs = trial.suggest_int('epochs', 50, 500, step = 50)
    #Suggest the number of epochs 

    #Model training
    train_time = time.time()
    #Record the training start time
    for epoch in range(epochs): 
        model.train()
        #Change back to training mode at the start of each epoch
        running_loss = 0 
        for data, target in train_loader:
            #Loop through the training set
            optimizer.zero_grad()
            #Reset the gradients 
            output = model(data)
            #Run the model on the data 
            loss = criterion(output, target)
            #Calculate the loss 
            loss.backward()
            #Backpropagate
            optimizer.step()
            #Update the parameters
            running_loss += loss.item()
            #Accumulate the running loss
        #Model validation
        model.eval()
        #Change to evaluation mode 
        with torch.no_grad():
            y_pred_test = model(X_test)
            #Run the model on the test set
            test_mse = torch.mean((y_test - y_pred_test) ** 2).item()
            #Calculate the test MSE
        trial.report(test_mse, epoch)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return test_mse

in_features = 1024
train_loader = train_loader_conc_1024
y_train = y_train_conc_1024
X_train = X_train_conc_1024
y_test = y_test_conc_1024
X_test = X_test_conc_1024
study = optuna.create_study(direction = 'minimize')
study.optimize(objective, n_trials = 500, timeout = 1200)

pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])

print("Study statistics: ")
print("  Number of finished trials: ", len(study.trials))
print("  Number of pruned trials: ", len(pruned_trials))
print("  Number of complete trials: ", len(complete_trials))

print("Best trial:")
trial = study.best_trial

print("  Value: ", trial.value)

print("  Params: ")
for key, value in trial.params.items():
    print("    {}: {}".format(key, value))

But I’m getting the following error:

AttributeError                            Traceback (most recent call last)
/home/kvb32/Documents/CYP450/CYP450/small_model/model_building.ipynb Cell 14 in <cell line: 78>()
     76 X_test = X_test_conc_1024
     77 study = optuna.create_study(direction = 'minimize')
---> 78 study.optimize(objective, n_trials = 500, timeout = 1200)
     80 pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
     81 complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])

File ~/anaconda3/envs/my-rdkit-env/lib/python3.10/site-packages/optuna/study/study.py:425, in Study.optimize(self, func, n_trials, timeout, n_jobs, catch, callbacks, gc_after_trial, show_progress_bar)
    321 def optimize(
    322     self,
    323     func: ObjectiveFuncType,
   (...)
    330     show_progress_bar: bool = False,
    331 ) -> None:
    332     """Optimize an objective function.
    333 
    334     Optimization is done by choosing a suitable set of hyperparameter values from a given
   (...)
    422             If nested invocation of this method occurs.
    423     """
--> 425     _optimize(
    426         study=self,
    427         func=func,
...
---> 35 optimizer = getattr(optim, optimizer_name)(model.parameters(), lr = lr)
     36 #Define the optimizer 
     37 criterion = nn.MSELoss()

AttributeError: 'Tensor' object has no attribute 'parameters'

Any assistance would be greatly appreciated :slight_smile: (the datasets and loaders are generated in a previous block and seem to be working fine).

Based on the error message it seems model is a tensor while an nn.Module is expected. Could you check what model = define_model(trial) returns and make sure it’s indeed a real model?
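
For example, a quick check right after building the model (just a minimal debugging sketch) would make the mismatch visible:

model = define_model(trial)
print(type(model))
# a working model should print <class 'torch.nn.modules.container.Sequential'>
print(isinstance(model, nn.Module))
# the traceback suggests this currently prints False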

Thank you for the response! It is returning a tensor. I’ve changed it slightly by initializing net as a list, net = [nn.Flatten()], and then returning it as nn.Sequential(*net), but I’m still getting a tensor rather than an nn.Module. How would I fix this?

Your new code snippet should now be a module, but would not contain any trainable parameters, since nn.Flatten just changes the shape of the input activation.

net = [nn.Flatten()]
model = nn.Sequential(*net)

optimizer = torch.optim.SGD(model.parameters(), lr=1.)
# ValueError: optimizer got an empty parameter list
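
For comparison, once the list contains at least one layer that registers parameters (a minimal sketch; the 1024 input size is just taken from your first layer), the optimizer gets a non-empty parameter list:

net = [nn.Flatten(), nn.Linear(1024, 1)]
model = nn.Sequential(*net)

optimizer = torch.optim.SGD(model.parameters(), lr=1.)
# works now: nn.Linear registers a weight and a bias parameter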

Could you explain why you want to initialize an optimizer if no model seems to be available?