Optuna with PyTorch

Issue => AttributeError: module 'optuna.trial' has no attribute 'suggest_int'

Code:

# -*- coding: utf-8 -*-

"""
Created on Mon May 15 20:22:07 2023

@author: Ashish
"""

import torch
import numpy as np
from optuna.trial import TrialState
import pandas as pd
from torch.utils.data import Dataset,DataLoader
import optuna
from sklearn.model_selection import train_test_split
torch.manual_seed(42)

CLASSES = 1
BATCH_SIZE = 32
EPOCHS = 100
DEVICE = 'gpu'
N_TRAIN_EXAMPLES = BATCH_SIZE * 30
N_VALID_EXAMPLES = BATCH_SIZE * 10

class BostonData(Dataset):

    def __init__(self, X, y):
        self.X = torch.from_numpy(X)
        self.y = torch.from_numpy(y)

    def __len__(self):
        return len(self.X)

    def __getattr__(self, item):
        return self.X[item], self.y[item]

def get_model(trail,in_features,n_layers,dropout,n_output):
    layers = []
    fc_layer = in_features
    for i in range(n_layers):
        out_features = trial.suggest_int("n_units_l{}".format(i), 2, in_features)
        layers.append(torch.nn.Linear(in_features, out_features))
        layers.append(torch.nn.LeakyReLU())
        in_features = out_features

    layers.append(torch.nn.Linear(in_features, fc_layer))
    layers.append(torch.nn.LeakyReLU())

    layers.append(torch.nn.Dropout(dropout))
    layers.append(torch.nn.Linear(fc_layer, n_output))

    return torch.nn.Sequential(*layers)

def objective(trial):

    # For TPESampler
    params = {
        'learning_rate': trial.suggest_loguniform('learning_rate', 1e-6, 1e-2),
        'optimizer': trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"]),
        'weight_decay': trial.suggest_loguniform('weight_decay', 1e-4, 1e-2),
        "n_layers": trial.suggest_int("n_layers", 1, 4),
        "dropout": trial.suggest_float('dropout', 0.1, 0.5, step=0.1)
    }
    test_loss = train_net(trial, params)
    return np.mean(test_loss)

def train_net(trial, params):

    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\s+", skiprows=22, header=None)
    X = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    y = raw_df.values[1::2, 2]
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=42)

    # Prepare Boston dataset
    trainloader = DataLoader(BostonData(X_train, y_train), batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
    valloader = DataLoader(BostonData(X_val, y_val), batch_size=1, shuffle=False, num_workers=1)

    # Initialize the MLP
    net = get_model(trial,
                    in_features=X.shape[1],
                    n_layers=params['n_layers'],
                    dropout=params['dropout'],
                    n_output=1)

    # Define the loss function and optimizer
    loss_function = torch.nn.MSELoss()
    optimizer = getattr(torch.optim, params['optimizer'])(net.parameters(), lr=params['learning_rate'], weight_decay=params['weight_decay'])

    # Run the training loop
    for epoch in range(0, 5):  # 5 epochs at maximum

        total_test_loss = []
        # Print epoch
        print(f'Starting epoch {epoch+1}')

        # Set current loss value
        # current_loss = 0.0

        # Iterate over the DataLoader for training data
        for i, data in enumerate(trainloader, 0):

            # Get and prepare inputs
            inputs, targets = data
            inputs, targets = inputs.float(), targets.float()
            targets = targets.reshape((targets.shape[0], 1))

            # Zero the gradients
            optimizer.zero_grad()
            # Perform forward pass
            outputs = net(inputs)
            # Compute loss
            loss = loss_function(outputs, targets)
            # Perform backward pass
            loss.backward()
            # Perform optimization
            optimizer.step()

        net.eval()
        with torch.no_grad():
            for i, data in enumerate(valloader, 0):
                # Get and prepare inputs
                inputs, targets = data
                inputs, targets = inputs.float(), targets.float()
                targets = targets.reshape((targets.shape[0], 1))

                # Perform forward pass
                test_outputs = net(inputs)
                test_loss = loss_function(test_outputs, targets)
                total_test_loss.append(test_loss.item())

    # Process is complete.
    return total_test_loss

study = optuna.create_study(direction="minimize", sampler=optuna.samplers.TPESampler())
study.optimize(objective, n_trials=100, timeout=600)

pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
print("Study statistics: ")
print("  Number of finished trials: ", len(study.trials))
print("  Number of pruned trials: ", len(pruned_trials))
print("  Number of complete trials: ", len(complete_trials))

print("Best trial:")
trial = study.best_trial

print(" Value: ", trial.value)

print("  Params: ")
for key, value in trial.params.items():
    print("    {}: {}".format(key, value))

Any help is appreciated!

optuna.trial.Trial.suggest_int is a valid method, as seen here, so you might need to update Optuna in case it's not available in your release.
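For reference, here is a minimal, self-contained sketch of suggest_int being called on the Trial instance that Optuna passes into the objective. The parameter names, ranges, and the toy return value below are illustrative, not taken from your script:

import optuna

def objective(trial):
    # trial here is an optuna.trial.Trial instance, so suggest_int is available on it
    n_layers = trial.suggest_int("n_layers", 1, 4)
    n_units = trial.suggest_int("n_units", 2, 128)
    # Toy objective value, just to keep the example runnable end to end
    return (n_layers - 2) ** 2 + (n_units - 64) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=10)
print(study.best_params)

If this snippet runs on your installation, the error message (module 'optuna.trial' has no attribute 'suggest_int') suggests that the name trial inside get_model is resolving to the optuna.trial module rather than to the Trial object passed in (note the function signature uses trail), which may be worth double-checking.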
