I was tuning the CIFAR-10 training script below with Optuna and could not call the objective function from the __main__ block; every run failed with:

objective() missing 1 required positional argument: 'trial'

Here is the corrected, runnable script:

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn

import torchvision
import torchvision.transforms as transforms

import os
import argparse
from models import resnet, LPM
from utils import progress_bar, MarginRankingLoss_learning_loss
import optuna
from optuna.trial import TrialState

def train_and_evaluate(param):
    # Train the target network and the loss prediction module with the given
    # hyperparameters; returns the final test accuracy.
    global best_acc
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    #parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Building model..')
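    # Two networks are trained jointly: the ResNet-18 target model and a loss
    # prediction module that learns to estimate the target's per-sample loss.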

    net = resnet.ResNet18()
    net = net.to(device)

    loss_pred_module = LPM.loss_prediction_module()
    loss_pred_module = loss_pred_module.to(device)

    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        loss_pred_module = torch.nn.DataParallel(loss_pred_module)
        cudnn.benchmark = True
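        # DataParallel prefixes state_dict keys with 'module.', so the
        # checkpoint below must be saved and loaded with the same wrapping.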

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load('./checkpoint/ckpt.pth')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss(reduction='none')
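    # reduction='none' keeps per-sample losses: they are averaged for the
    # target loss and serve as regression targets for the loss prediction module.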
    loss_pred_criterion = MarginRankingLoss_learning_loss()

    # getattr(optim, ...) resolves the optimizer class Optuna selected;
    # momentum and weight_decay are omitted since not every candidate
    # optimizer (e.g. Adam) accepts them.
    optimizer_target = getattr(optim, param['optimizer'])(net.parameters(), lr=param['learning_rate'])
    optimizer_loss = getattr(optim, param['optimizer'])(loss_pred_module.parameters(), lr=param['learning_rate'])
    
    # Training
    def train(epoch):
        print('\nEpoch: %d' % epoch)
        net.train()
        loss_pred_module.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            
            optimizer_target.zero_grad()
            optimizer_loss.zero_grad()
            
            outputs, loss_pred = net(inputs)
            loss = criterion(outputs, targets)
            loss_pred = loss_pred_module(loss_pred)
            loss_prediction_loss = loss_pred_criterion(loss_pred, loss)
            target_loss = loss.mean()
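            # Joint training schedule: both networks are updated together for
            # the first 120 epochs, the target network alone afterwards (the
            # 20-epoch loop below only ever takes the first branch).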
            if epoch < 120:
                loss = loss_prediction_loss + target_loss 
                loss.backward()
                optimizer_target.step()
                optimizer_loss.step()
            else:
                loss = target_loss
                loss.backward()
                optimizer_target.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))

    def test(epoch):
        global best_acc
        net.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs, loss_pred = net(inputs)
                loss = criterion(outputs, targets)
                loss = loss.mean()
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                    % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))

        # Save checkpoint.
        acc = 100.*correct/total
        if acc > best_acc:
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/ckpt.pth')
            best_acc = acc
        return acc

    final_acc = 0.0
    for epoch in range(start_epoch, start_epoch + 20):
        train(epoch)
        final_acc = test(epoch)
    return final_acc

def objective(trial):
    # Optuna samples a log-scale learning rate and an optimizer for each trial.
    params = {
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True),
        'optimizer': trial.suggest_categorical('optimizer', ['Adam', 'RMSprop', 'SGD']),
    }
    return train_and_evaluate(params)

if __name__ == "__main__":
    study = optuna.create_study(direction="maximize")
    # study.optimize passes the trial object itself, so hand it the objective
    # callable directly rather than calling it here.
    study.optimize(objective, timeout=600)

    best_trial = study.best_trial
    for key, value in best_trial.params.items():
        print("{}: {}".format(key, value))

    # plot_intermediate_values is only populated when trials report values via
    # trial.report(); this script does not, so expect an empty figure here.
    plot_objective_function = optuna.visualization.plot_intermediate_values(study)
    plot_objective_function.show()

    plot_history = optuna.visualization.plot_optimization_history(study)
    plot_history.show()

The cause of the original error: objective was defined as an instance method of the Trainer class and invoked as Trainer.objective(trial), so Python bound the trial object to the self parameter and reported the trial argument as missing. Defining objective as a module-level function that takes a single trial argument, and passing the callable itself to study.optimize as above, fixes the call from __main__.
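For reference, a minimal, model-free sketch of the calling convention study.optimize expects; the quadratic objective here is purely illustrative:

import optuna

# study.optimize passes a Trial object to the callable once per trial, so the
# objective must be a plain callable accepting a single trial argument.
def objective(trial):
    x = trial.suggest_float("x", -10.0, 10.0)
    return -((x - 2.0) ** 2)  # maximized at x = 2

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print(study.best_trial.params)  # expect x near 2

The same pattern scales to the full script: anything the objective needs besides the trial (datasets, device, epoch budget) can come from module scope, default arguments, or functools.partial rather than from self.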