Calling a function in another function with lots of arguments

I want to call a function inside another function, but its arguments have to be defined in that other function, which is producing new errors. Is there a way around this in PyTorch?

I have a function that takes multiple arguments, and I want to call that function from another function using PyTorch:

def function1(a, b, c, d, e, z):
    ...

def function2(f, g):
    z = f + g
    model = function1(a, b, c, d, e, z)  # a, b, c, d, e are not defined inside function2

How do I define all these variables a, b, c, d, e inside function2?
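For the toy example, the only approaches I can think of are passing the extra values into function2 or bundling them in a dict, roughly like the sketch below (placeholder names and values, not my real code), but I am not sure this scales to the PyTorch code further down:

# Sketch 1: pass the extra values through function2 explicitly
def function2(f, g, a, b, c, d, e):
    z = f + g
    return function1(a, b, c, d, e, z)

# Sketch 2: bundle the extra values in a dict and unpack it
def function2(f, g, extra):
    z = f + g
    return function1(**extra, z=z)

model = function2(1, 2, extra={'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})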

def query_samples(method, data_unlabeled, subset, labeled_set, cycle, args, param):
    if method == 'VAAL':
        # Create unlabeled dataloader for the unlabeled subset
        unlabeled_loader = DataLoader(data_unlabeled, batch_size=BATCH,
                                      sampler=SubsetSequentialSampler(subset),
                                      pin_memory=True)

        labeled_loader = DataLoader(data_unlabeled, batch_size=BATCH, 
                                    sampler=SubsetSequentialSampler(labeled_set), 
                                    pin_memory=True)
        if args.dataset == 'fashionmnist':
            vae = VAE(28,1,3)
            discriminator = Discriminator(28)
        else:
            vae = VAE()
            discriminator = Discriminator(32)
        models      = {'vae': vae, 'discriminator': discriminator}
        
        
        #OPTUNA for VAAL method 
        #optim_vae = optim.Adam(vae.parameters(), lr=5e-4)
        optim_vae = getattr(optim, param['optimizer'])(vae.parameters(), lr= param['learning_rate'])
        
        #optim_discriminator = optim.Adam(discriminator.parameters(), lr=5e-4)
        optim_discriminator = getattr(optim, param['optimizer'])(discriminator.parameters(), lr= param['learning_rate'])
        
        optimizers = {'vae': optim_vae, 'discriminator': optim_discriminator}

        train_vaal(models, optimizers, labeled_loader, unlabeled_loader, cycle+1)
        
        all_preds, all_indices = [], []

        for images, _, indices in unlabeled_loader:                       
            images = images.cuda()
            with torch.no_grad():
                _, _, mu, _ = vae(images)
                preds = discriminator(mu)

            preds = preds.cpu().data
            all_preds.extend(preds)
            all_indices.extend(indices)

        all_preds = torch.stack(all_preds)
        all_preds = all_preds.view(-1)
        # need to multiply by -1 to be able to use torch.topk 
        all_preds *= -1
        # select the points which the discriminator thinks are the most likely to be unlabeled
        _, arg = torch.sort(all_preds) 
    return arg

I want to call this in the objective function defined below

def objective(trial, args, method):
        params = {
            'optimizer': trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"]),
            'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1)
        }
        cycle = CYCLES
        subset = SUBSET

        indices = list(range(NUM_TRAIN))
        random.shuffle(indices)

        if args.total:
            labeled_set= indices
        else: 
            labeled_set = indices[:ADDENDUM]
            unlabeled_set = [x for x in indices if x not in labeled_set]
        
        data_unlabeled = load_dataset(args.dataset)

        model = query_samples(method, data_unlabeled, subset, labeled_set, cycle, args, params)
        accuracy = test(params, model)

        return accuracy
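For context, my understanding is that an objective with extra arguments is handed to Optuna through a small wrapper, roughly like this (the study settings and n_trials here are just placeholders, not my exact code):

import optuna

# wrap the objective so study.optimize() only sees `trial`;
# args and method are captured from the surrounding scope
study = optuna.create_study(direction='maximize')
study.optimize(lambda trial: objective(trial, args, method), n_trials=20)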

I would be very thankful if someone could give me an idea of how to avoid defining all these arguments (method, data_unlabeled, subset, labeled_set, cycle, args, params) for query_samples() inside the objective function.
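Would it, for example, be reasonable to collect everything query_samples() needs into one dict and unpack it at the call site? Something like this sketch, reusing the names from my code above (untested):

# gather the non-trial arguments in a single dict...
query_args = {
    'method': method,
    'data_unlabeled': data_unlabeled,
    'subset': subset,
    'labeled_set': labeled_set,
    'cycle': cycle,
    'args': args,
    'param': params,
}

# ...and unpack it instead of listing every argument by hand
model = query_samples(**query_args)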