Speeding up fine-tuning of a pretrained Inception v3

I have the following code and it takes a very long time to run. Is there a way I could speed it up? I have 4 GPUs (GeForce RTX 3090) with 20 GB of VRAM each. I am training the model for 100 epochs on a dataset with the statistics below. All my images are 512x512 with 3 channels:

The number of images for each label in my binary classification problem is:

train
--label 0: 11597
--label 1: 13240
val
--label 0:  3477
--label 1:  2445
test
--label 0:  2709
--label 1:  4161
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=True):
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_epoch = 0
    metrics = {}
    for epoch in range(num_epochs):
        val_epoch_loss = 0.0
        train_epoch_loss = 0.0
        val_epoch_acc = 0.0
        train_epoch_acc = 0.0
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    # Special case for Inception: in training mode it also returns an auxiliary output.
                    #   In the train phase we compute the loss as the final output's loss plus a weighted
                    #   auxiliary loss, but in evaluation we only consider the final output.
                    if is_inception and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                
            if phase == 'train':
                train_epoch_loss = running_loss / len(dataloaders[phase].dataset)
                train_epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, train_epoch_loss, train_epoch_acc))
            else:
                val_epoch_loss = running_loss / len(dataloaders[phase].dataset)
                val_epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, val_epoch_loss, val_epoch_acc))
          

            # deep copy the model
            if phase == 'val' and val_epoch_acc > best_acc:
                best_acc = val_epoch_acc
                best_epoch = epoch
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(val_epoch_acc)
            
            metrics['train_loss'] = train_epoch_loss
            metrics['val_loss'] = val_epoch_loss
            wandb.log(metrics)
            wandb.log({"train loss": train_epoch_loss,
                       "val loss": val_epoch_loss,
                       "epoch": epoch})
            
            wandb.log({"train acc": train_epoch_acc,
                       "val acc": val_epoch_acc,
                       "epoch": epoch})
            
            wandb.log({"best val acc": best_acc, "epoch": epoch})

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f} and best epoch {}'.format(best_acc, best_epoch))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history


def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False
            

feature_extract = True  # set this to False to fine-tune the whole network instead of only the new classifier layers
model_ft = models.inception_v3(pretrained=True)
set_parameter_requires_grad(model_ft, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, args.num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, args.num_classes)
input_size = 299
model_name = "inception"
model_ft = model_ft.to(device)


optimizer_ft = torch.optim.Adam(model_ft.parameters(), lr=learning_rate, weight_decay=5e-4)  # best: 5e-4, 4e-3
##exp_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 40, 90], gamma=0.1)  # gamma=0.3  # 30,90,130 # 20,90,130 -> 150
# or
# Decay LR by a factor of 0.1 every epoch
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=1, gamma=0.1)
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=(model_name=="inception"))

torch.save(model_ft, 'balanced_model_ft_100e.pt')

I would generally recommend checking the PyTorch performance tuning guide (https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html) to see whether you are running into common performance issues.
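
Concretely, a few things usually dominate the runtime in a script like this: it only ever uses one of the four GPUs, the DataLoaders may be decoding the 512x512 images in a single worker process, and everything runs in full FP32. Below is a minimal, untested sketch of the usual fixes (multi-GPU via DataParallel, multi-worker loading with pinned memory, and automatic mixed precision). It reuses the names from your snippet (model_ft, criterion, optimizer_ft, device) and assumes a train_dataset object behind dataloaders_dict['train']; the batch size and worker count are placeholders to tune on your machine, not measured recommendations.

import torch
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler

torch.backends.cudnn.benchmark = True  # let cuDNN pick the fastest conv kernels for fixed-size inputs

# Use all four GPUs. DistributedDataParallel scales better, but DataParallel is a one-line change.
model_ft = torch.nn.DataParallel(model_ft)

# Multi-process loading with pinned host memory so copies to the GPU can overlap with compute.
# train_dataset is assumed to be the dataset behind dataloaders_dict['train'];
# batch_size / num_workers are placeholders, not tuned values.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True,
                          num_workers=8, pin_memory=True, persistent_workers=True)

# Mixed-precision training loop (train phase only; the validation phase can stay as it is).
scaler = GradScaler()
model_ft.train()
for inputs, labels in train_loader:
    inputs = inputs.to(device, non_blocking=True)
    labels = labels.to(device, non_blocking=True)
    optimizer_ft.zero_grad(set_to_none=True)
    with autocast():
        # Inception v3 returns (logits, aux_logits) in train mode
        outputs, aux_outputs = model_ft(inputs)
        loss = criterion(outputs, labels) + 0.4 * criterion(aux_outputs, labels)
    scaler.scale(loss).backward()
    scaler.step(optimizer_ft)
    scaler.update()

If the GPUs still sit mostly idle after these changes, the data pipeline itself is the bottleneck; resizing the 512x512 images to the 299x299 Inception input once, offline, rather than in every epoch's transforms, is usually the next step.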