Stagnant accuracy while using transfer learning

Hey guys, I am trying to apply transfer learning to the CIFAR-10 dataset. I have tried multiple learning rates, schedulers, and image augmentations, but nothing has worked: my accuracy is stuck at 81% and I can't seem to get it above 90%.

I am currently using the pre-trained VGG16 model.
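Setup and imports, roughly (sketch; the exact vgg16 call depends on the torchvision version, and the model variable is just called models in my snippets below):

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as tt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# stock torchvision VGG16 with ImageNet weights
# (newer torchvision versions use weights="IMAGENET1K_V1" instead of pretrained=True)
models = torchvision.models.vgg16(pretrained=True).to(device)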

Image augmentations:

transform = tt.Compose([
    tt.Resize(size=(224, 224)),          # VGG16 expects 224x224 inputs
    tt.ToTensor(),
    tt.Normalize(
        (0.4914, 0.4822, 0.4465),        # CIFAR-10 per-channel means
        (0.2023, 0.1994, 0.2010),        # CIFAR-10 per-channel stds
    ),
    tt.RandomPerspective(0.3, 0.3),      # random perspective warp (runs on the already-normalized tensor)
])
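Not shown above: train_dl and val_dl are plain DataLoaders over torchvision's CIFAR10. A sketch of how they are built (batch size and workers are just placeholders; the test split is used as validation):

from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10

train_ds = CIFAR10(root="./data", train=True, download=True, transform=transform)
val_ds = CIFAR10(root="./data", train=False, download=True, transform=transform)  # test split used as validation

train_dl = DataLoader(train_ds, batch_size=128, shuffle=True, num_workers=2)
val_dl = DataLoader(val_ds, batch_size=128, num_workers=2)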

fine-tuning the last layer:

models.classifier[6] = nn.Linear(4096, 10)   # replace the 1000-class ImageNet head with a 10-class head
models.classifier[6].requires_grad = True    # note: this only sets an attribute on the Module; the new layer's parameters already have requires_grad=True
models.classifier[6].to(device)
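For clarity, this is what I understand "training only the last layer" to mean, i.e. freezing the backbone and leaving just the new head trainable; a minimal sketch of that intent (my snippet above only swaps the head):

# freeze every pre-trained parameter
for param in models.parameters():
    param.requires_grad = False

# re-enable gradients only for the new classification head
for param in models.classifier[6].parameters():
    param.requires_grad = True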

functions for training:

def accuracy(output, labels):
    _, preds = torch.max(output, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))


def training_step(batch, model):
    # compute the training loss for one batch
    model.train()
    images, labels = batch
    images, labels = images.to(device), labels.to(device)
    output = model(images)
    loss = F.cross_entropy(output, labels)
    return loss

@torch.no_grad()  # no gradients needed during evaluation
def validation_step(batch, model):
    model.eval()
    images, labels = batch
    images, labels = images.to(device), labels.to(device)
    output = model(images)
    loss = F.cross_entropy(output, labels)
    acc = accuracy(output, labels)
    return {"val_loss": loss, "val_acc": acc}

def val_epoch_loss(val_dl, model):
    # run validation over the whole loader and average loss/accuracy
    epoch_validation_step = [validation_step(batch, model) for batch in val_dl]
    batch_loss = [x["val_loss"] for x in epoch_validation_step]
    epoch_loss = torch.stack(batch_loss).mean()

    batch_acc = [x["val_acc"] for x in epoch_validation_step]
    epoch_acc = torch.stack(batch_acc).mean()
    return {"Loss": epoch_loss.item(), "Accuracy": epoch_acc.item()}


def epoch_end(result, epoch):
    print("epoch: {}, val_loss: {}, val_acc: {}, train_loss: {}".format(
        epoch, result["Loss"], result["Accuracy"], result["train loss"]))

optimizer and lr scheduler:

optimizer = optim.Adam(models.parameters(), lr = 0.1)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 5, 0.8)
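For reference, with step_size=5 and gamma=0.8 (and lr_scheduler.step() called once per epoch), the learning rate over the 15 epochs is 0.1 for epochs 1-5, 0.1 × 0.8 = 0.08 for epochs 6-10, and 0.1 × 0.8² = 0.064 for epochs 11-15.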

the training loop:

def fit(epochs, model, train_dl, val_dl):
    history = []
    for epoch in range(epochs):
        train_loss = []
        for batch in train_dl:
            optimizer.zero_grad()
            loss = training_step(batch, model)
            train_loss.append(loss.detach())   # detach so each batch's graph can be freed
            loss.backward()
            optimizer.step()
        lr_scheduler.step()
        # validation portion
        results = val_epoch_loss(val_dl, model)
        results["train loss"] = torch.stack(train_loss).mean().item()
        history.append(results)
        epoch_end(results, epoch)
    return history

running the training:

history = fit(15, models, train_dl, val_dl)

I tried to keep the code as short as possible. Can anybody give me tips on getting past this plateau?