Can anybody give me recommendations to improve my model's accuracy?

I am currently training on the CIFAR-10 dataset, but I can't seem to get past the 75% accuracy mark. Am I missing something? I am also trying to use transfer learning to further boost accuracy, roughly as sketched below.
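For context, this is the kind of transfer-learning setup I have in mind (just a sketch: the torchvision ResNet-18 backbone and the frozen-feature-extractor approach here are illustrative assumptions, not the model I'm actually running further down):

import torch.nn as nn
from torchvision import models

# Sketch only: ImageNet-pretrained ResNet-18 as a frozen feature extractor.
# Note: 32x32 CIFAR-10 images are usually resized up (e.g. transforms.Resize(224))
# before being fed to an ImageNet-pretrained backbone.
backbone = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
for param in backbone.parameters():
    param.requires_grad = False                       # freeze the pretrained weights
backbone.fc = nn.Linear(backbone.fc.in_features, 10)  # new trainable head for 10 classes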

import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def accuracy(outputs, labels):
    # fraction of predictions that match the labels
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))
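For example, on a made-up batch of three-class logits:

logits = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1]])  # toy logits, two samples
labels = torch.tensor([0, 2])
print(accuracy(logits, labels))  # tensor(0.5000): one of the two predictions matches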



def training_step(batch, model):
    # forward pass and loss for a single training batch
    images, labels = batch
    images, labels = images.to(device), labels.to(device)
    output = model(images)
    loss = F.cross_entropy(output, labels)
    return loss


def validation_loss(val_inputs, model):
    # loss and accuracy for a single validation batch, with gradients disabled
    model.eval()
    with torch.no_grad():
        images, labels = val_inputs
        images, labels = images.to(device), labels.to(device)
        output = model(images)
        loss = F.cross_entropy(output, labels)
        acc = accuracy(output, labels)
        return {"loss": loss.detach(), "Accuracy": acc}


def validation_combine_loss(val_dl, model):
    # average the per-batch validation loss and accuracy over the whole loader
    loss_accuracy = [validation_loss(batch, model) for batch in val_dl]

    batch_losses = [x["loss"] for x in loss_accuracy]
    epoch_loss = torch.stack(batch_losses).mean()

    batch_accs = [x["Accuracy"] for x in loss_accuracy]
    epoch_acc = torch.stack(batch_accs).mean()

    return {"Loss": epoch_loss.item(), "Accuracy": epoch_acc.item()}



def epoch_end(result, epoch):
    # report the last learning rate, validation metrics, and training loss for the epoch
    print("epoch: {}, last_lr: {:.5f}, val_loss: {:.4f}, val_accuracy: {:.4f}, train_loss: {:.4f}".format(
        epoch, result["lrs"][-1], result["Loss"], result["Accuracy"], result["train loss"]))

def conv_block(in_channels, out_channels, pool=False):
    # 3x3 conv -> batch norm -> ReLU, optionally followed by a 2x2 max-pool
    layers = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
              nn.BatchNorm2d(out_channels),
              nn.ReLU(inplace=True)]
    if pool:
        layers.append(nn.MaxPool2d(2))
    return nn.Sequential(*layers)
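For a CIFAR-sized input, a pooled block keeps the 32x32 resolution through the padded 3x3 conv and then halves it:

x = torch.randn(1, 3, 32, 32)         # dummy CIFAR-shaped input
block = conv_block(3, 64, pool=True)
print(block(x).shape)                 # torch.Size([1, 64, 16, 16])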

class ResNet9(nn.Module):
    def __init__(self, in_channels, num_classes):
        super().__init__()

        # shape comments assume 3 x 32 x 32 CIFAR-10 inputs
        self.conv1 = conv_block(in_channels, 64)                # -> 64 x 32 x 32
        self.conv2 = conv_block(64, 128, pool=True)             # -> 128 x 16 x 16
        self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))

        self.conv3 = conv_block(128, 256, pool=True)            # -> 256 x 8 x 8
        self.conv4 = conv_block(256, 512, pool=True)            # -> 512 x 4 x 4
        self.res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))

        self.classifier = nn.Sequential(nn.MaxPool2d(4),        # -> 512 x 1 x 1
                                        nn.Flatten(),           # -> 512
                                        nn.Dropout(0.2),
                                        nn.Linear(512, num_classes))

    def forward(self, xb):
        out = self.conv1(xb)
        out = self.conv2(out)
        out = self.res1(out) + out   # residual connection
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.res2(out) + out   # residual connection
        out = self.classifier(out)
        return out


model = ResNet9(3, 10)  # 3 input channels, 10 CIFAR-10 classes
model = model.to(device)
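As a quick sanity check, a random CIFAR-shaped batch (just a stand-in for real data) should come out as one logit per class:

dummy = torch.randn(2, 3, 32, 32).to(device)  # fake batch of two 3x32x32 images
print(model(dummy).shape)                     # torch.Size([2, 10])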


def fit(epochs, train_set, val_dl, model, lr):  # model trainer
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=0.001)
    # one-cycle schedule whose peak learning rate is the lr argument
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, max_lr=lr, epochs=epochs, steps_per_epoch=len(train_set))

    def get_lr():
        # current learning rate of the (single) parameter group
        for param_group in optimizer.param_groups:
            return param_group["lr"]

    history = []

    for epoch in range(epochs):
        model.train()
        train_loss = []
        lrs = []
        for batch in train_set:
            loss = training_step(batch, model)
            train_loss.append(loss.detach())  # detach so the graph isn't kept alive
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()
            optimizer.zero_grad()
            lrs.append(get_lr())
            scheduler.step()
        # validation
        results = validation_combine_loss(val_dl, model)
        results["lrs"] = lrs
        results["train loss"] = torch.stack(train_loss).mean().item()
        epoch_end(results, epoch)
        history.append(results)
    return history


history = fit(40, train_set, val_set, model, 0.0001)  # train the model