Train and Test supervised ResNet

Hi, I want to train a supervised ResNet18 on my own dataset. I wrote some code, but the accuracy is not improving as well as I would expect. Please, can you help me?

import torch
import torch.nn as nn
from collections import OrderedDict
from torchvision.models import resnet18, resnet34, resnet50

import matplotlib.pyplot as plt
import seaborn as sns
import os
from torchvision import transforms as T
from torchvision import datasets, transforms
import torchvision
import logging

import numpy as np
from pathlib import Path
 
from PIL import Image
import time

import math 
from tqdm import tqdm
import json
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from torch.cuda.amp import GradScaler, autocast
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision.models as models

# MODEL
def get_model(arch, out_dim):
    """Build a ResNet classifier and move it to the global ``device``.

    Args:
        arch: One of "resnet18", "resnet34", "resnet50".
        out_dim: Number of output classes (size of the final FC layer).

    Returns:
        The ResNet model, already placed on ``device``.

    Raises:
        ValueError: If ``arch`` is not one of the supported architectures.
    """
    # NOTE(review): `pretrained=` is deprecated in newer torchvision in favor
    # of `weights=`; kept as-is to match the installed API used by this file.
    if arch == "resnet18":
        resnet = resnet18(pretrained=False, num_classes=out_dim)
    elif arch == "resnet34":
        resnet = resnet34(pretrained=False, num_classes=out_dim)
    elif arch == "resnet50":
        resnet = resnet50(pretrained=False, num_classes=out_dim)
    else:
        # BUG FIX: the original only printed an error here and then crashed
        # with UnboundLocalError on `resnet.to(device)`. Fail fast instead.
        raise ValueError(
            "[ERROR] Define resnet18 or resnet34 or resnet50, got: {}".format(arch))

    print("[INFO] Training on architecture: {}".format(arch))

    resnet.to(device)

    return resnet

def plot_acc_loss(arr1, arr2, x_axes, y_axes, legend1, legend2, legend_name, fname):
    """Plot one or two per-epoch curves and save the figure to ``fname``.

    Args:
        arr1: First sequence of values (e.g. training losses), one per epoch.
        arr2: Optional second sequence (e.g. test losses); skipped if falsy.
        x_axes: X-axis label.
        y_axes: Y-axis label.
        legend1: Legend entry for ``arr1``.
        legend2: Legend entry for ``arr2``.
        legend_name: Figure title.
        fname: Output path for the saved PNG.
    """
    plt.figure(figsize=(10, 10))
    sns.set_style('darkgrid')
    plt.title(legend_name)
    # BUG FIX: the original passed the data arrays themselves as labels
    # (label=arr1 / label=arr2). Use the legend strings so each line is
    # labeled correctly even when only one curve is drawn.
    plt.plot(arr1, label=legend1)
    if arr2:
        plt.plot(arr2, label=legend2)
    plt.xlabel(x_axes)
    plt.ylabel(y_axes)
    # With per-line labels set, legend() picks them up automatically and
    # stays correct when arr2 is omitted (the original always listed both).
    plt.legend()
    plt.savefig(fname)
    plt.show()
    plt.close()

def get_mean_of_list(L):
    """Return the arithmetic mean of the numeric sequence ``L``.

    Raises ZeroDivisionError on an empty sequence (same as the original).
    """
    total = 0.0
    for value in L:
        total += value
    return total / len(L)

class Dataset(torch.utils.data.Dataset):
    """Image-classification dataset backed by a JSON filename→class mapping.

    Reads ``train.json``/``test.json`` (module-level ``train_dataset_path`` /
    ``test_dataset_path``), which map image filenames to class names, and
    ``mapper.json`` under ``datapath_own``, which maps class names to integer
    labels. Images live in ``<datapath_own>/<train|test>/``.
    """

    def __init__(self, train_test):
        """Load the filename and label mappings for the given split.

        Args:
            train_test: Either "train" or "test".

        Raises:
            ValueError: If ``train_test`` is not a recognized split (the
                original left ``self.filedict`` unset and failed later with
                AttributeError).
        """
        if train_test == "train":
            split_path = train_dataset_path
        elif train_test == "test":
            split_path = test_dataset_path
        else:
            raise ValueError('train_test must be "train" or "test", got: {}'.format(train_test))

        self.train_test = train_test

        with open(split_path) as f:
            self.filedict = json.load(f)  # filename -> class name

        with open(os.path.join(datapath_own, 'mapper.json')) as f:
            self.mapper = json.load(f)  # class name -> integer label

        self.filenames = list(self.filedict)

    def __len__(self):
        return len(self.filenames)

    def tensorify(self, img):
        """Convert a PIL image to a normalized tensor.

        NOTE(review): normalizes with mean/std 0.5 per channel rather than the
        usual ImageNet statistics — fine for training from scratch, but confirm
        this is intentional if pretrained weights are ever used.
        """
        tensor_image = T.ToTensor()(img)
        normalized_image = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(tensor_image)
        return normalized_image

    def __getitem__(self, idx):
        # The train and test branches were identical except for the folder
        # name, which equals self.train_test — deduplicated into one path.
        fname = self.filenames[idx]
        img = Image.open(os.path.join(datapath_own, self.train_test, fname)).convert('RGB')
        img = T.Resize((224, 224))(img)
        return {
            'image': self.tensorify(img),
            'label': self.mapper[self.filedict[fname]]}

# ---- Configuration (paths, dataset, architecture, hyper-parameters) ----
train_dataset_path = "/content/drive/MyDrive/Colab Notebooks/Self-Supervised-Learning/datasets/milli_imagenet/train/train.json"
test_dataset_path = "/content/drive/MyDrive/Colab Notebooks/Self-Supervised-Learning/datasets/milli_imagenet/test/test.json"
datapath_own = "/content/drive/MyDrive/Colab Notebooks/Self-Supervised-Learning/datasets/milli_imagenet"  # Path to the data root folder which contains train and test folders  
results_path = "/content/drive/MyDrive/Colab Notebooks/Supervised-Learning"

dataset = "milli_imagenet"
arch = "resnet18"
epochs = 50
batch_size = 250  
out_dim = 5  # number of classes

# BUG FIX: the original used `'cuda' if 1 else 'cpu'`, which always selects
# CUDA and crashes on CPU-only machines. Fall back to CPU when unavailable.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("[INFO] Device: {}".format(device))
resnet = get_model(arch, out_dim)

# create dataset for TRAINing and TESTing
print("[INFO] Preparing datasets")
dataloaders = {}
dataloaders['train'] = torch.utils.data.DataLoader(
                          Dataset(train_test="train"), 
                          batch_size=batch_size, 
                          # BUG FIX: the training loader used shuffle=False.
                          # Without shuffling, each batch sees the same (often
                          # class-ordered) samples every epoch, which badly
                          # hurts SGD convergence — a likely cause of the
                          # stalled accuracy. Test data stays unshuffled.
                          shuffle=True, 
                          num_workers=2)
    
dataloaders['test'] = torch.utils.data.DataLoader(
                          Dataset(train_test="test"), 
                          batch_size=batch_size, 
                          shuffle=False, 
                          num_workers=2)
print("[INFO] Datasets prepared")

# One results directory per run configuration.
specific_folder = 'dataset-{}_arch-{}_epochs-{}_batch-{}/'.format(dataset, arch, epochs, batch_size)
results_path = os.path.join(results_path, specific_folder)
Path(os.path.join(results_path)).mkdir(parents=True, exist_ok=True)

since = time.time()
losses_train_resnet, acc_train_resnet, losses_test_resnet, acc_test_resnet = [], [], [], []
max_test_acc, min_test_loss = 0.0, float('inf')
# BUG FIX: num_epochs was hardcoded to 50 while `epochs` (baked into the
# results folder name) was ignored — keep them in sync.
num_epochs = epochs
criterion = nn.CrossEntropyLoss()  # hoisted: no need to rebuild per batch
optimizer = torch.optim.Adam(resnet.parameters(), lr=0.001, weight_decay=1e-4)

for epoch in range(1, num_epochs + 1):

    # BUG FIX: .train() must be re-enabled every epoch. The original called it
    # once before the loop, then set .eval() for testing and never switched
    # back — so from epoch 2 onward, BatchNorm layers ran in eval mode during
    # training. This is a major reason accuracy fails to improve.
    resnet.train()

    epoch_losses_train, train_correct, train_total = [], 0.0, 0.0

    for sample_batched in dataloaders["train"]:
        input_image = sample_batched["image"].to(device)
        label_actual = sample_batched["label"].to(device)

        optimizer.zero_grad()
        label_predicted = resnet(input_image)
        loss = criterion(label_predicted, label_actual)
        loss.backward()
        optimizer.step()

        epoch_losses_train.append(loss.item())
        pred = label_predicted.argmax(dim=1)
        train_correct += (pred == label_actual).sum().item()
        train_total += label_actual.size(0)

    # per-epoch training metrics
    train_acc = train_correct / train_total
    train_loss = get_mean_of_list(epoch_losses_train)
    losses_train_resnet.append(train_loss)
    acc_train_resnet.append(train_acc)

    resnet.eval()

    epoch_losses_test, test_correct, test_total = [], 0.0, 0.0

    # no_grad: evaluation needs no gradients — saves memory and compute
    # (the original built the full autograd graph for every test batch).
    with torch.no_grad():
        for sample_batched in dataloaders["test"]:
            input_image = sample_batched['image'].to(device)
            label_actual = sample_batched['label'].to(device)

            label_predicted = resnet(input_image)
            loss = criterion(label_predicted, label_actual)
            epoch_losses_test.append(loss.item())

            pred = label_predicted.argmax(dim=1)
            test_correct += (pred == label_actual).sum().item()
            test_total += label_actual.size(0)

    # per-epoch test metrics; track the best seen so far (the original
    # declared max_test_acc / min_test_loss but never updated them)
    test_acc = test_correct / test_total
    test_loss = get_mean_of_list(epoch_losses_test)
    losses_test_resnet.append(test_loss)
    acc_test_resnet.append(test_acc)
    max_test_acc = max(max_test_acc, test_acc)
    min_test_loss = min(min_test_loss, test_loss)

    print("[INFO] Epoch: {} - Train Loss: {:.4} - Train Accuracy: {:.3}% - Test Loss: {:.4} - Test Accuracy: {:.3}%".format(epoch, train_loss, train_acc * 100, test_loss, test_acc * 100))

time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('[INFO] Best test accuracy: {:.3}% - Lowest test loss: {:.4}'.format(max_test_acc * 100, min_test_loss))

# plotting losses
plot_acc_loss(losses_train_resnet, losses_test_resnet, "Epochs", "Losses", "Training Loss", "Testing Loss", "Losses", os.path.join(results_path, 'train_test_losses.png'))

# plotting accuracies
plot_acc_loss(acc_train_resnet, acc_test_resnet, "Epochs", "Accuracies", "Training Acc", "Testing Acc", "Accuracies", os.path.join(results_path, 'train_test_acc.png'))