Accuracy and loss remains constant

import torch 

import torchvision

import torch.nn as nn

import numpy as np

import torchvision.transforms as transforms

resnet=torchvision.models.resnet18(pretrained=True)

for param in resnet.parameters():

    param.requires_grad=False

resnet.fc=nn.Sequential(nn.Linear(resnet.fc.in_features,128),

                        nn.ReLU(inplace=True),

                        nn.Linear(128,2),

                       nn.Softmax())

data_dir="./hymenoptera_data/hymenoptera_data"

data_transforms = {

    'train': transforms.Compose([

        transforms.RandomResizedCrop(224),

        transforms.RandomHorizontalFlip(),

        transforms.ToTensor(),

        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    ]),

    'val': transforms.Compose([

        transforms.Resize(256),

        transforms.CenterCrop(224),

        transforms.ToTensor(),

        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    ]),

}

import os

image_datasets_train=torchvision.datasets.ImageFolder(os.path.join(data_dir,'train'),data_transforms['train'])

image_datasets_val=torchvision.datasets.ImageFolder(os.path.join(data_dir,'val'),data_transforms['val'])

train_loader=torch.utils.data.DataLoader(image_datasets_train,batch_size=4)

val_loader=torch.utils.data.DataLoader(image_datasets_val,batch_size=4)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

criterion=torch.nn.CrossEntropyLoss()

optimizer=torch.optim.Adam(resnet.fc.parameters(),lr=0.01)

from torch.optim import lr_scheduler

exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

if torch.cuda.is_available():

    resnet.cuda()

resnet.train()

for epoch in range(20):

    sumloss=0

    counter=0

    running_corrects = 0

    for inputs,labels in train_loader:

        inputs=inputs.to(device)

        labels=labels.to(device)

        optimizer.zero_grad()

        outputs=resnet(inputs)

        _, preds = torch.max(outputs, 1)

        loss=criterion(outputs,labels)

        sumloss+=loss.item()*inputs.size(0)

        running_corrects += torch.sum(preds == labels.data)

        loss.backward()

        optimizer.step()

    exp_lr_scheduler.step()

    epoch_acc = running_corrects.double() / len(image_datasets_train)

    print(sumloss/len(image_datasets_train),epoch_acc.item())

outputs=resnet(images)

print(outputs.size())

from PIL import Image

image=Image.open("cat.jpg")

normalize = transforms.Normalize(

   mean=[0.485, 0.456, 0.406],

   std=[0.229, 0.224, 0.225]

)

preprocess = transforms.Compose([

   transforms.Resize(256),

   transforms.CenterCrop(224),

   transforms.ToTensor(),

   normalize

])

img_tensor = preprocess(image)

img_tensor.unsqueeze_(0)

output=resnet(img_tensor)

output.squeeze_(0)

output.data.numpy().argmax()

I am trying to train a transfer learning model that can classify ants and bees. The model 's loss and accuracy are close to constant. I am new to pytorch. What are the changes to be made to make the model learn better ?

You use CrossEntropyLoss, which combines LogSoftmax and NLLLoss ! Therefore you can remove the nn.Softmax() from your Sequence (normally you also should give a dim e.g dim=1 in your case)!

But i don’t think this will solve your problem.