The loss value is not decreasing (stuck at approximately 1.5). Is there something wrong with this network?

from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import pandas as pd


# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                    help='learning rate (default: 0.0001)')
parser.add_argument('--momentum', type=float, default=0, metavar='M',
                    help='SGD momentum (default: 0)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=0, metavar='S',
                    help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)


kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)

# Convolutional Neural Network creation
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=2, padding=0)
        self.conv2 = nn.Conv2d(64, 32, kernel_size=2, stride=1, padding=1)
        self.conv2_drop = nn.Dropout2d(0.35)
        self.fc1 = nn.Linear(5408, 256)
        self.fc1_drop = nn.Dropout2d(0.50)
        self.fc2 = nn.Linear(256, 10)
     
    def forward(self, x):
        
        x = F.relu(self.conv1(x)) #x = F.relu(F.max_pool2d(self.conv1(x), 2))
        
        x = F.relu(self.conv2(x))
        
        x = F.max_pool2d(x, 2, 1)
        
        x = self.conv2_drop(x)
        
        x = x.view(-1, 5408)
        x = F.tanh(self.fc1(x))
        x = self.fc1_drop(x)
        
        #x = F.dropout(x, training=self.training)
        
        x = self.fc2(x)
        return F.softmax(x)

model = Net()

if args.cuda:
    model.cuda()

optimizer = optim.Adam(model.parameters())#, lr=args.lr, momentum=args.momentum)


def train(epoch):
    model.train()
    df = pd.DataFrame(columns=['loss','epoch'])
    index = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        
        if batch_idx % args.log_interval == 0:
            optimizer.zero_grad()
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            optimizer.step()
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))
            
            df = df.append({'loss': loss.data[0], 'epoch' : epoch}, ignore_index=True)
        
    with open('log.csv', 'a') as f:
        df.to_csv(f, header=False, sep='\t', columns=['loss','epoch'])
        


def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


for epoch in range(1, args.epochs + 1):
    train(epoch)
test()

Have you found a solution yet? I'm also stuck in a similar situation!

No, still stuck at the same point. I am waiting for a solution.

CrossEntropyLoss expects raw, unnormalized scores (logits) for each class as its input; it applies log_softmax internally.
You are applying softmax in your model, so just remove it.
Alternatively, you could change it to F.log_softmax and use NLLLoss instead.
Let me know if your model is learning!
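
For reference, here is a minimal sketch of that change applied to the Net above; the forward body is copied from the question and only the return value differs (Option B additionally assumes you switch the loss calls in train() and test() to F.nll_loss):

# Option A: return raw scores (logits) and keep F.cross_entropy,
# which applies log_softmax internally.
def forward(self, x):
    x = F.relu(self.conv1(x))
    x = F.relu(self.conv2(x))
    x = F.max_pool2d(x, 2, 1)
    x = self.conv2_drop(x)
    x = x.view(-1, 5408)
    x = F.tanh(self.fc1(x))
    x = self.fc1_drop(x)
    return self.fc2(x)          # no softmax here

# Option B: return log-probabilities and use the negative log-likelihood loss.
#     return F.log_softmax(self.fc2(x), dim=1)      # last line of forward
#     loss = F.nll_loss(output, target)             # in train() and test()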


Try different weight initializations; maybe your model is unable to break symmetry!
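
For example, here is a sketch of what that could look like with the torch.nn.init API (this assumes a PyTorch version where the in-place initializers such as nn.init.kaiming_normal_ exist; model is the Net instance from the question):

def init_weights(m):
    # Apply Kaiming (He) initialization to every conv and linear layer;
    # biases are simply zeroed.
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)

model.apply(init_weights)   # recursively applies init_weights to all submodules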