ValueError: optimizer got an empty parameter list!

I am new to PyTorch and I have run into a problem.

This is my code:

#-*-coding:utf-8-*-

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Hyper parameters
num_epochs = 20
batchsize = 100
lr = 0.001

# MNIST Dataset
train_dataset = dsets.MNIST(root='/Users/hushengyou/PycharmProjects/NJU_contest/homework3/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=False)
test_dataset = dsets.MNIST(root='/Users/hushengyou/PycharmProjects/NJU_contest/homework3/',
                           train=False,
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batchsize,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batchsize,
                                          shuffle=False)

# CNN model
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        nn.conlayer1 = nn.Sequential(
            nn.Conv2d(1,6,3),
            nn.Sigmoid(),
            nn.MaxPool2d(2))
        nn.conlayer2 = nn.Sequential(
            nn.Conv2d(6,16,3),
            nn.Sigmoid(),
            nn.MaxPool2d(2))
        nn.fc = nn.Sequential(
            nn.Linear(400,120),
            nn.Linear(120,84),
            nn.Linear(84,10))

    def forward(self, x):
        out = self.conlayer1(x)
        out = self.conlayer2(out)
        out = out.view(out.size(0),-1)
        out = self.fc(out)
        return out


cnn = CNN()

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=lr)

# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs,labels)
        loss.backward()
        optimizer.step()

        if (i+1)%100 == 0:
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f' %
                  (epoch+1,num_epochs,i+1,len(train_dataset)//batchsize,loss.data[0]))

# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var)
correct = 0
total = 0
for images, labels in test_loader:
    outputs = cnn(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()
print('Test Accuracy of the model on test images: %.6f%%' % (100.0*correct/total))

#Save the Trained Model
torch.save(cnn.state_dict(),'cnn.pkl')

Can anybody help me solve this problem? Thank you very much!

You are not defining your layers correctly. Because the layers are assigned to nn instead of self, they are never registered as submodules of your CNN, so cnn.parameters() returns an empty list and the optimizer raises that error.

Change:

nn.conlayer1, nn.conlayer2, nn.fc

to:

self.conlayer1, self.conlayer2, self.fc

Should work.

Something like this:

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conlayer1 = nn.Sequential(
            nn.Conv2d(1,6,3),
            nn.Sigmoid(),
            nn.MaxPool2d(2))
        self.conlayer2 = nn.Sequential(
            nn.Conv2d(6,16,3),
            nn.Sigmoid(),
            nn.MaxPool2d(2))
        self.fc = nn.Sequential(
            nn.Linear(400,120),
            nn.Linear(120,84),
            nn.Linear(84,10))

    def forward(self, x):
        out = self.conlayer1(x)
        out = self.conlayer2(out)
        out = out.view(out.size(0),-1)
        out = self.fc(out)
        return out
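
If you want to double-check the fix, a quick sanity check (just a sketch, assuming the corrected CNN class above is in scope) is to count the registered parameters before building the optimizer:

cnn = CNN()
# Non-zero now that the layers are attached to self and registered by nn.Module
print(sum(p.numel() for p in cnn.parameters()))
# No longer raises "ValueError: optimizer got an empty parameter list"
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)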

The problem is solved.

Thanks a lot!!!

I am stupid…

You are welcome. Glad it helped.

That was exactly my problem! Thank you!