.ValueError: optimizer got an empty parameter list

root = "/home/hu/.PyCharmCE2018.2/config/scratches/data/corel_5k/"
best_F1 = 0
lr = 0.001
step = 0
viz = visdom.Visdom()
# 定义是否使用GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default:none)')
parser.add_argument('--epochs', default=160, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('-e', '--evaluate', dest='evaluate',
                    help='evaluate model on validation set')

args = parser.parse_args()
resnet50 = models.resnet50(pretrained=True)
resnet50.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
resnet50.fc = nn.Linear(2048, 50)


def feature_layer():
    layers = []
    for name, layer in resnet50._modules.items():
        if isinstance(layer, nn.Conv2d):
            layers += []
        else:
            continue
    features = nn.Sequential(*layers)
    return features


class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.features = feature_layer()

    def forward(self, x):
        x = self.features(x)
        return x


model = net().to(device)
if device == 'cuda':
    model = torch.nn.DataParallel(model)
    cudnn.benchmark == True
pretrained_dict = resnet50.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)


class LoadDataset():
    def __init__(self, txt, transform):
        self.txt = txt
        fh = open(self.txt, 'r')
        imgs = []
        self.transform = transform
        for line in fh:
            line = line.strip('\n')
            line = line.rstrip()
            words = line.split()
            image1 = words[0]
            image1 = int(image1)
            image1 = image1 // 1000
            image1 = image1 * 1000
            image1 = '%d' % image1
            imageList = root + 'images/' + image1 + '/' + words[0] + '.jpeg'
            words.pop(0)
            lableList = list(map(int, words))
            lableList = np.array(lableList)
            lableList = torch.from_numpy(lableList)
            imgs.append((imageList, lableList))
        self.imgs = imgs

    def __getitem__(self, item):
        image, label = self.imgs[item]
        image = Image.open(image)
        img = transform(image)
        return img, label

    def __len__(self):
        return len(self.imgs)


transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.3865, 0.3995, 0.3425), (0.2316, 0.2202, 0.2197)),
])
trainset = LoadDataset(txt=root + 'labels/training_label', transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)
valset = LoadDataset(txt=root + 'labels/val_label', transform=transform)
valloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)

x, train_F1, test_F1 = 0, 0, 0
win = viz.line(
    X=np.array([x]),
    Y=np.column_stack((np.array([train_F1]), np.array([test_F1]))),
    opts=dict(
        legend=["train_F1", "test_F1"]
    )
)


def main():
    global args, best_prec1, lr
    args = parser.parse_args()
    print("=> loading checkpoint '{}'".format('model_best.pth.tar'))
    checkpoint = torch.load('model_best.pth.tar')
    args.start_epoch = checkpoint['epoch']
    best_F1 = checkpoint['best_F1']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format('model_best.pth.tar', checkpoint['epoch']))

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(list(model.parameters()), lr=lr)

    if args.evaluate:
        validate(valloader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        print("epoch = %d" % epoch)
        adjust_LR(optimizer, epoch)  # adjust learning_rate
        train_loss, train_F1 = train(trainloader, model, criterion, optimizer, epoch)
        test_loss, test_F1 = validate(valloader, model, criterion)
        is_best = test_F1 > best_F1
        best_F1 = max(test_F1, best_F1)
        viz.line(
            X=np.array([epoch]),
            Y=np.column_stack((np.array([train_F1]), np.array([test_F1]))),
            win=win,
            update="append"
        )
        save_checkpoint({
            'epoch': i + 1,
            'state_dict': model.state_dict(),
            'best_F1': best_F1,
            'optimizer': optimizer.state_dict(),
        }, is_best)


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')


def adjust_LR(optimizer, epoch):
    lr = lr * (0.1 ** (epoch // 40))
    for param_group in optimizer.param_group:
        param_group['lr'] = lr


def train(trainloader, model, criterion, optimizer, epoch):
    model.train()
    for i, (input, target) in enumerate(trainloader):
        step += 1
        input = input.to(device)
        if torch.cuda.is_available():
            target = target.cuda()
        output = model(input)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        F1 = metrics.f1_score(target, output, average='weighted')
        print('train : step = %d, F1 = %.3f' % (step, loss, F1))
    return loss, F1


def validate(valloader, model, criterion):
    model.eval()
    f1_total = 0
    loss_total = 0
    total = 0
    for i, (input, target) in enumerate(valloader):
        input = input.to(device)
        if torch.cuda.is_available():
            target = target.cuda()
        output = model(input)
        loss = criterion(output, target)
        f1 = metrics.f1_score(target, output, average='weighted')
        loss_total += loss
        f1_total += f1
        total += 1
    loss = loss_total / total
    f1 = f1_total / total
    print('val: test_loss = %.4f, test_F1' % (loss, f1))
    return loss, f1


print("start test")
testset = LoadDataset(txt=root + 'labels/test_label', transform=transform)
testloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(list(model.parameters()), lr=lr)
f1_total = 0
loss_total = 0
total = 0
for i, (input, target) in enumerate(testloader):
    input = input.to(device)
    if torch.cuda.is_available():
        target = target.cuda()
    output = model(input)
    loss = criterion(output, target)
    f1 = metrics.f1_score(target, output, average='weighted')
    loss_total += loss
    f1_total += f1
    total += 1
loss = loss_total / total
f1 = f1_total / total
print('test: test_loss = %.4f, test_F1' % (loss, f1))

Above is my code.And when I ran it,it told me:

Traceback (most recent call last):
** File “/home/hu/下载/Corel5k (2).py”, line 231, in **
** optimizer = torch.optim.Adam(list(model.parameters()), lr=lr)**
** File “/home/hu/.local/lib/python3.6/site-packages/torch/optim/adam.py”, line 41, in init**
** super(Adam, self).init(params, defaults)**
** File “/home/hu/.local/lib/python3.6/site-packages/torch/optim/optimizer.py”, line 38, in init**
** raise ValueError(“optimizer got an empty parameter list”)**
ValueError: optimizer got an empty parameter list

How can I fix it?
Appreciate for answering!

you have not included any trainable layers in your model.

Thank you for your quick answer!

So should I add some conv layers in __init __?

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.features = feature_layer()

    def forward(self, x):
        x = self.features(x)
        return x

I’m a beginner so the question might be kind of stupid,pardon:)

The optimizer is optimizing the model weights, so there needs to be some parameters to optimize. Indeed, conv, or linear layer would work fine. Did that solve your problem?

Yeah,thanks a lot,I’ve worked it out.:slight_smile: