ValueError: optimizer got an empty parameter list

```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
import error_generate
import os
import torch.nn as nn
import torch.utils.data as Data
from torch.autograd import Variable
import torch.nn.functional as F

'''third part: neural network'''
'''This part should follow a fully connected neural network, with the connecting edges modified
according to H. Residual connections & SI also need to be introduced; once residual connections
are added, some training techniques have to change as well, see the ResNet papers for reference.'''
#first part: hyperparameters
L = 4
P = [0.01]
H = torch.from_numpy(error_generate.generate_PCM(2 * L * L - 2, L))
h_prep = error_generate.H_Prep(H)
H_prep = torch.from_numpy(h_prep.get_H_Prep())
BATCH_SIZE = 120
#torch.manual_seed(1)
run = 12000
train_num = 10
lr = 20e-4
Nc = 15

#data
dataset = error_generate.gen_syn(P, L, H, run)

class CustomDataset(Data.Dataset):
    def __init__(self, dataset):
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[2 * index], self.dataset[2 * index + 1]

    def __len__(self):
        return int(len(self.dataset) / 2)

#x should be the prior distribution plus the syndromes, y should be the errors

#build NNet: translation invariance is still missing
class ResidualBlock(torch.nn.Module):
    def __init__(self, H):
        super(ResidualBlock, self).__init__()
        self.H = Variable(H, requires_grad = False)
        self.rows, self.cols = H.size()
        self.B = Variable(torch.eye(self.cols), requires_grad = True)  # gradients should be switched off for all off-diagonal entries
        self.B_vec = Variable(torch.ones(self.cols, 1), requires_grad = True)
        self.W_check = Variable((torch.ones(self.rows, self.rows) - torch.eye(self.rows)), requires_grad = True)  # gradients should be switched off on the diagonal
        self.W_var = Variable((torch.ones(self.cols, self.cols) - torch.eye(self.cols)), requires_grad = True)  # gradients should be switched off on the diagonal
        self.W_vec = Variable(torch.ones(self.rows, 1), requires_grad = True)
    # M_check is the input (from the previous round) and the final output (for the next round); each block also
    # needs the L matrix and the syndrome vector. Both the input and the output can be tuples, with the necessary
    # intermediate quantities computed inside the block. The input and output formats must stay consistent, and
    # every block's output has to be recorded and added to the final loss.
    def forward(self, x):
        L, Syn, M_check, results, train = x
        output = (self.B.mm(L)).mm(self.H.t())
        output += M_check.t().mm(self.W_check)
        for i in range(self.cols):
            for j in range(self.rows):
                if self.H.t()[i, j] == 0:
                    output[i, j] = 0
        output = torch.log(torch.nn.functional.tanh(output / 2))
        output = output.t().mm(self.W_var)
        for i in range(self.rows):
            for j in range(self.cols):
                if self.H[i, j] == 0:
                    output[i, j] = 0
        output.exp_()
        output = 0.5 * torch.log((1 + output) / (1 - output))
        for s, i in zip(Syn[:, 0 : self.rows], range(self.rows)):
            output[i, :] *= (-1) ** s
        output = output.t().mm(self.W_vec)
        output += L.mm(self.B_vec)
        if train == 1:
            output += M_check
        results.append(output)
        return L, Syn, output, results, train

class NNBP(torch.nn.Module):
    def __init__(self, H, Nc, train):
        super(NNBP, self).__init__()
        self.H = H
        self.rows, self.cols = H.size()
        self.Nc = Nc
        self.layer = self._make_layer()
        self.train = train

    def _make_layer(self):
        layers = []
        for i in range(self.Nc):
            layers.append(ResidualBlock(self.H))
        return torch.nn.Sequential(*layers)

    def forward(self, x):
        L = torch.zeros(self.cols, self.cols)  # diagonal matrix built from the priors in x[0]
        for i in range(self.cols):
            L[i, i] = x[0, i]
        Syn = x[1, :]
        M_check_init = torch.zeros(self.rows, self.cols)
        # note: PyTorch feeds a whole batch in at once, which may cause problems here
        x = (L, Syn, M_check_init, [], self.train)
        x = self.layer(x)
        return x[3]

class LossFunc(torch.nn.Module):
    def __init__(self, H_prep):
        super(LossFunc, self).__init__()
        # hyperparameters would be defined here
        self.H_prep = H_prep

    def forward(self, err, results):
        # the actual loss computation
        # for now only H is taken into account, not L
        loss = 0
        for i in results:
            loss += torch.sum(torch.sin((self.H_prep.mm(err + torch.nn.functional.sigmoid(i))) * (math.pi / 2)))
        loss /= len(results)
        return loss

#train: also check how the network performs before any training. Use Adam and dropout.
#This function can train and save the network, load a trained NN for testing, or test an untrained network.
def train(H, lr, L, train_num, train, load):
    #step1: model
    loss_sum = 0
    rows, cols = H.shape
    decoder = NNBP(H, Nc, train)
    if load:
        decoder.load_state_dict(torch.load('decoder_parameters_L=%d_train=%d.pkl' % (L, train)))
    decoder.cuda()
    #step2: load data
    torch_dataset = CustomDataset(dataset)
    loader = Data.DataLoader(
        dataset = torch_dataset,    # the custom Dataset defined above
        batch_size = BATCH_SIZE,    # mini batch size
        shuffle = True,             # random shuffle for training
        num_workers = 10,           # subprocesses for loading data
    )

    #step3: loss func & optimizer
    criterion = LossFunc(H_prep)
    optimizer = torch.optim.Adam(nn.ParameterList(decoder.parameters()), lr = lr)

    #step4: train
    for epoch in range(train_num):   # train over the entire dataset train_num times
        for step, (data, target) in enumerate(loader):  # for each training step
            data, target = Variable(data), Variable(target)
            data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            results = decoder(data)
            loss = criterion(target, results)
            loss_sum += criterion(target, results[-1:])   # loss of the last layer only
            if train:
                loss = criterion(target, results)
                loss.backward()
                for p in decoder.parameters():
                    if len(p.shape) == 2:
                        for i in range(p.shape[0]):
                            for j in range(p.shape[0]):
                                if p[0, 0] == 0:
                                    p[i, i].grad = 0
                                else:
                                    if i != j:
                                        p[i, j].grad = 0

                optimizer.step()
    loss_eval = loss_sum / run
    if not load:
        torch.save(decoder.state_dict(), '.\model\decoder_parameters_L=%d_train=%d.pkl' % (L, train))
    return loss_eval

if __name__ == '__main__':

    loss = train(H, lr, L, 1, 0, 0)
    print(loss)

```

The code is hard to read and quite long. Unless you format it and add proper comments, it's very unlikely to get an answer.

I can only guess that you have to wrap these weight variables in nn.Parameter() so that they get registered with the module; right now decoder.parameters() returns an empty list, which is exactly what the optimizer is complaining about.