When I try to optimize variables, ValueError: optimizer got an empty parameter list

Hi, I have a problem here. I have a sequence of Variables, and I use them to calculate outputs as follows. After that, when I try to optimize the model, "RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn" is shown.
What should I do?

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np

e = 2.71

class monotonicNet(nn.Module):
    def __init__(self, inputDim, numUnit, numGroup):
        super(monotonicNet, self).__init__()

        self.numUnit = numUnit
        self.numGroup = numGroup

        self.var = nn.Parameter(torch.randn(numUnit * numGroup, requires_grad=True))

        self.params = list(self.var)

    def forward(self, x):
        x = torch.ones(self.numUnit * self.numGroup) * x
        result = torch.mul(torch.exp(self.var), x).view(size=(self.numGroup, self.numUnit))
        max = torch.max(result, dim=1)
        min = torch.min(max[0])
        return min

    def getVars(self):
        return self.var

class BackMonotonicNet(nn.Module):
    def __init__(self, numUnit, numGroup, vars):
        super(BackMonotonicNet, self).__init__()

        self.numUnit = numUnit
        self.numGroup = numGroup
        self.vars = vars

    def forward(self, x):
        x = torch.ones(self.numUnit * self.numGroup) * x
        result = torch.mul(torch.exp(-self.vars), x).view(size=(self.numGroup, self.numUnit))
        min = torch.min(result, dim=0)
        max = torch.max(min[0], dim=0)
        return max[0]

class PayNet(nn.Module):
    def __init__(self, numUser):
        super(PayNet, self).__init__()

        self.numUser = numUser

        self.rl = nn.ReLU()
    def forward(self,x):
        x = torch.as_tensor(x)
        x = self.rl(x)
        return x

class AllocNet(nn.Module):
    def __init__(self, numUser, k):
        super(AllocNet, self).__init__()
        self.numUser = numUser
        self.k = k
    def forward(self, x):
        x = torch.as_tensor(x)
        x = torch.div(torch.exp(self.k * x), torch.sum(torch.exp(self.k * x)))
        return x

class AuctionNet(nn.Module):
    def __init__(self, numUser, numUnit, numGroup, k):
        super(AuctionNet, self).__init__()

        self.numUser = numUser
        self.numUnit = numUnit
        self.numGroup = numGroup

        self.rl1 = nn.ReLU()
        self.monoNets = [monotonicNet(1, numUnit, numGroup) for i in range(numUser)]
        self.monoVars = [self.monoNets[i].getVars() for i in range(numUser)]
        self.backNets = [BackMonotonicNet(numUnit, numGroup, self.monoVars[i]) for i in range(numUser)]
        self.pay = PayNet(numUser)
        self.allocNet = AllocNet(numUser, k)

        for i in range(numUser):
            nn.Parameter(self.monoNets[i].getVars())

    def forward(self, x):
        x = x.float()
        posBD = self.rl1(x)
        transBD = []
        for i in range(self.numUser):
            transBD.append(self.monoNets[i](posBD[i]))
        probs = self.allocNet(transBD)
        pays = self.pay(torch.tensor(transBD))
        payment = []
        for i in range(self.numUser):
            payment.append(self.backNets[i](pays[i]))
        loss = - torch.sum(torch.mul(probs, torch.as_tensor(payment)))
        return probs, payment, loss

if __name__=="__main__":
    # Training Setting
    epoch = 1
    # Auction Setting
    numUser = 5
    numUnit = 3
    numGroup = 5
    k = 1

    Auction = AuctionNet(numUser, numUnit, numGroup, k)
    optimizer = optim.SGD(Auction.parameters(), lr=0.001, momentum=0.9)
    optimizer.zero_grad()


    for i in range(epoch):

        bids = np.random.randn(numUser)
        bids = torch.FloatTensor(bids)

        probs, pays, loss  = Auction(bids)
        print("Bids : ", bids, " / Probs : ", probs, " / Payment : ", pays[np.argmax(probs)])



        loss.backward()
        optimizer.step()

        if i % 10 == 0:  # print every 10 epochs
            print('[{}] loss: {:.3f}'.format(i, loss.item()))


Where are you setting Auction to be trainable (.train())? The parameters you pass to the optimizer must include something with the requires_grad flag set to True.
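As a quick check (assuming Auction is the AuctionNet instance built in your code above), you can print what the optimizer would actually receive:

params = list(Auction.parameters())
print(len(params))  # 0 here, which is why SGD raises "optimizer got an empty parameter list"
for p in params:
    print(p.shape, p.requires_grad)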

After I inserted .train() like this, I got the following error:

Auction = AuctionNet(numUser, numUnit, numGroup, k)
Auction.train()
optimizer = optim.SGD(Auction.monoVars, lr=0.001, momentum=0.9)
optimizer.zero_grad()

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

:frowning: PyTorch is so hard to use.

You shouldn't put modules in plain Python lists, as this won't register these modules and their parameters properly. Use nn.ModuleList for your submodules instead.
Once this is done, pass the parameters to the optimizer via torch.optim.SGD(Auction.parameters(), ...).
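A minimal sketch of the fix, keeping your class names from above and assuming the rest of the code stays the same:

class AuctionNet(nn.Module):
    def __init__(self, numUser, numUnit, numGroup, k):
        super(AuctionNet, self).__init__()
        self.numUser = numUser
        self.numUnit = numUnit
        self.numGroup = numGroup
        self.rl1 = nn.ReLU()
        # nn.ModuleList registers each submodule, so their parameters
        # show up in Auction.parameters()
        self.monoNets = nn.ModuleList(
            [monotonicNet(1, numUnit, numGroup) for _ in range(numUser)]
        )
        # the back nets reuse the (already registered) parameters of the mono nets;
        # shared parameters are deduplicated by .parameters()
        self.backNets = nn.ModuleList(
            [BackMonotonicNet(numUnit, numGroup, self.monoNets[i].getVars())
             for i in range(numUser)]
        )
        self.pay = PayNet(numUser)
        self.allocNet = AllocNet(numUser, k)

With that in place, the original optimizer line should work as intended:

optimizer = optim.SGD(Auction.parameters(), lr=0.001, momentum=0.9)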

Also, these lines of code shouldn’t have any effect:

        for i in range(numUser):
            nn.Parameter(self.monoNets[i].getVars())
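That call creates an nn.Parameter and immediately discards the result; a parameter is only registered when it's assigned as an attribute of the module (or stored in an nn.ParameterList). A small self-contained illustration:

import torch
import torch.nn as nn

class Demo(nn.Module):
    def __init__(self):
        super(Demo, self).__init__()
        nn.Parameter(torch.randn(3))           # result discarded -> not registered
        self.w = nn.Parameter(torch.randn(3))  # attribute assignment -> registered

demo = Demo()
print([name for name, _ in demo.named_parameters()])  # prints ['w']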