ValueError: optimizer got an empty parameter list when defining an FM module

I have a problem when I train the model: it raises a ValueError("optimizer got an empty parameter list"). I have been searching for a resolution and need your help, thanks very much!

Here is my module code:

import torch
import torch.nn as nn


class TorchFM(nn.Module):
    def __init__(self, input_dim):
        super(TorchFM, self).__init__()
        # plain tensors assigned as attributes (this is the version that fails)
        self.w0 = torch.tensor(torch.zeros(1), requires_grad=True)
        self.w = torch.tensor(torch.zeros(input_dim), requires_grad=True)
        self.v = nn.init.kaiming_normal_(torch.tensor(torch.zeros((input_dim, 10)), requires_grad=True))

    def forward(self, x):
        torch.set_grad_enabled(True)
        linear_terms = torch.add(self.w0, torch.sum(torch.mul(self.w, x)))
        pair_interactions = 0.5 * torch.sum(torch.sub(torch.pow(torch.matmul(x, self.v), 2),
                                                      torch.matmul(torch.pow(x, 2), torch.pow(self.v, 2))))

        out = torch.add(linear_terms, pair_interactions)
        return out
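
The module itself constructs fine; the failure only shows up when I build the optimizer, because model.parameters() comes back empty. A minimal reproduction of the error (the 5 here is just an arbitrary input dimension):

import torch

model = TorchFM(5)
print(list(model.parameters()))                    # prints: []  (nothing got registered)
optimizer = torch.optim.Adam(model.parameters())   # raises ValueError: optimizer got an empty parameter list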

And here is the code with which I train the model:

loss_object = torch.nn.MSELoss()
model = TorchFM(x_train.shape[1])
optimizer = torch.optim.Adam(model.parameters())

EPOCH = 100

for e in tqdm(range(EPOCH)):
    for i, (x, y) in enumerate(train_loader):
        optimizer.zero_grad()
        predictions = model(x)

        writer.add_graph(model, x)
        loss = loss_object(predictions, y)
        writer.add_scalar("train_loss", torch.mean(loss.values()))
        loss.backward()
        optimizer.step()

    for i, (x, y) in enumerate(test_loader):
        test_pred = model(x)
        loss = loss_object(test_pred, y)
        writer.add_scalar("test_loss", torch.mean(loss.values()))
        # writer.add_graph(model, x)
writer.close()
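
For completeness, train_loader, test_loader, and writer are built before this loop, roughly like the sketch below, assuming x_train, y_train, x_test, y_test are NumPy arrays and using a tensorboard SummaryWriter (the batch size and log directory are arbitrary placeholders; adjust the dtypes to match the model's parameters):

import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

train_dataset = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).float())
test_dataset = TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test).float())
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)   # batch size is a placeholder
test_loader = DataLoader(test_dataset, batch_size=64)
writer = SummaryWriter("runs/fm")                                       # log directory is a placeholder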

I have solved this problem by using nn.Parameter, as follows:

class TorchFM(nn.Module):
    def __init__(self, input_dim):
        super(TorchFM, self).__init__()
        # create the raw tensors first...
        w0 = torch.zeros(1).double()
        w = torch.zeros(input_dim).double()
        v = torch.empty(input_dim, 10).double()
        nn.init.xavier_normal_(v)

        # ...then wrap them in nn.Parameter so that nn.Module registers them
        # and model.parameters() is no longer empty
        self.w0 = nn.Parameter(w0)
        self.w = nn.Parameter(w)
        self.v = nn.Parameter(v)

    def forward(self, x):
        torch.set_grad_enabled(True)
        linear_terms = torch.add(self.w0, torch.sum(torch.mul(self.w, x)))
        pair_interactions = 0.5 * torch.sum(torch.sub(torch.pow(torch.matmul(x, self.v), 2),
                                                      torch.matmul(torch.pow(x, 2), torch.pow(self.v, 2))))

        out = torch.add(linear_terms, pair_interactions)
        return out
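
With this version, model.parameters() yields the three tensors and the Adam optimizer can be constructed; a quick check (again with an arbitrary input dimension of 5):

model = TorchFM(5)
print([p.shape for p in model.parameters()])       # [torch.Size([1]), torch.Size([5]), torch.Size([5, 10])]
optimizer = torch.optim.Adam(model.parameters())   # no ValueError any more

One thing to watch: because the parameters are created with .double(), the batches passed into forward need to be double precision as well (x.double()), otherwise the matmul will complain about mismatched dtypes; alternatively the .double() calls can be dropped.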