Hi, I'm having a problem with my code. I want to implement a loss function myself (the loss in the code below is greatly simplified), but I don't understand why the parameters and the loss are not updated even though the gradient is non-zero.
Thanks! Here is the code:
import torch
import torch.nn as nn
import torch.optim as optim

class LOSS(nn.Module):
    def __init__(self, x):
        super(LOSS, self).__init__()
        # nn.Parameter already sets requires_grad=True
        self.para = nn.Parameter(x.clone())

    def forward(self):
        F = 0
        m = 1
        # sum of squared differences between consecutive entries of para
        for time in range(1, len(self.para)):
            F += 1 / 2 * m * (self.para[time] - self.para[time - 1]) ** 2
        return F
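Written out, the simplified loss I'm trying to minimize is (with mass m = 1 here):

F(x) = \sum_{t=1}^{N-1} \frac{1}{2}\, m \,(x_t - x_{t-1})^2

so the gradient with respect to each x_t depends on the differences to its neighboring entries, and should be non-zero as long as consecutive entries of para differ.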
x = torch.rand(50)
model = LOSS(x)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

model.__init__(x)  # re-runs __init__, which replaces self.para with a new Parameter
optimizer.zero_grad()
loss = model()     # the forward pass returns the loss itself
loss.backward()
optimizer.step()
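For comparison, this is the kind of loop I expected to drive the loss down (a minimal sketch of the usual PyTorch pattern; n_steps is just an arbitrary iteration count I picked for illustration):

x = torch.rand(50)
model = LOSS(x)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

n_steps = 100  # hypothetical iteration count, chosen for illustration
for step in range(n_steps):
    optimizer.zero_grad()  # clear gradients from the previous iteration
    loss = model()         # forward pass computes the loss directly
    loss.backward()        # populate self.para.grad
    optimizer.step()       # update self.para in place

With this loop I would expect loss to decrease toward zero as the entries of para equalize, but I'm not sure where my version above goes wrong.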