I'm trying to perform gradient descent on a point, but loss.backward() fails because requires_grad is False.
My code:
import torch
import torch.nn as nn
import torch.optim as optim

class Net(nn.Module):
    def __init__(self, p, P1, P2):
        super(Net, self).__init__()
        # p is the 3D point I want to optimize; P1/P2 are the two projection matrices
        self.p = torch.tensor(p, dtype=torch.float32, requires_grad=True)
        self.P1 = torch.tensor(P1, dtype=torch.float32)
        self.P2 = torch.tensor(P2, dtype=torch.float32)
        self.one = torch.tensor([1], dtype=torch.float32)

    def forward(self):
        # homogeneous coordinates: append 1, project with P1/P2, divide by the last component
        p = torch.cat([self.p, self.one])
        u1_h = torch.matmul(self.P1, p)
        u2_h = torch.matmul(self.P2, p)
        u1 = u1_h[:2] / u1_h[2]
        u2 = u2_h[:2] / u2_h[2]
        return torch.cat([u1, u2])
net = Net(out_3d[landmark_idx, :], P1, P2)
net.train()

criterion = nn.MSELoss()
optimizer = optim.SGD([net.p], lr=0.001, momentum=0.9)
label = torch.Tensor([[u1, v1, u2, v2]])

for iter in range(10):
    optimizer.zero_grad()
    outputs = net()
    loss = criterion(outputs, label)
    print(f'landmark #{landmark_idx}: {iter} - {loss}')
    loss.backward()
    optimizer.step()
Apparently, right after p = torch.cat([self.p, self.one]) inside forward(), p.requires_grad is already False.
Any ideas why?
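For comparison, here is a minimal standalone sketch of the same cat (with placeholder values instead of my real p, P1, P2), which is what I would expect to happen, at least when nothing disables grad tracking:

import torch

a = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)  # stand-in for self.p
b = torch.tensor([1.0])                                 # stand-in for self.one
c = torch.cat([a, b])
print(c.requires_grad)  # prints True in this standalone case

So I don't understand why the same torch.cat inside my forward() ends up with requires_grad set to False.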