I have the following model:
import torch
import torch.nn as nn

class DefaultModel(nn.Module):
    def __init__(self, guess, K):
        super(DefaultModel, self).__init__()
        # guess = [-0.7541, -0.044, 0.0916, 1.5914, -0.0017, 1.4991]
        self.T = torch.tensor(guess).unsqueeze(0)  # plain tensor attribute
        self.T.requires_grad = True
        self.K = K

    def forward(self, datapoints, draw):
        Mat = pose_vec2mat(self.T).squeeze(0)  # pose_vec2mat defined elsewhere
        loss = stuff(datapoints)               # stuff defined elsewhere
        return loss
Normally, with hand-coded gradient descent, I would do something like this:
model = DefaultModel(guess, K)
gamma = torch.tensor([5e-5, 5e-5, 5e-5, 2e-5, 2e-5, 2e-5])

for i in range(0, 10000):
    loss = model(datapoints, draw=[1, 0, 5, 6])
    loss.backward()
    with torch.no_grad():
        model.T = model.T - gamma * model.T.grad
    model.T.requires_grad = True
However, if I want to do this using the Adam optimizer:
model = DefaultModel(guess, K)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
it crashes with:

ValueError: optimizer got an empty parameter list
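Inspecting the module confirms nothing is registered (a quick check, assuming guess and K are defined as above):

model = DefaultModel(guess, K)
print(list(model.parameters()))  # prints [], because self.T is a plain tensor
                                 # and is never registered as a module parameter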
How can I get my variable T, which requires grad, into the parameter list?
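My current guess is that T needs to be registered explicitly, for example by wrapping it in nn.Parameter inside __init__ (a sketch of what I mean, not something I have verified):

self.T = nn.Parameter(torch.tensor(guess).unsqueeze(0))  # nn.Parameter requires grad by default
                                                         # and is registered with the module

Is that the right approach, or is there another way to expose T to the optimizer?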