I am not able to differentiate twice through a module with an nn.Embedding in it.
I am using PyTorch version 0.3.1.
Here is the code to reproduce the bug:
import torch
from torch.autograd import Variable

class Test(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embd = torch.nn.Embedding(1000, 100)
        self.dense = torch.nn.Linear(100, 1)

    def forward(self, inp):
        inp = self.embd(inp)
        return self.dense(inp)

test = Test()
test.cuda()
inp = Variable(torch.ones(10).long().cuda())
out = test(inp)
raw_loss = out.mean(dim=0)

# First-order gradients, built with create_graph=True so they can
# themselves be differentiated for the gradient-norm penalty.
loss_grad = torch.autograd.grad(outputs=raw_loss,
                                inputs=list(test.parameters()),
                                retain_graph=True, create_graph=True,
                                only_inputs=True)
norm = sum([grad.norm() ** 2 for grad in loss_grad])
loss = raw_loss + norm

# Second differentiation fails here.
loss.backward(retain_graph=True)
It fails with the following error trace:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-5-9e67f1421e58> in <module>()
22 loss = raw_loss + norm
23
---> 24 loss.backward(retain_graph=True)
~/miniconda3/lib/python3.6/site-packages/torch/autograd/variable.py in backward(self, gradient, retain_graph, create_graph, retain_variables)
165 Variable.
166 """
--> 167 torch.autograd.backward(self, gradient, retain_graph, create_graph, retain_variables)
168
169 def register_hook(self, hook):
~/miniconda3/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(variables, grad_variables, retain_graph, create_graph, retain_variables)
97
98 Variable._execution_engine.run_backward(
---> 99 variables, grad_variables, retain_graph)
100
101
RuntimeError: trying to differentiate twice a function that was marked with @once_differentiable
tagging @smth
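
In the meantime, a possible workaround is to express the lookup as a one-hot matrix multiplication, since mm supports double backward while the Embedding backward in 0.3.1 apparently does not (per the error above). Below is a minimal sketch under that assumption; MatmulEmbedding is a name I made up for the drop-in replacement, not anything from the library:

import torch
from torch.autograd import Variable

class MatmulEmbedding(torch.nn.Module):
    # Embedding lookup expressed as one_hot.mm(weight); every op in the
    # resulting graph (a constant one-hot matrix, then mm) is twice
    # differentiable.
    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.weight = torch.nn.Parameter(
            torch.randn(num_embeddings, embedding_dim) * 0.01)

    def forward(self, indices):
        # Constant one-hot matrix built from the discrete indices; no
        # gradient flows through it, only through self.weight.
        one_hot = torch.zeros(indices.size(0), self.num_embeddings)
        if indices.is_cuda:
            one_hot = one_hot.cuda()
        one_hot.scatter_(1, indices.data.view(-1, 1), 1)
        return Variable(one_hot).mm(self.weight)

Swapping self.embd = torch.nn.Embedding(1000, 100) for self.embd = MatmulEmbedding(1000, 100) in the Test module above should let loss.backward(retain_graph=True) go through, at the cost of materializing a dense batch-by-vocabulary one-hot matrix, so it is only practical for moderate vocabulary sizes.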