I’m getting the following error for the code listed below; any suggestions are appreciated!
File “scripts/rnn/rnn.py”, line 132, in
train_load_save_model(rnn,path,device)
File “scripts/rnn/rnn.py”, line 104, in train_load_save_model
train(model_obj,device)
File “scripts/rnn/rnn.py”, line 86, in train
outputs,hidden = model_inp(inputs,hidden)
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/nn/modules/module.py”, line 1051, in _call_impl
return forward_call(*input, **kwargs)
File “scripts/rnn/rnn.py”, line 59, in forward
hidden = self.lin1(combined.view((batch_size,size_in+size_hidden))).view((batch_size,size_hidden))
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/nn/modules/module.py”, line 1051, in _call_impl
return forward_call(*input, **kwargs)
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/nn/modules/linear.py”, line 96, in forward
return F.linear(input, self.weight, self.bias)
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/nn/functional.py”, line 1847, in linear
return torch._C._nn.linear(input, weight, bias)
(Triggered internally at …/torch/csrc/autograd/python_anomaly_mode.cpp:104.)
allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
Traceback (most recent call last):
File “scripts/rnn/rnn.py”, line 132, in
train_load_save_model(rnn,path,device)
File “scripts/rnn/rnn.py”, line 104, in train_load_save_model
train(model_obj,device)
File “scripts/rnn/rnn.py”, line 91, in train
loss.backward()
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/_tensor.py”, line 255, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
File “/Users/dennis/Library/Python/3.7/lib/python/site-packages/torch/autograd/init.py”, line 149, in backward
allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [256, 128]]
class RNNModel(nn.Module):
    """Minimal Elman-style RNN cell built from two linear layers.

    ``lin1`` maps the concatenated ``(input, hidden)`` vector to the next
    hidden state; ``lin2`` maps the same concatenation to the output.

    Args:
        size_in: number of input features per timestep.
        size_hidden: number of hidden-state features.
        size_out: number of output features.
    """

    def __init__(self, size_in, size_hidden, size_out):
        super(RNNModel, self).__init__()
        # Store sizes on the instance so forward()/initHidden() no longer
        # depend on module-level globals (the original read size_in,
        # size_hidden, and batch_size from the enclosing scope).
        self.size_in = size_in
        self.size_hidden = size_hidden
        # BUG FIX: the hidden-state projection must emit size_hidden
        # features. The original used nn.Linear(size_in+size_hidden, size_in)
        # and then .view()-ed the result as (batch, size_hidden), which is
        # only valid when size_in == size_hidden and silently scrambles the
        # tensor otherwise.
        self.lin1 = nn.Linear(size_in + size_hidden, size_hidden)
        self.lin2 = nn.Linear(size_in + size_hidden, size_out)

    def forward(self, input, hidden):
        """Run one step.

        Args:
            input: tensor of shape ``(batch, size_in)``.
            hidden: tensor of shape ``(batch, size_hidden)``.

        Returns:
            ``(output, new_hidden)`` with shapes ``(batch, size_out)`` and
            ``(batch, size_hidden)``.
        """
        # Concatenation along dim 1 already yields
        # (batch, size_in + size_hidden); no reshape via a global
        # batch_size is needed.
        combined = torch.cat((input, hidden), 1)
        # Rebind to a fresh tensor rather than modifying `hidden` in place.
        # NOTE(review): the "modified by an inplace operation" RuntimeError
        # during loss.backward() is typically caused by reusing/overwriting
        # a tensor that the graph still needs across training steps —
        # detach the hidden state between sequences in the training loop.
        new_hidden = self.lin1(combined)
        output = self.lin2(combined)
        return output, new_hidden

    def initHidden(self):
        """Return a zeroed initial hidden state of shape ``(1, size_hidden)``."""
        return torch.zeros(1, self.size_hidden)
My thought is that the highlighted line (the hidden-state update in `forward`) is to blame, but I don’t know how to remedy the issue.