Please help, I got this RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation.

I can't find where the problem is; I can't spot any in-place operations in my code. The full error is:

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [1, 5]], which is output 0 of UnsqueezeBackward0, is at version 321; expected version 316 instead

Here is the forward code:
def forward(self, x):
    if len(x.shape) == 3:
        x1 = x.view(1, self.channels, self.bars, self.input_dim)
        x2 = self.norm(x1)
    else:
        x2 = self.norm(x)

    save = torch.zeros(x2.size()[0], self.act_size).to(device)
    out = torch.zeros(self.channels).to(device)

    for b in range(x2.size()[0]):
        y0 = x2[b][0].view(1, self.bars, self.input_dim)
        y1 = x2[b][1].view(1, self.bars, self.input_dim)
        y2 = x2[b][2].view(1, self.bars, self.input_dim)
        y3 = x2[b][3].view(1, self.bars, self.input_dim)
        y4 = x2[b][4].view(1, self.bars, self.input_dim)

        lstm_out_0, hid_0 = self.lstm_0(y0)
        lstm_out_1, hid_1 = self.lstm_1(y1)
        lstm_out_2, hid_2 = self.lstm_2(y2)
        lstm_out_3, hid_3 = self.lstm_3(y3)
        lstm_out_4, hid_4 = self.lstm_4(y4)

        out[0] = hid_0[0][-1]
        out[1] = hid_1[0][-1]
        out[2] = hid_2[0][-1]
        out[3] = hid_3[0][-1]
        out[4] = hid_4[0][-1]

        save[b] = self.dense(out)

    return save

And here is the traceback with anomaly detection enabled:

/pytorch/torch/csrc/autograd/python_anomaly_mode.cpp:57: UserWarning: Traceback of forward call that caused the error:
File “/usr/lib/python3.6/runpy.py”, line 193, in _run_module_as_main
“__main__”, mod_spec)
File “/usr/lib/python3.6/runpy.py”, line 85, in _run_code
exec(code, run_globals)
File “/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py”, line 16, in <module>
app.launch_new_instance()
File “/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py”, line 664, in launch_instance
app.start()
File “/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py”, line 499, in start
self.io_loop.start()
File “/usr/local/lib/python3.6/dist-packages/tornado/platform/asyncio.py”, line 132, in start
self.asyncio_loop.run_forever()
File “/usr/lib/python3.6/asyncio/base_events.py”, line 438, in run_forever
self._run_once()
File “/usr/lib/python3.6/asyncio/base_events.py”, line 1451, in _run_once
handle._run()
File “/usr/lib/python3.6/asyncio/events.py”, line 145, in _run
self._callback(*self._args)
File “/usr/local/lib/python3.6/dist-packages/tornado/platform/asyncio.py”, line 122, in _handle_events
handler_func(fileobj, events)
File “/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py”, line 300, in null_wrapper
return fn(*args, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py”, line 462, in _handle_events
self._handle_recv()
File “/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py”, line 492, in _handle_recv
self._run_callback(callback, msg)
File “/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py”, line 444, in _run_callback
callback(*args, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py”, line 300, in null_wrapper
return fn(*args, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py”, line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File “/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py”, line 233, in dispatch_shell
handler(stream, idents, msg)
File “/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py”, line 399, in execute_request
user_expressions, allow_stdin)
File “/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py”, line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File “/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py”, line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py”, line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File “/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py”, line 2822, in run_ast_nodes
if self.run_code(code, result):
File “/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py”, line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File “”, line 1223, in <module>
value_v = net_crt(states_v)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py”, line 541, in __call__
result = self.forward(*input, **kwargs)
File “”, line 998, in forward
save[b] = self.dense(out)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py”, line 541, in __call__
result = self.forward(*input, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py”, line 92, in forward
input = module(input)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py”, line 541, in __call__
result = self.forward(*input, **kwargs)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/modules/linear.py”, line 87, in forward
return F.linear(input, self.weight, self.bias)
File “/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py”, line 1372, in linear
output = input.matmul(weight.t())

I guess each of those out[i] = ... assignments is modifying out in place, but its value is still needed by the self.dense(out) call when the backward pass runs. Can you try not to modify out in place, and instead torch.cat() your hidden states together?
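Something along these lines should work (untested sketch; I'm assuming your LSTM hidden size is 1 so the concatenated vector has self.channels entries, i.e. the same shape self.dense already expects, and the helper names lstms and hiddens are just for illustration):

def forward(self, x):
    if len(x.shape) == 3:
        x2 = self.norm(x.view(1, self.channels, self.bars, self.input_dim))
    else:
        x2 = self.norm(x)

    lstms = [self.lstm_0, self.lstm_1, self.lstm_2, self.lstm_3, self.lstm_4]
    save = []
    for b in range(x2.size(0)):
        hiddens = []
        for c, lstm in enumerate(lstms):
            y = x2[b][c].view(1, self.bars, self.input_dim)
            _, hid = lstm(y)
            # hid[0][-1] is the final hidden state of the last layer; flatten it to 1-D
            hiddens.append(hid[0][-1].view(-1))
        out = torch.cat(hiddens)      # assembled out of place, so no version counter bump
        save.append(self.dense(out))
    return torch.stack(save)          # same (batch, act_size) shape as your old save tensor

The point is that out and save are built from fresh tensors on every iteration instead of being written into preallocated zero buffers, so autograd never sees an in-place modification of a tensor it needs for the backward pass.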

Thank you :v:
It works now!