Problem with in-place op

I'm trying to find the in-place operation, but I don't see anything wrong. Calling clone() or detach() on hdn doesn't fix it either.
Here is the error:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [1130, 120]], which is output 0 of AsStridedBackward0, is at version 2; expected version 1 instead.

Here is the RNN class:

import torch
import torch.nn as nn



class RNN_One_To_Many(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN_One_To_Many, self).__init__()

        self.hidden_size = hidden_size

        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self, batch_size):
        return torch.zeros(batch_size, self.hidden_size)

And my training loop:

model = RNN_One_To_Many(1010, 120, 1010)
lr = 0.01

cr = torch.nn.CrossEntropyLoss()

opt = torch.optim.Adam(model.parameters(), lr)

with torch.autograd.set_detect_anomaly(True):

    for i, numwords in enumerate(train_d):

        opt.zero_grad()
        hdn = model.initHidden(1)
        out, hdn = model(numwords[0], hdn)
        dummytensor = torch.zeros((1, 1010))

        # numwords.shape == (num_words_in_sent, 1, max_value_words), one-hot encoded
        for i1 in range(1,numwords.shape[0]):
            hdn_copy = hdn.clone()
            out, hdn = model(dummytensor, hdn_copy)
            loss = cr(out, numwords[i1])
            loss.backward()
            opt.step()
            if i % 100 == 0:
                print(loss.item())


You would need to .detach() the hidden state, since you are currently trying to backpropagate through it multiple times. Each opt.step() updates the model's weights in place, and the next loss.backward() still walks back through the earlier time steps via hdn, where it finds the saved weight tensor at a newer version than the graph recorded (hence "is at version 2; expected version 1"). clone() doesn't help here because a clone stays attached to the old graph; detach() is what actually cuts it.
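
As a minimal sketch (reusing your variable names; the per-step opt.zero_grad() is my addition, since otherwise gradients keep accumulating across steps), the loop could look like this:

for i, numwords in enumerate(train_d):
    hdn = model.initHidden(1)
    out, hdn = model(numwords[0], hdn)
    dummytensor = torch.zeros((1, 1010))

    for i1 in range(1, numwords.shape[0]):
        # Cut the autograd graph: backward() below then only reaches
        # through the current time step, not through weights that
        # opt.step() has already modified in place.
        hdn = hdn.detach()
        opt.zero_grad()  # added: clear gradients before each step's backward
        out, hdn = model(dummytensor, hdn)
        loss = cr(out, numwords[i1])
        loss.backward()
        opt.step()
        if i % 100 == 0:
            print(loss.item())

If you instead want gradients to flow through the whole sequence (full backpropagation through time), accumulate the per-step losses into a single tensor inside the inner loop and call backward() and opt.step() once per sequence after it. As a side note, nn.CrossEntropyLoss already applies log_softmax internally, so combined with the LogSoftmax in your model you are applying it twice; either drop the LogSoftmax or switch to nn.NLLLoss.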