RuntimeError from an in-place operation during backpropagation in an Encoder-Decoder model

Hi, can anyone please help me resolve this issue?
I have created a simple CRNN model (an encoder-decoder) that is running into trouble during backpropagation.
import torch
import torch.nn as nn

class OCR_EncoderDecoder(nn.Module):

    def __init__(self, hidden_size1, hidden_size2, output_size, verbose=False):
        super(OCR_EncoderDecoder, self).__init__()

        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.output_size = output_size

        self.encoder_cnn_cell = nn.Sequential(
            nn.Conv2d(3, 28, 3, padding=(0, 0), stride=1),   # ----> layer-1
            nn.BatchNorm2d(28),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(28, 32, 3, padding=(0, 0)),            # ----> layer-2
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=(0, 0)),            # ----> layer-3
            nn.Conv2d(64, 128, 3, padding=(1, 1)),           # ----> layer-4
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 3, padding=(1, 1)),          # ----> layer-5
            nn.BatchNorm2d(256),
            nn.ReLU()
        )

        # 1536 = 256 channels * encoder feature-map height (6 for my input size)
        self.Conv_Linear = nn.Sequential(
            nn.Linear(1536, self.hidden_size1))

        self.decoder_rnn_cell1 = nn.GRU(self.hidden_size1, self.hidden_size2)
        self.decoder_rnn_cell2 = nn.GRU(self.hidden_size2, self.output_size)

        self.Rnn_Linear = nn.Sequential(
            nn.Linear(self.output_size, self.output_size),
            nn.LogSoftmax(dim=2))

        self.verbose = verbose

    def forward(self, batch):

        # Encoder: CNN feature extractor
        batch = self.encoder_cnn_cell(batch)
        batch = batch.permute(0, 3, 2, 1)

        # Decoder: flatten the feature maps per time step, project, run the GRUs
        batch = batch.reshape(batch.size(0), batch.size(1), -1)
        batch = self.Conv_Linear(batch)
        batch, _ = self.decoder_rnn_cell1(batch)
        batch, _ = self.decoder_rnn_cell2(batch)
        batch = self.Rnn_Linear(batch)

        return batch

U = OCR_EncoderDecoder(128,256,129,False)
B = U(u)
B.shape #torch.Size([2, 26, 129])
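# Shape note: B is (N=2, T=26, C=129), while nn.CTCLoss expects log-probs
# of shape (T, N, C) -- hence the per-sample reshape to (26, 1, 129) below.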
LOSS = {}
Loss = 0
for k in range(25):
    opt.zero_grad()
    for i in range(B.size(0)):
        input_ = B[i].view(B.size(1), 1, B.size(2))
        op = gt_rep(v[i], hindi_alpha2index)
        op = op.view(1, -1)
        input_length = torch.full(size=(1,), fill_value=input_.size(0), dtype=torch.long)
        target_length = torch.randint(low=1, high=op.size(1), size=(1,), dtype=torch.long)
        print(input_.shape)
        print(op.shape)
        print(input_length, target_length)
        loss = loss_fn(input_, op, input_length, target_length)
        loss.backward(retain_graph=True)
        Loss += loss
    print(k)
    LOSS[k] = Loss / 2
    opt.step()
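
For context, loss_fn is a CTC loss. Below is a minimal standalone sketch of an equivalent call, assuming the stock nn.CTCLoss with its default blank index; the dummy tensors stand in for my real gt_rep(v[i], hindi_alpha2index) targets:

import torch
import torch.nn as nn

loss_fn = nn.CTCLoss()  # assumption: default blank=0, reduction='mean'

# Dummy tensors matching the shapes printed above: T=26, N=1, C=129
log_probs = torch.randn(26, 1, 129).log_softmax(2)
targets = torch.randint(1, 129, (1, 10), dtype=torch.long)   # stand-in for gt_rep(...)
input_lengths = torch.full((1,), 26, dtype=torch.long)
target_lengths = torch.full((1,), 10, dtype=torch.long)

loss = loss_fn(log_probs, targets, input_lengths, target_lengths)
print(loss)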

I am using CTC loss and trying to backpropagate, but it raises this error:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [129, 129]], which is output 0 of TBackward, is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
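
Following the hint in the message, anomaly detection can be enabled before the training loop to pinpoint the operation that fails:

import torch

# Records a traceback for every forward op, so the backward error above
# will point at the exact line that produced the failing gradient.
torch.autograd.set_detect_anomaly(True)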

Kindly help me solve this issue.