LSTM Autoencoder throwing an error during backward() function

My LSTM autoencoder is throwing an error during the backward() call. Below is the model.

class Encoder(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(Encoder, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=1, batch_first=True)

    def forward(self, x):
        output, (h, c) = self.lstm(x)
        # keep the final hidden state of the last layer: (batch, hidden_size)
        hidden_layer = h[-1, :, :]
        return hidden_layer

class Decoder(nn.Module):
    def __init__(self, seq_len, input_size, output_size):
        super(Decoder, self).__init__()

        self.seq_len = seq_len
        self.input_size = input_size
        self.layer1 = input_size * 2
        self.output_size = output_size
        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=self.layer1,
                            num_layers=1,
                            batch_first=True)
        self.fc = nn.Linear(self.layer1, output_size)

    def forward(self, x):
        # repeat the encoded vector along the time axis: (batch, seq_len, input_size)
        x = x.unsqueeze(1).repeat(1, self.seq_len, 1)
        x, (h, c) = self.lstm(x)
        x = x.reshape((-1, self.seq_len, self.layer1))
        out = self.fc(x)
        return out

# Autoencoder tying the encoder and decoder together
class LSTM_AE(nn.Module):
    def __init__(self, seq_len, input_size, hidden_size):
        super(LSTM_AE, self).__init__()

        self.seq_len = seq_len
        self.input_size = input_size
        self.hidden_size = hidden_size

        self.encoder = Encoder(self.input_size, self.hidden_size)
        self.decoder = Decoder(self.seq_len, self.hidden_size, self.input_size)

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
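
For reference, a quick shape check of this architecture with a dummy batch (the sizes here are placeholders, not my real data) runs fine on its own:

import torch

seq_len, n_features, batch = 20, 50, 8
ae = LSTM_AE(seq_len=seq_len, input_size=n_features, hidden_size=32)
dummy = torch.randn(batch, seq_len, n_features)
print(ae(dummy).shape)   # torch.Size([8, 20, 50]) -- reconstruction has the input's shape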

# Model
model = LSTM_AE(seq_len=max_len, input_size=final_embed[events[0]].size(2), hidden_size=32)
# Loss
loss_fn = nn.L1Loss(reduction='sum')
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

n_epochs = 1  # or whatever
losses = []

for epoch in range(n_epochs):
    for batch_idx, (a, b, c, d) in enumerate(loader):

        # a = a.clone().detach().requires_grad_(True)

        # in case you wanted a semi-full example
        decoded = model(a)
        print(decoded.size())

        loss = loss_fn(decoded, a)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 10 == 0:
            losses.append(loss.item())
            print("epoch {}.\tbatch {}.\tloss : {}".format(epoch, batch_idx, loss.item()))

This throws the following error:

RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
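
For context, the same error can be reproduced in isolation by calling backward() twice on a single graph (a toy example, unrelated to my data):

import torch

x = torch.randn(3, requires_grad=True)
y = (x * x).sum()
y.backward()   # frees the saved intermediate tensors of this graph
y.backward()   # raises the same RuntimeError, because the graph was already freed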

I am able to run this by adding requires_grad to the training batch, i.e. a in the forward call above, but I am unable to understand the problem here, as I generally don't do that when running other models in torch.
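
For reference, the change that makes it run is just un-commenting the line at the top of the inner loop, i.e. re-creating a inside every iteration:

for epoch in range(n_epochs):
    for batch_idx, (a, b, c, d) in enumerate(loader):
        # the workaround described above: rebuild `a` as a fresh tensor each iteration
        a = a.clone().detach().requires_grad_(True)

        decoded = model(a)
        loss = loss_fn(decoded, a)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()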

@aksve This has been explained in detail in the below post

That post talks about detaching the previous hidden state to avoid the problem of exploding/vanishing gradients. But here the problem is just getting the autoencoder to run: I am providing the input to the model with requires_grad = True in order for it to run successfully. However, that should not be necessary, since we do not update the input given to the model. So something is inherently going wrong, as ideally the input should not be updated.
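
For context, the pattern that post describes looks roughly like this (a minimal sketch with made-up sizes, separate from my autoencoder): the hidden state carried across batches is detached after each step so the next backward() does not try to go through the graph that was already freed:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
fc = nn.Linear(16, 8)
optimizer = torch.optim.Adam(list(lstm.parameters()) + list(fc.parameters()), lr=1e-3)

hidden = None
for step in range(5):
    x = torch.randn(4, 10, 8)              # dummy batch: (batch, seq_len, features)
    out, hidden = lstm(x, hidden)
    loss = nn.functional.l1_loss(fc(out), x)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # detach the carried-over state so the next iteration starts a fresh graph
    hidden = tuple(h.detach() for h in hidden)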

Please close this, as the right answer has been identified.