.backward() error

Hi everyone, my code is:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3)
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        self.conv4 = nn.Conv2d(in_channels=24, out_channels=48, kernel_size=3)
        self.pool4 = nn.MaxPool2d(kernel_size=2)
        self.conv5 = nn.Conv2d(in_channels=48, out_channels=96, kernel_size=3)
        self.fc1 = nn.Linear(in_features=55296, out_features=512)
        self.fc2 = nn.Linear(in_features=512, out_features=128)
        self.fc3 = nn.Linear(in_features=128, out_features=12)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool2(x)
        x = self.conv3(x)
        #x = F.relu(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.pool4(x)
        x = self.conv5(x)
        #print(x.shape)
        x = torch.flatten(x, start_dim=0)  # start_dim=0 also flattens away the batch dimension
        x = self.fc1(x)
        #print(x)
        #print(x.shape)
        #x = F.relu(x)
        #x = self.fc2(x)
        #x = F.relu(x)
        #x = self.fc3(x)
        #print(x)
        return x.unsqueeze(0).unsqueeze(0)


class SGRNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(SGRNN, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, num_layers=1, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input, h):
        out, h = self.rnn(input, h)
        #print(out.shape)
        out1 = self.fc(out)
        #print(out1.shape)
        return out1
```
```python
window = 3

input_size = 524 * window
hidden_size = 200
num_layers = 1
output_size = 12

rnn_model = SGRNN(input_size, hidden_size, output_size)
cnn_model = CNN()
#resnet_model = ResNet18()

# train

#optimizer = optim.Adam(list(cnn_model.parameters()) + list(rnn_model.parameters()), lr=0.01)  # --> backward problem

optimizer = optim.Adam(rnn_model.parameters(), lr=0.01)

num_epochs = 100
best_loss = float('inf')

rnn_model.train()
cnn_model.train()

for epoch in range(num_epochs):

    tot_loss = 0

    for i in range(len(x_train)):
        hidden = torch.zeros(1, 1, hidden_size)

        loss = 0

        output = torch.zeros(1, window, output_size)
        output = torch.cat((output, y_train[i][1][0].unsqueeze(0).unsqueeze(0)), dim=1)
        outputs = output

        prev_features = torch.zeros(1, window, 512)
        prev_features = torch.cat((prev_features, cnn_model(x_train[i][1][0])), dim=1)

        seq_loss = 0
        for j in range(len(x_train[i][1]) - 1):
            with torch.autograd.detect_anomaly():
                optimizer.zero_grad()

                features = cnn_model(x_train[i][1][j].clone())
                prev_features = torch.cat((prev_features, features), dim=1)

                if j < window - 1:
                    input = torch.cat((torch.zeros(1, window - j - 1, 512), prev_features[:, 0:j + 1, :]), dim=1)
                    input = torch.cat((outputs[:, -window:, :], input), dim=2)
                    input = torch.flatten(input, start_dim=0)
                else:
                    input = torch.cat((outputs[:, -window:, :], prev_features[:, -window:, :]), dim=2)
                    input = torch.flatten(input, start_dim=0)

                target = y_train[i][1][j + 1].detach()

                output = rnn_model(input.unsqueeze(0).unsqueeze(0), hidden)

                outputs = torch.cat((outputs, target.unsqueeze(0).unsqueeze(0)), dim=1)
                #outputs = torch.cat((outputs, output), dim=1)

                loss = F.l1_loss(output, target.unsqueeze(0).unsqueeze(0))

                '''if epoch % 5 == 0 and epoch != 0:
                    print('target: ', target)
                    print('output: ', output)'''

                seq_loss += loss
                loss.backward()
                #torch.nn.utils.clip_grad_norm_(list(cnn_model.parameters()) + list(rnn_model.parameters()), 1.0)
                optimizer.step()

        seq_loss /= len(x_train[i][1])
        tot_loss += seq_loss

    print("Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, num_epochs, tot_loss / len(x_train)))
```

When I run it, I get the error 'RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.'.
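For reference, here is a tiny snippet (nothing to do with my model, just how I understand the error) that raises the same RuntimeError by backpropagating through the same graph twice:

```python
import torch
import torch.nn as nn

# Two losses share the graph of `h`; the first backward() frees the tensors
# saved for that graph, so the second backward() cannot traverse it again.
x = torch.randn(1, 3)
lin = nn.Linear(3, 2)
h = lin(x)
loss1 = h.sum()
loss2 = (h * 2).sum()
loss1.backward()   # frees the saved tensors of h's graph
loss2.backward()   # RuntimeError: Trying to backward through the graph a second time
```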

Using `with torch.autograd.detect_anomaly():`, it tells me the problem is line 136, which is `features = cnn_model(x_train[i][1][j].clone())`. I tried different things but the error still remains.
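My guess is that `prev_features` keeps growing with `torch.cat`, so every step's graph still points back into graphs that earlier `backward()` calls have already freed. Here is a stripped-down sketch of that pattern with a made-up toy model (not my real code); uncommenting the `detach()` line makes the toy run, but I am not sure it is the right fix for my case:

```python
import torch
import torch.nn as nn

lin = nn.Linear(4, 4)
opt = torch.optim.Adam(lin.parameters(), lr=0.01)

# `state` is grown with torch.cat every iteration, like my prev_features,
# so each new output's graph reaches back into all previous iterations.
state = torch.zeros(1, 4)
for step in range(3):
    opt.zero_grad()
    out = lin(state[-1:])                    # depends on the accumulated state
    state = torch.cat((state, out), dim=0)   # keeps the old graphs alive
    # state = torch.cat((state, out), dim=0).detach()  # <- toy version runs with this
    loss = out.abs().mean()
    loss.backward()   # fails on the second iteration: the graph behind `state` was already freed
    opt.step()
```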

Can someone help me? Thank you very much to everyone.