retain_graph=True not working

I am trying to train a region proposal network like the one in Faster R-CNN. However, the training loop always raises "RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward." even though I added retain_graph=True as suggested. Could someone help me find where the problem is? Thank you so much!

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

def train_rpn(model, dataloader, num_epochs=10):
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model.to(device)

    criterion_classifier = nn.BCELoss()
    criterion_regressor = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0001)

    for epoch in range(num_epochs):
        model.train()

        running_loss = 0.0

        for offsets, labels in dataloader:
            # offsets, labels = offsets.to(device), labels.to(device)

            offsets = offsets.clone().detach().requires_grad_(True)
            labels = labels.clone().detach().requires_grad_(True)
            optimizer.zero_grad()

            # `prediction` is a feature map computed outside this function (see discussion below)
            regressor, classifier = model(prediction)

            classifier_loss = criterion_classifier(classifier.view(1, 1, 7056, 1), labels)
            regressor_loss = criterion_regressor(regressor.view(1, 1, 7056, 4), offsets)

            total_loss = classifier_loss + regressor_loss
            total_loss.backward(retain_graph=True)
            optimizer.step()

            running_loss += classifier_loss.item()

        print(f"Epoch {epoch + 1}, Loss: {running_loss / len(dataloader)}")

    print("Training finished")

The other code is here:

class Region_Proposal_Network(nn.Module):
    def __init__(self):
        super(Region_Proposal_Network, self).__init__()

        self.conv1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.regressor = nn.Conv2d(512, 36, kernel_size=1)   # 4 offsets per anchor, k = 9 anchors
        self.classifier = nn.Conv2d(512, 9, kernel_size=1)    # 1 objectness score per anchor

    def forward(self, x):
        x = self.conv1(x)
        regressor = self.regressor(x)
        classifier = torch.sigmoid(self.classifier(x))
        return regressor, classifier

k = 9

rpn = Region_Proposal_Network()

class RPN_Dataset(Dataset):
    def __init__(self, offset_list, label_list, transform=None):
        self.offset_list = offset_list
        self.label_list = label_list
        self.transform = transform

    def __getitem__(self, idx):
        offsets = self.offset_list[idx]
        labels = self.label_list[idx]

        if self.transform:
            offsets = self.transform(offsets)
            labels = self.transform(labels)

        return offsets, labels

    def __len__(self):
        return len(self.offset_list)

transform = transforms.Compose([transforms.ToTensor()])

dataset = RPN_Dataset(offset_list, label_list, transform=transform)

dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

Where is prediction being computed? You probably want to recompute prediction every time before you call backward.
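
For context, here is a minimal sketch of how this error typically comes about when a feature map that already carries a graph is reused across iterations. The `backbone` and `rpn_head` names below are just stand-ins, not taken from your code:

import torch
import torch.nn as nn

# Hypothetical stand-ins for the real feature extractor and RPN
backbone = nn.Conv2d(3, 256, kernel_size=3, padding=1)
rpn_head = nn.Conv2d(256, 9, kernel_size=1)

image = torch.randn(1, 3, 28, 28)
prediction = backbone(image)   # `prediction` carries the backbone's graph

for step in range(2):
    loss = rpn_head(prediction).mean()   # every iteration reuses that same backbone graph
    loss.backward()   # the first call frees the backbone graph; the second call raises the error above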

Thank you for the reply. The prediction is the feature map (256 × 28 × 28) for a single image, which should stay the same the whole time.
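
If the feature map really is a fixed input that gradients never need to flow back through, one way to avoid the error (just a sketch, reusing the stand-in names `backbone`, `image`, and `rpn_head` from the reply above, not your actual code) is to compute or detach it without a graph, so each iteration only backpropagates through the RPN's own freshly built graph and retain_graph=True is no longer needed:

# Compute the fixed feature map without recording the backbone's graph
with torch.no_grad():
    prediction = backbone(image)
# ...or, if the feature map already exists, cut its graph off:
# prediction = prediction.detach()

for step in range(2):
    loss = rpn_head(prediction).mean()   # the RPN part of the graph is rebuilt each iteration
    loss.backward()                      # plain backward, no retain_graph needed
    # optimizer.zero_grad() / optimizer.step() as in the training loop

In train_rpn, that would mean detaching prediction before the loop and calling total_loss.backward() without retain_graph=True.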