Specified 'retain_graph=True', but I still get an error recommending exactly that

Here is the code along with the error message:

     44         print('Epoch ', epoch, '/', EPOCHS, flush=True)
     45 
---> 46         tr_cls_loss, tr_dm_loss, tr_cf_loss, tr_domain_acc, tr_main_acc = train_unlearn_threedatasets(models, train_dataloaders, optimizers, criterions, epoch)
     47 
     48         if epoch == EPOCH_STAGE1+1: #to save sufficient info for continuing unlearning

C:\Users\MINNKY~1\AppData\Local\Temp/ipykernel_64668/2970363749.py in train_unlearn_threedatasets(models, train_loaders, optimizers, criterions, epoch)
     83             loss_conf = BETA * conf_criterion(output_dm, domain_target)    #nan ... why?
     84             loss_dm.backward(inputs=list(domain_predictor.parameters())) #,retain_graph=True
---> 85             loss_conf.backward(inputs=list(encoder.parameters()),retain_graph=True)  #
     86             optimizer_dm.step()
     87             optimizer_conf.step()

~\anaconda3\lib\site-packages\torch\_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
    361                 create_graph=create_graph,
    362                 inputs=inputs)
--> 363         torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
    364 
    365     def register_hook(self, hook):

~\anaconda3\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    171     # some Python versions print out the first line of a multi-line function
    172     # calls in the traceback and some print out the last line
--> 173     Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    174         tensors, grad_tensors_, retain_graph, create_graph, inputs,
    175         allow_unreachable=True, accumulate_grad=True)  # Calls into the C++ engine to run the backward pass

RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
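If it helps, here is a minimal sketch (the module names are hypothetical stand-ins for my `encoder` and `domain_predictor`) that seems to reproduce the same error. My understanding is that because the first `backward()` call on line 84 does not pass `retain_graph=True`, it frees the saved tensors of the part of the graph the two losses share, so the second call on line 85 fails even though it does pass `retain_graph=True`:

    import torch
    import torch.nn as nn

    # hypothetical stand-ins for the real encoder / domain predictor
    encoder = nn.Linear(8, 4)
    domain_predictor = nn.Linear(4, 2)

    x = torch.randn(16, 8)
    domain_target = torch.randint(0, 2, (16,))

    features = encoder(x)
    output_dm = domain_predictor(features)

    criterion = nn.CrossEntropyLoss()
    loss_dm = criterion(output_dm, domain_target)
    loss_conf = criterion(output_dm, domain_target)  # shares the graph with loss_dm

    # first backward: no retain_graph, so the saved tensors of the shared
    # nodes (domain_predictor's linear layer, the loss) are freed here
    loss_dm.backward(inputs=list(domain_predictor.parameters()))

    # the second backward needs those freed tensors to reach the encoder's
    # parameters -> raises the same RuntimeError, despite retain_graph=True
    loss_conf.backward(inputs=list(encoder.parameters()), retain_graph=True)

Is that the actual cause here, i.e. should `retain_graph=True` go on the first `backward()` call instead of (or in addition to) the second one?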