Weight Updates From Different Losses: Version Issue

Hello,
I am trying to update different sets of weights using losses computed from different sources. My skeleton code is given below. Basically, I want linear3 and linear4 to be updated by both loss1 and loss2, while the rest of the network should only get updates from loss1.

import torch
import torch.nn as nn
import torch.optim as optim


class testnet(nn.Module):
    def __init__(self):
        super(testnet, self).__init__()
        self.linear1 = nn.Sequential(nn.Linear(200, 1000), nn.ReLU())
        self.linear2 = nn.Sequential(nn.Linear(1000, 200), nn.ReLU())
        self.linear3 = nn.Sequential(nn.Linear(200, 1), nn.ReLU())
        self.linear4 = nn.Sequential(nn.Linear(1, 10), nn.ReLU())
        self.linear5 = nn.Sequential(nn.Linear(10, 10), nn.ReLU())
        self.linear6 = nn.Sequential(nn.Linear(10, 1), nn.ReLU())

    def forward(self, x):
        o1 = self.linear1(x)
        o2 = self.linear2(o1)
        o3 = self.linear3(o2)
        o4 = self.linear4(o3)
        o5 = self.linear5(o4)
        o6 = self.linear6(o5)
        # Return the intermediate output o3 (used for loss2) and the final output o6 (used for loss1).
        return [o3, o6]


def run(inn, la1, la2):
    model = testnet()

    # param1 holds every parameter; param2 holds only the linear3/linear4 parameters.
    param1 = []
    param2 = []
    n2 = ['linear3', 'linear4']
    for name, p in model.named_parameters():
        param1.append(p)
        if name.split('.')[0] in n2:
            param2.append(p)

    # Define the losses
    criterion = nn.NLLLoss()
    criterion2 = nn.NLLLoss()

    optimizer1 = optim.SGD(param1, lr=0.003)
    optimizer2 = optim.SGD(param2, lr=0.003)

    # Training pass
    torch.autograd.set_detect_anomaly(True)
    optimizer1.zero_grad()
    optimizer2.zero_grad()

    output = model(inn)

    # loss1 is computed from the final output and updates all parameters via optimizer1.
    loss1 = criterion(output[1], la1)
    loss1.backward(retain_graph=True)
    optimizer1.step()

    # loss2 is computed from the intermediate output; optimizer2 only holds linear3/linear4.
    loss2 = criterion2(output[0], la2)
    loss2.backward()
    optimizer2.step()


if __name__ == '__main__':
    inn = torch.rand(1, 200)
    la1 = torch.zeros(1).long()
    la2 = torch.zeros(1).long()
    run(inn, la1, la2)

Interestingly, this snippet runs fine on torch 1.2.0, 1.3.0, and 1.4.0, but it throws the following error on PyTorch 1.5.0:

Traceback (most recent call last):
 File "test.py", line 63, in <module>
   run(inn,la1,la2)
 File "test.py", line 56, in run
   loss2.backward()
 File "/home/.virtualenvs/thermal_env/lib/python3.6/site-packages/torch/tensor.py", line 198, in backward
   torch.autograd.backward(self, gradient, retain_graph, create_graph)
 File "/home/.virtualenvs/thermal_env/lib/python3.6/site-packages/torch/autograd/__init__.py", line 100, in backward
   allow_unreachable=True)  # allow_unreachable flag
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [200, 1]], which is output 0 of TBackward, is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).

Can anyone help me with this error? I am also wondering what good practice looks like for this kind of weight update.
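
From the error message, my guess (and it is only a guess) is that optimizer1.step() updates the linear3 weight in place while the retained graph for loss2 still needs its old value. Below is a minimal sketch of the reordering I am considering for the training pass inside run(): every backward() call runs before any optimizer.step(), so no parameter is modified in place while a graph still depends on it. I am not sure this is the recommended practice, and with this ordering loss2's gradients also accumulate on linear1/linear2 before optimizer1 steps, which is part of what I am asking about.

optimizer1.zero_grad()
optimizer2.zero_grad()

output = model(inn)

# Compute both losses from the same forward pass.
loss1 = criterion(output[1], la1)
loss2 = criterion2(output[0], la2)

# Run all backward passes before any parameter is updated in place.
loss1.backward(retain_graph=True)  # keep the graph alive for loss2
loss2.backward()                   # gradients from loss2 accumulate into .grad

# Only now apply the in-place parameter updates.
optimizer1.step()  # updates all parameters with the accumulated gradients
optimizer2.step()  # applies a second update to the linear3/linear4 parameters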

Hello,
I am also facing a similar problem… on torch version 1.5.0.