I wrote a custom loss function that takes parameters computed by an intermediate function (a function whose input is the output of my CNN). When I call loss.backward() I get this error:

one of the variables needed for gradient computation has been modified by an inplace operation

I could not track down the source of the error, so I am including both functions below.
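For reference, here is a minimal standalone snippet (unrelated to my model) that reproduces this class of error, just to show the pattern autograd is complaining about: an in-place modification of a tensor that the backward pass still needs.

import torch

x = torch.randn(3, requires_grad=True)
y = torch.exp(x)      # the backward of exp() reuses its output y
y += 1                # in-place op overwrites y before backward runs
y.sum().backward()    # RuntimeError: one of the variables needed for
                      # gradient computation has been modified by an
                      # inplace operation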
The loss function is:
import torch
import pdb

class my_Loss(torch.nn.Module):
    def __init__(self):
        super(my_Loss, self).__init__()
        #self.X = X
        #self.Y = Y

    def forward(self, Y, X):
        #Z = ..........
        # ... (body elided)
        pdb.set_trace()
        return F
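(For comparison, a minimal custom loss of the same shape that backpropagates cleanly would look like the sketch below; the name MinimalLoss and the MSE body are just placeholders for my actual computation of F.)

import torch

class MinimalLoss(torch.nn.Module):
    def __init__(self):
        super(MinimalLoss, self).__init__()

    def forward(self, Y, X):
        # placeholder body: any out-of-place computation of a scalar F
        return torch.mean((Y - X) ** 2)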
and the intermediate function is:
class fb(torch.nn.Module):
    def __init__(self):
        super(fb, self).__init__()
        #self.X = X
        #self.Y = Y

    def forward(self, Y, T, R, C, p0, Rinv, zz):
        ll = calc_ll(Y, T, R, C, p0, Rinv, zz)
        return ll

def calc_ll(Y, T, R, C, p0, Rinv, zz):
    #pdb.set_trace()
    cuda = torch.device("cuda:0")
    tensor = torch.tensor((), dtype=torch.float64, device=cuda)
    [pp, tau] = Y.size()
    [pp2, kk] = C.size()
    # initialize space
    alpha = tensor.new_zeros((kk, tau), requires_grad=False)
    bb = tensor.new_zeros((kk, tau), requires_grad=False)
    rho = tensor.new_zeros((1, tau), requires_grad=False)
    # compute the emission terms bb, one state (row) at a time
    for ii in range(0, kk):
        dd = Y - torch.mm(torch.reshape(C[:, ii], (3, 1)), tensor.new_ones((1, tau)))
        #dd.requires_grad=True
        R1 = torch.mm(Rinv, dd) * dd
        #R1.requires_grad=True
        bb[ii, :] = torch.mul(torch.exp(-0.5 * torch.sum(R1, 0)), zz)
    #pdb.set_trace()
    # forward recursion with per-step normalization
    alpha[:, [0]] = p0 + bb[:, [0]]
    rho[0] = torch.sum(alpha[:, 0])
    alpha[:, 0] = alpha[:, 0].clone() / rho[0]
    #pdb.set_trace()
    for tt in range(1, tau):
        #pdb.set_trace()
        alpha[:, tt] = torch.transpose((torch.mv(torch.transpose(T, 0, 1), alpha[:, tt - 1].clone()) * bb[:, tt]), -1, 0)
        rho[0, tt] = torch.sum(alpha[:, tt].clone())
        alpha[:, tt] = alpha[:, tt].clone() / rho[0, tt]
    pdb.set_trace()
    ll = torch.sum(torch.log(rho)) / tau
    return ll
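In case it helps narrow things down: my understanding is that the recursion could be rewritten without writing into the preallocated alpha, bb, and rho buffers, by collecting each column in a Python list and stacking at the end. Below is only a sketch of that idea (calc_ll_no_inplace is a hypothetical name, and I have not verified it against my data), assuming the in-place column assignments are indeed the culprit:

def calc_ll_no_inplace(Y, T, R, C, p0, Rinv, zz):
    # same recursion, but every column is a fresh tensor,
    # so no in-place writes into preallocated buffers
    [pp, tau] = Y.size()
    [pp2, kk] = C.size()
    ones_row = Y.new_ones((1, tau))
    # emission terms, one state at a time, stacked out of place
    cols = []
    for ii in range(kk):
        dd = Y - torch.mm(C[:, ii].reshape(-1, 1), ones_row)
        R1 = torch.mm(Rinv, dd) * dd
        cols.append(torch.exp(-0.5 * torch.sum(R1, 0)) * zz)
    bb = torch.stack(cols)            # shape (kk, tau)
    # forward recursion with per-step normalization
    alphas, rhos = [], []
    a = p0.reshape(-1) + bb[:, 0]
    r = torch.sum(a)
    alphas.append(a / r)
    rhos.append(r)
    for tt in range(1, tau):
        a = torch.mv(T.t(), alphas[-1]) * bb[:, tt]
        r = torch.sum(a)
        alphas.append(a / r)
        rhos.append(r)
    rho = torch.stack(rhos)           # shape (tau,)
    return torch.sum(torch.log(rho)) / tau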
Could anyone please help me locate the line that causes the error?
Thanks in advance.