Hi all, My code doesn’t work unless I specify retain_graph=True eventhough I am calling backward once every iteration. Thanks in advance
depth_net_encoder.eval()
depth_net_decoder.eval()
noise=Variable(torch.tensor(img), requires_grad=True)
while True :
noise=Variable(torch.tensor(noise.data), requires_grad=True)
dummy=torch.zeros(tgt_img_var.shape).to(device)
true_mask=torch.zeros(tgt_img_var.shape).to(device)
for u in range(dummy.shape[0]):
ex1=ex2=0
dummy[u,:,ry_list[u]-sz2+ex1:ry_list[u]+sz2+ex1, rx_list[u]-sz2+ex2:rx_list[u]+sz2+ex2]=F.sigmoid(noise)
true_mask[u,:,ry_list[u]-sz2+ex1:ry_list[u]+sz2+ex1, rx_list[u]-sz2+ex2:rx_list[u]+sz2+ex2]=torch.tensor(1).to(device)
adv_tgt_img_var = torch.mul((1-true_mask), tgt_img_var) + torch.mul(true_mask, dummy)
enc_var=depth_net_encoder(adv_tgt_img_var)
depth_var = depth_net_decoder(enc_var)
loss_data=0.0
for i in range(1):
o_g_x,o_g_y=torch.abs(X_grad(target_depth[("disp", i)],device)),torch.abs(Y_grad(target_depth[("disp", i)],device))
o_g=o_g_x+o_g_y
g_x,g_y=torch.abs(X_grad(depth_var[("disp", i)],device)),torch.abs(Y_grad(depth_var[("disp", i)],device))
g=g_x+g_y
if i==0:
p_O,p_f=o_g,g
gradient=torch.log(1+g_x*o_g_x).sum()+torch.log(1+g_y*o_g_y).sum()
loss_data=gradient
loss=loss_data*100#+loss_reg
loss.backward()
noise_grad = noise.grad.clone()
noiset=noise-0.001*noise_grad
noise=torch.tensor(noiset.cpu().detach().numpy())
loss_scalar = loss.item()
print(loss_scalar)```