RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [1, 512, 4, 4]] is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient

Hi, I am facing the following error while trying to train a model during the backward pass. The code was working fine before, but I suddenly started getting this error:

I have already tried changing the in-place activations from inplace=True to LeakyReLU(0.2, inplace=False).
Could anyone suggest a solution?
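
For reference, the kind of change I made looks like this (the layer below is just an illustration, not my actual model):

import torch.nn as nn

block = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=3, padding=1),
    nn.LeakyReLU(0.2, inplace=False),   # was: nn.LeakyReLU(0.2, inplace=True)
)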

Details of the error:

RuntimeError Traceback (most recent call last)
Cell In[13], line 2
1 epochs=1
----> 2 train(epochs)

Cell In[12], line 9, in train(max_epochs)
7 for haze_images, dehaze_images, in train_loader:
8 unet_loss, dis_loss, mse, ssim = DUNet.process(haze_images.cuda(), dehaze_images.cuda())
----> 9 DUNet.backward(unet_loss.cuda(), dis_loss.cuda())
10 print('Epoch: '+str(epoch+1)+ ' || Batch: '+str(i)+ " || unet loss: "+str(unet_loss.cpu().item()) + " || dis loss: "+str(dis_loss.cpu().item()) + " || mse: "+str(mse.cpu().item()) + " | ssim: " + str(ssim.cpu().item()) )
11 mse_epoch = mse_epoch + mse.cpu().item()

Cell In[6], line 110, in DU_Net.backward(self, unet_loss, dis_loss)
107 self.dis_optimizer.step()
109 if unet_loss is not None:
--> 110 unet_loss.backward()
111 self.unet_optimizer.step()

File ~\anaconda3\envs\myenv\lib\site-packages\torch\tensor.py:221, in Tensor.backward(self, gradient, retain_graph, create_graph)
213 if type(self) is not Tensor and has_torch_function(relevant_args):
214 return handle_torch_function(
215 Tensor.backward,
216 relevant_args,
(…)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)

File ~\anaconda3\envs\myenv\lib\site-packages\torch\autograd\__init__.py:130, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
127 if retain_graph is None:
128 retain_graph = create_graph
--> 130 Variable._execution_engine.run_backward(
131     tensors, grad_tensors, retain_graph, create_graph,
132     allow_unreachable=True)

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [1, 512, 4, 4]] is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).

Could you post a minimal and executable code snippet reproducing the issue, please?

Actually, I am not sure which variable is creating the issue. The error message suggested enabling torch.autograd.set_detect_anomaly(True) so that it would raise an exception pointing at the failing operation, but I didn't get any additional information. I have also tried replacing the in-place operations with out-of-place ones, but I am not sure where the error is actually coming from. Do you think it could be caused by a different PyTorch version?
The traceback refers to this specific module of the code:

import torch
import torch.nn as nn
import torch.optim as optim

# Enable anomaly detection
torch.autograd.set_detect_anomaly(True)

class DU_Net(nn.Module):

    def __init__(self, unet_input, unet_output, discriminator_input):
        super().__init__()

        unet = UNet(in_channels=unet_input, out_channels=unet_output)
        unet = nn.DataParallel(unet, device_ids=[0])
        unet = unet.cuda()

        discriminator = Discriminator(in_channels=discriminator_input, use_sigmoid=True)
        discriminator = nn.DataParallel(discriminator, device_ids=[0])
        discriminator = discriminator.cuda()

        criterion = nn.MSELoss()
        adversarial_loss = AdversarialLoss(type='hinge')
        l1_loss = nn.L1Loss()
        content_loss = ContentLoss()
        ssim = SSIM(window_size=11)
        bce = nn.BCELoss()

        self.add_module('unet', unet)
        self.add_module('discriminator', discriminator)

        self.add_module('criterion', criterion)
        self.add_module('adversarial_loss', adversarial_loss)
        self.add_module('l1_loss', l1_loss)
        self.add_module('content_loss', content_loss)
        self.add_module('ssim_loss', ssim)
        self.add_module('bce_loss', bce)

        self.unet_optimizer = optim.Adam(
            unet.parameters(),
            lr=float(0.00001),
            betas=(0.9, 0.999)
        )

        self.dis_optimizer = optim.Adam(
            params=discriminator.parameters(),
            lr=float(0.00001),
            betas=(0.9, 0.999)
        )

        self.unet_input = unet_input
        self.unet_output = unet_output
        self.discriminator_input = discriminator_input

    def load(self, path_unet, path_discriminator):
        weight_unet = torch.load(path_unet)
        weight_discriminator = torch.load(path_discriminator)
        self.unet.load_state_dict(weight_unet)
        self.discriminator.load_state_dict(weight_discriminator)

    def save_weight(self, path_unet, path_dis):
        torch.save(self.unet.state_dict(), path_unet)
        torch.save(self.discriminator.state_dict(), path_dis)

    def process(self, haze_images, dehaze_images):

        # zero optimizers
        self.unet_optimizer.zero_grad()
        self.dis_optimizer.zero_grad()

        # find output and initialize loss to zero
        unet_loss = 0
        dis_loss = 0

        outputs = self.unet(haze_images.cuda())

        # discriminator loss
        dis_real, dis_real_feat = self.discriminator(dehaze_images.cuda())
        dis_fake, dis_fake_feat = self.discriminator(outputs.detach().cuda())
        dis_real_loss = self.adversarial_loss(dis_real, True, True)
        dis_fake_loss = self.adversarial_loss(dis_fake, False, True)
        dis_loss = dis_loss + ((dis_real_loss + dis_fake_loss) / 2)

        # unet loss
        unet_fake, unet_fake_feat = self.discriminator(outputs.cuda())
        unet_gan_loss = self.adversarial_loss(unet_fake, True, False) * 0.7
        unet_loss = unet_loss + unet_gan_loss

        unet_criterion = self.criterion(outputs.cuda(), dehaze_images.cuda())
        unet_loss = unet_loss + unet_criterion

        gen_content_loss = self.content_loss(outputs.cuda(), dehaze_images.cuda())
        gen_content_loss = (gen_content_loss * 0.7).cuda()
        unet_loss = dis_loss + gen_content_loss.cuda()

        ssim_loss = self.ssim_loss(outputs.cuda(), dehaze_images.cuda())
        ssim_loss = (1 - ssim_loss) * 2
        unet_loss = dis_loss + ssim_loss.cuda()

        return unet_loss, dis_loss, unet_criterion, 1 - ssim_loss / 2

    def backward(self, unet_loss=None, dis_loss=None):
        if dis_loss is not None:
            dis_loss.backward(retain_graph=True)
        self.dis_optimizer.step()

        if unet_loss is not None:
            unet_loss.backward()
        self.unet_optimizer.step()

    def predict(self, haze_images):
        predict_mask = self.unet(haze_images.cuda())
        return predict_mask

Based on your code snippet you might be running into this issue, which is caused by stale forward activations.
In particular, unet_loss is attached to dis_loss. Calling dis_loss.backward() creates gradients for a set of parameters, which are then updated in-place via dis_optimizer.step(). Calling unet_loss.backward() afterwards fails, since the forward activations are now stale because the corresponding parameters were already updated.
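
A minimal sketch of this failure mode with two toy linear modules (the module names and shapes below are made up for illustration, not taken from your code; the coupling mirrors how your process() builds unet_loss from dis_loss):

import torch
import torch.nn as nn

gen = nn.Linear(4, 4)      # stand-in for the UNet
disc = nn.Linear(4, 1)     # stand-in for the Discriminator
opt_gen = torch.optim.Adam(gen.parameters(), lr=1e-3)
opt_disc = torch.optim.Adam(disc.parameters(), lr=1e-3)

x = torch.randn(8, 4)
out = gen(x)

# discriminator loss uses the detached generator output
dis_loss = disc(out.detach()).mean()
# generator loss is attached to dis_loss
unet_loss = disc(out).mean() + dis_loss

dis_loss.backward(retain_graph=True)
opt_disc.step()        # updates disc's parameters in-place

# raises the same kind of RuntimeError: disc's weight was saved for the
# backward of disc(out), but the optimizer step above modified it in-place
unet_loss.backward()
opt_gen.step()

Calling both backward()s before any optimizer step avoids the error.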


Thank you very much for the idea.
I had tried different solutions, such as replacing the in-place operations with out-of-place ones and enabling anomaly detection (no exceptions were raised).
It turned out the issue was in the ordering of the calls inside the backward function:

Rearranging the function from this:

def backward(self, unet_loss, dis_loss):
    dis_loss.backward(retain_graph = True)
    self.dis_optimizer.step()

    unet_loss.backward()
    self.unet_optimizer.step()

To this, which solved the issue:

def backward(self, unet_loss, dis_loss):
    dis_loss.backward(retain_graph = True)
    unet_loss.backward()
    

    self.dis_optimizer.step()
    self.unet_optimizer.step()
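
With this arrangement both backward() calls run before either optimizer.step(), so the parameters and activations saved during the forward pass are still at the version autograd expects when unet_loss.backward() executes; the in-place parameter updates only happen after all gradients have been computed.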