LAPGAN error: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [12800, 1]]

I want to run the LAPGAN code on my own dataset in Colab, but I get this error. The error is raised by G_loss.backward(). Please help me.

RuntimeError                              Traceback (most recent call last)
<ipython-input-10-cebb7c9a8c4d> in <module>()
    156 
    157 if __name__ == '__main__':
--> 158    run_LAPGAN(n_epoch=1, update_max=50)

3 frames
/usr/local/lib/python3.7/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
    130     Variable._execution_engine.run_backward(
    131         tensors, grad_tensors_, retain_graph, create_graph,
--> 132         allow_unreachable=True)  # allow_unreachable flag
    133 
    134 

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [12800, 1]], which is output 0 of TBackward, is at version 2; expected version 1 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
Here is the relevant part of my training loop:

    torch.autograd.set_detect_anomaly(True)
    for epoch in range(n_epoch):

        D_running_losses = [0.0 for i in range(n_level)]
        G_running_losses = [0.0 for i in range(n_level)]

        for ind, data in enumerate(trainNormalloader, 0):
            # get the inputs from true distribution
            true_inputs = data
            down_imgs = true_inputs.numpy()
            n_minibatch, n_channel, _, _ = down_imgs.shape


            for l in range(n_level):
                # calculate input images for models at the particular level
                if l == (n_level - 1):
                    condi_inputs = None
                    true_inputs = Variable(torch.Tensor(down_imgs))
                    # print('true_inputs: ', true_inputs.size())
                    if use_gpu:
                        true_inputs = true_inputs.cuda()
                else:
                    new_down_imgs = []
                    up_imgs = []
                    residual_imgs = []

                    # compute a Laplacian Pyramid
                    for i in range(n_minibatch):
                        down_img = []
                        up_img = []
                        residual_img = []

                        for j in range(n_channel):
                            previous = down_imgs[i, j, :]
                            down_img.append(cv2.pyrDown(previous))
                            up_img.append(cv2.pyrUp(down_img[-1]))
                            residual_img.append(previous - up_img[-1])

                        new_down_imgs.append(down_img)
                        up_imgs.append(up_img)
                        residual_imgs.append(residual_img)

                    down_imgs = np.array(new_down_imgs)
                    up_imgs = np.array(up_imgs)
                    residual_imgs = np.array(residual_imgs)

                    condi_inputs = Variable(torch.Tensor(up_imgs))
                    true_inputs = Variable(torch.Tensor(residual_imgs))
                    if use_gpu:
                        condi_inputs = condi_inputs.cuda()
                        true_inputs = true_inputs.cuda()

                # get inputs for discriminators from generators and real data
                if l == 0: noise_dim = 28*28
                elif l == 1: noise_dim = 14*14
                else: noise_dim = 100
                noise = Variable(gen_noise(batch_size, noise_dim))
                if use_gpu:
                    noise = noise.cuda()
                fake_inputs = LapGan_model.Gen_models[l](noise, condi_inputs)
                print(true_inputs.size(), fake_inputs.size())
                inputs = torch.cat([true_inputs, fake_inputs])
                print('inputs: ', inputs.size())
                labels = np.zeros(2 * batch_size)
                labels[:batch_size] = 1
                labels = Variable(torch.from_numpy(labels.astype(np.float32)))
                if use_gpu:
                    labels = labels.cuda()

                # Discriminator
                D_optimizers[l].zero_grad()
                if condi_inputs is not None:
                    condi_inputs = torch.cat((condi_inputs, condi_inputs))
                outputs = LapGan_model.Dis_models[l](inputs, condi_inputs)
                D_loss = D_criterions[l](outputs[:, 0], labels)

                if ind % n_update_dis == 0:
                    D_loss.backward(retain_graph=True)
                    D_optimizers[l].step()

                # Generator
                if ind % n_update_gen == 0:
                    G_optimizers[l].zero_grad()
                    G_loss = G_criterions[l](outputs[batch_size:, 0],
                                             labels[:batch_size])
                    torch.autograd.set_detect_anomaly(True)
                    G_loss.backward()
                    G_optimizers[l].step()

                # print statistics
                D_running_losses[l] += D_loss.item()
                G_running_losses[l] += G_loss.item()
                if ind % print_every == (print_every - 1):
                    print('[%d, %5d, %d] D loss: %.3f ; G loss: %.3f' %
                          (epoch+1, ind+1, l+1,
                           D_running_losses[l] / print_every,
                           G_running_losses[l] / print_every))
                    D_running_losses[l] = 0.0
                    G_running_losses[l] = 0.0

            if update_max and ind > update_max:
                break

Could you take a look at this post and check whether the same issue applies to your code? Since you are also working on a GAN, this type of error can easily creep in.
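
The failing tensor [12800, 1] is most likely the (transposed) weight of the discriminator's last linear layer, and the in-place operation is D_optimizers[l].step(): you run the discriminator forward once, update its parameters in place, and then call G_loss.backward() through the now-stale graph, whose saved tensors no longer match the current parameter versions.

A minimal sketch of the same error class (toy names, not your model):

    import torch
    import torch.nn as nn

    lin = nn.Linear(2, 1)
    opt = torch.optim.SGD(lin.parameters(), lr=0.1)

    x = torch.randn(4, 2, requires_grad=True)
    out = lin(x)                             # the graph saves lin.weight for backward
    out.mean().backward(retain_graph=True)   # first backward is fine
    opt.step()                               # in-place update bumps weight's version counter
    out.sum().backward()                     # backward needs the old weight -> RuntimeError

The usual fix is to detach the fake samples for the discriminator update and to run a fresh discriminator forward pass for the generator update. Here is a sketch reusing the names from your snippet; I'm assuming Dis_models[l] accepts a half-sized batch on the second call:

    # --- Discriminator update ---
    # Detach the fake samples so D_loss.backward() neither reaches into
    # nor frees the generator's part of the graph.
    inputs = torch.cat([true_inputs, fake_inputs.detach()])
    if condi_inputs is not None:
        condi_inputs = torch.cat((condi_inputs, condi_inputs))

    D_optimizers[l].zero_grad()
    outputs = LapGan_model.Dis_models[l](inputs, condi_inputs)
    D_loss = D_criterions[l](outputs[:, 0], labels)
    if ind % n_update_dis == 0:
        D_loss.backward()        # retain_graph=True is no longer needed
        D_optimizers[l].step()   # in-place update of D's parameters

    # --- Generator update ---
    # Fresh forward pass through the *updated* discriminator, so the graph
    # matches the current parameter versions.
    if ind % n_update_gen == 0:
        G_optimizers[l].zero_grad()
        fake_condi = None if condi_inputs is None else condi_inputs[batch_size:]
        fake_outputs = LapGan_model.Dis_models[l](fake_inputs, fake_condi)
        G_loss = G_criterions[l](fake_outputs[:, 0], labels[:batch_size])
        G_loss.backward()
        G_optimizers[l].step()

With this ordering the generator graph is backpropagated only once, no retain_graph=True is needed, and the discriminator graph used for G_loss is built after the in-place optimizer step, so the version counters match.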