I am calling loss.backward() in my code and getting an error about a gradient being changed by an in-place operation. The loss is computed by the class below. Any help handling this issue would be appreciated.
import torch
from torch import nn
from torchvision.models.vgg import vgg16


class GeneratorLoss(nn.Module):
    def __init__(self):
        super(GeneratorLoss, self).__init__()
        vgg = vgg16(pretrained=True)
        # Frozen VGG-16 feature extractor used as the perceptual loss network
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss()
        self.tv_loss = TVLoss()

    def forward(self, out_images, target_images, class_det_loss, lambda_class):
        # Perception loss: MSE between VGG features of output and target
        perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images))
        # Image loss: pixel-wise MSE
        image_loss = self.mse_loss(out_images, target_images)
        # Total-variation loss
        tv_loss = self.tv_loss(out_images)
        # Placeholder (immediately overwritten by the weighted sum below)
        result = torch.tensor(0.0, requires_grad=True)
        result = image_loss + 0.006 * perception_loss + 2e-8 * tv_loss + lambda_class * class_det_loss
        return result
class TVLoss(nn.Module):
    def __init__(self, tv_loss_weight=1):
        super(TVLoss, self).__init__()
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        # Number of elements in the vertical / horizontal difference tensors
        count_h = self.tensor_size(x[:, :, 1:, :])
        count_w = self.tensor_size(x[:, :, :, 1:])
        # Squared differences between neighbouring pixels along height and width
        h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size

    @staticmethod
    def tensor_size(t):
        return t.size()[1] * t.size()[2] * t.size()[3]
The error:
one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [512, 10]], which is output 0 of TBackward, is at version 2; expected version 1 instead.
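From what I understand, this error means a tensor that autograd saved during the forward pass was modified in place before loss.backward() ran. Below is a minimal sketch of that mechanism for context (illustrative only, not from my actual training loop; the shapes and names are made up):

import torch

# Illustrative sketch: an in-place update of a tensor that autograd saved
# during the forward pass increments its version counter, so backward()
# fails with the same kind of version mismatch.
w = torch.randn(512, 10, requires_grad=True)
x = torch.randn(4, 512, requires_grad=True)
out = x.matmul(w)      # autograd saves w to compute the gradient w.r.t. x
with torch.no_grad():
    w += 1.0           # in-place update: w's version counter is incremented
out.sum().backward()   # RuntimeError: ... modified by an inplace operation

Enabling torch.autograd.set_detect_anomaly(True) before the forward pass should make the traceback point at the forward operation whose saved tensor was later modified, which may help locate the offending in-place operation.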