Hi – I updated my PyTorch version to the latest build from source, and my WGAN backpropagation code now raises the error "Trying to backward through the graph a second time…"
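For context, this message appears whenever backward() runs a second time through a graph that was freed after the first pass; a minimal sketch that reproduces it (not my training code, just to show the message):

import torch

x = torch.randn(3, requires_grad=True)
y = (x * 2).sum()
y.backward()  # first backward frees the intermediate graph
y.backward()  # RuntimeError: Trying to backward through the graph a second time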
Here is the code for updating the discriminator:
self.D.zero_grad()
d_real_pred = self.D(real_data)
d_real_err = torch.mean(d_real_pred)  # want to push d_real as high as possible
d_real_err.backward(one_neg)
z_input = to_var(torch.randn(self.batch_size, 128))
d_fake_data = self.G(z_input).detach()  # detach so no gradients flow back into G here
d_fake_pred = self.D(d_fake_data)
d_fake_err = torch.mean(d_fake_pred)  # want to push d_fake as low as possible
d_fake_err.backward(one)
gradient_penalty = self.calc_gradient_penalty(real_data.data, d_fake_data.data)
gradient_penalty.backward()
d_err = d_fake_err - d_real_err + gradient_penalty  # combined loss; the grads were already accumulated by the three backward() calls above
self.D_optimizer.step()
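(For completeness, `one` and `one_neg` are just the usual ±1 tensors passed to backward() in WGAN code; sketching the standard definition, with dtype/device adjusted as needed:)

one = torch.FloatTensor([1])  # assumed standard WGAN setup for the backward() direction tensors
one_neg = one * -1
if self.use_cuda:
    one, one_neg = one.cuda(), one_neg.cuda()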
For calculating the gradient penalty:
def calc_gradient_penalty(self, real_data, fake_data):
    alpha = torch.rand(self.batch_size, 1, 1)
    alpha = alpha.expand_as(real_data)
    alpha = alpha.cuda() if self.use_cuda else alpha
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    interpolates = interpolates.cuda() if self.use_cuda else interpolates
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates = self.D(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda()
                              if self.use_cuda else torch.ones(disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradient_penalty = self.lamda * ((gradients.norm(2, 1).norm(2, 1) - 1) ** 2).mean()  # L2 norm taken twice
    return gradient_penalty
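(As far as I understand, this is the standard double-backward pattern; a toy version with made-up shapes that shows the intent:)

import torch
from torch import autograd

# toy double-backward: differentiate a gradient-norm penalty
x = torch.randn(4, 3, requires_grad=True)
y = (x ** 2).sum()
(grad_x,) = autograd.grad(y, x, create_graph=True)  # keep the graph so the gradient is itself differentiable
penalty = ((grad_x.norm(2, dim=1) - 1) ** 2).mean()
penalty.backward()  # backprops through the gradient computation itself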
Any guidance on what might be causing this error?