Hi @colesbury

I used two loss function loss=loss1+loss2, and I expect to have different gradient when I use just loss=loss1,But the gradient flow and numbers was same.indeed adding second loss does not have any effect. Would you pleas help me with that? I try different second loss but the result does not have any change. The first loss is BCELoss and the second one is L1. I change the sigmoid function to Relu, But again the gradient from backward.() with loss2 and without loss2 is same!

```
netG = Generator(ngpu,nz,ngf).to(device)
optimizerG = optim.Adam(netG.parameters(), lr=lr2, betas=(beta1, 0.999))
netG.zero_grad()
label.fill_(real_label)
label=label.to(device)
output = netD(fake).view(-1)
# Calculate G's loss based on this output
loss1 = criterion(output, label)
xxx=torch.histc(Gaussy.squeeze(1).view(-1).cpu(),100, min=0, max=1, out=None)
ddGaussy=xxx/xxx.sum()
xxx1=torch.histc(fake.squeeze(1).view(-1).cpu(),100, min=0, max=1, out=None)
ddFake=xxx1/xxx1.sum()
loss2=abs(ddGaussy-ddFake).sum()
# Calculate gradients for G with 2 loss
errG=loss1+loss2
errG.backward()
for param in netG.parameters():
print(param.grad.data.sum())
# Update G
optimizerG.step()
## ------------------
class Generator(nn.Module):
def __init__(self,ngpu,nz,ngf):
super(Generator, self).__init__()
self.ngpu=ngpu
self.nz=nz
self.ngf=ngf
self.l1= nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(self.nz, self.ngf * 8, 3, 1, 0, bias=False),
nn.BatchNorm2d(self.ngf * 8),
nn.ReLU(True),)
# state size. (ngf*8) x 4 x 4
self.l2=nn.Sequential(nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 3, 1, 0, bias=False),
nn.BatchNorm2d(self.ngf * 4),
nn.ReLU(True),)
# state size. (ngf*4) x 8 x 8
self.l3=nn.Sequential(nn.ConvTranspose2d( self.ngf * 4, self.ngf * 2, 3, 1, 0, bias=False),
nn.BatchNorm2d(self.ngf * 2),
nn.ReLU(True),)
# state size. (ngf*2) x 16 x 16
self.l4=nn.Sequential(nn.ConvTranspose2d( self.ngf*2, 1, 3, 1, 0, bias=False),nn.Sigmoid()
# nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
out=self.l1(input)
out=self.l2(out)
out=self.l3(out)
out=self.l4(out)
print(out.shape)
return out
```