I'm using the MNIST dataset. I built this GAN model and my goal is to compute the non-saturating loss. My batch size is 256, latent_dim is 100, and the number of epochs is 10:
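For completeness, my setup looks roughly like this (the MNIST transform and loader details are a sketch of what I have; the hyperparameters are the ones stated above):

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Hyperparameters stated above
batch_size = 256
latent_dim = 100
num_epochs = 10
image_size = 28   # MNIST images are 28x28
num_channels = 1  # grayscale
ngpu = 1

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# ToTensor() scales pixels to [0, 1], which matches the generator's Sigmoid output
train_set = torchvision.datasets.MNIST(root="./data", train=True, download=True,
                                       transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)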
class Generator(nn.Module):
    def __init__(self, latent_dim, image_size, num_channels, ngpu):
        super(Generator, self).__init__()
        self.latent_dim = latent_dim
        self.image_size = image_size
        self.num_channels = num_channels
        self.ngpu = ngpu
        self.fc = nn.Sequential(
            nn.Linear(latent_dim, 128 * (image_size // 4) ** 2),
            nn.BatchNorm1d(128 * (image_size // 4) ** 2),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.conv = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            nn.ConvTranspose2d(64, num_channels, kernel_size=4, stride=2, padding=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.fc(x)
        x = x.view(-1, 128, self.image_size // 4, self.image_size // 4)
        x = self.conv(x)
        return x
# Define the discriminator network
class Discriminator(nn.Module):
    def __init__(self, image_size, num_channels, ngpu):
        super(Discriminator, self).__init__()
        self.image_size = image_size
        self.num_channels = num_channels
        self.ngpu = ngpu
        self.conv = nn.Sequential(
            nn.Conv2d(num_channels, 64, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.fc = nn.Sequential(
            nn.Linear(128 * (image_size // 4) ** 2, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 128 * (self.image_size // 4) ** 2)
        x = self.fc(x)
        return x
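A quick smoke test of the shapes (for MNIST, image_size=28 and num_channels=1, so the generator upsamples 7x7 -> 14x14 -> 28x28):

G = Generator(latent_dim=100, image_size=28, num_channels=1, ngpu=1)
D = Discriminator(image_size=28, num_channels=1, ngpu=1)
z = torch.randn(4, 100)
imgs = G(z)         # torch.Size([4, 1, 28, 28])
scores = D(imgs)    # torch.Size([4, 1]), probabilities from the Sigmoid
print(imgs.shape, scores.shape)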
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Initialize the generator and discriminator networks
generator = Generator(latent_dim, image_size, num_channels, ngpu).to(device)
discriminator = Discriminator(image_size, num_channels, ngpu).to(device)
generator.apply(weights_init)
discriminator.apply(weights_init)
I'm training the model with the non-saturating loss:
real_label = 1
fake_label = 0
gen_steps = 8  # generator updates per discriminator update

def training_loop(num_epochs=num_epochs, saturating=False):
    netG = generator
    netD = discriminator
    # Initialize BCELoss function
    criterion = nn.BCELoss()
    # Set up SGD optimizers for both G and D
    optimizerD = optim.SGD(netD.parameters(), lr=0.0001, momentum=0.9)
    optimizerG = optim.SGD(netG.parameters(), lr=0.0001, momentum=0.9)

    ## Training Loop
    # Lists to keep track of progress
    img_list = []
    G_losses = []
    G_grads_mean = []
    G_grads_std = []
    D_losses = []
    iters = 0

    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(num_epochs):
        # For each batch
        for i, data in enumerate(train_loader):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ############################
            ## Train with all-real batch
            netD.zero_grad()
            # Format batch
            real_cpu = data[0].to(device)
            b_size = real_cpu.size(0)
            label = torch.full((b_size,), real_label, device=device, dtype=torch.float)
            # Forward pass real batch through D
            output = netD(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, latent_dim, device=device)
            # Generate fake image batch with G
            fake = netG(noise)
            label.fill_(fake_label)
            # Classify all-fake batch with D
            output = netD(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Total D loss (the gradients were already accumulated by the two backward calls)
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            for j in range(gen_steps):
                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ############################
                netG.zero_grad()
                label.fill_(real_label)  # Non-saturating loss: fake labels are real for generator cost
                # Since we just updated D, perform another forward pass of the all-fake batch through D
                output = netD(fake).view(-1)
                # Calculate G's loss based on this output
                errG = criterion(output, label)  # Non-saturating loss
                # Calculate gradients for G
                errG.backward()
                D_G_z2 = output.mean().item()
                # Update G
                optimizerG.step()
                # Save gradients
                G_grad = [p.grad.view(-1).cpu().numpy() for p in list(netG.parameters())]
                G_grads_mean.append(np.concatenate(G_grad).mean())
                G_grads_std.append(np.concatenate(G_grad).std())

            # Output training stats
            if i % 50 == 0:
                print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                      % (epoch + 1, num_epochs, i, len(train_loader),
                         errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
            # Save losses for plotting later
            G_losses.append(errG.item())
            D_losses.append(errD.item())

    return G_losses, D_losses
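I call the loop like this (the same call that appears in the traceback below):

# Train with non-saturating G loss
G_losses_nonsat, D_losses_nonsat = training_loop(saturating=False)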
That's why I have the inner for j in range(gen_steps) loop with gen_steps = 8: I want the generator to run 8 times for each discriminator run. But I get the following error:
Starting Training Loop...
[1/10][0/235]	Loss_D: 0.0067	Loss_G: 5.6489	D(x): 0.9991	D(G(z)): 0.0057 / 0.0057

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
in <cell line: 2>()
      1 # Train with non-saturating G loss
----> 2 G_losses_nonsat, D_losses_nonsat = training_loop(saturating=False)

2 frames
in training_loop(num_epochs, saturating)
     70                 errG = criterion(output, label)  # Non-saturating loss
     71                 # Calculate gradients for G
---> 72                 errG.backward()
     73                 D_G_z2 = output.mean().item()
     74                 # Update G

/usr/local/lib/python3.10/dist-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
    485             inputs=inputs,
    486         )
--> 487         torch.autograd.backward(
    488             self, gradient, retain_graph, create_graph, inputs=inputs
    489         )

/usr/local/lib/python3.10/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    198     # some Python versions print out the first line of a multi-line function
    199     # calls in the traceback and some print out the last line
--> 200     Variable.execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    201         tensors, grad_tensors, retain_graph, create_graph, inputs,
    202         allow_unreachable=True, accumulate_grad=True)

RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
I can't seem to understand what the problem is. When I changed the generator's backward call to

errG.backward(retain_graph=True)

the error went away, but then the saturation problem appeared, which wasn't supposed to happen with a non-saturating loss. So I'm guessing something is wrong in my training loop, but I don't understand what exactly. Thanks for any help!