Inside the training loop of a DCGAN model, I save the model as shown below and load it later, but the loaded model does not produce good results. Is there a problem with saving the model this way? If I generate images without saving and loading, the results are noticeably better.
# Checkpoint each network whenever a new lowest loss is observed.
# NOTE(review): GAN losses are adversarial, not a quality metric — the lowest
# errG typically occurs early in training while D is still weak, so the
# "best-loss" checkpoint usually generates worse images than the final model.
# Prefer saving every N epochs (or selecting by FID / visual inspection).
# BUG FIX: store errD.item() / errG.item() so ls_D / ls_G hold plain floats;
# storing the loss tensor keeps its whole autograd graph alive across
# iterations (memory growth) and makes later comparisons tensor-vs-float.
if errD.item() < ls_D:
    torch.save(netD.state_dict(), 'D.pt')
    ls_D = errD.item()
if errG.item() < ls_G:
    torch.save(netG.state_dict(), 'G.pt')
    ls_G = errG.item()
The full training code is as follows:
# DCGAN training loop: alternate discriminator and generator updates per batch.
for epoch in range(num_epochs):
    for i, data in enumerate(dataloader):
        # ---- (1) Update D on a real batch: maximize log(D(x)) ----
        netD.zero_grad()
        real_cpu = data[0].cuda()
        batch_size = real_cpu.shape[0]
        label = torch.ones(batch_size).cuda()
        output = netD(real_cpu).flatten()
        errD_real = criterion(output.float(), label.float())
        errD_real.backward()
        D_x = output.mean().item()

        # ---- (2) Update D on a fake batch: maximize log(1 - D(G(z))) ----
        label.fill_(fake_label)
        noise = torch.randn(batch_size, nz, 1, 1).cuda()
        fake = netG(noise)
        # detach(): gradients from this D pass must not flow into G.
        output = netD(fake.detach()).flatten()
        errD_fake = criterion(output.float(), label.float())
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimizerD.step()

        # ---- (3) Update G: maximize log(D(G(z))), i.e. label fakes as real ----
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).flatten()
        errG = criterion(output.float(), label.float())
        errG.backward()
        D_G_z2 = output.mean().item()
        optimizerG.step()

        # BUG FIX: the two lists were swapped — G_losses was recording errD
        # and D_losses was recording errG.
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Checkpoint on a new lowest loss.
        # NOTE(review): adversarial loss is NOT a reliable quality signal —
        # the lowest errG often occurs early, while D is still weak, which is
        # exactly why the loaded checkpoint generates worse images than the
        # final in-memory model. Prefer saving every epoch (or selecting by
        # FID / visual inspection of generated samples).
        # BUG FIX: compare/store .item() so ls_D / ls_G are plain floats and
        # the loss tensors' autograd graphs are not kept alive across steps.
        if errD.item() < ls_D:
            torch.save(netD.state_dict(), 'D.pt')
            ls_D = errD.item()
        if errG.item() < ls_G:
            torch.save(netG.state_dict(), 'G.pt')
            ls_G = errG.item()