My code works fine for num_epochs=1 but when i increase the epoch it gives out of memory error here is the code
for epoch in range(num_epochs):
for x,y in train_loader:
real_data=Variable(y.type(dtype))
fake_data=g(Variable(x.type(dtype))).detach()
d_error, d_pred_real, d_pred_fake = train_d(d_optim, real_data, fake_data)
fake_data=g(Variable(x.type(dtype)))
g_error=train_g(g_optim,fake_data,real_data)
logger.log(d_error, g_error, epoch, n_batch, num_batches)
if(c%100==0):
logger.display_status(epoch, num_epochs, n_batch, num_batches,d_error, g_error, d_pred_real, d_pred_fake)
n_batch=c+1
c=c+1
if(c>4000):
break
del real_data,fake_data,x,y,d_error,d_pred_real,d_pred_fake
torch.cuda.empty_cache()
RuntimeError: cuda runtime error (2) : out of memory at /opt/conda/conda-bld/py
torch_1518238409320/work/torch/lib/THC/generic/THCStorage.cu:58