Discriminator loss goes to zero - Context encoder

Hi guys!
I am trying to implement the context encoder paper (Pathak et al.). While training my network, I found that the discriminator loss goes to zero very quickly (within the second epoch). Could someone please help me with this?
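
For reference, my understanding of the paper is that the generator should be trained on a weighted sum of a reconstruction (L2) term and an adversarial term. Using the variable names from my training loop below (the lambda weights are my reading of the paper, so treat them as an assumption):

# joint generator objective as I understand it from Pathak et al.
# (the 0.999 / 0.001 weighting is my assumption from the paper)
lambda_rec, lambda_adv = 0.999, 0.001
g_loss = lambda_rec * lossL2 + lambda_adv * loss_adv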

Here's my generator network:

import torch
import torch.nn as nn

class Generator(nn.Module):

    def __init__(self, hparams=3):
        super(Generator, self).__init__()
        nc = hparams  # number of image channels

        def Downsample(in_channels, out_channels, normalize=True):
            # strided conv: halves the spatial resolution
            layer = [nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)]
            if normalize:
                layer.append(nn.BatchNorm2d(out_channels))
            layer.append(nn.LeakyReLU(0.2, inplace=True))
            return layer

        def Upsample(in_channels, out_channels, normalize=True):
            # transposed conv: doubles the spatial resolution
            layer = [nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)]
            if normalize:
                layer.append(nn.BatchNorm2d(out_channels))
            layer.append(nn.ReLU(True))
            return layer

        # spatial sizes in the comments assume a 128x128 input
        self.main = nn.Sequential(
            *Downsample(nc, 64, normalize=False),  # 128 -> 64
            *Downsample(64, 64),                   # 64 -> 32
            *Downsample(64, 128),                  # 32 -> 16
            *Downsample(128, 256),                 # 16 -> 8
            *Downsample(256, 512),                 # 8 -> 4
            nn.Conv2d(512, 100, 4),                # bottleneck: 4x4 -> 1x1
            nn.ConvTranspose2d(100, 512, 4),       # 1x1 -> 4x4
            nn.BatchNorm2d(512),
            nn.ReLU(),
            *Upsample(512, 256),                   # 4 -> 8
            *Upsample(256, 128),                   # 8 -> 16
            *Upsample(128, 64),                    # 16 -> 32
            *Upsample(64, nc, normalize=False),    # 32 -> 64
            nn.Tanh()
        )

    def forward(self, x):
        return self.main(x)
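
As a quick shape sanity check (assuming 3-channel 128x128 masked inputs, which is what my dataloader produces): the encoder takes 128 down to 4, the bottleneck maps 4x4 to 1x1, and the decoder stops at 64x64, so the generator predicts only the center patch:

g = Generator()
x = torch.randn(1, 3, 128, 128)  # a masked input image
print(g(x).shape)  # torch.Size([1, 3, 64, 64]) -- the predicted 64x64 center region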

Here's my discriminator:

class Discriminator(nn.Module):

    def __init__(self, channels=3):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, normalize):
            # strided conv: halves the spatial resolution
            layers = [nn.Conv2d(in_filters, out_filters, 4, 2, 1)]
            if normalize:
                layers.append(nn.BatchNorm2d(out_filters))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        # spatial sizes in the comments assume a 64x64 input
        layers = []
        layers.extend(discriminator_block(channels, 64, False))  # 64 -> 32
        layers.extend(discriminator_block(64, 128, True))        # 32 -> 16
        layers.extend(discriminator_block(128, 256, True))       # 16 -> 8
        layers.extend(discriminator_block(256, 512, True))       # 8 -> 4
        layers.append(nn.Conv2d(512, 1, 4))                      # 4x4 -> 1x1 score
        self.model = nn.Sequential(*layers, nn.Sigmoid())

    def forward(self, img):
        x = self.model(img)
        return x.view(x.size(0), -1)  # flatten to (batch, 1)
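
The discriminator only ever sees the 64x64 center crops (real or predicted), so the matching check is:

d = Discriminator()
y = torch.randn(1, 3, 64, 64)  # a real or generated center patch
print(d(y).shape)  # torch.Size([1, 1]) -- probability that the patch is real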

This is my training loop, which I have reduced to a single iteration for debugging. The helpers and optimizers it uses are sketched just below, followed by the loop itself.
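
Roughly, the setup looks like this (a sketch: the target helpers and losses match what the loop expects, but the DCGAN-style optimizer hyperparameters shown here are placeholders rather than my exact settings):

gen_loss = nn.MSELoss()   # reconstruction (L2) loss
disc_loss = nn.BCELoss()  # adversarial loss on the sigmoid output

def real_data_target(batch_size, device):
    # all-ones labels for real samples
    return torch.ones(batch_size, 1, device=device)

def fake_data_target(batch_size, device):
    # all-zeros labels for fake samples
    return torch.zeros(batch_size, 1, device=device)

generator = Generator().cuda()
discriminator = Discriminator().cuda()
# placeholder DCGAN-style optimizer settings
optimizer_G = torch.optim.Adam(generator.parameters(), lr=2e-4, betas=(0.5, 0.999))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.5, 0.999))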

# fetch one batch: full image, image with the center masked out, and the true center crop
im, masked_im, cut_im = next(iter(dataloader))
cut_im = cut_im.cuda()
masked_im = masked_im.cuda()
im = im.cuda()

valid = real_data_target(im.size(0), 'cuda')  # targets for real samples
fake = fake_data_target(im.size(0), 'cuda')   # targets for fake samples

# ---- generator step ----
optimizer_G.zero_grad()

pred_im = generator(masked_im)

lossL2 = gen_loss(pred_im, cut_im)
loss_adv = disc_loss(discriminator(pred_im), valid)
g_loss = lossL2  # note: only the L2 term is optimized here; loss_adv is computed and printed but not used
print(f'L2 loss : {lossL2} adv loss : {loss_adv} g_loss = {g_loss}')

g_loss.backward()
optimizer_G.step()

# ---- discriminator step ----
optimizer_D.zero_grad()
real_loss = disc_loss(discriminator(cut_im), valid)
fake_loss = disc_loss(discriminator(pred_im.detach()), fake)  # detach so D's update doesn't backprop into G

d_loss = 0.5 * (real_loss + fake_loss)
print(f'real loss : {real_loss} fake_loss : {fake_loss} total_loss : {d_loss}')
d_loss.backward()
optimizer_D.step()

This is the output it gives:

L2 loss : 0.3206532299518585 adv loss : 0.0 g_loss = 0.3206532299518585
real loss : 0.0 fake_loss : 100.0 total_loss : 50.0

This is my first post here, so any suggestions on how to improve the way I post questions are very welcome!