BatchNorm2d layer in DCGAN

I’ve implemented a Discriminator that uses Batch Normalization layers. Unfortunately, with them enabled the discriminator loss gets stuck and stays constant throughout training, yet if I remove the batch normalization layers the discriminator performs well. Any suggestions as to what changes are necessary to include Batch Normalization layers in a DCGAN discriminator?
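For clarity, this is what a single block looks like with the BatchNorm line uncommented, i.e. the configuration where the loss gets stuck (a minimal standalone sketch; note that 0.8 is passed as BatchNorm2d's second positional argument, which is eps, not momentum). The full model follows.

import torch
import torch.nn as nn

# One conv block with BatchNorm enabled, as in the commented-out lines
# of the full model below. nn.BatchNorm2d's second positional argument
# is eps (default 1e-5), so BatchNorm2d(32, 0.8) runs with eps=0.8.
block = nn.Sequential(
    nn.Conv2d(in_channels=5, out_channels=32, kernel_size=(3, 3), padding='same'),
    nn.BatchNorm2d(32, 0.8),
    nn.ReLU(inplace=False),
)

print(block(torch.randn(8, 5, 64, 64)).shape)  # torch.Size([8, 32, 64, 64])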

import torch
import torch.nn as nn

class Flatten(torch.nn.Module):
    '''Flattens (N, C, H, W) feature maps to (N, C*H*W).'''

    def forward(self, x):
        return x.view(x.size()[0], -1)

class Discriminator(nn.Module):

    def __init__(self):
        super(Discriminator, self).__init__()
       
        self.layer = nn.Sequential(
            ## Block 1
            nn.Conv2d(in_channels=5, out_channels=32, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(32, 0.8),
            nn.ReLU(inplace=False),

            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(32, 0.8),
            nn.ReLU(inplace=False),

            nn.MaxPool2d(kernel_size=(2, 2)),

            ## Block 2
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(64, 0.8),
            nn.ReLU(inplace=False),

            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(64, 0.8),
            nn.ReLU(inplace=False),

            nn.MaxPool2d(kernel_size=(2, 2)),

            ## Block 3
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(64, 0.8),
            nn.ReLU(inplace=False),

            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding='same'),
            # nn.BatchNorm2d(64, 0.8),
            nn.ReLU(inplace=False),

            nn.MaxPool2d(kernel_size=(2, 2)),
        )
        self.flatten = Flatten()
        self.final = nn.Sequential(
            # 50176 = 64 * 28 * 28: 64 channels at 28x28 after the three
            # 2x2 max-pools, which implies 224x224 inputs.
            nn.Linear(50176, 100),
            nn.Linear(100, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        '''Forward pass; map samples to confidence they are real [0, 1].'''
        x = self.layer(x)
        x = self.flatten(x)
        x = self.final(x)
        return x
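For reference, a quick shape check of the model as posted. The 5-channel 224x224 input size here is my inference from the 50176 flattened size, so adjust it to your data:

import torch

# Smoke test: batch of 8 random samples. The 224x224 spatial size is an
# assumption implied by the 50176 = 64 * 28 * 28 Linear input; the three
# 2x2 max-pools reduce 224 -> 112 -> 56 -> 28.
disc = Discriminator()
scores = disc(torch.randn(8, 5, 224, 224))
print(scores.shape)  # torch.Size([8, 1]) -- sigmoid confidences in [0, 1]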