Progressive growing of GANs

I'm trying to implement ProGAN to generate flower images. During training I ran into a problem: the generator performs well and actually learns to produce something acceptable at every resolution, but as soon as training grows to 64×64 the generated images become very noisy, and by 128×128 they are just plain black.

My training setup:

batch size: 16
lr: 0.001, as mentioned in the paper
loss: WGAN loss with gradient penalty (I used this implementation I found on GitHub: https://github.com/EmilienDupont/wgan-gp/blob/ef82364f2a2ec452a52fbf4a739f95039ae76fe3/training.py#L73); a sketch of the penalty term is below
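
For reference, the penalty term in that implementation is the standard WGAN-GP one; here is a minimal sketch of what gets computed (the function name, the critic argument, and gp_weight=10 are illustrative, not my exact code):

import torch

def gradient_penalty(critic, real, fake, gp_weight=10.0):
    # random interpolation between real and generated samples
    eps = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = eps * real + (1 - eps) * fake.detach()
    interp.requires_grad_(True)
    scores = critic(interp)
    # gradients of the critic scores w.r.t. the interpolated images
    grads = torch.autograd.grad(
        outputs=scores,
        inputs=interp,
        grad_outputs=torch.ones_like(scores),
        create_graph=True,
        retain_graph=True,
    )[0]
    grads = grads.view(grads.size(0), -1)
    # penalize deviation of the gradient norm from 1
    return gp_weight * ((grads.norm(2, dim=1) - 1) ** 2).mean()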

import torch
import torch.nn as nn
from torch.nn.functional import interpolate

# EqualizedLinear, EqualizedConv, PixelNorm and num_features are my own helpers:
# equalized-learning-rate layers from the paper and a depth -> channels map.

class Generator(nn.Module):
    def __init__(self, dimLatent=512, in_channels=3):
        super().__init__()
        self.dimLatent, self.img_channels = dimLatent, in_channels
        self.alpha = 0  # fade-in weight for the newest block (0 = old branch only)
        self.linear = nn.Sequential(
            EqualizedLinear(dimLatent, 16 * dimLatent),  # dimLatent * 4*4
            nn.LeakyReLU(0.2)
        )
        # base block of the network; generates the 4x4 image.
        self.initial = nn.Sequential(
            EqualizedConv(in_channels=dimLatent, out_channels=dimLatent, kernel_size=4),
            PixelNorm(),
            nn.LeakyReLU(0.2),
            EqualizedConv(in_channels=dimLatent, out_channels=dimLatent, kernel_size=3),
            PixelNorm(),
            nn.LeakyReLU(0.2)
        )
        self.depth = 1
        self.stages = nn.ModuleList()
        self.toRGBLayers = nn.ModuleList(
            [EqualizedConv(in_channels=dimLatent, out_channels=in_channels, padding="valid", kernel_size=1)])

    def set_alpha(self, new_alpha):
        self.alpha = new_alpha

    def extend(self):
        # add a new stage (and matching toRGB layer) that doubles the output resolution
        self.stages.append(
            nn.Sequential(
                EqualizedConv(num_features(self.depth - 1), num_features(self.depth), kernel_size=3),
                PixelNorm(),
                nn.LeakyReLU(0.2),
                EqualizedConv(num_features(self.depth), num_features(self.depth), kernel_size=3),
                PixelNorm(),
                nn.LeakyReLU(0.2)
            )
        )
        self.toRGBLayers.append(
            EqualizedConv(num_features(self.depth), self.img_channels, kernel_size=1, stride=1)
        )
        self.depth += 1

    def forward(self, x):
        # reshaping to match the linear layer input.
        x = x.view(-1, self.dimLatent)
        # fc layer
        x = self.linear(x)
        # take it from the fc layer to a 4*4 image shape.
        x = x.view(x.size(0), -1, 4, 4)
        y = self.initial(x)
        
        if len(self.stages) == 0:
            return self.toRGBLayers[0](y)

        # progressively upsample and run each stage; after the loop, x holds the
        # upsampled input to the last stage and y holds the last stage's output.
        for layer in self.stages:
            x = interpolate(y, scale_factor=2, mode='nearest')
            y = layer(x)

        # map the old (upsampled) branch and the new branch to RGB.
        x, y = self.toRGBLayers[-2](x), self.toRGBLayers[-1](y)
        # smoothly fade in the new branch y using the alpha factor.
        out = self.alpha * y + (1 - self.alpha) * x
        
        return out
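
For completeness, here is roughly how I grow the network and schedule alpha during training. This is a simplified sketch, not my exact loop; num_features here is an illustrative version of the paper's channel schedule, and steps_per_stage and the linear fade are stand-ins:

import torch

# channel count for the block that outputs resolution 4 * 2**depth
# (512 maps up to 32x32, then halved at each new resolution, as in the paper)
def num_features(depth, fmap_base=8192, fmap_max=512):
    return min(fmap_base // (2 ** (depth + 1)), fmap_max)

gen = Generator(dimLatent=512, in_channels=3)
steps_per_stage = 10_000  # illustrative; in practice tuned per stage

for depth in range(1, 7):      # grow 4x4 -> 8x8 -> ... -> 128x128
    if depth > 1:
        gen.extend()           # add the next resolution block
        # NB: the optimizer must be rebuilt here (or given the new
        # block's parameters), otherwise the new layers never train
    for step in range(steps_per_stage):
        # fade the new block in over the first half of the stage,
        # then train with alpha = 1 for the rest of it
        gen.set_alpha(min(1.0, 2 * step / steps_per_stage))
        z = torch.randn(16, 512)
        fake = gen(z)
        # ... critic update with WGAN-GP, then generator update ...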

(screenshot of the generated samples omitted)