Expected more than 1 spatial element when training

Hi everyone!

I have a network designed for 64x64 images (height/width), and I'm trying to rework it to take 8x8 inputs. I've managed to fix the generator, but I'm stuck with the discriminator:

import torch.nn as nn
import torch.optim as optim


class Discriminator(nn.Module):
    def __init__(self, latent_vector_size, features_d, num_channels):
        super(Discriminator, self).__init__()
        
        self.latent_vector_size = latent_vector_size
        self.features_d = features_d
        self.num_channels = 2 #num_channels # THIS WAS CHANGED TO LOOK AT ONLY ONE CHANNEL
        self.optimizer = None
        self.main = None
    
    @staticmethod    
    def discriminator_block(in_filters, out_filters, stride, normalize):
        # one block: Conv2d -> optional InstanceNorm2d -> LeakyReLU
        layers = [
            nn.Conv2d(
                in_channels = in_filters,
                out_channels = out_filters,
                kernel_size = 3,
                stride = stride,
                padding = 1                
                )
            ]
        if normalize:
            layers.append(
                nn.InstanceNorm2d(out_filters)
                )
        
        layers.append(
            nn.LeakyReLU(0.2, inplace = True)
            )
        
        return layers
        
        
    def build(self):
        
        layers = []
        in_filters = self.num_channels
        
        # each stride-2 block halves the spatial size (for 8x8 input: 8 -> 4 -> 2 -> 1)
        for out_filters, stride, normalize in [(self.features_d, 2, False), (self.features_d * 2, 2, True),
                                               (self.features_d * 4, 2, True), (self.features_d * 8, 1, True)]:
        
            layers.extend(self.discriminator_block(
                in_filters,
                out_filters,
                stride,
                normalize
                ))
            
            in_filters = out_filters
        
        layers.append(
            nn.Conv2d(
                in_channels = out_filters,
                out_channels = 1,
                kernel_size = 3,
                stride = 1,
                padding = 1
                )
            )
        self.main = nn.Sequential(*layers)
        
    def forward(self, input):
        return self.main(input)
    
    def define_optim(self, learning_rate, beta1):
        self.optimizer = optim.Adam(self.parameters(), lr = learning_rate, betas = (beta1, 0.999))
    
    @staticmethod   
    def init_weights(layers):
        classname = layers.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(layers.weight.data, 0.0, 0.02)
            
        elif classname.find('BatchNorm') != -1:
            nn.init.normal_(layers.weight.data, 1.0, 0.02)
            nn.init.constant_(layers.bias.data, 0)

The input is [20, 2, 8, 8], but after the third stride-2 block the activations shrink to torch.Size([20, 16, 1, 1]), and nn.InstanceNorm2d raises the error in the title because a 1x1 feature map has only a single spatial element to normalize over in training mode. How can I adjust the discriminator so that nn.InstanceNorm2d gets a properly sized input?
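
To illustrate, here is a minimal trace of the shapes as I understand them (assuming features_d = 4, which matches the [20, 16, 1, 1] I'm seeing; the exact values are just for illustration):

import torch
import torch.nn as nn

# Hypothetical values inferred from the shapes above:
# batch of 20, 2 channels, 8x8 images, features_d = 4.
x = torch.randn(20, 2, 8, 8)

# Each stride-2 conv (kernel_size=3, padding=1) halves the spatial size:
# floor((8 + 2*1 - 3) / 2) + 1 = 4, then 4 -> 2, then 2 -> 1.
for out_filters, stride in [(4, 2), (8, 2), (16, 2)]:
    x = nn.Conv2d(x.shape[1], out_filters, kernel_size=3, stride=stride, padding=1)(x)
    print(x.shape)
# torch.Size([20, 4, 4, 4])
# torch.Size([20, 8, 2, 2])
# torch.Size([20, 16, 1, 1])

# InstanceNorm2d normalizes over each sample's spatial dimensions, so a
# 1x1 feature map leaves nothing to compute statistics over while training:
norm = nn.InstanceNorm2d(16)
norm.train()
norm(x)  # RuntimeError: Expected more than 1 spatial element when training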

Thanks!