Given groups=1, weight of size [48, 24, 11, 11], expected input[64, 48, 4, 4] to have 24 channels, but got 48 channels instead

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3,out_channels=6,kernel_size=11)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=6,out_channels=12,kernel_size=11)
        self.conv3 = nn.Conv2d(in_channels=12,out_channels=24,kernel_size=11)
        self.conv4 = nn.Conv2d(in_channels=24,out_channels=48,kernel_size=11)
        self.conv2_bn = nn.BatchNorm2d(48)

        self.fc1 = nn.Linear(50176, 360)
        self.fc2 = nn.Linear(360, 240)
        self.fc3 = nn.Linear(240, 120)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = F.relu(F.max_pool2d(self.conv2_bn(self.conv4(x))))
        x = x.view(-1, 50176 )
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()

Given groups=1, weight of size [48, 24, 11, 11], expected input[64, 48, 4, 4] to have 24 channels, but got 48 channels instead

As the error says, you use conv4 with something that already has 48 channels. given your code, I guess its the call on this line that should not contain a conv4 as it is already in the line above?

        x = F.relu(F.max_pool2d(self.conv2_bn(self.conv4(x))))

i have 120 classes which contain almost 20k images of dogs. so my assignment is to code 2 xtra convolution layers with 1 batch normalization layer. the code is working fine on cifar-10 class, but when i use mine class it gives different error. as you said this line shouldn’t be here. i commented it and get this error.

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3,out_channels=6,kernel_size=11)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=6,out_channels=12,kernel_size=11)
        self.conv3 = nn.Conv2d(in_channels=12,out_channels=24,kernel_size=11)
        self.conv4 = nn.Conv2d(in_channels=24,out_channels=48,kernel_size=11)
        self.conv2_bn = nn.BatchNorm2d(48)

        self.fc1 = nn.Linear(49152, 360)
        self.fc2 = nn.Linear(360, 240)
        self.fc3 = nn.Linear(240, 120)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        #x = F.relu(F.max_pool2d(self.conv2_bn(self.conv4(x))))
        x = x.view(-1, 49152 )
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()

Maybe you still want the pooling?
And given the error, I guess your view op is not correct. And the -1 does not match the batch size because the other value does not match the dimensions.
You can add prints in your forward pass just before the view to check the size of x. And make sure the second dimension is what you expect. Or do:

# Assuming 4D x as it comes out of the 2d Convolutions above.
x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))