Expected input batch_size to match target batch_size

I’m practicing with the fashion_mnist dataset. I built a network with 2 conv layers and 2 fc layers and it works, but when I increased the number of conv layers to 5, it no longer works. The error message is the same as the title…

The code is below; I really hope for your help:

import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms

transformer = transforms.Compose([transforms.ToTensor()])
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transformer)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True)
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transformer)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=True)

def cnn_train(model):
    iter = 0
    start = time.time()
    for epoch in range(epochs):
        for images, labels in trainloader:
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            outputs = outputs.to(device)
            loss = criterion(outputs, labels).to(device)
            loss.backward()
            optimizer.step()

            iter += 1

            correct_train = 0
            total_train = 0
            correct_test = 0
            total_test = 0
            accuracy_train = 0
            accuracy_test = 0
            if iter % 1000 == 0:
                # calculate Accuracy for test set
                for images, labels in testloader:
                    images = images.to(device)
                    labels = labels.to(device)
                    outputs = model(images)
                    outputs = outputs.to(device)
                    _, predicted = torch.max(outputs.data, 1)
                    total_test += labels.size(0)
                    correct_test += (predicted == labels).sum()
                accuracy_test = 100 * correct_test / total_test

                predicted_label = torch.max(outputs.data, 1)[1]
                total_train += labels.size(0)
                correct_train += (predicted_label == labels).sum()
                accuracy_train = 100 * correct_train / total_train

                print("Iter: {}. Loss: {}. Accuracy(train): {}%. Accuracy(test): {}%.".format(iter, loss.item(), accuracy_train, accuracy_test))
    end = time.time()
    print('Finished, time cost:', round(end - start, 2), 'seconds')

class CNN_c(nn.Module):
    def __init__(self):
        super(CNN_c, self).__init__()
        self.conv1 = nn.Conv2d(1, 5, 4) 
        self.pool = nn.MaxPool2d(2, 1)
        self.conv2 = nn.Conv2d(5, 20, 4)
        self.conv3 = nn.Conv2d(20, 100, 4)
        self.fc1 = nn.Linear(100 * 4 * 4, 1000)
        self.fc2 = nn.Linear(1000, 100)


    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.reshape(-1, 1600)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # not shown in the post; assumed here
epochs = 5  # not shown in the post; assumed value

myCNN_c = CNN_c().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(myCNN_c.parameters(), lr=0.001, momentum=0.9)

cnn_train(myCNN_c)

Replace x = x.reshape(-1, 1600) with x = x.view(x.size(0), -1) to keep the batch size constant. You will then most likely get a shape mismatch error in the following linear layer, and you should adapt its in_features accordingly.
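
For illustration, here is a minimal sketch of what that change could look like, assuming the 28x28 FashionMNIST inputs and the layer settings from the post: each conv (kernel 4) and MaxPool2d(2, 1) step shrinks the spatial size from 28 to 25, 24, 21, 20, 17 and finally 16, so the tensor entering the classifier is [batch, 100, 16, 16] and fc1 would need in_features = 100 * 16 * 16 = 25600. With reshape(-1, 1600), the extra activations were instead folded into the batch dimension, which is why the output batch size no longer matched the label batch size.

class CNN_c(nn.Module):
    def __init__(self):
        super(CNN_c, self).__init__()
        self.conv1 = nn.Conv2d(1, 5, 4)
        self.pool = nn.MaxPool2d(2, 1)
        self.conv2 = nn.Conv2d(5, 20, 4)
        self.conv3 = nn.Conv2d(20, 100, 4)
        # spatial size: 28 -> 25 -> 24 -> 21 -> 20 -> 17 -> 16, with 100 channels at the end
        self.fc1 = nn.Linear(100 * 16 * 16, 1000)
        self.fc2 = nn.Linear(1000, 100)  # unchanged from the post; FashionMNIST itself has 10 classes

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(x.size(0), -1)  # flatten everything except the batch dimension
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

If in doubt about the correct in_features, printing x.shape right before the flatten is a quick way to read it off.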

Thank you so much for your help!