Multi-label classification with a CNN

import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.conv3 = nn.Conv2d(64, 128, 5)
        self.pool = nn.MaxPool2d(2, 2)

        self.fc1 = nn.Linear(128 * 9 * 9, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 84)
        self.fc4 = nn.Linear(84, 10)

        self.dropout1 = nn.Dropout(p=0.5, inplace=False)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        # x = self.dropout1(x)
        x = x.view(-1, 128 * 9 * 9)
        x = F.relu(self.fc1(x))
        x = self.dropout1(x)
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

This is a CNN with input size (3, 64, 64).
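As a sanity check on those shapes: with a (3, 64, 64) input, each 5x5 convolution shrinks each spatial dimension by 4 and each pooling halves it, so the activation before fc1 is [N, 128, 9, 9], i.e. 128 * 9 * 9 = 10368 features per sample. A minimal sketch, assuming the imports and Net class above (the net and dummy names are just illustrative):

net = Net()
dummy = torch.randn(2, 3, 64, 64)            # explicit batch dimension of 2

with torch.no_grad():
    x = net.pool(F.relu(net.conv1(dummy)))   # -> [2, 32, 30, 30]
    x = net.pool(F.relu(net.conv2(x)))       # -> [2, 64, 13, 13]
    x = F.relu(net.conv3(x))                 # -> [2, 128, 9, 9]

print(x.shape)                               # torch.Size([2, 128, 9, 9])
print(x.view(x.size(0), -1).size(1))         # 10368 == 128 * 9 * 9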

for epoch in range(2):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        inputs = concat_img(inputs)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)

        labels = torch.stack(labels.split(4))
        one_hot_labels = torch.zeros(inputs.size(0), 10, dtype=torch.long).scatter_(1, labels, 1)
        one_hot_labels = one_hot_labels.float()

        loss = criterion(outputs, one_hot_labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
            running_loss = 0.0

But with this code there is a ValueError: Target size (torch.Size([3, 10])) must be the same as input size (torch.Size([1, 10])). What should I do? There is also no decrease of the loss at all. Please help me.

Change:

x = x.view(-1, 128*9*9)

to:

x = x.view(x.size(0), -1)

to keep the batch size equal and adapt the in_features of the next linear layer, if needed.
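A minimal sketch of the difference (the unbatched shape below is only illustrative): if the tensor arriving at the reshape does not have exactly 128 * 9 * 9 = 10368 features per sample, view(-1, 128 * 9 * 9) silently folds everything, including the batch dimension, into the row count, while view(x.size(0), -1) always keeps dim 0 as the batch size.

import torch

x = torch.randn(128, 9, 9)               # e.g. an activation that lost its batch dimension
print(x.view(-1, 128 * 9 * 9).shape)     # torch.Size([1, 10368]) -- batch size collapses to 1
print(x.view(x.size(0), -1).shape)       # torch.Size([128, 81])  -- dim 0 is preserved

x = torch.randn(4, 128, 9, 9)            # properly batched activation
print(x.view(x.size(0), -1).shape)       # torch.Size([4, 10368]) -- matches fc1's in_features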

Thanks for replying, but there is still an error at outputs = net(inputs):
RuntimeError: mat1 and mat2 shapes cannot be multiplied (128x81 and 10368x256)
I also want to know how to reduce the loss. Even though there are lots of epochs, the loss stays at 0.680, 0.680, 0.680, … like this. I think it means there is no improvement in my model.

This would be expected, as your in_features is set to a wrong value:

Change the in_features of self.fc1 to 81 and it should work.
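A minimal check of that change, using only the shapes taken from the error message (not a real batch):

import torch
import torch.nn as nn

fc1 = nn.Linear(81, 256)         # in_features changed from 128 * 9 * 9 to 81
x = torch.randn(128, 81)         # shaped like mat1 in the RuntimeError
print(fc1(x).shape)              # torch.Size([128, 256]) -- the matmul now works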