I'm getting the following error while training: `Expected input batch_size (1) to match target batch_size (10)`

class Network(nn.Module):
    """Two conv blocks followed by three linear layers (3-class output).

    NOTE(review): fc1's in_features (12 * 100 * 272) implies, working the
    conv/pool arithmetic backwards, a 1 x 412 x 1100 input image — confirm
    against the actual dataset.
    """

    def __init__(self):
        # Was `def init` / `super().init()` — without the dunder name,
        # __init__ never runs and the layers below are never registered.
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=5)   # conv layer 1
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=12, kernel_size=5)  # conv layer 2

        self.fc1 = nn.Linear(in_features=12 * 100 * 272, out_features=120)  # linear (fully connected) layer 1
        self.fc2 = nn.Linear(in_features=120, out_features=60)              # linear layer 2
        self.out = nn.Linear(in_features=60, out_features=3)                # output layer

    def forward(self, t):
        # (2) conv block 1: conv -> relu -> 2x2 max pool
        t = self.conv1(t)
        t = F.relu(t)
        t = F.max_pool2d(t, kernel_size=2, stride=2)

        # (3) conv block 2
        t = self.conv2(t)
        t = F.relu(t)
        t = F.max_pool2d(t, kernel_size=2, stride=2)

        # (4) flatten per sample, keeping the batch dimension intact.
        # The original `t.reshape(-1, 12 * 100 * 272)` silently merged
        # samples whenever the per-sample feature count didn't match,
        # which is what causes "input batch_size (1) to match target
        # batch_size (10)". With t.size(0) here, a size mismatch surfaces
        # as a clear shape error at fc1 instead.
        t = t.reshape(t.size(0), -1)
        t = self.fc1(t)
        t = F.relu(t)

        # (5) hidden linear layer
        t = self.fc2(t)
        t = F.relu(t)

        # (6) output layer — raw logits; F.cross_entropy applies
        # log-softmax internally, so no softmax here.
        t = self.out(t)

        return t

network = Network()

# NOTE(review): `train` (dataset), `optim`, and `get_num_correct` are
# defined elsewhere in the original script — not shown in this snippet.
train_loader = torch.utils.data.DataLoader(train, batch_size=10)
optimizer = optim.Adam(network.parameters(), lr=0.01)

for epoch in range(8):

    # Reset the per-epoch statistics (indentation restored: the original
    # paste had the loop body at top level, which is not valid Python).
    total_loss = 0
    total_correct = 0

    for batch in train_loader:  # get batch
        images, labels = batch

        preds = network(images)                # forward pass
        loss = F.cross_entropy(preds, labels)  # compute loss

        optimizer.zero_grad()  # clear gradients accumulated from the previous step
        loss.backward()        # compute gradients
        optimizer.step()       # update weights

        total_loss += loss.item()
        total_correct += get_num_correct(preds, labels)

    # Per-epoch summary (stats are reset each epoch, so print inside the epoch loop).
    print("epoch", epoch, "total_correct:", total_correct, "loss:", total_loss)

Hi,

Do you have a particular question you want to ask?
The error message itself points at the problem: the batch dimension of `preds` is 1 while `labels` has 10. That usually means the `reshape` before `fc1` is flattening with the wrong per-sample feature count and collapsing the batch dimension — try `t.reshape(t.size(0), -1)` and check that `fc1`'s `in_features` matches the actual flattened size for your input images.