I have some code:
from torchvision.datasets import ImageFolder
import torchvision.transforms as T
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
path ="/Users/edenbrown/Downloads/kagglecatsanddogs_3367a/PetImages"
transform = T.Compose([T.Resize((50, 50)),T.ToTensor()])
dataset = ImageFolder(root=path, transform=transform)
dataloader = DataLoader(dataset, batch_size=10, shuffle=True)
for batch_number, (images, labels) in enumerate(dataloader):
break
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(50 * 50, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 2)
def forward(self, x):
x = f.relu(self.fc1(x))
x = f.relu(self.fc2(x))
x = f.relu(self.fc3(x))
x = self.fc4(x)
return f.log_softmax(x, dim = 1)
net = Net()
optimizer = optim.Adam(net.parameters(), lr = 0.001)
Epochs = 3
index = 0
for epoch in range(Epochs):
a = labels[index]
b = images[index]
net.zero_grad()
output = net(b.view(-1, 50 * 50))
loss = f.nll_loss(output, a)
loss.backward()
optimizer.step()
print(loss)
index += 1
but I get this error:
Traceback (most recent call last):
File “/Users/edenbrown/sample.ws45/new.py”, line 50, in
loss = f.nll_loss(output, a)
File “/Users/edenbrown/opt/anaconda3/envs/env_pytorch3/lib/python3.10/site-packages/torch/nn/functional.py”, line 2671, in nll_loss
return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
ValueError: Expected input batch_size (3) to match target batch_size (0).
Is there a way to fix this error?