I’m trying to train a model to predict ethnicity from 75x75 face images, but whenever I train it, the accuracy stays stuck at exactly 42.5%. I didn’t realize why until I actually ran it on some of the photos: no matter what the photo was, it always predicted ‘white’. I’m not entirely sure why.
I copied the code over from the PyTorch Quickstart tutorial, and with that tutorial’s dataset (and with the standard MNIST data) it worked fine. Once I changed the dataset to UTKFace, it started predicting only one label, all the time.
Here’s my code:
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import ToTensor
import torch.nn.functional as F
training_data = ImageFolder(
    root="data_training/",
    transform=ToTensor(),
)
testing_data = ImageFolder(
    root="data_testing/",
    transform=ToTensor(),
)
training_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(testing_data, batch_size=64, shuffle=True)
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(1296, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 84)
        self.fc5 = nn.Linear(84, 5)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = self.fc5(x)
        return x

model = NeuralNetwork().to("cpu")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to("cpu"), y.to("cpu")

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

def tests(dataloader, model):
    size = len(dataloader.dataset)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to("cpu"), y.to("cpu")
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(training_dataloader, model, loss_fn, optimizer)
    tests(test_dataloader, model)
torch.save(model.state_dict(), "model.pth")
No matter how many epochs I train for, or how many layers I add to try to force it to overfit, it just guesses the same class over and over again, with no sign of improvement.
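A quick way to confirm this is to count the argmax predictions over the whole test set; every image ends up in the same class. This is just a small sketch that reuses the model and test_dataloader defined above:

from collections import Counter

# Count how often each class index is predicted on the test set
model.eval()
counts = Counter()
with torch.no_grad():
    for X, y in test_dataloader:
        preds = model(X).argmax(1)
        counts.update(preds.tolist())
print(counts)  # e.g. Counter({0: 10134}) if every image gets the same class index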
I separated the UTKFace dataset into folders based on the ethnicity field in each filename. There are 23705 images in the training data and 10134 in the testing data.
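For reference, the split was basically just parsing the race field out of each filename (UTKFace names look like [age]_[gender]_[race]_[date&time].jpg) and copying the file into a matching folder. A simplified sketch; the source path and folder names here are only illustrative:

import os
import shutil

src = "UTKFace/"        # folder with the raw UTKFace images (illustrative path)
dst = "data_training/"  # destination root; same layout used for data_testing/

for name in os.listdir(src):
    parts = name.split("_")
    if len(parts) < 4:
        continue  # skip files that don't match the naming scheme
    race = parts[2]  # race index, 0-4
    os.makedirs(os.path.join(dst, race), exist_ok=True)
    shutil.copy(os.path.join(src, name), os.path.join(dst, race, name))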