I get this error while applying data augmentation on UTKFace dataset. This is my program code.
‘’’
class UTKFaceDataset(Dataset):
def init(self, root_dir, transform=None):
self.root_dir = root_dir
self.transform = transform
self.image_files = sorted(os.listdir(‘/content/drive/MyDrive/UTKFace’))
def len(self):
return len(self.image_files)
def getitem(self, idx):
image_path = os.path.join(self.root_dir, self.image_files[idx])
image = Image.open(image_path).convert(‘RGB’)
age = int(self.image_files[idx].split(‘‘)[0])
gender = int(self.image_files[idx].split(’’)[1])
if self.transform:
image = self.transform(image)
return image, age, gender
‘’’
‘’’
Define the CNN architecture
class CNN(nn.Module):
def init(self):
super(CNN, self).init()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(128 * 16 * 16, 512)
self.fc2 = nn.Linear(512, 2) # gender out
self.fc3 = nn.Linear(512, 1) # age out
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = self.pool(nn.functional.relu(self.conv3(x)))
x = x.view(-1, 128 * 16 * 16)
age = self.fc3(nn.functional.relu(self.fc1(x)))
gender = self.fc2(nn.functional.relu(self.fc1(x)))
return {'label1': age, 'label2': gender}
‘’’
‘’’
Define the data augmentation transforms
transform_train = transforms.Compose([
transforms.RandomCrop(128, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
transforms.Resize(128),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
‘’’
‘’’
Initialize the dataset and dataloaders
train_dataset = UTKFaceDataset(‘/content/drive/MyDrive/UTKFace’, transform=transform_train)
test_dataset = UTKFaceDataset(‘/content/drive/MyDrive/UTKFace’, transform=transform_test)
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=4)
‘’’
‘’’
Define the loss function and optimizer
criterion_age = nn.MSELoss()
criterion_gender = nn.CrossEntropyLoss()
optimizer = optim.Adam(cnn.parameters(), lr=0.001)
training_loss_list = []
iter_list=[]
def train(model, criterion1, criterion2, train_dataloader, optimizer, epoch):
Set the model to training mode
model.train()
train_loss = 0
print(“Epoch:”, epoch)
Process the images in batches
for batch_idx, (data, target1, target2) in enumerate(train_dataloader):
data, target1, target2 = data.to(device), target1.to(device), target2.to(device)
# Reset the optimizer
optimizer.zero_grad()
#Push the data forward through the model layer
output = model(data.long())
label1_hat = output['label1']
label2_hat = output['label2']
# Get the loss
loss1 = criterion1(label1_hat, target1.long())
loss2 = criterion2(label2_hat, target2.long())
loss = loss1 + loss2
# Backpropagate
loss.backward()
optimizer.step()
train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
if batch_idx % 50 == 0:
print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss))
return average loss for the epoch
#avg_loss = train_loss / (batch_idx + 1)
iter_list.append(epoch)
training_loss_list.append(train_loss)
print(‘Training set: Average loss: {:.6f}’.format(train_loss))
return train_loss
age_accuracy_list = []
gender_accuracy_list = []
validation_loss_list=[]
def test(model, criterion1, criterion2, test_dataloader, optimizer, epoch):
Switch the model to evaluation mode
model.eval()
correct_1 = 0
correct_2 = 0
total_1 = 0
total_2 = 0
valid_loss = 0
with torch.no_grad():
for batch_idx, (data, target1, target2) in enumerate(test_dataloader):
data, target1, target2 = data.to(device), target1.to(device), target2.to(device)
data = data.requires_grad_() # Load images(for accuracy)
output = model(data.long())
_, predicted1 = torch.max(output['label1'], 1)
_, predicted2 = torch.max(output['label2'], 1)
label1_hat = output['label1']
label2_hat = output['label2']
total_1 += target1.size(0)
total_2 += target2.size(0)
correct_1 += torch.sum(predicted1 == target1).item()
correct_2 += torch.sum(predicted2 == target2).item()
age_accuracy = 100 * correct_1 // total_1
gender_accuracy = 100 * correct_2 // total_2
Total_Accuracy = age_accuracy + gender_accuracy
# calculate loss
loss1 = criterion1(label1_hat, target1.long())
loss2 = criterion2(label2_hat, target2.long())
loss = loss1+loss2
valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tAge_Accuracy: {} \tGender_Accuracy: {} \tTotal_Accuracy: {}'.format(
epoch, train_loss, valid_loss, age_accuracy, gender_accuracy, Total_Accuracy))
age_accuracy_list.append(age_accuracy)
gender_accuracy_list.append(gender_accuracy)
validation_loss_list.append(valid_loss)
return valid_loss, age_accuracy, gender_accuracy
epochs = 20
print(‘Training on’, device)
for epoch in range(1, epochs + 1):
train_loss = train(cnn, criterion_age, criterion_gender, train_dataloader, optimizer, epoch)
valid_loss = test(cnn, criterion_age, criterion_gender, test_dataloader, optimizer, epoch)
‘’’
I’m not understanding how to correct my code. Thank you in advance.