image_path = "drive/MyDrive/Animal_Breed/TRAIN (1)/"
train_data_transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.RandomHorizontalFlip(),
transforms.RandomAffine(20),
transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
train_dataset = ImageFolder(image_path, transform = train_data_transform)
val_path = "drive/MyDrive/Animal_Breed/VAL/"
val_data_transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
val_dataset = ImageFolder(val_path, transform = val_data_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, 32,
shuffle=True,)
val_loader = torch.utils.data.DataLoader(val_dataset, 1, shuffle = False)
def train(model,dataloader,validloader,criterion,optimizer,epochs=50,scheduler=scheduler):
max_valid_acc = 0
train_acc,val_acc = 0,0
for e in range(epochs):
train_loss = 0.0
model.train()
for data, labels in dataloader:
# print(labels.shape[0])
data, labels = data.to(device), labels.to(device)
# print(data.shape)
optimizer.zero_grad()
target = model(data)
# print(target.shape)
loss = criterion(target.float(),labels.long())
loss.backward()
optimizer.step()
train_loss = loss.item() * data.size(0)
train_acc += torch.sum((torch.max(target, 1)[1] == labels.data),0)
valid_loss = 0.0
model.eval()
for data, labels in validloader:
data, labels = data.to(device), labels.to(device)
target = model(data)
loss = criterion(target.float(),labels.long())
valid_loss = loss.item() * data.size(0)
val_acc += torch.sum((torch.max(target, 1)[1] == labels.data),0)
print(f'Epoch {e+1} \t\t Training Loss: {train_loss / len(dataloader)} \t\t Validation Loss: {valid_loss / len(validloader)}')
print("Validation Accuracy ... :",val_acc/(len(validloader)))
print("Train Accuracy ... :",train_acc/len(train_dataset))
if val_acc > max_valid_acc:
print(f'Validation Acc Increased({max_valid_acc:.6f}--->{val_acc:.6f}) \t Saving The Model')
max_valid_acc = val_acc
# Saving State Dict
torch.save(model.state_dict(), 'HIGH_ACC.pth')
scheduler.step(val_acc)
train_acc = 0
val_acc = 0
return model
This is the code , I’m using VGG16 pretrained model, It’s been 30 minutes on colab but it has not finished 1 epoch yet! why it’s so much time consuming?