"Element 0 of tensors does not require grad and does not have a grad_fn" using VGG

Hi!
I’m using models pre-trained on ImageNet with my own dataset. I used the exact same code for resnet50 and it worked just fine, but as soon as I switched to vgg19_bn this error appeared.
The traceback points to the loss.backward() call in the train() function.
Can someone please help me?

# (...)

train_transform = transforms.Compose([
    # transforms.RandomHorizontalFlip(p=0.5),  # data augmentation (disabled)
    # transforms.RandomVerticalFlip(p=0.5),    # data augmentation (disabled)
    transforms.Resize((im_size, im_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

img_transforms = transforms.Compose([
    transforms.Resize((im_size, im_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

def check_image(path):
    # Skip files that cannot be opened as images.
    try:
        Image.open(path)
        return True
    except Exception:
        return False

gc.collect()  # Summon the garbage collector

train_data_path = folder_path + "/training/"
train_data = torchvision.datasets.ImageFolder(root=train_data_path, transform=img_transforms, is_valid_file=check_image)
train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)

val_data_path = folder_path + "/validation/"
val_data = torchvision.datasets.ImageFolder(root=val_data_path, transform=img_transforms, is_valid_file=check_image)
val_data_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=True)

test_data_path = folder_path + "/test/"
test_data = torchvision.datasets.ImageFolder(root=test_data_path, transform=img_transforms, is_valid_file=check_image)
test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)

transfer_model = models.vgg19_bn(pretrained=True)

# Freezing the convolutional layers, except batch normalizations.
for name, param in transfer_model.named_parameters():
    if("bn" not in name):
        param.requires_grad = False
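# Note: parameter names in torchvision's VGG look like "features.1.weight",
# so the "bn" substring never matches and every parameter ends up frozen.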

transfer_model.fc = nn.Sequential(nn.Linear(1000, 500),
                                  nn.ReLU(),
                                  nn.Dropout(),
                                  nn.Linear(500, num_classes))

found_lr = 0.001  # Learning rate

def train(model, optimizer, loss_fn, train_loader, val_loader, epochs=epochs, device="cpu"):
    for epoch in range(epochs):
        training_loss = 0.0
        valid_loss = 0.0
        model.train()
        for batch in train_loader:
            optimizer.zero_grad()
            inputs, targets = batch
            inputs = inputs.to(device)
            targets = targets.to(device)
            output = model(inputs)
            loss = loss_fn(output, targets)
            loss.backward()
            optimizer.step()
            training_loss += loss.item() * inputs.size(0)
        training_loss /= len(train_loader.dataset)
        
        model.eval()
        num_correct = 0
        num_examples = 0
        with torch.no_grad():  # no gradients needed during validation
            for batch in val_loader:
                inputs, targets = batch
                inputs = inputs.to(device)
                targets = targets.to(device)
                output = model(inputs)
                loss = loss_fn(output, targets)
                valid_loss += loss.item() * inputs.size(0)
                correct = torch.eq(torch.max(F.softmax(output, dim=1), dim=1)[1], targets).view(-1)
                num_correct += torch.sum(correct).item()
                num_examples += correct.shape[0]
        valid_loss /= len(val_loader.dataset)

        print('Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}, Accuracy: {:.2f}'.format(
            epoch, training_loss, valid_loss, num_correct / num_examples))
        
gc.collect() # Summon the garbage collector

optimizer = optim.Adam(transfer_model.parameters(), lr=found_lr)
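# Note: frozen parameters (requires_grad=False) receive no gradients, so Adam
# leaves them untouched; filtering them out of the optimizer is optional.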

# Training
train(transfer_model, optimizer, torch.nn.CrossEntropyLoss(), train_data_loader, val_data_loader, epochs=epochs, device=device)

# (...)

vgg19_bn uses the model.classifier attribute for the last classification block, while ResNets use model.fc.
You are currently freezing the complete model and assigning a new attribute named fc with the nn.Sequential block. Since fc is never called in VGG's forward pass, the output is computed entirely from frozen parameters and has no grad_fn, which raises the error in loss.backward().
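You can verify which attribute holds the head by printing both models (just a quick check):

import torchvision.models as models

vgg = models.vgg19_bn(pretrained=True)
print(vgg.classifier)   # VGG's classification head lives in .classifier

resnet = models.resnet50(pretrained=True)
print(resnet.fc)        # ResNet's head lives in .fc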

Change it to

transfer_model.classifier = nn.Sequential(nn.Linear(25088, 500),
                                          nn.ReLU(),
                                          nn.Dropout(),
                                          nn.Linear(500, num_classes))

and it should work.
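A quick way to confirm the fix (a minimal sketch with a random dummy batch, assuming 224x224 inputs, the model on the CPU, and num_classes >= 2):

import torch

dummy = torch.randn(2, 3, 224, 224)
out = transfer_model(dummy)
print(out.grad_fn)  # no longer None, so backward() can run
loss = torch.nn.CrossEntropyLoss()(out, torch.tensor([0, 1]))
loss.backward()     # should not raise the error anymore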

PS: I also changed the in_features to 25088, as vgg19_bn expects this number of input features for the standard input shape of [batch_size, 3, 224, 224].
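That value comes from the last feature map: for a 224x224 input the feature extractor (followed by the adaptive average pooling in recent torchvision versions) outputs [batch_size, 512, 7, 7], and 512 * 7 * 7 = 25088 after flattening. You can confirm it with a dummy input:

feats = transfer_model.features(torch.randn(1, 3, 224, 224))
feats = transfer_model.avgpool(feats)  # -> [1, 512, 7, 7]
print(feats.flatten(1).shape)          # torch.Size([1, 25088])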
