My network is initialized as below:
#VGG19
import torchvision
import torch.nn as nn
import torch.nn.functional as F
vgg19 = torchvision.models.vgg19(pretrained=True)
for param in vgg19.parameters():
    param.requires_grad = False  # requires_grad=True by default
vgg19.fc = nn.Linear(1000, 8)  # 8 output neurons, otherwise change it
vgg19.cuda()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(vgg19.fc.parameters(), lr=0.001, momentum=0.9)#lr 0.001
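
In case it helps, here is a small diagnostic sketch (just for illustration, it only uses the vgg19 object defined above) that lists which parameters still require gradients, i.e. what the optimizer can actually update:

# Diagnostic sketch: list the parameters that still require gradients.
# Whatever is printed here is all that optimizer.step() could ever update.
trainable = [name for name, p in vgg19.named_parameters() if p.requires_grad]
print("trainable parameters:", trainable)
print("number of tensors passed to the optimizer:",
      len(list(vgg19.fc.parameters())))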
But when I train with the code below, there is an error in loss.backward():
Code:
from torch.autograd import Variable  # needed for the Variable wrapper below

inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = vgg19(inputs)
#print(type(outputs),type(inputs))
loss = criterion(outputs, labels)
#loss = F.nll_loss(outputs, labels)
loss.backward()
optimizer.step()
step += 1
# print statistics
running_loss += loss.data[0]
Error:
RuntimeError: there are no graph nodes that require computing gradients
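
In case it matters, here is a quick check I can add right before loss.backward() (again just a sketch, reusing outputs and vgg19 from the code above; I am not sure this is the right way to debug it):

# If the network output does not require grad, nothing in the forward graph
# is trainable, and backward() has no graph nodes to compute gradients for.
print("outputs.requires_grad:", outputs.requires_grad)
# The frozen/unfrozen state of each parameter, independent of the forward pass:
print("any trainable parameter:", any(p.requires_grad for p in vgg19.parameters()))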