Sure, here is the function that modifies a pretrained ResNet so it outputs 2 rather than the default 1000 classes:
```python
# imports shared by the snippets below
import copy
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models


def get_modified_pretrained_model(name):
    describe_model = 'Basic ' + name + ' that outputs 2 rather than 1000 classes!'
    if name == 'resnet18':
        net = models.resnet18(pretrained=True)
    elif name == 'resnet34':
        net = models.resnet34(pretrained=True)
    elif name == 'resnet50':
        net = models.resnet50(pretrained=True)
    elif name == 'resnet101':
        net = models.resnet101(pretrained=True)
    elif name == 'resnet152':
        net = models.resnet152(pretrained=True)
    # replace the final 1000-class fully connected layer with a 2-class head
    num_ftrs = net.fc.in_features
    net.fc = nn.Sequential(
        nn.Linear(num_ftrs, 2)
    )
    return net, describe_model
```
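For context, this is roughly how I call it ('resnet50' is just an example argument):

```python
net, description = get_modified_pretrained_model('resnet50')
print(description)  # Basic resnet50 that outputs 2 rather than 1000 classes!
```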
Here is the training protocol:
```python
def training_protocol(model):
    describe_training_protocol = ('Modified training_protocol with nn.CrossEntropyLoss(), '
                                  'optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, '
                                  'weight_decay=0.00001)')
    criterion = nn.CrossEntropyLoss().cuda()
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9,
                             weight_decay=0.00001)
    return criterion, optimizer_ft, describe_training_protocol
```
Here is the training function:
```python
def train_model(dataloaders, dataset_sizes, model, criterion, optimizer,
                num_epochs=10, temp_save_name=None):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_log_loss = 1
    model = model.cuda()
    for epoch in range(1, num_epochs + 1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('*' * 10)
        # Each epoch has a training and a validation phase
        model.train(True)  # Set model to training mode
        # Iterate over data.
        for i, (input, target) in enumerate(dataloaders['train']):
            target = target.cuda(async=True)
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # epoch statistics (these two are my own helpers, defined elsewhere)
        print_training_set_performance(dataset_sizes, dataloaders['train'], model)
        print('----------')
        epoch_sk_log_loss = print_val_set_performance(dataset_sizes, dataloaders['val'], model)
        # deep copy the model weights if the logloss improved
        if epoch_sk_log_loss < best_log_loss:
            best_log_loss = epoch_sk_log_loss
            best_model_wts = copy.deepcopy(model.state_dict())
            if temp_save_name is not None:
                print('model is saved after epoch ' + str(epoch))
                name = temp_save_name + '_' + '.pth'
                torch.save(model, name)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val logloss: {:4f}'.format(best_log_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
```
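And this is roughly how everything is wired together in my fine_tuna_protocol (a simplified sketch; dataloaders and dataset_sizes come from my data-loading code, which I have omitted, and 'resnet50' / 'results' are just example arguments):

```python
model_ft, model_desc = get_modified_pretrained_model('resnet50')
criterion, optimizer_ft, protocol_desc = training_protocol(model_ft)
model_ft = train_model(dataloaders, dataset_sizes, model_ft, criterion,
                       optimizer_ft, num_epochs=10, temp_save_name='results')
```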
Here is the full error message:
```
Traceback (most recent call last):
  File "finetunacuda.py", line 322, in <module>
    main()
  File "finetunacuda.py", line 320, in main
    fine_tuna_protocol()
  File "finetunacuda.py", line 298, in fine_tuna_protocol
    model_ft = train_model(dataloaders, dataset_sizes, model_ft, criterion, optimizer_ft, num_epochs = nep, temp_save_name = name_of_results_output_txt_file)
  File "finetunacuda.py", line 238, in train_model
    output = model(input_var)
  File "/home/ubuntu/envs/deepL/lib/python3.5/site-packages/torch/nn/modules/module.py", line 325, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/ubuntu/envs/deepL/lib/python3.5/site-packages/torchvision/models/resnet.py", line 139, in forward
    x = self.conv1(x)
  File "/home/ubuntu/envs/deepL/lib/python3.5/site-packages/torch/nn/modules/module.py", line 325, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/ubuntu/envs/deepL/lib/python3.5/site-packages/torch/nn/modules/conv.py", line 277, in forward
    self.padding, self.dilation, self.groups)
  File "/home/ubuntu/envs/deepL/lib/python3.5/site-packages/torch/nn/functional.py", line 90, in conv2d
    return f(input, weight, bias)
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.FloatTensor for argument #2 'weight'
```
So, as you can see, the issue arises during the forward pass of training.
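For what it's worth, I can reproduce what looks like the same error with a self-contained snippet (a minimal sketch, assuming a CUDA-capable machine): a conv layer whose weights are on the GPU, fed a CPU tensor. That makes me suspect that, in my loop, input never reaches the GPU, unlike target.

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 64, kernel_size=7).cuda()              # weights: torch.cuda.FloatTensor
x = torch.autograd.Variable(torch.randn(1, 3, 224, 224))   # input: torch.FloatTensor (CPU)

out = conv(x)  # raises a RuntimeError about mismatched FloatTensor / cuda.FloatTensor types
```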
Thanks a lot for the assistance!