RuntimeError with pretrained inception v3 model

Hi, I want to finetuning the pretrained inception v3 model, but get type mismatch error. the following is my code.

#!/usr/bin/env python3

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torchvision.transforms as transforms
from bcdataset import BloodCellDataset
import time

n_classes = 4
lr = 1e-3
batch_size = 32
epoches = 30
data_root_dir = '/home/chen/database/blood-cells/dataset2-master/dataset2-master/images/'

train_data = BloodCellDataset(
    data_root_dir,
    './data/train.txt',
    transform=transforms.Compose([
        transforms.CenterCrop(240),
        transforms.Resize([299, 299]),  # inception v3 input size: 299x299x3
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]))
trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                          shuffle=True, num_workers=2)

if __name__ == '__main__':
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model_iv3 = models.inception_v3(pretrained=True, transform_input=True)

    for param in model_iv3.parameters():
        param.requires_grad = False

    n_features = model_iv3.fc.in_features
    model_iv3.fc = nn.Linear(n_features, n_classes)
    model_iv3.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model_iv3.parameters(), lr=lr, momentum=0.9)

    print('learning_rate: %f batch_size: %d' % (lr, batch_size))
    print('training...')

    for epoch in range(epoches):
        model_iv3.train()
        train_corrects = 0
        train_loss = 0
        with torch.set_grad_enabled(True):
            for i, data in enumerate(trainloader, 0):
                model_iv3.zero_grad()
                images, labels = data
                images.to(device)
                labels.to(device)
                outputs, _ = model_iv3(images)
                _, preds = torch.max(outputs, 1)
                # one-hot label
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                train_loss += loss
                train_corrects += torch.sum(preds == labels)

the output:

learning_rate: 0.001000 batch_size: 32
training...
Traceback (most recent call last):
  File "train_.py", line 56, in <module>
    outputs, _ = model_iv3(images)
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/inception.py", line 78, in forward
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/inception.py", line 325, in forward
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/chen/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 301, in forward
    self.padding, self.dilation, self.groups)
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.FloatTensor for argument #2 'weight'

i tried to modify model_iv3 to model_iv3.cuda() or add torch.set_default_tensor_type(‘torch.cuda.FloatTensor’), but it does not work. any idea?

You have to assign the images and labels when you push them onto the GPU:

images = images.to(device)
labels = labels.to(device)

Could you change it and try it again?

that works. :smiley: how silly mistake i made.
thanks!