RuntimeError: expected type torch.FloatTensor but got torch.cuda.FloatTensor

I just want to validate the model's accuracy, but I get an error when I launch it.
main code:

num_classes = 1000


def _pre_proc(input_size):
    # Data augmentation and normalization for training
    # Just normalization for validation
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    return data_transforms

def val_model(model, dataloader):
    since = time.time()
    model.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    running_corrects = 0
    for inputs, labels in dataloader['val']:
        inputs = inputs.to(device)
        labels = labels.to(device)
        print(labels)

        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            running_corrects += torch.sum(preds == labels.data)

    time_elapsed = time.time() - since
    print('val complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    acc = running_corrects.double() / len(dataloader['val'].dataset)
    print("acc is : {:.3f}%".format(acc * 100))

   


if __name__ == "__main__":
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              _pre_proc(cfg.model.input_size)[x]) for x in ['val']}
    # print(image_datasets['val'].classes)
    dataloader = {x: torch.utils.data.DataLoader(
                                            image_datasets[x], 
                                            batch_size=64, 
                                            shuffle=True,
                                            num_workers=4) for x in ['val']}
    
    model = build_net('classfiy', cfg.model.input_size, cfg.model)
    model.init_model('weights\squeezenet.pth','weights\classfier.pth')
    val_model(model,dataloader)

The error info:

Traceback (most recent call last):
  File "c:\Users\GJF\.vscode\extensions\ms-python.python-2019.6.22090\pythonFiles\ptvsd_launcher.py", line 43, in <module>
    main(ptvsdArgs)
  File "c:\Users\GJF\.vscode\extensions\ms-python.python-2019.6.22090\pythonFiles\lib\python\ptvsd\__main__.py", line 434, in main
    run()
  File "c:\Users\GJF\.vscode\extensions\ms-python.python-2019.6.22090\pythonFiles\lib\python\ptvsd\__main__.py", line 312, in run_file
    runpy.run_path(target, run_name='__main__')
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\runpy.py", line 263, in run_path
    pkg_name=pkg_name, script_name=fname)
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\runpy.py", line 96, in _run_module_code
    mod_name, mod_spec, pkg_name, script_name)
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "c:\Users\GJF\Dev\VSCode\Net\val.py", line 81, in <module>
    val_model(model, dataloader)
  File "c:\Users\GJF\Dev\VSCode\Net\val.py", line 49, in val_model
    for inputs, labels in dataloader['val']:
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py", line 658, in _process_next_batch
    raise batch.exc_type(batch.exc_msg)
RuntimeError: Traceback (most recent call last):
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py", line 138, in _worker_loop
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torch\utils\data\dataloader.py", line 138, in <listcomp>
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torchvision\datasets\folder.py", line 134, in __getitem__
    sample = self.transform(sample)
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torchvision\transforms\transforms.py", line 60, in __call__
    img = t(img)
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torchvision\transforms\transforms.py", line 163, in __call__
    return F.normalize(tensor, self.mean, self.std, self.inplace)
  File "C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torchvision\transforms\functional.py", line 208, in normalize
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
RuntimeError: expected type torch.FloatTensor but got torch.cuda.FloatTensor

I see that the problem is in F.normalize(), so I modified

'val': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),

to

'val': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
           # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),

Then there is no problem, but I cannot figure out why.

Version:

print(torch.__version__)
1.0.1

OS: Windows 10

What is the question exactly?

Sorry, I didn't describe it clearly. I have now re-edited the post, thank you.

Hi,

Thank you for your edit, it is clearer now. To be honest, I can't see why you get this error, because your data should not already be on the GPU at the dataloader step, so the required tensor type should be torch.FloatTensor.
That being said, I would suggest trying two things:
1: Change

dataloader = {x: torch.utils.data.DataLoader(
                                            image_datasets[x], 
                                            batch_size=64, 
                                            shuffle=True,
                                            num_workers=4) for x in ['val']}

to

dataloader = {x: torch.utils.data.DataLoader(
                                            image_datasets[x], 
                                            batch_size=64, 
                                            shuffle=True) for x in ['val']}

(back to the default value of num_workers), because maybe that is what makes the preprocessing happen on the GPU.
2: Or try giving torch.cuda.FloatTensor values for the mean and std of the normalization, like this:

transforms.Normalize(torch.tensor([0.485, 0.456, 0.406]).to(device), torch.tensor([0.229, 0.224, 0.225]).to(device))

A final solution would be to code the normalization as the first step of the forward pass of your network; a sketch is given below.
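Something like this, a minimal sketch on my side (the NormalizedModel wrapper and its names are only illustrative, not part of your code):

import torch
import torch.nn as nn

class NormalizedModel(nn.Module):
    # Wraps an existing model and normalizes the input as the first step of forward().
    def __init__(self, model):
        super().__init__()
        self.model = model
        # Register mean/std as buffers so they follow the model to the GPU with .to(device).
        self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x):
        # Normalize here instead of in the transform pipeline.
        x = (x - self.mean) / self.std
        return self.model(x)

You would then drop transforms.Normalize from the 'val' transform and wrap your model with NormalizedModel(model) before calling val_model.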

Hope one of those will work!

Thanks for your reply!
Actually, I have found the problem in C:\ProgramData\Miniconda3\envs\pytorch\lib\site-packages\torchvision\transforms\functional.py:

def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutates the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channely.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not _is_tensor_image(tensor):
        raise TypeError('tensor is not a torch image.')

    if not inplace:
        tensor = tensor.clone()

    mean = torch.tensor(mean, dtype=torch.float32)
    std = torch.tensor(std, dtype=torch.float32)
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    return tensor
These two lines:

    mean = torch.tensor(mean, dtype=torch.float32)
    std = torch.tensor(std, dtype=torch.float32)

do not seem to work as expected: they should make mean and std plain torch.float32 CPU tensors, but the resulting tensors have is_cuda == True!
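One way this could happen (an assumption, I'm not certain this is what my setup does): if the default tensor type has been switched to CUDA somewhere, factory calls like torch.tensor(...) without an explicit device create CUDA tensors, which is exactly what F.normalize() does internally with mean and std. A small reproduction:

import torch

# Hypothetical reproduction; requires a CUDA-capable machine.
torch.set_default_tensor_type('torch.cuda.FloatTensor')

# Without an explicit device, the factory call follows the default tensor type.
mean = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
print(mean.is_cuda)  # True

# Forcing the device gives the CPU tensor that F.normalize() expects.
mean_cpu = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32, device='cpu')
print(mean_cpu.is_cuda)  # False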

My solution is:
The first step: comment out transforms.Normalize()

'val': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            #transforms.Normalize(mean, std, inplace=False)
        ]),

The second step: do the normalization manually in the validation loop

mean = torch.tensor([0.485, 0.456, 0.406], device=device)
std = torch.tensor([0.229, 0.224, 0.225], device=device)

for inputs, labels in dataloader['val']:
    inputs = inputs.to(device)
    # normalize on the GPU instead of inside the transform pipeline
    inputs.sub_(mean[:, None, None]).div_(std[:, None, None])
    labels = labels.to(device)

That works!