RuntimeError: Expected object of type torch.DoubleTensor but found type torch.cuda.FloatTensor

Hi, I am working on an implementation of a 3-layer conv net. I've been getting this error:

Traceback (most recent call last):
  File "trainingnet.py", line 122, in <module>
    output_net = net.forward(inputs)
  File "trainingnet.py", line 79, in forward
    x = self.conv1(x)
  File "/home/drb12/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/drb12/anaconda3/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 301, in forward
    self.padding, self.dilation, self.groups)
RuntimeError: Expected object of type torch.DoubleTensor but found type torch.cuda.FloatTensor for argument #2 'weight'

I've tried several ways of casting my net to CUDA, including casting each individual layer with .type(torch.cuda.FloatTensor) and casting the entire net with net = net.cuda(0), but nothing seems to solve the type mismatch. This is my first use of PyTorch aside from the tutorials, so it's possible I'm missing something. The code is below:

import scipy.io
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

c1 = 1         # input channels
f1 = 9         # conv1 kernel size
f2 = 1         # conv2 kernel size
f3 = 5         # conv3 kernel size
n1 = 64        # conv1 output channels
n2 = 32        # conv2 output channels
n3 = 1         # (unused)
c2 = 1         # output channels
pad1 = 0
pad2 = 0
pad3 = 0
stride1 = 1
stride2 = 1
stride3 = 1
epoch_g = 50   # number of training epochs

class ImageDataset(Dataset):
    def __init__(self):
        mat = scipy.io.loadmat('TrainingSet.mat')
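        # note: MATLAB doubles load as float64 numpy arrays, so the tensors
        # built below with torch.from_numpy will be torch.DoubleTensors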
        label_mat = mat['low_res']
        input_mat = mat['high_res']
        l = input_mat.shape[2]
        self.input = []
        self.label = []
        for i in range(l):
            img_in = input_mat[:,:,i]
            img_in = torch.from_numpy(img_in)
            img_in = img_in.unsqueeze(0)
            img_l  = label_mat[:,:,i]
            img_l = torch.from_numpy(img_l)
            img_l = img_l.unsqueeze(0)
            self.input.append(img_in)
            self.label.append(img_l)
        print(len(self.input))
        print(len(self.label))
        self.len = len(self.input)

    def __getitem__(self, index):
        return self.input[index], self.label[index]

    def __len__(self):
        return self.len

class Net(nn.Module):

    def __init__(self):
        dtype = torch.cuda.FloatTensor
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(c1, n1, kernel_size = f1, padding = pad1, stride = stride1).type(dtype)
        self.conv2 = nn.Conv2d(n1, n2, kernel_size = f2, padding = pad2, stride = stride2).type(dtype)
        self.conv3 = nn.Conv2d(n2, c2, kernel_size = f3, padding = pad3, stride = stride3).type(dtype)
        self.relu = nn.ReLU(inplace = True)
        self.conv1.weight.data.normal_(0.0, 0.001)
        self.conv2.weight.data.normal_(0.0, 0.001)
        self.conv3.weight.data.normal_(0.0, 0.001)
        self.conv1.bias.data.fill_(0)
        self.conv2.bias.data.fill_(0)
        self.conv3.bias.data.fill_(0)


    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(self.conv2(x))
        x = self.conv3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

def save_checkpoint(model, epoch):
    model_out_path = "checkpoint/" + "model_epoch_{}.pth".format(epoch)
    state = {"epoch": epoch ,"model": model}
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save(state, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))

if __name__ == '__main__':
    # Read Training Data Set From .MAT File
    dataset = ImageDataset()
    train_loader = DataLoader(dataset = dataset,
                              batch_size = 32,
                              shuffle = True,
                              num_workers = 1)
    net = Net()
    net = net.to(device)
    opt = optim.Adam([{'params': net.conv1.parameters(), 'lr': 1e-4}, {'params': net.conv2.parameters(),
        'lr': 1e-4}, {'params':net.conv3.parameters(), 'lr': 1e-5}])
    #opt = optim.Adam([{'params': net.conv1.weight, 'lr': 1}, {'params':net.conv1.bias, 'lr': 0.1},
     #   {'params': net.conv2.weight, 'lr': 1}, {'params':net.conv2.bias, 'lr': 0.1},
      #  {'params': net.conv3.weight, 'lr': 0.1}, {'params':net.conv3.bias, 'lr': 0.1}])
    loss = nn.MSELoss(size_average = True)
    loss_list = []
    loss_list_epoch = []
    accu_list_epoch = []
    accu_list =[]
    for epoch in range(epoch_g):
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
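            # at this point inputs and labels are still CPU DoubleTensors,
            # while the net's weights are torch.cuda.FloatTensors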
            inputs, labels = Variable(inputs), Variable(labels)
            output_net = net.forward(inputs)
            # compare the network output to the labels, not the inputs
            # (with zero padding the output is smaller than the labels,
            # so the labels may need cropping to match)
            output = loss(output_net, labels)
            net.zero_grad()
            output.backward()
            opt.step()
            loss_list.append(output.item())
            total = labels.numel()   # pixels per batch
            correct = (inputs == labels).sum().item()
            accu = correct/total
            accu_list.append(accu)
            print("Epoch[{}]({}/{}): Loss: {:.10f}, Accu: {:.10f}".format(epoch, i, l, output, accu*100))
        save_checkpoint(net, epoch)
        loss_list_epoch.append(loss_list)
        accu_list_epoch.append(accu_list)
        accu_list = []
        loss_list = []
    print(loss_list_epoch)
    print(accu_list_epoch)
    mean_accu_epoch = []
    mean_loss_epoch = []
    for epoch in range(epoch_g):
        print(epoch)
        mean_accu_epoch.append(np.mean(accu_list_epoch[epoch]))
    print(mean_accu_epoch)
    np.savetxt("Mean_Accu.csv", mean_accu_epoch, delimiter=",")

You have to allocate the inputs on the GPU:

inputs, labels = Variable(inputs.float().cuda()), Variable(labels.cuda())

Note that the dtypes also have to match (usually float), so review your labels' dtype as well.
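For reference, here is a minimal sketch of that fix applied to the training loop above, using the device object the script already defines. This assumes the .mat arrays are float64, which is what scipy.io.loadmat usually returns for MATLAB doubles:

net = Net().to(device)
for i, data in enumerate(train_loader, 0):
    inputs, labels = data
    # from_numpy on float64 arrays yields DoubleTensors; cast to float32
    # and move both batches to the same device as the model's weights
    inputs = inputs.float().to(device)
    labels = labels.float().to(device)
    output_net = net(inputs)

Alternatively, you can do the cast once in the dataset, e.g. img_in = torch.from_numpy(img_in).float(), so the DataLoader already yields FloatTensors.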
