Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #2 'other'

I know there are many posts related to this error, but I still can't seem to get my model to run. The error, Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #2 'other', seems to occur no matter where I put .cuda(). I have included what I believe to be the relevant code below, and I would really appreciate it if someone could point out where the type mismatch is happening. Thank you in advance for any help.

class VGG(nn.Module):
    '''
    VGG model 
    '''
    def __init__(self, features): # features represents the layers array
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512,512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
         # Initialize weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()


    def forward(self, x): # x is the image, we run x through the layers
        print("Running through features")
        x = self.features(x) # runs through all features, where each feature is a function
        print("Finsihed features, going to classifier")
        x = x.view(x.size(0), -1) 
        # after running through features, does sequential steps to finally classify
        x = self.classifier(x)
        return x


def make_layers(cfg, batch_norm=False):
   # print("Making layers!")
    layers = []
    # clearing the layers for next vgg model
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
            rlstm = RLSTM(v)
            rlstm.input_to_state = torch.nn.DataParallel(rlstm.input_to_state)
            rlstm.state_to_state = torch.nn.DataParallel(rlstm.state_to_state)
            rlstm = rlstm.cuda()
            layers += [rlstm]

    return nn.Sequential(*layers)

class RLSTM(nn.Module):
    def __init__(self,ch):
       # torch.set_default_tensor_type('torch.cuda.FloatTensor')
        super(RLSTM,self).__init__()
        self.ch=ch
        self.input_to_state = torch.nn.Conv2d(self.ch,4*self.ch,kernel_size=(1,3),padding=(0,1)).cuda()
        self.state_to_state = torch.nn.Conv2d(self.ch,4*self.ch,kernel_size=(1,3),padding=(0,1)).cuda() # error is here: hidPrev is an array - not a valid number of input channel
      

    def forward(self, image):
      #  print("going in row forward")
        global current
        global _layer
        global isgates
        size = image.size()
        print("size: "+str(size))
        b = size[0]
        indvs = list(image.split(1,0)) # split up the batch into individual images
        #print(indvs[0].size())
        tensor_array = []
        for i in range(b):
            current = 0
            _layer = []
            isgates = []
            print(len(tensor_array))
            tensor_array.append(self.RowLSTM(indvs[i]))

        seq=tuple(tensor_array)
        trans = torch.cat(seq,0)
        print(trans.size())
        return trans.cuda() # trying to make floattensor error go away 
    def RowLSTM(self, image): 
    #    print("going in rowlstm")
        global current
        global _layer
        global isgates


        # input-to-state (K_is * x_i) : 3x1 convolution. generate 4h x n x n tensor. 4hxnxn tensor contains all i -> s info

    # the input to state convolution should only be computed one time 
        if current==0:
            n = image.size()[2]
            ch=image.size()[1]
           # input_to_state = torch.nn.Conv2d(ch,4*ch,kernel_size=(1,3),padding=(0,1))
          #  print("about to do convolution")
            isgates = self.splitIS(self.input_to_state(image)) # convolve, then split into gates (4 per row)

            cell=RowLSTMCell(0,torch.randn(ch,n,1),torch.randn(ch,n,1),torch.randn(ch,n,1),torch.randn(ch,n,1),torch.randn(ch,n,1),torch.randn(ch,n,1))
            # now have dummy, learnable variables for first row
            _layer.append(cell)
            print("layeres: "+str(len(_layer)))
        else:   
            Cell_prev = _layer[current-1] # access previous row
            hidPrev = Cell_prev.getHiddenState() 
            ch = image.size()[1] 
        #   print("about to apply conv1d")
           # state_to_state = torch.nn.Conv2d(ch,4*ch,kernel_size=(1,3),padding=(0,1)) # error is here: hidPrev is an array - not a valid number of input channel
        #   print("applied conv1d") 
            prevHid=Cell_prev.getHiddenState()
            ssgates = self.splitSS(self.state_to_state(prevHid.unsqueeze(0))) #need to unsqueeze (Ex: currently 16x5, need to make 1x16x5)
            gates = self.addGates(isgates,ssgates,current)
            # split gates
            ig, og, fg, gg = gates[0], gates[1], gates[2], gates[3] # into four, ADD SIGMOID!
            cell = RowLSTMCell(Cell_prev,ig,og,fg,gg,0,0)
            cell.compute()
            _layer.append(cell)
        # attempting to eliminate requirement of getting size

        #print(current)
        try:
            print("adding one to current")
            current+=1
            y=(isgates[0][0][1][current])
            
            return self.RowLSTM(image) #expecting floattensor, but gets cuda floattensor

        except Exception as error:
            print(error)
            concats=[]
            print(len(_layer))
            for cell in _layer:
                tensor=torch.unsqueeze(cell.h,0)
                concats.append(tensor)
            seq=tuple(concats)

            print("non catted tensor: "+str(tensor.size()))
            tense=torch.cat(seq,3)
            print("catted lstm tensor "+str(tense.size()))
            return tense # return the concatenated tensor; returning "tensor" here was only the last row

Note: the code tries to run through the try block, but then the error is thrown.
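For reference, this error simply means a CPU tensor and a GPU tensor ended up in the same operation. A minimal sketch that reproduces it (the tensors here are made up, not taken from the model above, and a CUDA-capable machine is assumed):

    import torch

    a = torch.randn(3, 3).cuda()  # torch.cuda.FloatTensor (GPU)
    b = torch.randn(3, 3)         # torch.FloatTensor (CPU)

    try:
        c = a + b  # mixing devices raises the RuntimeError about argument #2 'other'
    except RuntimeError as e:
        print(e)

    c = a + b.cuda()  # putting both operands on the same device avoids it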

If you post the full stack trace, it will be easier to see where the error is coming from. Could you paste it?

Hi, thank you for your reply. I was actually able to find the solution to my problem: I was adding a torch.FloatTensor (CPU) to a torch.cuda.FloatTensor (GPU), and that was causing the error. The error did not actually lie in this piece of code. 🙂
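For anyone who lands here with the same problem, the usual pattern to avoid it is to pick one device and move both the model and its inputs there. A hedged sketch (the Linear layer is just a stand-in, not part of the code above):

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = torch.nn.Linear(8, 2).to(device)  # parameters move to the chosen device
    x = torch.randn(4, 8).to(device)          # inputs move the same way
    out = model(x)                            # no CPU/GPU mix, so no type error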


Traceback (most recent call last):
  File "cam_new_30nov.py", line 86, in <module>
    logit = net(img_variable)
  File "/mnt/backup/medhani/pytorch/local/lib/python2.7/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/backup/medhani/pytorch/local/lib/python2.7/site-packages/torchvision/models/resnet.py", line 139, in forward
    x = self.conv1(x)
  File "/mnt/backup/medhani/pytorch/local/lib/python2.7/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/backup/medhani/pytorch/local/lib/python2.7/site-packages/torch/nn/modules/conv.py", line 301, in forward
    self.padding, self.dilation, self.groups)
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.FloatTensor for argument #2 'weight'
I got the above error for the code below. Do you have any idea what is causing it?

import json
import numpy as np
import cv2
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms

# net and finalconv_name are assumed to be defined earlier in the script

# hook the feature extractor
features_blobs = []
def hook_feature(module, input, output):
    features_blobs.append(output.data.cpu().numpy())

net._modules.get(finalconv_name).register_forward_hook(hook_feature)

print(net)

# get the softmax weight
params = list(net.parameters())
weight_softmax = np.squeeze(params[-2].data.cpu().numpy())

def returnCAM(feature_conv, weight_softmax, class_idx):
    # generate the class activation maps, upsampled to 256x256
    size_upsample = (256, 256)
    bz, nc, h, w = feature_conv.shape
    output_cam = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        print('A')
        print(np.uint8(255 * cam_img))
        cam_img = np.uint8(255 * cam_img)
        output_cam.append(cv2.resize(cam_img, size_upsample))
    return output_cam

normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize
])

#response = requests.get(IMG_URL)
img_pil = Image.open("ADNI_002_S_0619_MR_MPR-R__GradWarp__N3__Scaled_2_Br_20081001115218896_S15145_I118678.png")
#img_pil = Image.open(io.BytesIO(response.content))
img_pil.save('test.jpg')

img_tensor = preprocess(img_pil)
img_variable = Variable(img_tensor.unsqueeze(0).cpu())
logit = net(img_variable)

# download the imagenet category list
#classes = {int(key):value for (key, value)
#           in requests.get(LABELS_URL).json().items()}

dic = {0: 'MCIs', 1: 'MCIc'}
with open('label2.json', 'w') as fp:
    fp.write(json.dumps(dic, sort_keys=True, indent=4))

with open('label2.json') as json_data:
    d = json.load(json_data)
    print(d)
#pdb.set_trace()
classes = {int(key): value for (key, value)
#          in requests.get('label2.json').json().items()}
           in d.items()}

#pdb.set_trace()
h_x = F.softmax(logit, dim=1).data.squeeze()
probs, idx = h_x.sort(0, True)
probs = probs.numpy()
idx = idx.numpy()

# output the prediction
#for i in range(0, 5):
#    print('{:.3f} -> {}'.format(probs[i], classes[idx[i]]))

# generate class activation mapping for the top1 prediction
CAMs = returnCAM(features_blobs[0], weight_softmax, [idx[0]])
print(idx[0])

# render the CAM and output
# Got an error
#print('output CAM.jpg for the top1 prediction: %s' % classes[idx[0]])
img = cv2.imread('test.jpg')
height, width, _ = img.shape
heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (width, height)), cv2.COLORMAP_JET)
result = heatmap * 0.3 + img * 0.5
cv2.imwrite('CAM.jpg', result)

#with open('label.json') as json_data:
#    d = json.load(json_data)
#    print(d)
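Not a definitive diagnosis, but the traceback says conv1 expected a torch.FloatTensor (CPU) input and instead found torch.cuda.FloatTensor weights, which matches the explicit .cpu() on img_variable while net apparently lives on the GPU. Assuming that is the case, a minimal sketch of the change:

    # move the input to whatever device the model's parameters are on
    device = next(net.parameters()).device
    img_variable = Variable(img_tensor.unsqueeze(0)).to(device)
    logit = net(img_variable)

    # alternatively, keep everything on the CPU instead: net = net.cpu()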

@rjsdebug: I'm facing the same issue. Could you please share more details about where you made the changes?