AttributeError: 'list' object has no attribute 'cuda'

Hello all,
I am getting the above error when I call

fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval)

which runs the following loop:

for batch_idx, (data, target) in enumerate(train_loader):
    target = target if len(target) > 0 else None
    if not isinstance(data, (tuple, list)):
        data = (data,)
    if cuda:
        data = tuple(d.cuda() for d in data)
        if target is not None:
            print(target)
            target = target.cuda()

The output of print(target) is as follows:

[tensor([[[[ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697],
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.1943, -0.4279],
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2509,  0.0435],
          ...,
          [ 0.2697,  0.2697,  0.2697,  ..., -0.2016, -0.2016, -0.0885],
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2320,  0.2132,  0.2320],
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697]],

         ...,

         [[ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697],
          ...,
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697]]],

        ...,

        [[[ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697],
          ...,
          [ 0.2697,  0.2697,  0.2697,  ...,  0.2697,  0.2697,  0.2697]]]]),
tensor([24, 23, 12, 21,  7,  3, 24,  3, 14,  4, 10,  3, 11,  9, 15, 20, 14, 11,
         1, 11, 16,  2, 16, 26, 22, 10,  0, 17, 25,  0, 11, 11,  0,  5,  8, 13,
        20,  2, 26, 22, 19,  5,  6,  6, 10, 26,  2,  2,  1, 10, 11,  3, 17,  5,
        21,  6,  5, 19, 26,  1,  4,  5, 19, 12,  2, 25, 21, 18, 17,  4, 15, 20,
        24, 20,  5,  2, 23, 26, 11, 18, 14, 14,  0, 26, 20,  6, 19, 19, 12,  6,
        25, 21, 23, 18,  1, 17, 22, 18,  5,  1, 16, 15, 10,  5, 15,  2,  4, 19,
         8, 14, 19, 13,  1,  3, 19,  9, 11, 26, 10, 18, 21,  8, 13, 24, 15,  2,
        24,  2])]

I tried to convert this list into a NumPy array, but that gives me another error:
"ValueError: only one element tensors can be converted to Python scalars"

So target is apparently a list of two tensors: the image batch and a 1-D tensor of 128 labels. What is the solution for this? Any suggestions are welcome.

Thanks in advance.

It looks like a custom data loader implementation is being used that doesn't return target tensors but rather a list of tensors.
Do you want to convert the targets to a single tensor? You can likely just use torch.stack or torch.cat, depending on whether you want a new dimension to be added (stack adds one; cat joins along an existing one).

Otherwise, you can do something similar to what is already done for data (target = [t.cuda() for t in target]).
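
For example, a minimal sketch (with made-up per-sample label tensors, just to show the difference between the two):

import torch

# hypothetical per-sample 0-d label tensors, as a custom dataset might yield
targets = [torch.tensor(3), torch.tensor(7), torch.tensor(1)]

stacked = torch.stack(targets)  # adds a new dimension -> tensor([3, 7, 1])
print(stacked.shape)            # torch.Size([3])

# cat joins along an existing dimension, so 0-d tensors need unsqueeze first
catted = torch.cat([t.unsqueeze(0) for t in targets])
print(catted.shape)             # torch.Size([3])

# moving a list of tensors to the GPU, guarded in case CUDA is unavailable
if torch.cuda.is_available():
    targets = [t.cuda() for t in targets]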

Thanks for the reply. After applying the suggestion and using `target = [t.cuda() for t in target]`, I get the following error instead. What could be the reason for this? Should I use cat/stack?
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [32, 3, 5, 5], but got 1-dimensional input of size [128] instead

It looks like you may be passing your target where you intend to pass the data: the conv layer received a 1-dimensional input of size 128, which matches your label tensor rather than an image batch. Maybe double-check that data and target have the shapes you expect.
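
As a quick illustration (a minimal sketch, not your training code), feeding a 1-D tensor into a Conv2d with a [32, 3, 5, 5] weight reproduces this kind of error, while a 4-D image batch works:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 32, 5)          # weight shape [32, 3, 5, 5]

images = torch.randn(8, 3, 64, 64)  # proper 4-D batch: [N, C, H, W]
print(conv(images).shape)           # torch.Size([8, 32, 60, 60])

labels = torch.randint(0, 27, (128,)).float()  # 1-D, like the target tensor above
try:
    conv(labels)  # raises a RuntimeError (exact wording varies by PyTorch version)
except RuntimeError as e:
    print(e)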

I am using the neural network architecture shown below. Here is the full traceback:

File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/sharad/Few-shot-classification-siamese/siamese-triplet-master/networks.py", line 68, in forward
    output2 = self.embedding_net(x2)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/sharad/Few-shot-classification-siamese/siamese-triplet-master/networks.py", line 21, in forward
    output = self.convnet(x)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/container.py", line 119, in forward
    input = module(input)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 399, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "/home/sharad/miniconda3/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 395, in _conv_forward
    return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [32, 3, 5, 5], but got 1-dimensional input of size [128] instead

According to the error log, the problem occurs in SiameseNet, as mentioned above.

import torch.nn as nn
import torch.nn.functional as F


class EmbeddingNet(nn.Module):
    def __init__(self):
        super(EmbeddingNet, self).__init__()
        self.convnet = nn.Sequential(nn.Conv2d(3, 32, 5), nn.PReLU(),
                                     nn.MaxPool2d(2, stride=2),
                                     nn.Conv2d(32, 64, 5), nn.PReLU(),
                                     nn.MaxPool2d(2, stride=2))

        # 64 * 61 * 61 matches 256x256 inputs:
        # 256 -> conv(5) -> 252 -> pool -> 126 -> conv(5) -> 122 -> pool -> 61
        self.fc = nn.Sequential(nn.Linear(64 * 61 * 61, 256),
                                nn.PReLU(),
                                nn.Linear(256, 256),
                                nn.PReLU(),
                                nn.Linear(256, 2)
                                )

    def forward(self, x):
        output = self.convnet(x)
        output = output.view(output.size()[0], -1)
        output = self.fc(output)
        return output

    def get_embedding(self, x):
        return self.forward(x)


class EmbeddingNetL2(EmbeddingNet):
    def __init__(self):
        super(EmbeddingNetL2, self).__init__()

    def forward(self, x):
        output = super(EmbeddingNetL2, self).forward(x)
        output /= output.pow(2).sum(1, keepdim=True).sqrt()
        return output

    def get_embedding(self, x):
        return self.forward(x)


class ClassificationNet(nn.Module):
    def __init__(self, embedding_net, n_classes):
        super(ClassificationNet, self).__init__()
        self.embedding_net = embedding_net
        self.n_classes = n_classes
        self.nonlinear = nn.PReLU()
        self.fc1 = nn.Linear(2, n_classes)

    def forward(self, x):
        output = self.embedding_net(x)
        output = self.nonlinear(output)
        scores = F.log_softmax(self.fc1(output), dim=-1)
        return scores

    def get_embedding(self, x):
        return self.nonlinear(self.embedding_net(x))


class SiameseNet(nn.Module):
    def __init__(self, embedding_net):
        super(SiameseNet, self).__init__()
        self.embedding_net = embedding_net

    def forward(self, x1, x2):
        output1 = self.embedding_net(x1)
        print(output1.shape)
        output2 = self.embedding_net(x2)
        print(output2.shape)
        return output1, output2

    def get_embedding(self, x):
        return self.embedding_net(x)


class TripletNet(nn.Module):
    def __init__(self, embedding_net):
        super(TripletNet, self).__init__()
        self.embedding_net = embedding_net

    def forward(self, x1, x2, x3):
        output1 = self.embedding_net(x1)
        output2 = self.embedding_net(x2)
        output3 = self.embedding_net(x3)
        return output1, output2, output3

    def get_embedding(self, x):
        return self.embedding_net(x)
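
For reference, a minimal shape check for SiameseNet (assuming 256x256 inputs, which is what the 64 * 61 * 61 fc layer implies):

import torch

net = SiameseNet(EmbeddingNet())
x1 = torch.randn(4, 3, 256, 256)  # two 4-D image batches: [N, C, H, W]
x2 = torch.randn(4, 3, 256, 256)
out1, out2 = net(x1, x2)
print(out1.shape, out2.shape)     # torch.Size([4, 2]) torch.Size([4, 2])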

I have tried changing the input size, but it still gives me an error.