AttributeError: 'list' object has no attribute 'dim'

Hi there,

Could anyone help me resolve the following issue?

from __future__ import division

import igraph as ig
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data

from demo.graphcc import *


class SDNE(nn.Module):
    def __init__(self, encode_dim, N, batch_size=128):
        super(SDNE, self).__init__()
        self.encode_dim = encode_dim
        self.vectors = {}
        self.N = N
        self.batch_size = batch_size
        if encode_dim == 4:
            self.linear1 = nn.Linear(self.N, 5000)
            self.linear2 = nn.Linear(5000, 1000)
            self.linear3 = nn.Linear(1000, 100)
            self.linear4 = nn.Linear(100, 1000)
            self.linear5 = nn.Linear(1000, 5000)
            self.linear6 = nn.Linear(5000, self.N)
        if encode_dim == 3:
            self.linear1 = nn.Linear(self.N, 1000)
            self.linear2 = nn.Linear(1000, 100)
            self.linear3 = nn.Linear(100, 1000)
            self.linear4 = nn.Linear(1000, self.N)

    def forward(self, x):
        if self.encode_dim == 3:
            emb = nn.Sigmoid(self.linear1(x))
            emb = nn.Sigmoid(self.linear2(emb))
            recon = nn.Sigmoid(self.linear3(emb))
            recon = nn.Sigmoid(self.linear4(recon))
        else:
            emb = nn.Sigmoid(self.linear1(x))
            emb = nn.Sigmoid(self.linear2(emb))
            emb = nn.Sigmoid(self.linear3(emb))
            recon = nn.Sigmoid(self.linear4(emb))
            recon = nn.Sigmoid(self.linear5(recon))
            recon = nn.Sigmoid(self.linear6(recon))

        # return the embedding layer and the reconstruction output
        return emb, recon

This is the error I obtained:
Traceback (most recent call last):
  File "/anaconda/envs/py35/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "", line 1, in <module>
    embedding_final = train(model, optimizer, weighted_adj)
  File "", line 98, in train
    emb_batch, recon_batch = m(data)
  File "/anaconda/envs/py35/lib/python3.5/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "", line 52, in forward
    emb = nn.Sigmoid(self.linear1(x))
  File "/anaconda/envs/py35/lib/python3.5/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "/anaconda/envs/py35/lib/python3.5/site-packages/torch/nn/modules/linear.py", line 67, in forward
    return F.linear(input, self.weight, self.bias)
  File "/anaconda/envs/py35/lib/python3.5/site-packages/torch/nn/functional.py", line 1350, in linear
    if input.dim() == 2 and bias is not None:
AttributeError: 'list' object has no attribute 'dim'

Cheers,

XZ

You should not instantiate a new nn.Sigmoid inside forward every time. Instead, create one module in __init__, e.g. self.sigmoid = nn.Sigmoid(), and then call it as emb = self.sigmoid(self.linear1(x)), and so on for each layer in your network.

This will call the forward() method of that sigmoid object rather than the constructor of the nn.Sigmoid class.
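
For example, here is a minimal sketch of the corrected three-layer branch with one shared sigmoid module (the class name SDNEFixed is just for illustration; the functional torch.sigmoid(...) would work equally well):

import torch
import torch.nn as nn

class SDNEFixed(nn.Module):
    def __init__(self, N):
        super(SDNEFixed, self).__init__()
        self.sigmoid = nn.Sigmoid()  # one module instance, reused for every layer
        self.linear1 = nn.Linear(N, 1000)
        self.linear2 = nn.Linear(1000, 100)
        self.linear3 = nn.Linear(100, 1000)
        self.linear4 = nn.Linear(1000, N)

    def forward(self, x):
        emb = self.sigmoid(self.linear1(x))    # calls the object, i.e. its forward()
        emb = self.sigmoid(self.linear2(emb))
        recon = self.sigmoid(self.linear3(emb))
        recon = self.sigmoid(self.linear4(recon))
        return emb, recon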


Thank you for your kind reply. I have changed my code to the following as instructed, but still obtained the same error.

class SDNE(nn.Module):
    def __init__(self, encode_dim, N, batch_size=128):
        super(SDNE, self).__init__()
        self.encode_dim = encode_dim
        self.vectors = {}
        self.N = N
        self.batch_size = batch_size
        self.sigmoid = nn.Sigmoid()
        self.linear1 = nn.Linear(self.N, 5000)
        self.linear2 = nn.Linear(5000, 1000)

        self.linear3 = nn.Linear(self.N, 1000)
        self.linear4 = nn.Linear(1000, 100)
        self.linear5 = nn.Linear(100, 1000)
        self.linear6 = nn.Linear(1000, self.N)

        self.linear7 = nn.Linear(1000, 5000)
        self.linear8 = nn.Linear(5000, self.N)

    def forward(self, x):
        if self.encode_dim == 3:
            emb = self.sigmoid(self.linear3(x))
            emb = self.sigmoid(self.linear4(emb))
            recon = self.sigmoid(self.linear5(emb))
            recon = self.sigmoid(self.linear6(recon))
        else:
            emb = self.sigmoid(self.linear1(x))
            emb = self.sigmoid(self.linear2(emb))
            emb = self.sigmoid(self.linear4(emb))
            recon = self.sigmoid(self.linear5(emb))
            recon = self.sigmoid(self.linear7(recon))
            recon = self.sigmoid(self.linear8(recon))

        # return the embedding layer and the reconstruction output
        return emb, recon

Your code seems to work on my machine after you've fixed the issue as suggested by @alex.veuthey.

model = SDNE(1, 1, 1)
model(torch.randn(1, 1))

yields two valid tensors.
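
Note that the AttributeError itself means the model received a Python list instead of a tensor: F.linear calls input.dim(), and a list has no such attribute. Assuming your data was built as a list of equally-sized tensors, stacking it first avoids this (a sketch; use torch.tensor(data) instead if it is a plain nested list):

data = torch.stack(data)  # list of tensors -> single tensor
emb_batch, recon_batch = m(data)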


It works now. Thank you very much!

Best,

XZ

I got AttributeError: 'list' object has no attribute 'dim' from the code below. My input to the LSTM is a list because the input is supposed to be a time series, but that creates a problem which I still can't figure out.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import numpy as np
from tqdm import tqdm

inputs = [torch.randn(1,1,3) for _ in range(1)]

batch_size = 1
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        
        self.lstm = nn.LSTM(3, 3, 1)  # input_size, hidden_size, num_layers
        
        self.fc1 = nn.Linear(3,5)
        self.fc2 = nn.Linear(5,1)
        self.fc3 = nn.Linear(len(inputs),2)
        
    def RNN(self, inputs, hidden):
        
        for i in inputs:
            out, hidden = self.lstm(i.view(1, 1, -1), hidden)

        # out, hidden = self.lstm(inputs, hidden)

        # print(out, hidden)
        return out, hidden
        
    def forward(self, out):

        x = torch.tanh(self.fc1(out))
        x = torch.tanh(self.fc2(x))
        x = self.fc3(x.view(-1,len(inputs)))

        return x


net = Net()

output = net(inputs)
print(output)

nn.LSTM expects its input in the shape [seq_len, batch_size, features], so you could concatenate the tensors in the seq_len dimension before passing them to the model.
inputs = torch.cat(inputs, dim=0) should work.
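
Put together, a minimal runnable sketch of that fix (using a sequence of five steps for illustration rather than the single step in your snippet):

import torch
import torch.nn as nn

lstm = nn.LSTM(3, 3, 1)  # input_size=3, hidden_size=3, num_layers=1
inputs = [torch.randn(1, 1, 3) for _ in range(5)]
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))

seq = torch.cat(inputs, dim=0)  # list of [1, 1, 3] tensors -> [5, 1, 3] = [seq_len, batch, features]
out, hidden = lstm(seq, hidden)
print(out.shape)  # torch.Size([5, 1, 3])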