Iris example - AttributeError: 'numpy.ndarray' object has no attribute 'dim'

I am running a simple neural network for a classification task. Something is wrong with the way I am reading the array data as a tensor, and I am running into this error: AttributeError: 'numpy.ndarray' object has no attribute 'dim'. Any suggestions?

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Net(nn.Module):
    # define nn
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, 3)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, X):
        X = F.relu(self.fc1(X))
        X = self.fc2(X)
        X = self.fc3(X)
        X = self.softmax(X)

        return X
    
# load IRIS dataset
dataset = pd.read_csv('dataset/iris.csv')

# transform species to numerics
dataset.loc[dataset.species=='Iris-setosa', 'species'] = 0
dataset.loc[dataset.species=='Iris-versicolor', 'species'] = 1
dataset.loc[dataset.species=='Iris-virginica', 'species'] = 2


train_X, test_X, train_y, test_y = train_test_split(dataset[dataset.columns[0:4]].values,
                                                    dataset.species.values, test_size=0.8)


# wrap up with Variable in pytorch
#train_X = Variable(torch.Tensor(train_X).float())
#test_X = Variable(torch.Tensor(test_X).float())
#train_y = Variable(torch.Tensor(train_y).long())
#test_y = Variable(torch.Tensor(test_y).long())

train_X = np.float32(train_X)
test_X = np.float32(test_X)
train_y = np.int64(train_y)
test_y = np.int64(test_y)

net = Net()

criterion = nn.CrossEntropyLoss()  # cross entropy loss

optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

for epoch in range(1000):
    optimizer.zero_grad()
    out = net(train_X)
    loss = criterion(out, train_y)
    loss.backward()
    optimizer.step()
    
    if epoch % 100 == 0:
        print('number of epoch', epoch, 'loss', loss.data[0])

predict_out = net(test_X)
_, predict_y = torch.max(predict_out, 1)

print('prediction accuracy', accuracy_score(test_y.data, predict_y.data))

print('macro precision', precision_score(test_y.data, predict_y.data, average='macro'))
print('micro precision', precision_score(test_y.data, predict_y.data, average='micro'))
print('macro recall', recall_score(test_y.data, predict_y.data, average='macro'))
print('micro recall', recall_score(test_y.data, predict_y.data, average='micro'))

You are passing a NumPy array into a torch model. You need to convert train_X to a tensor first, e.g. torch.Tensor(train_X) or torch.from_numpy(train_X), for the model to produce an output.
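
For illustration, a minimal sketch of that conversion (the arrays here are placeholders standing in for what your train_test_split returns):

import numpy as np
import torch

# placeholder arrays with the same dtypes/shapes as the Iris split
train_X = np.float32(np.random.rand(30, 4))
train_y = np.int64(np.random.randint(0, 3, size=30))

# torch.from_numpy shares memory with the array; torch.Tensor(...) makes a float32 copy
train_X_t = torch.from_numpy(train_X)   # float32 features
train_y_t = torch.from_numpy(train_y)   # int64 labels, as nn.CrossEntropyLoss expects

print(train_X_t.shape, train_X_t.dtype)  # torch.Size([30, 4]) torch.float32
print(train_y_t.shape, train_y_t.dtype)  # torch.Size([30]) torch.int64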

However, train_X here doesn’t seem to be a single batch but the whole training set, right? In that case you may prefer to wrap it with the DataLoader class: train_X_loader = torch.utils.data.DataLoader(train_X, batch_size=4, shuffle=True)
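
To keep the features and labels batched together, the usual pattern is to put them in a TensorDataset and pass that to the DataLoader. A small sketch with placeholder arrays:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

# placeholder tensors standing in for train_X / train_y
X = torch.from_numpy(np.float32(np.random.rand(30, 4)))
y = torch.from_numpy(np.int64(np.random.randint(0, 3, size=30)))

loader = DataLoader(TensorDataset(X, y), batch_size=4, shuffle=True)
for xb, yb in loader:
    print(xb.shape, yb.shape)   # torch.Size([4, 4]) torch.Size([4]) per batch
    break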

So you probably want something like this:

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader

class Net(nn.Module):
    # define nn
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, 3)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, X):
        X = F.relu(self.fc1(X))
        X = self.fc2(X)
        X = self.fc3(X)
        X = self.softmax(X)

        return X
    
# load IRIS dataset
dataset = pd.read_csv('https://raw.githubusercontent.com/yangzhangalmo/pytorch-iris/master/dataset/iris.csv')

# transform species to numerics
dataset.loc[dataset.species=='Iris-setosa', 'species'] = 0
dataset.loc[dataset.species=='Iris-versicolor', 'species'] = 1
dataset.loc[dataset.species=='Iris-virginica', 'species'] = 2


train_X, test_X, train_y, test_y = train_test_split(dataset[dataset.columns[0:4]].values,
                                                    dataset.species.values, test_size=0.8)

train_X = np.float32(train_X)
test_X = np.float32(test_X)
train_y = np.int64(train_y)
test_y = np.int64(test_y)

train_dataset = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_y))
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

net = Net()

criterion = nn.CrossEntropyLoss()  # cross entropy loss

optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

net.train()

for epoch in range(1000):
    
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        out = net(inputs)
        loss = criterion(out, targets.long())
        loss.backward()
        optimizer.step()

    if epoch % 100 == 0:
        print('number of epoch', epoch, 'loss', loss.item())

predict_out = net(torch.Tensor(test_X))
_, predict_y = torch.max(predict_out, 1)
predict_y = predict_y.numpy()

print('prediction accuracy', accuracy_score(test_y, predict_y))

print('macro precision', precision_score(test_y, predict_y, average='macro'))
print('micro precision', precision_score(test_y, predict_y, average='micro'))
print('macro recall', recall_score(test_y, predict_y, average='macro'))
print('micro recall', recall_score(test_y, predict_y, average='micro'))
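
One optional refinement for the prediction step (it makes no numerical difference for this particular model, which has no dropout or batch-norm layers, but it is the usual inference pattern): switch the network to evaluation mode and disable gradient tracking. A sketch, reusing net and test_X from the code above:

net.eval()                      # put layers such as dropout/batchnorm into eval behaviour
with torch.no_grad():           # no autograd graph needed for inference
    predict_out = net(torch.Tensor(test_X))
_, predict_y = torch.max(predict_out, 1)
predict_y = predict_y.numpy()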