I am trying to set up a FFNN for my external data. It has a shape of 256x11 (plus 1 for label) and I am splitting it in 171x11 for training and 85x11 for testing.
I am getting following error while executing:
Traceback (most recent call last):
File "C:\Users\MyMachine\PycharmProjects\MaProject\My_FFNN_Ex.py", line 159, in <module>
images = images.reshape(-1, input_size).to(device)
RuntimeError: shape '[-1, 1881]' is invalid for input of size 1100
<torch.utils.data.dataloader._SingleProcessDataLoaderIter object at 0x0000022D302AA940>
torch.Size([100, 11]) torch.Size([100])
How can I solve the problem?
My Code:
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# total length: 256* (11 + Label)
df_ip = pd.read_csv('./data_lstm.csv')
class FeatureDatasetTrain(Dataset):
def __init__(self, file_name):
df_input = pd.read_csv('./data_lstm.csv')
# Set 2/3 of the dataset as training-data --> 171*11
x = df_input.values[0:round(df_input.shape[0]/3*2), :]
# Set label-column, based on value in "my_col"
for j in range(df_input.shape[0]):
tmp_float = df_input.iloc[j]['my_col']
if tmp_float < 500.0:
df_input.at[j, 'label'] = 1
elif tmp_float >= 500.0 and tmp_float < 600.0:
df_input.at[j, 'label'] = 2
else:
df_input.at[j, 'label'] = 3
# Set labels as y --> 171*1
y = df_input.iloc[0:round((df_input.shape[0]/3*2)), -1].values
sc = StandardScaler()
x_train = sc.fit_transform(x)
y_train = y
self.X_train = torch.tensor(x_train, dtype=torch.float32)
self.y_train = torch.tensor(y_train)
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
return self.X_train[idx], self.y_train[idx]
# Do the same for Test-data, but use the last 1/3 of the dataset --> 85*11
class FeatureDatasetTest(Dataset):
def __init__(self, file_name):
df_input = pd.read_csv('./data_lstm.csv')
x = df_input.values[(round(df_input.shape[0] / 3 * 2)+1):, :]
for j in range(df_input.shape[0]):
tmp_float = df_input.iloc[j]['my_col']
if tmp_float < 500.0:
df_input.at[j, 'label'] = 1
elif tmp_float >= 500.0 and tmp_float < 600.0:
df_input.at[j, 'label'] = 2
else:
df_input.at[j, 'label'] = 3
# --> 85*1
y = df_input.iloc[(round(df_input.shape[0] / 3 * 2))+1:, -1].values
sc = StandardScaler()
x_test = sc.fit_transform(x)
y_test = y
self.X_test = torch.tensor(x_test, dtype=torch.float32)
self.y_test = torch.tensor(y_test)
def __len__(self):
return len(self.y_test)
def __getitem__(self, idx):
return self.X_test[idx], self.y_test[idx]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-Parameter
input_size = 616
hidden_size = 9
num_classes = 3 # Anzahl der Label
num_epochs = 10
batch_size = 100
learning_rate = 0.001
train_dataset = FeatureDatasetTrain('./data_lstm.csv')
test_dataset = FeatureDatasetTest('./data_lstm.csv')
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle= True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle= False)
examples = iter(train_loader)
print(examples)
samples, labels = examples.next()
print(samples.shape, labels.shape)
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.input_size = input_size
self.l1 = nn.Linear(input_size, hidden_size)
self.relu = nn.ReLU()
self.l2 = nn.Linear(hidden_size, num_classes)
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
return out
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
n_total_steps = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
#images = images.to(device)
images = images.reshape(-1, input_size).to(device)
labels = labels.to(device)
# forward
outputs = model(images)
loss = nn.NLLLoss()
# backward
optimizer.zero_grad()
optimizer.step()
if (i+1) % 100 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.4f}')
# test
with torch.no_grad():
n_correct = 0
n_samples = 0
for images, labels in test_loader:
images = images.reshape(-1, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
n_samples += labels.size(0)
n_correct += (predicted == labels).sum().item()
acc = 100.0 * n_correct / n_samples
print(f'Accuracy of the network on the 10000 test images: {acc} %')