Hello,
I am very new to PyTorch and I am now trying to train my first neuronal net with my own data.
Sadly I get an error message I could not figure out a solution for.
It would be great if you could help me.
I am getting the following error:
ValueError Traceback (most recent call last)
in
13 for inputs, labels in training_loader:
14 outputs = model(inputs)
β> 15 loss = criterion(outputs, labels)
16
17 optimizer.zero_grad()~\Anaconda3\envs\opencv4\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
β> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)~\Anaconda3\envs\opencv4\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
940 def forward(self, input, target):
941 return F.cross_entropy(input, target, weight=self.weight,
β> 942 ignore_index=self.ignore_index, reduction=self.reduction)
943
944~\Anaconda3\envs\opencv4\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2054 if size_average is not None or reduce is not None:
2055 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2056 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
2057
2058~\Anaconda3\envs\opencv4\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1879 if target.size()[1:] != input.size()[2:]:
1880 raise ValueError(βExpected target size {}, got {}β.format(
-> 1881 out_size, target.size()))
1882 input = input.contiguous().view(n, c, 1, -1)
1883 target = target.contiguous().view(n, 1, -1)ValueError: Expected target size (100, 4), got torch.Size([100])
This is my code:
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as F
from torch import nn
from torchvision import datasets, transforms
from torchvision import transforms
import pandas
class CsvDataset(torch.utils.data.Dataset):
def __init__(self, csv_file, transforms=None):
"""
Args:
csv_file (string): Path to csv file
transforms (callable, optional): Optional tranforms to be applied on a sample
"""
self.df = pandas.read_csv(csv_file, sep=';')
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')] #Delete last colum
self.transforms = transforms
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_tensor = torch.tensor(self.df.iloc[idx:idx+1, 4:].values).float()
lbl_to_idx = {
'z': 0,
'g': 1,
'e': 2,
'u': 3
}
lbl_val = self.df.iloc[idx:idx+1, 1:2].values
label = torch.tensor(lbl_to_idx[lbl_val[0,0]]).float()
sample = {'sample': sample_tensor, 'label': label}
if self.transforms: # Something Wrong??????????
sample = self.transform(sample)
return sample_tensor, label
def __len__(self):
return self.df.shape[0]
dataset = CsvDataset(csv_file='C:/data.csv')
# __Split Dataset__
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, validation_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
training_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=100, shuffle=False)
class Classifier(nn.Module):
def __init__(self, D_in, H1, H2, D_out):
super().__init__()
#Define neural net:
self.linear1 = nn.Linear(D_in, H1)
self.linear2 = nn.Linear(H1, H2)
self.linear3 = nn.Linear(H2, D_out)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return
model = Classifier(224, 125, 65, 4) #224 Inputs, 125 nodes in H1, 65 nodes in H2, 4 ouput classes
model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
epochs = 15
running_loss_history = []
running_corrects_history = []
val_running_loss_history = [] #For Validation with validation dataset
val_running_corrects_history = []
for e in range(epochs):
running_loss = 0.0
running_corrects = 0.0
val_running_loss = 0.0
val_running_corrects = 0.0
for inputs, labels in training_loader:
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, preds = torch.max(outputs, 1)
running_loss += loss.item()
running_corrects += torch.sum(preds == labels.data)
else:
#Validation:
with torch.no_grad():
for val_inputs, val_labels in validation_loader:
val_outputs = model(val_inputs)
val_loss = criterion(val_outputs, val_labels)
_, val_preds = torch.max(val_outputs, 1)
val_running_loss += val_loss.item()
val_running_corrects += torch.sum(val_preds == val_labels.data)
epoch_loss = running_loss/len(training_loader)
epoch_acc = running_corrects.float()/ len(training_loader)
running_loss_history.append(epoch_loss)
running_corrects_history.append(epoch_acc)
val_epoch_loss = val_running_loss/len(validation_loader)
val_epoch_acc = val_running_corrects.float()/ len(validation_loader)
val_running_loss_history.append(val_epoch_loss)
val_running_corrects_history.append(val_epoch_acc)
print('epoch :', (e+1))
print('training loss: {:.4f}, acc {:.4f} '.format(epoch_loss, epoch_acc.item()))
print('validation loss: {:.4f}, validation acc {:.4f} '.format(val_epoch_loss, val_epoch_acc.item()))
Thank you very much!