I am trying to build a simple NN to classify my data into 5 classes labeled as [1, 2, 3, 4, 5]. The input data (provided as X_t [for training] and X_v [for test]) is read from an instrument with 15 pixels (X features = 15). I don't get any error when running the code, but I am suspicious that something is not right, because the loss converges to zero after 1 epoch and the accuracy on the test set becomes 100%. Here is my code:
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
# Wrap the numpy arrays in TensorDatasets and mini-batch DataLoaders.
# NOTE(review): X_t, y_t, X_v, y_v are assumed to be numpy arrays loaded
# elsewhere; X_* presumably have shape (N, 15) — confirm against the loader.
# NOTE(review): labels are cast to float here, yet the training loop applies
# torch.max(target, 1)[1] — that only makes sense if y_* is one-hot (N, 5).
# If y_* is a plain (N,) or (N, 1) label vector, that argmax is always 0,
# which would explain instant "convergence" and 100% accuracy — verify.
X_trn= torch.from_numpy(X_t).float()
y_trn = torch.from_numpy(y_t).float()
train_dataset = TensorDataset(X_trn, y_trn)
# shuffle=True so each epoch sees batches in a different order.
train_loader = DataLoader(train_dataset, batch_size=12, shuffle=True)
X_test = torch.from_numpy(X_v).float()
y_test = torch.from_numpy(y_v).float()
test_dataset = TensorDataset(X_test, y_test)
# No shuffle for evaluation — order does not matter and results stay reproducible.
test_loader = DataLoader(test_dataset, batch_size=12)
class Net(nn.Module):
    """Two-layer MLP classifier: 15 input features -> 8 hidden units -> 5 classes.

    forward() returns per-class log-probabilities, shape (batch, 5), which
    pairs with nn.NLLLoss in the training loop.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(15, 8)  # 15 pixel features in
        self.fc2 = nn.Linear(8, 5)   # 5 output classes

    def forward(self, x):
        """Map a (batch, 15) float tensor to (batch, 5) log-probabilities."""
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # log_softmax (not softmax) because the loss is NLLLoss.
        return F.log_softmax(x, dim=1)
# The network must be instantiated before building the optimizer
# (the original referenced `net` without ever creating it).
net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # typo fixed: "optimzier"
criterion = nn.NLLLoss()
epochs = 10


def _to_class_indices(target):
    """Convert a target batch to the 0-based class-index vector NLLLoss expects.

    Handles both one-hot rows (argmax over dim 1) and plain label vectors.
    NOTE(review): assumes raw labels are 1..5, hence the -1 offset to 0..4 —
    confirm against the actual contents of y_t / y_v.
    """
    if target.dim() > 1 and target.size(1) > 1:
        return target.argmax(dim=1)
    # On an (N, 1) column, torch.max(target, 1)[1] is always 0, so the model
    # "learns" a single class instantly — the likely cause of the reported
    # zero loss and 100% test accuracy.
    return target.view(-1).long() - 1


for epoch in range(epochs):
    net.train()  # re-enable training mode each epoch (eval mode is set below)
    for batch_idx, (data, target) in enumerate(train_loader):
        # Variable() wrappers are deprecated — tensors work directly.
        optimizer.zero_grad()
        data = data.view(-1, 15)  # flatten to (batch, 15) features
        net_out = net(data)
        loss = criterion(net_out, _to_class_indices(target))
        loss.backward()
        optimizer.step()

# Evaluation on the held-out set.
val_loss = 0.0
correct = 0
net.eval()
with torch.no_grad():
    for data, target in test_loader:
        labels = _to_class_indices(target)
        net_out = net(data.view(-1, 15))
        # .item() so we accumulate plain floats, not graph-free tensors.
        val_loss += criterion(net_out, labels).item()
        pred = net_out.argmax(dim=1)
        correct += pred.eq(labels).sum().item()
print(correct)