When executing the following code in a Jupyter notebook (shown below), it always gives this error:
RuntimeError Traceback (most recent call last)
in ()
1 epochs=10
2 for epoch in range(1, epochs + 1):
----> 3 train(model, device, train_loader, optimizer, epoch)
4 test(model, device, test_loader)
in train(model, device, train_loader, optimizer, epoch)
59 optimizer.zero_grad()
60 output = model(data)
---> 61 loss = F.nll_loss(output, target)
62 loss.backward()
63 optimizer.step()
~/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
1788 .format(input.size(0), target.size(0)))
1789 if dim == 2:
-> 1790 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
1791 elif dim == 4:
1792 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target'
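From the traceback, F.nll_loss rejects the target because it is a FloatTensor; as far as I understand, nll_loss is a classification loss that expects log-probabilities as input and a 1-D LongTensor of class indices as target. A minimal sketch (toy tensors, not my real data) that shows the difference:

import torch
import torch.nn.functional as F

output = F.log_softmax(torch.randn(8, 4), dim=1)  # log-probabilities for 8 samples, 4 classes
float_target = torch.rand(8, 4)                   # float targets shaped like my Y
# F.nll_loss(output, float_target)                # -> raises the same RuntimeError as above
long_target = torch.randint(0, 4, (8,))           # 1-D LongTensor of class indices
print(F.nll_loss(output, long_target))            # works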
Jupyter notebook code:
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader, Dataset
import torch.utils.data as data_utils
dtype = torch.float
#device = torch.device("cpu")
device = torch.device("cuda")
# Prepare data
wdata = np.load("data.npy")
data1=torch.from_numpy(wdata)
X1 = data1[:,:4]
Y1 = data1[:,4:]
U1 = data1[:,:4]
V1 = data1[:,4:]
X=X1.type(torch.FloatTensor)
Y=Y1.type(torch.FloatTensor)
U=U1.type(torch.FloatTensor)
V=V1.type(torch.FloatTensor)
train = data_utils.TensorDataset(X, Y)
train_loader = data_utils.DataLoader(train, batch_size=128, shuffle=True)
test = data_utils.TensorDataset(U, V)
test_loader= data_utils.DataLoader(test, batch_size=4, shuffle=True)
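# Sanity check on the tensors that go into the datasets: both X and Y end up as float32,
# and Y has 4 values per sample rather than class indices.
print(X.dtype, Y.dtype)    # torch.float32 torch.float32
print(X.shape, Y.shape)    # torch.Size([2203, 4]) torch.Size([2203, 4])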
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 512)
        self.drp1 = nn.Dropout(0.2)
        self.fc2 = nn.Linear(512, 512)
        self.drp2 = nn.Dropout(0.2)
        self.fc3 = nn.Linear(512, 4)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
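# Note: forward() returns the raw fc3 outputs (no log_softmax), and drp1/drp2 are defined
# but never applied in forward(). F.nll_loss in train() below expects log-probabilities as
# input and a 1-D LongTensor of class indices as target.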
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
epochs = 10
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
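Since every batch has float targets, I suspect the problem is F.nll_loss itself rather than the data loading: it is a classification loss, while my target Y is a [batch, 4] FloatTensor of values. Assuming the last four columns are continuous targets (i.e. this is a regression task), a minimal sketch of the train step I think I need, with mse_loss instead of nll_loss:

def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.mse_loss(output, target)  # regression loss accepts float targets of shape [batch, 4]
        loss.backward()
        optimizer.step()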
However, when executing:
input=s.to(device)
out = model(input)
print(out)
I get:
tensor([[-0.0278, 0.0300, 0.0424, -0.1008]], device='cuda:0', grad_fn=<AddmmBackward>)
data.npy is 8 columns x 2203 rows, where X is the first 4 columns and Y is the last 4 columns.
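If, on the other hand, the last four columns are actually one-hot class labels (an assumption on my part), I think the targets would need to be converted to class indices and the network output turned into log-probabilities, roughly:

# only valid if each row of Y1 is a one-hot label
Y = Y1.argmax(dim=1)  # 1-D LongTensor of class indices 0..3
# and in Net.forward(), return log-probabilities so F.nll_loss applies:
#     x = F.log_softmax(self.fc3(x), dim=1)

Which of these is the right way to fix the RuntimeError above?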