import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
import numpy as np
import random
from matplotlib import pyplot as plt
# training data: noisy samples of 10*sin(x) on [0, 10]
ins = []
outs = []
for i in np.linspace(0, 10, 10):
    outs.append(10 * np.sin(i) + random.random())
    ins.append(i)
def criterion(out, label):
    return (label - out) ** 2
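(Side note: since criterion returns a per-element tensor rather than a scalar, I assume it would need a reduction before backward() if I actually used it, e.g. something like this hypothetical line inside the training loop:)

# hypothetical usage of the criterion above (not in my script):
# reshape Y to (N, 1) so it broadcasts elementwise against out,
# then reduce the per-element squared errors to a scalar
loss = criterion(out, Y.view(-1, 1)).mean()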
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(1, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc3 = nn.Linear(5, 5)
        self.fc4 = nn.Linear(5, 5)
        self.fc5 = nn.Linear(5, 1)

    def forward(self, x):
        x = self.fc1(x).relu()
        x = self.fc2(x).relu()
        x = self.fc3(x).relu()
        x = self.fc4(x).relu()
        x = self.fc5(x)
        return x
losses = []
net = Net()  # .cuda()
# optimizer = optim.SGD(net.parameters(), lr=5e-3, momentum=0.5)
optimizer = optim.Adam(net.parameters(), lr=1e-3)
# optimizer = optim.RMSprop(net.parameters(), lr=1e-3, weight_decay=0)
for epoch in range(4000):
    net.zero_grad()
    X = torch.Tensor(ins).type(torch.FloatTensor)
    Y = torch.Tensor(outs).type(torch.FloatTensor)
    out = net(X.view(-1, 1)).type(torch.FloatTensor)
    # print(out, X.view(-1, 1))
    print(Y)
    loss = F.nll_loss(out, Y)  # <-- this line raises the error below
    print(float(loss))
    loss.backward()
    optimizer.step()
The error I get is:

RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target' in call to _thnn_nll_loss_forward

Shouldn't nll_loss be able to take a float target?
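For reference, here is a minimal comparison of the two cases as I understand them (the shapes and values below are made up purely for illustration, not taken from my script):

import torch
import torch.nn.functional as F

# Classification: nll_loss expects (N, C) log-probabilities as input
# and a LongTensor of class indices as the target.
log_probs = F.log_softmax(torch.randn(4, 3), dim=1)
class_targets = torch.tensor([0, 2, 1, 2])  # dtype int64 (Long)
clf_loss = F.nll_loss(log_probs, class_targets)

# Regression: a float-valued target pairs with a regression loss instead.
pred = torch.randn(4, 1)
float_targets = torch.randn(4, 1)
reg_loss = F.mse_loss(pred, float_targets)

If that reading is right, is F.mse_loss the intended choice for a float target like mine, or can nll_loss be made to accept floats?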
Any guidance is appreciated.