Hi,
I have constructed a neural network with as follows:
class Network(nn.Module):
class Network(nn.Module):
def __init__(self):
super().__init__()
self.fc1=nn.Linear(3840, 256)
self.fc2=nn.Linear(256,128)
self.fc3=nn.Linear(128,64)
self.fc4=nn.Linear(64,1)
def forward(self,x):
x=F.relu(self.fc1(x))
x=F.relu(self.fc2(x))
x=F.relu(self.fc3(x))
x=self.fc4(x)
return x
```
Here I expect the output to be just one value. However, I get the above error while calculating the crossentropyloss. When i checked my target , it has values ranging from 0 to 7 indicating the number of classes to be 8. I am providing a one dimensional vector as my target and converting it into tensor using
Variable(torch.FloatTensor(labels).long(),requires_grad=False)
However, when I changed the number of output nodes in my network to 8, my network is training properly.
Can anyone tell me why this problem is occuring?
My whole code is:
class Network(nn.Module):
def __init__(self):
super().__init__()
self.fc1=nn.Linear(3840, 256)
self.fc2=nn.Linear(256,128)
self.fc3=nn.Linear(128,64)
self.fc4=nn.Linear(64,1)
def forward(self,x):
x=F.relu(self.fc1(x))
x=F.relu(self.fc2(x))
x=F.relu(self.fc3(x))
x=self.fc4(x)
return x
data1 = pd.read_csv(“features_cnormal.csv”, index_col=0, header=0)
X = data1.values
target = pd.read_csv(“y.csv”)
y=target.values
model = Network()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.01)
scaler = preprocessing.StandardScaler()
Xs = scaler.fit_transform(X)
Xtrain, ytrain = Xs, y
XTrain = torch.from_numpy(Xtrain).type(torch.FloatTensor)
YTrain = torch.from_numpy(np.array(ytrain).reshape((-1, 1))).type(torch.FloatTensor)
train = torch.utils.data.TensorDataset(XTrain, YTrain)
train_loader = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
#print(data for i, data in enumerate(train_loader))
epochs = 100
for e in range(epochs):
running_loss = 0
for i, data in enumerate(train_loader):
inputs, labels = data
X1, Y1 = Variable(torch.FloatTensor(inputs),requires_grad=False), \
Variable(torch.FloatTensor(labels).long(),requires_grad=False)
output = model(X1)
Y1=Y1.view(-1)
loss = criterion(output, Y1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(train_loader)}")