RuntimeError: multi-target not supported at ..\aten\src\THNN/generic/ClassNLLCriterion.c:20

I’m trying to train an NN on a dataset containg 1d vectors of features.

class data_loader(Dataset):
    def __init__(self, data):
        data = np.array(data.values, dtype=float)
        self.len = data.shape[0]
        self.X = torch.from_numpy(data[:, 0:-1])
        self.y = torch.from_numpy(data[:, [-1]])
        
    def __getitem__(self, index):
        return self.X[index], self.y[index]
    
    def __len__(self):
        return self.len   
    
train_set = data_loader(real_train)
train_loader = DataLoader(dataset=train_set,
                          batch_size=64,
                          shuffle=True,
                          num_workers=0)
val_set = data_loader(real_val)
val_loader = DataLoader(dataset=val_set,
                        batch_size=64,
                        shuffle=False,
                        num_workers=0)

class genreNN(nn.Module):
    def __init__(self, n_in=26, n_hidden=128, n_out=8, dropout=0.4):
        super(genreNN, self).__init__()
        self.n_in = n_in
        self.n_hidden = n_hidden
        self.n_out = n_out
        self.dropout = dropout
        
        self.fc = nn.Linear(n_in, n_hidden)
        self.drop = nn.Dropout(dropout)
        self.out = nn.Linear(n_hidden, n_out)
    
    def forward(self, x):
        x = x.view(-1, self.n_in)
        sigmoid = nn.Sigmoid()
        x = sigmoid(self.fc(x))
        x = self.drop(x)
        x = F.log_softmax(self.out(x))
        return x
        
   
model = genreNN()
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)

def train(epoch, model, train_loader, optimizer, print_every=50, cuda=None):
    model.train()
    correct = 0
    for k, (data, target) in enumerate(train_loader):
#        if cuda:
#            data, target= data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data.float())
        
        pred = output.data.max(1)[1]
        correct += pred.eq(target.long().data).cpu().sum()
        acc = 100. * correct / len(train_loader.dataset)
        loss = F.nll_loss(output, target.long())
        loss.backward()
        optimizer.step()
        if k % print_every == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} Accuracy: {}'.format(
                epoch, k * len(data), len(train_loader.dataset),
                100. * k / len(train_loader), loss.data[0], acc))

def validate(loss_vec, acc_vec, model, val_loader, cuda=None):
    model.eval()
    val_loss, correct = 0, 0
    for data, target in val_loader:
#        if cuda:
#            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data.float())
        val_loss += F.nll_loss(output, target.long()).data[0]
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.long().data).cpu().sum()

    val_loss /= len(val_loader)
    loss_vec.append(val_loss)

    acc = 100. * correct / len(val_loader.dataset)
    acc_vec.append(acc)

    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        val_loss, correct, len(val_loader.dataset), acc))

epochs = 100
loss_vec=[]
acc_vec=[]
gpu_available = torch.cuda.is_available()
for e in range(epochs):
    train(e, model, train_loader, optimizer, cuda=gpu_available)
    validate(loss_vec, acc_vec, model, val_loader, cuda=gpu_available)

I believe the loss function in train() is causing this error but I don’t know how to fix this

Could you print the shape of your target tensor?
Based on your model architecture, if should be a LongTensor with the shape [batch_size] containing class indices in the range [0, n_out-1].