Multi-target not supported (newbie here too)

I’m running into a problem very similar to what several others here have reported.

My model output looks as follows (my batch_size is 4):

tensor([[-1.4665, -1.2378, -1.3317, -1.5361],
        [-1.4172, -1.2957, -1.3590, -1.4828],
        [-1.3997, -1.3388, -1.2520, -1.5835],
        [-1.4352, -1.2890, -1.2904, -1.5549]], grad_fn=<LogSoftmaxBackward>)
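
(For context, the grad_fn shows these came from the final LogSoftmax layer, so each row holds log-probabilities; exponentiating them should give probabilities that sum to 1 per row. A quick check with the first two rows above:)

import torch

output = torch.tensor([[-1.4665, -1.2378, -1.3317, -1.5361],
                       [-1.4172, -1.2957, -1.3590, -1.4828]])

# exp() undoes the log; each row should sum to ~1.0
print(output.exp().sum(dim=1))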

My target tensor was originally one-hot encoded, but I converted it to a single column of class indices (size 4 x 1, with values ranging from 0 to 3 for the four classes).

tensor([[0],
        [0],
        [0],
        [0]])
torch.Size([4, 1])
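
(For reference, this is roughly how I went from one-hot rows to that single index column; a minimal sketch with dummy values matching the batch above. argmax over the class dimension recovers the index, and keepdim=True is what leaves the extra [4, 1] dimension.)

import torch

one_hot = torch.tensor([[1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]])

# argmax over the class dimension gives the class index per sample;
# keepdim=True keeps the column shape [4, 1] shown above
labels = one_hot.argmax(dim=1, keepdim=True)
print(labels)        # tensor([[0], [0], [0], [0]])
print(labels.shape)  # torch.Size([4, 1])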

This is the stack trace:

Traceback (most recent call last):
  File "trade_probability_training_script.py", line 313, in <module>
    model = train_model(test_and_training_np_array, r_results_np_array)
  File "trade_probability_training_script.py", line 189, in train_model
    loss = nn.CrossEntropyLoss()(output, labels)
  File "/home/csumpter/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/csumpter/anaconda3/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 862, in forward
    ignore_index=self.ignore_index, reduction=self.reduction)
  File "/home/csumpter/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 1550, in cross_entropy
    return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
  File "/home/csumpter/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 1407, in nll_loss
    return torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: multi-target not supported at /pytorch/aten/src/THNN/generic/ClassNLLCriterion.c:21
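
For what it's worth, the failing call inside cross_entropy can be reproduced in isolation with dummy tensors (a minimal sketch; on this PyTorch version a 2-D class-index target raises exactly this error):

import torch
import torch.nn.functional as F

output = torch.randn(4, 4)                    # [batch_size, num_classes]
target = torch.zeros(4, 1, dtype=torch.long)  # [batch_size, 1]

# same call the traceback shows inside cross_entropy
F.nll_loss(F.log_softmax(output, dim=1), target)
# RuntimeError: multi-target not supported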

This is the code:

    training_features, testing_features, training_results, testing_results = train_test_split(
        initial_data_set, initial_results_dataset, test_size=0.33, random_state=42)

    # Features as float tensors, class labels as long (int64) tensors.
    training_features_np = torch.from_numpy(training_features).float()
    training_results_np = torch.from_numpy(training_results).long()
    train_data_tensor_dataset = torch.utils.data.TensorDataset(training_features_np, training_results_np)

    testing_features_np = torch.from_numpy(testing_features).float()
    testing_results_np = torch.from_numpy(testing_results).long()
    test_data_tensor_dataset = torch.utils.data.TensorDataset(testing_features_np, testing_results_np)

    valid_dataloader = torch.utils.data.DataLoader(test_data_tensor_dataset, batch_size=4, shuffle=True)
    train_dataloader = torch.utils.data.DataLoader(train_data_tensor_dataset, batch_size=4, shuffle=True)

    model = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(631, 400)),
        ('drop1', nn.Dropout(0.5)),
        ('relu1', nn.ReLU(inplace=True)),

        ('fc2', nn.Linear(400, 200)),
        ('drop2', nn.Dropout(0.5)),
        ('relu2', nn.ReLU(inplace=True)),

        ('fc3', nn.Linear(200, 100)),
        ('drop3', nn.Dropout(0.5)),
        ('relu3', nn.ReLU(inplace=True)),

        ('fc4', nn.Linear(100, 50)),
        ('drop4', nn.Dropout(0.5)),
        ('relu4', nn.ReLU(inplace=True)),

        ('fc5', nn.Linear(50, 25)),
        ('drop5', nn.Dropout(0.5)),
        ('relu5', nn.ReLU(inplace=True)),

        ('fc6', nn.Linear(25, 4)),
        ('output', nn.LogSoftmax(dim=1))]))
    model.to(ext_gpu_2)

    #criterion = nn.NLLLoss()
    criterion = nn.CrossEntropyLoss()

    for e in range(epochs):

        running_loss = 0

        for inputs, labels in train_dataloader:

            steps += 1
            inputs, labels = inputs.to(ext_gpu_2), labels.to(ext_gpu_2)
            optimizer.zero_grad()

            output = model(inputs)

            # Debugging prints: output is [4, 4], but labels come out as [4, 1].
            print(output)
            print(labels)
            print(labels.shape)
            #labels.squeeze_(1)

            loss = nn.CrossEntropyLoss()(output, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

            if steps % print_every == 0:
                print("Epoch: {}/{}".format(e+1, epochs), "Loss: {:.4f}".format(running_loss/print_every))
                running_loss = 0

        with torch.no_grad():
            test_loss, accuracy = validation(model, valid_dataloader, criterion, ext_gpu_2)

        print("Epoch: {}/{}".format(e+1, epochs),
              "Validation Test Loss: {:.3f} .. ".format(test_loss/len(valid_dataloader)),
              "Validation Test Accuracy: {:.3f}".format(accuracy/len(valid_dataloader)))

Any help would be highly appreciated.

The target should have the shape [batch_size], i.e. without the additional dim1.
Just call labels = labels.squeeze(1) and try to run your code again.
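
A minimal sketch with random tensors (not your actual model) to show the shapes:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
output = torch.randn(4, 4, requires_grad=True)  # [batch_size, num_classes]
labels = torch.zeros(4, 1, dtype=torch.long)    # [batch_size, 1]

# criterion(output, labels)  # RuntimeError: multi-target not supported

labels = labels.squeeze(1)                      # shape: [batch_size]
loss = criterion(output, labels)                # works
print(labels.shape, loss.item())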


Did anybody tell you you were the man today? Because if not, there you go. Happy Friday.
