DataLoader changes dataset dimension, "CrossEntropyLoss" not work properly

I found that the dimensions of the data set processed by “DataLoader” cannot be “CrossEntropyLoss” is used.How can I modify to make this xor feedforward network work properly?

I also modified the data set according to “https://discuss.pytorch.org/t/1d-segmentation-multi-target-not-supported/63584”, but it still doesn’t work.

dataset(xor_dataset ) to list:
[(tensor([0., 0.]), tensor([0])), (tensor([0., 1.]), tensor([1])), (tensor([1., 0.]), tensor([1])), (tensor([1., 1.]), tensor([0]))] 

datasetloader(train_loader) to list result:
[[tensor([[0., 0.]]), tensor([[0]])], [tensor([[0., 1.]]), tensor([[1]])], [tensor([[1., 0.]]), tensor([[1]])], [tensor([[1., 1.]]), tensor([[0]])]]

The following code output error:
RuntimeError: 1D target tensor d, multi-target not supported.

import torch
import torch.nn as nn
from torch.utils.data import IterableDataset, DataLoader, TensorDataset

from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss 

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(2,3,True)
        self.fc2 = nn.Linear(3,1,True)

    def forward(self, x):
        x= torch.sigmoid(self.fc1(x))
        x = self.fc2(x)
        return x 

class XorDataset(IterableDataset):
    def __init__(self):
        super(XorDataset).__init__()
        self.inputs = torch.Tensor([
            [0,0],
            [0,1],
            [1,0],
            [1,1]
        ])
        self.targets = torch.tensor([0,1,1,0], dtype=torch.int64).view(-1,1)

    def __iter__(self):
        result = (i for i in zip(self.inputs, self.targets))
        return result

def get_data_loaders():
    xor_dataset = XorDataset()
    train_loader = val_looader = xor_dataloader = DataLoader(xor_dataset)
    return train_loader, val_looader


if __name__ == "__main__":
    net = Net()
    train_loader, val_loader = get_data_loaders()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    criterion = nn.CrossEntropyLoss()

    trainer = create_supervised_trainer(net, optimizer, criterion)
    
    def thresholded_output_transform(output):
        y_pred, y = output
        y_pred = torch.round(y_pred)
        return y_pred, y

    val_metrics = {
        "accuracy": Accuracy(thresholded_output_transform),
        "nll": Loss(criterion)
    }
    
    evaluator = create_supervised_evaluator(net, metrics=val_metrics)

    @trainer.on(Events.ITERATION_COMPLETED(every=5000))
    def log_training_loss(trainer):
        print("Epoch[{}] Loss: {:.4f}".format(trainer.state.epoch, trainer.state.output))


    trainer.run(train_loader, max_epochs=20000)

I guess the target tensor has two dimensions, while only the batch dimension is expected.
The two dimensions might be introduces by the view(-1, 1) op in:

self.targets = torch.tensor([0,1,1,0], dtype=torch.int64).view(-1,1)

You could either remove dim1 via:

loss = criterion(output, target.squeeze(1))

or try to use view(-1) instead (the view might not be necessary at all).