Simple time series classification error, help please

Hello, I'm having a hard time doing a simple time series classification using PyTorch:

import torch
import torch.nn as nn
import torch.optim as optim

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

x = torch.randn(100, 5, requires_grad=True)
y = torch.empty(100, dtype=torch.long).random_(2)

trainBa = torch.utils.data.DataLoader(dataset=[x, y],
                                      batch_size=10,
                                      shuffle=True)
model = nn.Sequential(nn.Linear(100, 400),
                      nn.ReLU(),
                      nn.Linear(400, 1),
                      nn.LogSoftmax(dim=0)).to(device)


optimizer = optim.Adam(model.parameters(), lr=0.001)
for epoch in range(10):
    for i_batch, (z, y) in enumerate(trainBa):
        z, y = z.to(device), y.to(device)

        o = model(z)

        loss = nn.CrossEntropyLoss()(o, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


I get this error when iterating over trainBa:


RuntimeError('stack expects each tensor to be equal size, but got [100] at entry 0 and [100, 5] at entry 1',)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
    data = self._next_data()
  File "/home/klop/.local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 475, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "/home/klop/.local/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/utils/data/_utils/collate.py", line 55, in default_collate
    return torch.stack(batch, 0, out=out)

The error looks to be coming from the DataLoader due to the dataset setup. You could set it up with a TensorDataset as below:

from torch.utils.data import TensorDataset, DataLoader

tr_dataset = TensorDataset(x, y)
trainBa = DataLoader(dataset=tr_dataset, batch_size=10, shuffle=True)
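
For context (as far as I can tell): with dataset=[x, y], the DataLoader treats the two-element list itself as the dataset, so a "sample" is the whole x ([100, 5]) or the whole y ([100]), and the default collate function fails when it tries to torch.stack those two differently shaped tensors, which is exactly the error above. TensorDataset instead pairs the tensors row by row, e.g.:

sample, label = tr_dataset[0]
print(sample.shape, label.shape)   # torch.Size([5]) torch.Size([])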

Thank you.

I'm still getting an error though (on loss.backward()):

x = torch.randn(100, 5, requires_grad=True)
y = torch.empty(100, dtype=torch.long).random_(2)

tr_dataset = torch.utils.data.TensorDataset(x, y)

trainBa = torch.utils.data.DataLoader(dataset=tr_dataset, batch_size=10, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = nn.Sequential(nn.Linear(5, 400),
                      nn.ReLU(),
                      nn.Linear(400, 1),
                      nn.LogSoftmax(dim=1)).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
losss = nn.NLLLoss()

for epoch in range(10):
    for i_batch, (z, y) in enumerate(trainBa):
        z, y = z.to(device), y.to(device)

        o = model(z)
        loss = losss(o, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

RuntimeError('cuda runtime error (710) : device-side assert triggered at /pytorch/aten/src/THC/generic/THCTensorMath.cu:29',)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/tensor.py", line 221, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/autograd/__init__.py", line 132, in backward
    allow_unreachable=True)  # allow_unreachable flag

I have checked the dimensions of o and the target y:

(10,)
(10,)

Without CUDA I get:

in loss = losss(o, y)


IndexError('Target 1 is out of bounds.',)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 213, in forward
    return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
  File "/home/klop/.local/lib/python3.6/site-packages/torch/nn/functional.py", line 2264, in nll_loss
    ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
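
If I understand the message correctly, NLLLoss uses the target values to index the class dimension (dim 1) of its input, so with nn.Linear(400, 1) the model only scores class 0 and any target equal to 1 is out of bounds; the CUDA device-side assert above seems to be the same problem surfacing on the GPU. A small standalone check (hypothetical, just to confirm this reading):

import torch
import torch.nn as nn

losss = nn.NLLLoss()
o1 = torch.randn(10, 1)                # only one class column
o2 = torch.randn(10, 2)                # two class columns
y = torch.ones(10, dtype=torch.long)   # targets contain class 1
# losss(o1, y)                         # IndexError: Target 1 is out of bounds.
print(losss(o2, y))                    # works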

EDIT: fixed the error by changing nn.Linear(400, 1) to nn.Linear(400, 2), so the model outputs one value per class.
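
For reference, a minimal end-to-end sketch with that fix applied (same random data and names as above; I dropped requires_grad on x since the inputs don't need gradients here):

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

x = torch.randn(100, 5)
y = torch.empty(100, dtype=torch.long).random_(2)

trainBa = DataLoader(TensorDataset(x, y), batch_size=10, shuffle=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = nn.Sequential(nn.Linear(5, 400),
                      nn.ReLU(),
                      nn.Linear(400, 2),            # one output per class
                      nn.LogSoftmax(dim=1)).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
losss = nn.NLLLoss()                                # expects log-probabilities

for epoch in range(10):
    for i_batch, (z, y_batch) in enumerate(trainBa):
        z, y_batch = z.to(device), y_batch.to(device)

        o = model(z)                                # shape [batch, 2]
        loss = losss(o, y_batch)                    # targets are 0 or 1

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()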