PyTorch tensor size error in training loop

I have the code:

# Hold out the first 1200 graphs for training; everything after is test data.
train_dataset = dataset[:1200]
test_dataset = dataset[1200:]

# Batches of 2 graphs each; only the training split is shuffled.
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=2, shuffle=False)

# define network, optimizer, etc.
# ...

def train():
    """Run one training epoch over train_loader, updating model in place."""
    model.train()
    for data in train_loader:
        data_cuda = data.to(device)
        optimizer.zero_grad()
        out = model(data_cuda)[0]
        # CrossEntropyLoss expects integer class targets.
        y = data_cuda.y.type(torch.long)
        # Bug fix: `CrossEntropyLoss` is a class, not a function — calling it
        # with (out, y, reduction=None) constructs a module with bogus args.
        # Use the functional form instead. Note `reduction=None` is also
        # invalid (valid values: 'none', 'mean', 'sum'); the default 'mean'
        # yields a scalar so backward() works without arguments.
        loss = torch.nn.functional.cross_entropy(out, y)
        # Bug fix: backward() was missing entirely, so gradients were never
        # computed and optimizer.step() was a no-op.
        loss.backward()
        optimizer.step()

def test(test_loader, epoch):
    """Evaluate model on test_loader and print the accuracy for this epoch."""
    model.eval()  # set once, not on every batch
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data in test_loader:
            data_cuda = data.to(device)
            out = model(data_cuda)[0]
            # Bug fix: the original compared raw logits to labels and called
            # .item() on a batch-sized tensor, which raises for batch_size=2.
            # Reduce logits to predicted class indices first (assumes out is
            # (batch, num_classes) — consistent with CrossEntropyLoss usage),
            # then count the matches in the batch.
            pred = out.argmax(dim=1)
            correct += pred.eq(data_cuda.y).sum().item()
    # Bug fix: the original divided by len(data) — the size of the LAST batch
    # only — instead of the size of the whole test set.
    acc = correct / len(test_loader.dataset)
    print('Epoch, ', epoch, 'Accuracy: {:.4f}'.format(acc))

# Train for 100 epochs, evaluating on the held-out set after each one.
num_epochs = 100
for epoch in range(1, num_epochs + 1):
    train()
    test(test_loader, epoch)

and it throws the error:

---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-11-038c1ecd2a6f> in <module>
      1 for epoch in range(1, 101):
----> 2     train()
      3     test(test_loader, epoch)

<ipython-input-9-d72ada213a98> in train()
      1 def train():
      2     model.train()
----> 3     for data in train_loader:
      4         data_cuda = data.to(device)
      5         optimizer.zero_grad()

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch/utils/data/dataloader.py in __next__(self)
    343 
    344     def __next__(self):
--> 345         data = self._next_data()
    346         self._num_yielded += 1
    347         if self._dataset_kind == _DatasetKind.Iterable and \

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch/utils/data/dataloader.py in _next_data(self)
    383     def _next_data(self):
    384         index = self._next_index()  # may raise StopIteration
--> 385         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    386         if self._pin_memory:
    387             data = _utils.pin_memory.pin_memory(data)

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
     45         else:
     46             data = self.dataset[possibly_batched_index]
---> 47         return self.collate_fn(data)

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch_geometric/data/dataloader.py in <lambda>(batch)
     45         super(DataLoader,
     46               self).__init__(dataset, batch_size, shuffle,
---> 47                              collate_fn=lambda batch: collate(batch), **kwargs)
     48 
     49 

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch_geometric/data/dataloader.py in collate(batch)
     24             elem = batch[0]
     25             if isinstance(elem, Data):
---> 26                 return Batch.from_data_list(batch, follow_batch)
     27             elif isinstance(elem, torch.Tensor):
     28                 return default_collate(batch)

~/anaconda3/envs/py38/lib/python3.8/site-packages/torch_geometric/data/batch.py in from_data_list(data_list, follow_batch)
     48                     item = item + cumsum[key]
     49                 if torch.is_tensor(item):
---> 50                     size = item.size(data.__cat_dim__(key, data[key]))
     51                 else:
     52                     size = 1

IndexError: dimension specified as 0 but tensor has no dimensions

The dataset is a list of torch_geometric `Data` objects (I am working with graph data) that I slice into train and test sets before wrapping each split in a DataLoader.