There is a problem when I create my own dataset and use it with a DataLoader

I created a FaceLandmarksDataset following the tutorial at http://pytorch.org/tutorials/beginner/data_loading_tutorial.html; the code is shown below:

import os
import cv2
import pandas as pd  # pd.read_csv is used in __init__
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
class FaceLandmarksDataset(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir,
                                self.landmarks_frame.iloc[idx, 0])
        image = io.imread(img_name)
        landmarks = self.landmarks_frame.iloc[idx, 1:].as_matrix()
        landmarks = landmarks.astype('float').reshape(-1, 2)
        sample = {'image': image, 'landmarks': landmarks}

        if self.transform:
            sample = self.transform(sample)

        return sample

Then I create an instance of it:

faceLandmarksDataset = FaceLandmarksDataset('data/faces/face_landmarks.csv', 'data/faces/')
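
The DataLoader is built on top of it as in the tutorial, roughly like this (the exact batch size does not matter; any value greater than 1 can trigger the error below):

dataloader = DataLoader(faceLandmarksDataset, batch_size=4, shuffle=True, num_workers=0)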

When running iter(dataloader).next(), an error occurred:

RuntimeError                              Traceback (most recent call last)
<ipython-input-132-9d9c24b6d9a2> in <module>()
----> 1 iter(dataloader).next()

E:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
    257         if self.num_workers == 0:  # same-process loading
    258             indices = next(self.sample_iter)  # may raise StopIteration
--> 259             batch = self.collate_fn([self.dataset[i] for i in indices])
    260             if self.pin_memory:
    261                 batch = pin_memory_batch(batch)

E:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in default_collate(batch)
    130         return batch
    131     elif isinstance(batch[0], collections.Mapping):
--> 132         return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    133     elif isinstance(batch[0], collections.Sequence):
    134         transposed = zip(*batch)

E:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in <dictcomp>(.0)
    130         return batch
    131     elif isinstance(batch[0], collections.Mapping):
--> 132         return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    133     elif isinstance(batch[0], collections.Sequence):
    134         transposed = zip(*batch)

E:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in default_collate(batch)
    119                 raise TypeError(error_msg.format(elem.dtype))
    120 
--> 121             return torch.stack([torch.from_numpy(b) for b in batch], 0)
    122         if elem.shape == ():  # scalars
    123             py_type = float if elem.dtype.name.startswith('float') else int

E:\Anaconda3\lib\site-packages\torch\functional.py in stack(sequence, dim, out)
     62     inputs = [t.unsqueeze(dim) for t in sequence]
     63     if out is None:
---> 64         return torch.cat(inputs, dim)
     65     else:
     66         return torch.cat(inputs, dim, out=out)

RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 375 and 333 in dimension 1 at c:\anaconda2\conda-bld\pytorch_1519496000060\work\torch\lib\th\generic/THTensorMath.c:2897

The traceback points at images of different sizes: default_collate tries to stack the images of one batch with torch.stack, and that fails because one image is 375 pixels and another 333 along dimension 1. Since the dataset is created without a transform, the raw, differently sized images go straight to the DataLoader. It is also worth ruling out None values returned by __getitem__ (cv2.imread, for example, silently returns None for a missing file), since None cannot be collated either.

You could print image.shape and type(image) in your __getitem__ to see if there is anything unusual, and then resize every sample to a common size before batching, e.g. with the Rescale/RandomCrop/ToTensor transforms from the same tutorial; a minimal sketch follows.
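
A minimal sketch of that fix, assuming 3-channel RGB images and an arbitrary 224x224 output size (RescaleAndToTensor below is just an illustrative stand-in for the tutorial's Rescale/RandomCrop/ToTensor chain):

import torch
from skimage import transform as sk_transform

class RescaleAndToTensor:
    """Resize each image to a fixed size, rescale the landmarks to match,
    and convert both to tensors so that default_collate can stack the batch."""
    def __init__(self, output_size=(224, 224)):
        self.output_size = output_size

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        image = sk_transform.resize(image, (new_h, new_w))
        landmarks = landmarks * [new_w / w, new_h / h]   # keep landmarks aligned with the resized image
        image = image.transpose((2, 0, 1))               # H x W x C -> C x H x W (assumes RGB)
        return {'image': torch.from_numpy(image),
                'landmarks': torch.from_numpy(landmarks)}

faceLandmarksDataset = FaceLandmarksDataset('data/faces/face_landmarks.csv',
                                            'data/faces/',
                                            transform=RescaleAndToTensor((224, 224)))
dataloader = DataLoader(faceLandmarksDataset, batch_size=4, shuffle=True)
batch = next(iter(dataloader))   # every image tensor is now 3 x 224 x 224, so stacking succeeds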