CUDA out of memory with custom DataLoader

I wrote a custom dataloader for MNIST that keeps only the items with specific labels. When I train my model with it, CUDA gives me out-of-memory errors after a couple of epochs. When I run the same model with the standard MNIST dataloader, the program works fine. Any idea why this is happening?

import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms


class MNISTCustomDataset(Dataset):
    def __init__(self, numbers, transform=None, data_dir='./data/'):
        # Training images: 16-byte header, then 60000 * 28 * 28 unsigned bytes
        f = open(data_dir + 'train-images-idx3-ubyte')
        loaded = np.fromfile(file=f, dtype=np.uint8)

        all_images = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 255

        # Training labels: 8-byte header, then 60000 unsigned bytes
        f = open(data_dir + 'train-labels-idx1-ubyte')
        loaded = np.fromfile(file=f, dtype=np.uint8)

        all_labels = loaded[8:].reshape(60000).astype(np.int32)

        # Keep only the samples whose label is in `numbers`
        self.images = []
        self.labels = []
        for idx in range(len(all_images)):
            if all_labels[idx] in numbers:
                self.images.append(all_images[idx])
                self.labels.append(all_labels[idx])

        self.transform = transform
    
    def __getitem__(self, index):
        img   = self.images[index]
        label = self.labels[index]

        if self.transform is not None:
            img = self.transform(img)

        return img, label
    
    def __len__(self):
        return len(self.images)

def load_custom_mnist(bsize, numbers):
    dataset = MNISTCustomDataset(numbers, transform=transforms.Compose([
                               transforms.ToTensor(),
                           ]))
    loader  = DataLoader(dataset, batch_size=bsize, shuffle=True)
    
    return loader

Check the size of your model and the batch size you use; your GPU may simply not have enough memory to hold a large model together with a large batch.
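For example, you can print the allocated GPU memory once per epoch to see whether usage keeps growing over time (which would point at something being kept alive between epochs) or is just too high from the start. A minimal sketch, assuming you train on a CUDA device and `model` is your network:

import torch

def report_gpu_memory(tag=""):
    # Memory currently held by tensors vs. peak usage since the start (or last reset)
    allocated = torch.cuda.memory_allocated() / 1024**2
    peak = torch.cuda.max_memory_allocated() / 1024**2
    print(f"{tag} allocated: {allocated:.1f} MiB, peak: {peak:.1f} MiB")

# e.g. inside the training loop, once per epoch:
# report_gpu_memory(f"epoch {epoch}")

# Rough size of the parameters alone (excludes activations and gradients)
# n_params = sum(p.numel() for p in model.parameters())
# print(f"{n_params} parameters ~ {n_params * 4 / 1024**2:.1f} MiB in float32")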

It could be unrelated, but close your files after loading the numpy arrays, or use a context manager (with open(...)), which closes them automatically.
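For example, the two reads in __init__ could look like this (a sketch using the same file names as in your code; 'rb' opens the files in binary mode, which is also the safer choice when reading raw bytes with np.fromfile):

import numpy as np

with open('./data/train-images-idx3-ubyte', 'rb') as f:
    loaded = np.fromfile(file=f, dtype=np.uint8)
all_images = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 255

with open('./data/train-labels-idx1-ubyte', 'rb') as f:
    loaded = np.fromfile(file=f, dtype=np.uint8)
all_labels = loaded[8:].reshape(60000).astype(np.int32)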