Problems during data loading procedure

Hello, I’m trying to load cifar datasets using class dataset(
Here is my code

import numpy as np
import os
from glob import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

train_paths = glob('dataset/cifar/train/*.png')
test_paths = glob('dataset/cifar/test/*.png')
kwargs = {'num_workers':1, 'pin_memory' : True } if use_cuda else {}
def getlabel(path):
    return path.split('_')[-1].replace('.png', '')
label_names = [getlabel(path) for path in train_paths]
classes = np.unique(label_names)
def get_label(path):
    lbl_name =  path.split('_')[-1].replace('.png', '')
    label = np.argmax(classes == lbl_name) 
    return label

class Dataset(
    def __init__(self, data_paths, transform=None):
        self.data_paths = data_paths
        self.transform = transform

    def __len__(self):
        return len(self.data_paths)

    def __getitem__(self, idx):
        path = self.data_paths[idx]
        image =
        label = get_label(path)

        if self.transform:
            image = self.transform(image)
            return image, label

train_loader =, transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor(),
                                                                                    transforms.Normalize(mean = [0.406], std=[0.225])])), batch_size = 64,
                                           shuffle = True, **kwargs)
test_loader =, transforms.Compose([transforms.ToTensor(),
                                                                                    transforms.Normalize(mean = [0.406], std=[0.225])])), batch_size = 64,
                                           shuffle = True, **kwargs)
image, label = next(iter(train_loader))

However, error message
’ An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.

    This probably means that you are not using fork to start your
    child processes and you have forgotten to use the proper idiom
    in the main module:

        if __name__ == '__main__':

    The "freeze_support()" line can be omitted if the program
    is not going to be frozen to produce an executable.'

comes out.
Is there any way to solve this problem?
I am looking forward to see an help. Thanks in advance.
Kind regards,
Yoon Ho

Have you tried to add the if-clause protection as given in the error message?
If so, did you receive the same error?