[DataLoader] num_workers > 0 causes an error on Windows

I am using a Windows environment.

code 1:

for ii, (X, y, y_weight) in enumerate(dataLoader[phase]):  # for each of the batches
    X = X.to(device)  # [Nbatch, 3, H, W]
    y_weight = y_weight.type('torch.FloatTensor').to(device)
    y = y.type('torch.LongTensor').to(device)

error:

File "<string>", line 1, in <module>
debugfile('C:/Users/mbmhm/Desktop/unet/train_unet.py', wdir='C:/Users/mbmhm/Desktop/unet')

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 856, in debugfile
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\bdb.py", line 585, in run
exec(cmd, globals, locals)

File "<string>", line 1, in <module>

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)

File "c:/users/mbmhm/desktop/unet/train_unet.py", line 267, in <module>
for ii , (X, y, y_weight) in enumerate(dataLoader[phase]): #for each of the batches

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\torch\utils\data\dataloader.py", line 193, in __iter__
return _DataLoaderIter(self)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\torch\utils\data\dataloader.py", line 469, in __init__
w.start()

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\multiprocessing\process.py", line 112, in start
self._popen = self._Popen(self)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
reduction.dump(process_obj, to_child)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)

File "stringsource", line 2, in tables.hdf5extension.Array.__reduce_cython__

TypeError: self.dims,self.dims_chunk,self.maxdims cannot be converted to a Python object for pickling

code 2:

for x, y, w in dataLoader['train']:
    print(x.shape, y.shape, w.shape)

error:

File "<string>", line 1, in <module>
debugfile('C:/Users/mbmhm/Desktop/unet/train_unet.py', wdir='C:/Users/mbmhm/Desktop/unet')

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 856, in debugfile
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\bdb.py", line 585, in run
exec(cmd, globals, locals)

File "<string>", line 1, in <module>

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)

File "c:/users/mbmhm/desktop/unet/train_unet.py", line 200, in <module>
for w, y, z in dataLoader['train']:

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\torch\utils\data\dataloader.py", line 576, in __next__
idx, batch = self._get_batch()

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\torch\utils\data\dataloader.py", line 543, in _get_batch
success, data = self._try_get_batch()

File "C:\Users\mbmhm\ansel\Anaconda3\envs\moongpu\lib\site-packages\torch\utils\data\dataloader.py", line 519, in _try_get_batch
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))

RuntimeError: DataLoader worker (pid(s) 7744, 4584) exited unexpectedly

Is this error related to this topic, this one, and this?
If so, I would recommend continuing the discussion in one of the mentioned threads to avoid confusion.

Let me know if these issues are related and where you would like to continue the discussion.

for ii, (X, y, y_weight) in enumerate(dataLoader[phase]):  # for each of the batches
    X = X.to(device)  # [Nbatch, 3, H, W]
    y_weight = y_weight.type('torch.FloatTensor').to(device)
    y = y.type('torch.LongTensor').to(device)

error:
TypeError: self.dims,self.dims_chunk,self.maxdims cannot be converted to a Python object for pickling

====================================

for x, y, w in dataLoader['train']:
    print(x.shape, y.shape, w.shape)

error:
RuntimeError: DataLoader worker (pid(s) 7744, 4584) exited unexpectedly

Could you post the line of code which is raising this error:

TypeError: self.dims,self.dims_chunk,self.maxdims cannot be converted to a Python object for pickling

or alternatively the definition of your Dataset? I assume something is wrong in __getitem__.

Here is my code:

import sys
import random

import numpy as np
import scipy.ndimage
import tables


class Dataset(object):
    def __init__(self, fname, img_transform=None, mask_transform=None, edge_weight=False):
        # nothing special here, just internalizing the constructor parameters
        self.fname = fname
        self.edge_weight = edge_weight

        self.img_transform = img_transform
        self.mask_transform = mask_transform

        self.tables = tables.open_file(self.fname)
        self.numpixels = self.tables.root.numpixels[:]
        self.nitems = self.tables.root.img.shape[0]
        self.tables.close()

        self.img = None
        self.mask = None

    def __getitem__(self, index):
        # opening should be done in __init__, but there seems to be
        # an issue with multithreading, so it is done here instead
        with tables.open_file(self.fname, 'r') as db:
            self.img = db.root.img
            self.mask = db.root.mask

            # get the requested image and mask from the pytable
            img = self.img[index, :, :, :]
            mask = self.mask[index, :, :]

        # the original U-Net paper assigns increased weights to the edges of the
        # annotated objects; their method is more sophisticated, but this one is
        # faster: we simply dilate the mask and highlight all the pixels which were "added"
        if self.edge_weight:
            weight = scipy.ndimage.morphology.binary_dilation(mask == 1, iterations=2) & ~mask
        else:  # otherwise the edge weight is all ones and thus has no effect
            weight = np.ones(mask.shape, dtype=mask.dtype)

        # the torchvision transforms expect 3-channel inputs, so convert the
        # single-channel mask and weight to 3 channels by repetition
        mask = mask[:, :, None].repeat(3, axis=2)
        weight = weight[:, :, None].repeat(3, axis=2)

        img_new = img
        mask_new = mask
        weight_new = weight

        seed = random.randrange(sys.maxsize)  # get a random seed so that we can reproducibly apply the transformations
        if self.img_transform is not None:
            random.seed(seed)  # apply this seed to the img transforms
            img_new = self.img_transform(img)

        if self.mask_transform is not None:
            random.seed(seed)
            mask_new = self.mask_transform(mask)
            mask_new = np.asarray(mask_new)[:, :, 0].squeeze()

            random.seed(seed)
            weight_new = self.mask_transform(weight)
            weight_new = np.asarray(weight_new)[:, :, 0].squeeze()

        return img_new, mask_new, weight_new

    def __len__(self):
        return self.nitems
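
This would explain the first error. On Windows, a DataLoader with num_workers > 0 starts its workers via spawn, which pickles the Dataset and sends a copy to each child process. Since __getitem__ assigns self.img = db.root.img and self.mask = db.root.mask, the Dataset carries pytables Array handles once it has been used in the main process, and those cannot be pickled, which matches the tables.hdf5extension.Array.__reduce_cython__ TypeError. The closed tables.File kept as self.tables in __init__ has the same problem. The second error (workers exiting unexpectedly) is most likely a downstream symptom of the same issue.

Here is a minimal sketch of how I would restructure it so that no pytables object is ever stored on self. The class name H5Dataset is just for illustration, and I have left out the edge weighting and transforms, which would stay exactly as in your __getitem__:

import tables
from torch.utils.data import Dataset


class H5Dataset(Dataset):
    """Sketch: keep only picklable attributes on self so the Dataset
    survives the spawn-based worker startup on Windows."""

    def __init__(self, fname):
        self.fname = fname
        # open the file, copy the small metadata out as plain NumPy
        # objects, and close again; the tables.File handle stays local
        # and is never stored on self
        with tables.open_file(self.fname, 'r') as db:
            self.numpixels = db.root.numpixels[:]
            self.nitems = db.root.img.shape[0]

    def __getitem__(self, index):
        # open per item (inside the worker process) and slice into
        # NumPy arrays; db and its Array handles stay in local variables
        with tables.open_file(self.fname, 'r') as db:
            img = db.root.img[index, :, :, :]
            mask = db.root.mask[index, :, :]
        # ...edge weighting and transforms as in your code...
        return img, mask

    def __len__(self):
        return self.nitems

Also, since spawn re-imports your script in each worker process, make sure the training loop in train_unet.py is guarded by if __name__ == '__main__': on Windows; a missing guard can also make the workers die during startup.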