I’m new to PyTorch/Torchvision and object detection, so I was working through the tutorial available here.
If I run the pre-built tutorial hosted on Colab, it goes through just fine. But if I download the notebook and run it locally, when running the training cell I get a BrokenPipeError: [Errno 32] Broken pipe
error. In particular, the train_one_epoch()
function seems to be causing a problem. Here’s the error traceback:
---------------------------------------------------------------------------
BrokenPipeError Traceback (most recent call last)
<ipython-input-19-1ea96e94502e> in <module>
4 for epoch in range(num_epochs):
5 # train for one epoch, printing every 10 iterations
----> 6 train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
7 # update the learning rate
8 lr_scheduler.step()
~\OneDrive - Microsoft\PytorchLearning\engine.py in train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq)
24 lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
25
---> 26 for images, targets in metric_logger.log_every(data_loader, print_freq, header):
27 images = list(image.to(device) for image in images)
28 targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
~\OneDrive - Microsoft\PytorchLearning\utils.py in log_every(self, iterable, print_freq, header)
199 ])
200 MB = 1024.0 * 1024.0
--> 201 for obj in iterable:
202 data_time.update(time.time() - end)
203 yield obj
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\site-packages\torch\utils\data\dataloader.py in __iter__(self)
191
192 def __iter__(self):
--> 193 return _DataLoaderIter(self)
194
195 def __len__(self):
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\site-packages\torch\utils\data\dataloader.py in __init__(self, loader)
467 # before it starts, and __del__ tries to join but will get:
468 # AssertionError: can only join a started process.
--> 469 w.start()
470 self.index_queues.append(index_queue)
471 self.workers.append(w)
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\multiprocessing\process.py in start(self)
103 'daemonic processes are not allowed to have children'
104 _cleanup()
--> 105 self._popen = self._Popen(self)
106 self._sentinel = self._popen.sentinel
107 # Avoid a refcycle if the target function holds an indirect
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\multiprocessing\context.py in _Popen(process_obj)
221 @staticmethod
222 def _Popen(process_obj):
--> 223 return _default_context.get_context().Process._Popen(process_obj)
224
225 class DefaultContext(BaseContext):
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\multiprocessing\context.py in _Popen(process_obj)
320 def _Popen(process_obj):
321 from .popen_spawn_win32 import Popen
--> 322 return Popen(process_obj)
323
324 class SpawnContext(BaseContext):
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj)
63 try:
64 reduction.dump(prep_data, to_child)
---> 65 reduction.dump(process_obj, to_child)
66 finally:
67 set_spawning_popen(None)
~\AppData\Local\Continuum\anaconda3\envs\noaa_vessels\lib\multiprocessing\reduction.py in dump(obj, file, protocol)
58 def dump(obj, file, protocol=None):
59 '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60 ForkingPickler(file, protocol).dump(obj)
61
62 #
BrokenPipeError: [Errno 32] Broken pipe
Any pointers would be appreciated.