Error when using torch.multiprocessing module

Hi,
I have a problem with use torch.multiprocessing modules.

In my code,

import torch.multiprocessing as mp

class MultiProcessing:
     def run_inference(self):
          # Load detection model
          model = MyModel()
          ...
          processes = []
          for process_idx in range(2):
               p = mp.Process(target=self.do_detection, args=(model, ))
               p.start()  # Error here
               processes.append(p)
          for p in processes:
               p.join()

     def do_detection(self, model):
          vid_cap = cv2.VideoCapture("<VideoFile>")
          while True:
               # Read video
               _, img = vid_cap.read()
               # Input video frame to detection model
               pred = model(img)
               # Visualize detection result
               ...
if __name__ == '__main__':
     inference = MultiProcessing()
     inference.run_inference()

Error like below

THCudaCheck FAIL file=..\torch/csrc/generic/StorageSharing.cpp line=249 error=801 : operation not supported
Traceback (most recent call last):
  File "D:\anaconda3\envs\conda\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
    reduction.dump(process_obj, to_child)
  File "D:\anaconda3\envs\conda\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
  File "D:\anaconda3\envs\conda\lib\site-packages\torch\multiprocessing\reductions.py", line 247, in reduce_tensor
    event_sync_required) = storage._share_cuda_()
RuntimeError: cuda runtime error (801) : operation not supported at ..\torch/csrc/generic/StorageSharing.cpp:249
Traceback (most recent call last):
  File "D:\anaconda3\envs\conda\lib\multiprocessing\spawn.py", line 115, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input

To solve this problem, I search many solutions.
Most solutions say set the num_worker=0 in dataloader, but i’m not using dataloader.

How can i solve this problem?