Cowardly refusing to serialize non-leaf

import torch
import torch.multiprocessing as mp
import time

def producer(queue, event):
    for _ in range(10):
        a = torch.ones(2, 2).float().cuda()
        idx = torch.ByteTensor([[0, 0], [0, 1]]).cuda()
        a.requires_grad = True
        b = 3 * a  # b is a non-leaf tensor: it is the output of an autograd op

        queue.put(b)   # this put() is what raises the RuntimeError below
        queue.put(idx)
        event.wait()   # wait until the consumer has released both tensors
        event.clear()
    time.sleep(1000)  # keep the producer alive so the shared CUDA memory stays valid

def consumer(queue, event):
    for _ in range(10):
        b = queue.get()    # matches the first put() in the producer
        idx = queue.get()  # matches the second put()
        print("CONSUMER ", b)
        print("CONSUMER ", idx)
        del b
        del idx
        event.set()  # signal the producer that both references are released


if __name__ == '__main__':
    mp.set_start_method('spawn')

    queue = mp.Queue()

    event = mp.Event()
    p = mp.Process(target=producer, args=(queue, event))
    c = mp.Process(target=consumer, args=(queue, event))
    p.start()
    c.start()

    time.sleep(10)

    p.join()
    c.join()

Hello, what is the problem with pushing the non-leaf tensor b to the queue and retrieving it in another process? Any workaround is welcome.

I get the error below:
RuntimeError: Cowardly refusing to serialize non-leaf tensor which requires_grad, since autograd does not support crossing process boundaries. If you just want to transfer the data, call detach() on the tensor before serializing (e.g., putting it on the queue).
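
For reference, a minimal sketch of the workaround the error message itself suggests, assuming the consumer only needs the data and not the autograd graph (gradients cannot flow across process boundaries):

def producer(queue, event):
    for _ in range(10):
        a = torch.ones(2, 2).float().cuda()
        idx = torch.ByteTensor([[0, 0], [0, 1]]).cuda()
        a.requires_grad = True
        b = 3 * a

        # detach() returns a tensor sharing b's data but cut off from the
        # autograd graph, so it can be serialized across the process boundary
        queue.put(b.detach())
        queue.put(idx)
        event.wait()
        event.clear()
    time.sleep(1000)

If the consumer does need gradients with respect to a, sending the detached data is not enough: you would have to transfer the leaf tensor a and recompute b = 3 * a inside the consumer to rebuild the graph there.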
