What is the benefit of torch.multiprocessing for shared memory?

I am trying to measure the benefit of using torch.multiprocessing for shared memory. But whether I use Queue from multiprocessing or from torch.multiprocessing, nothing changes — the processing time is the same. Here is my test code:

import sys
import time

import torch
from torch.multiprocessing import Process
from torch.multiprocessing import Queue

# from multiprocessing import Process
# from multiprocessing import Queue


def torch_shared_mem_process(q):
    """Consumer loop: drain items from *q* until a ``None`` sentinel arrives.

    Each received item is echoed to stdout; the function returns once the
    sentinel is seen.
    """
    # iter(callable, sentinel) keeps calling q.get() until it returns None.
    for item in iter(q.get, None):
        print('Received data:', len(item), item)


def test_mem_share(shape=(100, 3, 1080, 1920)):
    """Benchmark sending one float32 tensor of *shape* through a Queue.

    Starts a consumer process, times how long it takes to put the tensor
    (plus the ``None`` sentinel) on the queue and join the consumer.

    Args:
        shape: dimensions of the zero tensor to send; defaults to the
            original hard-coded [100, 3, 1080, 1920] (~2.5 GB of float32).

    Returns:
        Elapsed wall-clock seconds for the send + consume round trip.
    """
    q = Queue()
    p = Process(target=torch_shared_mem_process, args=(q,))
    p.start()

    # Allocate outside the timed region so only the transfer is measured.
    data = torch.zeros(list(shape), dtype=torch.float)

    start = time.time()
    q.put(data)
    q.put(None)  # sentinel tells the consumer to exit
    p.join()
    # Stop the clock immediately after join — before any printing, so the
    # print cost is not folded into the measurement (the original computed
    # the elapsed time after the print).
    took_seconds = time.time() - start

    print('Finished sending tensor!')
    return took_seconds

def main():
    """Run the shared-memory benchmark once and report the elapsed time."""
    elapsed = test_mem_share()
    print(f'Took {elapsed:.1f} s with shared memory.')


# Entry guard: run the benchmark only when executed as a script, so the
# module can be imported (e.g. by a child process) without side effects.
if __name__ == '__main__':
    sys.exit(main())