I have two code snippets:
# Snippet 1: without share_memory_()
import torch
import multiprocessing

def worker(tensor):
    # Modify the tensor in the child process.
    tensor += 1
    print(f"Worker tensor: {tensor}")

if __name__ == '__main__':
    tensor = torch.zeros(3)
    p = multiprocessing.Process(target=worker, args=(tensor,))
    p.start()
    p.join()
    print(f"Main tensor: {tensor}")
# Snippet 2: with share_memory_()
import torch
import multiprocessing

def worker(tensor):
    # Modify the tensor in the child process.
    tensor += 1
    print(f"Worker tensor: {tensor}")

if __name__ == '__main__':
    tensor = torch.zeros(3)
    tensor.share_memory_()  # move the tensor's storage into shared memory
    p = multiprocessing.Process(target=worker, args=(tensor,))
    p.start()
    p.join()
    print(f"Main tensor: {tensor}")
The only difference between the two is the call to tensor.share_memory_(), yet the output is the same in both cases:
Worker tensor: tensor([1., 1., 1.])
Main tensor: tensor([1., 1., 1.])
Can anyone explain this? If the worker's changes are already visible in the main process without it, why do we even need shared memory?
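In case it helps narrow things down, here is a small probe I could run on top of Snippet 2; it's a minimal sketch assuming torch.Tensor.is_shared() reports whether a tensor's storage currently lives in shared memory (the call exists, but whether its value explains the behaviour above is exactly what I'm unsure of):

import torch
import multiprocessing

def worker(tensor):
    # Check whether the child sees a shared-memory tensor before modifying it.
    print(f"Worker is_shared: {tensor.is_shared()}")
    tensor += 1
    print(f"Worker tensor: {tensor}")

if __name__ == '__main__':
    tensor = torch.zeros(3)
    print(f"Main is_shared before: {tensor.is_shared()}")
    tensor.share_memory_()  # comment this line out to compare with Snippet 1
    print(f"Main is_shared after: {tensor.is_shared()}")
    p = multiprocessing.Process(target=worker, args=(tensor,))
    p.start()
    p.join()
    print(f"Main tensor: {tensor}")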