Same parameters in different models

import concurrent.futures

import torch
import torch.nn as nn

class mymodel1(nn.Module):
    def __init__(self):
        super().__init__()
        # nn.Linear draws its initial weights from the default RNG
        self.weight = nn.Linear(3, 2)

    def forward(self, X):
        out = self.weight(X)
        out = nn.Softmax(dim=0)(out)  # note: dim=0 normalizes over the batch
        return out

class mymodel2(nn.Module):
    def __init__(self):
        super().__init__()
        self.weight = nn.Linear(3, 2)

    def forward(self, X):
        out = self.weight(X)
        out = nn.Softmax(dim=0)(out)
        return out

class mymodel3(nn.Module):
    def __init__(self):
        super().__init__()
        self.weight = nn.Linear(3, 2)

    def forward(self, X):
        out = self.weight(X)
        out = nn.Softmax(dim=0)(out)
        return out


        
def doTrain(model, X):
    # X is currently unused: each worker just instantiates the model
    # and returns its freshly initialized parameters
    a1 = model()
    return list(a1.parameters())

# the __main__ guard keeps ProcessPoolExecutor safe on spawn-based
# platforms (Windows, recent macOS); on Linux the workers are forked
if __name__ == "__main__":
    X = torch.randn(12, 3)

    with concurrent.futures.ProcessPoolExecutor() as executor:
        f1 = executor.submit(doTrain, mymodel1, X[0 * 4:(0 + 1) * 4])
        f2 = executor.submit(doTrain, mymodel2, X[1 * 4:(1 + 1) * 4])
        f3 = executor.submit(doTrain, mymodel3, X[2 * 4:(2 + 1) * 4])

    print(f1.result())
    print(f2.result())
    print(f3.result())

Output

[Parameter containing:
tensor([[-0.3413, -0.4291,  0.0850],
        [-0.4270, -0.4523, -0.3700]], requires_grad=True), Parameter containing:
tensor([0.5327, 0.2588], requires_grad=True)]
[Parameter containing:
tensor([[-0.3413, -0.4291,  0.0850],
        [-0.4270, -0.4523, -0.3700]], requires_grad=True), Parameter containing:
tensor([0.5327, 0.2588], requires_grad=True)]
[Parameter containing:
tensor([[-0.3413, -0.4291,  0.0850],
        [-0.4270, -0.4523, -0.3700]], requires_grad=True), Parameter containing:
tensor([0.5327, 0.2588], requires_grad=True)]

Can somebody tell me why I am getting the same parameters returned from the different processes, even though the models are different?

This might be because the per-process RNGs are initialized with the same state by default: each forked worker inherits the parent process's RNG, so every model is built from identical random numbers. Can you check whether manually setting the seed with torch.manual_seed solves the problem?
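A quick way to verify this (just a sketch; check_rng is an illustrative helper, and this assumes the fork start method, the Linux default): have each worker report torch.initial_seed(). Identical values mean the workers share the parent's RNG state.

import concurrent.futures
import torch

def check_rng():
    # under fork, each child inherits the parent's RNG state, so
    # torch.initial_seed() returns the same value in every worker
    return torch.initial_seed()

if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(check_rng) for _ in range(3)]
    print([f.result() for f in futures])  # three identical values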

Thanks,
I used torch.manual_seed(num) with a different value for num in the __init__ method of each of the models, and that did it.
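Roughly, each model's __init__ now looks like this (a sketch; the seed values are arbitrary, as long as each class uses a different one):

import torch
import torch.nn as nn

class mymodel1(nn.Module):
    def __init__(self):
        super().__init__()
        torch.manual_seed(1)  # mymodel2 uses 2, mymodel3 uses 3
        self.weight = nn.Linear(3, 2)  # now drawn from a distinct RNG state

    def forward(self, X):
        out = self.weight(X)
        out = nn.Softmax(dim=0)(out)
        return out

One thing to keep in mind: torch.manual_seed reseeds the process-global default generator, so any random op that runs after the model's __init__ in the same process is affected as well.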
