Same weight initialisation across different runs of exactly the same architecture

I want to have the same random weight initialisation every time I run this neural net. I have a feed-forward network defined this way:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, layers_sizes, drop_prob):
        # first layer size has to be n_feat, last has to be 2
        super(Net, self).__init__()
        n_layers = len(layers_sizes) - 2

        # hidden layers: Linear followed by Dropout
        self.linears = nn.ModuleList([
            nn.Sequential(nn.Linear(layers_sizes[i], layers_sizes[i + 1]),
                          nn.Dropout(drop_prob))
            for i in range(n_layers)
        ])
        # output layer (no dropout)
        self.last_linear = nn.Linear(layers_sizes[n_layers], layers_sizes[n_layers + 1])

    def forward(self, x):
        for l in self.linears:
            x = F.relu(l(x))
        x = self.last_linear(x)
        return x

And I initialize the model as:

vocab_size = 3589
class_weights = torch.FloatTensor([0.2,0.8])
criterion = nn.CrossEntropyLoss(weight=class_weights)
layers_array = [vocab_size,100,10,2]
model = Net(layers_array,0.3)
print(model)
learning_rate = 0.02
batch_size = 256
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) 

How can I replicate the results across different runs of the same architecture by using the same weight initialisation every time? Thanks in advance.

You could create a function for initializing the weights of your model and apply it.
This might be a good starter:

def weights_init(m):
    if isinstance(m, nn.Linear):
        # Xavier/Glorot init for the weight matrix; zero the bias,
        # since Xavier init is not defined for 1-D tensors
        nn.init.xavier_uniform_(m.weight)
        nn.init.zeros_(m.bias)

model.apply(weights_init)
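
The init function by itself still draws random numbers, so to get identical weights on every run you also need to seed the RNG before the model (or the re-initialization) is created. A minimal sketch, reusing Net and layers_array from your post (the seed value 0 is arbitrary):

import torch

torch.manual_seed(0)             # seed the CPU RNG before any weights are created
torch.cuda.manual_seed_all(0)    # and the GPU RNGs, if you train on CUDA

model = Net(layers_array, 0.3)   # the Linear layers are randomly initialized here
model.apply(weights_init)        # optional re-init with the function above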

Note that cuDNN is not deterministic by default, so if you are training on the GPU you may also want to switch it to deterministic mode (or deactivate it).
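
Something along these lines (these flags trade some speed for reproducibility):

torch.backends.cudnn.deterministic = True   # force deterministic cuDNN kernels
torch.backends.cudnn.benchmark = False      # disable non-deterministic autotuning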