Access the weights of a neural network after each epoch

Hi, I have an autoregressive neural network. While training it I want to see the weights and biases at every epoch. How can I access them?

"class Neural_Made(nn.Module):
def init(self,n,m):
super(Neural_Made,self).init()
self.n=n
self.m=m
self.in_size=nmmmm
self.register_buffer(‘Mask1’,torch.ones([self.in_size]*2))
self.register_buffer(‘Mask2’,torch.eye(self.in_size))
self.Mask1=torch.tril(self.Mask1)
self.Mask2=torch.sub(self.Mask1,self.Mask2)

    self.fc1=nn.Linear(self.in_size,self.in_size)
    nn.init.xavier_uniform_(self.fc1.weight)
    self.fc1.weight.data=torch.mul(self.fc1.weight.data,self.Mask2)
    print('weights1',self.fc1.weight.data)

    self.fc2=nn.Linear(self.in_size,self.in_size)
    nn.init.xavier_uniform_(self.fc2.weight)
    self.fc2.weight.data=torch.mul(self.fc2.weight.data,self.Mask1)
    print('weights2',self.fc2.weight.data)

    self.out=nn.Linear(self.in_size,self.in_size)
    nn.init.xavier_uniform_(self.out.weight)
    self.out.weight.data=torch.mul(self.out.weight.data,self.Mask1)
    print('weights3',self.out.weight.data)

    self.prelu=nn.PReLU()
    self.sig=nn.Sigmoid()

def forward(self,x):
    y=self.fc1(x)
    z=self.prelu(y)
    y1=self.fc2(z)
    z1=self.prelu(y1)
    x1=self.out(z1)
    x2=self.sig(x1)
    return x2"

Hi @Vaibhav_Chahar,

If you want the weights/biases of your network, you can simply call

```python
params = [p for p in net.parameters()]
```

or, if you want the layer names as well, a dict works nicely:

```python
params = dict(net.named_parameters())
```
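
For example (a minimal sketch, assuming `net = Neural_Made(...)` as defined in your post), the dict is keyed by the layer names, so you can inspect individual tensors directly:

```python
# Assumes `net = Neural_Made(n, m)` from the question above.
params = dict(net.named_parameters())

# Keys look like 'fc1.weight', 'fc1.bias', 'fc2.weight', ..., 'prelu.weight'
for name, p in params.items():
    print(name, tuple(p.shape))

print(params['fc1.weight'])  # the (masked) weight matrix of the first layer
```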

If you’re doing this at each epoch, you may need to deepcopy (or clone) the parameters, otherwise the stored snapshots will be overwritten as training updates the weights/biases across epochs.
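
Something like this records a snapshot of every parameter after each epoch. It’s a minimal sketch: the optimizer, the loss, `train_loader`, `num_epochs`, and the sizes passed to `Neural_Made` are only placeholders for whatever you actually use.

```python
import torch
import torch.nn as nn

net = Neural_Made(n=2, m=2)                       # illustrative sizes only
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
num_epochs = 10                                   # placeholder

history = []                                      # one dict of tensors per epoch
for epoch in range(num_epochs):
    for x, target in train_loader:                # hypothetical data loader
        optimizer.zero_grad()
        out = net(x)
        loss = nn.functional.binary_cross_entropy(out, target)
        loss.backward()
        optimizer.step()

    # detach + clone (or copy.deepcopy) so later optimizer steps
    # don't overwrite the stored values
    snapshot = {name: p.detach().clone() for name, p in net.named_parameters()}
    history.append(snapshot)
    print(f'epoch {epoch}: fc1.weight =\n', snapshot['fc1.weight'])

# comparing history[0]['fc1.weight'] with history[-1]['fc1.weight']
# shows how that layer changed over training
```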