PyTorch: How to initialize weights?

Hi!
I have some conv layers like below:

self.conv_phi = nn.Sequential(
    nn.Conv3d(block_size*block_size, int(np.ceil(SR*block_size*block_size)), kernel_size=(1, 1, 1))
)
self.conv_p1 = nn.Sequential(
    nn.Conv3d(int(np.ceil(SR*block_size*block_size)), block_size*block_size, kernel_size=(1, 1, 1)),
    nn.ReLU()
)
self.conv_p2 = nn.Sequential(
    nn.Conv3d(block_size*block_size, block_size*block_size, kernel_size=(1, 1, 1)),
    nn.ReLU()
)
self.conv_p3 = nn.Sequential(
    nn.Conv3d(block_size*block_size, block_size*block_size, kernel_size=(1, 1, 1))
)

I want to initialize the weights of the convolutional layers from a normal distribution, using a different standard deviation for each layer.
I searched and found this code:

def weights_init(m):
    if isinstance(m, nn.Conv3d):
        m.weight.data.normal_(0.0, 0.001)

but how can I set a different standard deviation for each conv layer?

You can use the function below:

import torch.nn as nn

def normal_init(m, mean, std):
    # include nn.Conv3d here so your conv layers are covered as well
    if isinstance(m, (nn.Linear, nn.Conv2d, nn.Conv3d)):
        m.weight.data.normal_(mean, std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
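(Side note: torch.nn.init offers the same in-place fill if you prefer it over the .data calls, e.g. nn.init.normal_(m.weight, mean, std) and nn.init.zeros_(m.bias).)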

and in the network class:

# call self.weight_init(mean, std) at the end of __init__
def weight_init(self, mean, std):
    for block in self._modules:
        try:
            # containers such as nn.Sequential can be iterated over
            for m in self._modules[block]:
                normal_init(m, mean, std)
        except TypeError:
            # this entry is a single module rather than a container
            normal_init(self._modules[block], mean, std)
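
If you want a different standard deviation for each conv block, you can also skip the generic loop and initialize each block explicitly. A minimal sketch for your layers (the std values here are just placeholders):

def weight_init(self):
    # one (mean, std) pair per block; adjust the numbers to whatever you need
    for block, std in [(self.conv_phi, 0.001),
                       (self.conv_p1, 0.01),
                       (self.conv_p2, 0.01),
                       (self.conv_p3, 0.1)]:
        for m in block:
            normal_init(m, 0.0, std)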

Thanks for your reply.
Would you mind explaining it?
What does block refer to?

# call self.weight_init(mean, std) at the end of __init__
def weight_init(self, mean, std):
    for block in self._modules:
        try:
            for m in self._modules[block]:
                normal_init(m, mean, std)
        except TypeError:
            normal_init(self._modules[block], mean, std)

Sorry for the half reply. Basically, block iterates over each layer or module in the network and reinitializes its weights. You can add an if condition to skip anything you don't want to touch.
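
The example below also uses a small View module to reshape tensors inside nn.Sequential. It isn't defined in this snippet, so here is a minimal sketch of what it is assumed to look like (plus the usual imports):

import torch
import torch.nn as nn

class View(nn.Module):
    # Reshapes its input to the given size, so a reshape can sit inside nn.Sequential.
    def __init__(self, size):
        super(View, self).__init__()
        self.size = size

    def forward(self, x):
        return x.view(self.size)

With that in place, the full example: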

class VAE(nn.Module):
    """Encoder-Decoder architecture for both WAE-MMD and WAE-GAN."""
    def __init__(self, z_dim=32, nc=3):
        super(VAE, self).__init__()
        self.z_dim = z_dim
        self.nc = nc
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, 128, 4, 2, 1, bias=False),              # B,  128, 32, 32
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),             # B,  256, 16, 16
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),             # B,  512,  8,  8
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.Conv2d(512, 1024, 4, 2, 1, bias=False),            # B, 1024,  4,  4
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            View((-1, 1024*2*2)),                                 # B, 1024*2*2
        )

        self.fc_mu = nn.Linear(1024*2*2, z_dim)                            # B, z_dim
        self.fc_logvar = nn.Linear(1024*2*2, z_dim)                            # B, z_dim
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, 1024*4*4),                           # B, 1024*4*4
            View((-1, 1024, 4, 4)),                               # B, 1024,  8,  8
            nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),   # B,  512, 16, 16
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),    # B,  256, 32, 32
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),    # B,  128, 64, 64
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, nc, 1),                       # B,   nc, 64, 64
        )
        self.weight_init()

    def weight_init(self, mean=0.0, std=0.001):  # example values; set whatever you need
        for block in self._modules:
            try:
                # containers such as nn.Sequential can be iterated over
                for m in self._modules[block]:
                    normal_init(m, mean, std)
            except TypeError:
                # single modules (e.g. the fc layers) are not iterable
                normal_init(self._modules[block], mean, std)

    def forward(self, x):
        z = self._encode(x)
        mu, logvar = self.fc_mu(z), self.fc_logvar(z)
        z = self.reparameterize(mu, logvar)
        x_recon = self._decode(z)

        return x_recon, z, mu, logvar

    def reparameterize(self, mu, logvar):
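        # reparameterization trick: z = mu + std * eps, with std = exp(0.5 * logvar)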
        stds = (0.5 * logvar).exp()
        epsilon = torch.randn(*mu.size())
        if mu.is_cuda:
            stds, epsilon = stds.cuda(), epsilon.cuda()
        latents = epsilon * stds + mu
        return latents

    def _encode(self, x):
        return self.encoder(x)

    def _decode(self, z):