Strange nan problem

I built my RBF net and am testing it before training, but I have run into a strange problem: sometimes the output is normal, and sometimes it is all nan.
The following minimal script reproduces the problem.

import torch
import torch.nn as nn
from torch.autograd import Variable

class Gaussian(nn.Module):

    def __init__(self, in_features, out_features):
        super(Gaussian, self).__init__()
        # per-unit width and center parameters; note that torch.Tensor(...)
        # allocates uninitialized storage
        self.sigma = nn.Parameter(torch.Tensor(out_features))
        self.center = nn.Parameter(torch.Tensor(out_features, in_features))

    def forward(self, input):
        # squared Euclidean distance from the input to each center
        dist = self.center - input
        dist = torch.pow(dist, 2)
        dist = torch.sum(dist, 1)
        # Gaussian activation: exp(-sigma^2 * ||center - input||^2 / 2)
        sigma = torch.pow(self.sigma, 2)
        dist = -1 / 2 * sigma * dist
        output = torch.exp(dist)
        return output

class RBF(nn.Module):
    def __init__(self, n_lin1, n_Gau1, n_lin2, n_Gau2, n_Soft):
        super(RBF, self).__init__()
        self.linear1 = nn.Linear(n_lin1, n_Gau1, bias=False)
        self.Gaussian1 = Gaussian(n_Gau1, n_lin2)
        self.linear2 = nn.Linear(n_lin2, n_Gau2, bias=False)
        self.Gaussian2 = Gaussian(n_Gau2, n_Soft)
        self.Softmax = torch.nn.Softmax(0)

    def forward(self, x):
        x = self.linear1(x)
        x = self.Gaussian1(x)
        x = self.linear2(x)
        x = self.Gaussian2(x)
        x = self.Softmax(x)
        return x

def Weight_init(m):
    if isinstance(m, nn.Linear):
        nn.init.normal(m.weight)
        
# loadcents is a local module that returns centers learned offline (by KNN);
# it is not needed for this minimal reproduction
# import loadcents
# cents = torch.Tensor(loadcents.loadcents())

def Gaussian_init(m):
    if isinstance(m, Gaussian):
        nn.init.normal(m.sigma, 0, 0.1)
        # tmp is a separate tensor, so initializing it never
        # writes back into m.center
        tmp = m.center.clone()
        nn.init.normal(tmp, 0, 0.3)

n_lin1=10
n_Gau1=10
n_lin2=10
n_Gau2=10
n_Soft=10

model=RBF(n_lin1,n_Gau1,n_lin2,n_Gau2,n_Soft)
model.apply(Weight_init)
model.apply(Gaussian_init)

x=Variable(torch.randn(10))
y=model(x)
print(y)

You never initialize your Gaussian.center, so it keeps whatever happened to be in the uninitialized storage; those values can be arbitrarily large, and may even be inf or nan.
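
For illustration (the output varies from run to run, precisely because the storage is uninitialized):

import torch

w = torch.Tensor(3, 3)       # uninitialized: arbitrary values, possibly huge
print(w)
print(torch.exp(-w.pow(2)))  # large magnitudes underflow to 0; any nan propagates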

This is just a minimal script to reproduce the error. What I actually want is to initialize the centers from a given tensor (learned by KNN), but doing that directly gave me "a leaf Variable that requires grad has been used in an in-place operation." I then learned from https://discuss.pytorch.org/t/in-place-operation-when-indexing-target-variable-with-bytetensor/15839 that I should work on a copy, so I introduced tmp, but it doesn't work.
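
The error itself comes from an in-place write on a leaf tensor that requires grad; a minimal trigger, for reference:

import torch
import torch.nn as nn

p = nn.Parameter(torch.Tensor(2, 2))
try:
    p.copy_(torch.zeros(2, 2))  # in-place write on a leaf that requires grad
except RuntimeError as e:
    print(e)  # a leaf Variable that requires grad is being used in an in-place operation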

Use m.center.data.copy_(init_tensor) to initialize. Copying into .data writes the values into the parameter's underlying tensor without going through autograd, so the in-place error does not occur.
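
Applied to your script (assuming cents has the same shape as m.center, i.e. (out_features, in_features)), the init function would look something like:

def Gaussian_init(m):
    if isinstance(m, Gaussian):
        nn.init.normal(m.sigma, 0, 0.1)
        # copy the precomputed centers into the parameter's underlying tensor
        m.center.data.copy_(cents)

In more recent PyTorch versions the same thing is usually written as

with torch.no_grad():
    m.center.copy_(cents)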