Fix bias and weights of a layer

Hello,

Suppose I have the following network:

    import torch

    class Net3(torch.nn.Module):  # define the network
        def __init__(self):
            super(Net3, self).__init__()
            # `neurons` (the hidden-layer width) is defined elsewhere
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc2 = torch.nn.Linear(neurons, 1, bias=False)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            return self.fc2(x)

    model = Net3()

I have two questions:

  1. How can I set the weights and bias of the layer fc1 myself, instead of using the random initialization?
  2. How can I prevent the model from optimizing the weights and bias of the layer fc1?

For example, with neurons = 3:

    with torch.no_grad():
        # the tensor shapes you assign should match the model's own parameters
        model.fc1.weight = torch.nn.Parameter(torch.tensor([[1.], [2.], [3.]]))
        model.fc1.bias = torch.nn.Parameter(torch.tensor([1., 2., 3.]))
    model.fc1.requires_grad_(False)
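
To double-check that the layer is frozen, and to keep the frozen parameters out of the optimizer, something along these lines should work (a sketch; SGD and the learning rate are just placeholders):

    # verify that fc1 is frozen
    for name, param in model.fc1.named_parameters():
        print(name, param.requires_grad)  # should print False for weight and bias

    # pass only the trainable parameters to the optimizer
    optimizer = torch.optim.SGD(
        (p for p in model.parameters() if p.requires_grad), lr=0.01)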

Hi,

Thank you for your help. I changed my code as you described and made sure that the shapes were correct:

    import numpy as np
    import torch

    class Net3(torch.nn.Module):  # define the network
        def __init__(self):
            super(Net3, self).__init__()
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc1.weight.data = torch.ones(neurons).reshape(-1, 1)
            self.fc1.bias.data = torch.tensor(np.concatenate((np.linspace(-1, 1, neurons)[1:], [0])).reshape(1, -1)[0])
            self.fc1.requires_grad_(False)
            self.fc2 = torch.nn.Linear(neurons, 1, bias=False)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            return self.fc2(x)

    model = Net3()

Here are the weights and bias of fc1:

    torch.ones(neurons).reshape(-1,1)
    Out[132]:
    tensor([[1.],
            [1.],
            [1.],
            [1.],
            [1.],
            [1.]])

    torch.tensor(np.concatenate((np.linspace(-1,1,neurons)[1:],[0])).reshape(1,-1)[0])
    Out[133]:
    tensor([-0.6000, -0.2000,  0.2000,  0.6000,  1.0000,  0.0000],
           dtype=torch.float64)

But I am still getting this error:


      File ".../00_auto_gradientMODEL3.py", line 170, in Interpol
        y_pred = model(inputs)

      File "/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
        result = self.forward(*input, **kwargs)

      File ".../00_auto_gradientMODEL3.py", line 140, in forward
        x = self.relu(self.fc1(x))

      File "/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
        result = self.forward(*input, **kwargs)

      File "/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 87, in forward
        return F.linear(input, self.weight, self.bias)

      File "/opt/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py", line 1370, in linear
        ret = torch.addmm(bias, input, weight.t())

    RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'mat1' in call to _th_addmm

Try this:

    self.fc1.bias.data = torch.tensor(np.concatenate((np.linspace(-1,1,neurons)[1:],[0])).reshape(1,-1)[0]).float()

np.linspace returns float64, so the bias becomes a Double tensor while the input and the other parameters are Float (float32); casting with .float() fixes the mismatch.
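
As an aside, one way to avoid the mismatch entirely is to build the bias with torch.linspace, which returns float32 by default. A minimal sketch (neurons = 6 is just for illustration, to match the printed tensors above):

    import torch

    neurons = 6  # illustration only

    # torch.linspace returns float32, so no cast is needed
    bias_init = torch.cat([torch.linspace(-1, 1, neurons)[1:], torch.zeros(1)])
    print(bias_init)        # tensor([-0.6000, -0.2000, 0.2000, 0.6000, 1.0000, 0.0000])
    print(bias_init.dtype)  # torch.float32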


Perfect, thank you very much!

Nit: don't use the .data attribute anymore, as it will eventually be removed.
Instead, wrap the code in a with torch.no_grad() block and assign the new nn.Parameter.
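
Applied to the fc1 setup above, that pattern might look roughly like this (a sketch, assuming the same NumPy-based bias values and the illustrative neurons = 6):

    import numpy as np
    import torch

    neurons = 6  # illustrative width, matching the printed tensors above

    fc1 = torch.nn.Linear(1, neurons, bias=True)

    with torch.no_grad():
        # assign new Parameters instead of touching .data
        fc1.weight = torch.nn.Parameter(torch.ones(neurons, 1))
        fc1.bias = torch.nn.Parameter(torch.tensor(
            np.concatenate((np.linspace(-1, 1, neurons)[1:], [0])), dtype=torch.float32))

    # keep the layer out of the optimization
    fc1.requires_grad_(False)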
