Hello. I’m new to Python and PyTorch, and I’m trying to transfer the layers and weights of a pre-trained model to another model for regression.

The two models have the same structure, and I want to transfer all of the pre-trained model’s layers and weights except the last layer.

Please let me know how to do that.

Here is my pre-trained model:

```
import torch


class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3, n_hidden4, n_hidden5, n_output):
        super(Net, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden1)
        self.hidden2 = torch.nn.Linear(n_hidden1, n_hidden2)
        self.hidden3 = torch.nn.Linear(n_hidden2, n_hidden3)
        self.hidden4 = torch.nn.Linear(n_hidden3, n_hidden4)
        self.hidden5 = torch.nn.Linear(n_hidden4, n_hidden5)
        self.predict = torch.nn.Linear(n_hidden5, n_output)

    def forward(self, x):
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = torch.relu(self.hidden3(x))
        x = torch.relu(self.hidden4(x))
        x = torch.relu(self.hidden5(x))
        x = self.predict(x)
        return x
```

```
net = Net(n_feature=4, n_hidden1=100, n_hidden2=80, n_hidden3=50, n_hidden4=35, n_hidden5=20, n_output=1)
print(net)  # net architecture

optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
loss_func = torch.nn.MSELoss()  # mean squared error loss, for regression

for t in range(20000):
    prediction = net(xs_train)              # forward pass on the training inputs
    loss = loss_func(prediction, ys_train)  # compare against training targets
    optimizer.zero_grad()                   # clear gradients for the next step
    loss.backward()                         # backpropagation, compute gradients
    optimizer.step()                        # apply gradients
```
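
For the transfer itself, here is a rough sketch of what I was thinking (`net2` is just a placeholder name for the new regression model, and I’m assuming it keeps the same layer names as `Net`), but I don’t know if this is correct:

```
# Sketch only: build the new model with the same architecture.
net2 = Net(n_feature=4, n_hidden1=100, n_hidden2=80, n_hidden3=50, n_hidden4=35, n_hidden5=20, n_output=1)

# Copy the trained weights but drop the parameters of the last layer ("predict"),
# so that layer stays randomly initialised in the new model.
pretrained_state = net.state_dict()
transfer_state = {k: v for k, v in pretrained_state.items() if not k.startswith("predict")}

# strict=False lets load_state_dict ignore the missing "predict" keys.
net2.load_state_dict(transfer_state, strict=False)

# Possibly freeze the copied layers so only the new last layer gets trained?
for name, param in net2.named_parameters():
    if not name.startswith("predict"):
        param.requires_grad = False
```

Is something like this the right way to do it, or is there a better approach?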