Replace activation functions in model

I have a model that uses ReLU activation functions. I would like to replace every ReLU activation function of that model with another activation function.

I tried to iterate over the model using model.named_children() and model.named_modules() to find and replace the activation functions. However, the activations do not appear; I can only access the linear layers. The softmax function also does not appear in the output. What am I doing wrong? How can I access and replace the relu() and softmax() calls in my model?

Here is my code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):

    def __init__(self, num_in, num_hidden, num_out):
        super().__init__()

        self.linear1 = nn.Linear(num_in, num_hidden)
        self.linear2 = nn.Linear(num_hidden, num_hidden)
        self.linear3 = nn.Linear(num_hidden, num_out)

    def forward(self, x):
        x = torch.relu(self.linear1(x))
        x = torch.relu(self.linear2(x))
        x = F.softmax(self.linear3(x), dim=1)
        return x


def main():
    model = MLP(num_in=2, num_hidden=16, num_out=2)

    print("named_children()")
    for name, layer in model.named_children():
        print(name, layer)

    print("\nnamed_modules()")
    for name, layer in model.named_modules():
        print(name, layer)


if __name__ == "__main__":
    main()

which produces the following output:

named_children()
linear1 Linear(in_features=2, out_features=16, bias=True)
linear2 Linear(in_features=16, out_features=16, bias=True)
linear3 Linear(in_features=16, out_features=2, bias=True)

named_modules()
 MLP(
  (linear1): Linear(in_features=2, out_features=16, bias=True)
  (linear2): Linear(in_features=16, out_features=16, bias=True)
  (linear3): Linear(in_features=16, out_features=2, bias=True)
)
linear1 Linear(in_features=2, out_features=16, bias=True)
linear2 Linear(in_features=16, out_features=16, bias=True)
linear3 Linear(in_features=16, out_features=2, bias=True)

Do you mean something like this? named_children() and named_modules() only list nn.Module instances that are registered as attributes of the model; torch.relu() and F.softmax() are plain function calls inside forward(), so they never show up. If you register the activations as modules instead:

import torch
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):

    def __init__(self, num_in, num_hidden, num_out):
        super().__init__()

        self.linear1 = nn.Linear(num_in, num_hidden)
        self.linear2 = nn.Linear(num_hidden, num_hidden)
        self.linear3 = nn.Linear(num_hidden, num_out)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        x = self.softmax(self.linear3(x))
        return x

which gives the following output for your print statements:

named_children()
linear1 Linear(in_features=2, out_features=16, bias=True)
linear2 Linear(in_features=16, out_features=16, bias=True)
linear3 Linear(in_features=16, out_features=2, bias=True)
relu ReLU()
softmax Softmax(dim=1)

named_modules()
 MLP(
  (linear1): Linear(in_features=2, out_features=16, bias=True)
  (linear2): Linear(in_features=16, out_features=16, bias=True)
  (linear3): Linear(in_features=16, out_features=2, bias=True)
  (relu): ReLU()
  (softmax): Softmax(dim=1)
)
linear1 Linear(in_features=2, out_features=16, bias=True)
linear2 Linear(in_features=16, out_features=16, bias=True)
linear3 Linear(in_features=16, out_features=2, bias=True)
relu ReLU()
softmax Softmax(dim=1)
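
With the activations registered as submodules, replacing them comes down to reassigning the attributes. Below is a minimal sketch, assuming you want to swap every nn.ReLU for nn.GELU (the activation choice and the helper name replace_relu are just for illustration); the recursion handles models with nested submodules:

import torch.nn as nn


def replace_relu(module, new_act=nn.GELU):
    # Recurse first so activations inside nested submodules are handled too.
    for name, child in module.named_children():
        replace_relu(child, new_act)
        if isinstance(child, nn.ReLU):
            # Reassigning the attribute swaps the registered submodule in place.
            setattr(module, name, new_act())


model = MLP(num_in=2, num_hidden=16, num_out=2)
replace_relu(model)
print(model)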

Ok, that seems to work. Now, however, the ReLU layer appears only once, even though it is used twice in forward(). Why is that? And how could I extract the activation layers from a model that uses torch.relu() and F.softmax()?
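
The ReLU shows up only once because self.relu is a single nn.ReLU instance registered under one attribute name; named_children() lists registered submodules, not the calls in forward(), so reusing the same instance twice still gives one entry (registering self.relu1 and self.relu2 separately would give one entry per use). The functional calls torch.relu() and F.softmax() are not modules at all, so module iteration cannot find them. One option, sketched below under the assumption that the original MLP class from the top of the post is in scope and traceable (no data-dependent control flow), is torch.fx symbolic tracing, which records such calls as call_function nodes:

import torch
import torch.fx
import torch.nn.functional as F

# Uses the original, functional MLP class defined at the top of the post.
model = MLP(num_in=2, num_hidden=16, num_out=2)
traced = torch.fx.symbolic_trace(model)

# Functional activations appear as call_function nodes in the traced graph.
for node in traced.graph.nodes:
    if node.op == "call_function" and node.target in (torch.relu, F.softmax):
        print(node.name, node.target)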