Does this work as intended?

Hello!


import torch.nn as nn
import torch.optim as optim


class ActorCriticNet_Discrete(nn.Module):

    def __init__(self, num_inputs, num_actions, learning_rate, hidden_size,
                 number_of_layers, is_dual):
        super(ActorCriticNet_Discrete, self).__init__()

        self.is_dual = is_dual  # store the flag before it is used below
        self.actor_nn = None
        self.critic_nn = None
        self.optimizer = None
        if self.is_dual:
            # dual: two completely separate networks for actor and critic
            self.actor_nn = nn.Sequential(
                nn.Linear(num_inputs, hidden_size), *[
                    nn.Linear(hidden_size, hidden_size)
                    for _ in range(number_of_layers - 1)
                ], nn.Linear(hidden_size, num_actions))

            self.critic_nn = nn.Sequential(
                nn.Linear(num_inputs, hidden_size), *[
                    nn.Linear(hidden_size, hidden_size)
                    for _ in range(number_of_layers - 1)
                ], nn.Linear(hidden_size, 1))

            self.optimizer = optim.Adam([{
                'params': self.actor_nn.parameters(),
                'lr': learning_rate
            }, {
                'params': self.critic_nn.parameters(),
                'lr': 0.001
            }])
        else:
            # split: shared base network followed by separate actor/critic heads
            base_nn = nn.Sequential(
                nn.Linear(num_inputs, hidden_size), *[
                    nn.Linear(hidden_size, hidden_size)
                    for _ in range(number_of_layers - 1)
                ])

            self.actor_nn = nn.Sequential(base_nn,
                                          nn.Linear(hidden_size, num_actions))
            self.critic_nn = nn.Sequential(base_nn, nn.Linear(hidden_size, 1))

            self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

    def actor(self, state):
        return self.actor_nn(state)

    def critic(self, state):
        return self.critic_nn(state)

I'm trying to write a class that handles both types of neural networks (dual and split), and the focus of my question is this part:

base_nn = nn.Sequential(
    nn.Linear(num_inputs, hidden_size), *[
        nn.Linear(hidden_size, hidden_size)
        for _ in range(number_of_layers - 1)
    ])

self.actor_nn = nn.Sequential(base_nn,
                              nn.Linear(hidden_size, num_actions))
self.critic_nn = nn.Sequential(base_nn, nn.Linear(hidden_size, 1))

is this equivalent to:

self.nn = nn.Sequential(
    nn.Linear(num_inputs, hidden_size), *[
        nn.Linear(hidden_size, hidden_size)
        for _ in range(number_of_layers - 1)
    ])

self.actor_layer = nn.Linear(hidden_size, num_actions)
self.critic_layer = nn.Linear(hidden_size, 1)

in terms of computation? I ask because I want to keep my actor and critic functions free of if statements, roughly like the sketch below.
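To make it concrete, here is roughly what I have in mind for the second version (the class name and method bodies are just illustrative):

import torch.nn as nn

class SplitNet(nn.Module):
    def __init__(self, num_inputs, num_actions, hidden_size, number_of_layers):
        super().__init__()
        # shared trunk
        self.nn = nn.Sequential(
            nn.Linear(num_inputs, hidden_size), *[
                nn.Linear(hidden_size, hidden_size)
                for _ in range(number_of_layers - 1)
            ])
        # separate output heads
        self.actor_layer = nn.Linear(hidden_size, num_actions)
        self.critic_layer = nn.Linear(hidden_size, 1)

    def actor(self, state):
        # intended to match nn.Sequential(base_nn, actor_head)(state) from the first version
        return self.actor_layer(self.nn(state))

    def critic(self, state):
        return self.critic_layer(self.nn(state))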

Thanks!

In the first example, self.actor_nn and self.critic_nn both contain the same base_nn module inside their Sequential wrappers, so the trunk parameters are shared between them, whereas in the second approach you keep the shared trunk as a standalone module, self.nn, and apply the head layers to its output yourself. As long as actor and critic pass the state through self.nn and then the corresponding head, the computation is the same, and both approaches should have the same number of parameters.
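To see the sharing concretely, here is a quick check you could run (just a sketch with made-up layer sizes):

import torch
import torch.nn as nn

# a tiny trunk shared by two heads, mirroring the first layout
base_nn = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 8))
actor_nn = nn.Sequential(base_nn, nn.Linear(8, 2))
critic_nn = nn.Sequential(base_nn, nn.Linear(8, 1))

# base_nn appears inside both wrappers, but it is the same module object,
# so its parameters are shared rather than duplicated
print(actor_nn[0] is critic_nn[0])                 # True
print(actor_nn[0][0].weight is base_nn[0].weight)  # True

# calling the wrapper is the same computation as calling the trunk and the
# head yourself, which is what the second layout does explicitly
x = torch.randn(3, 4)
print(torch.allclose(actor_nn(x), actor_nn[1](base_nn(x))))  # True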
