Cannot call "_layer_init" on network

I am following the template from: CleanRL: Implementing PPO - PettingZoo Documentation

import numpy
import torch
import torch.nn as nn
import torch.optim as optim
import pdb

from pettingzoo.butterfly import pistonball_v6

class centralized_ddpg_agent(torch.nn.Module):
    def __init__(self, action_space_size, state_space_size):
        super().__init__()

        self.critic_network = nn.Sequential(
            self._layer_init(nn.Linear(action_space_size + state_space_size, 1)),#<-- problematic line is here
        )
        
        self.critic_target_network = critic_network.clone
        
        self.learning_rate = 0.99 // gamma
        self.target_rate = 0.01 // tau

    def forward_critic(action_space, state_space):
        return critic_network(action_space + state_space)



if __name__ == "__main__":
    env = pistonball_v6.parallel_env()
    agent_size_modifier = len(env.possible_agents)
    num_actions = agent_size_modifier
    num_states = len(env.observation_space(env.possible_agents[0]).shape) * agent_size_modifier
    ddpg_agent = centralized_ddpg_agent(num_actions, num_states)
Running the script gives:

$ python test.py
Traceback (most recent call last):
  File "/home/master-andreas/test.py", line 32, in <module>
    ddpg_agent = centralized_ddpg_agent(num_actions, num_states)
  File "/home/master-andreas/test.py", line 14, in __init__
    self._layer_init(nn.Linear(action_space_size + state_space_size, 1)),#<-- problematic line is here
  File "/home/master-andreas/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1207, in __getattr__
    raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'centralized_ddpg_agent' object has no attribute '_layer_init'

Try adding the following method to your centralized_ddpg_agent class (note that it uses np, i.e. import numpy as np):

def _layer_init(self, layer, std=np.sqrt(2), bias_const=0.0):
    # orthogonal weight initialization and constant bias, as in the CleanRL template
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer
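
For reference, the CleanRL tutorial defines this initializer as a module-level helper rather than a method, so another option is to keep it outside the class. A minimal sketch (the layer sizes are just placeholders, and it assumes import numpy as np):

import numpy as np
import torch.nn as nn

def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    # fill the weights with an orthogonal matrix scaled by std, and the biases with a constant
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)
    return layer

critic_network = nn.Sequential(
    layer_init(nn.Linear(8, 1)),  # placeholder sizes; use action_space_size + state_space_size in your class
)

Either way, the AttributeError was simply because the method was never defined on the class.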

Thanks for the reply, I did not realize that _layer_init needed to be defined first; the issue has been solved.

class centralized_ddpg_agent(torch.nn.Module):
    def __init__(self, action_space_size, state_space_size):
        super().__init__()

        self.actor_network = nn.Sequential(
            nn.Linear(state_space_size, action_space_size),
        )

        self.critic_network = nn.Sequential(
            nn.Linear(action_space_size + state_space_size, 1),
        )

        
        #self.actor_target_network = actor_network.clone
        #self.critic_target_network = critic_network.clone
        
        self.learning_rate = 0.99 # gamma
        self.target_rate = 0.01 # tau
        
    def forward_action(self, state_space):
        return self.actor_network(state_space)

    def forward_critic(self, action_space, state_space):
        # concatenate rather than add, since the linear layer expects
        # action_space_size + state_space_size input features
        return self.critic_network(torch.cat([action_space, state_space], dim=-1))

I have since realized that I need to split the "actor" and "critic" into separate torch.nn.Module classes, so please do not use the code above as-is.
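
For anyone reading later, here is a minimal sketch of what that separation could look like (class and variable names are just placeholders, reusing num_states and num_actions from the setup above), with copy.deepcopy used to create the target networks instead of .clone:

import copy
import torch
import torch.nn as nn

class Actor(nn.Module):
    def __init__(self, state_space_size, action_space_size):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(state_space_size, action_space_size))

    def forward(self, state):
        return self.net(state)

class Critic(nn.Module):
    def __init__(self, state_space_size, action_space_size):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(state_space_size + action_space_size, 1))

    def forward(self, state, action):
        # the critic scores a (state, action) pair, so concatenate the inputs
        return self.net(torch.cat([state, action], dim=-1))

actor = Actor(num_states, num_actions)
critic = Critic(num_states, num_actions)

# DDPG target networks start out as exact copies of the online networks
actor_target = copy.deepcopy(actor)
critic_target = copy.deepcopy(critic)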