# Following the template from "CleanRL: Implementing PPO" in the PettingZoo documentation.
import copy
import pdb

import numpy
import torch
import torch.nn as nn
import torch.optim as optim
from pettingzoo.butterfly import pistonball_v6
class centralized_ddpg_agent(torch.nn.Module):
    """Centralized DDPG agent holding a critic network and its target copy.

    The critic maps a concatenated (action, state) vector to a single
    scalar Q-value estimate.
    """

    def __init__(self, action_space_size, state_space_size):
        """Build the critic and target networks.

        Args:
            action_space_size: dimensionality of the joint action vector.
            state_space_size: dimensionality of the joint state vector.
        """
        super().__init__()
        self.critic_network = nn.Sequential(
            # Orthogonal init per the CleanRL template; the missing
            # _layer_init helper caused the original AttributeError.
            self._layer_init(nn.Linear(action_space_size + state_space_size, 1)),
        )
        # Target network must be an independent copy of the critic
        # (nn.Sequential has no .clone method — deepcopy is the idiom).
        self.critic_target_network = copy.deepcopy(self.critic_network)
        # NOTE: '//' in the original was intended as a comment marker;
        # it would have been floor division by the undefined names
        # `gamma` and `tau`.
        self.learning_rate = 0.99  # gamma
        self.target_rate = 0.01  # tau

    @staticmethod
    def _layer_init(layer, std=numpy.sqrt(2), bias_const=0.0):
        """Orthogonally initialize a layer's weights and set a constant bias
        (CleanRL convention)."""
        torch.nn.init.orthogonal_(layer.weight, std)
        torch.nn.init.constant_(layer.bias, bias_const)
        return layer

    def forward_critic(self, action, state):
        """Return Q(action, state).

        Args:
            action: tensor of shape (batch, action_space_size).
            state: tensor of shape (batch, state_space_size).

        Returns:
            Tensor of shape (batch, 1) with the Q-value estimates.
        """
        # Concatenate along the feature dimension — tensor '+' in the
        # original would have added element-wise (or failed on shape).
        return self.critic_network(torch.cat((action, state), dim=-1))
if __name__ == "__main__":
    env = pistonball_v6.parallel_env()
    agent_size_modifier = len(env.possible_agents)
    # One continuous action per piston agent.
    num_actions = agent_size_modifier
    # Flattened joint-state size: the original used len(shape), which is
    # the number of dimensions (e.g. 3 for an HxWxC image), not the
    # number of state elements — use the product of the shape instead.
    obs_shape = env.observation_space(env.possible_agents[0]).shape
    num_states = int(numpy.prod(obs_shape)) * agent_size_modifier
    ddpg_agent = centralized_ddpg_agent(num_actions, num_states)
# Original error output (now fixed by defining _layer_init above):
#
#   $ python test.py
#   Traceback (most recent call last):
#     File "/home/master-andreas/test.py", line 32, in <module>
#       ddpg_agent = centralized_ddpg_agent(num_actions, num_states)
#     File "/home/master-andreas/test.py", line 14, in __init__
#       self._layer_init(nn.Linear(action_space_size + state_space_size, 1)),
#     File ".../torch/nn/modules/module.py", line 1207, in __getattr__
#       raise AttributeError("'{}' object has no attribute '{}'".format(
#   AttributeError: 'centralized_ddpg_agent' object has no attribute '_layer_init'