Actor-Critic model: how to match the sizes between the model output and the action batch?

Hello guys, I am new to PyTorch and Reinforcement Learning, so sorry if this question sounds stupid or the solution is too simple, but I have no idea how to fix this problem. I've already spent a few days researching it and trying to find a way to solve it, and I couldn't. I would really appreciate it if any of you could help me with this, or at least give me some advice.

I am trying to build a model that buys and sells stocks on the market; it has just 2 possible actions, BUY and SELL.
I am implementing an Actor-Critic setup with 2 chained GRU modules for the Actor and some simple linear layers for the Critic, because I want to see how much better (or not) it is compared to a plain feed-forward model in my case.


import numpy as np
import torch
import torch.nn as nn


class ActorNN(nn.Module):
    def __init__(self, stock_env: StockEnv, conf: wandb):
        super(ActorNN, self).__init__()
        self.stock_env = stock_env
        self.input_size = 20
        self.hidden_size_1 = 235
        self.hidden_size_2 = 135
        self.num_layers_1 = 2
        self.num_layers_2 = 3
        self.batch_size = 350

        output_size = self.stock_env.action_space.n  # 2 (BUY, SELL)
        # both recurrent layers are GRUs, despite the attribute names
        self.lstm = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size_1,
                           num_layers=self.num_layers_1, dropout=conf.dropout_1)
        self.lstm_2 = nn.GRU(input_size=self.hidden_size_1, hidden_size=self.hidden_size_2,
                             num_layers=self.num_layers_2, dropout=conf.dropout_2)
        self.output_layer = nn.Linear(self.hidden_size_2, output_size)
        self.activation = nn.Tanh()

    def forward(self, x):
        # reshape so the last dim is input_size; nn.GRU defaults to
        # batch_first=False, so dim 0 is read as the sequence dimension
        x = self.activation(x.view(len(x), -1, self.input_size))
        # out holds one output per step; new_hidden_1 is the final hidden
        # state, with shape [num_layers_1, batch, hidden_size_1]
        out, new_hidden_1 = self.lstm(x)
        # the hidden state (not out) is what gets fed into the second GRU
        out = self.activation(new_hidden_1)
        out, _ = self.lstm_2(out)
        out = self.activation(out)
        out = self.output_layer(out)
        return out
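
For reference, nn.GRU returns two tensors with different shapes: the per-step outputs and the final hidden state. This little check (the sizes are the ones from my first GRU) shows the difference:

import torch
import torch.nn as nn

gru = nn.GRU(input_size=20, hidden_size=235, num_layers=2)
x = torch.rand(300, 350, 20)  # [seq_len, batch, input_size] with the default batch_first=False
out, h_n = gru(x)
print(out.shape)  # torch.Size([300, 350, 235]) -> one output per time step
print(h_n.shape)  # torch.Size([2, 350, 235])   -> one hidden state per layer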


class CriticNN(nn.Module):
    def __init__(self, stock_env: StockEnv):
        # stock_env.window_size = 300
        super(CriticNN, self).__init__()
        self.stock_env = stock_env
        # expects a state that flattens to window_size * 25 features
        self.l1 = nn.Linear(stock_env.window_size * 25, 128)
        self.l2 = nn.Linear(128, 256)
        self.l3 = nn.Linear(256, 1)
        self.activation = nn.ReLU()

    def forward(self, x):
        # flatten everything after the batch dim: [batch, window * features]
        output = self.activation(self.l1(torch.flatten(x, start_dim=1)))
        output = self.activation(self.l2(output))
        output = self.l3(output)  # state value, shape [batch, 1]
        return output

Now my problem arises in the optimization function of the agent:


def optimize(self):
    if len(self.memory) < self.config.batch_size:
        return

    state, action, new_state, reward, done = self.memory.sample(batch_size=self.config.batch_size)

    state = torch.Tensor(np.array(state)).to(device)
    new_state = torch.Tensor(np.array(new_state)).to(device)
    reward = torch.Tensor(reward).to(device)
    action = torch.LongTensor(action).to(device)  # shape [batch_size] = [350]
    done = torch.Tensor(done).to(device)
    # note: self.actor(state) is the raw network output, not softmax probabilities
    dist = torch.distributions.Categorical(self.actor(state))
    # one-step TD advantage: r + gamma * V(s') * (1 - done) - V(s)
    advantage = (reward + (1 - done) * self.config.gamma * self.critic(new_state).squeeze(1)
                 - self.critic(state).squeeze(1))

    critic_loss = advantage.pow(2).mean()
    self.optimizer_critic.zero_grad()
    critic_loss.backward()
    self.optimizer_critic.step()

    # policy-gradient step; advantage is detached so it only scales the actor loss
    actor_loss = -dist.log_prob(action) * advantage.detach()
    self.optimizer_actor.zero_grad()
    actor_loss.mean().backward()
    self.optimizer_actor.step()

When I initialize my dist variable, it has shape [3 (hidden layers), 300 (window_size), 2 (number of actions)], while my action has shape [350 (batch_size)].

Now, when I try to run dist.log_prob(action), I get an error that says:

The size of tensor a (350) must match the size of tensor b (300) at non-singleton dimension 1
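
To make the mismatch concrete, here is a minimal standalone snippet that, as far as I can tell, reproduces the same error with random tensors of the shapes above:

import torch

probs = torch.rand(3, 300, 2)                  # same shape as my actor output
dist = torch.distributions.Categorical(probs)  # batch_shape = [3, 300]
action = torch.randint(0, 2, (350,))           # same shape as my action batch
dist.log_prob(action)  # RuntimeError: The size of tensor a (350) must match
                       # the size of tensor b (300) at non-singleton dimension 1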

This is because my dist is not the same shape as action, and here comes my question: how can I make them match? Can any of you help me with this? I've tried using multiple Linear layers to match their sizes, but I couldn't make them match.
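
One direction I considered (I'm not sure it's right) is to make both GRUs batch_first so the batch stays in dim 0, keep only the last time step, and apply a softmax, so the actor returns one probability row per sample. The class name and layer sizes below are just for illustration:

import torch
import torch.nn as nn

class ActorSketch(nn.Module):
    def __init__(self, input_size=20, hidden_1=235, hidden_2=135, n_actions=2):
        super().__init__()
        self.input_size = input_size
        self.gru_1 = nn.GRU(input_size, hidden_1, num_layers=2, batch_first=True)
        self.gru_2 = nn.GRU(hidden_1, hidden_2, num_layers=3, batch_first=True)
        self.output_layer = nn.Linear(hidden_2, n_actions)
        self.activation = nn.Tanh()

    def forward(self, x):
        x = self.activation(x.view(len(x), -1, self.input_size))  # [batch, window, features]
        out, _ = self.gru_1(x)                     # [batch, window, hidden_1]
        out, _ = self.gru_2(self.activation(out))  # [batch, window, hidden_2]
        out = out[:, -1, :]                        # last time step only: [batch, hidden_2]
        return torch.softmax(self.output_layer(out), dim=-1)  # [batch, n_actions]

With this, Categorical(actor(state)) would have batch_shape [350], so dist.log_prob(action) should broadcast cleanly, but I don't know whether throwing away all but the last time step is the right call for this task.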

What’s the shape of the state you’re feeding to the actor network?