Stacking LSTM and Linear layers in a time distributed manner

I want to pass the output from each timestep of an LSTM layer through the same linear layer, i.e. the output at each timestep goes through a shared linear layer and yields a vector of size target_size, and the same happens at every following timestep.
Is the following code the right way to do this?

import torch
import torch.nn as nn


class dynamicLayer(nn.Module):

    def __init__(self, input_dim, hidden_dim, num_layers, target_size=7):
        super(dynamicLayer, self).__init__()
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.num_layers = num_layers
        # initial hidden and cell states, shape (num_layers, batch, hidden_dim);
        # a batch size of 1 is assumed here
        self.hidden_latent = (torch.randn(num_layers, 1, hidden_dim),
                              torch.randn(num_layers, 1, hidden_dim))
        
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers)
        self.hidden2out = nn.Linear(hidden_dim, target_size)
    
    def forward(self, input):
        # input (seq_len, batch, input_size)
        # hidden state (num_layers, batch, hidden_size)
        # output (seq_len, batch, hidden_size)

        lstm_out, (h_n, c_n) = self.lstm(input, self.hidden_latent)
        # keep the final states for the next call; detach them so gradients
        # do not flow back across separate forward passes
        self.hidden_latent = (h_n.detach(), c_n.detach())
        # nn.Linear acts on the last dimension, so the same weights are applied
        # to every timestep: (seq_len, batch, hidden_size) -> (seq_len, batch, target_size)
        out = self.hidden2out(lstm_out)
        return out
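
For reference, a quick shape check (a minimal sketch; the dimensions below are hypothetical and only chosen for illustration) shows that nn.Linear is applied along the last dimension, i.e. the same weights are used at every timestep:

# hypothetical sizes, just to verify the shapes
model = dynamicLayer(input_dim=10, hidden_dim=32, num_layers=2)
dummy = torch.randn(5, 1, 10)   # (seq_len=5, batch=1, input_size=10)
out = model(dummy)
print(out.shape)                # torch.Size([5, 1, 7]): one target_size vector per timestep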