Hello, I am using an LSTM that takes 5 timesteps as input to predict the next 5. I want to know how to predict more than 5 timesteps. I assume it's got something to do with the hidden_dim, but I can't figure it out.
Here is my code:
class LSTM(nn.Module):
    """Sequence model wrapping a (possibly multi-layer) ``nn.LSTM``.

    ``forward`` returns the per-timestep hidden states (shape
    ``(batch, seq_len, n_hidden)``), exactly as before. ``predict`` is a
    new, backward-compatible method that rolls the model forward
    autoregressively so you can forecast an arbitrary number of future
    timesteps — not just ``seq_len`` of them.
    """

    def __init__(self, seq_len=5, n_features=256, n_hidden=256, n_layers=1, output_size=1):
        """
        Args:
            seq_len: expected input sequence length (informational only;
                the LSTM itself accepts any length).
            n_features: size of each input timestep.
            n_hidden: LSTM hidden size; this is also the size of each
                output timestep returned by ``forward``.
            n_layers: number of stacked LSTM layers.
            output_size: NOTE(review) — accepted but unused: there is no
                projection head (e.g. ``nn.Linear(n_hidden, output_size)``)
                in the original code, so outputs have size ``n_hidden``.
                Kept for interface compatibility.
        """
        super().__init__()
        self.n_features = n_features
        self.seq_len = seq_len
        self.n_hidden = n_hidden
        self.n_layers = n_layers
        self.output_size = output_size
        self.l_lstm = nn.LSTM(
            input_size=self.n_features,
            hidden_size=self.n_hidden,
            num_layers=self.n_layers,
            batch_first=True,
        )

    def init_hidden(self, batch_size, device=None):
        """Reset the recurrent state to zeros for a new batch.

        Args:
            batch_size: number of sequences in the upcoming batch.
            device: target device; defaults to the device of the model's
                own parameters (fix: the original referenced a global
                ``device`` that may not exist at import time).
        """
        if device is None:
            device = next(self.parameters()).device
        hidden_state = torch.zeros(self.n_layers, batch_size, self.n_hidden, device=device)
        cell_state = torch.zeros(self.n_layers, batch_size, self.n_hidden, device=device)
        self.hidden = (hidden_state, cell_state)

    def forward(self, x):
        """Run the LSTM over ``x`` of shape ``(batch, seq, n_features)``.

        Returns the full sequence of hidden states,
        shape ``(batch, seq, n_hidden)``.
        """
        # Fix 1: lazily (re)initialize the state so forward() works even
        # if init_hidden() was never called, or the batch size changed.
        if not hasattr(self, "hidden") or self.hidden[0].size(1) != x.size(0):
            self.init_hidden(x.size(0), device=x.device)
        lstm_out, hidden = self.l_lstm(x, self.hidden)
        # Fix 2: detach the carried-over state. Storing it with its graph
        # attached causes "Trying to backward through the graph a second
        # time" on the next training step.
        self.hidden = tuple(h.detach() for h in hidden)
        return lstm_out

    @torch.no_grad()
    def predict(self, x, n_steps):
        """Forecast ``n_steps`` future timesteps autoregressively.

        Warm up on the observed window ``x`` (shape
        ``(batch, seq, n_features)``), then repeatedly feed the last
        output back in as the next input. This is how you predict more
        steps than the training horizon — the limit is not ``hidden_dim``.

        Requires ``n_features == n_hidden`` (true for the defaults),
        because the raw hidden state is reused as the next input; with a
        projection head you would feed its output back instead.

        Returns a tensor of shape ``(batch, n_steps, n_hidden)``.
        """
        if self.n_features != self.n_hidden:
            raise ValueError(
                "autoregressive predict() requires n_features == n_hidden "
                f"(got {self.n_features} != {self.n_hidden})"
            )
        self.init_hidden(x.size(0), device=x.device)
        out, state = self.l_lstm(x, self.hidden)
        step_in = out[:, -1:, :]  # last warm-up output seeds the rollout
        preds = []
        for _ in range(n_steps):
            step_out, state = self.l_lstm(step_in, state)
            preds.append(step_out)
            step_in = step_out
        self.hidden = tuple(h.detach() for h in state)
        return torch.cat(preds, dim=1)
If anyone knows how to extend the prediction horizon, or could suggest a better way of writing this LSTM, I would really appreciate it.