Thanks al3x!

Does it make sense to you, the way I am doing it?

####################

#defining the neural network - LSTM

####################

class Sequence(nn.Module):
    """Two-layer LSTM applied cell-by-cell to a univariate sequence.

    The input is a (batch, seq_len) tensor; each time step is a single
    scalar feature pushed through two stacked ``nn.LSTMCell``s and a
    linear head that emits one value per step.

    NOTE(review): the hidden/cell states are created as float64, so the
    module's weights must match — presumably the caller converts the
    model with ``.double()``; confirm against the training script.
    """

    def __init__(self, hidden_dim):
        super(Sequence, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm1 = nn.LSTMCell(1, hidden_dim)           # 1 input feature per step
        self.lstm2 = nn.LSTMCell(hidden_dim, hidden_dim)
        self.linear = nn.Linear(hidden_dim, 1)            # one prediction per step

    def forward(self, input, future=0):
        """Return a (batch, seq_len + future) tensor of predictions.

        ``future > 0`` continues the sequence autoregressively by
        feeding each prediction back in as the next input.
        (Bug fix: the original accepted ``future`` but silently
        ignored it, so callers asking for extrapolation got none.)
        """
        outputs = []
        h_t = torch.zeros(input.size(0), self.hidden_dim, dtype=torch.double)
        c_t = torch.zeros(input.size(0), self.hidden_dim, dtype=torch.double)
        h_t2 = torch.zeros(input.size(0), self.hidden_dim, dtype=torch.double)
        c_t2 = torch.zeros(input.size(0), self.hidden_dim, dtype=torch.double)

        # Walk the observed sequence one (batch, 1) column at a time.
        for input_t in input.chunk(input.size(1), dim=1):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs.append(output)

        # Autoregressive continuation: feed the last prediction back in.
        for _ in range(future):
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs.append(output)

        # Stack per-step (batch, 1) outputs into (batch, steps).
        return torch.stack(outputs, 1).squeeze(2)

…

####################

#begin to train

####################

# Training loop: 5 LBFGS steps, each re-evaluating the full training set.
for i in range(5):

    print('STEP: ', i)

    def closure():
        """Re-evaluate the model and loss for LBFGS.

        LBFGS calls this closure several times per ``optimizer.step``
        (line searches), which is why the whole forward pass and
        ``backward()`` live inside it.
        """
        optimizer.zero_grad()
        # Bug fix: the original seeded out_tab/target_tab with
        # torch.empty(1, 1) — an UNINITIALIZED garbage row that was
        # concatenated with the real data and fed to the criterion,
        # corrupting every loss value. Accumulate in lists instead and
        # concatenate once.
        outs = []
        targets = []
        for j in range(NB_REC_TRAIN):
            out = seq(torch.from_numpy(signal_arr[j]))
            outs.append(out)
            targets.append(torch.from_numpy(anns_arr[j]))
        loss = criterion(torch.cat(outs, 0), torch.cat(targets, 0))
        print('loss:', loss.item())
        loss.backward()
        return loss

    optimizer.step(closure)

In the inner for loop, I go through each time series one by one. I included this loop within the closure() function.

Does this make sense?