I am trying to create an LSTM-based model to deal with time-series data (nearly a million rows). I created my train and test sets and transformed the shapes of my tensors between sequence and labels as follows:

```
seq shape : torch.Size([1024, 1, 1])
labels shape : torch.Size([1024, 1, 1])
train_window =1 (one time step at a time)
```

Obviously, my batch size, as indicated in the shape, is 1024, and then I built my LSTM class:

```
class LSTM(nn.Module):
    """LSTM regressor for per-step time-series prediction.

    Expects input of shape (batch, seq_len, input_size) (batch_first=True)
    and returns predictions of shape (num_layers * batch, num_classes),
    built from the final hidden state of each layer.
    """

    def __init__(self, num_classes, input_size, hidden_size, num_layers,
                 seq_length=1):
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Was `self.seq_length = train_window`, a hidden dependency on a
        # notebook-level global; now an explicit parameter defaulting to the
        # question's train_window = 1 (backward-compatible keyword argument).
        self.seq_length = seq_length
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, input):
        """Run one forward pass; input: (batch, seq_len, input_size)."""
        batch_size = input.size(0)
        # Fresh zero states per call. `Variable` is deprecated (a no-op since
        # PyTorch 0.4); plain tensors work, created on the input's device so
        # the model also runs on GPU without edits.
        hidden_state = torch.zeros(self.num_layers, batch_size,
                                   self.hidden_size,
                                   device=input.device, dtype=input.dtype)
        cell_state = torch.zeros(self.num_layers, batch_size,
                                 self.hidden_size,
                                 device=input.device, dtype=input.dtype)
        # nn.LSTM returns (output, (h_n, c_n)); the original named h_n
        # `output`, which obscured what was being fed to the linear layer.
        lstm_out, (h_n, c_n) = self.lstm(input, (hidden_state, cell_state))
        # Flatten (num_layers, batch, hidden) -> (num_layers * batch, hidden).
        out = h_n.view(-1, self.hidden_size)
        # BUG FIX: the original had `out = self.dropout(out)` here, but no
        # `self.dropout` layer was ever defined and the local `out` did not
        # exist yet -> it would have raised the moment the LSTM call
        # succeeded. Removed; add `self.dropout = nn.Dropout(p)` in __init__
        # if dropout is actually wanted.
        return self.fc(out)
```

But when I started training using the following code:

```
# Build the model; hidden_size=100 units, a single LSTM layer, scalar output.
model = LSTM(num_classes=1, input_size=1, hidden_size=100, num_layers=1)
# NOTE(review): `EPOCHS` and `optimizer` are used here but defined elsewhere
# in the notebook — confirm they exist before this cell runs.
for epoch in range(1, EPOCHS + 1):
# Train on the training data in a federated way
train(model, device, federated_train_loader, optimizer, epoch)
#inside train()
...
# Evaluation pass: gradients disabled because no backprop happens here.
with torch.no_grad():
for batch_idx, (seq, labels) in enumerate(federated_test_loader):
# Send the model to the right gateway
# (PySyft federated learning: the model moves to the worker holding the data)
model.send(seq.location)
# Move the data and target labels to the device (cpu/gpu) for computation
seq, labels = seq.to(device), labels.to(device)
# Make a prediction
output = model(seq)
```

I got an error, probably related to my batch sizes. I tried playing around with the shapes and changing the LSTM class, but I was unable to detect the error. Could you please help?

```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<timed exec> in <module>
<ipython-input-35-7e1b9e7fe906> in train(model, device, federated_train_loader, optimizer, epoch)
13 print('seq shape : {}'.format(seq.shape))
14 print('labels shape : {}'.format(labels.shape))
---> 15 output = model(seq)
16 # Calculate huber loss for regression problems
17 labels =labels.view(-1)
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
<ipython-input-32-598be3037a3e> in forward(self, input)
26
27 # Propagate input through LSTM
---> 28 ula, (output, _) = self.lstm(input, (hidden_state, cell_state))
29 output = output.view(-1, self.hidden_size)
30
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
562 return self.forward_packed(input, hx)
563 else:
--> 564 return self.forward_tensor(input, hx)
565
566
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/rnn.py in forward_tensor(self, input, hx)
541 unsorted_indices = None
542
--> 543 output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
544
545 return output, self.permute_hidden(hidden, unsorted_indices)
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/rnn.py in forward_impl(self, input, hx, batch_sizes, max_batch_size, sorted_indices)
521 hx = self.permute_hidden(hx, sorted_indices)
522
--> 523 self.check_forward_args(input, hx, batch_sizes)
524 if batch_sizes is None:
525 result = _VF.lstm(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/rnn.py in check_forward_args(self, input, hidden, batch_sizes)
494 def check_forward_args(self, input, hidden, batch_sizes):
495 # type: (Tensor, Tuple[Tensor, Tensor], Optional[Tensor]) -> None
--> 496 self.check_input(input, batch_sizes)
497 expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
498
~/anaconda3/envs/ftorch/lib/python3.7/site-packages/torch/nn/modules/rnn.py in check_input(self, input, batch_sizes)
147 raise RuntimeError(
148 'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
--> 149 self.input_size, input.size(-1)))
150
151 def get_expected_hidden_size(self, input, batch_sizes):
RuntimeError: input.size(-1) must be equal to input_size. Expected 1, got 0
```