The size of tensor a (200) must match the size of tensor b (100) at non-singleton dimension 0

**Hi,**
I am trying to train an LSTM for time-series prediction, and I am getting this error:

    C:\Users\pltadmin\anaconda3\lib\site-packages\torch\nn\modules\loss.py:529: UserWarning: Using a target size (torch.Size([100, 1])) that is different to the input size (torch.Size([200, 1])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
      return F.mse_loss(input, target, reduction=self.reduction)
    Traceback (most recent call last):
      File "C:/Users/pltadmin/PycharmProjects/MyDemo/LSTM_ML.py", line 219, in <module>
        train_loss = train_for_epoch(lstm, optimizer, train_loader)
      File "C:/Users/pltadmin/PycharmProjects/MyDemo/LSTM_ML.py", line 191, in train_for_epoch
        loss = criterion(outputs, Y)
      File "C:\Users\pltadmin\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
        return forward_call(*input, **kwargs)
      File "C:\Users\pltadmin\anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 529, in forward
        return F.mse_loss(input, target, reduction=self.reduction)
      File "C:\Users\pltadmin\anaconda3\lib\site-packages\torch\nn\functional.py", line 3220, in mse_loss
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
      File "C:\Users\pltadmin\anaconda3\lib\site-packages\torch\functional.py", line 75, in broadcast_tensors
        return _VF.broadcast_tensors(tensors)  # type: ignore[attr-defined]
    RuntimeError: The size of tensor a (200) must match the size of tensor b (100) at non-singleton dimension 0

My code is:

    import torch
    import torch.nn as nn
    from torch.autograd import Variable


    class TimeseriesDataset(torch.utils.data.Dataset):
        def __init__(self, X, y, seq_len=1):
            self.X = X
            self.y = y
            self.seq_len = seq_len

        def __len__(self):
            return self.X.__len__() - (self.seq_len - 1)

        def __getitem__(self, index):
            return self.X[index:index + self.seq_len], self.y[index + self.seq_len - 1]


    training_data = dataTrain.iloc[0:6241, 4:6].values
    test_data = test.iloc[0:1160, 4:6].values
    label = dataTrain['G'].values

    train_dataset = TimeseriesDataset(training_data, label, seq_len=7)
    test_dataset = TimeseriesDataset(test_data, label, seq_len=7)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)


    class LSTM(nn.Module):
        def __init__(self, num_classes, input_size, hidden_size, num_layers):
            super(LSTM, self).__init__()

            self.num_classes = num_classes
            self.num_layers = num_layers
            self.input_size = input_size
            self.hidden_size = hidden_size
            # self.seq_length = seq_length

            self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                                num_layers=num_layers, batch_first=True)

            self.fc = nn.Linear(hidden_size, num_classes)

        def forward(self, x):
            h_0 = Variable(torch.zeros(
                self.num_layers, x.size(0), self.hidden_size))
            c_0 = Variable(torch.zeros(
                self.num_layers, x.size(0), self.hidden_size))

            # Propagate input through LSTM
            ula, (h_out, _) = self.lstm(x, (h_0, c_0))
            h_out = h_out.view(-1, self.hidden_size)
            out = self.fc(h_out)

            return out

Hi Syed!

Take that UserWarning seriously; the RuntimeError is the same size mismatch
surfacing again. Your criterion is comparing outputs of shape [200, 1] against
targets Y of shape [100, 1]. The 200 most likely comes from forward(): the
h_out returned by nn.LSTM has shape (num_layers, batch, hidden_size), so
h_out.view(-1, self.hidden_size) stacks the hidden states of all layers and
produces num_layers * batch rows (2 * 100 = 200, if you built the model with
num_layers = 2). Debug and fix that first, and then address any remaining
issues.
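
Here is a minimal sketch of a fixed forward(), assuming num_layers = 2
(inferred from 200 = 2 * 100 in your error) and that you want one prediction
per sample, taken from the last layer's final hidden state. Note also that
torch.autograd.Variable has been deprecated since PyTorch 0.4, so plain
tensors are used:

    def forward(self, x):
        # x: (batch, seq_len, input_size), since the LSTM uses batch_first=True
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)

        # ula: (batch, seq_len, hidden_size); h_out: (num_layers, batch, hidden_size)
        ula, (h_out, _) = self.lstm(x, (h_0, c_0))

        # Keep only the last layer's final hidden state -> (batch, hidden_size).
        # The original h_out.view(-1, self.hidden_size) stacked all layers,
        # producing num_layers * batch rows (the 200 in your error).
        out = self.fc(h_out[-1])
        return out

With that change, outputs should have shape (batch, num_classes) = (100, 1)
and match Y.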

Best.

K. Frank