Model runs fine during training and testing, but fails at inference on new data

My model has the following structure. I am able to train and test it, but when I later run it on new data, it fails with the error Expected hidden[0] size (2, 188, 64), got (2, 150, 64).
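
For context, PyTorch's nn.LSTM expects each of (h_0, c_0) to be shaped (num_layers, batch, hidden_size) when batch_first is left at its default of False, so the mismatch above is in the batch dimension: the input implies a batch of 188 while the stored state was built for 150. A minimal sketch that reproduces the same kind of error (the input_size of 10 and seq_len of 1 are made up; hidden_size=64 and num_layers=2 match the error message):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=10, hidden_size=64, num_layers=2)

# States built for a batch of 150 ...
h0 = torch.zeros(2, 150, 64)
c0 = torch.zeros(2, 150, 64)

# ... fed an input whose batch dimension is 188: (seq_len, batch, input_size).
x = torch.randn(1, 188, 10)
out, (hn, cn) = lstm(x, (h0, c0))
# RuntimeError: Expected hidden[0] size (2, 188, 64), got (2, 150, 64)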

Model

import torch
from torch import nn, Tensor


class BasicLSTM(nn.Module):
    def __init__(self, 
                 device,
                 input_dims: int, 
                 hidden_dims: int, 
                 output_dims: int,
                 batch_size: int,  # was an undefined global; made explicit here
                 num_layers: int = 2,
                 dropout: float = 0.5):
        super().__init__()
        
        self.model_type = "BasicLSTM"
        
        self.device = device
        self.input_dims = input_dims
        self.output_dims = output_dims 
        self.hidden_dims = hidden_dims
        self.num_layers = num_layers
        self.dropout = dropout
        
        self.lstm1 = nn.LSTM(input_size=input_dims, 
                             hidden_size=self.hidden_dims, 
                             num_layers=self.num_layers, 
                             dropout=self.dropout,
                             # batch_first=True
                             )
        
        self.fc = nn.Linear(self.hidden_dims, self.output_dims)
        
        # Initial (h_0, c_0), created once with a fixed batch size.
        hidden_state = torch.randn(self.num_layers, batch_size, self.hidden_dims).to(self.device)
        cell_state = torch.randn(self.num_layers, batch_size, self.hidden_dims).to(self.device)
        self.hidden_cell = (hidden_state, cell_state)
        
    def forward(self, x: Tensor):
        # Prepend a length-1 sequence dim: (batch, features) -> (1, batch, features).
        x = x.unsqueeze(0).float().to(self.device)
        out, self.hidden_cell = self.lstm1(x, self.hidden_cell)
        preds = self.fc(out)
        
        return preds[-1]
        

New predictions

import numpy as np

def predict_future(time: int, future_data):
    model.eval()
    for i in range(time):
        # Re-tensorize the (growing) history on every step.
        seq = torch.FloatTensor(future_data)
        
        with torch.no_grad():
            # np.append returns a new array, so the result must be reassigned.
            future_data = np.append(future_data, model(seq).item())
    return future_data

Here future_data is a NumPy n-dimensional array.
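
Note what the unsqueeze in forward does to such an array: with batch_first left at False, the row count of future_data lands in the batch slot rather than the sequence slot. A quick shape check (the sizes here are hypothetical):

import numpy as np
import torch

future_data = np.zeros((150, 5), dtype=np.float32)  # (rows, features), made-up sizes
seq = torch.FloatTensor(future_data)                # torch.Size([150, 5])
x = seq.unsqueeze(0)                                # torch.Size([1, 150, 5])
# With batch_first=False, nn.LSTM reads this as
# (seq_len=1, batch=150, input_size=5): the 150 rows become the batch size.
print(x.shape)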

It turned out I had more inputs at inference time than I had tested with. Matching the number of inputs fixed the error, but I'm not sure how the input count relates to the batch size.
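
The connection is that the hidden state is built once in __init__ for one fixed batch size, so any input whose row count differs from that number triggers the mismatch. One common fix (a sketch, not the only option) is to build a fresh zero state inside forward, sized from the incoming batch dimension:

    def forward(self, x: Tensor):
        x = x.unsqueeze(0).float().to(self.device)
        batch = x.size(1)  # batch dim, since batch_first=False
        hidden = (torch.zeros(self.num_layers, batch, self.hidden_dims, device=self.device),
                  torch.zeros(self.num_layers, batch, self.hidden_dims, device=self.device))
        out, _ = self.lstm1(x, hidden)
        preds = self.fc(out)
        return preds[-1]

Alternatively, pass no state at all: nn.LSTM defaults (h_0, c_0) to zeros sized to match the input, which makes the model batch-size agnostic.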