LSTM Error For unbatched 2-D input, hx and cx should also be 2-D but got (3-D, 3-D) tensors

I need help solving this error, please. Here is my code and the exact traceback I got; the data shape is (7, 2, 141).
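For reference, features_test in the code below has exactly that shape. My real features are loaded from files (loading code left out), but a placeholder array of the same shape would look like this:

import numpy as np
# Stand-in with the same shape as my real features; the actual data
# comes from files on disk, not random values.
features_test = np.random.rand(7, 2, 141).astype(np.float32)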


import os
import numpy as np
from pathlib import Path
import json
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import torch
import torch.nn as nn
import librosa
import librosa.display
from tqdm import tqdm
features = r"C:\Users\data"
def normalize_features(features):

inputs_m, inputs_s = features.mean(), features.std()
features = (features - inputs_m) / inputs_s
return features

features_test=normalize_features(features_test)
def sliding_windows(data, seq_length):
    x = []
    y = []

    for i in range(len(data) - seq_length - 1):
        _x = data[i:(i + seq_length)]
        _y = data[i + seq_length]
        x.append(_x)
        y.append(_y)

    return np.array(x), np.array(y)

seq_length = 5
x, y = sliding_windows(features_test, seq_length)
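# Sanity check (my own debugging): with features_test of shape (7, 2, 141)
# and seq_length = 5, the window loop runs range(7 - 5 - 1) = range(1),
# so I expect x to come out as (1, 5, 2, 141) and y as (1, 2, 141).
print(x.shape, y.shape)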
train_size = int(len(y) * 0.80)
test_size = len(y) - train_size
trainX = torch.Tensor(x[:train_size])
trainY = torch.Tensor(y[:train_size])

testX = torch.Tensor(x[train_size:])
testY = torch.Tensor(y[train_size:])
class LSTM(nn.Module):

    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(LSTM, self).__init__()

        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_length = seq_length  # uses the global seq_length defined above

        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)

        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initial hidden and cell states, shaped (num_layers, batch, hidden_size)
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)

        # Propagate input through LSTM
        ula, (h_out, _) = self.lstm(x, (h_0, c_0))

        h_out = h_out.view(-1, self.hidden_size)

        out = self.fc(h_out)

        return out
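# Shape note (my understanding from the nn.LSTM docs): with batch_first=True
# the LSTM expects 3-D input (batch, seq_len, input_size); a 2-D input is
# treated as unbatched (seq_len, input_size), and then h_0/c_0 must be 2-D
# (num_layers, hidden_size) rather than the 3-D tensors built in forward().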

num_epochs = 2000
learning_rate = 0.01

input_size = 7
hidden_size = 13
num_layers = 1

num_classes = 1

lstm = LSTM(num_classes, input_size, hidden_size, num_layers)

criterion = torch.nn.MSELoss() # mean-squared error for regression
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)
#optimizer = torch.optim.SGD(lstm.parameters(), lr=learning_rate)

# Train the model

for epoch in range(num_epochs):
    outputs = lstm(trainX)
    optimizer.zero_grad()

    # obtain the loss function
    loss = criterion(outputs, trainY)

    loss.backward()
    optimizer.step()

    if epoch % 100 == 0:
        print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))


Here is the full error traceback:

RuntimeError                              Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_19112/2621872190.py in <module>
     67 # Train the model
     68 for epoch in range(num_epochs):
---> 69     outputs = lstm(trainX)
     70     optimizer.zero_grad()
     71

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

~\AppData\Local\Temp/ipykernel_19112/2621872190.py in forward(self, x)
     43
     44         # Propagate input through LSTM
---> 45         ula, (h_out, _) = self.lstm(x, (h_0, c_0))
     46
     47         h_out = h_out.view(-1, self.hidden_size)

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

~\anaconda3\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx)
    750                 msg = ("For unbatched 2-D input, hx and cx should "
    751                        f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
--> 752                 raise RuntimeError(msg)
    753             hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
    754

RuntimeError: For unbatched 2-D input, hx and cx should also be 2-D but got (3-D, 3-D) tensors