LSTM classifier reports high accuracy during training but gives low accuracy when evaluated on the same training dataset

I am building an LSTM-based multi-class classifier with 17 input features and 13 output classes. During training the network reports an accuracy above 90%, but when I evaluate the trained model on the same training data I only get 51%. I am new to PyTorch; here is my code:
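
For completeness, these are the imports my snippet relies on (df is a pandas DataFrame that I load earlier from my dataset):

import pandas as pd  # df below is a pandas DataFrame loaded from my dataset
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score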

X_train = df[["Ia","Ib","Ic","Id","Iq","Mft_1","D_1","Mft_2","D_2","Mft_3","D_3","Mft_4","D_4","Mft_5","D_5","Mft_6","D_6"]].values
y_train = df[['No_OCF','OCF_1','OCF_2','OCF_3','OCF_4','OCF_5','OCF_6','OCF_14','OCF_23','OCF_36','OCF_45','OCF_16','OCF_25']].values
s1 = MinMaxScaler(feature_range=(0,1))
X_train = s1.fit_transform(X_train)
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))

#Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.tensor(y_train)
#Create DataLoader for training data
train_dataset = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=False)
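
As a quick, purely illustrative sanity check (the variable names here are just for the example), I would expect one batch from the loader to have inputs of shape (32, 1, 17) and one-hot targets of shape (32, 13):

# Inspect one batch: inputs should be (batch=32, seq_len=1, features=17),
# targets should be one-hot vectors over the 13 classes, i.e. (32, 13).
sample_inputs, sample_targets = next(iter(train_loader))
print(sample_inputs.shape, sample_targets.shape)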

seed = 42
torch.manual_seed(seed)
#Check if CUDA (GPU) is available and set the seed for GPU
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)

#Define LSTM model
class LSTMModel(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        _, (hidden, _) = self.lstm(x)
        x = hidden[-1]
        x = self.fc(x)
        return x

#Create an instance of the LSTM model
model = LSTMModel(input_size=17, hidden_size=64, num_layers=1, output_size=13)
#Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
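
Just to illustrate the expected shapes (the dummy batch below is only an example, not part of my training code), passing a random batch through the model should give raw logits of shape (batch, 13), which is what CrossEntropyLoss expects together with class-index targets:

# Dummy batch: 4 samples, sequence length 1, 17 features -> (4, 13) logits
dummy = torch.randn(4, 1, 17)
print(model(dummy).shape)  # torch.Size([4, 13])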

Train the model

num_epochs = 5
size = len(train_loader.dataset)
for epoch in range(num_epochs):
    epoch_loss = 0.0
    correct = 0.0
    total_samples = 0.0
    model.train()
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        targets = targets.argmax(dim=1)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        # Compute accuracy
        total_samples += targets.size(0)
        correct += (outputs.argmax(1) == targets).sum().item()
    avg_loss = epoch_loss / len(train_loader)
    correct /= total_samples
    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {avg_loss}, Accuracy: {(100*correct):>0.1f}%")

Testing on training dataset

def evaluate(saved_model, dataloader):
    saved_model.eval()  # Set the model in evaluation mode

    all_labels = []
    all_predictions = []
    with torch.no_grad():
        for inputs, labels in dataloader:
            # Forward pass
            outputs = saved_model(inputs)
            labels = labels.argmax(dim=1)
            # Predict class labels
            _, predictions = torch.max(outputs, 1)
            all_labels.extend(labels.cpu().numpy())
            all_predictions.extend(predictions.cpu().numpy())
    return all_labels, all_predictions

Evaluate the model

true_labels, predicted_labels = evaluate(saved_model, test_loader)

Calculate accuracy

accuracy = accuracy_score(true_labels, predicted_labels)
print(f'Accuracy: {accuracy:.4f}')

I tried changing the batch size and the hidden size, but it didn't help.
Kindly let me know if there is anything wrong with the code. Thanks.