LSTM does not accept features coming out of the GCN convolutional layers

I am trying to train a model that combines a Graph Convolutional Network (GCN) with an LSTM. I am facing an error when passing the data to the LSTM after the GCN convolutional layers. My complete pipeline is:

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from torch_geometric.nn import GCNConv
from torch_geometric.data import Data
from torch.nn.utils.rnn import pad_sequence
from sklearn.model_selection import train_test_split
from datetime import datetime, timedelta

start_date = datetime(2023, 5, 1)
num_days = 5
num_nodes = 3

data = {'Date': [], 'Node_ID': [], 'Temperature': [], 'Speed': [], 'Label': []}

for i in range(num_days):
  current_date = start_date + timedelta(days=i)
  for j in range(num_nodes):
    data['Date'].append(current_date.strftime('%Y-%m-%d'))
    data['Node_ID'].append(j)
    data['Temperature'].append(25.0 + (j * 0.5))
    data['Speed'].append(5.0 + (j * 0.3))
    data['Label'].append(j)

df = pd.DataFrame(data)
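
For reference, this is the quick sanity check I ran on the frame (the expected values follow directly from the loop above):

print(df.shape)             # (15, 5) -> 5 days x 3 nodes, 5 columns
print(df.columns.tolist())  # ['Date', 'Node_ID', 'Temperature', 'Speed', 'Label']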

With the data generated, I next formed the graph.

nodes = []
extract_by = 'Node_ID'
for i in range(3):
  node_id = i
  node_features = df[df[extract_by] == node_id]
  node_features = node_features.sort_values(by='Date')
  node_features = node_features.drop(['Date'], axis=1)
  node_label = i
  nodes.append([node_id, node_features.values, node_label])

x = torch.tensor([node[1] for node in nodes], dtype=torch.float)
y = torch.tensor([node[2] for node in nodes], dtype=torch.long)
edges = np.array([[0, 1],
                  [0, 2],
                  [1, 2]])
edge_index = torch.tensor([[i, j] for i, j in edges], dtype=torch.long).t().contiguous()

train_mask, test_mask = train_test_split(range(x.shape[0]), test_size=0.20)
train_mask = torch.tensor(train_mask)
test_mask = torch.tensor(test_mask)
graph = Data(x=x, y=y, edge_index=edge_index, train_mask=train_mask, test_mask=test_mask)
graph.num_classes = torch.unique(y).size(0)
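
For what it is worth, checking the shapes here already shows that graph.x is 3-D (nodes x days x features), which I suspect is related to the error below:

print(graph.x.shape)           # torch.Size([3, 5, 4]) -> (num_nodes, num_days, num_features)
print(graph.y.shape)           # torch.Size([3])
print(graph.edge_index.shape)  # torch.Size([2, 3])
print(graph.num_classes)       # 3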

Next, I designed the model:

class GCN_LSTM(nn.Module):
  def __init__(self, input_dim, hidden_dim, num_classes):
    super(GCN_LSTM, self).__init__()
    self.gcn_conv1 = GCNConv(input_dim, hidden_dim)
    self.gcn_conv2 = GCNConv(hidden_dim, hidden_dim)
    self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
    self.fc = nn.Linear(hidden_dim, num_classes)
    
  def forward(self, x, edge_index):
    x = self.gcn_conv1(x, edge_index)
    x = torch.relu(x)
    x = self.gcn_conv2(x, edge_index)
    x = torch.relu(x)
    x = x.unsqueeze(0)     # add a leading batch dimension
    x = x.transpose(1, 2)  # swap dims 1 and 2
    x, _ = self.lstm(x)
    x = x.squeeze(0)
    x = self.fc(x)
    return x
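
As far as I understand, nn.LSTM with batch_first=True only accepts 2-D or 3-D input, i.e. (batch, seq_len, input_size). This minimal check runs fine on its own:

lstm_check = nn.LSTM(input_size=64, hidden_size=64, batch_first=True)
dummy = torch.randn(3, 5, 64)   # (batch, seq_len, input_size) -> 3-D is accepted
out, _ = lstm_check(dummy)
print(out.shape)                # torch.Size([3, 5, 64])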

Finally, I passed the data to the model and trained it.

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_dim = x.shape[2] 
hidden_dim = 64  
num_classes = graph.num_classes

model = GCN_LSTM(input_dim, hidden_dim, num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

model.train()
num_epochs = 10
train_mask = graph.train_mask.to(device)
test_mask = graph.test_mask.to(device)

for epoch in range(num_epochs):
  optimizer.zero_grad()

  output = model(graph.x.to(device), graph.edge_index.to(device))
  loss = criterion(output[train_mask], graph.y.to(device)[train_mask])

  loss.backward()
  optimizer.step()

The error I am facing is:

AssertionError: LSTM: Expected input to be 2-D or 3-D but received 4-D tensor

How can I solve this? I want to use the GCN to capture spatial features and the LSTM to capture temporal features.
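
To make the intent clearer, the arrangement I have in mind is roughly the following (only a sketch of the data flow I am after, using the same layer sizes as above; I am not sure it is correct): apply the GCN to each daily snapshot x[:, t, :] (a 2-D [num_nodes, num_features] matrix), stack the per-day node embeddings into a [num_nodes, num_days, hidden_dim] sequence, and run the LSTM over the day dimension.

class GCN_LSTM_Sketch(nn.Module):
  def __init__(self, input_dim, hidden_dim, num_classes):
    super().__init__()
    self.gcn_conv1 = GCNConv(input_dim, hidden_dim)
    self.gcn_conv2 = GCNConv(hidden_dim, hidden_dim)
    self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
    self.fc = nn.Linear(hidden_dim, num_classes)

  def forward(self, x, edge_index):
    # x: [num_nodes, num_days, num_features]
    embeddings = []
    for t in range(x.size(1)):
      h = self.gcn_conv1(x[:, t, :], edge_index)  # 2-D snapshot per day
      h = torch.relu(h)
      h = self.gcn_conv2(h, edge_index)
      h = torch.relu(h)
      embeddings.append(h)
    seq = torch.stack(embeddings, dim=1)  # [num_nodes, num_days, hidden_dim]
    out, _ = self.lstm(seq)               # nodes as batch, days as sequence
    return self.fc(out[:, -1, :])         # last time step -> per-node class scores

Is this the right way to combine the two, or is there a better approach?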