ValueError: Expected input batch_size (65) to match target batch_size (64)

Hi guys, I’m trying to classify some data with a CNN-BiLSTM network on top of DeBERTa and Node2Vec embeddings.
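For context, G is my graph and embeddings is a dict mapping each node ID to its Node2Vec vector; I built it roughly like this (a sketch from memory using the node2vec package, so the exact parameters may differ):

from node2vec import Node2Vec

node2vec = Node2Vec(G, dimensions=64, walk_length=30, num_walks=200, workers=4)
n2v_model = node2vec.fit(window=10, min_count=1)
embeddings = {node: n2v_model.wv[str(node)] for node in G.nodes()}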
This is my code:

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score

# (deberta_model, twitter15_vectors and twitter15_label_df are defined earlier in my script.)
class TextClassificationModel(nn.Module):
    def __init__(self, deberta_model, deberta_embedding_dim, node2vec_embeddings, lstm_hidden_size, num_classes):
        super(TextClassificationModel, self).__init__()

        self.deberta_model = deberta_model
        self.deberta_embedding_dim = deberta_embedding_dim
        self.node2vec_embeddings = node2vec_embeddings

        self.lstm_hidden_size = lstm_hidden_size
        self.num_classes = num_classes

        # Linear layer for dimensionality reduction of DeBERTa embeddings
        self.linear_layer = nn.Linear(deberta_embedding_dim, 64)  # Adjust 64 according to your requirements

        # BiLSTM layer; input size is the reduced DeBERTa dim plus the Node2Vec dim
        self.lstm = nn.LSTM(input_size=64 + list(self.node2vec_embeddings.values())[0].shape[0],
                            hidden_size=self.lstm_hidden_size, bidirectional=True, batch_first=True)

        # CNN layer
        self.cnn = nn.Conv1d(in_channels=2 * self.lstm_hidden_size, out_channels=128, kernel_size=3)

        # Fully connected layer
        self.fc = nn.Linear(128, self.num_classes)
  
    def forward(self, deberta_embeddings, node_ids):
        # Convert DeBERTa embeddings to a PyTorch tensor and apply the linear transformation
        deberta_embeddings_tensor = self.linear_layer(torch.tensor(deberta_embeddings))

        # Filter out node_ids that are not present in node2vec_embeddings
        valid_node_ids = [node_id for node_id in node_ids if node_id in self.node2vec_embeddings]

        # Check if any valid node_ids were found
        if not valid_node_ids:
            raise ValueError("None of the provided node_ids are present in node2vec_embeddings")

        # Convert node2vec embeddings from numpy arrays to tensors
        node2vec_embeds = torch.stack([torch.tensor(self.node2vec_embeddings[node_id]) for node_id in valid_node_ids])

        # Expand deberta_embeddings_tensor to match the size of node2vec_embeds
        deberta_embeddings_expanded = deberta_embeddings_tensor.unsqueeze(1).expand(-1, node2vec_embeds.size(1), -1)

        # Adjust the batch size of node2vec_embeds to match deberta_embeddings_expanded
        node2vec_embeds = node2vec_embeds[:deberta_embeddings_expanded.size(0)]

        # Concatenate DeBERTa embeddings and node2vec embeddings
        combined_embeddings = torch.cat([deberta_embeddings_expanded, node2vec_embeds.unsqueeze(0)], dim=0)
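        # (Could this cat be where the extra row comes from? dim=0 is the
        # batch dimension here, so it stacks node2vec_embeds.unsqueeze(0) on
        # top of the batch as one extra sample: 64 + 1 = 65?)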
  
        # LSTM layer
        lstm_out, _ = self.lstm(combined_embeddings)

        # Permute LSTM output for CNN input
        lstm_out = lstm_out.permute(0, 2, 1)

        # CNN layer
        cnn_out = self.cnn(lstm_out)

        # Global max pooling over the sequence dimension
        pooled_out, _ = torch.max(cnn_out, dim=2)

        # Fully connected layer
        output = self.fc(pooled_out)

        return output
  
# Instantiate the model
model = TextClassificationModel(deberta_model, deberta_embedding_dim=768, node2vec_embeddings=embeddings, lstm_hidden_size=64, num_classes=4)

# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

labels = twitter15_label_df['label'].tolist()
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)

X_train, X_test, y_train, y_test = train_test_split(twitter15_vectors, encoded_labels, test_size=0.2, random_state=42)
y_train = torch.tensor(y_train)
y_test = torch.tensor(y_test)

num_epochs = 10
batch_size = 64

print(f'Length of input data: {len(X_train)}')
print(f'Length of target labels: {len(y_train)}')
  
# Define the training function
def train_model(model, criterion, optimizer, X_train_deberta, node_ids, y_train, batch_size, num_epochs):
    model.train()
    for epoch in range(num_epochs):
        for i in range(0, len(X_train_deberta), batch_size):
            inputs_deberta = X_train_deberta[i:i+batch_size]
            labels = y_train[i:i+batch_size]

            print(f'Batch {i // batch_size + 1}: Input data length: {len(inputs_deberta)}, Target labels length: {len(labels)}')

            optimizer.zero_grad()

            inputs_node2vec = node_ids

            # Check if the current batch is the last incomplete batch
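            # (I never finished this check. The idea was something like
            #     if len(inputs_deberta) < batch_size:
            #         continue
            # to skip a trailing partial batch, but I am not sure it is related,
            # since the error already happens on the first, full batch.)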
             
            outputs = model(inputs_deberta, inputs_node2vec)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            # Log every 10 batches (i advances in steps of batch_size)
            if (i // batch_size + 1) % 10 == 0:
                print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(X_train_deberta)}], Loss: {loss.item():.4f}')

node_ids = list(G.nodes())

# Train the model
train_model(model, criterion, optimizer, X_train, node_ids, y_train, batch_size, num_epochs)
  
# Define the testing function
def test_model(model, X_test_deberta, node_ids, y_test):
    model.eval()
    with torch.no_grad():
        outputs = model(X_test_deberta, node_ids)
        _, predicted = torch.max(outputs, 1)
        acc = accuracy_score(y_test, predicted)
        precision = precision_score(y_test, predicted, average=None)
        recall = recall_score(y_test, predicted, average=None)
        print(f'Accuracy: {acc:.4f}')
        print(f'Precision: {precision}')
        print(f'Recall: {recall}')

# Test the model
test_model(model, X_test, node_ids, y_test)

However, I get this error and I don’t know what to do about it:
ValueError: Expected input batch_size (65) to match target batch_size (64).
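
From what I can tell, the error itself just means that the model output and the labels disagree in their first (batch) dimension. A tiny standalone example (nothing to do with my data) reproduces the same message:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(65, 4)           # 65 predictions over 4 classes
targets = torch.randint(0, 4, (64,))  # but only 64 labels
criterion(logits, targets)            # raises the same ValueError

So somewhere my model turns a 64-sample batch into 65 rows of output.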

I tried some debugging, but nothing worked. This is what I’ve printed so far:
Length of input data: 1192
Length of target labels: 1192
Batch 1: Input data length: 64, Target labels length: 64
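
If it helps, a shape check like this inside forward should show where the extra row appears (the variable names match my code above):

# inside forward(), just before "return output":
print('deberta_embeddings_expanded:', deberta_embeddings_expanded.shape)
print('node2vec_embeds:', node2vec_embeds.shape)
print('combined_embeddings:', combined_embeddings.shape)  # dim 0 here is the batch size that CrossEntropyLoss compares against the labels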