IndexError: Target 264 is out of bounds

I am going to feed input of shape torch.Size([8, 1387, 4]) and output of shape torch.Size([8, 1387, 1]) into this model, but this error occurs at loss = criterion(outputs, batch_labels). Sorry, I am new to machine learning.
Now the batch_labels and outputs shapes are:

torch.Size([8])
torch.Size([8, 25])
IndexError: Target 264 is out of bounds.

BiLSTM(4,64,2,25)

class BiLSTM(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Runs a multi-layer bidirectional LSTM over the input sequence and
    classifies using the features of the last time step.

    Args:
        input_size: number of features per time step.
        hidden_size: LSTM hidden size (per direction).
        num_layers: number of stacked LSTM layers.
        num_classes: number of output classes.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BiLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(
            input_size, hidden_size, num_layers, batch_first=True, bidirectional=True
        )
        # Bidirectional: forward and backward hidden states are concatenated,
        # so the classifier input width is hidden_size * 2.
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        """Classify a batch of sequences.

        Args:
            x: float tensor of shape (batch, seq_len, input_size).

        Returns:
            Logits of shape (batch, num_classes).
        """
        # Fix: allocate the initial states on x.device instead of relying on a
        # module-level global `device` — the original raised NameError when no
        # such global existed and silently mismatched devices otherwise.
        h0 = torch.zeros(
            self.num_layers * 2, x.size(0), self.hidden_size, device=x.device
        )
        c0 = torch.zeros(
            self.num_layers * 2, x.size(0), self.hidden_size, device=x.device
        )

        out, _ = self.lstm(x, (h0, c0))
        # Keep only the last time step's features for classification.
        out = self.fc(out[:, -1, :])

        return out

    def train_epoch(self, num_epochs, optimizer, criterion, train_dataloader, device):
        """Train the model for `num_epochs` passes over `train_dataloader`.

        Args:
            num_epochs: number of epochs to run.
            optimizer: optimizer updating this model's parameters.
            criterion: loss function, e.g. nn.CrossEntropyLoss (expects
                class-index targets in [0, num_classes)).
            train_dataloader: iterable yielding (features, labels) batches.
            device: device to train on.
        """
        self.to(device)
        for epoch in range(num_epochs):
            self.train()
            total_loss = 0.0
            progress_bar = tqdm(train_dataloader, desc=f'Epoch {epoch + 1}/{num_epochs} training', unit='batch')

            for batch_features, batch_labels in progress_bar:
                optimizer.zero_grad()
                batch_features = batch_features.to(device).float()
                outputs = self(batch_features)
                # NOTE(review): argmax(dim=1) assumes the labels are one-hot
                # vectors of shape (batch, num_classes). The reported
                # "Target 264 is out of bounds" indicates the labels already
                # hold raw values, not one-hot encodings — confirm the label
                # format; if labels are already class indices in
                # [0, num_classes), drop the argmax entirely.
                batch_labels = batch_labels.squeeze().long().argmax(dim=1).to(device)
                loss = criterion(outputs, batch_labels)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
                progress_bar.set_postfix(loss=total_loss / len(progress_bar))

            epoch_loss = total_loss / len(train_dataloader)
            print(f'Epoch {epoch + 1}/{num_epochs}, Training Loss: {epoch_loss:.4f}')

Your output shape indicates you are working with 25 classes which would define the target values in the range [0, 24]. However, based on the error message your target contains at least one value of 264 which is out of bounds.