How to solve a RuntimeError after loading a .pt file (NLP multiclass classification)?

I am using the following model for NLP multiclass classification after successful training (starting reference, slightly modified in my example: Link):


import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class LSTM(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        self.fc1 = nn.Linear(hidden_dim * 2, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # use the constructor argument, not the global
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        embedded = self.embedding(text)
        # pack the padded batch; batch_first is left at its default (False)
        packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths)
        packed_output, (hidden, cell) = self.rnn(packed_embedded)
        # concatenate the final forward and backward hidden states
        hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
        output = self.fc1(hidden)
        output = self.dropout(self.fc2(output))
        return output
VOCAB_SIZE      = len(TEXT.vocab)
EMBEDDING_DIM   = 400
HIDDEN_DIM      = 25
OUTPUT_DIM      = 7
N_LAYERS        = 2
BIDIRECTIONAL   = True
DROPOUT         = 0.4
PAD_IDX         = TEXT.vocab.stoi[TEXT.pad_token] # padding
dropout_value   = DROPOUT
model = LSTM(VOCAB_SIZE,
             EMBEDDING_DIM,
             HIDDEN_DIM,
             OUTPUT_DIM,
             N_LAYERS,
             BIDIRECTIONAL,
             DROPOUT,
             PAD_IDX)
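
As a sanity check (a sketch I am adding for illustration, using random token ids, not part of the original tutorial), the freshly built model does accept a batch shaped [seq_len, batch] with one length entry per batch element:

dummy_batch = torch.randint(0, VOCAB_SIZE, (5, 2))  # [seq_len=5, batch=2], random token ids
dummy_lengths = torch.LongTensor([5, 5])            # one length per batch element
print(model(dummy_batch, dummy_lengths).shape)      # torch.Size([2, 7])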

path_pt = 'some_path_saved_weights.pt'
model.load_state_dict(torch.load(path_pt, map_location=torch.device('cpu')));
model = model.to(device)
model.eval();
import spacy
nlp = spacy.load('en_core_web_sm')
def predict(model, sentence):
    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]  #tokenize the sentence 
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]          #convert to integer sequence
    length = [len(indexed)]                                    #compute no. of words
    tensor = torch.LongTensor(indexed).to(device)              #convert to tensor
    tensor = tensor.unsqueeze(1).T                             #reshape in form of batch,no. of words
    length_tensor = torch.LongTensor(length)                  #convert to tensor               
    prediction = model(tensor.to(device), length_tensor.to(device))  #prediction 
    return prediction.item()  
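
For reference, here is a small shape check of the reshaping step inside predict (using a hypothetical 10-token sentence), showing the tensor that ends up going into the model:

indexed = list(range(10))           # stand-in for 10 token ids
t = torch.LongTensor(indexed)       # shape: torch.Size([10])
t = t.unsqueeze(1)                  # shape: torch.Size([10, 1])
print(t.T.shape)                    # shape: torch.Size([1, 10]) after the transpose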

The following call then raises an error:

#make predictions
predict(model, "Are there any sports that you don't like?")

The error is: RuntimeError: Expected len(lengths) to be equal to batch_size, but got 1 (batch_size=10)

The full traceback is:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-18-1abc072a78ef> in <module>
      1 #make predictions
----> 2 predict(model,"Are there any sports that you don't like?")

<ipython-input-17-2e4506080b73> in predict(model, sentence)
     27                  #unk_init   =  torch.Tensor.normal_ #torch.Tensor.zero_
     28                  #)
---> 29     prediction = model(tensor.to(device), length_tensor)
     30     return prediction.item()

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-15-b92535739dab> in forward(self, text, text_lengths)
     28 
     29         #pack sequence
---> 30         packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths)
     31 
     32         packed_output, (hidden, cell) = self.rnn(packed_embedded)

/opt/conda/lib/python3.6/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
    242 
    243     data, batch_sizes = \
--> 244         _VF._pack_padded_sequence(input, lengths, batch_first)
    245     return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
    246 

RuntimeError: Expected `len(lengths)` to be equal to batch_size, but got 1 (batch_size=10)
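
For context, this minimal sketch reproduces the same mismatch outside my model (an assumption about the cause on my part, using the default batch_first=False and the EMBEDDING_DIM of 400 defined above):

import torch
import torch.nn as nn

embedded = torch.randn(1, 10, 400)  # the shape predict() produces for a 10-token sentence, after embedding
lengths = torch.LongTensor([10])    # a single length entry
# With batch_first=False (the default), pack_padded_sequence reads dim 0
# as seq_len (1) and dim 1 as batch_size (10), so len(lengths) != batch_size.
try:
    nn.utils.rnn.pack_padded_sequence(embedded, lengths)
except RuntimeError as e:
    print(e)  # Expected `len(lengths)` to be equal to batch_size, but got 1 (batch_size=10)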

Could anyone help me fix this error?

Thank you