IndexError: index out of range

Hello, I’m working on an 8-class classification task using a BERT model, but I keep getting an ‘index out of range’ error.

Any help would be appreciated.
Thanks

for e in range(epochs):

    print('Training...')

    # Reset the total loss for this epoch.
    total_loss = 0

    for batch in train_dataloader:

        # Move the batch tensors to the same device as the model
        batch = [b.to(device) for b in batch]

        sent_id, mask, labels = batch

        # Clear out the gradients
        model.zero_grad()

        # Forward pass
        output = model(sent_id, mask, labels)
IndexError                                Traceback (most recent call last)
<ipython-input> in <module>
     26 
     27         #Forwad pass
---> 28         output = model(sent_id, mask, labels)
     29 
     30         loss, _ = output

C:\python\envs\pytorch\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

C:\python\envs\pytorch\lib\site-packages\transformers\modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states)
   1265             inputs_embeds=inputs_embeds,
   1266             output_attentions=output_attentions,
-> 1267             output_hidden_states=output_hidden_states,
   1268         )
   1269 

C:\python\envs\pytorch\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

C:\python\envs\pytorch\lib\site-packages\transformers\modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, output_attentions, output_hidden_states)
    751 
    752         embedding_output = self.embeddings(
--> 753             input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
    754         )
    755         encoder_outputs = self.encoder(

C:\python\envs\pytorch\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

C:\python\envs\pytorch\lib\site-packages\transformers\modeling_bert.py in forward(self, input_ids, token_type_ids, position_ids, inputs_embeds)
    178             inputs_embeds = self.word_embeddings(input_ids)
    179         position_embeddings = self.position_embeddings(position_ids)
--> 180         token_type_embeddings = self.token_type_embeddings(token_type_ids)
    181 
    182         embeddings = inputs_embeds + position_embeddings + token_type_embeddings

C:\python\envs\pytorch\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

C:\python\envs\pytorch\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
    124         return F.embedding(
    125             input, self.weight, self.padding_idx, self.max_norm,
--> 126             self.norm_type, self.scale_grad_by_freq, self.sparse)
    127 
    128     def extra_repr(self) -> str:

C:\python\envs\pytorch\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1812         # remove once script supports set_grad_enabled
   1813         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1814     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1815 
   1816 

IndexError: index out of range in self

Based on the stack trace it seems you are passing an invalid index to an embedding layer. This can happen if, for example, your inputs use a larger vocabulary than the original model supports.
Note where the trace actually fails: in token_type_embeddings, not word_embeddings. In the forward signature shown in the trace, the third positional argument is token_type_ids, so model(sent_id, mask, labels) passes your class labels (0–7) as token type IDs, and BERT’s token-type embedding only has two entries.
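A minimal sketch of the fix, assuming the model is Hugging Face’s BertForSequenceClassification (the post doesn’t show how model was created, so adjust to your setup): pass attention_mask and labels as keyword arguments so labels cannot be consumed positionally, and sanity-check the indices before the forward pass.

# Sketch assuming BertForSequenceClassification (not confirmed in the post).
# Keyword arguments keep `labels` from landing in the `token_type_ids` slot.
output = model(input_ids=sent_id, attention_mask=mask, labels=labels)
loss, logits = output[:2]  # tuple output, matching the post's `loss, _ = output`

# Optional sanity checks: token ids must stay below the vocabulary size,
# and labels must lie in [0, num_labels), here 8 classes.
assert sent_id.max().item() < model.config.vocab_size
assert labels.max().item() < model.config.num_labels

If the keyword-argument call still raises the same error, the word_embeddings lookup is the likely culprit instead, which points at a tokenizer/model mismatch (token IDs larger than the checkpoint’s vocab_size).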