IndexError: Target 604 is out of bounds

Error message:

IndexError                                Traceback (most recent call last)
<ipython-input-57-83c130993917> in <cell line: 5>()
      3 get_ipython().system('rm -rf /content/models')
      4 get_ipython().system('mkdir /content/models')
----> 5 train_losses, val_losses =  train()
      6 torch.cuda.empty_cache()
      7 plt.plot(train_losses)

3 frames
<ipython-input-56-2997423f3ed9> in train()
    139           # loss = criterion(outputs.view(-1, 13), labels.view(-1))
    140           print(s_pred.shape)
--> 141           s_loss = criterion(s_pred, s_labels)
    142           r_loss = criterion(r_pred, r_labels)
    143           o_loss = criterion(o_pred, o_labels)

/usr/local/lib/python3.9/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.9/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
   1172 
   1173     def forward(self, input: Tensor, target: Tensor) -> Tensor:
-> 1174         return F.cross_entropy(input, target, weight=self.weight,
   1175                                ignore_index=self.ignore_index, reduction=self.reduction,
   1176                                label_smoothing=self.label_smoothing)

/usr/local/lib/python3.9/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
   3027     if size_average is not None or reduce is not None:
   3028         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3029     return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
   3030 
   3031 

IndexError: Target 604 is out of bounds.
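For reference, this particular IndexError comes from `nn.CrossEntropyLoss` whenever a target index is >= the number of classes in the logits. A minimal standalone sketch that triggers it (shapes made up for illustration, unrelated to the real model below):

```python
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(4, 6)                    # batch of 4 samples, 6 classes (as in s_classifier)

valid_targets = torch.tensor([0, 5, 2, 1])    # all indices in [0, 5]
print(criterion(logits, valid_targets))       # works

invalid_targets = torch.tensor([0, 604, 2, 1])
criterion(logits, invalid_targets)            # IndexError: Target 604 is out of bounds.
```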

This is our training loop:

```python
for epoch in tqdm(range(1, epochs + 1)):

    model.train()
    loss_train_total = 0
    # train_predictions, train_true_vals = [], []
    s_train_predictions, s_train_true_vals = [], []
    r_train_predictions, r_train_true_vals = [], []
    o_train_predictions, o_train_true_vals = [], []

    progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)

    for batch in progress_bar:
        model.zero_grad()
        batch = tuple(b.to(device) for b in batch.values())

        inputs = {'image_emb': batch[0], 'text_emb': batch[1]}
        s_labels = batch[2]
        r_labels = batch[3]
        o_labels = batch[4]

        s_pred, r_pred, o_pred = model(**inputs)
        # loss = criterion(outputs.view(-1, 13), labels.view(-1))
        print(s_pred.shape)
        s_loss = criterion(s_pred, s_labels)
        r_loss = criterion(r_pred, r_labels)
        o_loss = criterion(o_pred, o_labels)

        loss = s_loss + r_loss + o_loss
        loss_train_total += s_loss.item() + r_loss.item() + o_loss.item()

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        s_logits = s_pred.argmax(-1)
        r_logits = r_pred.argmax(-1)
        o_logits = o_pred.argmax(-1)
```

This is the model:

```python
class MultiTaskNet(nn.Module):

    def __init__(self, hyperparms=None):
        super(MultiTaskNet, self).__init__()
        self.dropout = nn.Dropout(0.3)
        self.vision_projection = nn.Linear(2048, 768)
        self.text_projection = nn.Linear(768, 768)  # here
        self.fc1 = nn.Linear(768, 256)
        self.bn1 = nn.BatchNorm1d(256)

        self.s_classifier = nn.Linear(256, 6)
        self.r_classifier = nn.Linear(256, 163)
        self.o_classifier = nn.Linear(256, 45)

        W = torch.Tensor(768, 768)
        self.W = nn.Parameter(W)
        self.relu_f = nn.ReLU()
        # initialize weight matrices
        nn.init.kaiming_uniform_(self.W, a=math.sqrt(5))

    def forward(self, image_emb, text_emb):
        x1 = image_emb
        x1 = torch.nn.functional.normalize(x1, p=2, dim=1)
        Xv = self.relu_f(self.vision_projection(x1))

        x2 = text_emb
        x2 = torch.nn.functional.normalize(x2, p=2, dim=1)
        Xt = self.relu_f(self.text_projection(x2))

        Xvt = Xv * Xt
        Xvt = self.relu_f(torch.mm(Xvt, self.W.t()))

        Xvt = self.fc1(Xvt)
        Xvt = self.bn1(Xvt)
        Xvt = self.dropout(Xvt)

        s_Xvt = self.s_classifier(Xvt)
        r_Xvt = self.r_classifier(Xvt)
        o_Xvt = self.o_classifier(Xvt)

        return s_Xvt, r_Xvt, o_Xvt
```

The classifiers defined in your model:

    self.s_classifier = nn.Linear(256, 6)
    self.r_classifier = nn.Linear(256, 163)
    self.o_classifier = nn.Linear(256, 45) 

output logits for 6, 163, and 45 classes, respectively, while the targets contain class indices outside of these ranges.
In particular, s_labels seems to contain a class index of 604, which would require logits for at least 605 classes.
Make sure all labels contain class indices in [0, nb_classes - 1], or increase the number of output features of the corresponding classifier in your model.
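If it helps, here is a quick diagnostic along those lines — a minimal sketch that assumes the same `dataloader_train` and batch layout (labels in `batch[2]`, `batch[3]`, `batch[4]`) as the training loop above:

```python
import torch

# Output features of the s/r/o classifiers in the model above.
num_classes = {'s': 6, 'r': 163, 'o': 45}
lo = {k: float('inf') for k in num_classes}
hi = {k: float('-inf') for k in num_classes}

# Scan all batches and record the min/max target index per task.
for batch in dataloader_train:
    batch = tuple(b for b in batch.values())
    for key, labels in zip(('s', 'r', 'o'), batch[2:5]):
        lo[key] = min(lo[key], int(labels.min()))
        hi[key] = max(hi[key], int(labels.max()))

for key in num_classes:
    print(f"{key}: targets in [{lo[key]}, {hi[key]}], "
          f"classifier expects [0, {num_classes[key] - 1}]")
```

If the maxima turn out to be raw label IDs rather than contiguous indices (e.g. 604), remapping them once with something like `{label: idx for idx, label in enumerate(sorted(all_labels))}` is usually simpler than enlarging the classifier.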

Fixed, thanks for the help!