The error is IndexError: Target 2 is out of bounds. I need some guidance, please.

class DeBERTadataset:
    def __init__(self, texts, target):
        self.tokenizer = config.TOKENIZER
        self.max_len = config.max_len
        self.texts = texts
        self.target = target

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        texts = str(self.texts[index])
        texts = ' '.join(texts.split())   # collapse repeated whitespace
        target = self.target[index]

        inputs = self.tokenizer.encode_plus(
            texts,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',
            truncation=True
        )

        resp = {
            'ids': torch.tensor(inputs['input_ids'], dtype=torch.long),
            'mask': torch.tensor(inputs['attention_mask'], dtype=torch.long),
            'token_type_ids': torch.tensor(inputs['token_type_ids'], dtype=torch.long),
            'target': torch.tensor(target, dtype=torch.long)
        }

        return resp
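
Roughly, the dataset is wired into a DataLoader like this (train_texts / train_targets are placeholders for my actual lists):

from torch.utils.data import DataLoader

train_dataset = DeBERTadataset(texts=train_texts, target=train_targets)
train_data_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)

batch = next(iter(train_data_loader))
print(batch['ids'].shape, batch['target'].shape)  # e.g. torch.Size([4, max_len]), torch.Size([4])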

class BERTModel(nn.Module):
    def __init__(self):
        super(BERTModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False)
        self.dropout = nn.Dropout(0.3)
        self.classified = nn.Linear(768, 2)

    def forward(self, ids, mask, token_type_ids):
        _, pooled_output = self.bert(
            input_ids=ids,
            attention_mask=mask,
            token_type_ids=token_type_ids
        )

        pooled_output = self.dropout(pooled_output)
        outputs = self.classified(pooled_output)
        return outputs

import torch.nn as nn
from tqdm import tqdm
import torch

def loss_fn(outputs, target):
    return nn.CrossEntropyLoss()(outputs, target)

def train_fx(data_loader, model, optimizer, scheduler, device):
    model.train()

    train_loss = 0
    for bi, d in tqdm(enumerate(data_loader)):
        ids = d['ids']
        mask = d['mask']
        token_type_ids = d['token_type_ids']
        target = d['target']

        # putting them into the device
        ids = ids.to(device, dtype=torch.long)
        mask = mask.to(device, dtype=torch.long)
        token_type_ids = token_type_ids.to(device, dtype=torch.long)
        target = target.to(device, dtype=torch.long)

        optimizer.zero_grad()
        outputs = model(
            ids=ids,
            mask=mask,
            token_type_ids=token_type_ids
        )

        loss = loss_fn(outputs, target)
        loss.backward()
        optimizer.step()
        scheduler.step()
        train_loss += loss.item()
    train_loss = train_loss / len(data_loader)

    print(f'Train loss: {train_loss:.3f}')

def eval_fx(data_loader, model, device):
    model.eval()
    valid_loss = 0

    #final_target = []
    #final_outputs = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader)):
            ids = d['ids']
            mask = d['mask']
            token_type_ids = d['token_type_ids']
            target = d['target']

            # putting them into the device
            ids = ids.to(device, dtype=torch.long)
            mask = mask.to(device, dtype=torch.long)
            token_type_ids = token_type_ids.to(device, dtype=torch.long)
            target = target.to(device, dtype=torch.long)

            outputs = model(
                ids=ids,
                mask=mask,
                token_type_ids=token_type_ids
            )
            loss = loss_fn(outputs, target)

            valid_loss += loss.item()
        valid_loss = valid_loss / len(data_loader)

    print(f'valid loss: {valid_loss:.3f}')

def accuracy_metrics(outputs, target):
    outputs = torch.argmax(outputs, dim=1).cpu().detach().numpy()  # argmax over the class dimension of the linear layer's logits
    target = target.cpu().detach().numpy()
    return {'Accuracy': metrics.accuracy_score(target, outputs)}

from tqdm import tqdm

best_accuracy = 0
for epoch in range(config.epochs):
    train_fx(train_data_loader, model, optimizer, scheduler, device)
    outputs, target = eval_fx(valid_data_loader, model, device)
    accuracy = accuracy_metrics(outputs, target)
    print(f'Accuracy score ----- {accuracy}')
    if accuracy > best_accuracy:
        torch.save(model.state_dict(), config.model_path)
        best_accuracy = accuracy

The error is raised in the loss calculation because your target contains values that are out of bounds.
Based on your code, it seems you are working on a binary classification problem and treating it as a 2-class multi-class classification with nn.CrossEntropyLoss. In this case the target should be a LongTensor containing class indices in the range [0, nb_classes-1], so [0, 1] in your case.
A target value of 2 is thus invalid and you would have to check why it’s there (and remove it).
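
As a minimal sketch of the requirement, plus a quick sanity check you could run on your labels before training (train_targets is a placeholder for your full label list):

import torch
import torch.nn as nn

logits = torch.randn(4, 2)                # model with 2 output classes
bad_target = torch.tensor([0, 1, 2, 1])   # 2 is not a valid class index for 2 classes
# nn.CrossEntropyLoss()(logits, bad_target)  # would raise: IndexError: Target 2 is out of bounds.

num_classes = 2
all_targets = torch.tensor(train_targets)  # placeholder for your full label list
assert all_targets.min().item() >= 0 and all_targets.max().item() < num_classes, \
    f"labels must be in [0, {num_classes - 1}], got max {all_targets.max().item()}"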

I’m working on a multi-class classification problem, where the target has 3 classes (effective, ineffective, adequate). I changed the num_classes from 2 to 3, so the labels are 0, 1, 2, and the problem was solved.
class BERTModel(nn.Module):
    def __init__(self):
        super(BERTModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False)
        self.dropout = nn.Dropout(0.3)
        self.classified = nn.Linear(768, 2)
I changed the 2 in self.classified to a 3 and it worked. :grinning:
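
For reference, the updated __init__ (same as above, with the head widened to 3 outputs):

class BERTModel(nn.Module):
    def __init__(self):
        super(BERTModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False)
        self.dropout = nn.Dropout(0.3)
        self.classified = nn.Linear(768, 3)   # 3 classes: effective / ineffective / adequate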

I’m having an issue: cannot unpack non-iterable float object. This is my code at the top of this comment. I’ve checked the shape of my outputs and target for both the training and validation loops and it gave me this:
Training:
Outputs - torch.Size([4, 3])
Target - torch.Size([4])

Validation:
Outputs - torch.Size([1, 3])
Target - torch.Size([1])

So I don’t know why it’s showing me errors in my validation loop.
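
For what it’s worth, a standalone check with those exact shapes runs fine, so the shapes themselves shouldn’t be the problem (dummy tensors, not my real data):

import torch
import torch.nn as nn

outputs = torch.randn(4, 3)          # [batch, num_classes], as in my training loop
target = torch.randint(0, 3, (4,))   # [batch] class indices in [0, 2]
print(nn.CrossEntropyLoss()(outputs, target).item())  # works: expects [N, C] logits and [N] targets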

Which line of code is raising this issue?
Could you post the complete stacktrace, please?

def eval_fx(data_loader, model, device):
    model.eval()
    valid_loss = 0

    #final_target = []
    #final_outputs = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader)):
            ids = d['ids']
            mask = d['mask']
            token_type_ids = d['token_type_ids']
            target = d['target']

            # putting them into the device
            ids = ids.to(device, dtype=torch.long)
            mask = mask.to(device, dtype=torch.long)
            token_type_ids = token_type_ids.to(device, dtype=torch.long)
            target = target.to(device, dtype=torch.long)

            outputs = model(
                ids=ids,
                mask=mask,
                token_type_ids=token_type_ids
            )
           final_target.extend(target.cpu().detach.numpy.tolist())
final_outputs.extend(torch.argmax(outputs).cpu().detach().numpy().tolist())

return final_target, final_outputs

best_accuracy = 0
for epoch in range(config.epochs):
    train_fx(train_data_loader, model, optimizer, scheduler, device)
    outputs, target = eval_fx(valid_data_loader, model, device)
    accuracy = accuracy_metrics(outputs, target)
    print(f'Accuracy score ----- {accuracy}')
    if accuracy > best_accuracy:
        torch.save(model.state_dict(), config.model_path)
        best_accuracy = accuracy

The problem lies here: outputs, target = eval_fx(valid_data_loader, model, device)

Based on the stacktrace I guess eval_fx is returning a single float value, while you are expecting two returned objects:

def eval_fx():
    return 1.

outputs, target = eval_fx()
# TypeError: cannot unpack non-iterable float object

Yes, I am expecting two values instead of a single one.

How do I go about it, and what is the remedy for this error?

Check the method definition and return two objects instead of one.
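
For example, something like this (a toy sketch of the shape of the fix):

def eval_fx():
    final_target = [0, 1, 0]
    final_outputs = [0, 1, 1]
    return final_target, final_outputs   # two objects -> a tuple

target, outputs = eval_fx()              # unpacks without the TypeError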

Hi,
From what I can understand, you are trying to collect the targets (final_target) and model outputs (final_outputs) for every batch of your dataset into those two lists.

Now, given that, these lines aren’t indented properly in the code you’ve shown here:

final_target.extend(target.cpu().detach.numpy.tolist())
final_outputs.extend(torch.argmax(outputs).cpu().detach().numpy().tolist())
return final_target, final_outputs

Can you try this:

def eval_fx(data_loader, model, device):
  model.eval()
  valid_loss = 0

  final_target = []
  final_outputs = []
  with torch.no_grad():
    for bi, d in tqdm(enumerate(data_loader)):
      ids = d['ids']
      mask = d['mask']
      token_type_ids = d['token_type_ids']
      target = d['target']
    

      ids = ids.to(device, dtype=torch.long)
      mask = mask.to(device, dtype=torch.long)
      token_type_ids = token_type_ids.to(device, dtype=torch.long)
      target = target.to(device, dtype=torch.long)
            
      outputs = model(ids = ids,
                      mask = mask,
                      token_type_ids = token_type_ids
                      )
      final_target.extend(target.cpu().detach().numpy().tolist())
      final_outputs.extend(torch.argmax(outputs, dim=1).cpu().detach().numpy().tolist())

  return final_target, final_outputs
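
With that, the unpacking in your training loop should work, and the accuracy can be computed from the two returned lists, e.g. (assuming the loader/model/config objects from your earlier loop are in scope; note the function returns targets first):

from sklearn import metrics

final_target, final_outputs = eval_fx(valid_data_loader, model, device)
accuracy = metrics.accuracy_score(final_target, final_outputs)
print(f'Accuracy score ----- {accuracy:.4f}')

if accuracy > best_accuracy:
    torch.save(model.state_dict(), config.model_path)
    best_accuracy = accuracy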

class Classifier(pl.LightningModule):

  def __init__(self):
    super().__init__()
    self.MFB = MFB(512,768,True,256,64,0.1)
    self.fin_y_shape = torch.nn.Linear(768,512)
    self.fin_old = torch.nn.Linear(64,2)
    self.fin = torch.nn.Linear(16 * 768, 64)
    self.fin_inten = torch.nn.Linear(64,5)
    self.fin_e1 = torch.nn.Linear(64,2)
    self.fin_e2 = torch.nn.Linear(64,2)
    self.fin_e3 = torch.nn.Linear(64,2)
    self.fin_e4 = torch.nn.Linear(64,2)
    self.fin_e5 = torch.nn.Linear(64,2)
    self.fin_e6 = torch.nn.Linear(64,2)
    self.fin_e7 = torch.nn.Linear(64,2)
    self.fin_e8 = torch.nn.Linear(64,2)
    self.fin_e9 = torch.nn.Linear(64,2)
    


    self.validation_step_outputs = []
    self.test_step_outputs = []

  def forward(self, x,y,rag):
      x_,y_,rag_ = x,y,rag
      print("x.shape", x.shape)
      z = self.MFB(torch.unsqueeze(y, axis=1), torch.unsqueeze(x, axis=1))
      #cross_attention= (rag and  x/y)
      z_new = torch.squeeze(z, dim=1)
      c_inten = self.fin_inten(z_new)
      c_e1 = self.fin_e1(z_new)
      c_e2 = self.fin_e2(z_new)
      c_e3 = self.fin_e3(z_new)
      c_e4 = self.fin_e4(z_new)
      c_e5 = self.fin_e5(z_new)
      c_e6 = self.fin_e6(z_new)
      c_e7 = self.fin_e7(z_new)
      c_e8 = self.fin_e8(z_new)
      c_e9 = self.fin_e9(z_new)
      c = self.fin_old(z_new)


      output = torch.log_softmax(c, dim=1)
      c_inten = torch.log_softmax(c_inten, dim=1)
      c_e1 = torch.log_softmax(c_e1, dim=1)
      c_e2 = torch.log_softmax(c_e2, dim=1)
      c_e3 = torch.log_softmax(c_e3, dim=1)
      c_e4 = torch.log_softmax(c_e4, dim=1)

      c_e5 = torch.log_softmax(c_e5, dim=1)
      c_e6 = torch.log_softmax(c_e6, dim=1)
      c_e7 = torch.log_softmax(c_e7, dim=1)

      c_e8 = torch.log_softmax(c_e8, dim=1)
      c_e9 = torch.log_softmax(c_e9, dim=1)


      return output,c_e1,c_e2,c_e3,c_e4,c_e5,c_e6,c_e7,c_e8,c_e9,c_inten

  def cross_entropy_loss(self, logits, labels):
    return F.nll_loss(logits, labels)

  def training_step(self, train_batch, batch_idx):
      #lab,txt,rag,img,name,per,iro,alli,ana,inv,meta,puns,sat,hyp= train_batch
      lab,txt,rag,img,name,e1,e2,e3,e4,e5,e6,e7,e8,e9,intensity = train_batch
      #logit_offen,a,b,c,d,e,f,g,h,i,logit_inten_target= self.forward(txt,img,rag)
      lab = train_batch[lab]
      #print(lab)
      name= train_batch[name]
      txt = train_batch[txt]
      rag = train_batch[rag]
      img = train_batch[img]
      e1 = train_batch[e1]
      e2 = train_batch[e2]
      e3 = train_batch[e3]
      e4 = train_batch[e4]
      e5 = train_batch[e5]
      e6 = train_batch[e6]
      e7 = train_batch[e7]
      e8 = train_batch[e8]
      e9 = train_batch[e9]
      intensity = train_batch[intensity]
      # per = train_batch[per]
      # iro= train_batch[iro]
      # alli = train_batch[alli]
      # ana = train_batch[ana]
      # inv = train_batch[inv]
      # meta = train_batch[meta]
      # puns = train_batch[puns]
      # sat = train_batch[sat]
      # hyp = train_batch[hyp]

      logit_offen,a,b,c,d,e,f,g,h,i,logit_inten_target= self.forward(txt,img,rag)

      loss1 = self.cross_entropy_loss(logit_offen, lab)
      loss4 = self.cross_entropy_loss(a, e1)
      loss5 = self.cross_entropy_loss(b, e2)
      loss6 = self.cross_entropy_loss(c, e3)
      loss7 = self.cross_entropy_loss(d, e4)
      loss8 = self.cross_entropy_loss(e, e5)
      loss9 = self.cross_entropy_loss(f, e6)
      loss10 = self.cross_entropy_loss(g, e7)
      loss11 = self.cross_entropy_loss(h, e8)
      loss12 = self.cross_entropy_loss(i, e9)
      loss17 = self.cross_entropy_loss(logit_inten_target, intensity)
      # loss2 = self.cross_entropy_loss(a,per)
      # loss3 = self.cross_entropy_loss(b,iro)
      # loss4 = self.cross_entropy_loss(c, alli)
      # loss5 = self.cross_entropy_loss(d,ana)
      # loss6 = self.cross_entropy_loss(e,inv)
      # loss7 = self.cross_entropy_loss(f,meta)
      # loss8 = self.cross_entropy_loss(g,puns)
      # loss9 = self.cross_entropy_loss(h,sat)
      # loss10 = self.cross_entropy_loss(i,hyp)

      loss = loss1 + loss4 + loss5 + loss6 + loss7 + loss8 +loss9 + loss10 +loss11 +loss12

      self.log('train_loss', loss)
      return loss


  def validation_step(self, val_batch, batch_idx):
      #lab,txt,rag,img,name,per,iro,alli,ana,inv,meta,puns,sat,hyp = val_batch
      lab,txt,rag,img,name,e1,e2,e3,e4,e5,e6,e7,e8,e9,intensity= val_batch
      lab = val_batch[lab]
      #print(lab)
      txt = val_batch[txt]
      rag = val_batch[rag]
      img = val_batch[img]
      name = val_batch[name]
      e1 = val_batch[e1]
      e2 = val_batch[e2]
      e3 = val_batch[e3]
      e4 = val_batch[e4]
      e5 = val_batch[e5]
      e6 = val_batch[e6]
      e7 = val_batch[e7]
      e8 = val_batch[e8]
      e9 = val_batch[e9]
      intensity = val_batch[intensity]
      # per = val_batch[per]
      # iro = val_batch[iro]
      # alli = val_batch[alli]
      # ana = val_batch[ana]
      # inv = val_batch[inv]
      # meta = val_batch[meta]
      # puns = val_batch[puns]
      # sat = val_batch[sat]
      # hyp = val_batch[hyp]


      logits,a,b,c,d,e,f,g,h,i,inten = self.forward(txt,img,rag)


      logits=logits.float()
    

      tmp = np.argmax(logits.detach().cpu().numpy(),axis=-1)
      loss = self.cross_entropy_loss(logits, lab)
      lab = lab.detach().cpu().numpy()
      self.log('val_acc', accuracy_score(lab,tmp))
      self.log('val_roc_auc',roc_auc_score(lab,tmp))
      self.log('val_loss', loss)
      tqdm_dict = {'val_acc': accuracy_score(lab,tmp)}
      self.validation_step_outputs.append({'progress_bar': tqdm_dict,'val_f1 offensive': f1_score(lab,tmp,average='macro')})

      return {
                'progress_bar': tqdm_dict,
      'val_f1 offensive': f1_score(lab,tmp,average='macro')
      }

  def on_validation_epoch_end(self):
    outs = []
    outs14=[]
    for out in self.validation_step_outputs:
       outs.append(out['progress_bar']['val_acc'])
       outs14.append(out['val_f1 offensive'])
    self.log('val_acc_all_offn', sum(outs)/len(outs))
    self.log('val_f1 offensive', sum(outs14)/len(outs14))
    print(f'***val_acc_all_offn at epoch end {sum(outs)/len(outs)}****')
    print(f'***val_f1 offensive at epoch end {sum(outs14)/len(outs14)}****')
    self.validation_step_outputs.clear()

  def test_step(self, batch, batch_idx):
      lab,txt,rag,img,name,e1,e2,e3,e4,e5,e6,e7,e8,e9,intensity= batch
      #lab,txt,rag,img,name,per,iro,alli,ana,inv,meta,puns,sat,hyp= batch

      lab = batch[lab]
      #print(lab)
      rag = batch[rag]

      txt = batch[txt]
      img = batch[img]
      name = batch[name]
      e1 = batch[e1]
      e2 = batch[e2]
      e3 = batch[e3]
      e4 = batch[e4]
      e5 = batch[e5]
      e6 = batch[e6]
      e7 = batch[e7]
      e8 = batch[e8]
      e9 = batch[e9]
      intensity = batch[intensity]
      # per = batch[per]
      # iro = batch[iro]
      # alli = batch[alli]
      # ana = batch[ana]
      # inv = batch[inv]
      # meta = batch[meta]
      # puns = batch[puns]
      # sat = batch[sat]
      # hyp = batch[hyp]

      logits,a,b,c,d,e,f,g,h,i,inten= self.forward(txt,img,rag)
      logits = logits.float()
      tmp = np.argmax(logits.detach().cpu().numpy(force=True),axis=-1)
      loss = self.cross_entropy_loss(logits, lab)
      lab = lab.detach().cpu().numpy()
      self.log('test_acc', accuracy_score(lab,tmp))
      self.log('test_roc_auc',roc_auc_score(lab,tmp))
      self.log('test_loss', loss)
      tqdm_dict = {'test_acc': accuracy_score(lab,tmp)}
      self.test_step_outputs.append({'progress_bar': tqdm_dict,'test_acc': accuracy_score(lab,tmp), 'test_f1_score': f1_score(lab,tmp,average='macro')})
      return {
                'progress_bar': tqdm_dict,
                'test_acc': accuracy_score(lab,tmp),
                'test_f1_score': f1_score(lab,tmp,average='macro')
      }
  def on_test_epoch_end(self):
      # OPTIONAL
      outs = []
      outs1,outs2,outs3,outs4,outs5,outs6,outs7,outs8,outs9,outs10,outs11,outs12,outs13,outs14 = \
      [],[],[],[],[],[],[],[],[],[],[],[],[],[]
      for out in self.test_step_outputs:
        outs.append(out['test_acc'])
        outs2.append(out['test_f1_score'])
      self.log('test_acc', sum(outs)/len(outs))
      self.log('test_f1_score', sum(outs2)/len(outs2))
      self.test_step_outputs.clear()

  def configure_optimizers(self):
    # optimizer = torch.optim.Adam(self.parameters(), lr=3e-2)
    optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)

    return optimizer


"""
Main Model:
Initialize
Forward Pass
Training Step
Validation Step
Testing Step

Pp
"""

class HmDataModule(pl.LightningDataModule):

  def setup(self, stage):
    self.hm_train = t_p
    self.hm_val = v_p
    # self.hm_test = test
    self.hm_test = te_p

  def train_dataloader(self):
    return DataLoader(self.hm_train, batch_size=10, drop_last=True)

  def val_dataloader(self):
    return DataLoader(self.hm_val, batch_size=10, drop_last=True)

  def test_dataloader(self):
    return DataLoader(self.hm_test, batch_size=10, drop_last=True)

data_module = HmDataModule()
checkpoint_callback = ModelCheckpoint(
     monitor='val_acc_all_offn',
     dirpath='mrinal/',
     filename='epoch{epoch:02d}-val_f1_all_offn{val_acc_all_offn:.2f}',
     auto_insert_metric_name=False,
     save_top_k=1,
    mode="max",
 )
all_callbacks = []
all_callbacks.append(checkpoint_callback)
# train
from pytorch_lightning import seed_everything
seed_everything(42, workers=True)
hm_model = Classifier()
gpus=1
#if torch.cuda.is_available():gpus=0
trainer = pl.Trainer(deterministic=True,max_epochs=2,precision=16,callbacks=all_callbacks)
trainer.fit(hm_model, data_module)

INFO:lightning_fabric.utilities.seed:Seed set to 42
INFO:pytorch_lightning.utilities.rank_zero:Using bfloat16 Automatic Mixed Precision (AMP)
INFO:pytorch_lightning.utilities.rank_zero:GPU available: False, used: False
INFO:pytorch_lightning.utilities.rank_zero:TPU available: False, using: 0 TPU cores
INFO:pytorch_lightning.utilities.rank_zero:IPU available: False, using: 0 IPUs
INFO:pytorch_lightning.utilities.rank_zero:HPU available: False, using: 0 HPUs
INFO:pytorch_lightning.callbacks.model_summary:
   | Name        | Type   | Params
----------------------------------
0  | MFB         | MFB    | 21.0 M
1  | fin_y_shape | Linear | 393 K
2  | fin_old     | Linear | 130
3  | fin         | Linear | 786 K
4  | fin_inten   | Linear | 325
5  | fin_e1      | Linear | 130
6  | fin_e2      | Linear | 130
7  | fin_e3      | Linear | 130
8  | fin_e4      | Linear | 130
9  | fin_e5      | Linear | 130
10 | fin_e6      | Linear | 130
11 | fin_e7      | Linear | 130
12 | fin_e8      | Linear | 130
13 | fin_e9      | Linear | 130
----------------------------------
22.2 M    Trainable params
0         Non-trainable params
22.2 M    Total params
88.745    Total estimated model params size (MB)
x.shape torch.Size([10, 768])
x.shape torch.Size([10, 768])
***val_acc_all_offn at epoch end 0.3****
***val_f1 offensive at epoch end 0.29292929292929293****
/usr/local/lib/python3.10/dist-packages/pytorch_lightning/loops/fit_loop.py:298: The number of training batches (34) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.
Epoch 0: 0%
0/34 [00:00<?, ?it/s]
x.shape torch.Size([10, 768])

IndexError Traceback (most recent call last)
in <cell line: 296>()
294 #if torch.cuda.is_available():gpus=0
295 trainer = pl.Trainer(deterministic=True,max_epochs=2,precision=16,callbacks=all_callbacks)
→ 296 trainer.fit(hm_model, data_module)

29 frames
/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
2702 if size_average is not None or reduce is not None:
2703 reduction = _Reduction.legacy_get_string(size_average, reduce)
→ 2704 return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2705
2706

IndexError: Target 2 is out of bounds.

Can you please help me with this?

Your linear layers return 2 logits corresponding to two classes. The target should thus contain class indices in [0, 1].
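
If one of those labels genuinely has more than two categories, the corresponding head has to be widened to match, e.g. (a sketch; the label values below are made up):

import torch
import torch.nn as nn

lab_values = torch.tensor([0, 1, 2, 1, 0])            # example labels that contain a 2
num_lab_classes = int(lab_values.max().item()) + 1    # -> 3
fin_old = nn.Linear(64, num_lab_classes)              # would replace nn.Linear(64, 2) in __init__
print(num_lab_classes, fin_old)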

The class indices are in [0, 1] but it’s still giving the same error.

The error message shows a target index of 2 is used so you would need to double check where it’s coming from.
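
A quick way to do that is to scan the training batches before fitting and flag any label tensor whose values fall outside the range its head expects (a sketch against your DataModule; it assumes the batches are dicts of tensors, as your training_step indexing suggests):

import torch

data_module.setup(stage='fit')
expected = {'lab': 2, 'intensity': 5}   # head sizes from Classifier.__init__; e1..e9 default to 2
for batch in data_module.train_dataloader():
    for key, value in batch.items():
        if torch.is_tensor(value) and value.dtype == torch.long:
            n = expected.get(key, 2)
            if value.min().item() < 0 or value.max().item() >= n:
                print(f'{key}: found values {value.unique().tolist()} outside [0, {n - 1}]')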