IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

I’m not sure how to resolve this error. I am trying to build a classifier with 36 different labels, numbered 0 to 35. Here is some of the code:

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

# one-hot encode the vocabulary
alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789-._'

def sentence_to_id(data):
    # dict that maps every char of the alphabet to a unique int based on its position
    char_to_int = dict((c, i) for i, c in enumerate(alphabet))
    encoded_data = []
    # replace every char in data with the mapped int
    encoded_data.append([char_to_int[char] for char in data])
    # now replace each int with a one-hot array of length len(alphabet)
    one_hot = []
    for value in encoded_data:
        for i in value:
            # initialize the whole array with 0
            letter = [0 for _ in range(len(alphabet))]
            # write a 1 only at the position given by the int
            letter[i] = 1
            one_hot.append(letter)
        x = torch.tensor(one_hot)
        print(x.shape)
        #x = x.view(-1, 12, 39)
        return x

#basic model, need to modify to situation
class NLP_model(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_classes):
        super(NLP_model, self).__init__()
        self.char_embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers = 1, bidirectional= True)
        self.fc = nn.Linear(hidden_dim*2,num_classes)
        
    def forward(self, x):
        x = self.char_embedding(x)
        output, hidden = self.lstm(x)
        hidden = torch.cat((hidden[0][-2,:,:], hidden[0][-1,:,:]), dim=1)
        x = self.fc(hidden[0])
    
        return x

model = NLP_model(len(alphabet), 8, 16, 35) #Find the number of fismaids for last variable
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)

for epoch in range(100): #Look into DataLoader for batch processing
    y = list()
    z = list()
    for sentence, label in zip(ls_X_train, ls_y_train): #training_data should be an array of hostnames and labels
        model.zero_grad()
        output = model(sentence_to_id(sentence)) #sentence is the hostname, label is the fismaid
        
        print(output.shape)
        label = torch.tensor(label).unsqueeze(1)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        y.append(loss.item())
    model.eval()
    for sentence, label in zip(ls_X_test, ls_y_test):
        output = model(sentence_to_id(sentence))
        loss = criterion(output, label)
        z.append(loss.item())
    print(f'epoch {epoch} training loss: {np.array(y).mean()}')
    print(f'testing loss : {np.array(z).mean()}')
          

The error I get is the following:

---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-31-28c5b337be00> in <module>
     11 
     12         print(output.shape)
---> 13         label = torch.tensor(label).unsqueeze(1)
     14         loss = criterion(output, label)
     15         loss.backward()

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

How would I go about troubleshooting this error?

label might be a scalar value, in which case torch.tensor(label) would create a 0-dim tensor, which cannot be unsqueezed in dim1:

label = 0
label = torch.tensor(label).unsqueeze(1)
> IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

In that case you would need to either unsqueeze dim0 first or pass the scalar value wrapped in a list:

label = 0
label = torch.tensor(label).unsqueeze(0).unsqueeze(1)
label = torch.tensor([label]).unsqueeze(1)
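
For reference, here is a quick shape check of both variants (a minimal sketch, assuming label is a plain Python int):

label = 0
a = torch.tensor(label).unsqueeze(0).unsqueeze(1)  # 0-dim -> [1] -> [1, 1]
b = torch.tensor([label]).unsqueeze(1)             # [1] -> [1, 1]
print(a.shape, b.shape)  # torch.Size([1, 1]) torch.Size([1, 1])

Use one variant or the other, not both in sequence, since each unsqueeze adds another dimension.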

I added your three lines of code and now get the following error instead. Thanks for your help!

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-32-1f9999ae048a> in <module>
     15         label = torch.tensor([label]).unsqueeze(1)
     16         label = torch.tensor(label).unsqueeze(1)
---> 17         loss = criterion(output, label)
     18         loss.backward()
     19         optimizer.step()

~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

~\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
    712         assert self.weight is None or isinstance(self.weight, Tensor)
    713         assert self.pos_weight is None or isinstance(self.pos_weight, Tensor)
--> 714         return F.binary_cross_entropy_with_logits(input, target,
    715                                                   self.weight,
    716                                                   pos_weight=self.pos_weight,

~\Anaconda3\lib\site-packages\torch\nn\functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
   2825 
   2826     if not (target.size() == input.size()):
-> 2827         raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
   2828 
   2829     return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)

ValueError: Target size (torch.Size([1, 1, 1])) must be the same as input size (torch.Size([35]))

nn.BCEWithLogitsLoss expects the model output and the target to have the same shape.
Based on the new error message, your model output has a shape of [35], while the target is a single value, which won’t work.

For a multi-label classification with nn.BCEWithLogitsLoss, the expected shape is [batch_size, nb_classes] for both the model output and the target.
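
As a minimal sketch of matching shapes (the batch size here is an assumption for illustration, and the random multi-hot target is just a placeholder):

import torch
import torch.nn as nn

batch_size, nb_classes = 4, 36
criterion = nn.BCEWithLogitsLoss()

output = torch.randn(batch_size, nb_classes)                    # logits: [batch_size, nb_classes]
target = torch.randint(0, 2, (batch_size, nb_classes)).float()  # multi-hot target, same shape
loss = criterion(output, target)                                # works: shapes match
print(loss)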
