When I use the cross-entropy loss function for multiclass text classification, I get the error “Dimension out of range (expected to be in range of [-1, 0], but got 1)”.
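The error can be reproduced with nothing but the loss call when the predictions tensor is 1-D (a minimal sketch with made-up sizes; on newer PyTorch releases the exact message differs, but the shape mismatch is the same):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
predictions = torch.randn(8)            # 1-D logits: shape [8] instead of [8, num_classes]
labels = torch.randint(0, 3, (8,))      # three classes: targets in {0, 1, 2}
loss = criterion(predictions, labels)   # raises: Dimension out of range ..., but got 1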
This is my training function:
def train(model, iterator):
    ...
    for batch in iterator:
        text, text_lengths = batch.Turn
        optimizer.zero_grad()
        predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, batch.label)
        acc = categorical_accuracy(predictions, batch.label)
        ...
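A quick check just before the loss call would show what the criterion actually receives (debugging sketch; the batch size depends on the iterator):

# nn.CrossEntropyLoss expects logits of shape [batch_size, num_classes]
# and long targets of shape [batch_size].
print(predictions.shape)   # [batch_size] here: OUTPUT_DIM = 1 and .squeeze(1) drops that dim
print(batch.label.shape)   # [batch_size]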
Dataset:
TEXT = data.Field(tokenize = 'spacy', include_lengths = True)
LABEL = data.LabelField(dtype = torch.long)
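The vocabularies and iterator are built the usual legacy-torchtext way (sketch; train_data, BATCH_SIZE, and the GloVe vectors come from my setup and aren't shown above):

TEXT.build_vocab(train_data, vectors="glove.6B.100d")
LABEL.build_vocab(train_data)   # three classes -> integer labels 0, 1, 2
train_iterator = data.BucketIterator(
    train_data, batch_size=BATCH_SIZE,
    sort_within_batch=True)     # pack_padded_sequence expects length-sorted batches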
Forward:
def forward(self, text, text_lengths):
    # text: [seq_len, batch_size] token indices
    embedded = self.embedding(text)
    # pack so the LSTM skips the padding time steps
    packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths)
    packed_output, (hidden, cell) = self.rnn(packed_embedded)
    # concatenate the final forward and backward hidden states
    hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
    output = self.fc1(hidden)
    output = self.dropout(self.fc2(output))
    return output
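For reference, the shapes through forward with the hyperparameters below (illustrative batch of 64 and sequence length of 50):

# text:      [50, 64]        (seq_len, batch_size)
# embedded:  [50, 64, 100]   EMBEDDING_DIM = 100
# hidden:    [2, 64, 128]    num_directions x batch_size x HIDDEN_DIM (bidirectional, 1 layer)
# cat:       [64, 256]       final forward + backward states concatenated
# fc2 out:   [64, 1]         OUTPUT_DIM = 1, so .squeeze(1) yields a 1-D tensor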
LSTM setup for 3-class classification:
num_epochs = 25
learning_rate = 0.001
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 128
OUTPUT_DIM = 1
N_LAYERS = 1
BIDIRECTIONAL = True
DROPOUT = 0.2
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]  # index of the padding token
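The model and criterion are then instantiated like this (abbreviated sketch; the model class name RNN and the choice of Adam are paraphrased from my setup):

model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM,
            N_LAYERS, BIDIRECTIONAL, DROPOUT, PAD_IDX)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()   # expects [batch_size, num_classes] logits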