I’ve been stuck here for a while now. Any help.
batch_size = 64
The error occurs here.( label = label[sorted_idx] )
def train(args, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
min_loss = np.Inf
for epoch in range(args.num_epochs):
train_losses = []
train_acc = 0.0
total=0
print(f"[Epoch {epoch+1} / {args.num_epochs}]")
model.train()
for i, (text, label) in enumerate(tqdm(data_loader)):
model.eval()
text = text.to(args.device)
label = label.to(args.device)
input_lengths = torch.LongTensor([torch.max(text[i, :].nonzero()) +1 for i in range(text.size(0))])
input_lengths, sorted_idx = input_lengths.sort(0, descending=True)
# print(len(input_lengths), len(sorted_idx))
# print(input_lengths,"\n", sorted_idx)
text = text[sorted_idx]
# print("text pass")
label = label.squeeze()
label = label[sorted_idx]
# print("label pass")
optimizer.zero_grad()
output = model(text, input_lengths)
loss = criterion(output, label)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
total += label.size(0)
train_acc += acc(output, label)
epoch_train_loss = np.mean(train_losses)
epoch_train_acc = train_acc/total
print(f'Epoch {epoch+1}')
print(f'train_loss : {epoch_train_loss}')
print('train_accuracy : {:.3f}'.format(epoch_train_acc*100))