Hi ,
I am training NN using pytorch 1.7.0 , when i use CrossEntopyLoss() loss function then i dont have any negative loss in any epochs, since this competition evaluation metrics is multi-class logarithmic loss which i believe BCEWithLogitsLoss() in pytorch serve this logarithmic loss for multi class (correct me if i am wrong).
My question is why negative loss is coming when using BCEWithLogitsLoss()? How can i prevent it, i dont wanna use CrossEntopyLoss() , Please see below code , for clarity i am showing “y” actual target and “output” prediction of model in first epoch only
def get_optimizer(model, lr):
optim = torch_optim.Adam(model.parameters(), lr=lr, weight_decay=0.05)
return optim
batch_size = 2000
def train_loop(model, epochs, lr):
total = 0
sum_loss = 0
output = 0
criterion = nn.BCEWithLogitsLoss()
optim= get_optimizer(model)
for epoch in range(epochs):
for cat, y in train_dl:
model.train()
batch = y.shape[0]
output = model(cat)
if (epoch) ==1:
print(f'y is {y.float}')
print(f'y is {output[:,0]}')
loss = criterion(output[:,0],y.float())
optim.zero_grad()
loss.backward()
optim.step()
total += batch
sum_loss += batch*(loss.item())
valid_ds = ClassifierDataset(X_val,y_val , features)
batch_size = X_train.shape[0]
valid_dl=DataLoader(valid_ds,batch_size=batch_size,shuffle=False)
valid_dl=DeviceDataLoader(valid_dl, device)
for cat, y in valid_dl:
model.eval()
output = model(cat)
valid_loss = criterion(output[:,0],y.float())
print(f'epoch:{epoch+1},training loss:{loss},valid loss:{valid_loss} ')
train_dl = DataLoader(train_ds, batch_size=batch_size,shuffle=True)
train_dl = DeviceDataLoader(train_dl, device)
model = multiNet(embedding_sizes)
to_device(model, device)
model.apply(init_weights)
train_loop(model, epochs=120, lr=0.001)
epoch : 1,training loss : -127.1643,valid loss : -82.094856
y:tensor([7., 3., 1., ..., 8., 7., 1.], device='cuda:0')
output:tensor([ 0.945,0.189,-1.194,...,-1.03,0.80,-1.05],device='cuda:0',grad_fn=<SelectBackward>)
epoch : 2,training loss : -298.340728,valid loss : -293.701477
epoch : 3,training loss : -529.159423,valid loss : -535.595520
epoch : 4,training loss : -882.299377,valid loss : -906.745788