Binary classification - loss not decreasing

I am working on a binary classification problem where, based on the description of a product, I have to put it under one of two categories. I have used GloVe to convert my sentences into 100-dimensional embeddings, then averaged the embeddings over the complete sentence. My problem is that the loss is not decreasing below 1.0.
My input dimensions are (1, 100).
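The averaging step looks roughly like this (a minimal sketch; glove here stands for a dict-like mapping from token to 100-dimensional numpy vector, the names are illustrative):

import numpy as np

def sentence_embedding(sentence, glove, dim=100):
    # Average the GloVe vectors of all in-vocabulary tokens;
    # fall back to a zero vector if no token is known.
    vectors = [glove[tok] for tok in sentence.lower().split() if tok in glove]
    if not vectors:
        return np.zeros(dim)
    return np.mean(vectors, axis=0)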
Model architecture is

import torch
import torch.nn as nn
from torch.nn.init import kaiming_uniform_, xavier_uniform_

class NN(nn.Module):
    def __init__(self):
        super(NN, self).__init__()
        # Hidden layers: Kaiming initialization, suited to ReLU activations
        self.l1 = nn.Linear(100, 60)
        kaiming_uniform_(self.l1.weight, nonlinearity='relu')
        self.relu1 = nn.ReLU()

        self.l2 = nn.Linear(60, 30)
        kaiming_uniform_(self.l2.weight, nonlinearity='relu')
        self.relu2 = nn.ReLU()

        self.l3 = nn.Linear(30, 10)
        kaiming_uniform_(self.l3.weight, nonlinearity='relu')
        self.relu3 = nn.ReLU()

        # Output layer: Xavier initialization followed by a sigmoid
        self.l4 = nn.Linear(10, 1)
        xavier_uniform_(self.l4.weight)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        x = self.relu1(self.l1(x))
        x = self.relu2(self.l2(x))
        x = self.relu3(self.l3(x))
        x = self.sig(self.l4(x))
        return x

Data loader

from torch.utils.data import Dataset

class loader(Dataset):
    def __init__(self):
        # d is the preprocessed data with "embedding" and "category" columns
        pass
    def __len__(self):
        return len(d)
    def __getitem__(self, idx):
        return d["embedding"][idx], d["category"][idx]
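The model and loader are created roughly like this (sketch; the actual setup code is not shown above):

import torch
from torch.utils.data import DataLoader

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = NN().double().to(device)   # .double() to match the batches cast below
train_loader = DataLoader(loader(), batch_size=64, shuffle=True)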

Training Loop

import torch.optim as optim

criterion = nn.BCELoss()
opt = optim.SGD(net.parameters(), lr=0.00001, momentum=0.9)
epochs = 50
batch_size = 64
num_iter = 500

for epoch_num in range(1, epochs + 1):
    running_loss = 0.0
    total_train = 0
    correct_train = 0
    for i, samples in enumerate(train_loader):
        emb, label = samples[0].double(), samples[1].double()
        num = emb.shape[0]
        emb, label = emb.to(device), label.to(device)

        opt.zero_grad()
        outputs = net(emb)
        outputs = outputs.reshape((outputs.shape[0], 1))
        label = label.reshape((label.shape[0], 1))
        loss = criterion(outputs, label)
        loss.backward()
        opt.step()

        running_loss += (torch.exp(loss).item()) * (batch_size / num)
        valid_loss = 0
        if i % num_iter == 0 and i != 0:
            # valid_loss = validation(valid_loader=valid_loader, criterion=criterion)
            print("Epoch [%3d] iteration [%4d] loss: [%.10f]" % (epoch_num, i, running_loss / num_iter))
            # print(" validation_loss: [%.10f]" % valid_loss)
            # print("acc [%3d] current lr [%.10f]" % (correct_train / total_train, opt.param_groups[0]['lr']))
            running_loss = 0.0


            

Thank you.

Probably unrelated to your problem, but I would recommend removing the sigmoid and using nn.BCEWithLogitsLoss instead.
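I.e. return the raw logits from the last linear layer and let the criterion apply the sigmoid internally, which is numerically more stable. A minimal sketch of the change (class name is illustrative, init calls omitted for brevity):

import torch
import torch.nn as nn

class NNLogits(nn.Module):
    # Same architecture, but the forward pass returns raw logits;
    # nn.BCEWithLogitsLoss applies the sigmoid internally.
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(100, 60)
        self.l2 = nn.Linear(60, 30)
        self.l3 = nn.Linear(30, 10)
        self.l4 = nn.Linear(10, 1)

    def forward(self, x):
        x = torch.relu(self.l1(x))
        x = torch.relu(self.l2(x))
        x = torch.relu(self.l3(x))
        return self.l4(x)  # no sigmoid here

criterion = nn.BCEWithLogitsLoss()
# For predictions/accuracy, apply the sigmoid explicitly:
# probs = torch.sigmoid(net(emb))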

The code looks generally good, but I'm wondering why you need these lines of code:

outputs=outputs.reshape((outputs.shape[0],1))
label=label.reshape((label.shape[0],1))

This reshape shouldn't be necessary; what are the output and target shapes?

I changed the loss function as you said, and I also replaced torch.exp(loss).item() with loss.item(). Now the loss is well below zero. Does this have anything to do with it? And when do we have to use torch.exp(loss)? Thank you very much.

You don't need to use torch.exp(loss) and can accumulate the running loss directly with loss.item(). Since the raw BCE loss is non-negative (for targets in [0, 1]), torch.exp(loss) is always at least 1, which would also explain why your logged value never dropped below 1.0.
If your loss is now negative, could you please check the target range, as the targets are supposed to be in the range [0, 1]?
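E.g. a quick sanity check inside the training loop (sketch):

# BCE targets must lie in [0, 1]; a negative loss usually means they don't.
print(label.min().item(), label.max().item())
assert ((label >= 0) & (label <= 1)).all(), "targets outside [0, 1]"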