Batches & loss don't update

Hi, I was training my model but some issues occurred.
The dataloader was supposed to update batches but instead I got this:

Output

  • [0, 0] loss: 0.009
  • [1, 0] loss: 0.009
  • [2, 0] loss: 0.009
  • [3, 0] loss: 0.009
  • [4, 0] loss: 0.009
  • [5, 0] loss: 0.009
  • [6, 0] loss: 0.009
  • [7, 0] loss: 0.009
  • [8, 0] loss: 0.009
  • [9, 0] loss: 0.009

Anyone know what’s wrong? My codes are as below

DataLoader

class MSourceDataSet(Dataset):
    
    def __init__(self, clean_dir, mix_dir, clean_label_dir, mix_label_dir):
                
        with open(clean_dir + 'clean1.json') as f:
            clean0 = torch.Tensor(json.load(f))
            
        with open(mix_dir + 'mix1.json') as f:
            mix0 = torch.Tensor(json.load(f))
        
        with open(clean_label_dir + 'clean_label1.json') as f:
            clean_label0 = torch.Tensor(json.load(f))
            

        with open(mix_label_dir + 'mix_label1.json') as f:
            mix_label0 = torch.Tensor(json.load(f))
        
        
        self.spec = torch.cat([clean0, mix0], 0)
        self.label = torch.cat([clean_label0, mix_label0], 0)
        
    def __len__(self):
        return self.spec.shape[0]

                
    def __getitem__(self, index): 

        spec = self.spec[index]
        label = self.label[index]
        return spec, label

In [78]:

trainset = MSourceDataSet(clean_dir, mix_dir, clean_label_dir, mix_label_dir)

trainloader = torch.utils.data.DataLoader(dataset = trainset,
                                                batch_size = 4,
                                                shuffle = True)

# testloader = torch.utils.data.DataLoader(dataset = testset,
#                                                batch_size = 4,
#                                                shuffle = True)

Training

model.train()

for epoch in range(10):
    running_loss = 0
    
    for i, data in enumerate(trainloader, 0):
        
        inputs, labels = data

        optimizer.zero_grad()
        
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
        if i % 1000 == 0:
            print ('[%d, %5d] loss: %.3f' % (epoch, i, running_loss/ 1000))
            running_loss = 0
            
torch.save(model, 'FeatureNet.pkl')

Did you make sure the Dataset is working properly?
Just try to get some random samples and see, if the values make sense:

print(trainset[0])
print(trainset[10])
print(trainset[-1])

If that looks fine, try to lower your learning rate as it might be a bit high so that your training gets stuck from the beginning.