Why does my validation loss start at 0 and increase by 1 every epoch?

Why am I not getting the right value for the validation loss?

path = '/content/drive/My Drive/data_detection'

train_data = FaceAnnoDataset(root_dir=path, img_dir ='image', anno_dir='label', 
                            txtfile='image.txt', transform=transforms.Compose([
                                transforms.ToPILImage(),                                              
                                transforms.Resize((224,224)),
                                transforms.ToTensor(),
                                transforms.Normalize([0.2341, 0.2388, 0.2622], [0.2210, 0.2150, 0.2543])])
)


validate_data = FaceAnnoDataset(root_dir=path, img_dir ='val_image', anno_dir='val_label', 
                            txtfile='val_image.txt', transform=transforms.Compose([
                                transforms.ToPILImage(),                                              
                                transforms.Resize((224,224)),
                                transforms.ToTensor(),
                                transforms.Normalize([0.2341, 0.2388, 0.2622], [0.2210, 0.2150, 0.2543])])
)


train_loader = DataLoader(train_data, batch_size=16, shuffle=False, pin_memory=True,
                         num_workers=5, collate_fn=collate_fn)

validate_loader = DataLoader(validate_data, batch_size=16, shuffle=False, pin_memory=True,
                         num_workers=5, collate_fn=collate_fn)


model = DetectionNet()
optimizer = optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0005, amsgrad=False)
scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
num_epochs = 300

for epoch in range(num_epochs):
  model.cuda()
  model.train()
  start = time.time()
  for image, anno in train_loader:
    image = image.cuda()
    anno = anno.cuda()

    y_pred = model(image)  # shape: batch_size x 5 x 7 x 7
    y_pred = y_pred.permute(0,2,3,1)  # permute to: batch_size x 7 x 7 x 5
    loss = loss_fn(y_pred, anno)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    end = time.time()
    scheduler.step()
  print('---Epochs: {}/{}---Training loss:{:.4f} time per epoch: {:.1f}s'.format(
        epoch, num_epochs, loss.item(), end-start))
  model.eval()
  with torch.no_grad():
    for image, anno in validate_loader:
      image = image.cuda()
      anno = anno.cuda()
      y_pred = model(image)
      y_pred = y_pred.permute(0,2,3,1)
      val_loss = loss_fn(y_pred, anno)
      print('---Validation loss:{:.4f}'.format(
            epoch, num_epochs, val_loss))
      print('-------------------------------------------------')
Output:
---Epochs: 0/300---Training loss:17.9397 time per epoch: 93.7s
---Validation loss:0.0000
-------------------------------------------------
---Epochs: 1/300---Training loss:17.2703 time per epoch: 21.5s
---Validation loss:1.0000
-------------------------------------------------
---Epochs: 2/300---Training loss:16.9091 time per epoch: 21.6s
---Validation loss:2.0000
-------------------------------------------------
---Epochs: 3/300---Training loss:17.5637 time per epoch: 21.7s
---Validation loss:3.0000
-------------------------------------------------
---Epochs: 4/300---Training loss:17.2783 time per epoch: 21.5s
---Validation loss:4.0000
-------------------------------------------------
---Epochs: 5/300---Training loss:18.9444 time per epoch: 21.5s
---Validation loss:5.0000
-------------------------------------------------
---Epochs: 6/300---Training loss:18.1132 time per epoch: 21.5s
---Validation loss:6.0000
-------------------------------------------------
---Epochs: 7/300---Training loss:16.8943 time per epoch: 21.5s
---Validation loss:7.0000
-------------------------------------------------

You should pass val_loss to format(): your format string has only one placeholder, and since the first argument you pass is epoch, it prints the epoch number instead of the loss. See the sketch of the corrected validation loop below.
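A minimal sketch, assuming your existing loss_fn, model, and validate_loader, with only the print call changed:

  model.eval()
  with torch.no_grad():
    for image, anno in validate_loader:
      image = image.cuda()
      anno = anno.cuda()
      y_pred = model(image)
      y_pred = y_pred.permute(0,2,3,1)  # batch_size x 7 x 7 x 5
      val_loss = loss_fn(y_pred, anno)
      # pass the loss value itself, not epoch/num_epochs
      print('---Validation loss:{:.4f}'.format(val_loss.item()))
      print('-------------------------------------------------')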

I see… Thank you! Sorry for wasting your time on such a trivial question.