I've tried to figure out where the problem is, but I can't see it. The loss doesn't decrease after any iteration. I've put the code below; can someone help?
Thank you
for epoch in range(1000):
    running_loss = 0.0
    for i_batch, sample_batched in enumerate(dataloader):
        inputs, labels = sample_batched['image'], sample_batched['semantic']
        labels = labels.cuda().float()

        # zero the parameter gradients
        optimizer.zero_grad()
        inputs.requires_grad_()

        # forward + backward + optimize
        mask_pred = unet(inputs.cuda())
        mask_pred = mask_pred.cuda()
        labels = labels.type(torch.cuda.DoubleTensor)
        loss = criterion(mask_pred, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i_batch % 10 == 9:
            # print every 10 mini-batches
            print('[%d, %5f] loss: %.3f' % (epoch + 1, i_batch + 1, running_loss / 10))
            running_loss = 0.0

print('Finished Training')
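For context, unet, criterion, optimizer, and dataloader are defined earlier in my script. A rough sketch of that kind of setup is below; the toy dataset, placeholder model, loss choice, and hyperparameters are illustrative assumptions, not my actual definitions:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

# Everything below is an illustrative stand-in, not my real code.
class ToySegDataset(Dataset):
    # random image/mask pairs with the same dict keys as my real dataset
    def __len__(self):
        return 160

    def __getitem__(self, idx):
        return {'image': torch.randn(3, 64, 64),
                'semantic': torch.randint(0, 2, (1, 64, 64))}

unet = nn.Conv2d(3, 1, kernel_size=3, padding=1).cuda()  # placeholder for the real U-Net
criterion = nn.BCEWithLogitsLoss()                       # assumed loss for binary masks
optimizer = torch.optim.SGD(unet.parameters(), lr=0.01, momentum=0.9)
dataloader = DataLoader(ToySegDataset(), batch_size=4, shuffle=True)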
Here is the output:
[1, 10.000000] loss: 0.665
[1, 20.000000] loss: 0.679
[1, 30.000000] loss: 0.675
[1, 40.000000] loss: 0.673
[1, 50.000000] loss: 0.673
[1, 60.000000] loss: 0.681
[1, 70.000000] loss: 0.673
[1, 80.000000] loss: 0.678
[1, 90.000000] loss: 0.669
[1, 100.000000] loss: 0.673
[1, 110.000000] loss: 0.673
[1, 120.000000] loss: 0.677
[1, 130.000000] loss: 0.674
[1, 140.000000] loss: 0.676
[1, 150.000000] loss: 0.676
[1, 160.000000] loss: 0.677