UNet isn't learning

Hi, I have been trying to implement a U-Net for lung nodule detection in PyTorch, but it just doesn't seem to be learning. I have been looking into it for several days and I can't find what's wrong, so I would appreciate any help. I presume there is something wrong with the loss function itself: the model output sums to a very large number while the target sums to a small one. output.view(-1) has 512*512 = 262,144 elements per image (over 2 million for the batch of 8), and its sum is huge, while the sum of target.view(-1) only ranges around 500~1000.

Batch size = 8, image size = (512, 512)

IFLAT SUM tensor(859688.1250, device='cuda:0', grad_fn=<SumBackward0>)
TFLAT SUM tensor(838., device='cuda:0')

This is my loss function

import torch
import torch.nn as nn


class DiceCoefLoss(nn.Module):
    def __init__(self):
        super(DiceCoefLoss, self).__init__()

    def forward(self, input, target):
        input, target = input.cuda(), target.cuda()
        smooth = 1

        # Flatten prediction and ground truth to 1-D vectors.
        iflat = input.view(-1).float()
        tflat = target.view(-1).float()

        print("IFLAT SUM", iflat.sum())
        print("TFLAT SUM", tflat.sum())

        # Soft Dice coefficient: 2*|X ∩ Y| / (|X| + |Y|), smoothed against division by zero.
        intersection = (iflat * tflat).sum()
        print("Intersection ", intersection)
        dice_coef = (2. * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
        print("DICE LOSS IS", 1 - dice_coef)
        return 1 - dice_coef
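
As a side note, a quick way to sanity-check this loss in isolation (a minimal sketch; the shapes and nodule location are made up, and it needs a CUDA machine since the loss calls .cuda()):

import torch

loss_fn = DiceCoefLoss()

target = torch.zeros(1, 1, 512, 512)
target[:, :, 200:210, 200:210] = 1.0              # tiny square "nodule", 100 positive pixels

print(loss_fn(target.clone(), target))            # perfect overlap -> loss ~0
print(loss_fn(0.5 * target, target))              # partial overlap -> loss ~0.33
print(loss_fn(torch.zeros_like(target), target))  # no overlap     -> loss ~0.99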

This is my training process

from collections import OrderedDict

import torch
from tqdm import tqdm


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n   # weight the batch-mean `val` by the batch size `n`
        self.count += n
        self.avg = self.sum / self.count
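
For reference, update() takes the batch-mean value and the batch size:

meter = AverageMeter()
meter.update(0.8, n=8)    # batch of 8 samples with mean loss 0.8
meter.update(0.6, n=8)    # batch of 8 samples with mean loss 0.6
print(meter.avg)          # (0.8*8 + 0.6*8) / 16 = 0.7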

def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
    losses = AverageMeter()
    ious = AverageMeter()
    dices = AverageMeter() 

    model.train()

    dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    for i, (input, target) in tqdm(enumerate(train_loader), total=len(train_loader)):
        
        input = input.type(dtype)
        target = target.unsqueeze(1)
        # compute output
        if args['deepsupervision']:
            # With deep supervision the model returns a list of side outputs,
            # each of which is scored against the same target mask.
            outputs = model(input)
            loss = 0
            for output in outputs:
                loss += criterion(output, target)
            loss /= len(outputs)
            iou = iou_score(outputs[-1], target)
            dice = dice_coef(outputs[-1], target)
        else:
            output = model(input)
            loss = criterion(output, target)
            iou = iou_score(output, target)
            dice = dice_coef(output, target)
        losses.update(loss.item(), input.size(0))
        ious.update(iou, input.size(0))
        dices.update(dice.item(),input.size(0))
        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    log = OrderedDict([
        ('loss', losses.avg),
        ('iou', ious.avg),
        ('dice',dices.avg)
    ])

    return log
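
For completeness, this is roughly how I drive it each epoch (the optimizer, learning rate, and loader setup here are placeholders, not my exact settings):

num_epochs = 100                    # placeholder
model = UNet().cuda()               # my U-Net (forward pass shown below)
criterion = DiceCoefLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # placeholder choice
args = {'deepsupervision': False}

for epoch in range(num_epochs):
    log = train(args, train_loader, model, criterion, optimizer, epoch)
    print(epoch, log['loss'], log['iou'], log['dice'])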

What kind of outputs is your model returning?
If I'm not mistaken, the Dice loss expects probabilities as the model output.
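For example, if the network returns raw logits, you would squash them first (a minimal sketch with assumed names):

logits = model(input)            # raw, unbounded scores from the network
probs = torch.sigmoid(logits)    # map to [0, 1] probabilities
loss = criterion(probs, target)  # Dice then compares probabilities with the mask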

Hi, this is the forward pass of my U-Net, where the final output goes through a sigmoid:

    def forward(self, input):
        # Encoder path with max-pooling between levels
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))
        # Decoder path with skip connections via channel-wise concatenation
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))
        output = self.final(x0_4)
        output = torch.sigmoid(output)  # F.sigmoid is deprecated; squash logits to [0, 1]

        print("THE OUTPUT IS:", output.shape)
        return output
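
As a quick check that the outputs really are probabilities and that the mask is binary (the tensor names here are just from my debugging session):

output = model(input.cuda())
print(output.min().item(), output.max().item())  # should both lie in [0, 1] after sigmoid
print(target.unique())                           # a binary mask should give tensor([0., 1.])
# With 8 * 512 * 512 ≈ 2.1M pixels, a post-sigmoid sum near 860k is an average
# activation of ~0.41, so the large IFLAT sum by itself doesn't mean the loss is broken.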