Hi, I am having some trouble training a U-Net. I have implemented a U-Net in Keras before and am trying to do the same with PyTorch. The problem is that my U-Net in PyTorch doesn't seem to be learning: the train loss stays well under 0.0005, which is terrible. For my Keras U-Net, the train loss improves drastically (compared to PyTorch) from the second epoch onward. I am assuming that something is wrong with either my loss function or my metric function. Can anyone help me figure out what is wrong here? Thanks in advance.
#Loss function
class DiceCoefLoss(nn.Module):
    """Negative soft Dice coefficient, usable as a segmentation training loss.

    Returns ``-dice`` so that maximizing mask overlap minimizes the loss.
    NOTE: the value therefore lies in [-1, 0], not [0, 1] — a loss near -1
    means near-perfect overlap.
    """

    def __init__(self):
        super(DiceCoefLoss, self).__init__()

    def forward(self, input, target):
        # Fix: do NOT call .cuda() here. Hard-coding a device transfer inside
        # the loss breaks CPU-only runs and is redundant — the training loop
        # is responsible for device placement, and autograd flows either way.
        # Also removed the per-batch debug print, which floods logs and slows
        # training.
        smooth = 1  # keeps the ratio defined when both masks are empty
        iflat = input.view(-1)
        tflat = target.view(-1)
        intersection = (iflat * tflat).sum()
        dice_coef = (2. * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
        return -dice_coef
#Metric function
def dice_coef(output, target):
    """Soft Dice coefficient between a predicted mask and its ground truth.

    Both tensors are moved to the CPU and flattened before the overlap is
    computed; a smoothing term of 1 keeps the ratio defined when both
    masks are empty.
    """
    eps = 1
    pred, truth = output.cpu(), target.cpu()
    pred = pred.reshape(-1)
    truth = truth.reshape(-1)
    overlap = (pred * truth).sum()
    return (2. * overlap + eps) / (pred.sum() + truth.sum() + eps)
def dice_coef_loss(output, target):
    """Dice loss for training: the negated Dice coefficient metric."""
    score = dice_coef(output, target)
    return -score
#Dataset
class MyLidcDataset(Dataset):
    """Lazily loads paired LIDC image/mask ``.npy`` files as float tensors.

    IMAGES_PATHS: list of images paths ['./Images/0001_01_images.npy','./Images/0001_02_images.npy']
    MASKS_PATHS: list of masks paths ['./Masks/0001_01_masks.npy','./Masks/0001_02_masks.npy']
    """

    def __init__(self, IMAGES_PATHS, MASK_PATHS):
        self.image_paths = IMAGES_PATHS
        self.mask_paths = MASK_PATHS

    def transform(self, image, mask):
        # Convert numpy arrays to tensors and cast both to float32.
        img_t = TF.to_tensor(image).float()
        msk_t = TF.to_tensor(mask).float()
        return img_t, msk_t

    def __getitem__(self, index):
        # Load one image/mask pair from disk on demand.
        img_arr = np.load(self.image_paths[index])
        msk_arr = np.load(self.mask_paths[index])
        return self.transform(img_arr, msk_arr)

    def __len__(self):
        return len(self.image_paths)