Printing the class probabilities for test images, along with the image names, by modifying the dataloader

How can I modify the following code so that I can iterate over x, y, and im_path for the test images?

import torchvision.datasets as datasets
class MonaDataset(datasets.folder.ImageFolder):
    def __init__(self, root, transform=None, target_transform=None,
                 loader=datasets.folder.default_loader):
        super(MonaDataset, self).__init__(root, transform, target_transform, loader)

    def __getitem__(self, index):
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target, path

dataset = MonaDataset('10folds/10folds_9')
print(len(dataset))
x, y, im_path = dataset[0]


print("x is: {}, y is: {}, im_path is: {}".format(x, y, im_path))

image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                             shuffle=True, num_workers=4)
              for x in ['train', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}


class_names = image_datasets['train'].classes

I need it for the following code snippet. When I am printing the class probabilities, I need to know which image they belong to in the test folder.

nb_classes = 9

import torch.nn.functional as F

confusion_matrix = torch.zeros(nb_classes, nb_classes)

_classes = []
_preds = []
predicted_labels = []
loocv_probs = []

with torch.no_grad():
    for i, (inputs, classes) in enumerate(dataloaders['test']):
    #for i, (inputs, classes, im_path) in enumerate(dataset):

        inputs = inputs.to(device)
        classes = classes.to(device)
        classes_list = classes.cpu().detach().numpy().tolist()
        _classes[:] = [c + 1 for c in classes_list]

        outputs = model_ft(inputs)

        gpu_tensor_probs = F.softmax(outputs, 1)
        cpu_numpy_probs = gpu_tensor_probs.data.cpu().numpy()
        loocv_probs.append(cpu_numpy_probs.tolist())

        _, preds = torch.max(outputs, 1)
        preds_list = preds.cpu().detach().numpy().tolist()
        _preds[:] = [p + 1 for p in preds_list]

        predicted_labels.append(preds.cpu().detach().numpy().tolist())
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
                
print(confusion_matrix)
print(confusion_matrix.diag()/confusion_matrix.sum(1))
#print('Class probabilities:', loocv_probs)

for i in range(len(loocv_probs)):         # number of test batches (e.g. 21)
    for j in range(len(loocv_probs[i])):  # samples in this batch (batch_size=4, except possibly the last batch)
        print(*[f"{element:.2f}" for element in loocv_probs[i][j]], sep=', ')


for i in range(nb_classes):
    print("class {:d} --> accuracy: {:.2f}, correct predictions: {:d}, all: {:d}".format(
        i + 1,
        (confusion_matrix.diag() / confusion_matrix.sum(1))[i] * 100,
        int(confusion_matrix[i][i].numpy()),
        int(confusion_matrix.sum(dim=1)[i].numpy())))

Thanks a lot to @ptrblck for helping with most parts of the following code and for the guidance:

import torchvision.datasets as datasets
class MonaDataset(datasets.folder.ImageFolder):
    def __init__(self, root, transform=None, target_transform=None,
                 loader=datasets.folder.default_loader):
        super(MonaDataset, self).__init__(root, transform, target_transform, loader)

    def __getitem__(self, index):
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target, path  # return the image path along with the sample and target

dataset = MonaDataset('10folds/10fold_9')
print(len(dataset))
x, y, im_path = dataset[0]


print("x is: {}, y is: {}, im_path is: {}".format(x, y, im_path))

image_datasets = {x: MonaDataset(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                             shuffle=True, num_workers=4)
              for x in ['train', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}


class_names = image_datasets['train'].classes
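With MonaDataset used for both splits, each batch from the dataloaders now also carries the image paths; the default collate function returns the paths of a batch as a tuple (or list) of strings. A quick check could look like this (a minimal sketch, assuming the dataloaders defined above):

# peek at one test batch from the modified dataloader (minimal sketch)
inputs, classes, im_path = next(iter(dataloaders['test']))
print(inputs.shape)   # e.g. torch.Size([4, 3, H, W]) after data_transforms
print(classes)        # tensor with the 4 class indices of this batch
print(im_path)        # tuple/list with the 4 corresponding image paths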

and the evaluation loop:

import ntpath

nb_classes = 9

import torch.nn.functional as F

confusion_matrix = torch.zeros(nb_classes, nb_classes)

_classes = []
_preds = []
predicted_labels = []

class_probs = torch.Tensor().to(device)  # will hold the softmax output for every test image

im_paths = []
with torch.no_grad():
    for i, (inputs, classes, im_path) in enumerate(dataloaders['test']):

        im_paths.append(im_path)
        inputs = inputs.to(device)

        classes = classes.to(device)
        classes_list = classes.cpu().detach().numpy().tolist()
        _classes[:] = [c + 1 for c in classes_list]

        outputs = model_ft(inputs)
        class_probs = torch.cat((class_probs, F.softmax(outputs, 1)))

        _, preds = torch.max(outputs, 1)
        preds_list = preds.cpu().detach().numpy().tolist()
        _preds[:] = [p + 1 for p in preds_list]

        predicted_labels.append(preds.cpu().detach().numpy().tolist())
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
                
print(confusion_matrix)
print(confusion_matrix.diag()/confusion_matrix.sum(1))



flattened_im_paths = [path for batch_paths in im_paths for path in batch_paths]

for i in range(len(flattened_im_paths)):
    print('img {}, class_prob is: {}'.format(ntpath.basename(flattened_im_paths[i]), class_probs[i]))


for i in range(nb_classes):
    print("class {:d} --> accuracy: {:.2f}, correct predictions: {:d}, all: {:d}".format(
        i + 1,
        (confusion_matrix.diag() / confusion_matrix.sum(1))[i] * 100,
        int(confusion_matrix[i][i].numpy()),
        int(confusion_matrix.sum(dim=1)[i].numpy())))

You get something like:

tensor([[ 0.,  0.,  0.,  0.,  0.,  1.,  1.,  0.,  0.],
        [ 0.,  4.,  3.,  0.,  2.,  0.,  3.,  0.,  1.],
        [ 0.,  1., 22.,  0.,  1.,  0.,  1.,  0.,  1.],
        [ 0.,  1.,  1.,  0.,  1.,  0.,  1.,  0.,  0.],
        [ 0.,  2.,  5.,  0.,  3.,  0.,  0.,  0.,  0.],
        [ 0.,  1.,  1.,  0.,  0.,  2.,  1.,  0.,  0.],
        [ 0.,  1.,  3.,  0.,  0.,  0., 11.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  0.],
        [ 0.,  3.,  1.,  0.,  0.,  0.,  1.,  0.,  1.]])
tensor([0.0000, 0.3077, 0.8462, 0.0000, 0.3000, 0.4000, 0.7333, 0.0000, 0.1667])
img 352.jpg, class_prob is: tensor([0.0021, 0.8629, 0.0088, 0.0019, 0.0700, 0.0005, 0.0299, 0.0043, 0.0196],
       device='cuda:0')
img 90258.jpg, class_prob is: tensor([0.0253, 0.0341, 0.0541, 0.1367, 0.2127, 0.2026, 0.1849, 0.1008, 0.0488],
       device='cuda:0')
img 183.jpg, class_prob is: tensor([0.0172, 0.0292, 0.7435, 0.0120, 0.0217, 0.0128, 0.1457, 0.0098, 0.0080],
       device='cuda:0')
img 100342.jpg, class_prob is: tensor([0.0309, 0.0268, 0.7362, 0.0402, 0.0391, 0.0463, 0.0256, 0.0194, 0.0356],
       device='cuda:0')
img 10149.jpg, class_prob is: tensor([0.0515, 0.0811, 0.0959, 0.1061, 0.1484, 0.0629, 0.2921, 0.0598, 0.1021],
       device='cuda:0')
img 100083.jpg, class_prob is: tensor([0.0032, 0.0497, 0.0892, 0.0604, 0.0291, 0.7322, 0.0188, 0.0114, 0.0059],
       device='cuda:0')
img 10274.jpg, class_prob is: tensor([0.0436, 0.1276, 0.3260, 0.0033, 0.1529, 0.0061, 0.0842, 0.0191, 0.2371],
       device='cuda:0')
img 10659.jpg, class_prob is: tensor([0.0133, 0.2016, 0.3928, 0.0270, 0.0962, 0.0011, 0.2383, 0.0228, 0.0070],
       device='cuda:0')
img 100044.jpg, class_prob is: tensor([0.0149, 0.0259, 0.9173, 0.0053, 0.0095, 0.0070, 0.0062, 0.0087, 0.0051],
       device='cuda:0')
img 10316.jpg, class_prob is: tensor([0.0033, 0.3977, 0.1576, 0.0450, 0.1999, 0.0621, 0.0940, 0.0207, 0.0197],
       device='cuda:0')
img 10590.jpg, class_prob is: tensor([0.0228, 0.0571, 0.0350, 0.0687, 0.1637, 0.0527, 0.5209, 0.0453, 0.0339],
       device='cuda:0')
img 10132.jpg, class_prob is: tensor([0.0686, 0.1016, 0.1488, 0.0164, 0.1288, 0.0062, 0.2033, 0.0319, 0.2945],
       device='cuda:0')
img 19.jpg, class_prob is: tensor([0.0327, 0.1787, 0.6138, 0.0145, 0.0874, 0.0198, 0.0199, 0.0140, 0.0191],
       device='cuda:0')
img 399.jpg, class_prob is: tensor([0.0379, 0.0256, 0.0970, 0.0300, 0.2091, 0.0182, 0.4842, 0.0303, 0.0676],
       device='cuda:0')
img 10608.jpg, class_prob is: tensor([0.0052, 0.0664, 0.2319, 0.0453, 0.1924, 0.0128, 0.3560, 0.0528, 0.0372],
       device='cuda:0')
img 90054.jpg, class_prob is: tensor([0.0274, 0.5300, 0.1293, 0.0415, 0.0300, 0.1012, 0.0657, 0.0305, 0.0444],
       device='cuda:0')
img 45.jpg, class_prob is: tensor([0.0131, 0.1924, 0.4044, 0.0258, 0.0733, 0.1507, 0.0925, 0.0127, 0.0352],
       device='cuda:0')
img 387.jpg, class_prob is: tensor([0.0391, 0.1142, 0.0487, 0.0249, 0.1007, 0.0144, 0.5883, 0.0420, 0.0277],
       device='cuda:0')
img 372.jpg, class_prob is: tensor([0.0664, 0.1080, 0.1106, 0.0049, 0.0611, 0.0033, 0.0603, 0.0159, 0.5694],
       device='cuda:0')
img 100375.jpg, class_prob is: tensor([0.0034, 0.0530, 0.5269, 0.1435, 0.1574, 0.0389, 0.0343, 0.0401, 0.0025],
       device='cuda:0')
img 10157.jpg, class_prob is: tensor([0.0052, 0.0323, 0.0511, 0.1004, 0.4424, 0.0094, 0.3234, 0.0167, 0.0190],
       device='cuda:0')
img 10318.jpg, class_prob is: tensor([0.0641, 0.1038, 0.6826, 0.0068, 0.0093, 0.0084, 0.0210, 0.0150, 0.0890],
       device='cuda:0')
img 100103.jpg, class_prob is: tensor([0.0062, 0.3813, 0.0362, 0.0286, 0.0615, 0.2253, 0.1815, 0.0474, 0.0319],
       device='cuda:0')
img 10022.jpg, class_prob is: tensor([0.0326, 0.0547, 0.6418, 0.0130, 0.1654, 0.0071, 0.0519, 0.0166, 0.0170],
       device='cuda:0')
img 10053.jpg, class_prob is: tensor([0.0083, 0.3553, 0.0077, 0.0558, 0.0891, 0.0594, 0.3704, 0.0235, 0.0305],
       device='cuda:0')
img 10697.jpg, class_prob is: tensor([0.0203, 0.0668, 0.6432, 0.0108, 0.1396, 0.0080, 0.0292, 0.0236, 0.0586],
       device='cuda:0')
img 10001.jpg, class_prob is: tensor([0.0400, 0.0805, 0.1437, 0.1071, 0.1454, 0.0374, 0.3614, 0.0459, 0.0387],
       device='cuda:0')
img 10509.jpg, class_prob is: tensor([0.0083, 0.0296, 0.9042, 0.0037, 0.0149, 0.0076, 0.0152, 0.0071, 0.0093],
       device='cuda:0')
img 100153.jpg, class_prob is: tensor([0.1175, 0.1032, 0.6336, 0.0068, 0.0617, 0.0087, 0.0295, 0.0194, 0.0195],
       device='cuda:0')
img 100280.jpg, class_prob is: tensor([0.0079, 0.5634, 0.1119, 0.0226, 0.0699, 0.0133, 0.1406, 0.0187, 0.0518],
       device='cuda:0')
img 194.jpg, class_prob is: tensor([0.0156, 0.0336, 0.1321, 0.0290, 0.4090, 0.0443, 0.1912, 0.0355, 0.1096],
       device='cuda:0')
img 100088.jpg, class_prob is: tensor([0.0150, 0.1104, 0.2590, 0.1620, 0.0683, 0.0826, 0.2287, 0.0478, 0.0262],
       device='cuda:0')
img 76.jpg, class_prob is: tensor([0.0219, 0.0049, 0.0389, 0.1124, 0.1977, 0.0165, 0.5528, 0.0421, 0.0127],
       device='cuda:0')
img 10577.jpg, class_prob is: tensor([0.0219, 0.1587, 0.6273, 0.0056, 0.0379, 0.0022, 0.0369, 0.0511, 0.0584],
       device='cuda:0')
img 10017.jpg, class_prob is: tensor([0.0438, 0.3121, 0.2318, 0.0161, 0.1514, 0.0423, 0.1211, 0.0112, 0.0701],
       device='cuda:0')
img 10479.jpg, class_prob is: tensor([0.0058, 0.4157, 0.2131, 0.0400, 0.0546, 0.1839, 0.0434, 0.0145, 0.0292],
       device='cuda:0')
img 10604.jpg, class_prob is: tensor([0.0056, 0.0993, 0.4794, 0.0245, 0.0576, 0.0898, 0.1823, 0.0163, 0.0453],
       device='cuda:0')
img 10285.jpg, class_prob is: tensor([0.0112, 0.3222, 0.4843, 0.0180, 0.0692, 0.0371, 0.0162, 0.0214, 0.0204],
       device='cuda:0')
img 90202.jpg, class_prob is: tensor([0.1037, 0.0657, 0.1044, 0.0224, 0.0853, 0.0015, 0.5637, 0.0370, 0.0163],
       device='cuda:0')
img 10160.jpg, class_prob is: tensor([0.0244, 0.1033, 0.0709, 0.0644, 0.3132, 0.0861, 0.1217, 0.0413, 0.1747],
       device='cuda:0')
img 100198.jpg, class_prob is: tensor([0.0153, 0.2387, 0.2866, 0.0096, 0.0584, 0.0060, 0.2811, 0.0601, 0.0442],
       device='cuda:0')
img 100034.jpg, class_prob is: tensor([0.0070, 0.0240, 0.7375, 0.0332, 0.1346, 0.0126, 0.0248, 0.0218, 0.0044],
       device='cuda:0')
img 10331.jpg, class_prob is: tensor([0.1055, 0.0490, 0.1435, 0.0637, 0.1599, 0.0504, 0.3353, 0.0253, 0.0675],
       device='cuda:0')
img 10214.jpg, class_prob is: tensor([0.0126, 0.5641, 0.0477, 0.0295, 0.0540, 0.0945, 0.0506, 0.0165, 0.1305],
       device='cuda:0')
img 240.jpg, class_prob is: tensor([0.0088, 0.4677, 0.0158, 0.0085, 0.0853, 0.0041, 0.0192, 0.0207, 0.3699],
       device='cuda:0')
img 10410.jpg, class_prob is: tensor([0.0368, 0.0628, 0.3392, 0.0753, 0.1717, 0.1086, 0.1817, 0.0195, 0.0044],
       device='cuda:0')
img 10236.jpg, class_prob is: tensor([0.0050, 0.0423, 0.9172, 0.0018, 0.0114, 0.0092, 0.0022, 0.0043, 0.0067],
       device='cuda:0')
img 42.jpg, class_prob is: tensor([0.0046, 0.0063, 0.0165, 0.0250, 0.0251, 0.0035, 0.8946, 0.0142, 0.0102],
       device='cuda:0')
img 90007.jpg, class_prob is: tensor([0.0287, 0.0517, 0.7361, 0.0202, 0.0359, 0.0685, 0.0261, 0.0238, 0.0089],
       device='cuda:0')
img 100373.jpg, class_prob is: tensor([0.0079, 0.0319, 0.5325, 0.0326, 0.2785, 0.0101, 0.0664, 0.0291, 0.0109],
       device='cuda:0')
img 100146.jpg, class_prob is: tensor([0.0297, 0.3012, 0.0568, 0.0731, 0.1679, 0.0060, 0.1998, 0.0690, 0.0966],
       device='cuda:0')
img 10543.jpg, class_prob is: tensor([0.0215, 0.2594, 0.0614, 0.0112, 0.0428, 0.0802, 0.3085, 0.0083, 0.2068],
       device='cuda:0')
img 64.jpg, class_prob is: tensor([0.0365, 0.0918, 0.1058, 0.0447, 0.2888, 0.0066, 0.3342, 0.0651, 0.0265],
       device='cuda:0')
img 100080.jpg, class_prob is: tensor([0.0074, 0.0326, 0.9456, 0.0017, 0.0026, 0.0025, 0.0031, 0.0026, 0.0019],
       device='cuda:0')
img 10388.jpg, class_prob is: tensor([0.0029, 0.0464, 0.0259, 0.0762, 0.1816, 0.6206, 0.0306, 0.0087, 0.0072],
       device='cuda:0')
img 10238.jpg, class_prob is: tensor([0.0139, 0.0817, 0.0415, 0.0073, 0.0454, 0.0025, 0.3419, 0.0251, 0.4407],
       device='cuda:0')
img 10085.jpg, class_prob is: tensor([0.0110, 0.0998, 0.8299, 0.0025, 0.0164, 0.0122, 0.0017, 0.0068, 0.0197],
       device='cuda:0')
img 10392.jpg, class_prob is: tensor([0.0074, 0.0144, 0.0278, 0.0164, 0.0157, 0.0028, 0.8931, 0.0142, 0.0081],
       device='cuda:0')
img 221.jpg, class_prob is: tensor([0.0228, 0.0883, 0.0344, 0.0408, 0.2939, 0.0865, 0.1654, 0.0312, 0.2367],
       device='cuda:0')
img 100222.jpg, class_prob is: tensor([0.0131, 0.1702, 0.4185, 0.0532, 0.1874, 0.0181, 0.1034, 0.0271, 0.0089],
       device='cuda:0')
img 10484.jpg, class_prob is: tensor([0.0190, 0.2676, 0.0652, 0.0931, 0.2483, 0.0554, 0.1531, 0.0608, 0.0376],
       device='cuda:0')
img 100263.jpg, class_prob is: tensor([0.0513, 0.1218, 0.3362, 0.0292, 0.1180, 0.0055, 0.2519, 0.0687, 0.0175],
       device='cuda:0')
img 100379.jpg, class_prob is: tensor([0.0060, 0.1531, 0.0784, 0.0539, 0.0652, 0.4153, 0.1938, 0.0075, 0.0268],
       device='cuda:0')
img 10058.jpg, class_prob is: tensor([0.0217, 0.0225, 0.8157, 0.0028, 0.0259, 0.0020, 0.0130, 0.0110, 0.0854],
       device='cuda:0')
img 44.jpg, class_prob is: tensor([0.0304, 0.3533, 0.1229, 0.0171, 0.0375, 0.1888, 0.0284, 0.0253, 0.1963],
       device='cuda:0')
img 356.jpg, class_prob is: tensor([0.0118, 0.0481, 0.7284, 0.0614, 0.0839, 0.0207, 0.0144, 0.0287, 0.0026],
       device='cuda:0')
img 10184.jpg, class_prob is: tensor([0.0180, 0.0630, 0.0571, 0.0114, 0.0653, 0.0024, 0.7470, 0.0141, 0.0217],
       device='cuda:0')
img 90038.jpg, class_prob is: tensor([0.0130, 0.0851, 0.1648, 0.0328, 0.2578, 0.0216, 0.2993, 0.0263, 0.0994],
       device='cuda:0')
img 100293.jpg, class_prob is: tensor([0.0163, 0.1146, 0.3101, 0.0808, 0.1430, 0.0223, 0.2678, 0.0280, 0.0172],
       device='cuda:0')
img 164.jpg, class_prob is: tensor([0.0387, 0.2200, 0.0386, 0.0152, 0.0999, 0.0106, 0.4888, 0.0199, 0.0684],
       device='cuda:0')
img 10213.jpg, class_prob is: tensor([0.0772, 0.1158, 0.5259, 0.0054, 0.0325, 0.0158, 0.0499, 0.0247, 0.1527],
       device='cuda:0')
img 10596.jpg, class_prob is: tensor([0.0039, 0.0704, 0.3960, 0.1109, 0.2341, 0.1079, 0.0249, 0.0409, 0.0109],
       device='cuda:0')
img 90097.jpg, class_prob is: tensor([0.0285, 0.1540, 0.0092, 0.0161, 0.1429, 0.0141, 0.5176, 0.0336, 0.0839],
       device='cuda:0')
img 10701.jpg, class_prob is: tensor([0.0345, 0.0778, 0.8156, 0.0084, 0.0159, 0.0194, 0.0072, 0.0050, 0.0160],
       device='cuda:0')
img 332.jpg, class_prob is: tensor([0.0041, 0.0296, 0.7350, 0.0356, 0.0441, 0.0344, 0.0867, 0.0198, 0.0108],
       device='cuda:0')
img 358.jpg, class_prob is: tensor([0.0130, 0.1856, 0.1176, 0.0475, 0.3586, 0.0123, 0.1686, 0.0506, 0.0462],
       device='cuda:0')
img 2.jpg, class_prob is: tensor([0.0856, 0.1166, 0.0873, 0.1253, 0.1327, 0.1465, 0.2541, 0.0437, 0.0083],
       device='cuda:0')
img 10321.jpg, class_prob is: tensor([0.0080, 0.0503, 0.8827, 0.0031, 0.0210, 0.0095, 0.0060, 0.0038, 0.0156],
       device='cuda:0')
img 100366.jpg, class_prob is: tensor([0.0052, 0.1807, 0.1158, 0.0151, 0.1243, 0.0334, 0.3535, 0.0323, 0.1396],
       device='cuda:0')
img 10389.jpg, class_prob is: tensor([0.0405, 0.1310, 0.1676, 0.0828, 0.2095, 0.0051, 0.1929, 0.0779, 0.0927],
       device='cuda:0')
img 10578.jpg, class_prob is: tensor([0.0149, 0.0446, 0.8703, 0.0042, 0.0121, 0.0011, 0.0149, 0.0032, 0.0348],
       device='cuda:0')
img 100242.jpg, class_prob is: tensor([0.0066, 0.2517, 0.0405, 0.0711, 0.1588, 0.1981, 0.2039, 0.0347, 0.0347],
       device='cuda:0')
img 10060.jpg, class_prob is: tensor([0.0459, 0.0720, 0.1534, 0.0360, 0.2780, 0.0389, 0.2456, 0.0995, 0.0308],
       device='cuda:0')
class 1 --> accuracy: 0.00, correct predictions: 0, all: 2
class 2 --> accuracy: 30.77, correct predictions: 4, all: 13
class 3 --> accuracy: 84.62, correct predictions: 22, all: 26
class 4 --> accuracy: 0.00, correct predictions: 0, all: 4
class 5 --> accuracy: 30.00, correct predictions: 3, all: 10
class 6 --> accuracy: 40.00, correct predictions: 2, all: 5
class 7 --> accuracy: 73.33, correct predictions: 11, all: 15
class 8 --> accuracy: 0.00, correct predictions: 0, all: 2
class 9 --> accuracy: 16.67, correct predictions: 1, all: 6
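Since the image paths and the probability rows are collected batch by batch in the same loop, row i of class_probs lines up with flattened_im_paths[i]. If you also want the predicted class next to each image name, a small extension could look like this (a sketch using the variables defined above, keeping the same 1-based class numbering):

# sanity check: one probability row per collected path
assert class_probs.size(0) == len(flattened_im_paths)

top_probs, top_classes = class_probs.max(dim=1)
for path, prob, cls in zip(flattened_im_paths, top_probs, top_classes):
    print('img {}, predicted class {:d} with probability {:.2f}'.format(
        ntpath.basename(path), cls.item() + 1, prob.item()))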

I have the same requirement, but with your customised dataset I couldn't figure out how to print the image name so I can be sure I am getting the class probabilities for the related image. Do you have any idea how I can print the image name?
This is a snippet of the dataset:
import glob

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths):  # store the paths, set up transforms and the label mapping
        self.image_paths = image_paths
        self.target_paths = target_paths
        self.transforms = transforms.ToTensor()
        self.mapping = {
            0: 0,
            255: 1
        }

    def mask_to_class(self, mask):
        # map the raw mask values (0, 255) to class indices (0, 1)
        for k in self.mapping:
            mask[mask == k] = self.mapping[k]
        return mask
    
    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = image.convert('L')
        t_image = self.transforms(t_image)
        mask = torch.from_numpy(np.array(mask, dtype=np.uint8)) # this is for my dataset(lv)
        mask = self.mask_to_class(mask)
        mask = mask.long()
        return t_image, mask

    def __len__(self):  # return the number of samples

        return len(self.image_paths)



folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\imagesResized\\*.png")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\labelsResized\\*.png")

# split these path using a certain percentage
len_data = len(folder_data)
print("count of dataset: ", len_data)
train_size = 0.6

train_image_paths = folder_data[:int(len_data*train_size)]
print("count of train images is: ", len(train_image_paths)) # output is 55 image for train
test_image_paths = folder_data[int(len_data*train_size):]
print("count of test images is: ", len(test_image_paths)) # output is 37 image for test

train_mask_paths = folder_mask[:int(len_data*train_size)]
test_mask_paths = folder_mask[int(len_data*train_size):]


train_dataset = CustomDataset(train_image_paths, train_mask_paths)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)

test_dataset = CustomDataset(test_image_paths, test_mask_paths)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)

and this is the snippet where I test with the test_loader:


import codecs
import json

def test():
    model_load = torch.load('model.pth')

    # test the model
    model_load.eval()
    total = 0
    test_loss = 0
    correct = 0
    count = 0

    # iterate through the test dataset
    for ii, data in enumerate(test_loader):
        t_image, mask = data
        #print(t_image.shape) # torch.Size([1, 1, 240, 320])
        t_image, mask = t_image.to(device), mask.to(device)
        with torch.no_grad():
            outputs = model_load(t_image)
            #print(outputs.shape) # torch.Size([1, 2, 240, 320])
            test_loss += criterion(outputs, mask).item() / len(test_loader)

            probs = torch.exp(outputs)
            probs_numpy = probs.detach().cpu().numpy()
            b = probs_numpy.tolist()
            file_path = "/Neda/Pytorch/U-net/output.json"  # your path variable
            # save the probabilities in .json format
            json.dump(b, codecs.open(file_path, 'w', encoding='utf-8'),
                      separators=(',', ':'), sort_keys=True, indent=4)

            _, predicted = torch.max(outputs.data, 1)

            total += mask.nelement()
            correct += predicted.eq(mask.data).sum().item()
            accuracy = 100 * correct / total

            count += 1
            print(count, "Test Loss: {:.3f}".format(test_loss), "Test Accuracy: %d %%" % (accuracy))

I can now print the image name by modifying the print statement in the test function:

print(count, "Test Loss: {:.3f}".format(test_loss), "Test Accuracy: %d %%" % (accuracy), os.path.basename(os.path.normpath(test_image_paths[ii])))

I would recommend sticking to the other approach, i.e. returning the image paths along with the data and target as shown in @Mona_Jalal’s code snippet.
The ii index in your test_loader loop does not necessarily correspond to the index which is used in your __getitem__, so your code might yield the wrong paths, e.g. if you use shuffle=True in your test_loader.

You could change your __getitem__(self, index) to this:

def __getitem__(self, index):
    ...
    return t_image, mask, self.image_paths[index]
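Then the test loop can unpack the path directly instead of indexing into test_image_paths. For example (a minimal sketch reusing the names from the test() function above, i.e. model_load, device, and the batch_size=1 test_loader; the path arrives as a tuple/list of strings from the default collate function):

for ii, (t_image, mask, img_path) in enumerate(test_loader):
    t_image, mask = t_image.to(device), mask.to(device)
    with torch.no_grad():
        outputs = model_load(t_image)
    # img_path holds a single entry here because batch_size=1
    print(ii, os.path.basename(img_path[0]))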

@ptrblck Thanks a lot for your advice. I also returned the target path from __getitem__ so I can plot the target as well when I load the test_loader.


Hi, I get a BrokenPipeError.

How can I get something like your results?

Thanks in advance for the help.