I have created a Map-style dataset class to load my data. It works fine when training and testing.
However, I have implemented a callback during training to evaluate my model on some images. The callback itself executes normally, but when training resumes, the PyTorch DataLoader gets stuck in an infinite loop calling self._data_queue.get(timeout=timeout) on line 779 of PyTorch's dataloader.py.
This happens even when the callback uses data that was never seen in training.
While debugging this, I found that the infinite loop does not occur if I convert my dataset into a list before passing it to the DataLoader:
train_dataset = [train_dataset[i] for i in range(len(train_dataset))]
I could use this workaround, but it adds a significant amount of time per epoch, especially when re-augmenting the dataset for training. Is there something wrong with my dataset class, or a way to modify it so that this issue doesn't occur?
class Dataset(BaseDataset):
    """Map-style dataset: read .npz samples, apply augmentation and
    preprocessing transformations, and return a random (or fixed) crop.

    Each file in ``dir`` is expected to be an .npz archive with keys
    'image_colors' (BGR image), 'points_3d_world', 'points_3d_camera',
    'mask', 'points_3d_sphere', 'T_blender' and 'R_blender'.

    Args:
        dir: directory containing one .npz file per sample.
        augmentation: optional albumentations-style callable taking
            ``image=..., masks=[...]`` and returning a dict with keys
            'image' and 'masks'. NOTE: cropping is only applied inside
            this branch, matching the original pipeline — with no
            augmentation the full-size arrays are returned.
        preprocessing: optional callable with the same convention.
        outputs: unused; kept for interface compatibility.
        single_image_training: if True, always use a deterministic
            centre crop (single-image overfitting runs).
        fixed_test_dataset: if True, use the hard-coded per-index crop
            positions below (deterministic test set).
        crop_h: crop height in pixels.
        crop_w: crop width in pixels.
    """

    OUTPUTS = ['Q', 'x', 'y', 'z', 'A', 'NA']

    # Deterministic crop positions for the fixed test dataset, indexed by
    # sample index. Hoisted to class level so the lists are not rebuilt on
    # every __getitem__ call.
    _FIXED_X_CROP = [214, 6, 443, 189, 148, 336, 127, 26, 19, 152, 81, 447, 9, 468, 309, 58, 401, 419, 159, 23, 68, 162, 66, 445, 432, 294, 36, 448, 120, 180, 353, 165, 235, 504, 311, 314, 149, 3, 11, 194, 325, 415, 148, 71, 289, 269, 266, 368, 240, 82, 396, 276, 349, 285, 273, 136, 384, 409, 352, 194, 144, 329, 344, 437, 266, 498, 200, 421, 157, 55, 199, 70, 324, 8, 478, 84, 205, 273, 432, 411, 221, 161, 411, 142, 169, 156, 166, 194, 300, 363, 399, 217, 134, 181, 450, 245, 225, 353, 511, 97]
    _FIXED_Y_CROP = [26, 30, 16, 14, 5, 6, 11, 16, 11, 7, 4, 13, 31, 12, 2, 11, 26, 1, 15, 4, 23, 0, 10, 7, 3, 24, 4, 10, 29, 8, 30, 3, 27, 8, 25, 1, 24, 19, 14, 10, 9, 30, 29, 23, 10, 15, 12, 8, 26, 0, 13, 17, 15, 29, 8, 31, 10, 12, 29, 1, 16, 25, 26, 14, 23, 8, 24, 8, 10, 8, 28, 17, 28, 30, 0, 28, 22, 2, 9, 13, 6, 2, 8, 7, 16, 31, 30, 11, 25, 9, 31, 12, 24, 25, 17, 23, 20, 14, 28, 8]

    def __init__(
        self,
        dir,  # kept as-is (shadows the builtin) to preserve the keyword interface
        augmentation=None,
        preprocessing=None,
        outputs=None,
        single_image_training=False,
        fixed_test_dataset=False,
        crop_h=224,
        crop_w=224
    ):
        self.ids = os.listdir(dir)
        self.images_fps = [os.path.join(dir, image_id) for image_id in self.ids]
        self.augmentation = augmentation
        self.preprocessing = preprocessing
        self.single_image_training = single_image_training
        self.fixed_test_dataset = fixed_test_dataset
        self.crop_h = crop_h
        self.crop_w = crop_w

    def __getitem__(self, i):
        # np.load on an .npz returns a lazily-open archive; close it promptly
        # so DataLoader worker processes don't leak file descriptors.
        with np.load(self.images_fps[i]) as data:
            # read data, converting from BGR to RGB
            image = cv2.cvtColor(data['image_colors'], cv2.COLOR_BGR2RGB)
            points_3d_world = np.float64(data['points_3d_world'])
            points_3d_camera = np.float64(data['points_3d_camera'])
            coordinates_available = data['mask']
            bearing_vectors = np.float64(data['points_3d_sphere'])
            camera_position = data['T_blender']
            camera_rotation = data['R_blender']

        # Items that need the "mask" (non-photometric) augmentation:
        # https://albumentations.ai/docs/getting_started/mask_augmentation/
        masks = [points_3d_world, points_3d_camera, coordinates_available, bearing_vectors]

        img_h, img_w = np.shape(image)[0], np.shape(image)[1]
        # Random crop position. BUG FIX: the original sampled x_crop in
        # [0, img_w - 1] without leaving room for crop_w, so a crop could run
        # past the right edge and come back narrower than crop_w. Ragged
        # sample shapes crash the default collate in DataLoader workers,
        # which then hang the main process in _data_queue.get(). Mirror the
        # y_crop formula on the x axis.
        x_crop = random.randint(0, img_w - 1 - self.crop_w)
        y_crop = random.randint(0, img_h - 1 - self.crop_h)
        # Override the random crop position when training on a single image
        # or when using a deterministic test dataset.
        if self.single_image_training is True:
            # Centre crop; x now subtracts crop_w, consistent with y.
            x_crop = (img_w - 1 - self.crop_w) // 2
            y_crop = (img_h - 1 - self.crop_h) // 2
        elif self.fixed_test_dataset is True:
            x_crop = self._FIXED_X_CROP[i]
            y_crop = self._FIXED_Y_CROP[i]

        # Apply augmentations, then crop image and all masks identically.
        if self.augmentation:
            sample = self.augmentation(image=image, masks=masks)
            image, masks = sample['image'], sample['masks']
            image = image[y_crop:y_crop + self.crop_h, x_crop:x_crop + self.crop_w]
            masks = [m[y_crop:y_crop + self.crop_h, x_crop:x_crop + self.crop_w]
                     for m in masks]

        # Apply preprocessing.
        if self.preprocessing:
            sample = self.preprocessing(image=image, masks=masks)
            image, masks = sample['image'], sample['masks']

        points_3d_world, points_3d_camera, coordinates_available, bearing_vectors = masks
        # Build a two-channel availability mask [not-available, available].
        # Concatenation is along axis 0 — presumably the masks are
        # channel-first at this point; TODO confirm against preprocessing.
        one_minus_coordinates_available = np.ones(np.shape(coordinates_available)) - coordinates_available
        coordinates_available = np.concatenate(
            [one_minus_coordinates_available, coordinates_available],
            axis=0).astype('float')

        return (image, points_3d_world, points_3d_camera, coordinates_available,
                bearing_vectors, camera_position, camera_rotation, x_crop, y_crop)

    def __len__(self):
        # One sample per file found in the dataset directory.
        return len(self.ids)
My code is a modified version of the code here: