ValueError: Expected x_max for bbox (tensor(0.9297, dtype=torch.float64), tensor(0.5198, dtype=torch.float64), tensor(1.0010, dtype=torch.float64), tensor(0.7504, dtype=torch.float64), tensor(1)) to be in the range [0.0, 1.0], got 1.0009765625

I got this error during flip augmentation in PyTorch. Here are my dataset and augmentation options:

import glob

import albumentations as A
import cv2
import numpy as np
import pandas as pd
import torch
from albumentations.pytorch import ToTensor
from torch.utils.data import Dataset


class BalloonDataset(Dataset):
    def __init__(self, root, split, dataset_num, input_size, transforms=None):
        self.root = root
        self.split = split.upper()
        self.num = dataset_num
        if self.split == 'TRAIN':
            self.csv_path = '/home/jake/PycharmProjects/balloon_detection/ballon_datasets/via_region_data_{}_{}.csv'.format(self.split, self.num)
        else:
            self.csv_path = '/home/jake/PycharmProjects/balloon_detection/ballon_datasets/via_region_data_{}_70.csv'.format(self.split)
        self.input_size = input_size
        self.transforms = transforms

        self.images = glob.glob(self.root + self.split + '_' + str(self.num) + '/*')

        # CSV with one row per box: '#filename', x, y, w, h
        df = pd.read_csv(self.csv_path)
        self.df = process_bbox(df)  # helper defined elsewhere in my project


    def __getitem__(self, idx):
        image_path = self.images[idx]
        image_arr = cv2.imread(image_path, cv2.IMREAD_COLOR)
        image_arr = cv2.cvtColor(image_arr, cv2.COLOR_BGR2RGB).astype(np.float32)
        image_arr /= 255.0

        height, width = image_arr.shape[:2]

        # key into the CSV '#filename' column, e.g. 'balloon1.jpg'
        image_id = image_path.split('/')[-1].split('.')[0] + '.jpg'

        point = self.df[self.df['#filename'] == image_id]
        # convert (x, y, w, h) rows to pascal_voc (x_min, y_min, x_max, y_max)
        boxes = point[['x', 'y', 'w', 'h']].values
        boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
        boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
        labels = torch.ones((point.shape[0],), dtype=torch.int64)

        # suppose all instances are not crowd
        iscrowd = torch.zeros((point.shape[0],), dtype=torch.int64)

        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        area = torch.as_tensor(area, dtype=torch.float32)

        # resize the image to input_size x input_size (helper defined elsewhere)
        image_arr = reshape_image(image_arr, self.input_size)

        # rescale the boxes from the original image size to input_size x input_size
        old_dims = torch.FloatTensor([width, height, width, height]).unsqueeze(0)
        new_dims = torch.FloatTensor([self.input_size] * 4).unsqueeze(0)
        boxes = boxes / old_dims * new_dims

        target = {}
        target['boxes'] = boxes
        target['labels'] = labels
        target['area'] = area
        target['iscrowd'] = iscrowd

        image = image_arr
        if self.transforms:
            sample = {
                'image': image_arr,
                'bboxes': target['boxes'],
                'labels': labels,
            }
            sample = self.transforms(**sample)
            image = sample['image']
            # rebuild an (N, 4) tensor from the list of transformed boxes
            target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)

        return image, target, image_id

    def __len__(self):
        # print('__len__')
        return len(self.images)
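process_bbox and reshape_image are small helpers from my project that I haven't pasted. From how it is used above, reshape_image just resizes the array to a square of side input_size; a plausible minimal stand-in (not my exact code) would be:

def reshape_image(image_arr, input_size):
    # Resize to a square of side input_size; the boxes are rescaled
    # separately via old_dims/new_dims in __getitem__.
    return cv2.resize(image_arr, (input_size, input_size))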


def collate_fn(batch):
    return tuple(zip(*batch))
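The custom collate_fn is needed because each image can have a different number of boxes, so the targets cannot be stacked into one tensor. Schematically, it turns a batch of (image, target, image_id) triples into three parallel tuples:

# Schematic example of what tuple(zip(*batch)) does to a detection batch:
batch = [
    ('img0', {'boxes': '...'}, 'id0'),
    ('img1', {'boxes': '...'}, 'id1'),
]
images, targets, image_ids = collate_fn(batch)
assert images == ('img0', 'img1')
assert targets == ({'boxes': '...'}, {'boxes': '...'})
assert image_ids == ('id0', 'id1')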


# Albumentations
def get_train_transform():
    return A.Compose([
        A.Flip(p=0.5),    # pass p by keyword; the first positional argument is always_apply
        A.ToGray(p=0.5),
        ToTensor(),
    ], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})


def get_valid_transform():
    return A.Compose([
        ToTensor(),
    ], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
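For context, the dataset, transforms, and collate_fn are wired together roughly like this (the root path, dataset_num, input_size, and batch size below are illustrative placeholders, not my exact values):

from torch.utils.data import DataLoader

train_dataset = BalloonDataset(
    root='/home/jake/PycharmProjects/balloon_detection/ballon_datasets/',  # placeholder
    split='train',
    dataset_num=70,     # placeholder
    input_size=1024,    # placeholder
    transforms=get_train_transform(),
)
train_loader = DataLoader(
    train_dataset,
    batch_size=2,       # placeholder
    shuffle=True,
    collate_fn=collate_fn,  # keep variable-size targets as per-image tuples
)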

When I enable the flip option, I get this error:

File "/home/jake/venv/lib/python3.7/site-packages/albumentations/augmentations/bbox_utils.py", line 330, in check_bbox
    "to be in the range [0.0, 1.0], got {value}.".format(bbox=bbox, name=name, value=value)
ValueError: Expected x_max for bbox (tensor(0.9297, dtype=torch.float64), tensor(0.5198, dtype=torch.float64), tensor(1.0010, dtype=torch.float64), tensor(0.7504, dtype=torch.float64), tensor(1)) to be in the range [0.0, 1.0], got 1.0009765625.

Hi,

It looks like the issue is that one of your bounding boxes extends outside the image: the normalized x_max is 1.0010, just over 1.0. Do you know why that happens? Either way, you might want to clip the values between 0 and 1 to avoid the error.
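For what it's worth, 1.0009765625 × 1024 = 1025 exactly, so if your input_size happens to be 1024 (an assumption, the post doesn't say), the rescaled x_max landed one pixel past the right edge before Albumentations normalized it. A minimal sketch of clipping inside __getitem__, right after the rescaling step:

# Clamp the rescaled pascal_voc boxes into [0, input_size] before handing
# them to Albumentations, so small overshoots can't trip check_bbox.
boxes = boxes / old_dims * new_dims
boxes = boxes.clamp(min=0, max=self.input_size)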

Adding my solution (it might help someone else).

If you are sure there is no problem in your bounding boxes and it is only a floating-point precision issue, you can add this block to the function:

def check_bbox(bbox):
    """Check if bbox boundaries are in range [0, 1] and minimums are less than maximums."""
    # my added block: clamp tiny floating-point overshoots into [0, 1]
    bbox = list(bbox)
    for i in range(4):
        if bbox[i] < 0:
            bbox[i] = 0
        elif bbox[i] > 1:
            bbox[i] = 1
    bbox = tuple(bbox)
    # end of block
    # rest of the function body as it is

The file to edit is:

/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py

Where should this code be added?

@Sang, I don't know whether you are still following this, but for later readers I should mention it: you can put the code given by @abidKiller in your_path_to_packages/albumentations/core/bbox_utils.py.

For example, in my case it is /home/doer/anaconda3/envs/venvMmDet/lib/python3.8/site-packages/albumentations/core/bbox_utils.py.
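If you would rather not edit files inside site-packages at all, a runtime monkey-patch may work instead. This is only a sketch, not an official Albumentations API: it assumes your version still exposes check_bbox in albumentations.augmentations.bbox_utils, as in the traceback above (newer versions moved it to albumentations.core.bbox_utils):

import albumentations.augmentations.bbox_utils as bbox_utils

_original_check_bbox = bbox_utils.check_bbox

def _clipping_check_bbox(bbox):
    # Clamp tiny floating-point overshoots into [0, 1], then run the
    # library's original validation on the clipped coordinates.
    coords = tuple(min(max(float(c), 0.0), 1.0) for c in bbox[:4])
    return _original_check_bbox(coords + tuple(bbox[4:]))

# Rebind the module attribute; functions in that module resolve the name
# at call time, so validation goes through the patched version.
bbox_utils.check_bbox = _clipping_check_bbox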