I need urgent help, I am getting the following error: IndexError: index 1281 is out of bounds for dimension 0 with size 7

def __getitem__(self, index):
        label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
        boxes = []
        class_label = 0
        with open(label_path) as f:
            for label in f.readlines():
                items = label.replace("\n", "").split()
                class_label = 0
                for i in range(len(self.class_list)):
                    if items[0] == self.class_list[i]:
                        class_label = i
                        break
                x = (float(items[6]) + float(items[4])) // 2
                y = (float(items[7]) + float(items[5])) // 2
                width = float(items[6]) - float(items[4])
                height = float(items[7]) - float(items[5])
                boxes.append([class_label, x, y, width, height])

        img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
        image = Image.open(img_path)
        boxes = torch.tensor(boxes)

        if self.transform:
            # image = self.transform(image)
            image, boxes = self.transform(image, boxes)

        # Convert To Cells
        label_matrix = torch.zeros((self.S, self.S, self.C + 5 * self.B))
        for box in boxes:
            class_label, x, y, width, height = box.tolist()
            class_label = int(class_label)

            # i,j represents the cell row and cell column
            i, j = int(self.S * y), int(self.S * x)
            x_cell, y_cell = self.S * x - j, self.S * y - i

            """
            Calculating the width and height of cell of bounding box,
            relative to the cell is done by the following, with
            width as the example:
            
            width_pixels = (width*self.image_width)
            cell_pixels = (self.image_width)
            
            Then to find the width relative to the cell is simply:
            width_pixels/cell_pixels, simplification leads to the
            formulas below.
            """
            width_cell, height_cell = (
                width * self.S,
                height * self.S,
            )

            # If no object already found for specific cell i,j
            # Note: This means we restrict to ONE object
            # per cell!
            
            if label_matrix[i, j, 9] == 0:
                # Set that there exists an object
                label_matrix[i, j, 9] = 1

                # Box coordinates
                box_coordinates = torch.tensor(
                    [x_cell, y_cell, width_cell, height_cell]
                )

                label_matrix[i, j, 10:14] = box_coordinates

                # Set one hot encoding for class_label
                label_matrix[i, j, class_label] = 1

        return image, label_matrix

IndexError                                Traceback (most recent call last)
/tmp/ipykernel_34/3223299113.py in <module>
    120
    121 if __name__ == "__main__":
--> 122     main()

/tmp/ipykernel_34/3223299113.py in main()
     98         model,
     99         iou_threshold=0.5,
--> 100         threshold=0.4
    101     )
    102

/tmp/ipykernel_34/4116429681.py in get_bboxes(loader, model, iou_threshold, threshold, pred_format, box_format, device)
     14     model.eval()
     15     train_idx = 0
---> 16     for batch_idx, (x, labels) in enumerate(loader):
     17         # print(x.shape)
     18         x = x.to(device)

/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
    519         if self._sampler_iter is None:
    520             self._reset()
--> 521         data = self._next_data()
    522         self._num_yielded += 1
    523         if self._dataset_kind == _DatasetKind.Iterable and \

/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _next_data(self)
    559     def _next_data(self):
    560         index = self._next_index()  # may raise StopIteration
--> 561         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    562         if self._pin_memory:
    563             data = _utils.pin_memory.pin_memory(data)

/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
     42     def fetch(self, possibly_batched_index):
     43         if self.auto_collation:
---> 44             data = [self.dataset[idx] for idx in possibly_batched_index]
     45         else:
     46             data = self.dataset[possibly_batched_index]

/opt/conda/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in <listcomp>(.0)
     42     def fetch(self, possibly_batched_index):
     43     if self.auto_collation:
---> 44             data = [self.dataset[idx] for idx in possibly_batched_index]
     45         else:
     46             data = self.dataset[possibly_batched_index]

/tmp/ipykernel_34/1835178109.py in __getitem__(self, index)
     73             # per cell!
     74
---> 75             if label_matrix[i, j, 9] == 0:
     76                 # Set that there exists an object
     77                 label_matrix[i, j, 9] = 1

IndexError: index 1281 is out of bounds for dimension 0 with size 7

i and j seem to be created here:

i, j = int(self.S * y), int(self.S * x)

and used to index label_matrix:

if label_matrix[i, j, 9] == 0:

which raises the IndexError, since i apparently has a value of 1281 while label_matrix only has a size of 7 in dim0.
Make sure i and j contain valid values to index label_matrix.
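
Since label_matrix is created with shape (self.S, self.S, self.C + 5 * self.B), both i and j must lie in [0, self.S - 1], which only holds if x and y are normalized to [0, 1) before being multiplied by self.S. In your __getitem__ the box centers are computed directly from what look like pixel coordinates in the label file (items[4] to items[7]), using floor division //, so a value like 1281 is expected. Below is a minimal sketch of how the parsing could normalize the boxes instead; the helper name parse_box, the KITTI-style column layout, and the example image size are assumptions on my side, not taken from your code:

def parse_box(items, img_w, img_h, class_idx):
    # items[4:8] are assumed to be x_min, y_min, x_max, y_max in pixels
    # (KITTI-style label line); adjust the indices to your actual format.
    x_min, y_min, x_max, y_max = (float(items[k]) for k in (4, 5, 6, 7))

    # True division (not //) and normalization by the image size,
    # so that x, y, width, height all end up in [0, 1].
    x = (x_min + x_max) / 2 / img_w
    y = (y_min + y_max) / 2 / img_h
    width = (x_max - x_min) / img_w
    height = (y_max - y_min) / img_h
    return [class_idx, x, y, width, height]

# Example with made-up numbers: a 1242x375 image, box from (400, 150) to (600, 250)
items = ["Car", "0", "0", "0", "400", "150", "600", "250"]
box = parse_box(items, img_w=1242, img_h=375, class_idx=0)

S = 7
i, j = int(S * box[2]), int(S * box[1])   # i == 3, j == 2
assert 0 <= i < S and 0 <= j < S

In the dataset itself the image size could come from image.size after Image.open (PIL returns (width, height)), which would mean opening the image before parsing the labels. Either way, once x and y are fractions of the image size, int(self.S * y) and int(self.S * x) stay inside the 7x7 grid.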