TypeError: len() of a 0-d tensor while training a Faster R-CNN model

I'm trying to train a Faster R-CNN object detection model, but I'm getting the error below. What could be causing it?

The code where the error is raised (in __getitem__ of datasets.py):

        # apply the image transforms
        if self.transforms:
            sample = self.transforms(image=image_resized,
                                     bboxes=target['boxes'],
                                     labels=labels)
            image_resized = sample['image']
            target['boxes'] = torch.Tensor(sample['bboxes'])

        return image_resized, target

The full traceback:

Traceback (most recent call last):
  File "c:\Users\lemon\Desktop\ap_py_2\train.py", line 135, in <module>
    train_loss = train(train_loader, model)
    for x, y_batch in prog_bar:
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\tqdm\std.py", line 1195, in __iter__
    for obj in iterable:
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\dataloader.py", line 681, in __next__
    data = self._next_data()
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\dataloader.py", line 1376, in _next_data
    return self._process_data(data)
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\dataloader.py", line 1402, in _process_data
    data.reraise()
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\_utils.py", line 461, in reraise
    raise exception
TypeError: Caught TypeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\_utils\worker.py", line 302, in _worker_loop
    data = fetcher.fetch(index)
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\_utils\fetch.py", line 49, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\utils\data\_utils\fetch.py", line 49, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "c:\Users\lemon\Desktop\ap_py_2\datasets.py", line 102, in __getitem__
    sample = self.transforms(image = image_resized,
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\albumentations\core\composition.py", line 194, in __call__
    p.ensure_data_valid(data)
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\albumentations\core\bbox_utils.py", line 103, in ensure_data_valid
    if data_exists and len(data[data_name][0]) < 5:
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torch\_tensor.py", line 705, in __len__
    raise TypeError("len() of a 0-d tensor")
TypeError: len() of a 0-d tensor
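
As far as I can tell from the traceback, albumentations takes the first bounding box and calls len() on it (the len(data[data_name][0]) check), and calling len() on a 0-d tensor is what raises the TypeError. A minimal snippet with made-up values that reproduces the same error outside the training pipeline:

import torch

# indexing a 1-D tensor gives a 0-d (scalar) tensor
first = torch.tensor([10.0, 20.0, 30.0, 40.0])[0]  # tensor(10.)
len(first)  # TypeError: len() of a 0-d tensor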

datasets.py:

import torch
import cv2
import numpy as np
import os
import glob
from config import (
    CLASSES, RESIZE_TO, TRAIN_DIR, VALID_DIR, BATCH_SIZE
)
from torch.utils.data import Dataset, DataLoader
from custom_utils import collate_fn, get_train_transform, get_valid_transform
import dataset_2  # load the bounding-box annotations from another script
GetBoxes = dataset_2.get_paths()
train_boxes = GetBoxes["train_labels"]
test_boxes = GetBoxes["test_labels"]

# the dataset class
class CustomDataset(Dataset):
    def __init__(self, dir_path, width, height, classes, boxes_data=None, transforms=None):
        self.transforms = transforms
        self.dir_path = dir_path
        self.height = height
        self.width = width
        self.classes = classes
        
        self.boxes_data = boxes_data

        # get all the image paths in sorted order
        self.image_paths = glob.glob(f"{self.dir_path}/*.jpg")
        self.all_images = [image_path.split(os.path.sep)[-1] for image_path in self.image_paths]
        self.all_images = sorted(self.all_images)

    def __getitem__(self, idx):
        # capture the image name and the full image path
        image_name = self.all_images[idx]
        image_path = os.path.join(self.dir_path, image_name)
        # read the image
        image = cv2.imread(image_path)
        # convert BGR to RGB color format
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image_resized = cv2.resize(image, (self.width, self.height))
        image_resized /= 255.0
        
        # capture the corresponding XML file for getting the annotations
        # annot_filename = image_name[:-4] + '.xml'
        # annot_file_path = os.path.join(self.dir_path, annot_filename)
        
        boxes = []
        labels = []
        # tree = et.parse(annot_file_path)
        # root = tree.getroot()
        
        # get the height and width of the image
        image_width = image.shape[1]
        image_height = image.shape[0]
        # box coordinates for xml files are extracted and corrected for image size given
        box = self.boxes_data[idx]
        # map the current object name to `classes` list to get...
        # ... the label index and append to `labels` list
        labels.append(box[4])
        
        # xmin = top-left corner x-coordinate
        xmin = int(box[0])
        # xmax = bottom-right corner x-coordinate
        xmax = int(box[2])
        # ymin = top-left corner y-coordinate
        ymin = int(box[1])
        # ymax = bottom-right corner y-coordinate
        ymax = int(box[3])

        # resize the bounding boxes according to the...
        # ... desired `width`, `height`
        xmin_final = (xmin/image_width)*self.width
        xmax_final = (xmax/image_width)*self.width
        ymin_final = (ymin/image_height)*self.height
        ymax_final = (ymax/image_height)*self.height

        boxes.append([xmin_final, ymin_final, xmax_final, ymax_final])
        boxes = boxes[0]
        # bounding box to tensor
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # area of the bounding boxes
        area = (boxes[3] - boxes[1]) * (boxes[2] - boxes[0])
        # no crowd instances
        iscrowd = torch.zeros((boxes.shape[0],), dtype=torch.int64)
        # labels to tensor
        labels = torch.as_tensor(labels, dtype=torch.int64)
        # prepare the final `target` dictionary
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["area"] = area
        target["iscrowd"] = iscrowd
        image_id = torch.tensor([idx])
        target["image_id"] = image_id


        # apply the image transforms
        if self.transforms:
            sample = self.transforms(image=image_resized,
                                     bboxes=target['boxes'],
                                     labels=labels)
            image_resized = sample['image']
            target['boxes'] = torch.Tensor(sample['bboxes'])
            
        return image_resized, target

    def __len__(self):
        return len(self.all_images)


# prepare the final datasets and data loaders
def create_train_dataset():
    train_dataset = CustomDataset(TRAIN_DIR, RESIZE_TO, RESIZE_TO, CLASSES, train_boxes, get_train_transform())
    return train_dataset

def create_valid_dataset():
    valid_dataset = CustomDataset(VALID_DIR, RESIZE_TO, RESIZE_TO, CLASSES, test_boxes, get_valid_transform())
    return valid_dataset

def create_train_loader(train_dataset, num_workers=0):
    train_loader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate_fn
    )
    return train_loader

def create_valid_loader(valid_dataset, num_workers=0):
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=collate_fn
    )
    return valid_loader

# execute datasets.py using Python command from Terminal...
# ... to visualize sample images
# USAGE: python datasets.py
if __name__ == '__main__':
    # sanity check of the Dataset pipeline with sample visualization
    dataset = CustomDataset(
        TRAIN_DIR, RESIZE_TO, RESIZE_TO, CLASSES, train_boxes
    )
    print(f"Number of training images: {len(dataset)}")
    
    # function to visualize a single sample
    def visualize_sample(image, target):
        box = target['boxes']
        label = CLASSES[target['labels']]
        cv2.rectangle(
            image, 
            (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
            (0, 255, 0), 2
        )
        cv2.putText(
            image, label, (int(box[0]), int(box[1]-5)), 
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2
        )
        cv2.imshow('Image', image)
        cv2.waitKey(0)
        
    NUM_SAMPLES_TO_VISUALIZE = 5
    for i in range(NUM_SAMPLES_TO_VISUALIZE):
        image, target = dataset[i]
        visualize_sample(image, target)
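
One thing I noticed while debugging: because of the boxes = boxes[0] line in __getitem__, the target['boxes'] I pass to the transform is a single 1-D tensor of shape (4,) instead of a list of boxes. A quick check with hypothetical coordinates:

import torch

boxes = torch.as_tensor([10.0, 20.0, 200.0, 150.0], dtype=torch.float32)
print(boxes.shape)  # torch.Size([4])
print(boxes[0])     # tensor(10.) -- a 0-d tensor

Is that 1-D shape the wrong format for the bboxes argument, and could it be what triggers the error?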