How to determine class distribution

I am doing a semantic segmentation project, and since my classes are imbalanced, I should be using weighted cross-entropy. I have never used this kind of method before, and from the forums and articles I have read, most people who use weighted cross-entropy already know their class distribution, as in this thread: Weights in weighted loss (nn.CrossEntropyLoss) - PyTorch Forums
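
From what I understand, the weights eventually get passed to the loss like this (a minimal sketch based on that thread; the weight values here are made up, and computing the real ones is exactly my problem):

import torch
import torch.nn as nn

# Made-up per-class weights for illustration only, one entry per class;
# rarer classes would get larger weights
class_weights = torch.tensor([0.2, 1.0, 3.5])

# nn.CrossEntropyLoss accepts an optional per-class `weight` tensor
criterion = nn.CrossEntropyLoss(weight=class_weights)

# For segmentation, logits are (N, C, H, W) and targets are (N, H, W) class indices
logits = torch.randn(4, 3, 224, 224)
targets = torch.randint(0, 3, (4, 224, 224))
loss = criterion(logits, targets)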

Now, I would like to know the class distribution in my dataset too. This is how I generated my dataset:

import numpy as np
import cv2
from skimage import io
from tqdm import tqdm

def getClassName(classID, cats):
    for i in range(len(cats)):
        if cats[i]['id']==classID:
            return cats[i]['name']
    return None

def getImage(imageObj, img_folder, input_image_size):
    # Read and normalize an image
    dataset_img = io.imread(img_folder + '/' + imageObj['file_name'])/255.0
    # Resize
    dataset_img = cv2.resize(dataset_img, input_image_size)
    if (len(dataset_img.shape)==3 and dataset_img.shape[2]==3): # If it is an RGB 3-channel image
        return dataset_img
    else: # For a grayscale image, stack the single channel to 3 channels
        stacked_img = np.stack((dataset_img,)*3, axis=-1)
        return stacked_img
    
def getNormalMask(imageObj, classes, coco, catIds, input_image_size):
    annIds = coco.getAnnIds(imageObj['id'], catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    cats = coco.loadCats(catIds)
    dataset_mask = np.zeros(input_image_size)
    for a in range(len(anns)):
        className = getClassName(anns[a]['category_id'], cats)
        pixel_value = classes.index(className)+1
        # Nearest-neighbor interpolation so resizing doesn't blend class indices into invalid values
        new_mask = cv2.resize(coco.annToMask(anns[a])*pixel_value, input_image_size,
                              interpolation=cv2.INTER_NEAREST)
        dataset_mask = np.maximum(new_mask, dataset_mask)

    # Add extra dimension for parity with train_img size [X * X * 3]
    dataset_mask = dataset_mask.reshape(input_image_size[0], input_image_size[1], 1)
    return dataset_mask  


def dataGeneratorCoco(images, classes, coco, folder, 
                      input_image_size, total_batch, mode, mask_type):
    
    img_folder = '{}/Images/'.format(folder)
    dataset_size = len(images)
    catIds = coco.getCatIds(catNms=classes)
    
    img = np.zeros((total_batch, input_image_size[0], input_image_size[1], 3)).astype('float')
    mask = np.zeros((total_batch, input_image_size[0], input_image_size[1], 1)).astype('float')

    for i in tqdm(range(total_batch)):
        imageObj = images[i]

        ### Retrieve Image ###
        dataset_img = getImage(imageObj, img_folder, input_image_size)

        ### Retrieve Mask ###
        if mask_type == "normal":
            dataset_mask = getNormalMask(imageObj, classes, coco, catIds, input_image_size)

        # Add to the batch-sized arrays
        img[i] = dataset_img
        mask[i] = dataset_mask

    return img, mask

total_batch = 1500
input_image_size = (224,224)
mask_type = 'normal'

# images_all, classes, coco_all, folder and mode come from my earlier COCO setup (not shown)
img_all, mask_all = dataGeneratorCoco(images_all, classes, coco_all, folder,
                                      input_image_size, total_batch, mode, mask_type)

What approach should I use? I tried looking it up on the internet, but most of the methods I found don't suit my data generator.
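
The closest thing I came up with is counting pixel values directly on the masks my generator returns, since every pixel in mask_all already holds a class index (0 = background, 1..len(classes) = my classes, per getNormalMask above). Something like the sketch below, where the inverse-frequency weighting at the end is just my assumption of how the weights could be derived:

import numpy as np

# mask_all has shape (1500, 224, 224, 1) with class indices as pixel values
values, counts = np.unique(mask_all.astype(int), return_counts=True)
pixel_freq = counts / counts.sum()
for v, f in zip(values, pixel_freq):
    print(f"class {v}: {f:.4%} of pixels")

# Assumed scheme: inverse pixel frequency, normalized to sum to 1
weights = 1.0 / (pixel_freq + 1e-8)
weights = weights / weights.sum()
print(weights)

Is counting pixels like this the right idea, or should the weights be computed some other way?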