RuntimeError: shape '[128, 128, 4, 9, 3]' is invalid for input of size 399384

Hello guys, I need your help.
I have data (a matrix of arrays) with shape (H x W x 4 x 9 x 3); it is not image data.
Each element of the matrix is an (M x N x 3) array.
The M and N values do not change (4 and 9); only H and W vary. So, to get all the data into the same shape before sending it to the dataloader, I did reshape(224, 224, n, m, z).
I think the error is related to the shape of the data I am sending to the dataloader. I have tried many other solutions but I still get errors such as: ValueError: size shape must match input shape. Input is 3D, size is 2

Here is my dataset class (NTU_RGBD_120_SkeletonDataset.py):

import os
import csv
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms


class SkeletonDataset(Dataset):
    def __init__(self, array_path:str, csv_path:str):
        self.array_path = array_path
        # read a csv file
        self.labels = []
        self.categories = {}
        with open(csv_path, "r") as f:
            reader = csv.reader(f)
            for rows in reader:
                filename = rows[0]; indexclass = int(rows[1]); category = rows[2]
                self.labels.append([filename, indexclass])
                if indexclass not in self.categories:
                    self.categories[indexclass] = category
        self.num_classes = len(self.categories)
        # self.transform = transforms.Compose([
        #     transforms.Resize()
        #])
       

    def __len__(self) -> int:
        return len(self.labels)
    
    def get_category(self, index:int) -> str:
        return self.categories[index]
    
    def get_array(self, path, namefile):
        # load the (H, W, 4, 9, 3) array stored under the default key 'arr_0'
        array_matrix = np.load(os.path.join(path, namefile + ".npz"), allow_pickle=True)
        return array_matrix['arr_0']

    def __getitem__(self, index):
        filename, label = self.labels[index]
        data = self.get_array(self.array_path, filename)
        data = torch.from_numpy(data).long()
        H, W, n, m, z = data.shape
        if H != 4 and W != 9:
            data = data.reshape(n, m, z, H, W)
        # force every sample to the same shape; this reshape is the line that fails
        data = data.reshape(224, 224, n, m, z)
        print(data.shape)

        return data, label

Here is the training script (Train.py):

import os
import argparse
import torch
import torch.nn as nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from Models.ModelCnn import CNNskl
from NTU_RGBD_120_SkeletonDataset import SkeletonDataset as SkeletonDataset
import matplotlib.pyplot as plt

""" from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter() """


class AverageMeter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

# Plotting the training and validation accuracy and loss
def plot_loss(v_train_loss, v_val_loss, namefile: str):
    epochs = list(range(1, len(v_train_loss) + 1))

    v_train_loss_rounded = [round(element, 0) for element in v_train_loss]
    v_val_loss_rounded = [round(element, 0) for element in v_val_loss]

    plot1 = plt.plot(epochs, v_train_loss_rounded)
    plot2 = plt.plot(epochs, v_val_loss_rounded)
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.title("Loss")
    plt.legend(["Training Loss", "Validation Loss"], loc = 1)
    plt.savefig(namefile +'.png')
    plt.show()

def plot_accuracy(v_train_accuracy, v_val_accuracy, namefile: str):
    epochs = list(range(1, len(v_train_accuracy) + 1))

    v_train_accuracy_rounded = [round(element, 0) for element in v_train_accuracy]
    v_val_accuracy_rounded = [round(element, 0) for element in v_val_accuracy]
    plot3 = plt.plot(epochs, v_train_accuracy_rounded)
    plot4 = plt.plot(epochs, v_val_accuracy_rounded)
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.title("Accuracy")
    plt.legend(["Training Accuracy", "Validation Accuracy"], loc = 1)
    plt.savefig(namefile +'.png')
    plt.show()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="action based skeleton args")
    parser.add_argument("--arraypath", default="Datasets/NTU/Skeleton/imagerepresentation/spatioVelo/", type=str)
    parser.add_argument("--crosssubjectcsvpath", default="dataloaders/Labels/cross_subject/", type=str)
    parser.add_argument("--crossviewcsvpath", default="dataloaders/Labels/cross_view/", type=str)
    parser.add_argument("--sequencelength", default=16, type=int)
    parser.add_argument("--epochs", default=200, type=int)
    parser.add_argument("--batchsize", default=64, type=int)
    parser.add_argument("--lr", default=1e-3, type=float)
    parser.add_argument("--gpu-number", default=0, type=int)

    # These variables are used when plotting the accuracy and the loss
    pltloss_train = []
    pltloss_val = []
    pltaccuracy_train1 = []
    pltaccuracy_train3 = []
    pltaccuracy_val1 = []
    pltaccuracy_val3 = []
    # print args
    args = parser.parse_args()
    print(args)

    # get a device
    if torch.cuda.is_available():
        cudnn.benchmark = True
        device = torch.device(f"cuda:{args.gpu_number}")
    else:
        device = torch.device(f"cpu")

    # Prepare the loader
    train = SkeletonDataset(array_path=args.arraypath, csv_path=args.crosssubjectcsvpath + "cross_sbj2_vst_train_edit.csv")
    val = SkeletonDataset(array_path=args.arraypath, csv_path=args.crosssubjectcsvpath  + "cross_sbj2_vst_test_edit.csv")
    train_loader = DataLoader(train, batch_size=args.batchsize, shuffle=True, num_workers=4)
    val_loader = DataLoader(val, batch_size=args.batchsize, shuffle=False, num_workers=4)

    for i, (datas, labels) in enumerate(train_loader):
        datas = datas.to(device); labels = labels.to(device)
        print(datas)
Running this prints the parsed args and then crashes in a dataloader worker:

Namespace(arraypath='Datasets/NTU/Skeleton/imagerepresentation/spatioVelo/', batchsize=64, crosssubjectcsvpath='dataloaders/Labels/cross_subject/', crossviewcsvpath='dataloaders/Labels/cross_view/', epochs=200, gpu_number=0, lr=0.001, sequencelength=16)
Traceback (most recent call last):
  File "Train.py", line 117, in <module>
    for i, (datas, labels) in enumerate(train_loader):
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 517, in __next__
    data = self._next_data()
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1199, in _next_data
    return self._process_data(data)
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1225, in _process_data
    data.reraise()
  File "/home/coco/.local/lib/python3.8/site-packages/torch/_utils.py", line 429, in reraise
    raise self.exc_type(msg)
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 202, in _worker_loop
    data = fetcher.fetch(index)
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/coco/.local/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/media/coco/12 TB Hard/skeleton_data_image/Skeleton_Image_representation/NTU_RGBD_120_SkeletonDataset.py", line 54, in __getitem__
    data = data.reshape(128,128,h,w,z)
RuntimeError: shape '[128, 128, 4, 9, 3]' is invalid for input of size 399384

Is every input example guaranteed to have 224x224x4x9x3 = 5,419,008 elements? The traceback says one sample has only 399,384, and reshape can only regroup the elements that are already there; it cannot add or drop any. If the spatial extent varies, you probably need to resize it explicitly (e.g., with torchvision.transforms or torch.nn.AdaptiveAvgPool2d). You also need to be careful about rearranging your axes with reshape: it does not actually permute the axes (i.e., it does not change the data layout), so data.reshape(n, m, z, H, W) scrambles the values rather than moving the axes; use permute for that.
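
To see the difference concretely: reshape keeps the elements in their existing memory order and only regroups them, while permute actually moves the axes. A toy example:

import torch

x = torch.arange(6).reshape(2, 3)  # tensor([[0, 1, 2], [3, 4, 5]])
print(x.reshape(3, 2))             # tensor([[0, 1], [2, 3], [4, 5]]) -- regrouped, same element order
print(x.permute(1, 0))             # tensor([[0, 3], [1, 4], [2, 5]]) -- axes actually swapped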
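
And here is a minimal sketch of what __getitem__ could do instead, assuming the goal is a fixed (224, 224, 4, 9, 3) output per sample; the target size, the bilinear mode, and the helper name to_fixed_size are my own illustrative choices, not something from your code:

import torch
import torch.nn.functional as F

def to_fixed_size(data: torch.Tensor, out_hw=(224, 224)) -> torch.Tensor:
    # data: (H, W, n, m, z) with variable H and W, e.g. (H, W, 4, 9, 3)
    H, W, n, m, z = data.shape
    x = data.permute(2, 3, 4, 0, 1)            # (n, m, z, H, W): axes really moved
    x = x.reshape(1, n * m * z, H, W).float()  # fake batch/channel dims; interpolate needs floats
    x = F.interpolate(x, size=out_hw, mode='bilinear', align_corners=False)
    x = x.reshape(n, m, z, out_hw[0], out_hw[1])
    return x.permute(3, 4, 0, 1, 2)            # back to (224, 224, n, m, z)

With something like this, every sample leaves __getitem__ with the same shape, so the default collate function can stack them into a batch; F.adaptive_avg_pool2d(x, out_hw) would be an alternative to the interpolate call. Note that interpolation requires floating-point input, so you would also replace the .long() conversion with .float().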