How do I split a dataset into training and validation sets in medicaltorch?

from collections import defaultdict
import time
import os

import numpy as np

from tqdm import tqdm
import matplotlib.pyplot as plt

from tensorboardX import SummaryWriter

from medicaltorch import datasets as mt_datasets
from medicaltorch import models as mt_models
from medicaltorch import transforms as mt_transforms
from medicaltorch import losses as mt_losses
from medicaltorch import metrics as mt_metrics
from medicaltorch import filters as mt_filters

import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from torch import autograd, optim
import torch.backends.cudnn as cudnn
import torch.nn as nn

import torchvision.utils as vutils
cudnn.benchmark = True
ROOT_DIR = r'C:~~~~~~\Desktop\DATADATA\3Dircadb1\file'
mri_input_filename = os.path.join(ROOT_DIR,'imageTr',
                                          'site1-sc01-image.nii.gz')
mri_gt_filename = os.path.join(ROOT_DIR, 'labelTr',
                               'site1-sc01-mask-r1.nii.gz')

pair = mt_datasets.SegmentationPair2D(mri_input_filename, mri_gt_filename)
slice_pair = pair.get_pair_slice(55)
input_slice = slice_pair["input"]
gt_slice = slice_pair["gt"]
print(input_slice.shape)
img_data, seg_data = pair.get_pair_data()
print(img_data.shape)
print(seg_data.shape)

# Sort both listings so every image lines up with its own mask;
# os.listdir gives no ordering guarantee.
img_list = sorted(os.listdir(os.path.join(ROOT_DIR, 'imageTr')))
label_list = sorted(os.listdir(os.path.join(ROOT_DIR, 'labelTr')))
filename_pairs = [(os.path.join(ROOT_DIR, 'imageTr', img),
                   os.path.join(ROOT_DIR, 'labelTr', lbl))
                  for img, lbl in zip(img_list, label_list)]
train_transform = transforms.Compose([
    mt_transforms.Resample(0.25, 0.25),
    mt_transforms.ElasticTransform(alpha_range=(40.0, 60.0),
                                   sigma_range=(0, 1),  # range must be ordered (low, high)
                                   p=0.3),
    mt_transforms.ToTensor(),
])

train_dataset = mt_datasets.MRI2DSegmentationDataset(filename_pairs, transform=train_transform)
dataloader = DataLoader(train_dataset, batch_size=4,
                        collate_fn=mt_datasets.mt_collate)
minibatch = next(iter(dataloader))
def threshold_predictions(predictions, thr=0.999):
    # Copy first: slicing a NumPy array with [:] returns a view,
    # which would silently overwrite the caller's array in place.
    thresholded_preds = predictions.copy()
    thresholded_preds[thresholded_preds < thr] = 0
    thresholded_preds[thresholded_preds >= thr] = 1
    return thresholded_preds
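
For reference, a quick toy run of this helper (the probability values are made up):

probs = np.array([[0.2, 0.9998],
                  [0.9999, 0.5]])
print(threshold_predictions(probs))
# [[0. 1.]
#  [1. 0.]]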

def run_main():
    train_transform = transforms.Compose([
        mt_transforms.CenterCrop2D((200, 200)),
        mt_transforms.ElasticTransform(alpha_range=(28.0, 30.0),
                                       sigma_range=(3.5, 4.0),
                                       p=0.3),
        mt_transforms.RandomAffine(degrees=4.6,
                                   scale=(0.98, 1.02),
                                   translate=(0.03, 0.03)),
        mt_transforms.RandomTensorChannelShift((-0.10, 0.10)),
        mt_transforms.ToTensor(),
        mt_transforms.NormalizeInstance(),
    ])

    val_transform = transforms.Compose([
        mt_transforms.CenterCrop2D((200, 200)),
        mt_transforms.ToTensor(),
        mt_transforms.NormalizeInstance(),
    ])
    # These lines must stay inside run_main(): train_transform and
    # val_transform are local to it.
    TRAIN_ROOT_DIR_GMCHALLENGE = r'C:~~~~~~\Desktop\DATADATA\3Dircadb1\file'

    gmdataset_train = mt_datasets.SCGMChallenge2DTrain(root_dir=TRAIN_ROOT_DIR_GMCHALLENGE,
                                                       subj_ids=range(1, 18),
                                                       transform=train_transform,
                                                       slice_filter_fn=mt_filters.SliceFilter())

    gmdataset_val = mt_datasets.SCGMChallenge2DTrain(root_dir=TRAIN_ROOT_DIR_GMCHALLENGE,
                                                     subj_ids=range(18, 20),
                                                     transform=val_transform)

The error happens because SCGMChallenge2DTrain expects four rater masks per image (the GM-challenge layout), while I have only one mask for each image:

During handling of the above exception, another exception occurred:

    raise FileNotFoundError("No such file or no access: '%s'" % filename)
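
The four masks come from the challenge's four raters. If the masks follow the challenge naming but only rater 1 exists (the file above is site1-sc01-mask-r1.nii.gz), restricting rater_ids may already avoid the FileNotFoundError; a sketch, assuming the installed version of SCGMChallenge2DTrain exposes the rater_ids parameter:

    gmdataset_train = mt_datasets.SCGMChallenge2DTrain(root_dir=TRAIN_ROOT_DIR_GMCHALLENGE,
                                                       subj_ids=range(1, 18),
                                                       rater_ids=[1],  # only rater-1 masks on disk
                                                       transform=train_transform,
                                                       slice_filter_fn=mt_filters.SliceFilter())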

What can I do to divide the data into training and validation sets?

Use torch.utils.data.random_split or a SubsetRandomSampler; both are documented under torch.utils.data.
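
A minimal sketch of both options, reusing MRI2DSegmentationDataset and mt_collate from the question (the 80/20 ratio and the batch size are arbitrary choices):

import torch
from torch.utils.data import DataLoader, SubsetRandomSampler, random_split

full_dataset = mt_datasets.MRI2DSegmentationDataset(filename_pairs,
                                                    transform=train_transform)

n_val = int(0.2 * len(full_dataset))   # 20% of the slices for validation
n_train = len(full_dataset) - n_val

# Option 1: random_split partitions the dataset into two disjoint Subsets.
train_ds, val_ds = random_split(full_dataset, [n_train, n_val])
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True,
                          collate_fn=mt_datasets.mt_collate)
val_loader = DataLoader(val_ds, batch_size=4,
                        collate_fn=mt_datasets.mt_collate)

# Option 2: SubsetRandomSampler draws from explicit index lists,
# so you control exactly which slices land in which split.
indices = torch.randperm(len(full_dataset)).tolist()
train_idx, val_idx = indices[n_val:], indices[:n_val]
train_loader = DataLoader(full_dataset, batch_size=4,
                          sampler=SubsetRandomSampler(train_idx),
                          collate_fn=mt_datasets.mt_collate)
val_loader = DataLoader(full_dataset, batch_size=4,
                        sampler=SubsetRandomSampler(val_idx),
                        collate_fn=mt_datasets.mt_collate)

Both options split at the slice level, so slices from the same volume can end up in both sets. If that leakage matters, shuffle and split the filename_pairs list itself and build two separate MRI2DSegmentationDataset objects (one with train_transform, one with val_transform) from the two halves.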