Splitting a dataset so the same data source (speaker) doesn't appear in both the training and test sets?

I have written the following code to classify audio files as Parkinson's Disease or Healthy Control. Because the dataset is small (34 participants), I divided each recording into 5-second frames and generated mel spectrograms from these WAV files. However, to avoid introducing bias into my model, I want to ensure that each speaker appears in only the training set or the test set, never both. Each image filename starts with a participant's ID number (e.g. ID33_pd_3_2_2_27.jpg). How would I modify my current code?

from google.colab import drive
drive.mount('/content/drive')
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import utils
from torch.utils.data import Dataset

from sklearn.metrics import confusion_matrix
from skimage import io, transform, data
from skimage.color import rgb2gray

import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
import pandas as pd
import numpy as np
import csv
import os
import math
import cv2

root_dir = "/content/drive/My Drive/Read_Text/5_Second_Segments/"
class_names = [
  "Parkinsons_Disease",
  "Healthy_Control"
]

def get_meta(root_dir, dirs):
    """ Fetches the meta data for all the images and assigns labels.
    """
    paths, classes = [], []
    for i, dir_ in enumerate(dirs):
        for entry in os.scandir(root_dir + dir_):
            if entry.is_file():
                paths.append(entry.path)
                classes.append(i)
                
    return paths, classes


paths, classes = get_meta(root_dir, class_names)

data = {
    'path': paths,
    'class': classes
}

data_df = pd.DataFrame(data, columns=['path', 'class'])
data_df = data_df.sample(frac=1).reset_index(drop=True)  # Shuffle the rows before splitting

from pandas import option_context

print("Found", len(data_df), "images.")

with option_context('display.max_colwidth', 400):
    display(data_df.head(100))

class Audio(Dataset):

    def __init__(self, df, transform=None):
        """
        Args:
            image_dir (string): Directory with all the images
            df (DataFrame object): Dataframe containing the images, paths and classes
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.df = df
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        # Load image from path and get label
        x = Image.open(self.df['path'][index])
        x = x.convert('RGB')  # Convert any grayscale images in the data to 3-channel RGB
        y = torch.tensor(int(self.df['class'][index]))

        if self.transform:
            x = self.transform(x)

        return x, y


def compute_img_mean_std(image_paths):
    """
        Author: @xinruizhuang. Computes the per-channel mean and std over the whole dataset;
        the images are first normalized from 0-255 to 0-1.
    """

    img_h, img_w = 224, 224
    imgs = []
    means, stdevs = [], []

    for i in tqdm(range(len(image_paths))):
        img = cv2.imread(image_paths[i])
        img = cv2.resize(img, (img_h, img_w))
        imgs.append(img)

    imgs = np.stack(imgs, axis=3)  # shape: (img_h, img_w, 3, n_images)
    print(imgs.shape)

    imgs = imgs.astype(np.float32) / 255.

    for i in range(3):
        pixels = imgs[:, :, i, :].ravel()  # resize to one row
        means.append(np.mean(pixels))
        stdevs.append(np.std(pixels))

    means.reverse()  # BGR --> RGB
    stdevs.reverse()

    print("normMean = {}".format(means))
    print("normStd = {}".format(stdevs))
    return means, stdevs

norm_mean, norm_std = compute_img_mean_std(paths)

data_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

train_split = 0.70 # Ratios for the train/valid/test split: 0.70/0.10/0.20 (test is the remainder).
valid_split = 0.10

train_size = int(len(data_df)*train_split)
valid_size = int(len(data_df)*valid_split)

ins_dataset_train = Audio(
    df=data_df[:train_size],
    transform=data_transform,
)

ins_dataset_valid = Audio(
    df=data_df[train_size:(train_size + valid_size)].reset_index(drop=True),
    transform=data_transform,
)

ins_dataset_test = Audio(
    df=data_df[(train_size + valid_size):].reset_index(drop=True),
    transform=data_transform,
)
pd.set_option('display.max_colwidth', None)  # -1 is deprecated in recent pandas
pd.set_option('display.max_rows', None) 
print(ins_dataset_test.df['path'])
train_loader = torch.utils.data.DataLoader(
    ins_dataset_train,
    batch_size=8,
    shuffle=True,
    num_workers=2
)

test_loader = torch.utils.data.DataLoader(
    ins_dataset_test,
    batch_size=16,
    shuffle=True,
    num_workers=2
)

valid_loader = torch.utils.data.DataLoader(
    ins_dataset_valid,
    batch_size=16,
    shuffle=True,
    num_workers=2
)

for i, batch in enumerate(valid_loader, 0):  # 'batch' avoids shadowing the 'data' dict above
  images, labels = batch
  print("Batch", i, "size:", len(images))

You could use sklearn.model_selection.GroupShuffleSplit, whose split method accepts a groups argument and generates split indices that keep each group (here, a participant) entirely on one side of the split.
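A minimal sketch of the splitting step, assuming the participant ID is everything before the first underscore of the filename (as in ID33_pd_3_2_2_27.jpg) and reusing data_df and the 0.70/0.10 ratios from your question (the 'participant' column name and random_state are arbitrary choices):

import os
from sklearn.model_selection import GroupShuffleSplit

# Group key: the participant ID is the filename prefix, e.g. "ID33".
data_df['participant'] = data_df['path'].apply(
    lambda p: os.path.basename(p).split('_')[0]
)

# First split: hold out 30% of *participants* for validation + testing.
gss = GroupShuffleSplit(n_splits=1, train_size=0.70, random_state=0)
train_idx, holdout_idx = next(gss.split(data_df, groups=data_df['participant']))

# Second split: 1/3 of the held-out participants for validation (0.10 of the
# total), the remaining 2/3 for testing (0.20 of the total).
holdout_df = data_df.iloc[holdout_idx]
gss_rest = GroupShuffleSplit(n_splits=1, train_size=1/3, random_state=0)
valid_rel, test_rel = next(gss_rest.split(holdout_df, groups=holdout_df['participant']))
valid_idx, test_idx = holdout_idx[valid_rel], holdout_idx[test_rel]

Note that GroupShuffleSplit's train_size is a proportion of groups (participants), not of images, so the image-level ratios will only approximate 0.70/0.10/0.20 when participants contribute different numbers of frames.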
Once you have the indices, you could use torch.utils.data.Subset to create the different Datasets.
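Continuing the sketch, a single Audio dataset over the full data_df can be indexed by the three index arrays; your DataLoader definitions then work unchanged on these Subsets (this relies on data_df having had reset_index applied, so positional and label indices agree inside Audio.__getitem__):

from torch.utils.data import Subset

full_dataset = Audio(df=data_df, transform=data_transform)

ins_dataset_train = Subset(full_dataset, train_idx)
ins_dataset_valid = Subset(full_dataset, valid_idx)
ins_dataset_test = Subset(full_dataset, test_idx)

# Sanity check: no participant appears in more than one split.
groups = data_df['participant']
assert set(groups.iloc[train_idx]).isdisjoint(groups.iloc[valid_idx])
assert set(groups.iloc[train_idx]).isdisjoint(groups.iloc[test_idx])
assert set(groups.iloc[valid_idx]).isdisjoint(groups.iloc[test_idx])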