How to build a CNN-LSTM for audio data

I am currently working with a dataset of 5-second audio recordings from patients with Parkinson’s disease and from healthy controls. I have converted each 5-second WAV file into a mel-spectrogram image, and I was wondering whether it would be appropriate to develop a CNN-LSTM model to classify the audio files as Parkinson’s disease or healthy control. Could each image be divided into five sub-images (each 1 second long)? I have already developed a CNN model (see the code below) which achieves approximately 90-92% accuracy. Does anyone have any resources they could point me to, or advice on how I would need to alter the code below to incorporate an LSTM? (I have put a rough, untested sketch of what I am imagining at the very end of this post.)
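To illustrate what I mean by sub-images, here is a rough, untested sketch of the slicing I have in mind (the assumption that the spectrogram width divides evenly into five 1-second slices is mine and may not hold for my data). My current working CNN code follows after it.

import torch

def split_into_time_slices(spec, n_slices=5):
    # spec is a single spectrogram tensor of shape (C, H, W); cut it along the
    # time (width) axis into n_slices equal chunks, i.e. roughly one per second.
    c, h, w = spec.shape
    slice_w = w // n_slices
    slices = [spec[:, :, i * slice_w:(i + 1) * slice_w] for i in range(n_slices)]
    return torch.stack(slices)  # shape (n_slices, C, H, slice_w)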

import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import utils
from torch.utils.data import Dataset

from sklearn.metrics import confusion_matrix
from skimage import io, transform, data
from skimage.color import rgb2gray

import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
import pandas as pd
import numpy as np
import csv
import os
import math
import cv2

root_dir = "/content/drive/My Drive/5_second_audio"
class_names = [
  "Parkinsons_Disease",
  "Healthy_Control"
]

def get_meta(root_dir, dirs):
    # Collect the file path and class index for every file in each class folder.
    paths, classes = [], []
    for i, dir_ in enumerate(dirs):
        for entry in os.scandir(os.path.join(root_dir, dir_)):
            if entry.is_file():
                paths.append(entry.path)
                classes.append(i)

    return paths, classes

paths, classes = get_meta(root_dir, class_names)

data = {
    'path': paths,
    'class': classes
}

data_df = pd.DataFrame(data, columns=['path', 'class'])
data_df = data_df.sample(frac=1).reset_index(drop=True)  # shuffle the rows

class Audio(Dataset):

    def __init__(self, df, transform=None):
        """
        Args:
            image_dir (string): Directory with all the images
            df (DataFrame object): Dataframe containing the images, paths and classes
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.df = df
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        # Load the spectrogram image and its label
        x = Image.open(self.df['path'][index]).convert('RGB')  # some files are grayscale, so force 3 channels
        y = torch.tensor(int(self.df['class'][index]))

        if self.transform:
            x = self.transform(x)

        return x, y

def compute_img_mean_std(image_paths):
    # Compute the per-channel mean and std over the whole dataset for normalisation.
    img_h, img_w = 224, 224
    imgs = []
    means, stdevs = [], []

    for i in tqdm(range(len(image_paths))):
        img = cv2.imread(image_paths[i])
        img = cv2.resize(img, (img_h, img_w))
        imgs.append(img)

    imgs = np.stack(imgs, axis=3)
    print(imgs.shape)

    imgs = imgs.astype(np.float32) / 255.

    for i in range(3):
        pixels = imgs[:, :, i, :].ravel()  # flatten channel i across all images
        means.append(np.mean(pixels))
        stdevs.append(np.std(pixels))

    means.reverse()  # BGR --> RGB
    stdevs.reverse()

    print("normMean = {}".format(means))
    print("normStd = {}".format(stdevs))
    return means, stdevs

norm_mean, norm_std = compute_img_mean_std(paths)

data_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

train_split = 0.70 # 70% train, 10% validation, remaining 20% test
valid_split = 0.10

train_size = int(len(data_df)*train_split)
valid_size = int(len(data_df)*valid_split)

ins_dataset_train = Audio(
    df=data_df[:train_size],
    transform=data_transform,
)

ins_dataset_valid = Audio(
    df=data_df[train_size:(train_size + valid_size)].reset_index(drop=True),
    transform=data_transform,
)

ins_dataset_test = Audio(
    df=data_df[(train_size + valid_size):].reset_index(drop=True),
    transform=data_transform,
)

print(len(ins_dataset_test))  # quick check of the test-set size

train_loader = torch.utils.data.DataLoader(
    ins_dataset_train,
    batch_size=8,
    shuffle=True,
    num_workers=2
)

test_loader = torch.utils.data.DataLoader(
    ins_dataset_test,
    batch_size=16,
    shuffle=False,
    num_workers=2
)

valid_loader = torch.utils.data.DataLoader(
    ins_dataset_valid,
    batch_size=16,
    shuffle=True,
    num_workers=2
)

# Sanity check that the validation loader yields batches correctly
for i, data in enumerate(valid_loader, 0):
    images, labels = data
    print("Batch", i, "size:", len(images))

from torchvision import models

final_model = models.densenet161(pretrained=False)

for param in final_model.parameters():
    param.requires_grad = False  # freeze the backbone so only the new classifier layer is trained

final_model.classifier = torch.nn.Linear(final_model.classifier.in_features, 2)
print(final_model)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model_gpu = final_model.to(device)
criterion = nn.CrossEntropyLoss()

optimizer = optim.Adam(model_gpu.parameters(), lr=0.001)

import timeit

training_loss = []
validation_loss = []

def train_and_validate_model(num_epochs):
    for epoch in range(num_epochs):
        train_loss = 0.0
        valid_loss = 0.0
        running_loss = 0.0

        # Training phase
        model_gpu.train()
        for i, data in enumerate(train_loader, 0):
            images, labels = data

            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model_gpu(images)

            loss = criterion(outputs, labels)
            loss.backward()

            optimizer.step()
            running_loss += loss.item()
            train_loss += loss.item()

            if i % 10 == 9:    # print every 10 mini-batches
                print('Epoch / Batch [%d / %d] - Loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 10))
                running_loss = 0.0

        training_loss.append(train_loss / len(train_loader))

        # Validation phase: eval mode and no gradient computation
        model_gpu.eval()
        running_loss = 0.0
        with torch.no_grad():
            for i, data in enumerate(valid_loader, 0):
                images, labels = data
                images = images.to(device)
                labels = labels.to(device)

                outputs = model_gpu(images)

                loss = criterion(outputs, labels)
                running_loss += loss.item()
                valid_loss += loss.item()

                if i % 10 == 9:    # print every 10 mini-batches
                    print('Epoch / Batch [%d / %d] - Loss: %.3f' %
                          (epoch + 1, i + 1, running_loss / 10))
                    running_loss = 0.0

        validation_loss.append(valid_loss / len(valid_loader))

# Time a single full training run (160 epochs)
gpu_train_time = timeit.timeit(
    "train_and_validate_model(num_epochs)",
    setup="num_epochs=160",
    number=1,
    globals=globals(),
)
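For reference, this is roughly the kind of alteration I am imagining, although I have not tested it and the hidden size, the pooling, and the way the DenseNet features feed the LSTM are all guesses on my part:

import torch.nn.functional as F

class CNNLSTM(nn.Module):
    def __init__(self, num_classes=2, lstm_hidden=256):
        super().__init__()
        backbone = models.densenet161(pretrained=False)
        self.features = backbone.features                 # CNN applied to each 1-second slice
        self.feat_dim = backbone.classifier.in_features   # 2208 for densenet161
        self.lstm = nn.LSTM(self.feat_dim, lstm_hidden, batch_first=True)
        self.fc = nn.Linear(lstm_hidden, num_classes)

    def forward(self, x):
        # x: (batch, n_slices, C, H, W), i.e. each sample is a sequence of spectrogram slices
        b, t, c, h, w = x.shape
        x = x.view(b * t, c, h, w)                           # fold the slices into the batch
        f = self.features(x)                                 # (b*t, feat_dim, h', w')
        f = F.adaptive_avg_pool2d(F.relu(f), 1).flatten(1)   # (b*t, feat_dim)
        f = f.view(b, t, self.feat_dim)                      # restore the sequence dimension
        out, _ = self.lstm(f)                                # (b, t, lstm_hidden)
        return self.fc(out[:, -1])                           # classify from the last time step

If something like this is on the right track, I assume the Audio dataset would also need to return the stacked 1-second slices (e.g. via split_into_time_slices above) rather than a single image, so that each batch has shape (batch, 5, 3, H, W). Any pointers or corrections would be appreciated.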