Is my GAN for generating ECG signals implemented correctly?

I'm working on generating ECG signals with a GAN, and I'd like to know whether my model is implemented correctly. Training runs and I do get signals, but they all look the same to me, which I understand can be a sign of mode collapse (a quick check for this is sketched after the code below). I'm fairly new to GANs, so any help would be appreciated.

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import os

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # used by init_hidden() and the .to(device) calls below

class Generator(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers, num_directions=2):
        super(Generator, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size  # must match the LSTM hidden size for init_hidden()
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_directions = num_directions

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True)

        self.dropout = nn.Dropout(p=0.3)
        self.ln = nn.LayerNorm(hidden_size * 2)  # defined but currently unused in forward()
        self.self_attention = nn.MultiheadAttention(2 * hidden_size, num_heads=2)
        self.fc = nn.Linear(hidden_size * 2, output_size)
        self.relu = nn.LeakyReLU(0.2)  # LeakyReLU with a negative slope of 0.2

    def forward(self, noise):
        batch_size = noise.size(0)
        h_0, c_0 = self.init_hidden(batch_size)
        output, _ = self.lstm(noise, (h_0, c_0))
        output, _ = self.gru(output)
        output = self.dropout(output)

        # nn.MultiheadAttention expects (seq_len, batch, embed_dim) by default
        output = output.permute(1, 0, 2)
        output, _ = self.self_attention(output, output, output)
        output = output.permute(1, 0, 2)  # back to (batch, seq_len, embed_dim)

        output = self.fc(output[:, -1, :])  # keep only the last time step
        output = self.relu(output)
        output = output.view(batch_size, self.output_size, -1)  # (batch, output_size, 1)
        return output

    def init_hidden(self, batch_size):
        h_0 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size).to(device)
        c_0 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size).to(device)
        return h_0, c_0

class Discriminator(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, channels):
        super(Discriminator, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size

        self.conv1 = nn.Conv1d(channels, 8, kernel_size=16, stride=1, padding=8)
        self.relu1 = nn.LeakyReLU()

        self.conv2 = nn.Conv1d(8, 16, kernel_size=16, stride=1, padding=8)
        self.relu2 = nn.LeakyReLU()

        self.maxpool = nn.MaxPool1d(kernel_size=2)

        self.conv3 = nn.Conv1d(16, 32, kernel_size=16, stride=1, padding=8)
        self.relu3 = nn.LeakyReLU()

        self.conv4 = nn.Conv1d(32, 64, kernel_size=16, stride=1, padding=8)
        self.relu4 = nn.LeakyReLU()

        self.conv5 = nn.Conv1d(64, 128, kernel_size=16, stride=1, padding=8)
        self.relu5 = nn.LeakyReLU()

        self.conv6 = nn.Conv1d(128, 256, kernel_size=16, stride=1, padding=8)
        self.relu6 = nn.LeakyReLU()

        self.flatten = nn.Flatten()
        self.fc = nn.Linear(256 * 45, 1)  # hard-coded flattened size; assumes an input sequence length of 350
        self.sigmoid = nn.Sigmoid()  # real/fake probability in (0, 1)

    def forward(self, x):
        batch_size = x.size(0)
        x = x.permute(0, 2, 1)  # (batch, seq_length, channels) -> (batch, channels, seq_length)

        x = self.conv1(x.float())
        x = self.relu1(x)

        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool(x)

        x = self.conv3(x)
        x = self.relu3(x)

        x = self.conv4(x)
        x = self.relu4(x)
        x = self.maxpool(x)

        x = self.conv5(x)
        x = self.relu5(x)

        x = self.conv6(x)
        x = self.relu6(x)
        x = self.maxpool(x)

        x = self.flatten(x)
        x = self.fc(x)
        x = self.sigmoid(x)

        return x.view(batch_size, 1)  # one real/fake score per sample

torch.manual_seed(123)

save_dir = "models"
os.makedirs(save_dir, exist_ok=True)
generator_path = os.path.join(save_dir, "generator.pt")
discriminator_path = os.path.join(save_dir, "discriminator.pt")

# Example hyperparameters (assumed values, substitute your own). output_size=350 is
# chosen so the discriminator's hard-coded nn.Linear(256 * 45, 1) lines up, and
# hidden_size=64 mirrors the value that was previously hard-coded inside the classes.
input_size = 100      # feature dimension of the noise fed to the generator
hidden_size = 64
output_size = 350     # length of each generated ECG segment
input_channels = 1

generator = Generator(input_size, hidden_size, output_size, num_layers=2).to(device)
discriminator = Discriminator(input_size, hidden_size, output_size, channels=input_channels).to(device)
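
To sanity-check the wiring end to end, here is a minimal smoke test, assuming the example hyperparameter values above (input_size=100, output_size=350). It only verifies shapes and output ranges, not training behaviour:

# Shape-only smoke test (no training). Assumes the example values above.
noise = torch.randn(8, 50, input_size, device=device)  # (batch, seq_len, noise features)
fake = generator(noise)
print(fake.shape)  # expected: torch.Size([8, 350, 1])

score = discriminator(fake)
print(score.shape, score.min().item(), score.max().item())  # (8, 1), values in (0, 1)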
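
As for all the samples looking identical: that is the classic mode-collapse symptom. A quick diagnostic sketch (not a definitive test) is to draw several independent noise vectors and measure how much the generated signals actually differ; a spread near zero means the generator is ignoring its noise input:

# Mode-collapse check: generate from independent noise draws and measure
# the per-time-step standard deviation across the batch.
generator.eval()  # disable dropout so any variation comes from the noise alone
with torch.no_grad():
    z = torch.randn(16, 50, input_size, device=device)
    samples = generator(z).squeeze(-1)         # (16, output_size)
    spread = samples.std(dim=0).mean().item()  # average std across the 16 samples
print(f"mean std across samples: {spread:.6f}")  # close to 0 suggests mode collapse
generator.train()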