Changed loss to BCEWithLogitsLoss() and the model returns NaNs

Due to some issues with CUDA I had to change the loss from BCELoss() to BCEWithLogitsLoss() for my GAN, and after that all my predictions and loss values became NaN.
Commenting and uncommenting the Sigmoid layer in the Discriminator did not help.
Could you be so kind as to advise where the issue is?
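
As far as I understand, BCEWithLogitsLoss() is just BCELoss() with the sigmoid folded in, so on finite inputs both should return the same finite value. A minimal standalone check of that assumption (random tensors here, not my real data):

import torch
import torch.nn as nn

logits = torch.randn(4, 1)
targets = torch.ones(4, 1)
print(nn.BCEWithLogitsLoss()(logits, targets))        # takes raw logits
print(nn.BCELoss()(torch.sigmoid(logits), targets))   # takes probabilities; should match the line above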

import torch
import torch.nn as nn
from tqdm import tqdm

class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(9, 256),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(64, 1),
            # nn.Sigmoid()  # commented out after switching to BCEWithLogitsLoss
        )

    def forward(self, x):
        output = self.model(x)
        return output

discriminator = Discriminator().to(DEVICE)
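
With nn.Sigmoid() removed, the last nn.Linear(64, 1) returns a raw logit, which as far as I know should still be finite for finite inputs. A quick probe I can run right after constructing the model (the batch of 4 random rows is just my assumption to match the nn.Linear(9, 256) input size):

with torch.no_grad():
    probe = torch.randn(4, 9).to(DEVICE)
    print(torch.isnan(discriminator(probe)).any())    # I would expect tensor(False) here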

class Generator(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(9, 16),
            nn.ReLU(),
            nn.Linear(16, 32),
            nn.ReLU(),
            nn.Linear(32, 9))

    def forward(self, x):
        output = self.model(x)
        return output

generator = Generator().to(DEVICE)

lr = 0.001
loss_function = nn.BCEWithLogitsLoss() #NEW

optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=lr) #optimizer
optimizer_generator = torch.optim.Adam(generator.parameters(), lr=lr)

for n, (real_samples, _) in enumerate(tqdm(train_loader)):
    real_samples_labels = torch.ones((batch_size, 1))
    latent_space_samples = torch.randn((batch_size, 9))
    generated_samples = generator(latent_space_samples.cuda())
    generated_samples_labels = torch.zeros((batch_size, 1))
    all_samples = torch.cat((real_samples.cuda(), generated_samples))
    all_samples_labels = torch.cat(
        (real_samples_labels, generated_samples_labels))

    discriminator.zero_grad()
    output_discriminator = discriminator(all_samples.cuda()) 
    loss_discriminator = loss_function(
        output_discriminator, all_samples_labels.cuda())
    loss_discriminator.backward()
    optimizer_discriminator.step()

    # Data for training the generator
    latent_space_samples = torch.randn((batch_size, 9)) 

    generator.zero_grad()
    generated_samples = generator(latent_space_samples.cuda())
    output_discriminator_generated = discriminator(generated_samples.cuda())
    loss_generator = loss_function(
        output_discriminator_generated, real_samples_labels.cuda())
    loss_generator.backward()
    optimizer_generator.step()

print(f"Loss D.: {loss_discriminator.item()}“)
print(f"Loss G.: {loss_generator.item()}”)

torch.save(discriminator, '/home/GAN/GAN_models/Discriminator4.pth')
print(f'Model saved.')
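
In case it helps, this is the kind of check I plan to run to see where the NaNs first appear (same names as in the code above; real_batch, latent and fake are just throwaway variables for the check):

torch.autograd.set_detect_anomaly(True)               # should report the op that first produces NaN during backward

real_batch, _ = next(iter(train_loader))
print(torch.isnan(real_batch).any())                  # NaN already present in the input data?

latent = torch.randn((batch_size, 9)).cuda()
fake = generator(latent)
print(torch.isnan(fake).any())                        # NaN after the generator?
print(torch.isnan(discriminator(fake)).any())         # NaN after the discriminator?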