PyTorch CUDA error

Hi, so this is my code:

#Initialize BCELoss function
criterion = nn.BCELoss()
#Establish convention for real and fake labels during training
real_label = random.uniform(0.9,1.0)
fake_label = 0
##Training Loop
#Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
noise = torch.Tensor()
noise2 = torch.Tensor()
print("Starting Training Loop...")
#For each epoch
x=0
for epoch in range(num_epochs):
    x=0
    # For each batch in the dataloader
    for i, data in enumerate(dataloader, 0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train with all-real batch
        netD.zero_grad()
        # Format batch
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size,), real_label, device=device)
        # Generate batch of spectra, latent vectors, and properties
        for j in range(batch_size):
            excelIndex = x*batch_size+j
            try:
                gotdata = excelDataTensor[excelIndex]
            except IndexError:
                break
            tensorA = excelDataTensor[excelIndex].view(1,4)
            noise2 = torch.cat((noise2,tensorA),0)
            
            tensor1 = torch.cat((excelDataTensor[excelIndex],torch.rand(latent)))
            tensor2 = tensor1.unsqueeze(1).unsqueeze(1).unsqueeze(1)         
            tensor3 = tensor2.permute(1,0,2,3)
            noise = torch.cat((noise,tensor3),0)         
                              
        noise = noise.to(device)            
        noise2 = noise2.to(device)
        
        # Forward pass real batch through D
        output = netD.forward(real_cpu,noise2).view(-1)
        # Calculate loss on all-real batch
        errD_real = criterion(output, label)
        # Calculate gradients for D in backward pass
        errD_real.backward()
        D_x = output.mean().item()
              
        ## Train with all-fake batch                
        # Generate fake image batch with G
        fake = netG.forward(noise)
        fake = fake.reshape(int(b_size),nc,image_size,image_size)
        label.fill_(fake_label)
        # Classify all fake batch with D
        output = netD.forward(fake.detach(),noise2).view(-1)
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)
        # Calculate the gradients for this batch
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Add the gradients from the all-real and all-fake batches
        errD = errD_real + errD_fake
        # Update D
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost
        # Since we just updated D, perform another forward pass of all-fake batch through D
        output = netD.forward(fake,noise2).view(-1)
        # Calculate G's loss based on this output
        errG = criterion(output, label)
        # Calculate gradients for G
        errG.backward()
        D_G_z2 = output.mean().item()
        # Update G
        optimizerG.step()

        # Output training stats
        if i % 50 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2), file=f)

        # Save Losses for plotting later
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Check how the generator is doing by saving G's output on fixed_noise
        if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
            with torch.no_grad():
                fake = netG(testTensor).view(100,nc,image_size,image_size).detach().cpu()
            img_list.append(vutils.make_grid(fake, nrow=10, padding=2, normalize=True))

        iters += 1
        noise = torch.Tensor()
        noise2 = torch.Tensor()     
        x += 1
    if epoch % 2 == 0:
        ##Update folder location
        torch.save(netG, save_dir + 'netG' + str(epoch) + '.pt')
        torch.save(netD, save_dir + 'netD' + str(epoch) + '.pt')

I am getting this error:

Starting Training Loop...
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [1,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [2,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [5,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [8,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [10,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [13,0,0] Assertion `input_val >= zero && input_val <= one` failed.
../aten/src/ATen/native/cuda/Loss.cu:92: operator(): block: [0,0,0], thread: [14,0,0] Assertion `input_val >= zero && input_val <= one` failed.
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[18], line 64
     62 output = netD.forward(fake.detach(),noise2).view(-1)
     63 # Calculate D's loss on the all-fake batch
---> 64 errD_fake = criterion(output, label)
     65 # Calculate the gradients for this batch
     66 errD_fake.backward()

File ~/miniconda3/envs/myenv/lib/python3.8/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
   1496 # If we don't have any hooks, we want to skip the rest of the logic in
   1497 # this function, and just call forward.
   1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1499         or _global_backward_pre_hooks or _global_backward_hooks
   1500         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501     return forward_call(*args, **kwargs)
   1502 # Do not call functions when jit is used
   1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/miniconda3/envs/myenv/lib/python3.8/site-packages/torch/nn/modules/loss.py:619, in BCELoss.forward(self, input, target)
    618 def forward(self, input: Tensor, target: Tensor) -> Tensor:
--> 619     return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)

File ~/miniconda3/envs/myenv/lib/python3.8/site-packages/torch/nn/functional.py:3098, in binary_cross_entropy(input, target, weight, size_average, reduce, reduction)
   3095     new_size = _infer_size(target.size(), weight.size())
   3096     weight = weight.expand(new_size)
-> 3098 return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)

RuntimeError: CUDA error: device-side assert triggered
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.

I have seen previous posts about the same issue, but I am still not able to resolve the error.
The PyTorch version is '2.0.1+cu117'.

nn.BCELoss expects both its input and its target to contain values in [0, 1], and the assert indicates that one of them is out of bounds. Did you try to print the target values for the failing operation to check their min and max?
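
Also note that CUDA operations are executed asynchronously, so a stacktrace can point to a later line than the one that actually failed. If the failing op is ever unclear, you can rerun with blocking launches (train.py is just a placeholder for your script name):

CUDA_LAUNCH_BLOCKING=1 python train.py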

Do you mean torch.set_printoptions(profile="full")? Or something else? I didn’t understand your question.

No, just print the labels directly and check their stats:

print(label, label.min(), label.max())

then let it run and check the last output before the assert is raised.

Okay, so the result I got from print(label, label.min(), label.max()) right before errD_fake = criterion(output, label) is:

tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
       device='cuda:0') tensor(0., device='cuda:0') tensor(0., device='cuda:0')

The values from iterations where the code doesn't fail are not interesting, so keep the code running until it fails and then check the values of that last iteration. Passing a target full of zeros will not cause the kernel assert, so also check the model output that is passed to the criterion.
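
For example, a guard like this right before the criterion call would catch the first bad batch (just a sketch, not tested against your code):

def invalid(t):
    # True if the tensor contains NaNs or values outside [0, 1]
    return bool(torch.isnan(t).any() or (t < 0).any() or (t > 1).any())

if invalid(output) or invalid(label):
    print(output, output.min(), output.max())
    print(label, label.min(), label.max())
    raise RuntimeError("invalid values passed to BCELoss")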

OK, so the outputs of

print(output, output.min(), output.max())

and

print(label, label.min(), label.max())

are:

tensor([   nan,    nan, 0.4863,    nan, 0.4838, 0.4855, 0.4809,    nan, 0.4791,
        0.4831, 0.4830,    nan, 0.4775,    nan, 0.4814,    nan],
       device='cuda:0', grad_fn=<ViewBackward0>) tensor(nan, device='cuda:0', grad_fn=<MinBackward1>) tensor(nan, device='cuda:0', grad_fn=<MaxBackward1>)
tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
       device='cuda:0') tensor(0., device='cuda:0') tensor(0., device='cuda:0')

How do I correct it?
By the way, my discriminator is:

class Discriminator(nn.Module):
    """Fully connected classical discriminator"""

    def __init__(self):
        super().__init__()
                
        self.l1 = nn.Linear(4, image_size*image_size*nc, bias=False)

        self.model = nn.Sequential(
            # Inputs to first hidden layer (num_input_features -> 64)
            nn.Linear(2 * image_size * image_size * nc, 64),
            nn.ReLU(),
            # First hidden layer (64 -> 16)
            nn.Linear(64, 16),
            nn.ReLU(),
            # Second hidden layer (16 -> output)
            nn.Linear(16, 1),
            nn.Sigmoid(),
        )


    def forward(self, inputs, label):
        x1 = inputs
        x2 = self.l1(label)
        #x2 = x2.reshape(int(b_size),nc,image_size,image_size)
        combine = torch.cat((x1,x2),1)
        combine = self.model(combine)
        return combine

#Create the Discriminator
netD = Discriminator().to(device)

#Print the model
print(netD)

Try to isolate and debug why your model is returning NaNs in its output. I don’t see anything obviously wrong in your code so far.
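
If it helps with the isolation: here is a minimal sketch of one way to find the first layer that produces a NaN, assuming netD and netG are plain nn.Modules (register_nan_hooks is a made-up helper name):

import torch

def register_nan_hooks(model):
    # Attach a forward hook to every submodule; the hook raises as soon as
    # any layer produces a NaN, so the offending layer is named in the error.
    def make_hook(name):
        def hook(module, inputs, output):
            if isinstance(output, torch.Tensor) and torch.isnan(output).any():
                raise RuntimeError(f"NaN in output of {name} ({module.__class__.__name__})")
        return hook
    for name, module in model.named_modules():
        module.register_forward_hook(make_hook(name))

Call register_nan_hooks(netD) (and netG) once before the training loop; the next forward pass that creates a NaN will then fail with the responsible layer's name instead of a later kernel assert. A diverging model (inf/NaN weights after a large update) or NaNs already present in the input data would be common culprits to check.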