ValueError: Using a target size (torch.Size([1])) that is different to the input size (torch.Size([16])) is deprecated. Please ensure they have the same size

def main():
    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(args.num_epochs):
        # For each batch in the dataloader
        for i, data in enumerate(dataloader):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ############################
            # Train with all-real batch
            discriminator.zero_grad()
            # Format batch
            real_cpu = data[0].to(device)
            b_size = real_cpu.size(0)
            label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
            # Forward pass real batch through D
            output = discriminator(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            # Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, args.z_size, 1, 1, device=device)
            # Generate fake voxels batch with G
            fake_voxels = generator(noise)
            # Render fake voxel batch with R
            fake = renderer(fake_voxels)
            label.fill_(fake_label)
            # Classify all fake batch with D
            output = discriminator(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Sum the real and fake losses for logging; their gradients were already
            # accumulated by the two backward() calls above
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ############################
            generator.zero_grad()
            label.fill_(real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = discriminator(fake).view(-1)
            # Calculate G's loss based on this output
            errG = criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            D_G_z2 = output.mean().item()
            # Update G
            optimizerG.step()

            ############################
            # (3) Update R network: minimize l2(R) + lambda * DOM(R)
            ############################
            renderer.zero_grad()
            # Since we just updated G, generate a new fake batch
            fake_voxels = generator(noise)
            # Render the voxels with the neural renderer
            nr = renderer(fake_voxels)
            # Render the voxels with an off-the-shelf renderer
            ots = render.render_tensor(fake_voxels, device)
            # Perform a forward pass of neural renderer output through D
            nr_output = discriminator(nr).view(-1)
            # Perform a forward pass of off-the-shelf renderer output through D
            ots_output = discriminator(ots).view(-1)
            # Calculate R's L2 loss based on squared error of pixel matrix
            errL2 = l2(nr, ots)
            # Calculate R's DOM loss based on squared log error of discriminator output
            errDOM = DOMLoss(ots_output, nr_output)
            errR = errL2 + args.dom_lambda * errDOM
            # Calculate gradients for R
            errR.backward()
            R_x = nr_output.mean().item()
            # Update R
            optimizerR.step()
            # Free the off-the-shelf render to reclaim GPU memory
            del ots

            # Output training stats
            if i % 50 == 0:
                print(f"[{epoch}/{args.num_epochs}][{i}/{len(dataloader)}]\t"
                      f"Loss_D: {errD.item():.4f}\tLoss_G: {errG.item():.4f}\tLoss_R: {errR.item():.4f}\t"
                      f"D(x):  {D_x:.4f}\tD(G(z)):  {D_G_z1:.4f}/ {D_G_z2:.4f}\tR(G(z):  {R_x:.4f}")

Hi, I'm new to PyTorch. I'm trying to create 3D chairs from 2D chair images. I feed in a dataset of images, but I'm hitting the ValueError above. Can anyone point me in the right direction to fix this? Thanks in advance!

I guess the flattening of the output might cause this issue:

output = discriminator(real_cpu).view(-1)
# and/or
output = discriminator(fake.detach()).view(-1)

Depending on the loss function you're using, you might need to pass an output and/or target with multiple dimensions. Based on the error message, the target (label) has a single element while the flattened output has 16, which suggests b_size was 1 for this batch but discriminator(real_cpu) returned 16 values for that single sample. I would check the shape of the discriminator output before the .view(-1) call and make sure it's a single value per sample.
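
Here's a minimal sketch of the failure and the check I would add (the tensors below are stand-ins for your models, not your actual code):

import torch
import torch.nn as nn

criterion = nn.BCELoss()

# Stand-in for the discriminator output: 16 values coming from a single sample
raw = torch.sigmoid(torch.randn(1, 16))  # hypothetical D output, shape [1, 16]
output = raw.view(-1)                    # shape [16]
label = torch.full((1,), 1.0)            # b_size == 1 -> shape [1]
# criterion(output, label)               # raises the ValueError from your post

# Check what D actually returns before flattening:
print(raw.shape)  # expect [batch_size, 1] (or [batch_size, 1, 1, 1]) for one value per sample

If the discriminator is supposed to return a single probability per sample, the fix belongs in the model itself (e.g. its last layer or pooling should reduce the activation to one value). If the extra values are intended, create the label to match the output instead, e.g. label = torch.full_like(output, real_label).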