TypeError: forward() takes 2 positional arguments but 3 were given

class G(nn.Module):

    def __init__(self):
        super(G, self).__init__()
        self.main = nn.Sequential(
            nn.ConvTranspose2d(100, 512, 4, 1, 0, bias = False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias = False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias = False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias = False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias = False),
            nn.Tanh()
        )

    def forward(self, input):
        output = self.main(input)
        return output

# Creating the generator
netG = G()
netG.apply(weights_init)

# Defining the discriminator

class D(nn.Module):

    def __init__(self):
        super(D, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(512, 1, 4, 1, 0, bias = False),
            nn.Sigmoid()
        )

    def forward(self, input):
        output = self.main(input)
        return output.view(-1)

# Creating the discriminator
netD = D()
netD.apply(weights_init)
criterion = nn.Sigmoid()
optimizerD = optim.Adam(netD.parameters(), lr = 0.0002, betas = (0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr = 0.0002, betas = (0.5, 0.999))

I am doing a GAN using two classes, dogs and cats. It's showing:

TypeError: forward() takes 2 positional arguments but 3 were given


Could you post the code showing where forward gets called on your model?

for epoch in range(25):

    for i, data in enumerate(dataloader, 0):
        
        # 1st Step: Updating the weights of the neural network of the discriminator

        netD.zero_grad()
        
        # Training the discriminator with a real image of the dataset
        real, _ = data
        input = Variable(real)
        target = Variable(torch.ones(input.size()[0]))
        output = netD(input)
        errD_real = criterion(output, target)
        
        # Training the discriminator with a fake image generated by the generator
        noise = Variable(torch.randn(input.size()[0], 100, 1, 1))
        fake = netG(noise)
        target = Variable(torch.zeros(input.size()[0]))
        output = netD(fake.detach())
        errD_fake = criterion(output, target)
        
        # Backpropagating the total error
        errD = errD_real + errD_fake
        errD.backward()
        optimizerD.step()

        # 2nd Step: Updating the weights of the neural network of the generator

        netG.zero_grad()
        target = Variable(torch.ones(input.size()[0]))
        output = netD(fake)
        errG = criterion(output, target)
        errG.backward()
        optimizerG.step()

Thanks for the code. We would need more information about real.
Could you print it, its type, and its len?

real is image files in .jpg format. I set batch_size=1. I have two targets, dogs and cats, so I used sigmoid. There are 936 images in the dataset in total.

Could you print the full call stack of the error message?

C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torchvision\transforms\transforms.py:188: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  "please use transforms.Resize instead.")
Traceback (most recent call last):

  File "<ipython-input-1-ca7bb5f3dbe2>", line 115, in <module>
    errD_real = criterion(output, target)

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\nn\modules\module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)

TypeError: forward() takes 2 positional arguments but 3 were given

nn.Sigmoid takes only 1 parameter, but you are giving it 2. https://pytorch.org/docs/stable/torch.html?highlight=sigmoid#torch.sigmoid

I guess you are actually looking for this: https://pytorch.org/docs/stable/nn.html?highlight=cross_entropy#torch.nn.functional.binary_cross_entropy
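
In this code that would be a one-line change (a sketch, keeping everything else as posted):

import torch.nn as nn

# Only the criterion line of the setup changes. The discriminator already
# ends in nn.Sigmoid(), so its output is in (0, 1), which is exactly what
# BCELoss expects as its prediction.
criterion = nn.BCELoss()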


I changed my loss function to

nn.BCELoss()

Now I am getting:

  "please use transforms.Resize instead.")
C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\nn\functional.py:1189: UserWarning: Using a target size (torch.Size([1])) that is different to the input size (torch.Size([15])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
Traceback (most recent call last):

  File "<ipython-input-1-7e0b4c8bc7b1>", line 115, in <module>
    errD_real = criterion(output, target)

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\nn\modules\module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\nn\modules\loss.py", line 430, in forward
    size_average=self.size_average)

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\nn\functional.py", line 1192, in binary_cross_entropy
    "!= input nelement ({})".format(target.nelement(), input.nelement()))

ValueError: Target and input must have the same number of elements. target nelement (1) != input nelement (15)

I am new to PyTorch. I chose PyTorch over TensorFlow because I love PyTorch. Please help me solve this error. I am doing a GAN with PyTorch.

The error message is very clear: the size of target should be the same as that of output. Just print their shapes using .size() to debug.
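
For instance, right before the criterion call (the names match your training loop):

print(output.size())  # what the discriminator returned
print(target.size())  # must have the same number of elements for BCELoss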

target.size()
Out[2]: torch.Size([1])

This is the output I got. I set batch_size=1. This is the input shape that I got:

input.size()
Out[3]: torch.Size([1, 3, 100, 128])

What you should print are the two variables you fed to criterion, which are output and target.

I suggest you take a look at the official DCGAN example: https://github.com/pytorch/examples/blob/master/dcgan/main.py

criterion = nn.Sigmoid()
optimizerD = optim.Adam(netD.parameters(), lr = 0.0002, betas = (0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr = 0.0002, betas = (0.5, 0.999))

for epoch in range(25):

    for i, data in enumerate(dataloader, 0):
        
        # 1st Step: Updating the weights of the neural network of the discriminator

        netD.zero_grad()
        
        # Training the discriminator with a real image of the dataset
        real, _ = data
        input = Variable(real)
        target = Variable(torch.ones(input.size()[0]))
        output = netD(input)
        errD_real = criterion(output, target)


I have given it in the training loop. I created two neural networks, one for the discriminator and another for the generator. I have posted my training code as well.

Your problem is that the sigmoid is not a criterion but an activation, meaning it accepts only one input tensor, not two (one for the prediction and one for the target). I think you are looking for BCELoss instead.
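
A minimal sketch of the difference (standalone tensors, not your model):

import torch
import torch.nn as nn

act = nn.Sigmoid()                      # activation: exactly one input tensor
probs = act(torch.randn(4))             # fine
criterion = nn.BCELoss()                # criterion: prediction and target
loss = criterion(probs, torch.ones(4))  # fine
# act(probs, torch.ones(4))             # raises: forward() takes 2 positional
#                                       # arguments but 3 were given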

Yes, I changed to BCELoss(), but it is still showing the same error.

Can you please do

print(output.size())
print(target.size()) # should be the same as output size

Above you are only printing the input size, but we need the output size to help you.

print(output.size())
print(target.size())
print(input.size())

torch.Size([15])
torch.Size([1])
torch.Size([1, 3, 100, 133])

These are my output, target, and input shapes. Output and target are different. How do I solve this? Should I post my whole code?


from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable

batchSize = 64 
imageSize = 100 

# Creating the transformations
transform = transforms.Compose([transforms.Scale(imageSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]) # We create a list of transformations (scaling, tensor conversion, normalization) to apply to the input images.

# Loading the dataset
dataset = dset.ImageFolder(root = 'Dataset/', transform = transform) # We load the images from the Dataset/ folder and apply the previous transformations to each one.
dataloader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True, num_workers = 2) # We use a DataLoader to get the images of the training set batch by batch.

# Defining the weights_init function that takes as input a neural network m and that will initialize all its weights.
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

# Defining the generator

class G(nn.Module):

    def __init__(self):
        super(G, self).__init__()
        self.main = nn.Sequential(
            nn.ConvTranspose2d(100, 512, 4, 1, 0, bias = False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias = False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias = False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias = False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias = False),
            nn.Tanh()
        )

    def forward(self, input):
        output = self.main(input)
        return output

# Creating the generator
netG = G()
netG.apply(weights_init)

# Defining the discriminator

class D(nn.Module):

    def __init__(self):
        super(D, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(512, 1, 4, 1, 0, bias = False),
            nn.Sigmoid()
        )

    def forward(self, input):
        output = self.main(input)
        return output.view(-1)

# Creating the discriminator
netD = D()
netD.apply(weights_init)

# Training the DCGANs

criterion = nn.BCELoss()
optimizerD = optim.Adam(netD.parameters(), lr = 0.0002, betas = (0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr = 0.0002, betas = (0.5, 0.999))

for epoch in range(25):

    for i, data in enumerate(dataloader, 0):
        
        # 1st Step: Updating the weights of the neural network of the discriminator

        netD.zero_grad()
        
        # Training the discriminator with a real image of the dataset
        real, _ = data
        input = Variable(real)
        target = Variable(torch.ones(input.size()[0]))
        output = netD(input)
        errD_real = criterion(output, target)
        
        # Training the discriminator with a fake image generated by the generator
        noise = Variable(torch.randn(input.size()[0], 100, 1, 1))
        fake = netG(noise)
        target = Variable(torch.zeros(input.size()[0]))
        output = netD(fake.detach())
        errD_fake = criterion(output, target)

        # Backpropagating the total error
        errD = errD_real + errD_fake
        errD.backward()
        optimizerD.step()

        # 2nd Step: Updating the weights of the neural network of the generator

        netG.zero_grad()
        target = Variable(torch.ones(input.size()[0]))
        output = netD(fake)
        errG = criterion(output, target)
        errG.backward()
        optimizerG.step()
        
        # 3rd Step: Printing the losses and saving the real images and the generated images of the minibatch every 100 steps

        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f' % (epoch, 25, i, len(dataloader), errD.data[0], errG.data[0]))
        if i % 100 == 0:
            vutils.save_image(real, '%s/real_samples.png' % "./results", normalize = True)
            fake = netG(noise)
            vutils.save_image(fake.data, '%s/fake_samples_epoch_%03d.png' % ("./results", epoch), normalize = True)

I have posted my entire code. Please tell me the solution. I have two classes (dogs and cats).

Could you please set the batch size to a value higher than 1 and print the shapes again?

Edit: you also need to remove the .view(-1) part in your discriminator's forward function.
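
For what it's worth, you can see where the 15 comes from by probing the discriminator with dummy inputs (a sketch; 100x133 stands in for one of your variable image shapes, and netD is the model defined above, with .view(-1) still in place):

import torch
from torch.autograd import Variable

# A 100x133 image leaves the last 4x4 convolution with a 3x5 feature map,
# so .view(-1) flattens 1*3*5 = 15 values into a single vector.
print(netD(Variable(torch.randn(1, 3, 100, 133))).size())  # torch.Size([15])

# With the 64x64 inputs the DCGAN layer sizes are designed for, the last
# convolution reduces the map to 1x1 and D emits one value per image.
print(netD(Variable(torch.randn(1, 3, 64, 64))).size())    # torch.Size([1])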

Traceback (most recent call last):

  File "<ipython-input-5-ede2393ccf01>", line 104, in <module>
    for i, data in enumerate(dataloader, 0):

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 281, in __next__
    return self._process_next_batch(batch)

  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 301, in _process_next_batch
    raise batch.exc_type(batch.exc_msg)

RuntimeError: Traceback (most recent call last):
  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 55, in _worker_loop
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 135, in default_collate
    return [default_collate(samples) for samples in transposed]
  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 135, in <listcomp>
    return [default_collate(samples) for samples in transposed]
  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\utils\data\dataloader.py", line 112, in default_collate
    return torch.stack(batch, 0, out=out)
  File "C:\Users\anesh\Anaconda3\envs\virtual_platform\lib\site-packages\torch\functional.py", line 66, in stack
    return torch.cat(inputs, dim, out=out)
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 115 and 100 in dimension 2 at c:\anaconda2\conda-bld\pytorch_1519492996300\work\torch\lib\th\generic/THTensorMath.c:2897

I changed the batch_size to 10 and removed the view(-1). I am getting this error. I read that images in a batch must be of equal shape, as they are stored as a single tensor. That's why I set batch_size=1.
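
One common way around that, instead of batch_size=1, is to make every image the same shape in the transform (a sketch using torchvision's Resize and CenterCrop; 64 matches the input size the DCGAN layer sizes assume):

import torchvision.transforms as transforms

# Every image becomes exactly 3x64x64, so default_collate can stack any
# batch size, and the discriminator's final convolution sees a 4x4 map.
transform = transforms.Compose([
    transforms.Resize(64),       # scale the shorter side to 64 pixels
    transforms.CenterCrop(64),   # then crop to a fixed 64x64 square
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])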

Traceback (most recent call last):
  File "/home/akb/pybook/pytorchupandrunning/chapter2/chapter2.py", line 101, in <module>
    train(model, optimizer, torch.nn.CrossEntropyLoss(), train_data_loader, test_data_loader)
  File "/home/akb/pybook/pytorchupandrunning/chapter2/chapter2.py", line 75, in train
    output = model(inputs)
  File "/home/akb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input)
TypeError: forward() takes 1 positional argument but 2 were given

Please help me solve this error.