TypeError: forward() takes 2 positional arguments but 17 were given

Hi, I am learning PyTorch. I'm trying to use TensorBoard, but I don't know where I went wrong. Frankly, I'm not sure my code is correct; I learn by trying. Sorry for my English. Thank you.

TypeError: forward() takes 2 positional arguments but 17 were given

Can you send the part of your code where this error shows up?

I'm new to PyTorch and I'm learning to use TensorBoard. I'm not sure if my code is correct.

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        
        self.conv1=nn.Conv2d(in_channels=1,out_channels=16,kernel_size=(3,3),stride=1,padding=0)
        self.activation1=nn.ReLU()
        
        self.max1=nn.MaxPool2d(kernel_size=2)
        
        self.conv2=nn.Conv2d(in_channels=16,out_channels=32,kernel_size=(2,2),stride=1,padding=0)
        self.activation2=nn.ReLU()
        
        self.max2=nn.MaxPool2d(kernel_size=2)
        
        self.conv3=nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(3,3),stride=1,padding=0)
        self.activation3=nn.ReLU()
        
        self.fc1=nn.Linear(in_features=64*4*4, out_features=50, bias=True)
        self.fc_ac1=nn.ReLU()
        self.fc2=nn.Linear(in_features=50, out_features=50, bias=True)
        self.fc_ac2=nn.ReLU()
        self.fc3=nn.Linear(in_features=50, out_features=10, bias=True)
        
    def forward(self, x):
        x=self.conv1(x)
        x=self.activation1(x)
        x=self.max1(x)
        x=self.conv2(x)
        x=self.activation2(x)
        x=self.max2(x)
        x=self.conv3(x)
        x=self.activation3(x)
        x = x.view(-1, 64 * 4 * 4)
        x=self.fc1(x)
        x=self.fc_ac1(x)
        x=self.fc2(x)
        x=self.fc_ac2(x)
        x=self.fc3(x)
        return x
parameters=dict(lr=[0.1,0.01,0.001],
                batch_size=[16,32,64,128,256],
                shuffle=[True,False])
params=[v for v in parameters.values()]


for lr,batch_size,shuffle in product(*params):
    comment = f' batch_size={batch_size} lr={lr} shuffle={shuffle}'
    
    train_loader=torch.utils.data.DataLoader(train,batch_size=batch_size,shuffle=shuffle,drop_last=False)
    test_loader=torch.utils.data.DataLoader(test,batch_size=batch_size,shuffle=shuffle,drop_last=False)
    
    net=Net().to(device)

    images,labels=next(iter(train_loader))
    grid=torchvision.utils.make_grid(images)
    
    tensorboard=SummaryWriter(comment=comment)
    tensorboard.add_image("images",grid)
    tensorboard.add_graph(net, images)

    optimizer=torch.optim.Adam(net.parameters(),lr=lr)

    error=nn.CrossEntropyLoss()

    for epoch in range(5):
        total_loss=0
        total_correct=0
        total_index=0
        for i,(data,target) in enumerate(train_loader):
            
            data=Variable(data.view(-1, 64 * 4 * 4)).to(device)
            target=Variable(target).to(device)
            
            out=net(data.float())
            
            loss=error(out,target)
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            total_loss+=loss.item()*batch_size
            
            y_pred=torch.max(out.data,1)[1]
            total_correct+=(y_pred==target).sum()
            total_index+=len(target)
            accuracy = 100 * total_correct / float(total_index)
            
            
        tensorboard.add_scalar("Loss",total_loss,epoch)
        tensorboard.add_scalar("Loss",total_correct,epoch)
        
        tensorboard.add_histogram("conv1.bias",net.conv1.bias,epoch)
        tensorboard.add_histogram("conv2.bias",net.conv2.bias,epoch)
        tensorboard.add_histogram("conv3.bias",net.conv3.bias,epoch)
        
        tensorboard.add_histogram("conv1.weight",net.conv1.weight,epoch)
        tensorboard.add_histogram("conv2.weight",net.conv2.weight,epoch)
        tensorboard.add_histogram("conv3.weight",net.conv3.weight,epoch)
        
        tensorboard.add_histogram("conv1.weight.grad",net.conv1.weight.grad,epoch)
        tensorboard.add_histogram("conv2.weight.grad",net.conv2.weight.grad,epoch)
        tensorboard.add_histogram("conv3.weight.grad",net.conv3.weight.grad,epoch)
        
        print(
                "epoch", epoch
                ,"total_correct:", total_correct
                ,"loss:", total_loss
            ) 
        
    tensorboard.close()

Error occurs, No graph saved
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\utils\tensorboard\_pytorch_graph.py in graph(model, args, verbose, operator_export_type, omit_useless_nodes)
    275         try:
--> 276             trace, _ = torch.jit.get_trace_graph(model, args)
    277         except RuntimeError:

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\jit\__init__.py in get_trace_graph(f, args, kwargs, _force_outplace, return_inputs)
    230         args = (args,)
--> 231     return LegacyTracedModule(f, _force_outplace, return_inputs)(*args, **kwargs)
    232 

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\jit\__init__.py in forward(self, *args)
    293             trace_inputs = _unflatten(all_trace_inputs[:len(in_vars)], in_desc)
--> 294             out = self.inner(*trace_inputs)
    295             out_vars, _ = _flatten(out)

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    490         if torch._C._get_tracing_state():
--> 491             result = self._slow_forward(*input, **kwargs)
    492         else:

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in _slow_forward(self, *input, **kwargs)
    480         try:
--> 481             result = self.forward(*input, **kwargs)
    482         finally:

<ipython-input-8-b62cec869e0e> in forward(self, x)
     24     def forward(self, x):
---> 25         x=self.conv1(x)
     26         x=self.activation1(x)

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    490         if torch._C._get_tracing_state():
--> 491             result = self._slow_forward(*input, **kwargs)
    492         else:

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in _slow_forward(self, *input, **kwargs)
    480         try:
--> 481             result = self.forward(*input, **kwargs)
    482         finally:

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    337         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 338                         self.padding, self.dilation, self.groups)
    339 

RuntimeError: Expected 4-dimensional input for 4-dimensional weight 16 1, but got 2-dimensional input of size [16, 784] instead

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
<ipython-input-9-307b6a1597c9> in <module>
     18     tensorboard=SummaryWriter(comment=comment)
     19     tensorboard.add_image("images",grid)
---> 20     tensorboard.add_graph(net, images)
     21 
     22     optimizer=torch.optim.Adam(net.parameters(),lr=lr)

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\utils\tensorboard\writer.py in add_graph(self, model, input_to_model, verbose, **kwargs)
    532                     print('add_graph() only supports PyTorch v0.2.')
    533                     return
--> 534             self._get_file_writer().add_graph(graph(model, input_to_model, verbose, **kwargs))
    535         else:
    536             # Caffe2 models do not have the 'forward' method

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\utils\tensorboard\_pytorch_graph.py in graph(model, args, verbose, operator_export_type, omit_useless_nodes)
    277         except RuntimeError:
    278             print('Error occurs, No graph saved')
--> 279             _ = model(*args)  # don't catch, just print the error message
    280             print("Checking if it's onnx problem...")
    281             try:

C:\Anaconda3\envs\Genel_ortam\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

TypeError: forward() takes 2 positional arguments but 17 were given

Can you print out the shape of the images that you pass into the TensorBoard add_graph function? For example:

    images,labels=next(iter(train_loader))
    print(images.shape)
    grid=torchvision.utils.make_grid(images)
    
    tensorboard=SummaryWriter(comment=comment)
    tensorboard.add_image("images",grid)
    tensorboard.add_graph(net, images)

What is the shape?

Thanks for being interested.

images.shape
→ torch.Size([16, 784])

labels.shape
→ torch.Size([16])

I think you are passing data in the wrong shape to your first layer. Conv2d expects a 4-dimensional input N x C x X x Y (with batch dimension N, C probably equal to 1, and then the spatial dimensions X and Y). If I am not mistaken, you use data.view to make your data 2-dimensional (instead of 4-dimensional), which causes the original RuntimeError in Conv2d.
Assuming you work with the MNIST dataset (I am guessing because of the 784), your view command should probably be (with X, Y = 28):

data=Variable(data.view(-1, 1, X, Y)).to(device)
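
For example, a quick sanity check (just a sketch, assuming your loader really yields flattened MNIST batches of shape [N, 784]):

    import torch

    data = torch.randn(16, 784)        # stand-in for one batch from your loader
    data = data.view(-1, 1, 28, 28)    # reshape to N x C x H x W, as Conv2d expects
    print(data.shape)                  # torch.Size([16, 1, 28, 28])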

The TypeError in forward() during the execution of add_graph is probably caused by the images input as well. You get images, labels from your train_loader, so images is a tensor of shape N x [whatever your data shape is]. When the trace fails, add_graph falls back to calling model(*args), which unpacks your batch of 16 flattened images into 16 separate positional arguments (plus self, that's the 17 in the error message). tensorboard.add_graph expects either a Tensor or a list of Tensors, according to the documentation.
You can find example code from the devs on how to give add_graph multiple inputs here.
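
For your single-tensor case, reshaping images the same way before the TensorBoard calls should be enough. A minimal sketch (again assuming 28 x 28 MNIST; moving images onto the same device as net is my addition):

    images, labels = next(iter(train_loader))
    images = images.view(-1, 1, 28, 28)            # same 4-D shape the model expects
    grid = torchvision.utils.make_grid(images)

    tensorboard = SummaryWriter(comment=comment)
    tensorboard.add_image("images", grid)
    tensorboard.add_graph(net, images.to(device))  # pass one 4-D tensor, not a flat batch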

P.S.: Wrapping tensors in Variable(...) is deprecated. It's not causing your error, but we're not meant to use it anymore.
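
In current PyTorch you can drop Variable entirely and move the tensor to the device directly, something like this (hypothetical, matching your training loop):

    data = data.view(-1, 1, 28, 28).to(device)   # no Variable() wrapper needed since PyTorch 0.4
    target = target.to(device)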

Hi FreddyJ, thank you for your support. I did what you said and it worked. You saved me from a big problem. I appreciate it.