TypeError: conv3d() received an invalid combination of arguments - got (builtin_function_or_method, Parameter, Parameter, tuple, tuple, tuple, int)

TypeError Traceback (most recent call last)
~\main.py in <module>
387 return output
388 if __name__ == '__main__':
→ 389 main()

~\main.py in main()
200
201 for epoch in range(start_epoch, args.epochs + 1):
→ 202 train(train_loader, net, loss, epoch, optimizer, get_lr, args.save_freq, save_dir)
203 validate(val_loader, net, loss)
204

~\main.py in train(data_loader, net, loss, epoch, optimizer, get_lr, save_freq, save_dir)
218 coord = Variable(coord.cuda(non_blocking = True)).cpu
219
→ 220 output = net(data, coord)
221 loss_output = loss(output, target)
222 optimizer.zero_grad()

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\parallel\data_parallel.py in forward(self, *inputs, **kwargs)
164
165 if len(self.device_ids) == 1:
→ 166 return self.module(*inputs[0], **kwargs[0])
167 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
168 outputs = self.parallel_apply(replicas, inputs, kwargs)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

~\res18.py in forward(self, x, coord)
95
96 def forward(self, x, coord):
→ 97 out = self.preBlock(x)#16
98 out_pool,indices0 = self.maxpool1(out)
99 out1 = self.forw1(out_pool)#32

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
137 def forward(self, input):
138 for module in self:
→ 139 input = module(input)
140 return input
141

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
585
586 def forward(self, input: Tensor) -> Tensor:
→ 587 return self._conv_forward(input, self.weight, self.bias)
588
589

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in _conv_forward(self, input, weight, bias)
580 self.groups,
581 )
→ 582 return F.conv3d(
583 input, weight, bias, self.stride, self.padding, self.dilation, self.groups
584 )

TypeError: conv3d() received an invalid combination of arguments - got (builtin_function_or_method, Parameter, Parameter, tuple, tuple, tuple, int), but expected one of:

  • (Tensor input, Tensor weight, Tensor bias, tuple of ints stride, tuple of ints padding, tuple of ints dilation, int groups)
    didn't match because some of the arguments have invalid types: (builtin_function_or_method, Parameter, Parameter, tuple, tuple, tuple, int)
  • (Tensor input, Tensor weight, Tensor bias, tuple of ints stride, str padding, tuple of ints dilation, int groups)
    didn't match because some of the arguments have invalid types: (builtin_function_or_method, Parameter, Parameter, tuple, tuple, tuple, int)

You probably want coord = Variable(coord.cuda(non_blocking = True)).cpu() rather than coord = Variable(coord.cuda(non_blocking = True)).cpu. Without the parentheses, .cpu is the bound method itself, not its result, and that method object is what gets passed into the network - hence the builtin_function_or_method in the conv3d() error. (As an aside, Variable has been a no-op since PyTorch 0.4, so the wrapper can be dropped.)
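For illustration, the same mistake reproduces outside the training loop; the layer and tensor shapes below are made up for the sketch:

    import torch

    conv = torch.nn.Conv3d(3, 16, kernel_size=3)
    x = torch.randn(1, 3, 8, 8, 8)   # arbitrary 5-D input

    try:
        conv(x.cpu)        # x.cpu is the bound method, not a tensor
    except TypeError as e:
        print(e)           # conv3d() received an invalid combination of arguments ...

    out = conv(x.cpu())    # calling the method returns a tensor, so this works
    print(out.shape)       # torch.Size([1, 16, 6, 6, 6])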

Thanks, it worked! But now I am facing a memory issue.

RuntimeError Traceback (most recent call last)
~\main.py in <module>
387 return output
388 if __name__ == '__main__':
→ 389 main()

~\main.py in main()
200
201 for epoch in range(start_epoch, args.epochs + 1):
→ 202 train(train_loader, net, loss, epoch, optimizer, get_lr, args.save_freq, save_dir)
203 validate(val_loader, net, loss)
204

~\main.py in train(data_loader, net, loss, epoch, optimizer, get_lr, save_freq, save_dir)
218 coord = Variable(coord.cuda(non_blocking = True)).cpu()
219
→ 220 output = net(data, coord)
221 loss_output = loss(output, target)
222 optimizer.zero_grad()

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\parallel\data_parallel.py in forward(self, *inputs, **kwargs)
164
165 if len(self.device_ids) == 1:
→ 166 return self.module(*inputs[0], **kwargs[0])
167 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
168 outputs = self.parallel_apply(replicas, inputs, kwargs)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

~\res18.py in forward(self, x, coord)
110 comb3 = self.back3(torch.cat((rev3, out3), 1))#96+96
111 #comb3 = self.drop(comb3)
→ 112 rev2 = self.path2(comb3)
113
114 comb2 = self.back2(torch.cat((rev2, out2,coord), 1))#64+64

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
137 def forward(self, input):
138 for module in self:
→ 139 input = module(input)
140 return input
141

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\batchnorm.py in forward(self, input)
165 used for normalization (i.e. in eval mode when buffers are not None).
166 """
→ 167 return F.batch_norm(
168 input,
169 # If buffers are not to be tracked, ensure that they won’t be updated

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
2279 _verify_batch_size(input.size())
2280
→ 2281 return torch.batch_norm(
2282 input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled
2283 )

RuntimeError: CUDA out of memory. Tried to allocate 54.00 MiB (GPU 0; 24.00 GiB total capacity; 8.34 GiB already allocated; 0 bytes free; 8.40 GiB reserved in total by PyTorch)
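Note the numbers in that message: PyTorch has only reserved 8.40 GiB of the card's 24 GiB, yet 0 bytes are free, which usually means another process (or a previous run that never exited) is still holding GPU memory; nvidia-smi will show who. If the GPU really is otherwise idle, the usual levers are a smaller batch size and running validation without autograd. A minimal sketch of the latter, assuming the loader yields (data, target, coord) batches the way the train() excerpt above suggests (main.py is not shown, so the loop body is an assumption):

    import torch

    def validate(val_loader, net, loss):
        net.eval()              # BatchNorm/Dropout switch to inference behavior
        with torch.no_grad():   # no activations kept for backward -> much less memory
            for data, target, coord in val_loader:   # assumed loader layout
                data = data.cuda(non_blocking=True)
                coord = coord.cuda(non_blocking=True)
                output = net(data, coord)
                loss_output = loss(output, target.cuda(non_blocking=True))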