RuntimeError: 2D or 4D weight tensor expected, but got

Hello,
In my project I’m trying to use a 3D convolutional network on some video frames, but when I perform a forward pass, the program raises this error:

    Traceback (most recent call last):
      File "C3D_training.py", line 309, in <module>
        main()
      File "C3D_training.py", line 152, in main
        train(train_loader, model, criterion, optimizer, epoch)
      File "C3D_training.py", line 187, in train
        output = model.forward(input_var)
      File "C3D_training.py", line 41, in forward
        x = F.relu(self.conv1a(x))
      File "/usr/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 206, in __call__
        result = self.forward(*input, **kwargs)
      File "/usr/local/anaconda3/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 330, in forward
        self.padding, self.dilation, self.groups)
      File "/usr/local/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 88, in conv3d
        return f(input, weight, bias)
    RuntimeError: 2D or 4D weight tensor expected, but got: [64 x 3 x 3 x 3 x 3] at /py/conda-bld/pytorch_1490903321756/work/torch/lib/THNN/generic/SpatialConvolutionMM.c:15

I don’t know exactly what the origin of this error is, or what it means. Any help would be appreciated.
Thank you,

You are sending a 5D tensor for some reason instead of a 4D one. Maybe you are using nn.Conv2d where you actually need to use nn.Conv3d.
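
Either way, the mismatch is between 4D and 5D shapes around conv1a: nn.Conv3d expects a 5D input of shape (N, C, T, H, W), while a plain image loader produces 4D batches of shape (N, C, H, W). A minimal sketch of the difference (the exact error text varies across PyTorch versions):

    import torch
    import torch.nn as nn

    conv = nn.Conv3d(3, 64, kernel_size=3, stride=1, padding=1)

    # nn.Conv3d expects a 5D input: (batch, channels, frames, height, width)
    clip = torch.randn(2, 3, 16, 112, 112)
    print(conv(clip).shape)            # torch.Size([2, 64, 16, 112, 112])

    # A 4D batch of single images does not match that and raises a RuntimeError
    # (the exact message depends on the PyTorch version).
    images = torch.randn(2, 3, 112, 112)
    try:
        conv(images)
    except RuntimeError as err:
        print(err)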

I am actually using nn.Conv3d and nn.MaxPool3d throughout the model, something like this:

    self.conv1a = nn.Conv3d(3, 64, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
    self.pool1  = nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2))

Is there something wrong with this model definition?

    class C3D(nn.Module):
        def __init__(self):
            super(C3D, self).__init__()
            self.conv1a = nn.Conv3d(3, 64, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.pool1  = nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2))
            self.conv2a = nn.Conv3d(64, 128, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.pool2  = nn.MaxPool3d((2, 2, 2), stride=(2, 2, 2))
            self.conv3a = nn.Conv3d(128, 256, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.conv3b = nn.Conv3d(256, 256, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.pool3  = nn.MaxPool3d((2, 2, 2), stride=(2, 2, 2))
            self.conv4a = nn.Conv3d(256, 512, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.conv4b = nn.Conv3d(512, 512, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.pool4  = nn.MaxPool3d((2, 2, 2), stride=(2, 2, 2))
            self.conv5a = nn.Conv3d(512, 512, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.conv5b = nn.Conv3d(512, 512, (3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=True)
            self.pool5  = nn.MaxPool3d((2, 2, 2), stride=(2, 2, 2))
            self.fc6 = nn.Linear(512, 4096, bias=True)
            self.fc7 = nn.Linear(4096, 4096, bias=True)
            self.fc8 = nn.Linear(4096, 487, bias=True)

        def forward(self, x):
            x = F.relu(self.conv1a(x))
            x = self.pool1(x)
            x = F.relu(self.conv2a(x))
            x = self.pool2(x)
            x = F.relu(self.conv3a(x))
            x = F.relu(self.conv3b(x))
            x = self.pool3(x)
            x = F.relu(self.conv4a(x))
            x = F.relu(self.conv4b(x))
            x = self.pool4(x)
            x = F.relu(self.conv5a(x))
            x = F.relu(self.conv5b(x))
            x = self.pool5(x)
            x = F.relu(self.fc6(x.view(1, 512)))
            x = F.dropout(x, p=0.5)
            x = F.relu(self.fc7(x))
            x = F.dropout(x, p=0.5)
            return F.softmax(self.fc8(x))
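
As a side note on the flatten: x.view(1, 512) hard-codes both the batch size and the number of features reaching fc6. A quick way to see what the convolution/pooling stack actually produces is to push one dummy 5D clip through a copy of just that part of the model; the sketch below mirrors the layer sizes above and assumes 16-frame, 112x112 clips:

    import torch
    import torch.nn as nn

    # Same channel sizes and pooling as conv1a..pool5 above, without the fc layers.
    features = nn.Sequential(
        nn.Conv3d(3, 64, 3, padding=1), nn.ReLU(), nn.MaxPool3d((1, 2, 2)),
        nn.Conv3d(64, 128, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),
        nn.Conv3d(128, 256, 3, padding=1), nn.ReLU(),
        nn.Conv3d(256, 256, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),
        nn.Conv3d(256, 512, 3, padding=1), nn.ReLU(),
        nn.Conv3d(512, 512, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),
        nn.Conv3d(512, 512, 3, padding=1), nn.ReLU(),
        nn.Conv3d(512, 512, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),
    )

    clip = torch.randn(1, 3, 16, 112, 112)      # (N, C, T, H, W) dummy clip
    out = features(clip)
    print(out.shape, out.numel())               # torch.Size([1, 512, 1, 3, 3]) 4608

For that clip size the flattened feature count is 4608 per sample, not 512, so the view and fc6 sizes would need to be adjusted to match whatever clip shape is actually used.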

And this is how I’m loading the data:

    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'test')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.CenterCrop(112),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=50, shuffle=True, num_workers=args.workers, pin_memory=True)

I think the inputs I’m giving the model are not 5D tensors!?
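
That seems to be exactly it: datasets.ImageFolder with the default collate function yields 4D batches of single images, shape (50, 3, 112, 112) here, while the Conv3d layers need 5D batches of shape (N, C, T, H, W). Below is a quick check plus a hypothetical sketch of a dataset that stacks consecutive frames into a (C, T, H, W) clip so the DataLoader produces 5D batches; the class name and frame layout are made up for illustration:

    # Quick check of what the loader defined above actually yields:
    images, target = next(iter(train_loader))
    print(images.shape)                          # torch.Size([50, 3, 112, 112]) -> 4D, not 5D

    # Hypothetical clip dataset: each sample is a folder of consecutive frame images.
    import os
    import torch
    from PIL import Image
    from torch.utils.data import Dataset

    class FrameClipDataset(Dataset):
        def __init__(self, clip_dirs, labels, transform, clip_len=16):
            self.clip_dirs = clip_dirs       # one directory of frame images per clip
            self.labels = labels
            self.transform = transform       # e.g. CenterCrop + ToTensor + Normalize per frame
            self.clip_len = clip_len

        def __len__(self):
            return len(self.clip_dirs)

        def __getitem__(self, idx):
            clip_dir = self.clip_dirs[idx]
            frame_files = sorted(os.listdir(clip_dir))[:self.clip_len]
            frames = [self.transform(Image.open(os.path.join(clip_dir, f)).convert('RGB'))
                      for f in frame_files]
            clip = torch.stack(frames, dim=1)    # (C, T, H, W)
            return clip, self.labels[idx]        # DataLoader batches this into (N, C, T, H, W)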