About the input problem of the densenet201?

class DenseNet(nn.Module):
“”“Densenet-BC model class
Args:
growth_rate (int) - how many filters to add each layer (k in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
“””

def __init__(self,
             sample_size,
             sample_duration,
             growth_rate=32,
             block_config=(6, 12, 24, 16),
             num_init_features=64,
             bn_size=4,
             drop_rate=0,
             num_classes=1000):

    super(DenseNet, self).__init__()

    self.sample_size = sample_size
    self.sample_duration = sample_duration

    # First convolution
    self.features = nn.Sequential(
        OrderedDict([
            ('conv0',
             nn.Conv3d(
                 3,
                 num_init_features,
                 kernel_size=7,
                 stride=(1, 2, 2),
                 padding=(3, 3, 3),
                 bias=False)),
            ('norm0', nn.BatchNorm3d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool3d(kernel_size=3, stride=2, padding=1)),
        ]))

    # Each denseblock
    num_features = num_init_features
    for i, num_layers in enumerate(block_config):
        block = _DenseBlock(
            num_layers=num_layers,
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % (i + 1), block)
        num_features = num_features + num_layers * growth_rate
        if i != len(block_config) - 1:
            trans = _Transition(
                num_input_features=num_features,
                num_output_features=num_features // 2)
            self.features.add_module('transition%d' % (i + 1), trans)
            num_features = num_features // 2

    # Final batch norm
    self.features.add_module('norm5', nn.BatchNorm2d(num_features))

    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
        elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    # Linear layer
    self.classifier = nn.Linear(num_features, num_classes)

def forward(self, x):
    features = self.features(x)
    out = F.relu(features, inplace=True)
    last_duration = int(math.ceil(self.sample_duration / 16))
    last_size = int(math.floor(self.sample_size / 32))
    out = F.avg_pool3d(
        out, kernel_size=(last_duration, last_size, last_size)).view(
            features.size(0), -1)
    out = self.classifier(out)
    return out

model = densenet.densenet201(sample_size=112, sample_duration=16, num_classes=400)

and I want to enter a sequence of behaviors **
** torch.Size([20, 3, 16, 112, 112]) batch :20 channel:3 sequence :16 and112x112

**but **
ValueError: expected 4D input (got 5D input)
**and **
Batch dimension becomes 1 torch.Size([1, 3, 16, 112, 112]) and squeeze(0)
**torch.Size([ 3, 16, 112, 112]) **
**But still Error: expected stride to be a single integer value or a list of 2 values to match the convolution dimensions, but got stride=[1, 2, 2]

Traceback (most recent call last):
File “desnext.py”, line 556, in
train()
File “desnext.py”, line 335, in train
output1 = net(X)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 491, in call
result = self.forward(*input, **kwargs)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py”, line 112, in forward
return self.module(*inputs[0], **kwargs[0])
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 491, in call
result = self.forward(*input, **kwargs)
File “/home/linbb/C3D_siamese/model/densenet.py”, line 206, in forward
features = self.features(x)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 491, in call
result = self.forward(*input, **kwargs)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py”, line 91, in forward
input = module(input)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 491, in call
result = self.forward(*input, **kwargs)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py”, line 45, in forward
self._check_input_dim(input)
File “/home/linbb/anaconda3/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py”, line 193, in _check_input_dim
.format(input.dim()))
ValueError: expected 4D input (got 5D input)

and i found nn.Conv3d(
3,
num_init_features,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False))

I’m not sure how _Denseblock is defined, but assuming you are using nn.Conv3d layers, this line might be the issue:

self.features.add_module('norm5', nn.BatchNorm2d(num_features))

Should this be BatchNorm2d or rather BatchNorm3d?
Or is the transition layer flattening your output already?

class _DenseBlock(nn.Sequential):

def __init__(self, num_layers, num_input_features, bn_size, growth_rate,
             drop_rate):
    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)

above is _Denseblock. I will try to turn nn.BatchNorm2d into nn.BatchNorm3d.