Error: Expected more than 1 value per channel when training

Thank you. I debugged it by printing the output shape; that helped a lot and solved the problem.

If you are trying to debug by setting a batch_size of 1, you might see this error.
Increasing the batch_size to 2 will solve it.
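
For reference, a minimal sketch of what is going on underneath (a bare BatchNorm2d layer with an arbitrary channel count): in train mode BatchNorm needs more than one value per channel to compute batch statistics, so a batch of 1 with 1x1 spatial size fails while a batch of 2 passes.

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(64)
bn.train()

# Batch of 1 with 1x1 spatial size -> exactly one value per channel -> ValueError
try:
    bn(torch.randn(1, 64, 1, 1))
except ValueError as e:
    print(e)  # Expected more than 1 value per channel when training, ...

# Batch of 2 -> two values per channel -> works
print(bn(torch.randn(2, 64, 1, 1)).shape)  # torch.Size([2, 64, 1, 1])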

Wow, that has solved my problem while testing different timm models. I did not understand why a random torch tensor wouldn’t pass through the network!

I am new to PyTorch and facing the same problem.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd

class Attention(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, groups=1, reduction=0.0625, kernel_num=4, min_channel=16):
        super(Attention, self).__init__()
        attention_channel = max(int(in_planes * reduction), min_channel)
        # print("Output of attention_channel calculation:", attention_channel)
        self.kernel_size = kernel_size
        self.kernel_num = kernel_num
        self.temperature = 1.0

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(in_planes, attention_channel, 1, bias=False)
        self.bn = nn.BatchNorm2d(attention_channel)
        self.relu = nn.ReLU(inplace=True)

        self.channel_fc = nn.Conv2d(attention_channel, in_planes, 1, bias=True)

        if in_planes == groups and in_planes == out_planes:  # depth-wise convolution
            self.func_filter = self.skip
        else:
            self.filter_fc = nn.Conv2d(attention_channel, out_planes, 1, bias=True)
            self.func_filter = self.get_filter_attention

        if kernel_size == 1:  # point-wise convolution
            self.func_spatial = self.skip
        else:
            self.spatial_fc = nn.Conv2d(attention_channel, kernel_size * kernel_size, 1, bias=True)
            self.func_spatial = self.get_spatial_attention

        if kernel_num == 1:
            self.func_kernel = self.skip
        else:
            self.kernel_fc = nn.Conv2d(attention_channel, kernel_num, 1, bias=True)
            self.func_kernel = self.get_kernel_attention

        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def update_temperature(self, temperature):
        self.temperature = temperature

    @staticmethod
    def skip(_):
        return 1.0

    def get_channel_attention(self, x):
        channel_attention = torch.sigmoid(self.channel_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        # print("Output of channel_attention:", channel_attention)
        return channel_attention

    def get_filter_attention(self, x):
        filter_attention = torch.sigmoid(self.filter_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        # print("Output of filter_attention:", filter_attention)
        return filter_attention

    def get_spatial_attention(self, x):
        spatial_attention = self.spatial_fc(x).view(x.size(0), 1, 1, 1, self.kernel_size, self.kernel_size)
        spatial_attention = torch.sigmoid(spatial_attention / self.temperature)
        # print("Output of spatial_attention:", spatial_attention)
        return spatial_attention

    def get_kernel_attention(self, x):
        kernel_attention = self.kernel_fc(x).view(x.size(0), -1, 1, 1, 1, 1)
        kernel_attention = F.softmax(kernel_attention / self.temperature, dim=1)
        # print("Output of kernel_attention:", kernel_attention)
        return kernel_attention

    def forward(self, x):
        x = self.avgpool(x)
        x = self.fc(x)
        x = self.bn(x)
        x = self.relu(x)
        return self.channel_fc(x), self.func_filter(x), self.func_spatial(x), self.func_kernel(x)

class ODConv2d(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1,
                 reduction=0.0625, kernel_num=4):
        super(ODConv2d, self).__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_num = kernel_num
        self.attention = Attention(in_planes, out_planes, kernel_size, groups=groups,
                                   reduction=reduction, kernel_num=kernel_num)
        self.weight = nn.Parameter(torch.randn(kernel_num, out_planes, in_planes // groups, kernel_size, kernel_size),
                                   requires_grad=True)
        self._initialize_weights()

        if self.kernel_size == 1 and self.kernel_num == 1:
            self._forward_impl = self._forward_impl_pw1x
        else:
            self._forward_impl = self._forward_impl_common

    def _initialize_weights(self):
        for i in range(self.kernel_num):
            nn.init.kaiming_normal_(self.weight[i], mode='fan_out', nonlinearity='relu')

    def update_temperature(self, temperature):
        self.attention.update_temperature(temperature)

    def _forward_impl_common(self, x):
        # Multiplying channel attention (or filter attention) to weights and feature maps are equivalent,
        # while we observe that when using the latter method the models will run faster with less gpu memory cost.
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        batch_size, in_planes, height, width = x.size()
        x = x * channel_attention
        x = x.reshape(1, -1, height, width)
        aggregate_weight = spatial_attention * kernel_attention * self.weight.unsqueeze(dim=0)
        aggregate_weight = torch.sum(aggregate_weight, dim=1).view(
            [-1, self.in_planes // self.groups, self.kernel_size, self.kernel_size])
        output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups * batch_size)
        output = output.view(batch_size, self.out_planes, output.size(-2), output.size(-1))
        output = output * filter_attention
        # print("Output of _forward_impl_common:", output.shape)
        return output

    def _forward_impl_pw1x(self, x):
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        x = x * channel_attention
        output = F.conv2d(x, weight=self.weight.squeeze(dim=0), bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups)
        output = output * filter_attention
        # print("Output of _forward_impl_pw1x:", output.shape)
        return output

    def forward(self, x):
        return self._forward_impl(x)

When I call this class:
class ODConvBNReLU(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, groups=1, norm_layer=nn.BatchNorm2d,
                 reduction=0.0625, kernel_num=1):
        super(ODConvBNReLU, self).__init__()
        self.conv = ODConv2d(in_planes, out_planes, kernel_size, stride, groups=groups,
                             reduction=reduction, kernel_num=kernel_num)
        self.bn = norm_layer(out_planes)
        self.relu = nn.SiLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

and run train.py, I get this error:
Traceback (most recent call last):
  File "D:\yolov7-main\train.py", line 616, in <module>
    train(hyp, opt, device, tb_writer)
  File "D:\yolov7-main\train.py", line 88, in train
    model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
  File "D:\yolov7-main\models\yolo.py", line 544, in __init__
    m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
  File "D:\yolov7-main\models\yolo.py", line 599, in forward
    return self.forward_once(x, profile)  # single-scale inference, train
  File "D:\yolov7-main\models\yolo.py", line 625, in forward_once
    x = m(x)  # run
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\yolov7-main\models\common.py", line 126, in forward
    x = self.conv(x)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\yolov7-main\modules\odconv.py", line 149, in forward
    return self._forward_impl(x)
  File "D:\yolov7-main\modules\odconv.py", line 139, in _forward_impl_pw1x
    channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\yolov7-main\modules\odconv.py", line 86, in forward
    x = self.bn(x)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\batchnorm.py", line 175, in forward
    return F.batch_norm(
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\functional.py", line 2480, in batch_norm
    _verify_batch_size(input.size())
  File "C:\Users\ujjaw\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\functional.py", line 2448, in _verify_batch_size
    raise ValueError(f"Expected more than 1 value per channel when training, got input size {size}")
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 64, 1, 1])
Although I am providing a batch size of 32 on the command line, like this: python train.py --workers 8 --device 'cpu' --batch-size 32 --data data/custom.yaml --img 640 640 --cfg cfg/training/yolov7custom.yaml --weights 'yolov7.pt' --name yolov7 --hyp data/hyp.scratch.p5.yaml

Your model works fine:

model = ODConv2d(1, 1, 1)
x = torch.randn(32, 1, 640, 640)
out = model(x)
print(out.shape)
# torch.Size([32, 1, 640, 640])
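
For what it's worth, the size in your traceback is torch.Size([1, 64, 1, 1]): yolo.py computes the strides with self.forward(torch.zeros(1, ch, s, s)), i.e. a batch of 1, and after the AdaptiveAvgPool2d(1) inside Attention the BatchNorm2d only ever sees one value per channel, no matter what --batch-size you pass. A minimal sketch of that failing path (the 64 comes from the error message, the other shapes are illustrative):

import torch
import torch.nn as nn

avgpool = nn.AdaptiveAvgPool2d(1)
fc = nn.Conv2d(1024, 64, 1, bias=False)  # 1024 is an assumed in_planes; 64 matches the error message
bn = nn.BatchNorm2d(64)

x = torch.zeros(1, 1024, 20, 20)  # batch of 1, like the stride-check forward in yolo.py
x = fc(avgpool(x))                # -> torch.Size([1, 64, 1, 1])
bn.train()
try:
    bn(x)                         # one value per channel in train mode
except ValueError as e:
    print(e)                      # Expected more than 1 value per channel when training, ...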

I had the same error using DeepLabV3. I'm trying to fine-tune the model, but when I call model.train() before training it gives the above error. If I call model.eval() instead, it seems to work. Why does it throw an error when I use model.train()? I thought eval() was only for evaluation.
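
If it helps, the difference most likely comes from the BatchNorm layers: in train() mode they normalize with statistics computed from the current batch, which requires more than one value per channel, while in eval() mode they use the stored running mean and variance, so a single sample goes through. A small sketch with a bare BatchNorm2d layer (channel count arbitrary):

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(8)
x = torch.randn(1, 8, 1, 1)  # a single value per channel

bn.train()
try:
    bn(x)  # batch statistics are undefined for one value per channel -> ValueError
except ValueError as e:
    print(e)

bn.eval()
print(bn(x).shape)  # uses running statistics instead -> torch.Size([1, 8, 1, 1])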