I ran into the same problem. I'm using the DeepLabv3 model from the model zoo, and I only want to train on a single image, so my batch size is 1 and my input image size is 512×1024. I got exactly the same error:
Traceback (most recent call last):
  File "/home/mengdietao/.pycharm_helpers/pydev/pydevd.py", line 1758, in <module>
    main()
  File "/home/mengdietao/.pycharm_helpers/pydev/pydevd.py", line 1752, in main
    globals = debugger.run(setup['file'], None, None, is_module)
  File "/home/mengdietao/.pycharm_helpers/pydev/pydevd.py", line 1147, in run
    pydev_imports.execfile(file, globals, locals) # execute the script
  File "/home/mengdietao/.pycharm_helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/home/mengdietao/tangshan/tangshan_data/image_segmentation/docs/data_check.py", line 356, in <module>
    train(CONFIG, True)
  File "/home/mengdietao/tangshan/tangshan_data/image_segmentation/docs/data_check.py", line 244, in train
    pred = model(data)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/parallel/data_parallel.py", line 150, in forward
    return self.module(*inputs[0], **kwargs[0])
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torchvision/models/segmentation/_utils.py", line 22, in forward
    x = self.classifier(x)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/container.py", line 92, in forward
    input = module(input)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torchvision/models/segmentation/deeplabv3.py", line 91, in forward
    res.append(conv(x))
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torchvision/models/segmentation/deeplabv3.py", line 60, in forward
    x = super(ASPPPooling, self).forward(x)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/container.py", line 92, in forward
    input = module(input)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/modules/batchnorm.py", line 81, in forward
    exponential_average_factor, self.eps)
  File "/home/mengdietao/.conda/envs/mengdietao/lib/python3.7/site-packages/torch/nn/functional.py", line 1652, in batch_norm
    raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 256, 1, 1])
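
For reference, here is a minimal sketch that should reproduce this error. It assumes torchvision's deeplabv3_resnet101 constructor; the model variant and num_classes=2 are placeholders for illustration, not my actual configuration.

import torch
from torchvision.models.segmentation import deeplabv3_resnet101

# Build the model and put it in training mode, so the BatchNorm layers
# use per-batch statistics instead of their running estimates.
model = deeplabv3_resnet101(pretrained=False, num_classes=2)  # num_classes is a placeholder
model.train()

# Batch size 1 with a 512x1024 input, matching the setup described above.
x = torch.randn(1, 3, 512, 1024)

# Raises the ValueError: the global average pool inside ASPPPooling shrinks
# the features to [1, 256, 1, 1], i.e. a single value per channel, which
# BatchNorm cannot normalize in training mode.
out = model(x)['out']

As far as I can tell, using a batch size of at least 2, or keeping the normalization layers in eval mode while training, avoids this particular failure.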