BatchNorm2d, ValueError: expected 4D input (got 2D input)

Hi everyone,
I got ValueError: expected 4D input (got 2D input) when I trained the following network:

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv2_bn = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3_bn = nn.BatchNorm2d(128)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128*28*28, 500)
        self.dense1_bn = nn.BatchNorm2d(500)
        self.fc2 = nn.Linear(500, 133)
        self.dropout = nn.Dropout(0.20)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        # print(x.shape)
        x = self.pool(F.relu(self.conv2_bn(self.conv2(x))))
        # print(x.shape)
        x = self.pool(F.relu(self.conv3_bn(self.conv3(x))))
        # print(x.shape)
        # flatten image input
        x = x.view(-1, 128*28*28)
        x = self.dropout(x)
        x = F.relu(self.dense1_bn(self.fc1(x)))
        # add dropout layer
        x = self.dropout(x)
        x = self.fc2(x)
        return x

Can you please help me understand why this error happens?

Hi,
The problem is here:

x = F.relu(self.dense1_bn(self.fc1(x)))

nn.BatchNorm2d expects a 4D input of shape [batch, channel, height, width]. But in the quoted line, self.fc1 has already turned the tensor into a 2D one of shape [batch, 500], which BatchNorm2d does not accept.

Using nn.BatchNorm1d, which works on [batch, features] inputs, will fix the issue.

self.dense1_bn = nn.BatchNorm1d(500)
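For reference, a minimal sketch of the shape each variant expects (the tensor here is just illustrative):

```
import torch
import torch.nn as nn

bn2d = nn.BatchNorm2d(500)  # expects 4D input: [batch, 500, height, width]
bn1d = nn.BatchNorm1d(500)  # expects 2D input: [batch, 500] (or 3D: [batch, 500, length])

x = torch.randn(8, 500)     # same shape as self.fc1's output: [batch, features]
out = bn1d(x)               # works; output shape stays [8, 500]
# bn2d(x)                   # would raise: ValueError: expected 4D input (got 2D input)
```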

Bests


Thank you, the problem is solved! But I have encountered another problem:
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 385 and 341 in dimension 2 at /opt/conda/conda-bld/pytorch_1524584710464/work/aten/src/TH/generic/THTensorMath.c:3586

I’m not sure if this is related to the batch size; I tried changing it, but the error still exists. Any tips, please?

Could you please share the stack trace of the error and the line that raises it?

RuntimeError Traceback (most recent call last)
in <module>()
77 # train the model
78 model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch,
---> 79 criterion_scratch, use_cuda, 'model_scratch.pt')
80
81 # load the model that got the best validation accuracy

in train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path)
42 ######################
43 model.eval()
---> 44 for batch_idx, (data, target) in enumerate(loaders['valid']):
45 # move to GPU
46 if use_cuda:

/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
262 if self.num_workers == 0: # same-process loading
263 indices = next(self.sample_iter) # may raise StopIteration
--> 264 batch = self.collate_fn([self.dataset[i] for i in indices])
265 if self.pin_memory:
266 batch = pin_memory_batch(batch)

/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py in default_collate(batch)
136 elif isinstance(batch[0], collections.Sequence):
137 transposed = zip(*batch)
--> 138 return [default_collate(samples) for samples in transposed]
139
140 raise TypeError((error_msg.format(type(batch[0]))))

/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py in <listcomp>(.0)
136 elif isinstance(batch[0], collections.Sequence):
137 transposed = zip(*batch)
--> 138 return [default_collate(samples) for samples in transposed]
139
140 raise TypeError((error_msg.format(type(batch[0]))))

/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py in default_collate(batch)
113 storage = batch[0].storage().new_shared(numel)
114 out = batch[0].new(storage)
--> 115 return torch.stack(batch, 0, out=out)
116 elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
117     and elem_type.__name__ != 'string_':

RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 385 and 341 in dimension 2 at /opt/conda/conda-bld/pytorch_1524584710464/work/aten/src/TH/generic/THTensorMath.c:3586

Can you share how you are constructing the loaders? Also, it would be great if you could wrap your code in a pair of ```. It enhances readability.
Like this:

```
code
```

```
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 40

# transform for the train dataset with augmentation
transform_trn = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),  # randomly flip and rotate
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# transform for validation and testing datasets
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.ToTensor(),
    normalize,
])

# choose the training and test datasets
train_data = datasets.ImageFolder('/data/dog_images/train', transform=transform_trn)
val_data = datasets.ImageFolder('/data/dog_images/valid', transform=transform)
test_data = datasets.ImageFolder('/data/dog_images/test', transform=transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
    shuffle=True, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size,
    shuffle=False, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False,
    num_workers=num_workers)
```

It means the images in your dataset do not all end up with the same size, even though you are using transforms.Resize(256). Maybe some of your images are not in the same mode, such as RGB?
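One possible cause worth checking: when transforms.Resize is given a single int, it only matches the shorter edge to 256 and keeps the aspect ratio, so images with different aspect ratios still come out with different shapes (which would fit the 385 vs. 341 in the error). A sketch of the validation/test transform with a fixed-size crop added, mirroring your train transform:

```
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# suggested fix: crop to a fixed 224x224 so every tensor in the batch has the same shape
transform = transforms.Compose([
    transforms.Resize(256),      # shorter edge -> 256, aspect ratio preserved
    transforms.CenterCrop(224),  # fixed output size, as in the train transform
    transforms.ToTensor(),
    normalize,
])
```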

I think it would be much easier if you define your own custom dataset. You will have full control over data loading: dataloading tutorial

This is a post for similar case: Custom Dataset with some preprocessing
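If you go that route, here is a minimal sketch of a custom dataset (the class name and folder layout are just assumptions, following the tutorial linked above); the key point is that you control how each image is opened and converted:

```
import os
from PIL import Image
from torch.utils.data import Dataset

class DogImageDataset(Dataset):
    """Hypothetical custom dataset: reads images from class subfolders
    and applies a transform, so you control resizing and mode conversion."""

    def __init__(self, root, transform=None):
        self.transform = transform
        self.samples = []
        classes = sorted(os.listdir(root))
        self.class_to_idx = {c: i for i, c in enumerate(classes)}
        for c in classes:
            class_dir = os.path.join(root, c)
            for fname in sorted(os.listdir(class_dir)):
                self.samples.append((os.path.join(class_dir, fname),
                                     self.class_to_idx[c]))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, target = self.samples[idx]
        img = Image.open(path).convert('RGB')  # force RGB so channel counts match
        if self.transform is not None:
            img = self.transform(img)
        return img, target
```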


I will check the links you added. Thank you!