Weird error about non-contiguous inputs

When it runs on the CPU everything works fine, but on the GPU the problem occurs. Here is the code, which is quite simple:

import torch
import torch.nn as nn
from torch.autograd import Variable

class layernorm(nn.Module):
    def __init__(self):
        super(layernorm, self).__init__()
        self.batchnorm = nn.BatchNorm1d(1)

    def forward(self, x):
        c, h, w = x.size(1), x.size(2), x.size(3)
        x = x.view(c * h * w, 1)   # flatten to a 2D (N, C) input
        print(x.is_contiguous())
        x = self.batchnorm(x)
        print(x.is_contiguous())
        x = x.view(1, c, h, w)     # restore the original 4D shape
        print(x.is_contiguous())
        return x

netG = layernorm().cuda()
input = Variable(torch.Tensor(1, 20, 256, 512)).cuda()
out = netG(input)

and the error is:

RuntimeError                              Traceback (most recent call last)
<ipython-input-5-5d571ce948ba> in <module>()
     15 netG=layernorm().cuda()
     16 input=Variable(torch.Tensor(1,20,256,512)).cuda()
---> 17 out=netG(input)

/home/icoz/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

<ipython-input-5-5d571ce948ba> in forward(self, x)
      7         x=x.view(c*h*w,1)
      8         print (x.is_contiguous())
----> 9         x=self.batchnorm(x)
     10         print (x.is_contiguous())
     11         x=x.view(1,c,h,w)

/home/icoz/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

/home/icoz/anaconda3/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py in forward(self, input)
     35         return F.batch_norm(
     36             input, self.running_mean, self.running_var, self.weight, self.bias,
---> 37             self.training, self.momentum, self.eps)
     38 
     39     def __repr__(self):

/home/icoz/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
    637                training=False, momentum=0.1, eps=1e-5):
    638     f = torch._C._functions.BatchNorm(running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled)
--> 639     return f(input, weight, bias)
    640 
    641 

RuntimeError: CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in a non-contiguous input.

Hi,
You can fix this by using x = x.view(c*h*w, 1, 1).
I am not sure the documentation is correct here: it seems that passing a 2D input is not supported when using cuDNN, which is why you get this error even though your input is contiguous.
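
For completeness, here is the module with that one-line change applied. This is a minimal sketch of the workaround, keeping the same old-style Variable API as the original post: reshaping to a 3D (N, C, L) input instead of a 2D (N, C) one keeps cuDNN happy.

import torch
import torch.nn as nn
from torch.autograd import Variable

class layernorm(nn.Module):
    def __init__(self):
        super(layernorm, self).__init__()
        self.batchnorm = nn.BatchNorm1d(1)

    def forward(self, x):
        c, h, w = x.size(1), x.size(2), x.size(3)
        # Reshape to 3D (N, C, L) = (c*h*w, 1, 1) instead of 2D (N, C);
        # the cuDNN batch norm path accepts this layout.
        x = x.view(c * h * w, 1, 1)
        x = self.batchnorm(x)
        x = x.view(1, c, h, w)
        return x

netG = layernorm().cuda()
input = Variable(torch.Tensor(1, 20, 256, 512)).cuda()
out = netG(input)  # runs without CUDNN_STATUS_NOT_SUPPORTED

As a quick sanity check you can also set torch.backends.cudnn.enabled = False (the flag visible in the traceback above) before the forward pass; if the original 2D version then runs, that confirms the restriction comes from cuDNN rather than from the contiguity of the input.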