The implementation below replicates TensorFlow's `padding='SAME'` behavior for PyTorch's max-pooling, average-pooling, and convolution layers:
class MaxPool2dSamePadding(nn.MaxPool2d):
    """Max pooling with TensorFlow-style 'SAME' padding.

    Pads the input so the output spatial size is ceil(input / stride),
    matching TF's ``padding='SAME'``. Padding uses ``-inf`` so padded
    cells can never win the max.
    """

    def forward(self, input):
        # Assumes input is (N, C, H, W) — shape[2:] reads H and W.
        in_height, in_width = input.shape[2:]

        # nn.MaxPool2d stores kernel_size/stride/dilation exactly as passed
        # (int or tuple). Normalize to tuples in *locals* — the original
        # mutated self.stride / self.kernel_size here, a hidden side effect.
        stride = self.stride if isinstance(self.stride, tuple) else (self.stride, self.stride)
        kernel = self.kernel_size if isinstance(self.kernel_size, tuple) else (self.kernel_size, self.kernel_size)
        dilation = self.dilation if isinstance(self.dilation, tuple) else (self.dilation, self.dilation)

        # Bug fix: a dilated kernel's effective span is (k - 1) * d + 1.
        # The original ignored dilation and under-padded whenever
        # dilation > 1, even though it passed self.dilation to max_pool2d.
        eff_h = (kernel[0] - 1) * dilation[0] + 1
        eff_w = (kernel[1] - 1) * dilation[1] + 1

        # TensorFlow 'SAME' formula: total padding needed per dimension.
        if in_height % stride[0] == 0:
            pad_along_height = max(eff_h - stride[0], 0)
        else:
            pad_along_height = max(eff_h - (in_height % stride[0]), 0)
        if in_width % stride[1] == 0:
            pad_along_width = max(eff_w - stride[1], 0)
        else:
            pad_along_width = max(eff_w - (in_width % stride[1]), 0)

        # TF puts the extra pixel of odd padding on the bottom/right.
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left

        # -inf padding is neutral for max pooling.
        input = F.pad(input, (pad_left, pad_right, pad_top, pad_bottom),
                      value=float('-inf'))

        # Padding was applied manually above, so pass 0 here; unlike the
        # original we do not overwrite self.padding (no state mutation).
        return F.max_pool2d(input, kernel, stride, 0,
                            self.dilation, self.ceil_mode, self.return_indices)
class AvgPool2dSamePadding(nn.AvgPool2d):
    """Average pooling with TensorFlow-style 'SAME' padding.

    Pads the input with zeros so the output spatial size is
    ceil(input / stride), matching TF's ``padding='SAME'`` geometry.

    NOTE(review): zeros added by F.pad become real input values, so they
    are always included in the average regardless of count_include_pad.
    TF's SAME avg-pool excludes padding from the divisor — confirm which
    semantics you need before relying on exact TF parity of values.
    """

    def forward(self, input):
        # Assumes input is (N, C, H, W) — shape[2:] reads H and W.
        in_height, in_width = input.shape[2:]

        # nn.AvgPool2d stores kernel_size/stride as passed (int or tuple).
        # Normalize to tuples in locals — the original mutated self.stride
        # and self.kernel_size here, a hidden side effect on the module.
        stride = self.stride if isinstance(self.stride, tuple) else (self.stride, self.stride)
        kernel = self.kernel_size if isinstance(self.kernel_size, tuple) else (self.kernel_size, self.kernel_size)

        # TensorFlow 'SAME' formula: total padding needed per dimension.
        if in_height % stride[0] == 0:
            pad_along_height = max(kernel[0] - stride[0], 0)
        else:
            pad_along_height = max(kernel[0] - (in_height % stride[0]), 0)
        if in_width % stride[1] == 0:
            pad_along_width = max(kernel[1] - stride[1], 0)
        else:
            pad_along_width = max(kernel[1] - (in_width % stride[1]), 0)

        # TF puts the extra pixel of odd padding on the bottom/right.
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left

        input = F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), value=0.0)

        # Padding was applied manually above, so pass 0 here; unlike the
        # original we do not overwrite self.padding (no state mutation).
        return F.avg_pool2d(input, kernel, stride, 0,
                            self.ceil_mode, self.count_include_pad,
                            self.divisor_override)
class Conv2dSamePadding(nn.Conv2d):
    """2-D convolution with TensorFlow-style 'SAME' padding.

    Zero-pads the input so the output spatial size is
    ceil(input / stride), matching TF's ``padding='SAME'``.
    Assumes padding_mode='zeros' (the nn.Conv2d default).
    """

    def forward(self, input):
        # Assumes input is (N, C, H, W) — shape[2:] reads H and W.
        in_height, in_width = input.shape[2:]

        # nn.Conv2d.__init__ already normalizes kernel_size/stride/dilation
        # to tuples, so no conversion is needed. (The original's
        # `self.stride = tuple(self.stride)` was broken anyway: tuple(int)
        # raises TypeError, unlike the (s, s) pattern its siblings used.)
        stride = self.stride
        dilation = self.dilation

        # Bug fix: a dilated kernel's effective span is (k - 1) * d + 1.
        # The original ignored dilation and under-padded whenever
        # dilation > 1, even though _conv_forward applies self.dilation.
        eff_h = (self.kernel_size[0] - 1) * dilation[0] + 1
        eff_w = (self.kernel_size[1] - 1) * dilation[1] + 1

        # TensorFlow 'SAME' formula: total padding needed per dimension.
        if in_height % stride[0] == 0:
            pad_along_height = max(eff_h - stride[0], 0)
        else:
            pad_along_height = max(eff_h - (in_height % stride[0]), 0)
        if in_width % stride[1] == 0:
            pad_along_width = max(eff_w - stride[1], 0)
        else:
            pad_along_width = max(eff_w - (in_width % stride[1]), 0)

        # TF puts the extra pixel of odd padding on the bottom/right.
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left

        input = F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), value=0.0)

        # Convolve with padding forced to 0 (padding was done manually);
        # unlike the original we do not overwrite self.padding. With the
        # default padding_mode='zeros', _conv_forward reduces to F.conv2d.
        return F.conv2d(input, self.weight, self.bias, stride, 0,
                        dilation, self.groups)
I hope this helps someone. The padding formula is taken directly from TensorFlow's source code for `padding='SAME'`.