Here, when input.is_mkldnn() is true, the code calls at::mkldnn_adaptive_avg_pool2d, while the other branches call _adaptive_avg_pool2d without the at:: prefix. Why does the mkldnn call need the at:: prefix? What is the difference between the two forms?
Tensor adaptive_avg_pool2d(at::Tensor const& input, IntArrayRef output_size) {
  if (input.is_mkldnn()) {
    return at::mkldnn_adaptive_avg_pool2d(input, output_size);
  }
  // TODO: fastpath for Channels_last should be explored later;
  if (input.suggest_memory_format() == at::MemoryFormat::Contiguous &&
      !input.is_quantized() && output_size[0] == 1 && output_size[1] == 1) {
    // in this case, adaptive pooling is just computing mean over hw
    // dimensions, which can be done more efficiently
    int64_t mean_size = input.size(-1) * input.size(-2);
    Tensor out = input.contiguous().view({-1, mean_size}).mean(-1);
    return input.dim() == 3 ? out.view({input.size(0), 1, 1})
                            : out.view({input.size(0), input.size(1), 1, 1});
  } else {
    return _adaptive_avg_pool2d(input, output_size);
  }
}
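For context on the fast path in the middle branch: when output_size is {1, 1}, adaptive average pooling is exactly a mean over the H and W dimensions, which is what the view(...).mean(-1) trick computes. Below is a minimal libtorch sketch checking that equivalence from the public API side; the tensor shapes are illustrative, not taken from the snippet above.

#include <torch/torch.h>
#include <iostream>

int main() {
  // Random NCHW input (illustrative shape).
  torch::Tensor x = torch::randn({2, 3, 8, 8});

  // Adaptive average pooling down to a 1x1 output per channel.
  torch::Tensor pooled = torch::adaptive_avg_pool2d(x, {1, 1});

  // Plain mean over the H and W dimensions, keeping them as size-1 dims.
  torch::Tensor mean_hw = x.mean({-2, -1}, /*keepdim=*/true);

  // The two results should agree, which is why the 1x1 case can skip
  // the general _adaptive_avg_pool2d kernel.
  std::cout << torch::allclose(pooled, mean_hw) << "\n";  // prints 1
}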