Hello, I encountered a memory leak while writing a convolutional layer with the libtorch 1.7 GPU version. When I call torch::nn::functional::conv2d in a loop (a sketch of the calling loop is shown after the function below), memory usage keeps growing. Why does this happen?
torch::Tensor gaussian_filter(torch::Tensor x, int channels)
{
/*
 * Applies a fixed 5x5 Gaussian blur to each channel independently.
 * Parameters: tensor x [C,H,W], int channels (must equal C)
 * Returns:    tensor   [C,H,W]
 */
namespace F = torch::nn::functional;
torch::Tensor kernel =
torch::tensor({ {0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633},
{0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965},
{0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373},
{0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965},
{0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633} });
kernel = kernel.unsqueeze(0).unsqueeze(0);               // [5,5] -> [1,1,5,5]
kernel = torch::repeat_interleave(kernel, channels, 0);  // -> [C,1,5,5], one filter per group
// Depthwise (grouped) convolution; the kernel is rebuilt and copied to x's device on every call.
torch::Tensor out_put = F::conv2d(x.unsqueeze(0), kernel.to(x.device()),
    F::Conv2dFuncOptions().stride(1).padding(2).groups(channels));
return out_put.squeeze(0);
}
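
For reference, this is roughly how the function is called in the loop (a minimal sketch, assuming gaussian_filter is defined as above; the input shape, channel count, and iteration count are placeholders, not my exact code):

#include <torch/torch.h>

int main()
{
    // Placeholder input: a 3-channel float tensor on the GPU.
    torch::Tensor x = torch::rand({3, 512, 512}, torch::kCUDA);

    for (int i = 0; i < 10000; ++i)
    {
        torch::Tensor y = gaussian_filter(x, 3);
        // GPU memory usage keeps growing across iterations of this loop.
    }
    return 0;
}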