Memory leak / random memory allocation in PyTorch (CPU)

This pertains to PyTorch version 1.1.0. I am doing simple inference on images: on each iteration the loop takes one image and forwards it through the model. But every time I run the code, memory utilization is different. In some runs memory keeps increasing with each iteration, while in other runs it becomes stable after a few iterations (the value it settles at is also random).
The code is:

import torch
#import torch.backends.cudnn as cudnn
#cudnn.enabled = False
#torch.backends.cudnn.deterministic = True
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
import numpy as np
import psutil

## Handle to the current process, used to sample resident memory (RSS)
process = psutil.Process(os.getpid())


class GaussianBlur(nn.Module):
    def __init__(self):
        super(GaussianBlur, self).__init__()
        self.pad = 4
        ## weight is a proxy for a 9x9 Gaussian kernel
        weight = torch.from_numpy(np.random.uniform(0.0, 1.0, (1, 1, 9, 9)).astype(np.float32))
        self.register_buffer('buf', weight)

    def forward(self, x):
        ## Variable is a no-op wrapper since PyTorch 0.4; kept as in the original run
        w = Variable(self.buf)
        blurred = F.conv2d(F.pad(x, (self.pad, self.pad, self.pad, self.pad), 'replicate'), w, padding=0)
        return blurred

class ScalePyramid(nn.Module):
    def __init__(self):
        super(ScalePyramid, self).__init__()

    def forward(self, x):
        ## Note: a fresh GaussianBlur module (and its buffer) is constructed on every call
        curr = GaussianBlur().eval()(x)
        return curr

with torch.no_grad():
    ScalePyrGen = ScalePyramid()
    while True:
        ### y is a proxy for an image
        y = np.random.uniform(0.0, 255.0, (1, 1, 321, 512)).astype(np.float32)
        y = torch.from_numpy(y)
        ### Trimmed-down scale pyramid generation for the image
        temp = ScalePyrGen(y)
        ## Print resident memory (RSS) in MiB
        print(process.memory_info().rss / (1024 * 1024))
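
One experiment that may help narrow this down: since ScalePyramid.forward constructs a new GaussianBlur (and its 9x9 buffer) on every call, try building the blur module once and reusing it, and force a Python garbage collection before each RSS sample so collector timing doesn't masquerade as a leak. Below is a minimal sketch of that diagnostic, reusing the GaussianBlur class defined above; the hoisted module and the gc.collect() call are my additions for isolation, not a confirmed fix.

import gc
import os

import numpy as np
import psutil
import torch

process = psutil.Process(os.getpid())

## Variant of the repro: the blur module is created once, outside the loop,
## so no new parameters or buffers are allocated per iteration.
blur = GaussianBlur().eval()

with torch.no_grad():
    while True:
        y = torch.from_numpy(
            np.random.uniform(0.0, 255.0, (1, 1, 321, 512)).astype(np.float32))
        temp = blur(y)
        ## Force a collection so RSS reflects live objects, not GC timing
        gc.collect()
        print(process.memory_info().rss / (1024 * 1024))

If memory stabilizes with this variant, the run-to-run fluctuation is more likely per-iteration module creation interacting with the allocator than a leak in conv2d itself; note that glibc tends to cache freed memory rather than return it to the OS, so RSS is an upper bound on live usage.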