Adding a custom function to a pretrained model's forward() not working

Hello,
I am working with the open-source Neural Style Transfer algorithm by Leon Gatys. I have a pre-trained VGG model. I modified the forward function and added a custom function to it, as shown below:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

class VGG(nn.Module):
    def __init__(self, pool='avg'):
        super(VGG, self).__init__()
        # vgg modules
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        if pool == 'max':
            self.pool1 = nn.MaxPool2d(kernel_size=3, stride=3)
            self.pool2 = nn.MaxPool2d(kernel_size=3, stride=3)
        elif pool == 'avg':
            self.pool1 = nn.AvgPool2d(kernel_size=3, stride=3)
            self.pool2 = nn.AvgPool2d(kernel_size=3, stride=3)
    def forward(self, x, out_keys):
        out = {}
        out['r11'] = F.relu(self.conv1_1(x))
        out['r12'] = F.relu(self.conv1_2(out['r11']))
        out['p1'] = self.pool1(out['r12'])
        out['r21'] = F.relu(self.conv2_1(out['p1']))
        out['r22'] = F.relu(self.conv2_2(out['r21']))
        out['p2'] = self.pool2(out['r22'])
        out['r31'] = F.relu(self.conv3_1(out['p2']))
        out['r32'] = F.relu(self.conv3_2(out['r31']))
        out['r33'] = F.relu(self.conv3_3(out['r32']))
        out['r34'] = F.relu(self.conv3_4(out['r33']))
        out['rd1'] = self.getReducedActivation(out['r34'])
        return [out[key] for key in out_keys]

    def getReducedActivation(self, source_variable):
        # Some hand-picked activation maps I want. I do not need all
        # 256 activation maps, so I am filtering some of them out.
        indices = [369, 178, 338]
        l_indices = torch.cuda.LongTensor(indices)
        reduced_target = Variable(torch.index_select(source_variable.data, 1, l_indices),
                                  requires_grad=True)
        return reduced_target
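
For comparison, a variant that keeps the selection inside the autograd graph (taking .data and re-wrapping it in a new Variable creates a fresh leaf that is detached from the graph). This is only a sketch: the helper name is hypothetical, and the indices are illustrative and must be smaller than the layer's channel count.

    def getReducedActivationKeepGraph(self, source_variable):
        # Selecting on the Variable itself (not on .data) keeps the operation
        # in the autograd graph, so gradients can flow back through it.
        l_indices = Variable(torch.cuda.LongTensor([3, 17, 33]))  # illustrative indices
        return torch.index_select(source_variable, 1, l_indices)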

The forward function returns the selected maps just fine. But when I declare a Variable with random values and run gradient descent, minimizing the MSE loss between the filtered version (i.e. out['rd1']) of the content layer and the random variable, the L-BFGS optimizer code below does not reduce the loss on the random white image I declared.

max_iter = 600
show_iter = 50
optimizer = optim.LBFGS([opt_img])
n_iter=[0]

while n_iter[0] <= max_iter:

    def closure():
        optimizer.zero_grad()
        out = vgg(opt_img, content_layers)
        #layer_losses = [weights[a] * loss_fns[a](A, targets[a]) for a,A in enumerate(out)]
        layer_losses = [nn.MSELoss().cuda()(A,content_targets[a]) for a, A in enumerate(out)]
        loss = sum(layer_losses)
        loss.backward()
        n_iter[0]+=1
        #print loss
        if n_iter[0]%show_iter == (show_iter-1):
            print('Iteration: %d, loss: %f'%(n_iter[0]+1, loss.data[0]))
#             print([loss_layers[li] + ': ' +  str(l.data[0]) for li,l in enumerate(layer_losses)]) #loss of each layer
        return loss
    
    optimizer.step(closure)
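
For reference, a quick way to check whether any gradient reaches opt_img at all (a sketch, reusing content_layers and content_targets from above): after a backward pass, opt_img.grad should be non-zero; an all-zero or missing gradient means the graph was cut somewhere between opt_img and the loss.

    out = vgg(opt_img, content_layers)
    loss = sum(nn.MSELoss().cuda()(A, content_targets[a]) for a, A in enumerate(out))
    loss.backward()
    print(opt_img.grad)  # all zeros / None here means no gradient flows into opt_img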

Can somebody explain this and point me in the right direction? Thanks!

How did you define opt_img?

Using the following code:
opt_img = Variable(torch.randn(content_image.size()).type_as(content_image.data), requires_grad=True)
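
As I understand it, type_as(content_image.data) just matches the dtype and device (CUDA) of the content image, and requires_grad=True makes opt_img a leaf Variable that L-BFGS can update. A quick sanity check (a sketch, assuming content_image lives on the GPU):

    print(opt_img.requires_grad, opt_img.data.is_cuda)  # should print: True True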