Variable not being optimized

I am trying to use gradient descent to iteratively update an image so that the end result is an adversarial example:

from torch.autograd import Variable
import torch
from torch import optim
import torch.nn as nn
import torchvision
from torchvision import models, transforms
import matplotlib.pyplot as plt
import numpy as np
import cv2

model = models.inception_v3(pretrained=True)
model.eval().cuda()

img = plt.imread("hotdog.jpg")
img = cv2.resize(img,(299,299))

def preprocess(x):
	means = [0.485, 0.456, 0.406]
	stds = [0.229, 0.224, 0.225]
	x = x / 255.0
	x = (x - means) / stds
	x = x.transpose(2, 0, 1)

	x = torch.from_numpy(x).float()
	x.unsqueeze_(0)

	device = torch.device('cuda')

	#x = Variable(x, requires_grad=True, device='cuda')
	x = x.to(device)

	return x

def postprocess(x):
	x = x.cpu().numpy()
	x = np.squeeze(x)
	x = x.transpose(1, 2, 0)
	means = [0.485, 0.456, 0.406]
	stds = [0.229, 0.224, 0.225]
	x = x * stds + means
	x = np.round(x * 255)
	return x

def attack(image):

	loss = nn.CrossEntropyLoss().cuda()
	target = Variable(torch.from_numpy(np.asarray([200])).cuda().long(), requires_grad=False)

	for i in range(100):

		input_img = preprocess(image)
		optimizer = optim.Adam([input_img], lr=0.01)

		logits = model(input_img)
		_loss = loss(logits, target)

		model.zero_grad()
		_loss.backward()
		optimizer.step()

		image = postprocess(input_img)

		preds = nn.functional.softmax(model(input_img))
		adv_pred = preds[:, 200]
		print(adv_pred.cpu().data.numpy())


attack(img)

adv_pred never changes; it stays at the same value throughout the 100 iterations. Why is this? Thank you for any help.

Hi,

Variables are not needed anymore; you can remove them and use plain Tensors everywhere.
The input_img returned by your preprocess function does not require gradients, so _loss.backward() never populates input_img.grad, and optimizer.step() silently skips parameters whose .grad is None. Call input_img.requires_grad_() to make this Tensor require gradients. Note also that you rebuild input_img and the optimizer on every iteration, which resets Adam's internal state and, together with the np.round in postprocess, can quantize away small updates; it is cleaner to create both once, before the loop.
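
For reference, here is a minimal sketch of what that could look like, reusing your preprocess and postprocess helpers and your hard-coded target class 200 (the learning rate and iteration count are kept from your code; tune as needed):

def attack(image):

	loss_fn = nn.CrossEntropyLoss().cuda()
	target = torch.tensor([200], device='cuda')

	# build the input once, as a leaf Tensor that requires grad,
	# so the optimizer actually has something to update
	input_img = preprocess(image).requires_grad_()
	optimizer = optim.Adam([input_img], lr=0.01)

	for i in range(100):

		optimizer.zero_grad()  # clears input_img.grad (the model's own grads are unused here)
		logits = model(input_img)
		_loss = loss_fn(logits, target)
		_loss.backward()
		optimizer.step()

		with torch.no_grad():
			preds = nn.functional.softmax(model(input_img), dim=1)
		print(preds[0, 200].item())

	# detach before postprocess: .numpy() raises on a Tensor that requires grad
	return postprocess(input_img.detach())

With this, the printed probability for class 200 should actually change between iterations, and creating the optimizer once lets Adam's running moment estimates accumulate across steps instead of being reset every time.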