# Poor minima while backpropagating affine_grid?

Hi,
I was just trying an experiment where I attempt to recover the affine transformation matrix from a given pair of images (an original image and its transformed version). For this example I use a small 5x5 grid with a straight vertical line as the original image and the same line tilted at 45 degrees as the transformed output. Although the loss decreases and the gradients become smaller and smaller (as expected), the solution it converges to seems to be way off — the result does not look like a straight line at all.

Apologies for the formatting (the code is from a notebook)

import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(989)

# Source: a straight vertical line down the middle of a 5x5 grid.
# source_image = torch.tensor([[0,1,0],[0,1,0],[0,1,0]])
source_image = torch.tensor([[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0]])

plt.imshow(source_image)

# Target: the same line tilted 45 degrees (the diagonal of the identity matrix).
# transformed_image = torch.eye(3)
transformed_image = torch.eye(5)

plt.imshow(transformed_image)

# Bug fix: `.shape` is a torch.Size, not an int, so
# reshape(1, 1, source_image.shape, source_image.shape) raises a TypeError.
# Unpack the existing shape to add the leading (batch, channel) dims instead.
source_image = source_image.reshape(1, 1, *source_image.shape)
transformed_image = transformed_image.reshape(1, 1, *transformed_image.shape)
# affine_grid / grid_sample and MSELoss need float tensors, not the int64
# tensors that the integer literals above produce.
source_image = source_image.type(torch.FloatTensor)
transformed_image = transformed_image.type(torch.FloatTensor)

class AffineNet(nn.Module):
    """Learns a single 2x3 affine matrix M that warps its input image.

    forward(im) builds a sampling grid from M (sized to match `im`) and
    returns `im` resampled through that grid — i.e. the affine-warped image.
    """

    def __init__(self):
        super(AffineNet, self).__init__()
        # The 2x3 affine parameters (one batch element), randomly initialized.
        self.M = torch.nn.Parameter(torch.randn(1, 2, 3))

    def forward(self, im):
        # Bug fix: the original returned the undefined name
        # `transformed_flow_image` — the grid_sample call was missing entirely.
        # Also size the grid from the argument `im`, not a module-level global,
        # so the module works on any input of matching rank.
        flow_grid = F.affine_grid(self.M, im.size(), align_corners=False)
        return F.grid_sample(im, flow_grid, align_corners=False)

affineNet = AffineNet()
optimizer = optim.SGD(affineNet.parameters(), lr=0.01)
criterion = nn.MSELoss()

# Fit M so that warping the transformed image reproduces the source image.
for i in range(1000):
    # Bug fix: without zero_grad() the gradients from every previous
    # iteration accumulate, which corrupts each update — a likely cause of
    # converging to a solution that looks nothing like the target.
    optimizer.zero_grad()
    output = affineNet(transformed_image)
    loss = criterion(output, source_image)
    loss.backward()
    # Bug fix: the original never called step(), so the parameters were
    # never actually updated.
    optimizer.step()
    if i % 10 == 0:
        print(f"iter {i}: loss {loss.item():.6f}")
This is what the end result looks like: 