How to warp an image using a deformed mesh

I am trying to reproduce a document unwarping project. Because I don't have enough data, I am trying to generate a fake dataset. Here is my code:

import cv2
import numpy as np

def create_grid(width=100, height=100):
    mr = width
    mc = height

    xx = np.arange(mr-1, -1, -1)
    yy = np.arange(0, mc, 1)
    [Y, X] = np.meshgrid(xx, yy)
    ms = np.transpose(np.asarray([X.flatten('F'), Y.flatten('F')]), (1, 0))

    perturbed_mesh = ms
    nv = np.random.randint(20) - 1
    for k in range(nv):
        # Choosing one vertex randomly
        vidx = np.random.randint(np.shape(ms)[0])
        vtex = ms[vidx, :]
        # Vector between all vertices and the selected one
        xv = perturbed_mesh - vtex
        # Random movement
        mv = (np.random.rand(1, 2) - 0.5)*20
        hxv = np.zeros((np.shape(xv)[0], np.shape(xv)[1] + 1))
        hxv[:, :-1] = xv
        hmv = np.tile(np.append(mv, 0), (np.shape(xv)[0], 1))
        d = np.cross(hxv, hmv)
        d = np.absolute(d[:, 2])
        d = d / (np.linalg.norm(mv, ord=2))
        wt = d

        curve_type = np.random.rand(1)
        if curve_type > 0.3:
            alpha = np.random.rand(1) * 20 + 20
            wt = alpha / (wt + alpha)
        else:
            alpha = np.random.rand(1) + 1
            wt = 1 - (wt / 100)**alpha
        msmv = mv * np.expand_dims(wt, axis=1)
        perturbed_mesh = perturbed_mesh + msmv
        
    perturbed_mesh[:, 0] = np.where(perturbed_mesh[:, 0] > height, height, perturbed_mesh[:, 0])
    perturbed_mesh[:, 1] = np.where(perturbed_mesh[:, 1] > width, width, perturbed_mesh[:, 1])
    perturbed_mesh[:, 0] = np.where(perturbed_mesh[:, 0] < 0, 0, perturbed_mesh[:, 0])
    perturbed_mesh[:, 1] = np.where(perturbed_mesh[:, 1] < 0, 0, perturbed_mesh[:, 1])

#     plt.scatter(perturbed_mesh[:, 0], perturbed_mesh[:, 1], c=np.arange(0, mr*mc))
#     plt.show()

    return perturbed_mesh[:, 0], perturbed_mesh[:, 1]

src_img = cv2.imread('source.jpg')
src_img = cv2.flip(src_img, 0)
scale_percent = 30  # percent of original size
width = int(src_img.shape[1] * scale_percent / 100)
height = int(src_img.shape[0] * scale_percent / 100)
dim = (width, height)
src_img = cv2.resize(src_img, dim, interpolation=cv2.INTER_AREA)

height, width, _ = src_img.shape
dh = height // 10
dw = width // 10
img = cv2.copyMakeBorder(src_img, dh, dh, dw, dw, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
nh, nw, _ = img.shape
xs, ys = create_grid(nh, nw)
xs = xs.reshape(nh, nw).astype(np.float32)
ys = ys.reshape(nh, nw).astype(np.float32)
dst = cv2.remap(img, xs, ys, cv2.INTER_CUBIC, borderValue=0)
cv2.imwrite('result.jpg', dst)

Source image is:

[source image]

But my result is:

[my warped result]

Expected result is:

[target image]

What might be going wrong in my code? Or is there a PyTorch equivalent for generating such a fake dataset? Here is the paper that describes how to make the fake data.

Based on your result vs. the expected one, I would guess that your grid indices are flipped.
Did you check how these values are created and make sure that the (x, y) coordinates are in the expected image coordinate system (often with the origin in the top left and the positive axes pointing right and down)?
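If it helps, here is a minimal sanity check (my own sketch, not taken from your code) for the convention cv2.remap expects: map1 holds the x (column) coordinates and map2 holds the y (row) coordinates, both float32 arrays of shape (height, width). An identity map must reproduce the input exactly; if your grid only passes this check after swapping or transposing xs and ys, you have found the flip.

    import cv2
    import numpy as np

    img = cv2.imread('source.jpg')
    h, w = img.shape[:2]

    # Identity maps: xs[r, c] = c and ys[r, c] = r, i.e. x = column, y = row.
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32),
                         np.arange(h, dtype=np.float32))

    identity = cv2.remap(img, xs, ys, cv2.INTER_NEAREST)
    assert np.array_equal(identity, img)  # identity map reproduces the input

    # Swapping the two maps transposes the sampled content -- the same kind
    # of mangled output that flipped grid indices produce.
    swapped = cv2.remap(img, ys, xs, cv2.INTER_NEAREST)

Regarding a PyTorch equivalent: torch.nn.functional.grid_sample does the same resampling, but it takes the grid as an (N, H, W, 2) tensor of (x, y) coordinates normalized to [-1, 1]. A rough, untested translation of the identity remap above:

    import torch
    import torch.nn.functional as F

    img_t = torch.from_numpy(img).float().permute(2, 0, 1).unsqueeze(0)  # (1, C, H, W)
    # Normalize pixel coordinates to [-1, 1] (align_corners=True convention).
    gx = torch.from_numpy(xs) / (w - 1) * 2 - 1
    gy = torch.from_numpy(ys) / (h - 1) * 2 - 1
    grid = torch.stack([gx, gy], dim=-1).unsqueeze(0)  # (1, H, W, 2), (x, y) order
    warped = F.grid_sample(img_t, grid, mode='bilinear', align_corners=True)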