PyTorch implementation of a combined multi-scale structural similarity (MS-SSIM) and L1 loss function

A Caffe implementation of this loss from the original paper is given below:

import caffe
import numpy as np


class MSSSIML1(caffe.Layer):
    """A loss layer that computes alpha * (1 - MSSSIM) + (1 - alpha) * L1.
    Assumes bottom[0] is the network output and bottom[1] is the label,
    so no gradient is back-propagated to bottom[1]."""

    def setup(self, bottom, top):
        params = eval(self.param_str)
        self.C1 = params.get('C1', 0.01) ** 2
        self.C2 = params.get('C2', 0.03) ** 2
        self.sigma = params.get('sigma', (0.5, 1., 2., 4., 8.))
        self.alpha = params.get('alpha', 0.025)

        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")

        if (bottom[0].width % 2) != 1 or (bottom[1].width % 2) != 1:
            raise Exception("Odd patch size required.")

    def reshape(self, bottom, top):
        # check input dimensions match
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # loss output is scalar
        top[0].reshape(1)

        # cache the input sizes and pre-allocate 5D buffers: (scale, batch, channel, height, width)
        num_scale = len(self.sigma)
        self.width = bottom[0].width
        self.channels = bottom[0].channels
        self.batch = bottom[0].num

        self.w = np.empty((num_scale, self.batch, self.channels, self.width, self.width))
        self.mux = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.muy = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.sigmax2 = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.sigmay2 = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.sigmaxy = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.l = np.empty((num_scale, self.batch, self.channels, 1, 1))
        self.cs = np.empty((num_scale, self.batch, self.channels, 1, 1))

        # initialize one Gaussian filter per scale, sized to the bottom blob
        for i in range(num_scale):
            gaussian = np.exp(-1. * np.arange(-(self.width // 2), self.width // 2 + 1) ** 2 / (2 * self.sigma[i] ** 2))
            gaussian = np.outer(gaussian, gaussian)                             # extend to 2D
            gaussian = gaussian / np.sum(gaussian)                              # normalization
            gaussian = np.reshape(gaussian, (1, 1, self.width, self.width))     # reshape to 4D
            gaussian = np.tile(gaussian, (self.batch, self.channels, 1, 1))
            self.w[i,:,:,:,:] = gaussian

    def forward(self, bottom, top):

        # tile the bottom blobs to 5D: (scale, batch, channel, height, width)
        self.bottom0data = np.tile(bottom[0].data, (len(self.sigma), 1, 1, 1, 1))
        self.bottom1data = np.tile(bottom[1].data, (len(self.sigma), 1, 1, 1, 1))

        # Gaussian-weighted means, variances, and covariance per scale
        self.mux = np.sum(self.w * self.bottom0data, axis=(3, 4), keepdims=True)
        self.muy = np.sum(self.w * self.bottom1data, axis=(3, 4), keepdims=True)
        self.sigmax2 = np.sum(self.w * self.bottom0data ** 2, axis=(3, 4), keepdims=True) - self.mux ** 2
        self.sigmay2 = np.sum(self.w * self.bottom1data ** 2, axis=(3, 4), keepdims=True) - self.muy ** 2
        self.sigmaxy = np.sum(self.w * self.bottom0data * self.bottom1data, axis=(3, 4), keepdims=True) - self.mux * self.muy

        # luminance (l) and contrast-structure (cs) components of SSIM
        self.l = (2 * self.mux * self.muy + self.C1) / (self.mux ** 2 + self.muy ** 2 + self.C1)
        self.cs = (2 * self.sigmaxy + self.C2) / (self.sigmax2 + self.sigmay2 + self.C2)
        self.Pcs = np.prod(self.cs, axis=0)

        # MS-SSIM: luminance of the coarsest scale times the product of all cs terms
        loss_MSSSIM = 1 - np.sum(self.l[-1, :, :, :, :] * self.Pcs) / (self.batch * self.channels)
        self.diff = bottom[0].data - bottom[1].data
        loss_L1 = np.sum(np.abs(self.diff) * self.w[-1, :, :, :, :]) / (self.batch * self.channels)  # L1 loss weighted by Gaussian

        top[0].data[...] = self.alpha * loss_MSSSIM + (1 - self.alpha) * loss_L1

    def backward(self, top, propagate_down, bottom):
        self.dl = 2 * self.w * (self.muy - self.mux * self.l) / (self.mux**2 + self.muy**2 + self.C1)
        self.dcs = 2 / (self.sigmax2 + self.sigmay2 + self.C2) * self.w * ((self.bottom1data - self.muy) - self.cs * (self.bottom0data - self.mux))

        dMSSSIM = self.dl[-1, :, :, :, :]
        for i in range(len(self.sigma)):
            dMSSSIM += self.dcs[i, :, :, :, :] / self.cs[i, :, :, :, :] * self.l[-1, :, :, :, :]
        dMSSSIM *= self.Pcs

        diff_L1 = np.sign(self.diff) * self.w[-1, :, :, :, :] / (self.batch * self.channels)  # L1 gradient weighted by Gaussian
        diff_MSSSIM = -dMSSSIM / (self.batch * self.channels)

        bottom[0].diff[...] = self.alpha * diff_MSSSIM + (1-self.alpha) * diff_L1
        bottom[1].diff[...] = 0
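
For reference, the forward pass computes the standard per-scale SSIM components

$$l_j = \frac{2\mu_x\mu_y + C_1}{\mu_x^2 + \mu_y^2 + C_1}, \qquad cs_j = \frac{2\sigma_{xy} + C_2}{\sigma_x^2 + \sigma_y^2 + C_2},$$

and the combined loss is

$$\mathcal{L} = \alpha\left(1 - l_M \prod_{j=1}^{M} cs_j\right) + (1-\alpha)\sum G_{\sigma_M} \cdot |x - y|,$$

i.e. MS-SSIM mixed with an L1 term weighted by the Gaussian window $G_{\sigma_M}$ of the coarsest scale.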

Could you help me write this in PyTorch? Thank you.

I think you should be able to directly replace the np.* operations with their torch.* equivalents. At least I cannot spot an unsupported operation from quickly skimming through the code.
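
If it helps, here is a minimal sketch of what such a translation could look like as an nn.Module. The module name MSSSIML1Loss and its constructor signature are just my choices, not from the original code, and square inputs with odd width are assumed, as in the Caffe layer:

import torch
import torch.nn as nn


class MSSSIML1Loss(nn.Module):
    """Sketch of alpha * (1 - MSSSIM) + (1 - alpha) * L1, following the Caffe layer above.
    Expects square (N, C, W, W) inputs with odd W; autograd supplies the backward pass."""

    def __init__(self, width, sigmas=(0.5, 1., 2., 4., 8.), C1=0.01 ** 2, C2=0.03 ** 2, alpha=0.025):
        super().__init__()
        self.C1, self.C2, self.alpha = C1, C2, alpha
        coords = (torch.arange(width) - width // 2).float()
        windows = []
        for s in sigmas:
            g = torch.exp(-coords ** 2 / (2 * s ** 2))
            g2d = torch.outer(g, g)              # extend to 2D
            windows.append(g2d / g2d.sum())      # normalize
        # one Gaussian window per scale: (S, 1, 1, W, W), broadcast over batch and channel
        self.register_buffer('w', torch.stack(windows).view(len(sigmas), 1, 1, width, width))

    def forward(self, x, y):
        # broadcast (N, C, W, W) against (S, 1, 1, W, W) -> (S, N, C, W, W)
        x5, y5 = x.unsqueeze(0), y.unsqueeze(0)
        mux = (self.w * x5).sum(dim=(3, 4), keepdim=True)
        muy = (self.w * y5).sum(dim=(3, 4), keepdim=True)
        sigmax2 = (self.w * x5 ** 2).sum(dim=(3, 4), keepdim=True) - mux ** 2
        sigmay2 = (self.w * y5 ** 2).sum(dim=(3, 4), keepdim=True) - muy ** 2
        sigmaxy = (self.w * x5 * y5).sum(dim=(3, 4), keepdim=True) - mux * muy
        l = (2 * mux * muy + self.C1) / (mux ** 2 + muy ** 2 + self.C1)
        cs = (2 * sigmaxy + self.C2) / (sigmax2 + sigmay2 + self.C2)
        N, C = x.shape[:2]
        loss_msssim = 1 - (l[-1] * cs.prod(dim=0)).sum() / (N * C)
        loss_l1 = (torch.abs(x - y) * self.w[-1]).sum() / (N * C)  # L1 weighted by the coarsest Gaussian
        return self.alpha * loss_msssim + (1 - self.alpha) * loss_l1

Since autograd derives the gradients, calling loss.backward() replaces the hand-written backward(); usage would be something like criterion = MSSSIML1Loss(width=17) followed by loss = criterion(output, target).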

@ptrblck So I can directly use this function by replacing the np.* operations with torch.*, right?

Yes, just try it for each operation and make sure the results are equal.
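
For example, one quick way to compare a single operation (the shapes here are just placeholders):

import numpy as np
import torch

x = np.random.rand(4, 3, 17, 17).astype(np.float32)     # hypothetical batch
w = np.random.rand(5, 1, 1, 17, 17).astype(np.float32)  # hypothetical 5-scale windows

# NumPy version of one step from the layer
mux_np = np.sum(w * x, axis=(3, 4), keepdims=True)

# the same step in PyTorch
mux_pt = (torch.from_numpy(w) * torch.from_numpy(x)).sum(dim=(3, 4), keepdim=True)

print(np.allclose(mux_np, mux_pt.numpy(), atol=1e-6))   # should print True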