Weird results when using my custom layer

Hi everyone!

I’m new to PyTorch and I’m trying to reproduce the results presented in "Estimation of distortion sensitivity for visual quality prediction using a convolutional neural network". Basically, the method consists of a CNN that estimates the shifting parameter of a 4-parameter logistic function, which maps the PSNR between the reference and distorted images to the perceptual quality of the distorted image. The loss function to be minimized is the MAE between the perceptual quality and its estimate.
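
Concretely, the mapping my custom layer implements below is

    Q = a + (b - a) / (1 + exp(-c * mean(10^(s/10) * (Pr - Pd)^2)))

where Pr and Pd are the reference and distorted patches, s is the shifting parameter predicted by the CNN, and a, b, c are the fixed logistic parameters used in the code.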

I implemented a custom layer to map the PSNR to perceptual quality, as can be seen in the following code:

import torch
import torch.nn as nn

class QEstimator(nn.Module):
    """Maps the sensitivity-weighted squared error between the reference
    and distorted patches to a perceptual quality score."""
    def __init__(self):
        super(QEstimator, self).__init__()

    def paPSNR(self, Pr, Pd, s):
        # Reshape s from (N, 1) to (N, 1, 1, 1) so each patch gets its own
        # sensitivity; otherwise s would broadcast over the spatial dims
        # instead of the batch dim.
        w = 10**(s.view(-1, 1, 1, 1)/10) * (Pr - Pd)**2
        if self.training:
            # per-patch mean, without hardcoding the batch size
            return w.view(w.size(0), -1).mean(dim=1)
        return w.mean()

    def forward(self, Pr, Pd, s):
        # logistic mapping with fixed parameters
        a, b, c = 100.0, 0.0, 0.21713241
        return a + (b - a)/(1 + torch.exp(-c*self.paPSNR(Pr, Pd, s)))

class Default(nn.Module):
    def __init__(self):
        super(Default, self).__init__()
        # VGG-style feature extractor: five conv blocks, each ending in a
        # 2x2 max-pool, so a 32x32 input ends up as 512 channels of 1x1
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, 1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2),
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, 3, 1, 1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 128, 3, 1, 1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),
            nn.Conv2d(128, 256, 3, 1, 1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 256, 3, 1, 1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),
            nn.Conv2d(256, 512, 3, 1, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
            nn.Conv2d(512, 512, 3, 1, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),
        )
        # regressor head: maps the 512-d feature vector to a single
        # non-negative shifting parameter s
        self.regressor = nn.Sequential(
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        self.QEstimator = QEstimator()
        
    def forward(self, Pr, Pd):
        output = self.features(Pr)  # (N, 512, 1, 1) for 32x32 inputs
        n, c, _, _ = output.shape
        output = output.view(n, c)  # drop the 1x1 spatial dimensions
        output = self.regressor(output)  # shifting parameter s, shape (N, 1)
        output = self.QEstimator(Pr, Pd, output)
        return output


if __name__ == "__main__":
    # random stand-ins for a batch of reference and distorted 32x32 patches
    x = torch.rand(32, 1, 32, 32)
    y = torch.rand(32, 1, 32, 32)
    net = Default()

    print(net(x, y))
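
For context, my training loop looks roughly like the sketch below (not my exact script: train_loader, the optimizer settings, and the q_true targets are placeholders for my real data pipeline). The point is that I minimize the MAE between the predicted and ground-truth quality with nn.L1Loss:

net = Default()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)  # placeholder learning rate
criterion = nn.L1Loss()  # MAE between the perceptual quality and its estimate

# `train_loader` is assumed to yield (reference, distorted, target quality) triples
net.train()
for Pr, Pd, q_true in train_loader:
    optimizer.zero_grad()
    q_pred = net(Pr, Pd)              # per-patch quality estimates, shape (N,)
    loss = criterion(q_pred, q_true)  # q_true should also have shape (N,)
    loss.backward()
    optimizer.step()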

I’m getting weird results during the training of my model, as can be seen in the plot below, where the training and validation errors are represented by the blue and orange lines, respectively:

[plot: training (blue) and validation (orange) error curves]
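
One detail that may matter: since paPSNR branches on self.training, the layer returns a per-patch vector in train mode but a single batch-wide scalar in eval mode, which is easy to check:

net = Default()
x = torch.rand(32, 1, 32, 32)
y = torch.rand(32, 1, 32, 32)
net.train()
print(net(x, y).shape)  # torch.Size([32]): one estimate per patch
net.eval()
print(net(x, y).shape)  # torch.Size([]): a single scalar for the batch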

Is there a problem with my custom layer?

Thank you all.