How to use SSIM as a loss function for training CycleGANs

Hello, I am trying to use SSIM as the cycle-consistency loss for a 3D CycleGAN network,
but I am getting negative SSIM loss values.
SSIM is a quality measure, so higher is better; to use it as a loss we need something to minimize, i.e. 1 - SSIM.
Please point out where I am going wrong.

**(epoch: 46, iters: 570, time: 3.734, data: 0.044) D_A: 0.058 G_A: 0.592 cycle_A_SSIM: -8.898 idt_A: 0.060 D_B: 0.067 G_B: 0.353 cycle_B_SSIM: -8.668 idt_B: 0.013**
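
For reference, this is how I expect the 1 - SSIM formulation to behave (a minimal sketch using the ssim function below; the shapes are just an illustration):

import torch

# Two random 3D volumes in [0, 1], shaped (N, C, H, W, D)
x = torch.rand(1, 1, 32, 32, 32)
y = torch.rand(1, 1, 32, 32, 32)

loss_same = 1 - ssim(x, x)   # SSIM(x, x) == 1, so this is ~0
loss_diff = 1 - ssim(x, y)   # SSIM lies in [-1, 1], so this stays within [0, 2]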

Below is the code for SSIM:

import torch
import torch.nn.functional as F
from math import exp

def image_dim(img):
    # Number of spatial dimensions: 2 for (N, C, H, W) images, 3 for (N, C, H, W, D) volumes.
    return img.ndimension() - 2

def gaussian(window_size, sigma):
    # 1D Gaussian kernel, normalized so the weights sum to 1.
    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
    return gauss / gauss.sum()


def create_window(window_size, n_dim, channel=1):
    # Separable Gaussian window built from outer products of the 1D kernel.
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float()

    _3D_window = torch.stack([_2D_window * x for x in _1D_window], dim=2).float().unsqueeze(0).unsqueeze(0)
    _2D_window = _2D_window.unsqueeze(0).unsqueeze(0)

    # One filter per channel, applied depthwise via groups=channel in the convolution.
    if n_dim == 3:
        return _3D_window.expand(channel, 1, window_size, window_size, window_size).contiguous()
    else:
        return _2D_window.expand(channel, 1, window_size, window_size).contiguous()
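
# Expected window shapes (for reference):
#   create_window(11, n_dim=2, channel=1).shape == (1, 1, 11, 11)
#   create_window(11, n_dim=3, channel=1).shape == (1, 1, 11, 11, 11)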


def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
    if val_range is None:
        if torch.max(img1) > 128:
            max_val = 255
        else:
            max_val = 1

        if torch.min(img1) < -0.5:
            min_val = -1
        else:
            min_val = 0
        L = max_val - min_val
    else:
        L = val_range
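    # Examples: [0, 255] images -> L = 255, sigmoid outputs -> L = 1, tanh outputs -> L = 2.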

    padd = 0

    n_dim = image_dim(img1)

    if n_dim == 2:
        (_, channel, height, width) = img1.size()
        convFunction = F.conv2d
    elif n_dim == 3:
        (_, channel, height, width, depth) = img1.size()
        convFunction = F.conv3d
    else:
        raise ValueError('ssim expects 4D (N, C, H, W) or 5D (N, C, H, W, D) inputs')

    if window is None:
        # Shrink the window if the input is smaller than the default window size.
        spatial = (height, width, depth) if n_dim == 3 else (height, width)
        real_size = min(window_size, *spatial)
        window = create_window(real_size, n_dim, channel=channel).to(img1.device)

    mu1 = convFunction(img1, window, padding=padd, groups=channel)
    mu2 = convFunction(img2, window, padding=padd, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = convFunction(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = convFunction(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = convFunction(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2

    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2

    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)  # contrast sensitivity

    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)

    if size_average:
        ret = ssim_map.mean()
    else:
        # Per-sample mean over all non-batch dimensions (works for 4D and 5D maps).
        ret = ssim_map.flatten(1).mean(1)

    if full:
        return ret, cs
    return ret
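
# Example usage (for reference, shapes are illustrative): for volumes in [0, 1],
#   x = torch.rand(2, 1, 64, 64, 64); y = torch.rand(2, 1, 64, 64, 64)
#   ssim(x, y)            -> scalar tensor, mean SSIM over the batch
#   ssim(x, y, full=True) -> (ssim, cs) pair, as consumed by msssim below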


def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
    device = img1.device
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
    levels = weights.size()[0]
    mssim = []
    mcs = []

    n_dim = image_dim(img1)

    pool_size = [2] * n_dim
    if n_dim == 2:
        pool_function = F.avg_pool2d
    if n_dim == 3:
        pool_function = F.avg_pool3d

    for _ in range(levels):
        sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
        mssim.append(sim)
        mcs.append(cs)

        img1 = pool_function(img1, pool_size)
        img2 = pool_function(img2, pool_size)

    mssim = torch.stack(mssim)
    mcs = torch.stack(mcs)

    # Normalize to [0, 1] (avoids NaNs from negative bases when training
    # unstable models; not compliant with the original definition).
    if normalize:
        mssim = (mssim + 1) / 2
        mcs = (mcs + 1) / 2

    pow1 = mcs ** weights
    pow2 = mssim ** weights
    # Contrast terms at all but the coarsest scale, SSIM term at the coarsest
    # scale, following the Matlab implementation at
    # https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    output = torch.prod(pow1[:-1]) * pow2[-1]
    return output
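
# Example usage (for reference): the inputs must survive four 2x average-poolings, e.g.
#   x = torch.rand(1, 1, 256, 256); y = torch.rand(1, 1, 256, 256)
#   msssim(x, y, normalize=True)   # scalar in [0, 1] when normalized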


# Classes to re-use window
class SSIM(torch.nn.Module):
    def __init__(self, window_size=11, size_average=True, val_range=None):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range

        # Assume 1 channel for SSIM
        self.channel = 1
        self.window = None

    def forward(self, img1, img2):
        channel = img1.size(1)

        # Create the window on the first call, and rebuild it whenever the
        # channel count or dtype of the input changes.
        if self.window is None or channel != self.channel or self.window.dtype != img1.dtype:
            self.window = create_window(self.window_size, image_dim(img1), channel=channel).to(img1.device).type(img1.dtype)
            self.channel = channel

        return ssim(img1, img2, window=self.window, window_size=self.window_size, size_average=self.size_average, val_range=self.val_range)

class MSSSIM(torch.nn.Module):
    def __init__(self, window_size=11, size_average=True, channel=3):
        super(MSSSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel

    def forward(self, img1, img2):
        # TODO: store window between calls if possible
        return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
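
For completeness, this is how I understand the module wrapper would be used as a loss (a minimal sketch; the shapes are illustrative):

ssim_loss = SSIM(window_size=11)
x = torch.rand(4, 1, 64, 64)
y = torch.rand(4, 1, 64, 64)
loss = 1 - ssim_loss(x, y)   # approaches 0 as x and y become identical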

The SSIM loss is set up and computed as follows:

self.criterionCycle_SSIM = pytorch_ssim.ssim

    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss || G_B(G_A(A)) - A||
        self.loss_cycle_A = 1 - self.criterionCycle_SSIM(self.rec_A, self.real_A) * 100

        # Backward cycle loss || G_A(G_B(B)) - B||
        self.loss_cycle_B = 1 - self.criterionCycle_SSIM(self.rec_B, self.real_B) * 100

        # combined loss and calculate gradients
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B

        self.loss_G.backward()