I am getting 0 loss and accuracy in the thousands

I am trying to learn transfer learning by using a pretrained MobileNetV2 as the encoder and a custom decoder for a segmentation problem on the ISIC 2016 dataset.

The problem I am facing is that my loss is always 0 and the accuracy comes out in the thousands.
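For reference, below is a minimal sketch of how a binary-segmentation loss and pixel accuracy are typically computed for a model like this. It is illustrative only, not my actual training code; the BCEWithLogitsLoss and the 0.5 threshold are assumptions about a standard setup.

    import torch

    # Illustrative sketch: BCEWithLogitsLoss on the raw logits plus a
    # 0.5-thresholded pixel-accuracy metric (assumptions, not my exact code).
    criterion = torch.nn.BCEWithLogitsLoss()

    def train_step(model, optimizer, images, masks):
        # images: (N, 3, H, W) floats; masks: (N, 1, H, W) with values in {0, 1}
        optimizer.zero_grad()
        logits = model(images)                  # raw logits, no sigmoid in the model
        loss = criterion(logits, masks.float())
        loss.backward()
        optimizer.step()

        # Pixel accuracy: threshold the sigmoid probabilities at 0.5 and average
        # over all pixels, so the value stays between 0 and 1.
        preds = (torch.sigmoid(logits) > 0.5).float()
        accuracy = (preds == masks.float()).float().mean()
        return loss.item(), accuracy.item()

Here is my architecture: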

    import torch
    from torchvision.models import mobilenet_v2

    class SegmentationModel(torch.nn.Module):
        def __init__(self, dropout_prob=0.5):
            super(SegmentationModel, self).__init__()
            # Pretrained MobileNetV2 encoder; drop the final 1x1 conv block
            # so the deepest feature map has 320 channels.
            self.encoder = mobilenet_v2(weights='DEFAULT', progress=True)
            self.encoder_features = list(self.encoder.features.children())
            self.encoder_features = self.encoder_features[:-1]

            # Freeze the pretrained encoder so only the decoder is trained.
            for params in self.encoder.parameters():
                params.requires_grad = False

            # Decoder: each stage upsamples 2x, concatenates an encoder skip
            # connection, then applies conv -> ReLU -> BatchNorm -> Dropout.
            self.upconv1 = torch.nn.ConvTranspose2d(320, 160, kernel_size=2, stride=2)
            self.conv1 = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)   # 160 + 96 skip channels
            self.bn1 = torch.nn.BatchNorm2d(256)
            self.dropout1 = torch.nn.Dropout2d(dropout_prob)
            self.upconv2 = torch.nn.ConvTranspose2d(256, 96, kernel_size=2, stride=2)
            self.conv2 = torch.nn.Conv2d(128, 64, kernel_size=3, padding=1)    # 96 + 32 skip channels
            self.bn2 = torch.nn.BatchNorm2d(64)
            self.dropout2 = torch.nn.Dropout2d(dropout_prob)
            self.upconv3 = torch.nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
            self.conv3 = torch.nn.Conv2d(56, 28, kernel_size=3, padding=1)     # 32 + 24 skip channels
            self.bn3 = torch.nn.BatchNorm2d(28)
            self.dropout3 = torch.nn.Dropout2d(dropout_prob)
            self.upconv4 = torch.nn.ConvTranspose2d(28, 16, kernel_size=2, stride=2)
            self.conv4 = torch.nn.Conv2d(32, 8, kernel_size=3, padding=1)      # 16 + 16 skip channels
            self.bn4 = torch.nn.BatchNorm2d(8)
            self.dropout4 = torch.nn.Dropout2d(dropout_prob)
            self.upconv5 = torch.nn.ConvTranspose2d(8, 1, kernel_size=2, stride=2)  # single-channel logits

            self._init_weights()
  
        def _init_weights(self):
            # Kaiming init for every Conv2d / ConvTranspose2d found by
            # self.modules(), which also walks the pretrained encoder's layers.
            for m in self.modules():
                if isinstance(m, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
                    torch.nn.init.kaiming_normal_(m.weight)
                    if m.bias is not None:
                        torch.nn.init.constant_(m.bias, 0)

        def forward(self, x):
            # Run the encoder, storing every intermediate feature map;
            # four of them are reused below as skip connections.
            outputs = []
            for layer in self.encoder_features:
                x = layer(x)
                outputs.append(x)

            # Decoder stage 1: upsample 2x, concat skip, conv -> ReLU -> BN -> Dropout.
            x = self.upconv1(x)
            x = torch.cat([x, outputs[-5]], dim=1)
            x = torch.nn.functional.relu(self.conv1(x))
            x = self.bn1(x)
            x = self.dropout1(x)

            # Decoder stage 2
            x = self.upconv2(x)
            x = torch.cat([x, outputs[-12]], dim=1)
            x = torch.nn.functional.relu(self.conv2(x))
            x = self.bn2(x)
            x = self.dropout2(x)

            # Decoder stage 3
            x = self.upconv3(x)
            x = torch.cat([x, outputs[-15]], dim=1)
            x = torch.nn.functional.relu(self.conv3(x))
            x = self.bn3(x)
            x = self.dropout3(x)

            # Decoder stage 4
            x = self.upconv4(x)
            x = torch.cat([x, outputs[-17]], dim=1)
            x = torch.nn.functional.relu(self.conv4(x))
            x = self.bn4(x)
            x = self.dropout4(x)

            # Final upsampling to the input resolution; raw logits, no activation.
            x = self.upconv5(x)

            return x
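
As a sanity check of the shapes (illustrative; it assumes 224x224 RGB inputs), the model can be exercised with a dummy batch like this:

    model = SegmentationModel()   # downloads the pretrained MobileNetV2 weights on first run
    model.eval()                  # disable dropout and use BatchNorm running stats for the check

    with torch.no_grad():
        dummy = torch.randn(2, 3, 224, 224)   # (N, C, H, W)
        out = model(dummy)

    print(out.shape)  # expected: torch.Size([2, 1, 224, 224])

Note that the network ends with a plain ConvTranspose2d, so the output is raw per-pixel logits rather than probabilities.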

Is there any problem with this?

Thank you.