Loss stuck at the same value after a few epochs early in training

I am training a UNet on input data of size (4, 40, 40, 40). I perform some preprocessing and then feed the data into the network, which is trained with the Adam optimizer and a weighted cross-entropy (CE) loss. However, from around the 9th epoch (of 90 total) onward, the loss value is always 23.01, with train and validation F1 scores around 25%. I have tried many things and checked the code multiple times, but could not find the problem.
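For context, here is roughly how I configure the optimizer and loss; the learning rate, class weights, and feature width below are placeholders rather than my exact values:

import torch
import torch.nn as nn

model = UNet_multichannels(n_channels=4, n_classes=4, feature=16)  # model defined below; values are placeholders
class_weights = torch.tensor([0.1, 1.0, 1.0, 1.0])  # placeholder weights for the weighted CE loss
criterion = nn.CrossEntropyLoss(weight=class_weights)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # placeholder learning rate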

Code for the UNet layers:

import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv(nn.Module):
    """Two 5x5x5 convolutions, each followed by batch norm and ReLU."""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.single_conv = nn.Sequential(
            nn.Conv3d(in_channels, mid_channels, kernel_size=5, padding=2, bias=False),
            nn.BatchNorm3d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(mid_channels, out_channels, kernel_size=5, padding=2, bias=False),
            nn.BatchNorm3d(out_channels),
            nn.ReLU(inplace=True)
        )  # same kernel size, stride, and padding throughout, so the spatial size is preserved

    def forward(self, x):
        return self.single_conv(x)

class Down(nn.Module):
    """Downscaling: maxpool halves the spatial size, then dropout and Conv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool3d(2),
            nn.Dropout(p=0.2),
            Conv(in_channels, out_channels)
        )

    def forward(self, x):
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Upscaling then Conv; concatenates the skip connection before convolving."""

    def __init__(self, in_channels, out_channels, trilinear=True):
        super().__init__()

        # if trilinear, use the normal convolutions to reduce the number of channels
        if trilinear:
            self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
            self.conv = Conv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose3d(in_channels, in_channels // 2,  kernel_size=2, stride=2)
            self.conv = Conv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad x1 so its spatial size matches the skip connection x2
        # (tensors are N x C x D x H x W)
        diffZ = x2.size()[2] - x1.size()[2]
        diffY = x2.size()[3] - x1.size()[3]
        diffX = x2.size()[4] - x1.size()[4]

        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2,
                        diffZ // 2, diffZ - diffZ // 2])
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class OutConv(nn.Module):
    """1x1x1 convolution mapping the final feature maps to class logits."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
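As a quick sanity check on the building blocks (kernel size 5 with padding 2 preserves the spatial size, and MaxPool3d(2) halves it), here is a dummy forward pass with an assumed feature width of 16:

x = torch.randn(1, 4, 40, 40, 40)
conv = Conv(4, 16)
down = Down(16, 32)
print(conv(x).shape)        # torch.Size([1, 16, 40, 40, 40]) -- spatial size preserved
print(down(conv(x)).shape)  # torch.Size([1, 32, 20, 20, 20]) -- halved by the pooling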

Code for the UNet model:

import torch.nn as nn

from UNet_Layers import *

class UNet_multichannels(nn.Module):

    def __init__(self, n_channels, n_classes, feature, trilinear=True):
        super().__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.trilinear = trilinear
        self.feature = feature

        self.inp = Conv(self.n_channels, self.feature)
        self.down1 = Down(self.feature, 2 * self.feature)
        factor = 2 if self.trilinear else 1
        self.down2 = Down(2 * self.feature, 4 * self.feature // factor)
        self.up1 = Up(4 * self.feature, 2 * self.feature // factor, self.trilinear)
        self.up2 = Up(2 * self.feature, self.feature, self.trilinear)
        self.out = OutConv(self.feature, self.n_classes)

    def forward(self, x):
        x1 = self.inp(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x = self.up1(x3, x2)
        x = self.up2(x, x1)
        logits = self.out(x)
        return logits
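A smoke test of the full model on a dummy batch behaves as expected (feature=16 and 4 classes are assumptions for illustration); note that nn.CrossEntropyLoss takes logits of shape (N, C, D, H, W) and integer labels of shape (N, D, H, W):

model = UNet_multichannels(n_channels=4, n_classes=4, feature=16)
x = torch.randn(2, 4, 40, 40, 40)
logits = model(x)
print(logits.shape)  # torch.Size([2, 4, 40, 40, 40])

target = torch.randint(0, 4, (2, 40, 40, 40))  # dummy integer class labels
loss = nn.CrossEntropyLoss()(logits, target)
print(loss.item())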