PyTorch 3D CNN model only outputs positive values

I am using a 3D encoder-decoder model. The input to the model is a 3D grid with added noise; the values contained in the grid are both positive and negative, and I want the model to denoise the grid. I don't know why, but my model outputs only positive values. I am using the L1 loss function, and both my training and validation loss decrease with the epochs. But when I test the model, it only outputs positive values. I am using PyTorch automatic mixed precision for the training. Can someone help me with this? I have already spent too much time on it and cannot fix it. If the training script is needed, I can also provide that.
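
For reference, my training loop follows the standard automatic mixed precision pattern; below is a simplified sketch (the Adam optimizer, learning rate, num_epochs and train_loader are just placeholders here, the real script is longer):

import torch
import torch.nn as nn

model = Model_ED().cuda()
criterion = nn.L1Loss()                                     # L1 loss between denoised and clean grids
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # placeholder optimizer / learning rate
scaler = torch.cuda.amp.GradScaler()                        # mixed precision gradient scaler

for epoch in range(num_epochs):                             # num_epochs is a placeholder
    for noisy_vol, clean_vol in train_loader:               # train_loader is a placeholder
        noisy_vol, clean_vol = noisy_vol.cuda(), clean_vol.cuda()
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():                     # forward pass and loss in mixed precision
            pred = model(noisy_vol)
            loss = criterion(pred, clean_vol)
        scaler.scale(loss).backward()                       # scaled backward pass
        scaler.step(optimizer)
        scaler.update()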

The PyTorch model is given below:

import torch
import torch.nn as nn


class Model_ED(nn.Module):

    def __init__(self, bn=True):
        super(Model_ED, self).__init__()

        #### ENCODER ####
        self.en_1 = nn.Sequential(
            nn.Conv3d(1, 16, kernel_size=3, stride=1, padding=1),
            #nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(16, 32, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
        )

        self.en_2 = nn.Sequential(
            nn.Conv3d(32, 32, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
        )

        self.en_3 = nn.Sequential(
            nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
        )

        #### DECODER ####
        self.de_3 = nn.Sequential(
            nn.Conv3d(128, 32, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(32, 32, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
        )

        self.de_2 = nn.Sequential(
            nn.Conv3d(64, 16, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
            nn.Conv3d(16, 16, kernel_size=3, stride=1, padding=1),
            # nn.Tanh(),
            nn.ReLU(),
        )

        self.de_1 = nn.Sequential(
            nn.Conv3d(16, 1, kernel_size=3, stride=1, padding=1),
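            # no activation after this final conv, so the output should not be restricted to positive values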
        )

        self.mp = nn.MaxPool3d(kernel_size=2, stride=2)
        self.upsample = nn.Upsample(scale_factor=2)

    def forward(self, input_tsdf_vol):

        # feature encoding
        en_1 = self.en_1(input_tsdf_vol)
        en_2 = self.mp(self.en_2(en_1))
        en_3 = self.mp(self.en_3(en_2))

        # feature decoding
        up_3 = self.upsample(en_3)
        f_cat_3 = torch.cat([up_3, en_2], dim=1)
        de_3 = self.de_3(f_cat_3)

        up_2 = self.upsample(de_3)
        f_cat_2 = torch.cat([up_2, en_1], dim=1)
        de_2 = self.de_2(f_cat_2)

        de_1 = self.de_1(de_2)

        return de_1

However, the model itself returns both positive and negative outputs for a random input when I run:

model = Model_ED()
x = torch.randn(1, 1, 100, 100, 100)
out = model(x)
print(out)
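
Printing the minimum and maximum of this output also confirms that both signs are present:

print(out.min().item(), out.max().item())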

Are you applying any activation function afterwards, or changing the output in any other way?
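
For example, if your test script does anything like the following after the forward pass, every negative value would disappear (relu/clamp/abs here are only guesses at what might be happening, not something taken from your code):

with torch.no_grad():
    out = model(x)
    out = torch.relu(out)  # or torch.clamp(out, min=0) or out.abs() -- any of these removes negative values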