Change the latent space of autoencoder while training

I am training one autoencoder for two classes: real and fake. I have a latent space of 256 dimensions. I just wish to activate the first 128 dimensions for the real class and the last 128 for the fake ones.
My forward function looks something like the code below. How can this be done?

    def forward(self, x):
        """
        Run the input through the encoder and reconstruct it with the decoder.

        :param x: original input
        :return: z : latent representation, x_recon : reconstructed input
        """
        pooling_stack = []

        for layer in self.encoder:
            if not isinstance(layer, nn.MaxPool2d):
                x = layer(x)
                continue
            # Remember the pre-pool size and the argmax indices so the
            # matching MaxUnpool2d layer can undo this pooling step.
            pre_pool_size = x.size()
            x, indices = layer(x)
            pooling_stack.append({'output_size': pre_pool_size,
                                  'indices': indices})

        z = x

        for layer in self.decoder:
            if isinstance(layer, nn.MaxUnpool2d):
                x = layer(x, **pooling_stack.pop())
            else:
                x = layer(x)

        return z, x

The solution

def forward(self, x, y):
    """
    Pass the input through the encoder, zero out the half of the latent
    code that does not belong to the sample's class, and reconstruct the
    masked latent with the decoder.

    :param x: original input
    :param y: per-sample class labels (0 = real, 1 = fake)
    :return: z : masked latent representation, x_recon : reconstructed input
    """
    unpool_info = []

    for m in self.encoder:
        if isinstance(m, nn.MaxPool2d):
            output_size = x.size()
            x, pool_idx = m(x)
            unpool_info.append({'output_size': output_size,
                                'indices': pool_idx})
        else:
            x = m(x)

    # Build the per-class masks on the same device as the latent code
    # (hardcoding .cuda() would break CPU runs).
    # BUG FIX: the original assigned 0 into an all-zeros tensor, so both
    # masks were entirely zero and the whole latent was wiped out.
    # "real" keeps the first 128 dimensions active, "fake" the last 128.
    real = torch.zeros(256, device=x.device)
    real[:128] = 1
    fake = torch.zeros(256, device=x.device)
    fake[128:] = 1
    mult = [real, fake]

    # Mask out-of-place rather than with z[i] = ... — in-place mutation of
    # a non-leaf tensor can break the autograd backward pass.
    # NOTE(review): assumes the latent has shape (batch, 256) so the
    # (256,) masks broadcast per sample — confirm against the encoder.
    mask = torch.stack([mult[int(y_i)] for y_i in y])
    z = x * mask
    x = z

    for m in self.decoder:
        if isinstance(m, nn.MaxUnpool2d):
            x = m(x, **unpool_info.pop())
        else:
            x = m(x)
    x_recon = x
    return z, x_recon