Define a layer to perform custom operations in PyTorch

I am trying to convert TensorFlow code to PyTorch, and I have changed these lines from TensorFlow:

    def _build(self, weight_path):
        self._hidden_net = nb.build_dense_network(self._context_dim, output_dim=-1, output_activation=None,
                                                  params=self._hidden_dict, with_output_layer=False)

        self._mean_t = k.layers.Dense(self._sample_dim)(self._hidden_net.output)
        self._chol_covar_raw = k.layers.Dense(self._sample_dim ** 2)(self._hidden_net.output)
        self._chol_covar = k.layers.Lambda(self._create_chol)(self._chol_covar_raw)

        self._cond_params_model = k.models.Model(inputs=[self._hidden_net.inputs],
                                                 outputs=[self._mean_t, self._chol_covar])

to these lines in PyTorch:

    class LambdaLayer(nn.Module):
        def __init__(self, lambd):
            super(LambdaLayer, self).__init__()
            self.lambd = lambd

        def forward(self, x):
            return self.lambd(x)


    def _build(self):
        self._hidden_net, self.regularizer = build_dense_network(
            self._context_dim, output_dim=-1, output_activation=None,
            params=self._hidden_dict, with_output_layer=False)
        self._mean_t = nn.Linear(
            self._hidden_net._modules[list(self._hidden_net._modules)[-2]].out_features,
            self._sample_dim)
        self._chol_covar_raw = nn.Linear(
            self._hidden_net._modules[list(self._hidden_net._modules)[-2]].out_features,
            self._sample_dim ** 2)
        self._chol_covar =  LambdaLayer( lambda x:self._create_chol(x))(self._chol_covar_raw)

    def _create_chol(self, chol_raw):
        samples =torch.triu(torch.reshape(chol_raw, [-1, self._sample_dim, self._sample_dim]).t(), diagonal=0).t()
        return samples.fill_diagonal_(torch.exp(torch.diagonal(samples, dim1=-2, dim2=-1)) + 1e-12)

Running this code, I am getting this error:

    self._model = self._build()
      File "EIM.py", line 337, in _build
        self._chol_covar =  LambdaLayer( lambda x:self._create_chol(x))(self._chol_covar_raw)
      File "/home/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
        return forward_call(*input, **kwargs)
      File "EIM.py", line 306, in forward
        return self.lambd(x)
      File "EIM.py", line 337, in <lambda>
        self._chol_covar =  LambdaLayer( lambda x:self._create_chol(x))(self._chol_covar_raw)
      File "EIM.py", line 359, in _create_chol
        samples =torch.triu(torch.reshape(chol_raw, [-1, self._sample_dim, self._sample_dim]).t(), diagonal=0).t()
    TypeError: reshape(): argument 'input' (position 1) must be Tensor, not Linear
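
For what it's worth, the same TypeError shows up in a minimal, self-contained snippet when an nn.Linear module itself (rather than its output tensor) is passed to torch.reshape (toy dimensions, unrelated to my real model):

    import torch
    import torch.nn as nn

    layer = nn.Linear(4, 9)   # stand-in for self._chol_covar_raw
    x = torch.randn(2, 4)

    print(torch.reshape(layer(x), [-1, 3, 3]).shape)  # works: torch.Size([2, 3, 3])
    torch.reshape(layer, [-1, 3, 3])                  # TypeError: ... must be Tensor, not Linear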

So I suspect my LambdaLayer is being handed the nn.Linear module itself rather than a tensor. Is my approach the right way to convert k.layers.Lambda for custom operations in PyTorch, or should the custom operation only be applied to tensors inside a forward pass (see the sketch below)? Any suggestions? Thanks.
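
For reference, the pattern I had in mind is roughly the following, using the LambdaLayer class from above, toy dimensions, and torch.tanh as a stand-in for _create_chol (untested against my real model):

    import torch
    import torch.nn as nn

    class CondParams(nn.Module):
        def __init__(self, context_dim=4, hidden_dim=16, sample_dim=3):
            super().__init__()
            self.hidden = nn.Sequential(nn.Linear(context_dim, hidden_dim), nn.ReLU())
            self.mean_t = nn.Linear(hidden_dim, sample_dim)
            self.chol_covar_raw = nn.Linear(hidden_dim, sample_dim ** 2)
            # LambdaLayer as defined above; torch.tanh is only a placeholder for _create_chol
            self.chol_covar = LambdaLayer(torch.tanh)

        def forward(self, x):
            h = self.hidden(x)                               # h is a tensor here, not a module
            mean = self.mean_t(h)
            chol = self.chol_covar(self.chol_covar_raw(h))   # lambda applied to a tensor
            return mean, chol

    mean, chol = CondParams()(torch.randn(2, 4))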