ValueError: base_distribution needs to have shape with size at least 6, but got torch.Size([6])

I am trying to sample from a custom distribution built with TransformedDistribution. I would like the samples, and their log-probabilities, to have the same size as the mean tensor that is fed into the distribution.
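
Concretely, this is the shape behaviour I am after (a hypothetical call, assuming a 6-dimensional action as in the code below):

a_tanh_mode, a_sample, log_pi_a = network(state)
# a_sample.shape -> torch.Size([6]), same as the mean tensor fed to the distribution
# log_pi_a.shape -> torch.Size([6]) as well, not a scalar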
Here is my attempt to build this distribution:

import torch
import torch.nn as nn
from torch.distributions import Independent, Normal, TransformedDistribution
from torch.distributions import transforms as tT

# LOG_STD_MIN, LOG_STD_MAX, get_spec_means_mags and self.device are defined
# elsewhere in my code.


class Split(torch.nn.Module):
    """Wraps a module and splits its output into n_parts chunks."""

    def __init__(self, module, n_parts: int, dim=1):
        super().__init__()
        self._n_parts = n_parts
        self._dim = dim
        self._module = module

    def forward(self, inputs):
        output = self._module(inputs)
        if output.ndim == 1:
            # un-batched input: split the single output vector into n_parts
            result = torch.hsplit(output, self._n_parts)
        else:
            # batched input: split along self._dim into equally sized chunks
            chunk_size = output.shape[self._dim] // self._n_parts
            result = torch.split(output, chunk_size, dim=self._dim)
        return result
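
Split just chunks the output of the wrapped module into n_parts pieces; for example (hypothetical sizes, with 2 * action_dim = 12):

layer = nn.Linear(4, 12)                   # hypothetical layer with 2 * action_dim outputs
split = Split(layer, n_parts=2)
mean, log_std = split(torch.randn(3, 4))   # batched input   -> two tensors of shape (3, 6)
mean, log_std = split(torch.randn(4))      # unbatched input -> two tensors of shape (6,)

The policy network then applies Split to its last layer:
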
class Network(nn.Module):
  def __init__(
      self,
      latent_spec,
      action_spec,
      fc_layer_params=(),
      ):
    super(Network, self).__init__()
    self._action_spec = action_spec
    self._layers = nn.ModuleList()
    for hidden_size in fc_layer_params:
        if len(self._layers) == 0:
            self._layers.append(nn.Linear(latent_spec.shape[0], hidden_size))
        else:
            self._layers.append(nn.Linear(hidden_size, hidden_size))
        self._layers.append(nn.ReLU())
    output_layer = nn.Linear(hidden_size, self._action_spec.shape[0] * 2)
    self._layers.append(output_layer)
    
    self._action_means, self._action_mags = get_spec_means_mags(
        self._action_spec)


  @property
  def action_spec(self):
      return self._action_spec

  def _get_outputs(self, state):
      h = state
      # run the state through every layer except the final linear output layer
      for layer in self._layers[:-1]:
          h = layer(h)

      # the last layer outputs 2 * action_dim values; split them into mean and log_std
      self._mean_logvar_layers = Split(
          self._layers[-1],
          n_parts=2,
      )
      mean, log_std = self._mean_logvar_layers(h)
      print(f"mean {mean.shape} its shape{[mean.shape[-1]]}")
      a_tanh_mode = torch.tanh(mean) * self._action_mags + self._action_means
      log_std = torch.tanh(log_std)
      log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1)
      std = torch.exp(log_std)
      print(f"std {std.shape} its shape{[std.shape[-1]]}, this is maybe problematic {a_tanh_mode}")
      
      
      a_distribution = TransformedDistribution(
          Independent(Normal(loc=torch.zeros_like(mean), scale=torch.ones_like(mean)), 1),
          tT.ComposeTransform([
              tT.AffineTransform(loc=self._action_means, scale=self._action_mags, event_dim=mean.shape[-1]),
              tT.TanhTransform(cache_size=1),
              tT.AffineTransform(loc=mean, scale=std, event_dim=mean.shape[-1]),
          ]))
      
      return a_distribution, a_tanh_mode

  def get_log_density(self, state, action):
    a_dist, _ = self._get_outputs(state.to(device=self.device))
    log_density = a_dist.log_prob(action.to(device=self.device))
    return log_density


  def __call__(self, state):
    a_dist, a_tanh_mode = self._get_outputs(state.to(device=self.device))
    a_sample = a_dist.sample()
    print(f"sample action:{a_sample}")
    log_pi_a = a_dist.log_prob(a_sample)
    print(f"log sample action:{log_pi_a}")
    return a_tanh_mode, a_sample, log_pi_a

When I remove the event_dim arguments from the AffineTransforms, log_pi_a has size 1, while I would like it to have size 6. But as soon as I add event_dim, I get the error below (a standalone reproduction is included after the traceback):

mean torch.Size([6]) its shape[6]
std torch.Size([6]) its shape[6], this is maybe problematic tensor([-0.0180,  0.0033, -0.0030, -0.0431, -0.0161,  0.0583],
       grad_fn=<AddBackward0>)
     60         if len(base_shape) < domain_event_dim:
     61             raise ValueError("base_distribution needs to have shape with size at least {}, but got {}."
---> 62                              .format(domain_event_dim, base_shape))
     63         shape = transform.forward_shape(base_shape)
     64         expanded_base_shape = transform.inverse_shape(shape)

ValueError: base_distribution needs to have shape with size at least 6, but got torch.Size([6]).
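
The error can be reproduced outside the network with a much smaller snippet (hypothetical values, chosen to match the 6-dimensional action case above):

import torch
from torch.distributions import Independent, Normal, TransformedDistribution
from torch.distributions import transforms as tT

mean = torch.zeros(6)
std = torch.ones(6)
base = Independent(Normal(loc=torch.zeros_like(mean), scale=torch.ones_like(mean)), 1)
dist = TransformedDistribution(
    base,
    tT.AffineTransform(loc=mean, scale=std, event_dim=mean.shape[-1]),
)
# ValueError: base_distribution needs to have shape with size at least 6,
# but got torch.Size([6]).

So the problem does not seem to be specific to my network; it already appears with a single AffineTransform whose event_dim is set to the size of the mean tensor.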

How can I get an output distribution with the correct size, or how can I get rid of this error?

I would appreciate any help; I have tried different permutations of the inputs and searched for this error, but I could not find anything.