Peephole LSTM cell implementation

Hi,
just want to confirm: is this implementation of a peephole LSTM cell correct?

class JitLSTMCell(jit.ScriptModule):
    
    def __init__(self, input_size, hidden_size):
        super(JitLSTMCell, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size

        self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
        
        self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
        self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))

        self.weight_ch_i = nn.Parameter(torch.Tensor(hidden_size))
        self.weight_ch_f = nn.Parameter(torch.Tensor(hidden_size))        
        self.weight_ch_o = nn.Parameter(torch.Tensor(hidden_size))

        self.reset_parameter()        

    @jit.ignore(drop=True)
    def reset_parameter(self):
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            nn.init.uniform_(weight, -stdv, stdv)

    @jit.script_method
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        hx, cx = state
        xh = (torch.mm(input, self.weight_ih.t()) + self.bias_ih + torch.mm(hx, self.weight_hh.t()) + self.bias_hh)

        i, f, _c, o = xh.chunk(4, 1)

        i = torch.sigmoid(i + (self.weight_ch_i * cx))
        f = torch.sigmoid(f + (self.weight_ch_f * cx))
        _c = torch.tanh(_c)

        cy = (f * cx) + (i * _c)

        o = torch.sigmoid(o + (self.weight_ch_o * cy))
        hy = o * torch.tanh(cy)

        return hy, (hy, cy)

Thanks