Multi-layer LSTM throws an error

import torch
import torch.nn as nn

class Classifier(nn.Module):

    def __init__(self):
        super(Classifier, self).__init__()

        # Six stacked single-layer LSTMs; the first five have hidden_size 1024,
        # the last one reduces it to 256
        self.LSTM1 = nn.LSTM(input_size=256, hidden_size=1024)
        self.LSTM2 = nn.LSTM(input_size=1024, hidden_size=1024)
        self.LSTM3 = nn.LSTM(input_size=1024, hidden_size=1024)
        self.LSTM4 = nn.LSTM(input_size=1024, hidden_size=1024)
        self.LSTM5 = nn.LSTM(input_size=1024, hidden_size=1024)
        self.LSTM6 = nn.LSTM(input_size=1024, hidden_size=256)
        
    def forward(self, x):

        # Each layer's final hidden state (hn, cn) is passed on as the
        # initial state of the next layer
        result, (hn, cn) = self.LSTM1(x)
        result, (hn, cn) = self.LSTM2(result, (hn, cn))
        result, (hn, cn) = self.LSTM3(result, (hn, cn))
        result, (hn, cn) = self.LSTM4(result, (hn, cn))
        result, (hn, cn) = self.LSTM5(result, (hn, cn))
        result, (hn, cn) = self.LSTM6(result, (hn, cn))

        return result

The architecture above throws this error:

RuntimeError: Expected hidden[0] size (1, 256, 256), got (1, 256, 1024)
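
For reference, a minimal call that triggers it (the sequence length is arbitrary; the batch size of 256 matches the error message, and the input is shaped (seq_len, batch, input_size) since batch_first defaults to False):

model = Classifier()
x = torch.randn(5, 256, 256)   # (seq_len=5, batch=256, input_size=256)
result = model(x)              # raises the RuntimeError above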

But if I remove (hn, cn) from the self.LSTM6 call, it runs fine. Why does the hidden state affect the LSTM layer?
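
For comparison, here is the same behavior in isolation (the shapes mirror the ones in the error message; the tensors are just placeholders):

import torch
import torch.nn as nn

lstm6 = nn.LSTM(input_size=1024, hidden_size=256)

seq = torch.randn(5, 256, 1024)    # output of LSTM5: (seq_len, batch, 1024)
hn = torch.zeros(1, 256, 1024)     # final hidden state of LSTM5: (num_layers, batch, 1024)
cn = torch.zeros(1, 256, 1024)     # final cell state of LSTM5, same shape

out, _ = lstm6(seq)                # runs: LSTM6 creates its own zero state of shape (1, 256, 256)
out, _ = lstm6(seq, (hn, cn))      # RuntimeError: expected hidden[0] of size (1, 256, 256)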