ValueError: setting an array element with a sequence when manipulating inputs inside a neural network

I have a DCGAN and I want to modify it so that the output of the last ConvTranspose2d layer can be passed into an LSTM layer.
To test this, I have an array of image data with shape (2092, 64, 64, 3), which will also be the input to the neural network. I’ve extracted the RGB channels so that each color array can be passed into the LSTM layer.
Here’s the test code, run outside the neural network:

import numpy as np

Red = data[:,:,:,0]               # red channel of data, shape (2092, 64, 64)
outputR = []

for i in range(8, 64):            # 56 sliding windows
  outputR.append(Red[i-8:i, 0])   # first pixel row of 8 consecutive images: (8, 64)

outputR = np.array(outputR)

Everything goes fine and I get outputR with shape (56, 8, 64): range(8, 64) yields 56 windows, each an (8, 64) slice.
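To make this reproducible, the same shapes come out with dummy data; a minimal sketch where np.random stands in for my real images:

import numpy as np

data = np.random.rand(2092, 64, 64, 3).astype('float32')   # stand-in for the image array
Red = data[:,:,:,0]                                         # (2092, 64, 64)

outputR = np.array([Red[i-8:i, 0] for i in range(8, 64)])   # 56 windows, each (8, 64)
print(outputR.shape)                                        # (56, 8, 64)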
However, when I try to apply the same code inside my neural network:

R = RGB[:,0,:,:]  # RGB shape: (n_samples, channels, height, width)
G = RGB[:,1,:,:]
B = RGB[:,2,:,:]

R = R.detach().cpu().numpy()  # convert each channel tensor to a NumPy array
G = G.detach().cpu().numpy()
B = B.detach().cpu().numpy()
        
outputR = []
outputG = []
outputB = []

for i in range(8, 64):
  outputR.append(R[i-8:i,0])   # same windowing as in the test
  outputG.append(G[i-8:i,0])
  outputB.append(B[i-8:i,0])

outputR = np.array(outputR, dtype='float32')
outputG = np.array(outputG, dtype='float32')
outputB = np.array(outputB, dtype='float32')

I get the following error: ValueError: setting an array element with a sequence.

Why the difference? In both cases I’m operating on the same kind of sequences, arrays with a batch dimension, yet when I test the operation outside the neural network everything goes smoothly.
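For reference, I can trigger the same error by hand when the elements of a list don’t all share a shape; a minimal sketch with toy shapes, not my actual data:

import numpy as np

# Elements with identical shapes stack into one array without complaint:
ok = np.array([np.zeros((8, 64)), np.zeros((8, 64))], dtype='float32')   # shape (2, 8, 64)

# Mismatched shapes raise: ValueError: setting an array element with a sequence.
bad = np.array([np.zeros((8, 64)), np.zeros((7, 64))], dtype='float32')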

Here’s the complete code for the Generator network:

import numpy as np
import torch
import torch.nn as nn

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        # input is Z(batch_size, 100, 1,1), going into a convolution
        self.transconv1 = nn.ConvTranspose2d(100, 64 * 8, 4, 1, 0, bias=False)
        self.batchnorm1 = nn.BatchNorm2d(64 * 8)
        self.ReLU = nn.ReLU(True)
        # state size. (ngf*8) x 4 x 4
        self.transconv2 = nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False)
        self.batchnorm2 = nn.BatchNorm2d(64 * 4)
        # state size. (ngf*4) x 8 x 8
        self.transconv3 = nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False)
        self.batchnorm3 = nn.BatchNorm2d(64 * 2)
        # state size. (ngf*2) x 16 x 16
        self.transconv4 = nn.ConvTranspose2d(64 * 2, 64, 4, 2, 1, bias=False)
        self.batchnorm4 = nn.BatchNorm2d(64)
        # state size. (ngf) x 32 x 32
        self.transconv5 = nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False)
        self.lstm5 = nn.LSTM(input_size=64, hidden_size=64, num_layers=10, bias=False, batch_first=True)
        # state size. (nc) x 64 x 64

    def forward(self, input):
        x = self.transconv1(input)
        x = self.batchnorm1(x)
        x = self.ReLU(x)
        x = self.transconv2(x)
        x = self.batchnorm2(x)
        x = self.ReLU(x)
        x = self.transconv3(x)
        x = self.batchnorm3(x)
        x = self.transconv4(x)
        x = self.batchnorm4(x)
        x = self.transconv5(x)
        R, G, B = self._preprocess(x)
        outR = self.lstm5(R)
        outG = self.lstm5(G)
        outB = self.lstm5(B)
        output = np.stack((outR, outG, outB), axis=1)

        return output

    def _preprocess(self, RGB):
        R = RGB[:,0,:,:]
        G = RGB[:,1,:,:]
        B = RGB[:,2,:,:]

        R = R.detach().cpu().numpy()
        G = G.detach().cpu().numpy()
        B = B.detach().cpu().numpy()
        print(R.shape)
        
        outputR = []
        outputG = []
        outputB = []

        for i in range(8, 64):
            outputR.append(R[i-8:i,0])
            outputG.append(G[i-8:i,0])
            outputB.append(B[i-8:i,0])

        outputR = np.array(outputR, dtype='float32')
        outputG = np.array(outputG, dtype='float32')
        outputB = np.array(outputB, dtype='float32')

        # each output has shape (n_samples, timesteps, features)
        return torch.from_numpy(outputR), torch.from_numpy(outputG), torch.from_numpy(outputB)
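In case it matters, this is roughly how I drive the generator; a minimal sketch assuming a standard DCGAN setup (netG and the batch size of 32 are placeholders, not my exact training loop):

import torch

netG = Generator()
z = torch.randn(32, 100, 1, 1)   # latent input Z: (batch_size, 100, 1, 1)
output = netG(z)                 # the ValueError above is raised during this forward pass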