Mixing Conv2D with LSTM

I have SCADA data (temporal data) for four variables and I want to do forecasting. So I decided to combine 2D convolutional layers to extract features, and then feed those features into an LSTM to capture the temporal information and make a prediction.

For the convolutional input I am creating a 12x12x4 matrix (in my problem 144 samples make up one day, and I want to predict the next sample). The number of channels is four because I have four variables.
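To make the shape concrete, this is a simplified sketch of how one such input sample can be built (random data here instead of the real SCADA values, and the exact reshaping in my code may differ slightly):

import numpy as np
import torch

# hypothetical stand-in: one day of SCADA data, 144 timesteps of 4 variables
day_samples = np.random.rand(144, 4).astype(np.float32)

# reshape each variable's 144 values into its own 12x12 grid -> (4, 12, 12), channels first
x = torch.from_numpy(day_samples).T.reshape(4, 12, 12)
print(x.shape)  # torch.Size([4, 12, 12])

The DataLoader then stacks these samples, so a batch has shape (batch_size, 4, 12, 12).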

After the Conv2D I am using an LSTM because I want to extract temporal characteristics. I get an error but I don't know why. Can you help me? Here are the network model and the training function.

Define the NN architecture:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class HybridNetwork(nn.Module):
    def __init__(self, n_hidden=128, n_layers=2, drop_prob=0.5, lr=0.001):
        super(HybridNetwork, self).__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr

        # convolutional layer (sees a 4x12x12 input tensor)
        self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (32 * 6 * 6 -> 64)
        self.fc = nn.Linear(32 * 6 * 6, 64)
        # LSTM over the extracted features
        self.lstm = nn.LSTM(64, n_hidden, n_layers, dropout=drop_prob, batch_first=True)
        # dropout layer
        self.dropout = nn.Dropout(drop_prob)
        # final linear layer (n_hidden -> 1 predicted value)
        self.fc2 = nn.Linear(n_hidden, 1)

    def forward(self, x, hidden):
        # input is a 4x12x12 image-like tensor
        x = self.pool(F.relu(self.conv1(x)))  # -> 32x6x6
        # flatten the 32x6x6 feature maps
        x = x.view(-1, 32 * 6 * 6)
        x = self.fc(x)
        r_output, hidden = self.lstm(x, hidden)
        out = self.dropout(r_output)
        out = out.contiguous().view(-1, self.n_hidden)
        out = self.fc2(out)
        return out, hidden

    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Create two new tensors with sizes n_layers x batch_size x n_hidden,
        # initialized to zero, for the hidden state and cell state of the LSTM
        weight = next(self.parameters()).data

        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_())

        return hidden

Initialize the NN:

model = HybridNetwork()
print(model)
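To make the problem easier to see, I traced the shapes up to the LSTM on a dummy batch (batch size 8 is arbitrary here; this is a simplified check, not my real data pipeline):

dummy = torch.randn(8, 4, 12, 12)              # batch of 8 samples, 4 channels, 12x12
feat = model.pool(F.relu(model.conv1(dummy)))  # -> (8, 32, 6, 6)
feat = feat.view(-1, 32 * 6 * 6)               # -> (8, 1152)
feat = model.fc(feat)                          # -> (8, 64), i.e. only 2 dimensions
print(feat.shape)                              # torch.Size([8, 64])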

Move the model to the GPU if CUDA is available:

if train_on_gpu:
    model.cuda()

And the training function is (batch_size, optimizer, criterion, clip and train_on_gpu are defined earlier in my code):

n_epochs = 2000

valid_loss_min = np.inf  # track change in validation loss
train_losses, test_losses, accuracy_losses = [], [], []

for epoch in range(1, n_epochs+1):

    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0

    h = model.init_hidden(batch_size)
    val_h = model.init_hidden(batch_size)

    ###################
    # train the model #
    ###################
    model.train()
    for data, target in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()

        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])

        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output, h = model(data, h)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss
        train_loss += loss.item()*data.size(0)

    ######################
    # validate the model #
    ######################
    model.eval()
    for data, target in valid_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()

        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        val_h = tuple([each.data for each in val_h])
        # forward pass: compute predicted outputs by passing inputs to the model
        output, val_h = model(data, val_h)
        # calculate the batch loss
        loss = criterion(output, target)
        # update average validation loss
        valid_loss += loss.item()*data.size(0)

    # calculate average losses
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)
    train_losses.append(train_loss)
    test_losses.append(valid_loss)
    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))

    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min, valid_loss))
        torch.save(model.state_dict(), 'data_b8_lr000.pt')
        valid_loss_min = valid_loss

And I have the following error:

RuntimeError: input must have 3 dimensions, got 2
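If I understand the nn.LSTM docs correctly, with batch_first=True it expects a 3-D input of shape (batch, seq_len, input_size), for example (seq_len=12 chosen arbitrarily):

lstm = nn.LSTM(64, 128, 2, batch_first=True)
ok = torch.randn(8, 12, 64)  # (batch=8, seq_len=12, features=64) -> works
out, hidden = lstm(ok)

But my features after self.fc are only (batch, 64), and I am not sure how I should arrange them into a sequence for the LSTM.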