LSTM RuntimeError: input must have 3 dimensions, got 4

I am trying to run simple RNN on my dataset. Which has dimensions trainX = (480, 3), trainY = (480,1). In order to pass the input to the model I converted 2D to 3D which changed (480,3) to (1, 480, 3).

I am getting the RuntimeError: input must have 3 dimensions, got 4. But i am already passing 3d.

Following is the snippet of my code:

class Model(torch.nn.Module):

    def __init__(self, input_size, rnn_hidden_size, output_size):

        super(Model, self).__init__()
        self.rnn = torch.nn.RNN(input_size, rnn_hidden_size,
                                num_layers=2, nonlinearity='relu',
                                batch_first=True)
        self.h_0 = self.initialize_hidden(rnn_hidden_size)

        self.linear = torch.nn.Linear(rnn_hidden_size, output_size)

    def forward(self, x):

        x = x.unsqueeze(0)
        self.rnn.flatten_parameters()
        out, self.h_0 = self.rnn(x, self.h_0)

        out = self.linear(out)

        # third_output = self.relu(self.linear3(second_output))
        # fourth_output = self.relu(self.linear4(third_output))
        # output = self.rnn(lineared_output)
        # output = self.dropout(output)
        return out

    def initialize_hidden(self, rnn_hidden_size):
        # n_layers * n_directions, batch_size, rnn_hidden_size
        return Variable(torch.randn(2, 1, rnn_hidden_size),
                        requires_grad=True)

def Train(X,  Y):
    input_size = 3
    hidden_size = 32
    output_size = 1
    
    model = Model(input_size, hidden_size, output_size)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
   
    trainX = torch.from_numpy(X).float() 
    trainY = torch.from_numpy(Y).float()
    
    trainX = trainX[:,np.newaxis]   # shape (samples, time_step, features)
    trainY = trainY[:,np.newaxis]
    
    for ep in range(5000): 

                model.train()
                optimizer.zero_grad()
                output = model(trainX)

                loss = criterion(output, trainY) 

                loss.backward()  
                optimizer.step()
                lossTrain = loss.data[0]
                

   

1 Like