Input.size(-1) must be equal to input_size. Expected 128, got 1


Hi everyone,

I am trying to build a bidirectional LSTM encoder-decoder to predict RUL on the C-MAPSS dataset, but while executing the code I get this error and I can't figure it out. I have already tried batch_first=True, but it doesn't help. Can anyone help? Here is my code:

# -*- coding: utf-8 -*-

import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Encoder_LSTM(nn.Module):
    """
    Class for implementing the Bidirectional LSTM Encoder structure.

    Parameters
    ----------
    input_dim : int
        No. of features in the input data.
    enc_dim : int
        Hidden dimension size for the encoder cell.
    dec_dim : int
        Hidden dimension size for the decoder cell.
    ablation : str
        Specifies the type of LSTM Encoder-Decoder architecture.
    bidirectional : bool, optional
        If True, implements a bidirectional LSTM structure; else, a unidirectional LSTM.
        Default: True

    Returns
    -------
    output : torch.Tensor
        Encoder hidden representations.
    hidden : torch.Tensor
        Hidden state of the final encoder LSTM cell.
    cell : torch.Tensor
        Cell state of the final encoder LSTM cell.
    """
    def __init__(self, input_dim, enc_dim, dec_dim, num_layers=2, ablation=None, bidirectional=True):
        super(Encoder_LSTM, self).__init__()
        self.input_dim = input_dim
        self.enc_dim = enc_dim
        self.dec_dim = dec_dim
        self.num_layers = num_layers

        # Define the LSTM layer. Note: the bidirectional flag is accepted above but
        # never passed to nn.LSTM, so this layer is actually a unidirectional,
        # two-layer stack.
        self.lstm = nn.LSTM(input_size=input_dim, hidden_size=enc_dim,
                            num_layers=num_layers, batch_first=True)
        # Project the concatenated states (enc_dim*2 features) down to dec_dim.
        self.fc_hidden = nn.Linear(enc_dim * 2, dec_dim)
        self.fc_cell = nn.Linear(enc_dim * 2, dec_dim)

    def forward(self, src):
        # src: (batch, seq_len, input_dim); output: (batch, seq_len, enc_dim)
        output, (hidden, cell) = self.lstm(src)
        # hidden/cell: (num_layers, batch, enc_dim); concatenate the two layers'
        # states along the feature dimension -> (1, batch, enc_dim*2), then project.
        hidden = self.fc_hidden(torch.cat((hidden[0:1], hidden[1:2]), dim=2))
        cell = self.fc_cell(torch.cat((cell[0:1], cell[1:2]), dim=2))
        return output, (hidden, cell)
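
For reference, the encoder runs fine on its own. A quick standalone shape check (a minimal sketch; input_dim=14, enc_dim=64, dec_dim=64 and the batch/window sizes are illustrative values, not my actual hyperparameters):

import torch

enc = Encoder_LSTM(input_dim=14, enc_dim=64, dec_dim=64)
src = torch.randn(8, 30, 14)          # (batch, seq_len, input_dim)
output, (hidden, cell) = enc(src)
print(output.shape)                   # torch.Size([8, 30, 64]) -> (batch, seq_len, enc_dim)
print(hidden.shape, cell.shape)       # torch.Size([1, 8, 64]) each, projected to dec_dim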

class Decoder_LSTM(nn.Module):
    """
    Class for implementing a Unidirectional LSTM Decoder cell.

    Parameters
    ----------
    output_dim : int
        No. of features in the output data from the instantaneous decoder cell.
        Default: 1 for RUL.
    enc_dim : int
        Hidden dimension size for the encoder cell.
    dec_dim : int
        Hidden dimension size for the decoder cell.

    Returns
    -------
    final_prediction : torch.Tensor
        Output from the LSTM decoder cell.
    hidden : torch.Tensor
        Hidden state of the instantaneous decoder LSTM cell.
    cell : torch.Tensor
        Cell state of the instantaneous decoder LSTM cell.
    """
    def __init__(self, output_dim, enc_dim, dec_dim):
        super(Decoder_LSTM, self).__init__()
        self.output_dim = output_dim
        self.dec_dim = dec_dim
        # This LSTM expects enc_dim*2 features per time step as input.
        self.lstm = nn.LSTM(input_size=enc_dim * 2, hidden_size=dec_dim, batch_first=True)
        self.relu = nn.ReLU()
        self.linear = nn.Linear(dec_dim, output_dim)

    def forward(self, dec_input, hidden, cell):
        # dec_input must have shape (batch, steps, enc_dim*2) to match input_size.
        output, (hidden, cell) = self.lstm(dec_input, (hidden, cell))
        out = self.relu(output)
        final_prediction = self.linear(out)
        return final_prediction, hidden, cell
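
Running the same kind of check on the decoder reproduces my exact error: its nn.LSTM is built with input_size = enc_dim*2 (128 when enc_dim=64), so any decoder input whose last dimension is 1 fails (again a sketch with the same illustrative sizes):

dec = Decoder_LSTM(output_dim=1, enc_dim=64, dec_dim=64)
hidden = torch.zeros(1, 8, 64)        # (1, batch, dec_dim), like the states the encoder returns
cell = torch.zeros(1, 8, 64)
dec_input = torch.randn(8, 1, 1)      # (batch, 1 step, 1 feature), e.g. a previous RUL prediction
pred, hidden, cell = dec(dec_input, hidden, cell)
# RuntimeError: input.size(-1) must be equal to input_size. Expected 128, got 1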

[2021-11-14 19:41:27,826] Trial 0 failed because of the following error: RuntimeError('input.size(-1) must be equal to input_size. Expected 128, got 1')
Traceback (most recent call last):
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/_optimize.py", line 213, in _run_trial
    value_or_values = func(trial)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/main_trial.py", line 108, in objective
    pred_l, actual_l, trn_loss = train_test.training_loop(training_data, model, device,
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/train_test.py", line 65, in training_loop
    y_pred = model(s, label, use_teacher_forcing)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/models.py", line 135, in forward
    output = ablation(src, trg, use_teacher_forcing)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/models.py", line 97, in ablation_1
    out, hidden, cell = self.decoder_lstm(decoder_input, hidden, cell)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/modules/encoder_decoder.py", line 91, in forward
    output, (hidden,cell) = self.lstm(dec_input, (hidden,cell))
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 689, in forward
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 632, in check_forward_args
    ):
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 205, in check_input
    if self.input_size != input.size(-1):
RuntimeError: input.size(-1) must be equal to input_size. Expected 128, got 1
Traceback (most recent call last):
  File "/var/folders/r5/qgh43p4j4dqgtxt1w69n502r0000gn/T/ipykernel_17568/1817966163.py", line 1, in <module>
    runfile('/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/main_trial.py', wdir='/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder')
  File "/opt/miniconda3/lib/python3.9/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/pydev_umd.py", line 167, in runfile
    execfile(filename, namespace)
  File "/opt/miniconda3/lib/python3.9/site-packages/debugpy/_vendored/pydevd/_pydev_imps/_pydev_execfile.py", line 25, in execfile
    exec(compile(contents + "\n", file, 'exec'), glob, loc)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/main_trial.py", line 140, in <module>
    study.optimize(objective, n_trials=200)
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/study.py", line 400, in optimize
    _optimize(
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/_optimize.py", line 66, in _optimize
    _optimize_sequential(
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/_optimize.py", line 163, in _optimize_sequential
    trial = _run_trial(study, func, catch)
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/_optimize.py", line 264, in _run_trial
    raise func_err
  File "/opt/miniconda3/lib/python3.9/site-packages/optuna/study/_optimize.py", line 213, in _run_trial
    value_or_values = func(trial)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/main_trial.py", line 108, in objective
    pred_l, actual_l, trn_loss = train_test.training_loop(training_data, model, device,
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/train_test.py", line 65, in training_loop
    y_pred = model(s, label, use_teacher_forcing)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/models.py", line 135, in forward
    output = ablation(src, trg, use_teacher_forcing)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/models.py", line 97, in ablation_1
    out, hidden, cell = self.decoder_lstm(decoder_input, hidden, cell)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/hussamaleem/Documents/SEEM Assignment/Shared Temporal Attention augmented LSTM Encoder Decoder/modules/encoder_decoder.py", line 91, in forward
    output, (hidden,cell) = self.lstm(dec_input, (hidden,cell))
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 689, in forward
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 632, in check_forward_args
    ):
  File "/opt/miniconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 205, in check_input
    if self.input_size != input.size(-1):
RuntimeError: input.size(-1) must be equal to input_size. Expected 128, got 1

This is the complete error.
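
My current guess, which I have not verified against the rest of my pipeline, is that the decoder consumes the previous prediction (one feature per step), so its LSTM would need input_size=output_dim rather than enc_dim*2. A minimal sketch of that hypothetical variant, with the same illustrative sizes as above:

import torch
import torch.nn as nn

# Hypothetical sizing: one input feature per decoder step (output_dim=1)
# instead of enc_dim*2; dec_dim=64 and the batch of 8 are illustrative values.
lstm = nn.LSTM(input_size=1, hidden_size=64, batch_first=True)
dec_input = torch.randn(8, 1, 1)      # (batch, 1 step, 1 feature)
h0 = torch.zeros(1, 8, 64)            # (1, batch, dec_dim)
c0 = torch.zeros(1, 8, 64)
out, (h, c) = lstm(dec_input, (h0, c0))
print(out.shape)                      # torch.Size([8, 1, 64])

Is that the right change, or should decoder_input carry the encoder's output features instead?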