RuntimeError: input.size(-1) must be equal to input_size

Hi everyone, I am trying to train an LSTM on a set of MIDI files and I keep running into a dimension error that I can't figure out. Here is my code:

import midi
import os
from midi_to_statematrix import *
import glob
import numpy as np
import torch

batch_width = 10 # number of sequences in a batch
batch_len = 16*8 # length of each sequence
division_len = 16 # interval between possible start locations

piece = 0

pieces = loadPieces("MIDIS_train")
input = torch.FloatTensor(MusicToTensor(pieces))
lstm_input_size = batch_len
h1 = 128
hidden = torch.zeros(65, n_hidden, 2)
num_train = 2
batch_size = 1
num_epochs = 1000
print(np.shape(input))
print(np.shape(hidden))

def loadPieces(dirpath):

    pieces = {}

    for fname in os.listdir(dirpath):
        if fname[-4:] not in ('.mid', '.MID'):
            continue

        name = fname[:-4]

        tensor = midiToNoteStateMatrix(os.path.join(dirpath, fname))
        if len(tensor) < batch_len:
            continue

        pieces[name] = tensor
        print("Loaded {}".format(name))

    return pieces

def MusicToTensor(piece):

    dirpath = "MIDIS_train"
    pieces = {}

    for fname in os.listdir(dirpath):
        if fname[-4:] not in ('.mid', '.MID'):
            continue

    matrix = midiToNoteStateMatrix(os.path.join(dirpath, fname))
    tensor = torch.FloatTensor(matrix)
    return tensor

import torch.nn as nn

Here we define our model as a class (a small shape sanity check follows the class definition):

class LSTM(nn.Module):

    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=1,
                 num_layers=2):
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers

        # Define the LSTM layer
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers)

        # Define the output layer
        self.linear = nn.Linear(self.hidden_dim, output_dim)

    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        # Forward pass through the LSTM layer
        # shape of lstm_out: [seq_len, batch_size, hidden_dim]
        # shape of self.hidden: (a, b), where a and b both
        # have shape (num_layers, batch_size, hidden_dim).
        lstm_out, self.hidden = self.lstm(input.view(len(input), self.batch_size, -1))

        # Only take the output from the final timestep
        # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction
        y_pred = self.linear(lstm_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)
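
To check my understanding of the shapes this model expects, here is a minimal sanity check with made-up numbers (not my real data); if I'm reading the nn.LSTM docs right, the last dimension of the input has to match input_dim:

# Hypothetical sanity check, all numbers are made up for illustration
dummy_model = LSTM(input_dim=128, hidden_dim=128, batch_size=2, output_dim=1, num_layers=2)
dummy_input = torch.zeros(16, 2, 128)  # (seq_len, batch, input_dim)
out = dummy_model(dummy_input)         # last dim matches input_dim, so this runs
print(out.shape)                       # torch.Size([2])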

model = LSTM(lstm_input_size, h1, batch_size=num_train, output_dim=output_dim, num_layers=num_layers)

loss_fn = torch.nn.MSELoss(size_average=False)

optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)

#####################
# Train model
#####################

import torch.optim as optim

Device

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

Model instance

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

def get_accuracy(logit, target, batch_size):
    ''' Obtain accuracy for training round '''
    corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
    accuracy = 100.0 * corrects / batch_size
    return accuracy.item()

for epoch in range(num_epochs):  # loop over the dataset multiple times
    train_running_loss = 0.0
    train_acc = 0.0
    model.train()

    # zero the parameter gradients
    optimizer.zero_grad()

    # reset hidden states
    model.hidden = model.init_hidden()

    # get the inputs
    inputs = input
    inputs = inputs.view(batch_size, 65*78*2)

    # forward + backward + optimize
    outputs = model(inputs)

    loss = criterion(outputs)
    loss.backward()
    optimizer.step()

    train_running_loss += loss.detach().item()
    train_acc += get_accuracy(outputs, labels, batch_size)

    model.eval()
    print('Epoch:  %d | Loss: %.4f | Train Accuracy: %.2f'
          % (epoch, train_running_loss / i, train_acc / i))

The input is a matrix with 65 rows and 78 columns, plus a vector of 2 integers for each element of the matrix (so the tensor has shape (65, 78, 2)). I have tried adjusting all the dimensions, but I keep getting the error:
"RuntimeError: input.size(-1) must be equal to input_size. Expected 128, got 5070"
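
As far as I can tell, 5070 = 65 * 78, so the last dimension of my flattened input does not match the input_size of 128 that I passed to nn.LSTM. A minimal sketch of what I think is happening (the shapes here are my guesses, not a verified trace):

# What I think is going on, with guessed shapes
lstm = nn.LSTM(input_size=128, hidden_size=128, num_layers=2)
bad_input = torch.zeros(2, 1, 65 * 78)   # last dim is 5070, not 128
# lstm(bad_input)                        # raises: input.size(-1) must be equal to input_size. Expected 128, got 5070
good_input = torch.zeros(2, 1, 128)      # last dim matches input_size
output, (h, c) = lstm(good_input)        # runs without error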

Could you please post the complete error traceback, or point to the line that is generating the error?