Hi there, I am new to PyTorch and I am trying to use an LSTM network to predict lane-following vs. lane-changing behavior for autonomous driving. I am using data from the NGSIM database, and I have 3 classes, which I have encoded as one-hot vectors. All of my predictions keep landing on the same class, and I think something is fundamentally wrong with my code. Any suggestions would be greatly appreciated. Thank you

Here is a part of my code (based on code I found on the internet):

```
# ---- Hyper-parameters -------------------------------------------------
num_train = 3000
h1 = 32                  # LSTM hidden-state size
output_dim = 3           # three driving-behavior classes
num_layers = 5
learning_rate = 1e-3
num_epochs = 30

# Feed the sequence one scalar feature per time step.
per_element = True
lstm_input_size = 1 if per_element else input_size

# Reshape training data to (seq_len, batch, feature) for nn.LSTM.
# NOTE(review): assumes `xtrain` and `input_size` are defined earlier — confirm.
X_train = torch.from_numpy(xtrain).type(torch.Tensor)
X_train = X_train.view([input_size, -1, 1])

# Arrange labels as one-hot vectors: samples 0-999 -> class 0,
# 1000-1999 -> class 1, 2000-2999 -> class 2.
ytrain = np.zeros([3000, 3])
ytrain[np.arange(3000), np.arange(3000) // 1000] = 1
y_train = torch.from_numpy(ytrain).type(torch.Tensor).view(-1)
class LSTM(nn.Module):
    """Stacked LSTM followed by a linear read-out on the final time step.

    Args:
        input_dim: number of features per time step fed to the LSTM.
        hidden_dim: size of the LSTM hidden state.
        batch_size: fixed batch size the hidden/cell state is allocated for.
        output_dim: number of output units (here: 3 classes).
        num_layers: number of stacked LSTM layers.
    """

    def __init__(self, input_dim, hidden_dim, batch_size, output_dim, num_layers):
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        # Define the LSTM layer; default layout is (seq_len, batch, feature).
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers)
        # Define the output layer mapping the last hidden output to class scores.
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        # Start from a zeroed state so forward() can always consume self.hidden.
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Fresh (h_0, c_0), each of shape (num_layers, batch, hidden_dim).
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        # BUG FIX: the original never passed self.hidden into the LSTM, so
        # init_hidden() and the per-epoch reset in the training loop had no
        # effect; thread the state tuple through explicitly.
        lstm_out, self.hidden = self.lstm(
            input.view(len(input), self.batch_size, -1), self.hidden)
        # Use only the final time step's output for the prediction.
        y_pred = self.linear(lstm_out[-1].view(self.batch_size, -1))
        # Flattened to (batch * output_dim,) to match the caller's flattened
        # one-hot target vector.
        return y_pred.view(-1)
# Build model, loss, and optimiser.
model = LSTM(lstm_input_size, h1, batch_size=num_train, output_dim=output_dim,
             num_layers=num_layers)
# NOTE(review): for 3-class classification, nn.CrossEntropyLoss on raw class
# scores of shape (batch, 3) with integer class-index targets is the standard
# choice; MSE on a flattened one-hot vector gives weak gradients and commonly
# collapses every prediction onto a single class — likely the reported symptom.
loss_fn = nn.MSELoss()
optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)

# BUG FIX: `hist` was used but never defined (NameError on the first epoch).
hist = np.zeros(num_epochs)

for t in range(num_epochs):
    # Reset the hidden state so epochs do not share recurrent state/graphs.
    model.hidden = model.init_hidden()
    y_pred = model(X_train)
    # BUG FIX: nn.MSELoss signature is (input, target) — prediction first.
    loss = loss_fn(y_pred, y_train)
    print("Epoch ", t, "\nMSE: ", loss.item())
    hist[t] = loss.item()
    # Clear stale gradients, backpropagate, then take an optimiser step.
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()
```