class RNN(nn.Module):
    """Two-layer LSTM classifier.

    Takes a batch-first sequence tensor of shape (batch, seq_len, 6) and
    returns the LSTM output at the last time-step, shape (batch, 6),
    used as the per-class scores.
    """

    def __init__(self):
        # NOTE: the pasted code had `**init**` — markdown mangling of the
        # `__init__` dunder; restored here so the class actually constructs.
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(
            input_size=6,      # features per time-step
            hidden_size=6,     # also the number of output classes
            num_layers=2,
            batch_first=True,  # tensors are (batch, seq, feature)
        )

    def forward(self, x):
        # None -> zero-initialized hidden and cell states.
        out, (h_n, h_c) = self.rnn(x, None)
        # Debug `print(out)` removed — it ran on every forward pass.
        return out[:, -1, :]  # output at the last time-step: (batch, 6)

# Convert the raw data (X, Y built earlier, outside this excerpt) to tensors.
X = torch.FloatTensor(X)

# CrossEntropyLoss requires integer class-index targets, hence LongTensor.
y = torch.LongTensor(Y)

rnn = RNN()

optimizer = torch.optim.Adam(rnn.parameters(), lr=0.001)

# Expects raw (unnormalized) scores of shape (batch, n_classes) and
# targets of shape (batch,) holding class indices.
loss_func = nn.CrossEntropyLoss()

# Train one sequence at a time for 500 epochs.
for epoch in range(500):
    for i, item in enumerate(X):
        item = item.unsqueeze(0)   # add batch dim: (1, seq_len, 6)
        output = rnn(item)         # class scores: (1, 6)

        # CrossEntropyLoss needs a target of shape (batch,) = (1,)
        # containing a single class index.  Here y[i] has shape (2,)
        # (e.g. tensor([0, 0])), which is exactly the size-mismatch error
        # reported.  Keep one label per sequence.
        # NOTE(review): assumes the label is duplicated inside y[i] —
        # confirm against how Y is constructed upstream.
        target = y[i].reshape(-1)[:1]

        loss = loss_func(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if epoch % 5 == 0:
        # loss is a 0-dim tensor; .item() yields the Python float
        # (replaces np.average(loss.detach()) — numpy was never needed).
        print('Loss: ', loss.item())

I am using somebody else's code and I am getting the error mentioned in the subject.

The shapes of `output` and `target` are the following:

output.shape = torch.Size([1, 6])

target.shape = torch.Size([2])

output = tensor([[ 0.0463, -0.0402, -0.0437, 0.0302, -0.0994, 0.0320]], grad_fn=&lt;SliceBackward&gt;)

target = tensor([0, 0])