Am I missing a shape? Conversion? Argument: 'target'

I am not sure how to troubleshoot this issue. If someone can advise on a solution that would be great, but I would also appreciate some tips on how to troubleshoot this kind of problem so that if it occurs again I can quickly identify the cause.

import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

dataset_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data"

column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
                'Acceleration', 'Model Year', 'Origin'] 
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values="?", comment='\t',
                          sep=" ", skipinitialspace=True)

# work on a copy of the raw data
dataset = raw_dataset.copy()
dataset.isna().sum()
dataset = dataset.dropna()

origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0

train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)

train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')

# normalization statistics computed on the training set
# (this definition was missing from the snippet above)
train_stats = train_dataset.describe().transpose()

def norm(x):
    return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

train_x = torch.Tensor(normed_train_data.values).float()
train_y = torch.Tensor(train_labels.values).float()

class Model(nn.Module):
    # define nn
    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(len(train_dataset.keys()), 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, X):
        X = F.relu(self.fc1(X))
        X = F.relu(self.fc2(X))
        X = self.fc3(X)

        return X
    
model = Model()

criterion = nn.MSELoss()  # mean squared error loss
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.001)

for epoch in range(1000):
    optimizer.zero_grad()
    out = model(train_x)
    loss = criterion(train_y)
    loss.backward()
    optimizer.step()

    if epoch % 500 == 0:
        print('number of epoch', epoch, 'loss', loss.data)

Error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-53-8a9d94a7f74d> in <module>
      2         optimizer.zero_grad()
      3         out = model(train_x)
----> 4         loss = criterion(train_y)
      5         loss.backward()
      6         optimizer.step()

~\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

TypeError: forward() missing 1 required positional argument: 'target'

nn.MSELoss expects two arguments: the output of your model and the corresponding target.
Based on your code, you should calculate the loss as loss = criterion(out, train_y).
Also, make sure out and train_y have the same shape.
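
For reference, here is a minimal sketch of the corrected training loop. It assumes model, train_x, and train_y are built exactly as in your code; the .view(-1, 1) reshape is my addition so the target matches the model's (N, 1) output and avoids silent broadcasting:

import torch
import torch.nn as nn

# model, train_x, train_y defined as in the question
criterion = nn.MSELoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.001)

# reshape the target from (N,) to (N, 1) so it matches the model output
train_y = train_y.view(-1, 1)

for epoch in range(1000):
    optimizer.zero_grad()
    out = model(train_x)            # forward pass -> shape (N, 1)
    loss = criterion(out, train_y)  # MSELoss takes (input, target)
    loss.backward()
    optimizer.step()

    if epoch % 500 == 0:
        print('epoch', epoch, 'loss', loss.item())

As a general troubleshooting tip for this kind of error: the traceback points at the criterion call, so check the expected signature of the loss (here, two tensors) and print out.shape and train_y.shape right before that line to spot argument or shape mismatches quickly.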