Mat1 and mat2 shapes cannot be multiplied (16x13 and 16x1)

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
from tqdm.auto import tqdm

class WineDataset(Dataset):
  def __init__(self):
    # First column of wine.csv is the class label; the remaining 13 columns are features
    xy = np.loadtxt('/content/wine.csv', delimiter=',', dtype=np.float32, skiprows=1)
    self.x = torch.from_numpy(xy[:, 1:])   # features, shape (n_samples, 13)
    self.y = torch.from_numpy(xy[:, [0]])  # labels, shape (n_samples, 1)
    self.n_samples = xy.shape[0]

  def __getitem__(self, index):
    return self.x[index], self.y[index]

  def __len__(self):
    return self.n_samples

dataset = WineDataset()
# `loader` is used in the training loop below but was never defined;
# batch_size=16 matches the 16-row mat1 in the error message
loader = DataLoader(dataset, batch_size=16, shuffle=True)
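As a sanity check (a sketch, not something the original code requires), printing the shapes the pipeline produces makes this kind of mismatch easy to spot before training:

# Hypothetical shape check: inspect one sample and one batch
sample_x, sample_y = dataset[0]
print(sample_x.shape, sample_y.shape)  # torch.Size([13]) torch.Size([1])
batch_x, batch_y = next(iter(loader))
print(batch_x.shape, batch_y.shape)    # torch.Size([16, 13]) torch.Size([16, 1])

The (16, 13) batch matches mat1 in the error message, so the data side is consistent with a 13-feature input.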


class Model(nn.Module):
  def __init__(self):
    super().__init__()
    self.fc1 = nn.Linear(13, 30)  # expects 13 input features
    self.fc2 = nn.Linear(30, 1)   # expects the 30 features produced by fc1

  def forward(self,x):
    out = self.fc1(x)
    out = torch.nn.functional.relu(out)
    out = self.fc2(x)
    return out


model = Model()
loss = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())

train_losses = []
for epoch in range(5):
  train_loss = []
  for x, y in tqdm(loader):
    optimizer.zero_grad()
    output = model(x)
    loss_ = loss(output, y)
    loss_.backward()
    train_loss.append(loss_.item())
    optimizer.step()
  # append rather than index-assign: train_losses starts empty,
  # so train_losses[i] = ... would raise an IndexError
  train_losses.append(np.mean(train_loss))

You did not specify the size of your input, but this error means a layer's weight size does not match the size of the input it receives. Your model expects an input of size (N, *, 13), where N is the batch size, and mat1 in the error is (16, 13), so the batch reaching fc1 looks fine. The mismatch is most likely inside forward: self.fc2(x) feeds the raw 13-feature input into fc2, which expects the 30-feature output of fc1. I suspect it should be self.fc2(out).
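A minimal sketch of the corrected forward pass (only the marked line changes; the rest of the model stays as posted):

def forward(self, x):
  out = self.fc1(x)                    # (N, 13) -> (N, 30)
  out = torch.nn.functional.relu(out)
  out = self.fc2(out)                  # pass fc1's output, not x: (N, 30) -> (N, 1)
  return out

With that one-line change, each (16, 13) batch flows through fc1 and fc2 without a shape mismatch.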