TypeError: linear(): argument 'input' (position 1) must be Tensor, not NoneType

Here is the model:

class Net(torch.nn.Module):
    """MPNN-style graph regressor: NNConv + GRU message passing, Set2Set readout.

    Fix for the reported ``TypeError: linear(): argument 'input' ... NoneType``:
    ``NNConv`` applies its edge network ``nn`` to the per-edge attributes, but
    ``forward`` never passed any, so ``edge_attr`` was ``None`` and the first
    ``Linear`` inside ``nn`` received ``None``. The Data objects shown carry
    ``weight=[16]`` (one scalar per edge) and no ``edge_attr``, so we feed
    ``data.weight`` reshaped to ``[num_edges, 1]`` and size the edge network's
    input accordingly (1 feature per edge, not 5).
    """

    def __init__(self):
        super().__init__()
        # Node features are 1-dimensional (x=[14, 1]) -> embed to `dim`.
        self.lin0 = torch.nn.Linear(1, dim, dtype=torch.float64)

        # Edge network: maps each edge's feature vector to a dim x dim matrix.
        # Input size is 1 because the only per-edge feature is the scalar
        # `weight` (NOTE(review): was 5 — confirm no 5-dim edge_attr exists).
        nn = Sequential(Linear(1, 128, dtype=torch.float64), ReLU(), Linear(128, dim * dim, dtype=torch.float64))
        self.conv = NNConv(dim, dim, nn, aggr='mean')
        self.gru = GRU(dim, dim)

        self.set2set = Set2Set(dim, processing_steps=3)
        self.lin1 = torch.nn.Linear(2 * dim, dim, dtype=torch.float64)
        self.lin2 = torch.nn.Linear(dim, 1, dtype=torch.float64)

    def forward(self, data):
        """Return one scalar prediction per graph in the batch (shape [num_graphs])."""
        out = F.relu(self.lin0(data.x))
        h = out.unsqueeze(0)

        # NNConv requires per-edge attributes of shape [num_edges, edge_dim];
        # reshape the scalar `weight` to [16, 1] and match lin0's float64 dtype.
        edge_attr = data.weight.view(-1, 1).to(torch.float64)

        # Three rounds of message passing with a GRU state update.
        for i in range(3):
            m = F.relu(self.conv(out, data.edge_index, edge_attr))
            out, h = self.gru(m.unsqueeze(0), h)
            out = out.squeeze(0)

        # Set2Set pools node states to a [num_graphs, 2*dim] graph embedding.
        out = self.set2set(out, data.batch)
        out = F.relu(self.lin1(out))
        out = self.lin2(out)
        return out.view(-1)

Here are the dimensions of the data: Data(edge_index=[2, 16], weight=[16], x=[14, 1], catalog=[1], compiled=[1], simulated=[1], Utility=[1], Objectives=[5])

out is of type torch.float64 and data.edge_index is torch.int64, as it's supposed to be — so why am I still getting TypeError: linear(): argument 'input' (position 1) must be Tensor, not NoneType?

Lastly here is the run code:

def train(epoch):
    """Run one optimization pass over ``train_loader``.

    Returns the MSE loss averaged over all graphs in the training dataset.
    ``epoch`` is accepted for interface parity with the caller but unused.
    """
    model.train()
    total_loss = 0

    for batch in train_loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        loss = F.mse_loss(model(batch), batch.Utility)
        loss.backward()
        # Weight by graphs-per-batch so the final mean is per-graph.
        total_loss += loss.item() * batch.num_graphs
        optimizer.step()
    return total_loss / len(train_loader.dataset)


def test(loader):
    """Evaluate the model on ``loader`` and return the per-sample MAE.

    Predictions and targets are both rescaled by ``std`` so the error is
    reported in the original (de-normalized) units.
    """
    model.eval()
    total_abs_error = 0

    for batch in loader:
        batch = batch.to(device)
        residual = model(batch) * std - batch.Utility * std
        total_abs_error += residual.abs().sum().item()
    return total_abs_error / len(loader.dataset)


# Main loop: train for 300 epochs, tracking the best validation MAE and the
# test MAE measured at the epoch where validation last improved.
best_val_error = None
for epoch in range(1, 301):
    lr = scheduler.optimizer.param_groups[0]['lr']
    loss = train(epoch)
    val_error = test(val_loader)
    scheduler.step(val_error)

    # Only touch the (expensive) test set when validation improves.
    improved = best_val_error is None or val_error <= best_val_error
    if improved:
        best_val_error = val_error
        test_error = test(test_loader)

    print(f'Epoch: {epoch:03d}, LR: {lr:7f}, Loss: {loss:.7f}, '
          f'Val MAE: {val_error:.7f}, Test MAE: {test_error:.7f}')