Weird train/test accuracy and RuntimeError in a PyTorch graph neural network model

I am new to PyTorch, and while testing my graph neural network model I got some very weird accuracy values. I have looked through related resources on the internet and on this site, but I cannot figure out what the problem is. My model is:

import csv

import torch
import torch.nn.functional as F
from torch_geometric.nn import ChebConv

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = ChebConv(132, 32, 20)  # input layer: 132 features in, 32 out, K=20
        self.conv2 = ChebConv(32, 12, 20)   # output layer: 12 values per node

    def forward(self, data):  # define forward pass
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.conv2(x, edge_index)
        return x

model = Net().to(device)
print(model)
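
To sanity-check the output shape, I run the model on random inputs shaped like my real data (a minimal sketch; the sizes match the Data object shown at the end of this post):

# Quick shape check with random inputs: 10 nodes, 132 features, 18 edges
import torch
from torch_geometric.data import Data

x = torch.randn(10, 132)
edge_index = torch.randint(0, 10, (2, 18))
out = model(Data(x=x, edge_index=edge_index).to(device))
print(out.shape)  # expected: torch.Size([10, 12]) -- 12 values per node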

The training and test functions are:

optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4)

def train():
    model.train()
    cum_loss = 0
    for data in dataset:  # Iterate over the training dataset, one graph at a time.
        data = data.to(device)
        out = model(data)  # Perform a single forward pass.
        loss = F.mse_loss(out, data.y)  # Compute the loss.
        cum_loss += loss
        loss.backward()  # Derive gradients.
        optimizer.step()  # Update parameters based on gradients.
        optimizer.zero_grad()  # Clear gradients.
    print(f'Epoch: {epoch:03d}, loss: {cum_loss/len(dataset):.4f}')
    csvwriter.writerow([epoch, (cum_loss/len(dataset)).cpu().detach().numpy()])
filename = "output1.1.csv"
filename_label = "output_label1.csv"
fields = ['a', 'b', ...]

# Test function
def test(loader):
    model.eval()
    with open(filename, 'w') as csvfile, open(filename_label, 'w') as csvoutput:
        csvwriter = csv.writer(csvfile)
        csvoutwriter = csv.writer(csvoutput)
        csvwriter.writerow(fields)
        csvoutwriter.writerow(fields)
        correct = 0
        for data in loader:
            data = data.to(device)
            out = model(data)
            correct += int((out.argmax(-1) == data.y.view(-1, 1)).sum())
            for i in range(10):  # each graph has 10 nodes
                csvwriter.writerow(out.cpu().detach().numpy()[i, :])
                csvoutwriter.writerow(data.y.cpu().detach().numpy()[i, :])
    return correct / len(loader)  # Derive the ratio of correct predictions.
model.train()
filename_train = "trainloss1.1.csv"
with open(filename_train, 'w') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(['Epoch', 'Loss'])

    for epoch in range(300):
        train()
        test_acc = test(dataset)
        print(f'Test Acc: {test_acc:.4f}')
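
As an aside: in train() I accumulate cum_loss += loss, i.e. the loss tensor itself. From what I understand, accumulating loss.item() instead keeps a plain Python float and avoids holding on to each iteration's autograd graph; a sketch of that variant of my loop (same names as above):

def train():
    model.train()
    cum_loss = 0.0
    for data in dataset:
        data = data.to(device)
        out = model(data)
        loss = F.mse_loss(out, data.y)
        cum_loss += loss.item()  # .item() gives a Python float; no graph is retained
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    # cum_loss is now a float, so no .cpu().detach() is needed when logging
    print(f'Epoch: {epoch:03d}, loss: {cum_loss/len(dataset):.4f}')
    csvwriter.writerow([epoch, cum_loss/len(dataset)])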

The accuracy I get is very low:

Epoch: 000, loss: 4.2021
Test Acc: 9.9178

Previously, I had written the correct count in the test function as

correct += int((out.argmax(-1)==data.y).sum())

which resulted in

RuntimeError: The size of tensor a (10) must match the size of tensor b (12) at non-singleton dimension 1

To get past this runtime error, I changed it to

correct += int((out.argmax(-1)==data.y.view(-1,1)).sum())

but that gives the very low accuracy I showed above. How can I fix this problem? Am I doing something wrong here? Each Data object in my loader has these sizes:

 Data(edge_attr=[18], edge_index=[2, 18], x=[10, 132], y=[10, 12])
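
For concreteness, this is how I understand the shapes involved (a sketch with dummy tensors of the same sizes as one Data object above):

import torch

out = torch.randn(10, 12)  # model output: one 12-vector per node
y = torch.randn(10, 12)    # data.y has the same shape

pred = out.argmax(-1)      # shape [10]: one predicted index per node
# pred == y fails: [10] cannot broadcast against [10, 12] (10 vs 12 at dim 1)
print((pred == y.view(-1, 1)).shape)  # torch.Size([120, 10]) after broadcasting

So I suspect my view(-1, 1) comparison is counting the wrong things, but I am not sure what the correct fix is.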