Accuracy remains constant in a GNN model

Hi,

I have a graph neural network model as follows:

import torch
from torch.nn import LayerNorm, Linear, ReLU
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch_geometric.nn import DeepGCNLayer, GENConv


class DeeperGCN(torch.nn.Module):
    def __init__(self, hidden_channels, num_layers):
        super(DeeperGCN, self).__init__()

        # `data` is a global Data object; only its feature sizes are used here.
        self.node_encoder = Linear(data.x.size(-1), hidden_channels)
        self.edge_encoder = Linear(data.edge_attr.size(-1), hidden_channels)

        self.layers = torch.nn.ModuleList()
        for i in range(1, num_layers + 1):
            conv = GENConv(hidden_channels, hidden_channels, aggr='softmax',
                           t=1.0, learn_t=True, num_layers=2, norm='layer')
            # conv = GENConv(hidden_channels, hidden_channels, aggr='add',
            #                num_layers=2, norm='layer')
            norm = LayerNorm(hidden_channels, elementwise_affine=True)
            act = ReLU(inplace=True)

            layer = DeepGCNLayer(conv, norm, act, block='res+', dropout=0.5,
                                 ckpt_grad=i % 3)
            self.layers.append(layer)

        self.lin = Linear(hidden_channels, 2)

    def forward(self, x, edge_index, edge_attr):
        x = self.node_encoder(x)
        edge_attr = self.edge_encoder(edge_attr)

        # 'res+' blocks apply norm/act/dropout before the conv,
        # so the very first conv is run directly on the encoded features.
        x = self.layers[0].conv(x, edge_index, edge_attr)

        for layer in self.layers[1:]:
            x = layer(x, edge_index, edge_attr)

        x = self.layers[0].act(self.layers[0].norm(x))

        return self.lin(x)


model = DeeperGCN(hidden_channels=64, num_layers=10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20,
                              min_lr=0.00001)
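
Just to sanity-check the shapes, I run the model once on random inputs, roughly like this (the node/edge counts below are placeholders; the feature sizes are read from the same global data object the encoders use):

# Rough smoke test on random inputs; this only checks shapes, not real data.
num_nodes, num_edges = 50, 200
x = torch.randn(num_nodes, data.x.size(-1))
edge_index = torch.randint(0, num_nodes, (2, num_edges))
edge_attr = torch.randn(num_edges, data.edge_attr.size(-1))

model.eval()
out = model(x, edge_index, edge_attr)
print(out.shape)  # expect [num_nodes, 2] node-level logits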

The data is heavily imbalanced: roughly 99% of the nodes are labelled 1 and only 1% are labelled 0, so I tried to use class weights in the cross-entropy loss.
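For context, the 99/1 split comes from counting the labels over the training graphs, roughly like this (just a sketch, reusing the same train_loader as below):

# Rough check of the class balance across the training graphs.
counts = torch.zeros(2)
for data in train_loader:
    counts += torch.bincount(data.y.view(-1), minlength=2).float()
print(counts / counts.sum())  # roughly [0.01, 0.99] for my data

The training and evaluation code: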

def train(epoch):
    model.train()
    loss_all = 0
    num_nodes = 0
    for data in train_loader:
        # data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.edge_attr)
        loss = torch.nn.functional.cross_entropy(
            out.squeeze(), data.y.squeeze(),
            weight=torch.Tensor([1, 99]), reduction='mean')
        loss.backward()
        loss_all += loss.item() * data.num_nodes
        num_nodes += data.num_nodes
        optimizer.step()
    # Step the scheduler on the epoch-average loss rather than the last batch loss.
    scheduler.step(loss_all / num_nodes)

    return loss_all / num_nodes

@torch.no_grad()
def test(loader):
    model.eval()
    test_correct = 0
    pred_len = 0
    for data in loader:
        out = model(data.x, data.edge_index, data.edge_attr)
        pred = out.argmax(dim=-1)  # use the class with the highest score
        test_correct += int((pred == data.y).sum())  # compare with ground-truth labels
        pred_len += pred.numel()
    return test_correct / pred_len

for epoch in range(1, 250):
    loss = train(epoch)
    test_acc = test(test_loader)
    print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, '
          f'Test Accuracy: {test_acc:.4f}')

I thought that after weighting I would see the test accuracy increase over the epochs, but it remains constant. The loss does decrease, though.
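One thing I am planning to check is what the model actually predicts per class on the test set, along these lines (just a sketch):

# Sketch: count how many test nodes get predicted as class 0 vs class 1.
model.eval()
pred_counts = torch.zeros(2)
with torch.no_grad():
    for data in test_loader:
        pred = model(data.x, data.edge_index, data.edge_attr).argmax(dim=-1)
        pred_counts += torch.bincount(pred.view(-1), minlength=2).float()
print(pred_counts)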

Am I doing the weighting wrong? I have also tried normalizing the weights, using 0.99 and 0.01 instead of 99 and 1, but the results remain the same.
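
Concretely, the normalized variant only swaps the weight tensor inside train(); everything else stays the same (sketch):

# Normalized variant of the loss inside train(); only the weights change.
loss = torch.nn.functional.cross_entropy(
    out.squeeze(), data.y.squeeze(),
    weight=torch.Tensor([0.01, 0.99]), reduction='mean')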