Sorry, I think I was being unclear. The actual code goes like this:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter

class GCN(nn.Module):
    def __init__(self, nfeat, nhid1, nhid2, nhid3, nclass, dropout, with_relu=True):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid1)
        self.gc2 = GraphConvolution(nhid1, nclass)
        self.dropout = dropout
        self.with_relu = with_relu

    def forward(self, x, adj):
        axw, w0 = self.gc1(x, adj)              # first layer: A X W0
        aaxw0w1, w1 = self.gc2(axw, adj)        # second layer: A (A X W0) W1 (gc2 expects nhid1 features, so it takes axw, not x)
        return F.log_softmax(aaxw0w1, dim=1)
and
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=False):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)   # X W
        output = torch.spmm(adj, support)        # A (X W)
        if self.bias is not None:                # apply bias if it was registered
            output = output + self.bias
        return output, self.weight

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
In my network, x is the input, adj is used in both the hidden layer and the output layer, and I want to get the gradient of the loss w.r.t. adj.
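Concretely, this is the kind of thing I am after (just a minimal sketch with made-up sizes and placeholder data, not my real training loop; features and labels below are dummies):

import torch
import torch.nn.functional as F

n_nodes, n_feat, n_class = 5, 8, 3
model = GCN(nfeat=n_feat, nhid1=4, nhid2=4, nhid3=4,
            nclass=n_class, dropout=0.5)

features = torch.randn(n_nodes, n_feat)          # placeholder node features
labels = torch.randint(0, n_class, (n_nodes,))   # placeholder labels
adj = torch.rand(n_nodes, n_nodes)               # placeholder *dense* adjacency
adj.requires_grad_(True)                         # ask autograd to track adj

# note: if torch.spmm in the layer insists on a sparse adj, using torch.mm
# there instead should be fine for a dense adjacency
output = model(features, adj)                    # adj enters both gc1 and gc2
loss = F.nll_loss(output, labels)                # model returns log-probabilities
loss.backward()

print(adj.grad)                                  # d(loss)/d(adj), same shape as adj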
Thank you!