How to concatenate multiple input features at the output layer?

The code for the Layer class is given below:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Layer(nn.Module):
        def __init__(self, in_features, out_features, dropout, alpha, concat=True):
            super(Layer, self).__init__()
            self.dropout = dropout
            self.in_features = in_features
            self.out_features = out_features
            self.alpha = alpha
            self.concat = concat
            # learnable weight matrix and attention vector
            self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
            nn.init.xavier_uniform_(self.W.data, gain=1.414)
            self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
            nn.init.xavier_uniform_(self.a.data, gain=1.414)

            self.leakyrelu = nn.LeakyReLU(self.alpha)

        def forward(self, h, adj):
            Wh = torch.mm(h, self.W)  # h.shape: (N, in_features), Wh.shape: (N, out_features)
            e = self._prepare_attentional_mechanism_input(Wh)  # e.shape: (N, N)

            # mask out non-edges with a large negative value before the softmax
            zero_vec = -9e15 * torch.ones_like(e)
            attention = torch.where(adj > 0, e, zero_vec)
            attention = F.softmax(attention, dim=1)
            attention = F.dropout(attention, self.dropout, training=self.training)
            h_prime = torch.matmul(attention, Wh)

            if self.concat:
                return F.elu(h_prime)
            else:
                return h_prime

        def _prepare_attentional_mechanism_input(self, Wh):
            # Wh.shape: (N, out_features)
            # self.a.shape: (2 * out_features, 1)
            # Wh1.shape, Wh2.shape: (N, 1)
            # e.shape: (N, N)
            Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
            Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
            # broadcast add
            e = Wh1 + Wh2.T
            return self.leakyrelu(e)

The code for the Model class is given below:

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self, nfeat, nlabels, nhid=8, dropout=0.5, alpha=0.2, nheads=4):
            super(Model, self).__init__()
            self.dropout = dropout
            self.nhid = nhid
            self.nlabels = nlabels
            self.attentions1 = nn.ModuleList([Layer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)
                                              for _ in range(nheads)])
            self.attentions2 = nn.ModuleList([Layer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)
                                              for _ in range(nheads)])
            self.out_att = Layer(nhid * nheads * 2, nlabels, dropout=dropout, alpha=alpha, concat=False)

        def forward(self, x1, adj1, x2, adj2):
            x1 = F.dropout(x1, self.dropout, training=self.training)
            x2 = F.dropout(x2, self.dropout, training=self.training)
            x1 = torch.cat([att(x1, adj1) for att in self.attentions1], dim=1)  # (N, nhid * nheads)
            x2 = torch.cat([att(x2, adj2) for att in self.attentions2], dim=1)  # (N, nhid * nheads)
            x = torch.cat((x1, x2), dim=1)  # (N, nhid * nheads * 2)
            x = F.dropout(x, self.dropout, training=self.training)
            adj = torch.cat((adj1, adj2), dim=1)  # (N, 2N)
            x = F.elu(self.out_att(x, adj))

            return F.log_softmax(x, dim=1)

The main code is given below:

    adj1 = torch.randn(256, 256)
    adj2 = torch.randn(256, 256)

    x1 = torch.randn(256, 256)
    x2 = torch.randn(256, 256)

    nfeat = 256
    nlabels = 300
    model = Model(nfeat, nlabels)

    model.forward(x1, adj1, x2, adj2)

When I run the forward pass I get the error below. The problem seems to lie in the concatenation of the multiple inputs at the output layer.


    RuntimeError                              Traceback (most recent call last)
    /tmp/ipykernel_107617/2617667035.py in <module>
    ----> 1 model.forward(x1, adj1, x2, adj2)

    /tmp/ipykernel_107617/3230078694.py in forward(self, x1, adj1, x2, adj2)
         28     x = F.dropout(x, self.dropout, training=self.training)
         29     adj = torch.cat((adj1, adj2), dim=1)
    ---> 30     x = F.elu(self.out_att(x, adj))
         31
         32     return F.log_softmax(x, dim=1)

    /userq/.conda/envs/PY/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
       1128         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
       1129                 or _global_forward_hooks or _global_forward_pre_hooks):
    -> 1130             return forward_call(*input, **kwargs)
       1131         # Do not call functions when jit is used
       1132         full_backward_hooks, non_full_backward_hooks = [], []

    /tmp/ipykernel_107617/640529098.py in forward(self, h, adj)
         29
         30     zero_vec = -9e15*torch.ones_like(e)
    ---> 31     attention = torch.where(adj > 0, e, zero_vec)
         32     attention = F.softmax(attention, dim=1)
         33     attention = F.dropout(attention, self.dropout, training=self.training)

    RuntimeError: The size of tensor a (512) must match the size of tensor b (256) at non-singleton dimension 1
Is there any way to fix this error?

In order to use a torch.where statement, all of the tensor arguments must have the same shape (or at least be broadcastable to a common shape).

attention = torch.where(adj > 0, e, zero_vec)

The error is telling you that adj does not have the same size as e and zero_vec. You can print the sizes of these tensors just before the where statement to confirm that they do not match.
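
With the shapes from your question (N = 256) the mismatch is easy to see: adj1 and adj2 are each (256, 256), so torch.cat((adj1, adj2), dim=1) produces a (256, 512) tensor, while e inside Layer.forward is always (N, N) = (256, 256). A quick standalone check along these lines (a sketch, not your model) reproduces exactly the sizes in the RuntimeError:

    import torch

    N = 256
    adj1 = torch.randn(N, N)
    adj2 = torch.randn(N, N)

    # This is what Model.forward builds before calling out_att:
    adj = torch.cat((adj1, adj2), dim=1)
    print(adj.shape)          # torch.Size([256, 512])

    # Inside Layer.forward, e is computed from Wh and is always (N, N):
    e = torch.randn(N, N)     # stand-in for the attention scores
    print(e.shape)            # torch.Size([256, 256])

    # torch.where(adj > 0, e, zero_vec) then fails: dimension 1 is 512 vs 256,
    # which is the mismatch reported in the traceback.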

https://pytorch.org/docs/stable/generated/torch.where.html
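
If both inputs describe the same 256 nodes, one possible fix is to keep the feature concatenation along dim=1 but give out_att a single (N, N) adjacency instead of a concatenated one. The sketch below replaces Model.forward and takes the element-wise union of the two edge sets; that combination rule is an assumption on my part, so use whatever relation between the two graphs makes sense for your data:

    def forward(self, x1, adj1, x2, adj2):
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.dropout(x2, self.dropout, training=self.training)
        x1 = torch.cat([att(x1, adj1) for att in self.attentions1], dim=1)
        x2 = torch.cat([att(x2, adj2) for att in self.attentions2], dim=1)
        x = torch.cat((x1, x2), dim=1)            # (N, nhid * nheads * 2)
        x = F.dropout(x, self.dropout, training=self.training)

        # Combine the two adjacencies into one (N, N) mask instead of
        # concatenating them along dim=1. The element-wise union of edges
        # is only one possible choice (an assumption here).
        adj = ((adj1 > 0) | (adj2 > 0)).float()   # (N, N)

        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)

With this change, x has shape (N, nhid * nheads * 2), which matches the in_features of out_att, and adj has shape (N, N), which matches e inside Layer.forward, so the torch.where call no longer fails.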
