I have an Attention-based feature fusion layer for two input feature embeddings. The model is defined here:
class AFF(nn.Module):
def init(self, in_features, out_features):
super(AFF, self).init()
self.in_features = in_features
self.out_features = out_features
# Attention mechanism
self.attention = nn.Linear(in_features * 2, 1)
self.attention.weight = nn.Parameter(torch.Tensor(self.in_features * 2, 1))
nn.init.xavier_uniform_(self.attention.weight)
self.softmax = nn.Softmax(dim=1)
# Fusion layer
self.fc = nn.Linear(in_features * 2, out_features)
self.relu = nn.ReLU()
def forward(self, x1, x2):
print(“Shape of x1:”, x1.shape)
print(“Shape of x2:”, x2.shape)
if x1.size(0) != x2.size(0):
if x1.size(0) < x2.size(0):
padding = torch.nn.functional.pad(x1, (0,0,0, x2.size(0) - x1.size(0)), mode='constant', value=0)
x_concat = torch.cat((padding, x2), dim=1)
else:
padding = torch.nn.functional.pad(x2, (0,0,0, x1.size(0) - x2.size(0)), mode='constant', value=0)
x_concat = torch.cat((x1, padding), dim=1)
else:
x_concat = torch.cat((x1, x2), dim=1)
print("Shape of x_concat:", x_concat.shape)
# Calculate attention weights
attn_weights = self.softmax(self.attention(x_concat))
# Apply attention to feature embeddings
x1_weighted = torch.mul(x1, attn_weights[:, :1])
x2_weighted = torch.mul(x2, attn_weights[:, 1:])
# Concatenate weighted feature embeddings
x_weighted_concat = torch.cat((x1_weighted, x2_weighted), dim=1)
# Fusion layer
fused_features = self.fc(x_weighted_concat)
fused_features = self.relu(fused_features)
return fused_features
How can I fix this error when I run it in my training loop?
The shapes of x1, x2 and x_concat are:
Shape of x1: torch.Size([16880, 64])
Shape of x2: torch.Size([2956, 64])
Shape of x_concat: torch.Size([16880, 128])