I am trying to do some visual attention using a pretrained implementation of EfficientNet found here. In order to use the attention features I need to extract specific intermediate layers from the EfficientNet model and then use them before my last Linear layer.
However, I do not know how to get these intermediate layers and feed them into my attention blocks.
class AttentionNet(nn.Module):
    """EfficientNet-b2 backbone with linear attention applied to an
    intermediate feature map.

    The intermediate activation of ``model._blocks[0]._bn2`` is captured via a
    forward hook (PyTorch's supported mechanism for grabbing inner-layer
    outputs) and fed to the attention block alongside the backbone output.

    Args:
        num_classes: number of output classes for the final linear layer.
            (The original code hard-coded ``out_features=4`` and ignored this
            argument — fixed here.)

    Returns (from ``forward``):
        ``[logits, attention_map]`` where ``logits`` is
        ``(batch_size, num_classes)``.
    """

    def __init__(self, num_classes):
        super().__init__()
        self.model = EfficientNet.from_pretrained('efficientnet-b2')
        # Attention layers.
        # NOTE(review): the projector assumes the hooked feature map has 256
        # channels — confirm against the b2 block configuration.
        self.projector = ProjectorBlock(256, 512)
        self.attn1 = LinearAttentionBlock(in_features=512, normalize_attn=True)
        self.attn2 = LinearAttentionBlock(in_features=512, normalize_attn=True)
        self.attn3 = LinearAttentionBlock(in_features=512, normalize_attn=True)
        # Classification layer — use num_classes instead of a hard-coded 4.
        self.classification = nn.Linear(in_features=512 * 3,
                                        out_features=num_classes, bias=True)
        # Storage for intermediate activations, populated by the hook below
        # each time the backbone runs forward.
        self._features = {}
        self.model._blocks[0]._bn2.register_forward_hook(
            self._make_hook('inter_1'))

    def _make_hook(self, key):
        # Returns a forward hook that stashes the module's output under `key`.
        def hook(module, inputs, output):
            self._features[key] = output
        return hook

    def forward(self, x):
        x = self.model(x)  # running the backbone fills self._features
        l1 = self._features['inter_1']  # activation of _blocks[0]._bn2
        c1, g1 = self.attn1(self.projector(l1), x)
        x = self.classification(g1)  # (batch_size, num_classes)
        return [x, c1]