Hi there,
I have a question about using ResNet18 as a feature extractor (with no fine-tuning of its parameters) inside a new network I defined. Here is my code:
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as f
from torch.autograd import Variable as V


class TestNet(nn.Module):
    def __init__(self, extractor):
        super(TestNet, self).__init__()
        # select all ResNet18 layers except the final avgpool and fc
        self.features = nn.Sequential(*list(extractor.children())[:-2])
        self.maxpool1 = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(512, 1024, 3, padding=1)
        self.conv2 = nn.Conv2d(1024, 512, 1)
        self.conv3 = nn.Conv2d(512, 1024, 3, padding=1)
        self.conv4 = nn.Conv2d(1024, 512, 1)
        self.conv5 = nn.Conv2d(512, 1024, 3, padding=1)
        self.final = nn.Conv2d(1024, 30, 1)

    def forward(self, input):
        output = self.features(input)  # ResNet18 feature maps
        output = self.maxpool1(output)
        output = self.conv1(output)
        output = self.conv2(output)
        output = self.conv3(output)
        output = self.conv4(output)
        output = self.conv5(output)
        output = f.dropout(output, p=0.5)
        output = self.final(output)
        output = f.sigmoid(output)
        return output
resnet18 = torchvision.models.resnet18(pretrained=True)
volatile = V(torch.randn(1,3,224,224), volatile=True)
resnet18.eval()
output = resnet18(volatile)
net = TestNet(resnet18)
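To make sure the extractor is really not fine-tuned, my plan is to freeze its parameters before building the optimizer. This is just a rough sketch of what I have in mind (the SGD settings are placeholders, not my actual training configuration):

# freeze the pretrained ResNet18 weights so they are not updated during training
for param in resnet18.parameters():
    param.requires_grad = False

net = TestNet(resnet18)

# hand the optimizer only the parameters that are still trainable
optimizer = torch.optim.SGD(
    (p for p in net.parameters() if p.requires_grad),
    lr=0.001,
    momentum=0.9,
)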
I would like to know whether this approach is correct or not. In my view, only the last 6 or 8 layers have learnable parameters. Am I right?
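For context, this is how I am checking which parameters are actually learnable after freezing the extractor (just a quick inspection snippet, not part of the model itself):

# list the parameters that still require gradients
total = 0
for name, param in net.named_parameters():
    if param.requires_grad:
        print(name, tuple(param.size()))
        total += param.numel()
print('learnable parameters:', total)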