I am using a trained model as an extractor in another model and it seems like it is working ok, but I don’t know if I am doing it right or now. So this is my model and how it is declared.
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
image_modules = list(ae_model.children())[:-2] #get only the encoder layers
self.modelA = nn.Sequential(*image_modules)
# Shape of max pool = 4, 112, 112
self.fc1 = nn.Linear(4*112*112, 500)
self.fc2 = nn.Linear(500,500)
self.out = nn.Linear(500, 10)
self.drop = nn.Dropout(0.3)
def forward(self, x):
x = self.modelA(x)
x = x.view(x.size(0),4*112*112)
x = torch.relu(self.fc1(x))
x = self.drop(x)
x = torch.relu(self.fc2(x))
x = self.drop(x)
x = self.out(x)
return x
Now I have done this for a Resnet50 model but it did not look like this. My extractor that I am using is this
# define the NN architecture
class ConvAutoencoder(nn.Module):
def __init__(self):
super(ConvAutoencoder, self).__init__()
## encoder layers ##
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.conv2 = nn.Conv2d(16, 4, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
## decoder layers ##
self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(16, 3, 2, stride=2)
def forward(self, x):
## encode ##
x = torch.relu(self.conv1(x))
x = self.pool(x)
x = torch.relu(self.conv2(x))
x = self.pool(x) # compressed representation
## decode ##
x = torch.relu(self.t_conv1(x))
x = torch.sigmoid(self.t_conv2(x))
return x
I think this is working but I would like to get a second opinion on it.
Thank you for any help with this.