After training my own CNN model and loading it, I want to extract the features of an intermediate layer. Here is my CNN model and code.
Convolutional Neural Net
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # convolutional blocks
        self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=11, stride=3)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2_1 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=7, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv3_1 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1)
        self.conv3_2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5, stride=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv4_1 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1)
        self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1)
        self.conv4_3 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1)
        self.bn4 = nn.BatchNorm2d(128)
        self.pool3 = nn.MaxPool2d(2, 2)
        # fully connected head
        self.fc1 = nn.Linear(128 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.fc3 = nn.Linear(1000, 128)
        self.out = nn.Linear(128, 1)
        # activation, batch normalization
        self.prelu = nn.PReLU()
        self.bn0 = nn.BatchNorm1d(1000)
        # dropout
        self.dropout2d = nn.Dropout2d(0.25)
        self.dropout1d = nn.Dropout(0.5)

    def forward(self, x):
        x = self.bn1(F.relu(self.conv1_1(x)))
        x = self.bn2(F.relu(self.conv2_1(x)))
        x = self.dropout2d(self.pool1(x))
        x = self.bn3(F.relu(self.conv3_1(x)))
        x = self.bn3(F.relu(self.conv3_2(x)))
        x = self.dropout2d(self.pool2(x))
        x = self.bn4(self.prelu(self.conv4_1(x)))
        x = self.bn4(self.prelu(self.conv4_2(x)))
        x = self.bn4(self.prelu(self.conv4_3(x)))
        x = self.dropout2d(self.pool3(x))
        x = x.view(-1, 128 * 5 * 5)              # flatten to (n, 128*5*5)
        x = self.dropout1d(F.relu(self.fc1(x)))
        x = self.dropout1d(F.relu(self.fc2(x)))
        x = F.relu(self.fc3(x))                  # (n, 128) features I want to extract
        out = self.out(x)
        return out

net = Net()
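A quick sanity check of the forward pass (512x512 RGB is one input size that works out to exactly the 128x5x5 map fc1 expects; the actual training resolution may differ):

# shape check with a dummy batch (assumed 512x512 RGB inputs)
dummy = torch.randn(2, 3, 512, 512)
with torch.no_grad():
    print(net(dummy).shape)  # torch.Size([2, 1])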
Load Model
model = net.to('cuda:0')
num_model = 10  # index of the saved checkpoint
model.load_state_dict(torch.load('C:/Users/KIMSUNGHUN/Documents/TCGA-GBM/model4/params_{}.pt'.format(num_model)))
model.eval()
After loading this specific pre-trained model, I want to freeze its parameters and extract only the features of the last fully connected layer (self.fc3), i.e. an output of shape (n, 128).
Is there a good way to do this for a model created and loaded through nn.Module?
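What I have in mind is something like the sketch below: freeze everything by setting requires_grad to False and grab the fc3 activations with a forward hook (note the hook sees the raw fc3 output, before the F.relu applied in forward). I'm not sure this is the recommended pattern, so a better approach would be appreciated. Here batch is a placeholder for any (n, 3, H, W) input tensor:

# freeze all parameters so nothing is updated or tracked for gradients
for p in model.parameters():
    p.requires_grad = False

# capture the (n, 128) output of fc3 with a forward hook
features = {}
def hook(module, inputs, output):
    features['fc3'] = output.detach()

handle = model.fc3.register_forward_hook(hook)

with torch.no_grad():
    _ = model(batch.to('cuda:0'))   # batch: placeholder (n, 3, H, W) tensor
fc3_features = features['fc3']      # shape (n, 128)

handle.remove()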