Hello, I am currently constructing ensemble models and am wondering how I should save them.
Consider this example, taken from here:
class MyEnsemble(nn.Module):
    """Ensemble of two backbones whose penultimate features are concatenated
    and passed through a fresh linear classifier.

    Parameters
    ----------
    modelA, modelB : nn.Module
        Backbones exposing a ``.fc`` head (e.g. torchvision ResNets). The head
        is replaced with ``nn.Identity`` so each backbone emits its pooled
        feature vector instead of class logits.
    nb_classes : int
        Output size of the ensemble classifier (default 10).
    """

    def __init__(self, modelA, modelB, nb_classes=10):
        super().__init__()
        self.modelA = modelA
        self.modelB = modelB
        # Remove each backbone's last linear layer so forward() yields features.
        self.modelA.fc = nn.Identity()
        self.modelB.fc = nn.Identity()
        # New classifier over the concatenated features.
        # NOTE(review): 2048 + 512 matches resnet50 + resnet18 feature widths —
        # confirm if other backbones are plugged in.
        self.classifier = nn.Linear(2048 + 512, nb_classes)

    def forward(self, x):
        # clone() so in-place ops inside modelA cannot mutate the input tensor
        # that modelB also consumes.
        x1 = self.modelA(x.clone())
        x1 = x1.view(x1.size(0), -1)
        x2 = self.modelB(x)
        x2 = x2.view(x2.size(0), -1)
        x = torch.cat((x1, x2), dim=1)
        return self.classifier(F.relu(x))
# Train your separate models
# ...
# We use pretrained torchvision models here.
# NOTE(review): `pretrained=True` is deprecated in recent torchvision in favor
# of the `weights=` argument — confirm the installed version before changing.
modelA = models.resnet50(pretrained=True)
modelB = models.resnet18(pretrained=True)

# Freeze both backbones so only the ensemble's new classifier trains.
for param in modelA.parameters():
    param.requires_grad_(False)
for param in modelB.parameters():
    param.requires_grad_(False)

# Create the ensemble model.
ensembled_model = MyEnsemble(modelA, modelB)
If I were to save these two models, would it make more sense to save them separately:
# Option 1: save each backbone's weights to its own checkpoint file.
torch.save(modelA.state_dict(), "...")
torch.save(modelB.state_dict(), "...")
Or to save the ensemble model as a whole:
# Option 2: save the combined ensemble's state_dict in a single checkpoint.
torch.save(ensembled_model.state_dict(), "...")
Are there any differences between the two methods above?