model1:
def forward(self, x, labels=None, return_cam=False):
batch_size = x.shape[0]
x = self.features(x)
x1 = self.conv6(x)
x1 = self.relu(x1)
x1 = self.conv7(x1)
x1 = self.relu(x1)
x2 = self.conv8(x)
x2 = self.relu(x2)
x2 = self.conv9(x2)
x2 = self.relu(x2)
x3 = self.conv10(x)
x3 = self.relu(x3)
x3 = self.conv11(x3)
x3 = self.relu(x3)
x = x1 + x2 + x3
model2:
def forward(self, x, labels=None, return_cam=False):
batch_size = x.shape[0]
x1 = self.features(x)
x1 = self.conv6(x1)
x1 = self.relu(x1)
x1 = self.conv7(x1)
x1 = self.relu(x1)
x2 = self.features(x)
x2 = self.conv8(x2)
x2 = self.relu(x2)
x2 = self.conv9(x2)
x2 = self.relu(x2)
x3 = self.features(x)
x3 = self.conv10(x3)
x3 = self.relu(x3)
x3 = self.conv11(x3)
x3 = self.relu(x3)
x = x1 + x2 + x3
self.features
contain pretrained vgg layers
I get a little better accuracy using model2. Why is that?