VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace)
(2): Dropout(p=0.5)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace)
(5): Dropout(p=0.5)
(6): Linear(in_features=4096, out_features=10, bias=True)
)
(classifier1): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace)
(2): Dropout(p=0.5)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace)
(5): Dropout(p=0.5)
(6): Linear(in_features=4096, out_features=10, bias=True)
)
)```
I want the output of two classifiers each, can anyone provide the python code
If I understand you correctly, then after passing through features
and AdaptiveAvgPool
you want to get the outputs of classifier
and classifier1
class VGG(nn.Module):
def __init__(self):
. . .
def forward(self, x):
x = self.features(x)
return self.classifier(x), self.classifier1(x)
Hope that helps!
Thanks Arunava,But what i did the same,i am getting size mismatch error.
my code:
def show_batch_images(dataloader,model):
images, labels = next(iter(dataloader))
print(labels)
out = model.features(images)
out_class = model.classifier(out)
out_class1 = model.classifier1(out)
_,pred1 = torch.max(out_class.data,1)
_, pred2 = torch.max(out_class1.data ,1)
img = torchvision.utils.make_grid(images)
return image,pred1,pred2
From which line?
P.S. It will be nicer if you keep these in forward()
out = model.features(images)
out_class = model.classifier(out)
out_class1 = model.classifier1(out)
Will do,Thanks Arunava
This is the error i get:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-124-938a2a3c90b1> in <module>()
----> 1 images, pred = show_batch_images(train_loader,model)
<ipython-input-123-0b9e63821ed9> in show_batch_images(dataloader, model)
2 images, labels = next(iter(dataloader))
3 print(labels)
----> 4 out = custom_vgg.forward(images,model)
5 out_class = out[0]
6 out_class1 = out[1]
<ipython-input-121-35ba48ae34f5> in forward(self, images, model)
2 def forward(self, images,model):
3 out = model.features(images)
----> 4 return model.classifier(out), model.classifier1(out)
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/linear.py in forward(self, input)
65 @weak_script_method
66 def forward(self, input):
---> 67 return F.linear(input, self.weight, self.bias)
68
69 def extra_repr(self):
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1352 ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
1353 else:
-> 1354 output = input.matmul(weight.t())
1355 if bias is not None:
1356 output += torch.jit._unwrap_optional(bias)
RuntimeError: size mismatch, m1: [3584 x 7], m2: [25088 x 4096] at /opt/conda/conda-bld/pytorch_1549630534704/work/aten/src/TH/generic/THTensorMath.cpp:940
Just after avgpool
what is the out .shape
you get? Can you print and check once?
output shape of avgpool [1,512,7,7]
Before passing it to the classifier
s Flatten it!
class VGG(nn.Module):
def __init__(self):
. . .
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.reshape(x.size(0), -1)
return self.classifier(x), self.classifier1(x)
it says too many values to unpack,expected 2
Update this to
out1, out2 = model(images)
Thanks mate,it worked
Happy to help