Hmm, I’m afraid you can’t calculate that in __init__ without prior knowledge of the input shape. One option is to pass the input shape as an argument to __init__; you can then infer the flattened feature size by running a dummy forward pass through the convolutional blocks. Something like:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, input_shape=(1, 28, 28)):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()

        # infer the number of features coming out of the conv blocks
        n_size = self._get_conv_output(input_shape)

        self.fc1 = nn.Linear(n_size, 50)
        self.fc2 = nn.Linear(50, 10)

    # generate a dummy input sample and forward it to get the output shape
    def _get_conv_output(self, shape):
        with torch.no_grad():
            dummy = torch.rand(1, *shape)  # a batch size of 1 is enough
            output_feat = self._forward_features(dummy)
        return output_feat.view(1, -1).size(1)

    def _forward_features(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        return x

    def forward(self, x):
        x = self._forward_features(x)
        x = x.view(x.size(0), -1)  # flatten everything but the batch dim
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)  # raw logits; no ReLU before the softmax
        return F.log_softmax(x, dim=1)
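
As a quick sanity check, you can build the model with a different resolution and push a dummy batch through it; fc1 adapts automatically to whatever input_shape you pass. The 64x64 resolution and the variable names here are just an arbitrary example:

model = Net(input_shape=(1, 64, 64))
dummy_batch = torch.rand(8, 1, 64, 64)  # batch of 8 single-channel 64x64 images
out = model(dummy_batch)
print(out.shape)  # torch.Size([8, 10])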