Thanks for the reply. I still don't get what happens when I use the same module twice (in different layers).
What happens in such a case?
See the example below, where `max_pool` is used twice.
```python
import torch.nn as nn
import torch.nn.functional as F


class LeNet5(nn.Module):
    def __init__(self, dim=32, in_channels=1,
                 out_channels_1=6, out_channels_2=16,
                 kernel_size=5, stride=1, padding=0, dilation=1,
                 mp_kernel_size=2, mp_stride=2, mp_padding=0, mp_dilation=1,
                 fcsize1=120, fcsize2=84, nclasses=10):
        super(LeNet5, self).__init__()

        # helpers for calculating the spatial dimension after a conv/max_pool op
        def convdim(dim):
            return (dim + 2*padding - dilation * (kernel_size - 1) - 1) // stride + 1

        def mpdim(dim):
            return (dim + 2*mp_padding - mp_dilation * (mp_kernel_size - 1) - 1) // mp_stride + 1

        self.conv1 = nn.Conv2d(in_channels, out_channels_1, kernel_size, stride)
        self.max_pool = nn.MaxPool2d(mp_kernel_size, stride=mp_stride,
                                     padding=mp_padding, dilation=mp_dilation)
        self.conv2 = nn.Conv2d(out_channels_1, out_channels_2, kernel_size, stride)

        # final dimension after applying conv -> max_pool -> conv -> max_pool
        dim = mpdim(convdim(mpdim(convdim(dim))))

        self.fc1 = nn.Linear(out_channels_2 * dim * dim, fcsize1)
        self.fc2 = nn.Linear(fcsize1, fcsize2)
        self.fc3 = nn.Linear(fcsize2, nclasses)

    def forward(self, x):
        nsamples = x.shape[0]
        x1 = F.relu(self.conv1(x))
        x2 = self.max_pool(x1)       # first use of self.max_pool
        x3 = F.relu(self.conv2(x2))
        x4 = self.max_pool(x3)       # second use of the same module instance
        x5 = x4.view(nsamples, -1)
        x6 = F.relu(self.fc1(x5))
        x7 = F.relu(self.fc2(x6))
        x8 = self.fc3(x7)
        return x8
```
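For the defaults (`dim=32`, `kernel_size=5`, `stride=1`, `mp_kernel_size=2`, `mp_stride=2`), the helpers work out to `convdim(32)=28`, `mpdim(28)=14`, `convdim(14)=10`, `mpdim(10)=5`, so `fc1` gets `16 * 5 * 5 = 400` input features. That part seems fine; my question is only about `self.max_pool` appearing in two places.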
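In case it clarifies what I'm asking, here is a minimal check I'd run (assuming the class above and an MNIST-sized input); it shows the model runs end to end and that `max_pool` contributes no learnable parameters of its own:

```python
import torch

model = LeNet5()
x = torch.randn(4, 1, 32, 32)    # assumed: batch of 4 single-channel 32x32 images
out = model(x)
print(out.shape)                 # torch.Size([4, 10])

# the pooling layer is parameter-free, so only conv/fc weights show up here:
print([name for name, _ in model.named_parameters()])
```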