Hi,
I was trying to create a denoising AE, but as I was training it I got some size error eventhough both clear and pixelated images have same size.
Here is model Arch:
############### MODEL ################
class AutoEncoder(nn.Module):
def __init__(self):
super(AutoEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 64, (3,3)),
nn.MaxPool2d((2,2)),
nn.Conv2d(64, 32, (3,3)),
nn.MaxPool2d((2,2)),
nn.Conv2d(32, 16, (3,3)),
nn.MaxPool2d((2,2)),
nn.Conv2d(16, 8, (3,3)),
nn.MaxPool2d((2,2)),
)
self.decoder = nn.Sequential(
nn.Upsample((16,16)),
nn.ConvTranspose2d(8, 16, (3,3)),
nn.Upsample((32,32)),
nn.ConvTranspose2d(16, 32, (3,3)),
nn.Upsample((64,64)),
nn.ConvTranspose2d(32, 64, (3,3)),
nn.Upsample((128,128)),
nn.ConvTranspose2d(64, 3, (3,3)),
)
def forward(self, xb):
encoded = self.encoder(xb)
return self.decoder(encoded)
The size of images is 128, 128.
HERE IS A LINK OF THE NOTEBOOK AS WELL: