Size Mismatch Error - Autoencoder

Hi,

I’ve recently started working with an autoencoder on my custom dataset of 650x650 TIFF images. These are black-and-white binary masks that I generated. I’m trying to feed them into a relatively simple out-of-the-box autoencoder implementation, but I’m getting a size mismatch error. My understanding of the architecture is fairly limited, and I’d appreciate any help you can offer.

My architecture:

import torch
from torch import nn

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(650 * 650, 128),
            nn.ReLU(True),
            nn.Linear(128, 64),
            nn.ReLU(True),
            nn.Linear(64, 12),
            nn.ReLU(True),
            nn.Linear(12, 3))
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.ReLU(True),
            nn.Linear(12, 64),
            nn.ReLU(True),
            nn.Linear(64, 128),
            nn.Linear(128, 650 * 650),
            nn.ReLU())

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten each image to a vector of length 650 * 650 = 422500
        x = self.encoder(x)
        x = self.decoder(x)
        return x  # output stays flattened: [batch, 422500]
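
For reference, here is a quick shape check (a minimal sketch, assuming the loader gives me single-channel 650x650 float tensors, which I haven’t fully verified):

model = AutoEncoder()
x = torch.randn(4, 1, 650, 650)  # hypothetical batch: 4 single-channel 650x650 masks
out = model(x)
print(x.shape)    # torch.Size([4, 1, 650, 650])
print(out.shape)  # torch.Size([4, 422500]) -- flattened, not reshaped back to the input size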

The code to run the experiments:

from torch.autograd import Variable
from torchvision.utils import save_image

# LR and EPOCH are constants set earlier in my script
#model = AutoEncoder().cuda()
model = AutoEncoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=1e-5)

for epoch in range(EPOCH):
    for data in train_loader:
        img, _ = data
        #img = img.view(img.size(0), -1)
        img = Variable(img)
        #img = Variable(img).cuda()
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # ===================log========================
    print('epoch [{}/{}], loss:{:.4f}'
          .format(epoch + 1, EPOCH, loss.item()))
    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, './mlp_img/image_{}.png'.format(epoch))

torch.save(model.state_dict(), './sim_autoencoder.pth')
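
Since the first Linear layer expects exactly 650 * 650 = 422500 features per image, I’m wondering whether the loader is actually handing me that shape (an extra channel dimension from the TIFFs, e.g. RGB, would break it). This is the check I’m planning to run on one batch (untested sketch):

img, _ = next(iter(train_loader))
print(img.shape)  # I expect [batch, 1, 650, 650]
print(img.view(img.size(0), -1).shape)  # must be [batch, 422500] to match nn.Linear(650 * 650, 128)

I also notice that output is still flattened when it reaches the criterion while img is not, so maybe the commented-out img.view line needs to come back so the loss target matches the output shape? I’m not sure if that’s the right approach.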

Thank you.