class autoencoder(nn.Module):
    """Convolutional autoencoder: two conv layers feed a fully connected
    bottleneck (2000-dim code); the decoder is fully connected.

    NOTE(review): the decoder ends at 20000 features, which is not
    obviously the flattened input size -- confirm the intended
    reconstruction target against the training loss.
    """

    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 6, 5),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(True),
            # Conv feature maps must be flattened before the Linear stack;
            # the original Sequential jumped Conv2d -> Linear directly,
            # which raises a shape error at runtime.
            # 16*57*77 implies a 1x126x166 input image -- TODO confirm.
            nn.Flatten(),
            nn.Linear(16 * 57 * 77, 20000),
            nn.Linear(20000, 14000),
            nn.Linear(14000, 2000),
        )
        self.decoder = nn.Sequential(
            nn.Linear(2000, 14000),
            nn.Linear(14000, 20000),
        )

    def forward(self, x):
        # x: (batch, 1, H, W). Do NOT flatten here -- the first encoder
        # layer is Conv2d and needs the 4-D image; nn.Flatten() inside
        # the encoder handles the conv -> linear transition.
        x = self.encoder(x)
        x = self.decoder(x)
        return x
model = autoencoder()
# Reconstruction loss: MSE, not CrossEntropyLoss -- CrossEntropy expects
# class-index targets, but here the target is the input image itself.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.005,
                             weight_decay=1e-5)
for epoch in range(2):
    # Must be initialized before it is accumulated below (the original
    # raised NameError on the first batch).
    running_loss = 0.0
    for i, data in enumerate(traindataloader, 0):
        img, labels = data  # labels unused: an autoencoder targets its input
        optimizer.zero_grad()
        # ===================forward=====================
        # assumes img is already (batch, 1, H, W) from the dataloader --
        # TODO confirm; the original unsqueeze(dim=2) made it 5-D, which
        # Conv2d rejects.
        output = model(img)
        # Compare against the flattened input so both sides are 2-D.
        # NOTE(review): output is 20000-dim; confirm this equals the
        # flattened image size, otherwise adjust the decoder's last layer.
        loss = criterion(output, img.view(img.size(0), -1))
        # ===================backward====================
        loss.backward()
        optimizer.step()
        # ===================log=========================
        running_loss += loss.item()
        if i % 10 == 9:  # print every 10 mini-batches (avg of the last 10)
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 10))
            running_loss = 0.0
print('Finished Training')
# (forum note, kept as a comment so the file parses) Please help!