I am training an autoencoder on images in order to extract the best features from them; later I want to use those features in a CNN for classification. I want to know how to feed the extracted features into the CNN, because I do not want the CNN to learn the features itself.

Below is the code for the autoencoder.

```
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 00:01:03 2019
@author: surekhagaikwad
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torchvision
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.utils import save_image
import os
# Training hyperparameters.
num_epochs = 30
batch_size = 128
learning_rate = 1e-3
# Hyperparameters
num_classes = 1
# Accumulators for per-epoch metrics (not filled by the visible code;
# presumably intended for the later CNN training — confirm).
loss_list = []
acc_list = []
# Output directory for reconstruction snapshots. makedirs(exist_ok=True)
# replaces the race-prone exists()-then-mkdir pattern.
os.makedirs('./mlp_img', exist_ok=True)
def to_img(x):
    """Reshape a batch of tensors into (N, 3, 224, 224) image form.

    `x` must contain 3*224*224 elements per sample; this is a pure view,
    no data is copied.
    """
    return x.view(x.size(0), 3, 224, 224)
def plot_sample_img(img, name):
    """Save one flattened image to ./sample_<name>.png.

    NOTE(review): this reshapes to a single-channel 28x28 image, which does
    not match the 3x224x224 images used elsewhere in this script — it looks
    left over from an MNIST version; confirm before using.
    """
    single = img.view(1, 28, 28)
    save_image(single, './sample_{}.png'.format(name))
def min_max_normalization(tensor, min_value, max_value):
    """Linearly rescale `tensor` so its values span [min_value, max_value].

    The rescale uses the global min/max of the whole tensor (not
    per-channel). Fix over the original: a constant tensor made the scale
    denominator zero and produced NaNs; now every element of a constant
    tensor maps to `min_value`.
    """
    min_tensor = tensor.min()
    shifted = tensor - min_tensor
    max_tensor = shifted.max()
    if max_tensor > 0:  # guard against division by zero on constant input
        shifted = shifted / max_tensor
    return shifted * (max_value - min_value) + min_value
def tensor_round(tensor):
    """Round every element to the nearest integer (binarizes [0,1] inputs)."""
    return tensor.round()
# Transform for autoencoder training: resize, replicate grayscale to 3
# channels, augment, then binarize every pixel to {0, 1} via the min-max
# rescale + round lambdas — matching the BCELoss objective used below.
# NOTE(review): lambdas inside transforms are not picklable; with
# num_workers > 0 this only works where workers are forked — confirm on
# the target platform.
img_transform = transforms.Compose([
transforms.Resize([224,224]),
transforms.Grayscale(3),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Lambda(lambda tensor:min_max_normalization(tensor, 0, 1)),
transforms.Lambda(lambda tensor:tensor_round(tensor))
])
# Alternative transform with ImageNet mean/std normalization.
# NOTE(review): `transform` is not used anywhere in the visible code —
# presumably intended for the later CNN classifier; confirm.
transform = transforms.Compose([
transforms.Resize([224,224]),
#transforms.Grayscale(3),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
#transforms.Normalize((0.5,), (0.5,))
])
# Folder dataset: one subdirectory per class under ./data/.
dataset = datasets.ImageFolder(root="./data/", transform=img_transform)
# NOTE(review): batch_size is hard-coded to 20 here and ignores the
# `batch_size = 128` constant defined above — confirm which is intended.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=20, shuffle=True, num_workers=1)
class autoencoder(nn.Module):
    """Convolutional autoencoder for 3-channel images.

    Encoder: two 3x3 conv + ReLU stages, each followed by 2x2 max pooling,
    so spatial dimensions shrink by 4x while channels go 3 -> 16 -> 4.
    Decoder: two stride-2 transposed convolutions restoring the original
    resolution, with a final sigmoid so outputs lie in [0, 1] for the BCE
    reconstruction loss.
    """

    def __init__(self):
        super(autoencoder, self).__init__()
        # --- encoder layers ---
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)   # depth 3 -> 16
        self.conv2 = nn.Conv2d(16, 4, 3, padding=1)   # depth 16 -> 4
        self.pool = nn.MaxPool2d(2, 2)                # halves H and W
        # --- decoder layers ---
        # kernel 2 / stride 2 doubles the spatial dims at each step
        self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(16, 3, 2, stride=2)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Encode `x` to the compressed representation, then decode it back."""
        # Encoder: conv -> relu -> pool, applied twice.
        code = self.pool(self.relu(self.conv1(x)))
        code = self.pool(self.relu(self.conv2(code)))  # compressed representation
        # Decoder: upsample back to the input resolution.
        out = self.relu(self.t_conv1(code))
        out = self.sigmoid(self.t_conv2(out))  # scale outputs into [0, 1]
        return out
# Instantiate the autoencoder (stays on CPU — no .to(device) in this script).
model = autoencoder()
# Loss and optimizer
# BCELoss pairs with the model's sigmoid output and the {0,1}-binarized
# targets produced by img_transform.
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def train_model():
    """Run the reconstruction training loop over the module-level dataloader.

    Uses the module-level `model`, `criterion`, `optimizer`, `dataloader`,
    and `num_epochs`. Fix over the original: the per-epoch log now reports
    the mean loss over all batches (the original printed only the final
    batch's loss, a noisy estimate). Also puts the model in train mode
    explicitly. Every 10 epochs the last batch's inputs and reconstructions
    are saved to ./mlp_img/ for visual inspection.
    """
    for epoch in range(num_epochs):
        model.train()  # explicit train mode (no-op today, safe if layers change)
        epoch_loss = 0.0
        num_batches = 0
        for data in dataloader:
            img, _ = data  # ImageFolder yields (image, label); labels unused here
            output = model(img)
            loss = criterion(output, img)  # reconstruction target is the input
            # Standard backprop step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1
        # ===================log========================
        mean_loss = epoch_loss / max(num_batches, 1)  # guard empty loader
        print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, mean_loss))
        if epoch % 10 == 0:
            # Snapshot the last batch's inputs and reconstructions.
            x = to_img(img.cpu().data)
            x_hat = to_img(output.cpu().data)
            save_image(x, './mlp_img/x_{}.png'.format(epoch))
            save_image(x_hat, './mlp_img/x_hat_{}.png'.format(epoch))
# Script-style entry point: training starts immediately on import/run.
train_model()
```