Hello. I need to change the last layer's weights after training, before the test phase, but the whole image changed, while I only want to put the second row of the kernel in place of the first row. I think there is a problem in my code.
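To be concrete, here is a minimal sketch of the kind of change I have in mind, using a standalone ConvTranspose2d layer built with the same arguments as my conv6 (its weight has shape [16, 1, 3, 3]; by "row" I mean one 1x3 row of a 3x3 kernel, and I index channel [0, 0] just like in my code below):

import torch
import torch.nn as nn

# Standalone layer with the same arguments as my conv6 (random weights,
# not the trained ones -- this is only to show the indexing I intend).
layer = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1, output_padding=1)

with torch.no_grad():
    # Copy the second kernel row into the first row for channel [0, 0];
    # every other value of the weight tensor should stay as it is.
    layer.weight[0, 0, 0, :] = layer.weight[0, 0, 1, :]

print(layer.weight[0, 0])  # rows 0 and 1 are now identical, the rest is untouched

Here is my full code: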
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
#Converting data to torch.FloatTensor
transform = transforms.ToTensor()
# Download the training and test datasets
train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)
#Prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, num_workers=0)
#Define the Convolutional Autoencoder
class ConvAutoencoder(nn.Module):
    def __init__(self):
        super(ConvAutoencoder, self).__init__()
        #Encoder
        self.conv1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 8, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(8, 8, 3)
        #Decoder
        self.conv4 = nn.ConvTranspose2d(8, 8, 3)
        self.conv5 = nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1, output_padding=1)
        self.conv6 = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1, output_padding=1)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        return x
#Instantiate the model
model = ConvAutoencoder()
print(model)
def train(model, num_epochs=20, batch_size=64, learning_rate=1e-3):
    torch.manual_seed(42)
    criterion = nn.MSELoss()  # mean square error loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=1e-5)
    outputs = []
    for epoch in range(num_epochs):
        for data in train_loader:
            img, _ = data
            recon = model(img)
            loss = criterion(recon, img)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        print('Epoch:{}, Loss:{:.4f}'.format(epoch+1, float(loss)))
        outputs.append((epoch, img, recon))
    return outputs
model = ConvAutoencoder()
max_epochs =10
outputs = train(model, num_epochs=max_epochs)
for k in range(0, max_epochs, 5):
    plt.figure(figsize=(5, 2))
    imgs = outputs[k][1].detach().numpy()
    recon = outputs[k][2].detach().numpy()
    for i, item in enumerate(imgs):
        if i >= 5: break
        plt.subplot(2, 5, i + 1)
        plt.imshow(item[0].reshape(28, 28), cmap="gray")
    for i, item in enumerate(recon):
        if i >= 5: break
        plt.subplot(2, 5, 5 + i + 1)
        plt.imshow(item[0].reshape(28, 28), cmap="gray")
b = ConvAutoencoder().conv6.weight
a0 = b[0, 0, 0, :]
a1 = b[0, 0, 1, :]
a0 = a1
model.conv6.weight = nn.Parameter(b)
print('--------- b ----------')
print(b)
print('------- conv6 --------')
print(model.conv6.weight)
#test phase
def test(model):
    outputs1 = []
    with torch.no_grad():
        for epoch in range(10):
            for data1 in test_loader:
                img1, _ = data1
                recon1 = model(img1)
                outputs1.append((epoch, img1, recon1))
    return outputs1
outputs1 = test(model)
for k in range(0, 10, 9):
    plt.figure(figsize=(10, 2))
    imgs1 = outputs1[k][1].detach().numpy()
    recon1 = outputs1[k][2].detach().numpy()
    for i, item in enumerate(imgs1):
        if i >= 10: break
        plt.subplot(2, 10, i + 1)
        plt.imshow(item[0].reshape(28, 28), cmap="gray")
    for i, item in enumerate(recon1):
        if i >= 10: break
        plt.subplot(2, 10, 10 + i + 1)
        plt.imshow(item[0].reshape(28, 28), cmap="gray")
