Output image from a loaded model

I'm trying to load an image, and then I get this error on the line img=torch.view(img,(1,-1)):

AttributeError: module 'torch' has no attribute 'view'

This is the program to load the model:

import os
import cv2
import torch
import torch.nn.functional as F
import numpy as np
from scipy.spatial import distance
import torchvision
import torchvision.datasets as dsets
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image

if not os.path.exists('./teste_novo'):
    os.mkdir('./teste_novo')

img=cv2.imread("./bolinhas.png",0)
img=img.astype(float)
img=img/255
img=torch.Tensor(img)
img=img.unsqueeze(2)
img=img.permute(2,0,1)
img=img.unsqueeze(0)

class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()

        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        if torch.cuda.is_available():
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return F.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return self.decode(z), mu, logvar

model = VAE().cuda()
model.load_state_dict(torch.load('./vae.pth'))
img=torch.view(img,(1,-1))
img = Variable(img)
out=model(img)
i=img.view(img.size(0), -1)
o=out.view(out.size(0), -1)
i=i.detach().cpu().numpy()
o=o.detach().cpu().numpy()
dt=distance.euclidean(o,i)
print(dt)

And this is the script that trains and saves the model:

import torch
import torchvision
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import os

if not os.path.exists('./var_autoencoder_out'):
    os.mkdir('./var_autoencoder_out')

if not os.path.exists('./var_autoencoder_in'):
    os.mkdir('./var_autoencoder_in')

def to_img(x):
    x = x.view(x.size(0), 1, 28, 28)
    return x

num_epochs = 300
batch_size = 251
learning_rate = 1e-3

img_transform = transforms.Compose([
    transforms.ToTensor()
])

dataset = MNIST('./data', transform=img_transform, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()

        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        if torch.cuda.is_available():
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return F.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return self.decode(z), mu, logvar

model = VAE()
if torch.cuda.is_available():
    model.cuda()

reconstruction_function = nn.MSELoss(size_average=False)

def loss_function(recon_x, x, mu, logvar):
    BCE = reconstruction_function(recon_x, x)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    return BCE + KLD

optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(num_epochs):
    model.train()
    train_loss = 0
    for batch_idx, data in enumerate(dataloader):
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img)
        nova = to_img(img)
        save_image(nova, './var_autoencoder_in/image_{}.png'.format(epoch))
        if torch.cuda.is_available():
            img = img.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(img)
        loss = loss_function(recon_batch, img, mu, logvar)
        loss.backward()
        train_loss += loss.data
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                batch_idx * len(img),
                len(dataloader.dataset), 100. * batch_idx / len(dataloader),
                loss.data / len(img)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(dataloader.dataset)))
    if epoch % 1 == 0:
        save = to_img(recon_batch.cpu().data)
        save_image(save, './var_autoencoder_out/image_{}.png'.format(epoch))

torch.save(model.state_dict(), './vae.pth')

view is a tensor method, so you should call img.view instead of torch.view(img,...).
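
For example, a minimal sketch of that one fix (assuming img is the 1x1x28x28 tensor built earlier in your loading script):

img = img.view(1, -1)           # tensor method: flattens to shape (1, 784)
# torch.reshape(img, (1, -1))   # module-level alternative that also works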

Your model returns three tensors in its forward method:

return self.decode(z), mu, logvar

Assuming you would like to visualize the first returned tensor, this should work:

o = out[0].view(out[0].size(0), -1)
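
Putting both points together, the inference part of the loading script could look roughly like this. This is only a sketch under a few assumptions: it moves the input to the GPU to match model = VAE().cuda(), unpacks the three returned tensors, and flattens the arrays before the SciPy distance call (Variable is no longer needed in recent PyTorch versions):

model = VAE().cuda()
model.load_state_dict(torch.load('./vae.pth'))

img = img.view(1, -1).cuda()           # flatten to (1, 784) and move to the model's device
with torch.no_grad():                  # no gradients needed for inference
    recon, mu, logvar = model(img)     # forward returns three tensors; unpack them

i = img.view(img.size(0), -1).cpu().numpy()
o = recon.view(recon.size(0), -1).cpu().numpy()
dt = distance.euclidean(o.ravel(), i.ravel())   # euclidean() expects 1-D arrays
print(dt)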