Here is more detail about the error:
runfile('/home/zayanir/Documents/code/tuto/vae_1D.py', wdir='/home/zayanir/Documents/code/tuto')
Traceback (most recent call last):
File "<ipython-input-49-b9687dd3ae78>", line 1, in <module>
runfile('/home/zayanir/Documents/code/tuto/vae_1D.py', wdir='/home/zayanir/Documents/code/tuto')
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/home/zayanir/Documents/code/tuto/vae_1D.py", line 107, in <module>
mu, var, loss, inp, out= train(train_dl, args.learning_rate, latitude.size)
File "/home/zayanir/Documents/code/tuto/vae_1D.py", line 65, in train
out, mu, var=model(data)
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
result = self.forward(*input, **kwargs)
File "/home/zayanir/Documents/code/tuto/vae_1D.py", line 43, in forward
mu,var=self.encoder(x)
File "/home/zayanir/Documents/code/tuto/vae_1D.py", line 19, in encoder
moy=F.relu(self.fc1(x))
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
result = self.forward(*input, **kwargs)
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/torch/nn/modules/linear.py", line 55, in forward
return F.linear(input, self.weight, self.bias)
File "/home/zayanir/anaconda3/envs/myenv/lib/python3.6/site-packages/torch/nn/functional.py", line 994, in linear
output = input.matmul(weight.t())
RuntimeError: size mismatch, m1: [1 x 6], m2: [1 x 3] at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/generic/THTensorMath.c:2033
The shapes for different variables:
eq shape= (23412, 21)
latitude shape= (23412,)
Input x of the encoder: torch.Size([6])
Here is all the code:
import torch
from torchvision.datasets import ImageFolder
from torchvision import transforms
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import pandas as pd
import argparse
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1=nn.Linear(1,3)
self.fc2=nn.Linear(3,1)
def encoder(self, x):
print(x.shape)
moy=F.relu(self.fc1(x))
variance=F.relu(self.fc1(x))
return moy, variance
def decoder(self, z):
x=self.fc2(z)
x=torch.sigmoid(x)
return x
def reparameterise(self, mu, var):
std = torch.exp(0.5*var)
eps = torch.randn_like(std)
return mu + eps*std
def forward(self, x):
mu,var=self.encoder(x)
z=self.reparameterise(mu, var)
x=self.decoder(z)
return x, mu, var
def loss_fct(x, x_rec, mu, var):
reconstruction_loss=nn.MSELoss()
ER=reconstruction_loss(x_rec, x)
KL=-0.5*torch.sum(1 + var - mu.pow(2) - var.exp())
return ER + KL
def train(train_dl, lr1, bs):
model=VAE()
model = model.double()
optimizer=torch.optim.Adam(model.parameters(), lr=lr1)
train_loss=0.0
for idx, data in enumerate(train_dl):
data= Variable(data)
############ forward ##############
out, mu, var=model(data)
loss=loss_fct(data, out, mu, var)
train_loss+=loss.data[0]
############ backward ##############
optimizer.zero_grad()
loss.backward()
optimizer.step()
return mu, var, train_loss/bs, data, out
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Autoencoder Sentinel DB')
parser.add_argument('-bs','--batch-size', type=int, default=6, metavar='',
help='input batch size for training (default: 6)')
parser.add_argument('-ep','--epochs', type=int, default=100, metavar='',
help='number of epochs to train (default: 100)')
parser.add_argument('-lr','--learning-rate', type=float, default=0.001, metavar='',
help='learning rate (default: 0.001)')
parser.add_argument('-oimg','--output-img', type=str, default='outp_images/output12.png', metavar='',
help='output image (default: outp_images/output12.png)')
parser.add_argument('-iimg','--input-img', type=str, default='outp_images/input12.png', metavar='',
help='input image (default: outp_images/input12.png)')
parser.add_argument('-outp','--output', type=str, default='out3.txt', metavar='',
help='output file (default: out.txt)')
args = parser.parse_args()
eq=pd.read_csv('/home/zayanir/Documents/data/database.csv')
latitude=eq['Latitude']
train_dl=torch.utils.data.DataLoader(latitude, batch_size=args.batch_size)
print("eq shape=", eq.shape)
print("latitude shape=", latitude.shape)
for epoch in range(1,args.epochs+1):
mu, var, loss, inp, out= train(train_dl, args.learning_rate, latitude.size)
print(f'loss {epoch} = {loss}')