Error with my first Autoencoder - setting up the Autoencoder with the proper tensor shape

Hi, I am trying to use an autoencoder on my own data set. Unfortunately, I am not able to get it to run. Maybe someone is able to help me.
What I was able to do:

  • transform the data into tensors - tensor size 6000, 20 - see below
  • split the data into training and test sets - see below

The problem starts in the last code block:
ValueError: too many values to unpack (expected 2)

Code below:

# import the libraries
import os
import sys
import csv
import glob  # file listing

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dateutil import parser as DUtil

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import torchvision
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.utils import save_image
from torch.utils.data import DataLoader

print("Necessary libraries loaded")

# set up the figure size
plt.rcParams["figure.figsize"] = (14, 10)

# utility function: return the GPU if available, otherwise the CPU
def get_device():
    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'
    return device
# create tensors from the cleaned data frames
list_of_tensors = [torch.tensor(np.array(df)) for df in yp_clean]
torch.stack(list_of_tensors)  # note: the stacked result is not assigned to anything
list_of_tensors[1].size()  # get the size of one tensor

→ torch.Size([6000, 20])

traindata = list_of_tensors[:1500]
testdata = list_of_tensors[1501:1927]

# constants
NUM_EPOCHS = 50
LEARNING_RATE = 1e-3
BATCH_SIZE = 128

# prepare the data loaders
train_loader = torch.utils.data.DataLoader(traindata, batch_size=BATCH_SIZE, num_workers=0)
test_loader = torch.utils.data.DataLoader(testdata, batch_size=BATCH_SIZE, num_workers=0)

class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        # encoder
        self.enc1 = nn.Linear(in_features=6000, out_features=256)
        self.enc2 = nn.Linear(in_features=256, out_features=128)
        self.enc3 = nn.Linear(in_features=128, out_features=64)
        self.enc4 = nn.Linear(in_features=64, out_features=32)
        self.enc5 = nn.Linear(in_features=32, out_features=16)
        # decoder 
        self.dec1 = nn.Linear(in_features=16, out_features=32)
        self.dec2 = nn.Linear(in_features=32, out_features=64)
        self.dec3 = nn.Linear(in_features=64, out_features=128)
        self.dec4 = nn.Linear(in_features=128, out_features=256)
        self.dec5 = nn.Linear(in_features=256, out_features=784)
    def forward(self, x):
        x = F.relu(self.enc1(x))
        x = F.relu(self.enc2(x))
        x = F.relu(self.enc3(x))
        x = F.relu(self.enc4(x))
        x = F.relu(self.enc5(x))
        x = F.relu(self.dec1(x))
        x = F.relu(self.dec2(x))
        x = F.relu(self.dec3(x))
        x = F.relu(self.dec4(x))
        x = F.relu(self.dec5(x))
        return x
net = Autoencoder()
print(net)

How do I have to set this up properly? Here is the rest of my code:

criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
def train(net, trainloader, NUM_EPOCHS):
    train_loss = []
    for epoch in range(NUM_EPOCHS):
        running_loss = 0.0
        for data in trainloader:
            img, _ = data
            img = img.to(device)
            img = img.view(img.size(0), -1)
            optimizer.zero_grad()
            outputs = net(img)
            loss = criterion(outputs, img)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        
        loss = running_loss / len(trainloader)
        train_loss.append(loss)
        print('Epoch {} of {}, Train Loss: {:.3f}'.format(
            epoch+1, NUM_EPOCHS, loss))
        if epoch % 5 == 0:
            save_decoded_image(outputs.cpu().data, epoch)
    return train_loss
# get the computation device
device = get_device()
#print(device)
# load the neural network onto the device
net.to(device)
make_dir()
# train the network
train_loss = train(net, train_loader, NUM_EPOCHS)
plt.figure()
plt.plot(train_loss)
plt.title('Train Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.savefig('deep_ae_fashionmnist_loss.png')
# test the network
test_image_reconstruction(net, test_loader)

error: ValueError: too many values to unpack (expected 2)

Hi,

I suggest you look at the entire traceback and find the exact line of your code where the error occurs.

What this error signifies is that you are unpacking something into two variables, but the object on the right-hand side actually contains more than two values. The mismatch could be in the data loader, the model, etc., so the full traceback will tell you exactly where it comes from.
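In your case, one likely culprit (just a guess from the code you posted, not something I can confirm without the traceback) is the line img, _ = data in your training loop. Your DataLoader is built from a plain list of tensors, so each batch is a single tensor of shape [batch_size, 6000, 20], not an (image, label) pair like in the MNIST tutorials, and unpacking that batch into two variables fails. A minimal sketch of how the inner loop could look instead, reusing the names from your post:

for data in train_loader:
    img = data                           # each batch is already a tensor, not an (image, label) pair
    img = img.to(device).float()         # nn.Linear expects float32 by default, so cast the input
    img = img.view(img.size(0), -1)      # flatten [B, 6000, 20] to [B, 6000 * 20]
    optimizer.zero_grad()
    outputs = net(img)
    loss = criterion(outputs, img)
    loss.backward()
    optimizer.step()

Note that if you flatten each sample like this, the first encoder layer would need in_features=6000 * 20 (and the last decoder layer the same out_features), not the 6000 and 784 you currently have, which look like leftovers from an MNIST example. But please check the traceback first to confirm where the error is actually raised.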

Sarthak Jain