TypeError: conv2d(): argument 'input' (position 1) must be Tensor, not list

Hello. Please, could anyone help me figure out how to remove this error? (This error occurred in the test phase.)

```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

#%%

#Converting data to torch.FloatTensor
transform = transforms.ToTensor()

# Download the training and test datasets

train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)

test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, num_workers=0)

#train_loader=torch.to

#%%

#Define the Convolutional Autoencoder
class ConvAutoencoder(nn.Module):
def __init__(self):
super(ConvAutoencoder, self).__init__()

    #Encoder
    self.conv1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)
    self.conv2 = nn.Conv2d(16, 8, 3, stride=2, padding=1)
    self.conv3 = nn.Conv2d(8,8,3)

    #Decoder
    self.conv4 = nn.ConvTranspose2d(8, 8, 3)
    self.conv5 = nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1, output_padding=1)
    self.conv6 = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1, output_padding=1)

def forward(self, x):
    x = F.relu(self.conv1(x))      
    x = F.relu(self.conv2(x))
    x = F.relu(self.conv3(x))  
    #mask = torch.cat([torch.ones([8,8,2,3]), torch.zeros([8,8,1,3])], 2)
    #x=torch.multiply(x,mask)
   # x[7,7,:,:]=x[6,6,:,:]
    x = F.relu(self.conv4(x))
    x = F.relu(self.conv5(x))
    x = F.relu(self.conv6(x))

    return x

#Instantiate the model
model = ConvAutoencoder()
print(model)

#%%
def train(model, num_epochs=20, batch_size=64, learning_rate=1e-3):
torch.manual_seed(42)
criterion = nn.MSELoss() # mean square error loss
optimizer = torch.optim.Adam(model.parameters(),
lr=learning_rate,
weight_decay=1e-5) # ←

outputs = []
for epoch in range(num_epochs):
    for data in train_loader:
        img, _ = data
        recon = model(img)
        loss = criterion(recon, img)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    print('Epoch:{}, Loss:{:.4f}'.format(epoch+1, float(loss)))
    outputs.append((epoch, img, recon),)
return outputs

#%%

#test_image = test_loader.open(test_image_name).convert('RGB')

model = ConvAutoencoder()
max_epochs =20
outputs = train(model, num_epochs=max_epochs)
#%%

for k in range(0, max_epochs, 9):
plt.figure(figsize=(9, 2))
imgs = outputs[k][1].detach().numpy()
recon = outputs[k][2].detach().numpy()
for i, item in enumerate(imgs):
if i >= 9: break
plt.subplot(2, 9, i+1)
plt.imshow(item[0])

for i, item in enumerate(recon):
    if i >= 9: break
    plt.subplot(2, 9, 9+i+1)
    plt.imshow(item[0])
    
    #%%

a=(ConvAutoencoder().conv3.weight)
print("conv1 filters: ",a.data.size())

#%%
#test phase

with torch.no_grad():
for data in test_loader:

     data = torch.tensor(data)                                
     output = model(data)
```

Based on the error message you are passing a list to the model while a tensor is expected.
You could check the type of the input via print(type(data)) and make sure that it’s a tensor.

PS: you can post code snippets by wrapping them into three backticks ```, which makes debugging easier.

1 Like

It's my code. Thanks to your guidance, I noticed the data is a list. I want to shift one hidden layer's feature map along its rows and columns to show the changes in the outputs.

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

#%%

#Converting data to torch.FloatTensor
transform = transforms.ToTensor()


# Download the training and test datasets
train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)

test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, num_workers=0)

#train_loader=torch.to

#%%

#Define the Convolutional Autoencoder
class ConvAutoencoder(nn.Module):
    """Convolutional autoencoder for single-channel 28x28 images (MNIST).

    The encoder compresses 1x28x28 inputs through three conv layers;
    the decoder mirrors them with transposed convolutions back to the
    input resolution. Every stage is followed by a ReLU.
    """

    def __init__(self):
        super(ConvAutoencoder, self).__init__()

        # Encoder: 1x28x28 -> 16x14x14 -> 8x7x7 -> 8x5x5
        self.conv1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 8, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(8, 8, 3)

        # Decoder: 8x5x5 -> 8x7x7 -> 16x14x14 -> 1x28x28
        self.conv4 = nn.ConvTranspose2d(8, 8, 3)
        self.conv5 = nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1,
                                        output_padding=1)
        self.conv6 = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1,
                                        output_padding=1)

    def forward(self, x):
        """Encode then decode ``x``, applying ReLU after every layer.

        NOTE(review): the original author planned to mask/shift the
        bottleneck activation (after conv3) here to visualize its effect
        on the reconstruction.
        """
        stages = (self.conv1, self.conv2, self.conv3,
                  self.conv4, self.conv5, self.conv6)
        for layer in stages:
            x = F.relu(layer(x))
        return x

#Instantiate the model
model = ConvAutoencoder()
print(model)

#%%
def train(model, num_epochs=20, batch_size=64, learning_rate=1e-3,
          data_loader=None):
    """Train ``model`` as an autoencoder with MSE reconstruction loss.

    Args:
        model: an ``nn.Module`` that maps an image batch to a
            reconstruction of the same shape.
        num_epochs: number of full passes over the data.
        batch_size: unused; kept for backward compatibility (the batch
            size is fixed by the loader, not by this function).
        learning_rate: Adam learning rate.
        data_loader: iterable yielding ``(img, label)`` batches; falls
            back to the module-level ``train_loader`` when ``None``.

    Returns:
        A list with one ``(epoch, img, recon)`` tuple per epoch, holding
        the last batch of that epoch and its reconstruction. ``recon``
        is detached so the stored tensors do not pin autograd graphs.
    """
    torch.manual_seed(42)
    criterion = nn.MSELoss()  # mean squared reconstruction error
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=1e-5)
    if data_loader is None:
        data_loader = train_loader  # module-level default loader

    outputs = []
    for epoch in range(num_epochs):
        for data in data_loader:
            img, _ = data  # labels are unused by an autoencoder
            optimizer.zero_grad()
            recon = model(img)
            loss = criterion(recon, img)
            loss.backward()
            optimizer.step()

        print('Epoch:{}, Loss:{:.4f}'.format(epoch + 1, loss.item()))
        # Detach so keeping the tensor does not keep the whole graph alive.
        outputs.append((epoch, img, recon.detach()))
    return outputs
#%%

#test_image = test_loader.open(test_image_name).convert('RGB')

# Train a freshly-initialized autoencoder on the MNIST training loader;
# `outputs` keeps one (epoch, img, recon) sample per epoch for plotting below.
model =  ConvAutoencoder()
max_epochs =20
outputs = train(model, num_epochs=max_epochs)
#%%

# Visualize originals (top row) against reconstructions (bottom row)
# for every 9th stored epoch.
for epoch_idx in range(0, max_epochs, 9):
    plt.figure(figsize=(9, 2))
    _, batch_imgs, batch_recon = outputs[epoch_idx]
    originals = batch_imgs.detach().numpy()
    reconstructions = batch_recon.detach().numpy()
    # Top row: up to 9 input images (index 0 drops the channel axis).
    for col, image in enumerate(originals[:9]):
        plt.subplot(2, 9, col + 1)
        plt.imshow(image[0])
    # Bottom row: the matching reconstructions.
    for col, image in enumerate(reconstructions[:9]):
        plt.subplot(2, 9, 9 + col + 1)
        plt.imshow(image[0])
        
        #%%
   
        
# Inspect the filter bank of the bottleneck layer of a fresh model.
# BUG FIX: the weight read is conv3, but the original print label said
# "conv1" — the label now matches the layer being inspected.
a = ConvAutoencoder().conv3.weight
print("conv3 filters: ", a.data.size())


#%%
#test phase        

# Test phase: reconstruct the MNIST test set without tracking gradients,
# then display one example reconstruction from the last batch.
with torch.no_grad():                                                                     #3
    for data in test_loader:
        # Each batch is an (images, labels) pair and the images are
        # already a tensor — re-wrapping with torch.tensor(...) made a
        # needless copy (and triggers a UserWarning).
        img, _ = data
        output = model(img)

# `output` is (batch, 1, 28, 28); imshow needs a 2-D array, so show the
# single channel of the first sample (fixes "Invalid shape" TypeError).
plt.imshow(output[0, 0])

Your code doesn’t seem to raise this error, but the last line of code is failing because the shape of the numpy array to plt.imshow is wrong:

plt.imshow(output)
> TypeError: Invalid shape (16, 1, 28, 28) for image data

plt.imshow(output[0, 0]) # works

So I’m still unsure what raises the error.

1 Like

Thanks a lot. Your guidance is very helpful.