RuntimeError: shape '[-1, 400]' is invalid for input of size

In order to classify images with PyTorch, I modified my local data pipeline to use ImageFolder, based on the following tutorial:
https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
However, "RuntimeError: shape '[-1, 400]' is invalid for input of size" is displayed, and I do not know the cause.

I am asking here because I do not know how to fix it. Please help.

import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

import torch.optim as optim
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

data_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                      std=[0.229, 0.224, 0.225])
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Please assume root points to a local dataset path
trainset = torchvision.datasets.ImageFolder(root='my local dataset', transform=data_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=False, num_workers=2)
testset = torchvision.datasets.ImageFolder(root='my local dataset', transform=data_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
classes = ('1', '2', '3', '4',
           '5', '6', '7', '8', '9')
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')


RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
     11
     12         # forward + backward + optimize
---> 13         outputs = net(inputs)
     14         loss = criterion(outputs, labels)
     15

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

<ipython-input> in forward(self, x)
     14         x = self.pool(F.relu(self.conv2(x)))
     15         x = x.contiguous()
---> 16         x = x.view(-1, 16 * 5 * 5)
     17         x = F.relu(self.fc1(x))
     18         x = F.relu(self.fc2(x))

RuntimeError: shape '[-1, 400]' is invalid for input of size 87616

The view operation, which should flatten x, is throwing this error, since 16*5*5 does not match your activation size.
In the tutorial the CIFAR10 dataset is used, which has a spatial resolution of 32x32.
Most likely your custom dataset has a different spatial size, so the view is failing.
Based on the shape given in the error message, it looks like your activation should have the shape [batch_size=4, channels=16, height=37, width=37], which results in 87616 values.
Try changing the input size of your linear layer to 16*37*37 and the flattening to:

x = x.view(x.size(0), 16*37*37)
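
If you don't want to compute that number by hand, you could also measure the flattened feature size once with a dummy forward pass. This is just a sketch: input_size=160 is an assumed value that happens to reproduce the 16x37x37 activation from your error message (87616 / 4 / 16 = 37*37).

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, input_size=160):  # assumed input size, see note above
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Run the conv stack once on a dummy batch to measure the flattened size
        with torch.no_grad():
            dummy = torch.zeros(1, 3, input_size, input_size)
            self.num_flat = self._features(dummy).view(1, -1).size(1)
        self.fc1 = nn.Linear(self.num_flat, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def _features(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        return x

    def forward(self, x):
        x = self._features(x)
        x = x.view(x.size(0), -1)  # flatten per sample; batch dim stays intact
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)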

Thank you for your kindness. I tried changing it to "x = x.view(x.size(0), 16*37*37)".
But now I have a new error: "RuntimeError: size mismatch, m1: [4 x 21904], m2: [4000 x 120] at /Users/administrator/nightlies/pytorch-1.0.0/wheel_build_dirs/conda_3.6/conda/conda-bld/pytorch_1544137972173/work/aten/src/TH/generic/THTensorMath.cpp:940".
I tried to search for the answer, but I have no idea.
I suspect my black and white images are the cause.

When I changed self.fc1 = nn.Linear(16 * 37 * 37, 120), the error was gone.
Thank you so much!!!

Many thanks for the help. I was working on a grayscale image classifier for 64*64 images.

RuntimeError: shape '[-1, 400]' is invalid for input of size 10816

I had this error; I knew it was an issue with my net's flattening, but didn't know what to change. I read your post and solved the issue, reaching 90% accuracy with my classifier.

4 = batch size
13 = sqrt(10816 / 4 / 16)

I had to set x = x.view(4, 16 * 13 * 13).
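
A quick sanity check of that arithmetic in Python (just a sketch using the numbers from this post):

E = 10816          # total element count reported by the failing view()
batch_size = 4
channels = 16      # output channels of the last conv layer

side = (E / batch_size / channels) ** 0.5
print(side)        # 13.0, so the flatten becomes x.view(4, 16 * 13 * 13)
assert batch_size * channels * int(side) ** 2 == E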


I'm having this error:

RuntimeError                              Traceback (most recent call last)
<ipython-input-27-8c2bbe9e4336> in <module>
      1 #Fit the model
----> 2 learn.fit_one_cycle(1,slice(lr))#5

/opt/conda/lib/python3.6/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, final_div, wd, callbacks, tot_epochs, start_epoch)
     20     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
     21                                        final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
---> 22     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
     23 
     24 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None):

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
    200         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
    201         self.cb_fns_registered = True
--> 202         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    203 
    204     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
    104             if not cb_handler.skip_validate and not learn.data.empty_val:
    105                 val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func,
--> 106                                        cb_handler=cb_handler, pbar=pbar)
    107             else: val_loss=None
    108             if cb_handler.on_epoch_end(val_loss): break

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch)
     61             if not is_listy(yb): yb = [yb]
     62             nums.append(first_el(yb).shape[0])
---> 63             if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
     64             if n_batch and (len(nums)>=n_batch): break
     65         nums = np.array(nums, dtype=np.float32)

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in on_batch_end(self, loss)
    306         "Handle end of processing one batch with `loss`."
    307         self.state_dict['last_loss'] = loss
--> 308         self('batch_end', call_mets = not self.state_dict['train'])
    309         if self.state_dict['train']:
    310             self.state_dict['iteration'] += 1

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
    248         "Call through to all of the `CallbakHandler` functions."
    249         if call_mets:
--> 250             for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
    251         for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
    252 

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in _call_and_update(self, cb, cb_name, **kwargs)
    239     def _call_and_update(self, cb, cb_name, **kwargs)->None:
    240         "Call `cb_name` on `cb` and update the inner state."
--> 241         new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
    242         for k,v in new.items():
    243             if k not in self.state_dict:

/opt/conda/lib/python3.6/site-packages/fastai/callback.py in on_batch_end(self, last_output, last_target, **kwargs)
    342         if not is_listy(last_target): last_target=[last_target]
    343         self.count += first_el(last_target).size(0)
--> 344         val = self.func(last_output, *last_target)
    345         if self.world:
    346             val = val.clone()

<ipython-input-25-f37731fdae06> in accuracy_1(input, targs)
      5     targs = targs.view(-1).long()
      6     n = targs.shape[0]
----> 7     input = input.argmax(dim=-1).view(n,-1)
      8     targs = targs.view(n,-1)
      9     return (input==targs).float().mean()

RuntimeError: shape '[70912, -1]' is invalid for input of size 64

I just want to add that I, too, had the same problem and this solved it.

My input images were of size 224*224 (1 channel, grayscale).
My batch size was chosen to be 32.

So for x = x.view(-1, channels*height*width) (note the -1, since I'm flattening), I had to use:

height = width = sqrt( E / 32 / 16 )

where E comes from "RuntimeError: shape '[-1, 400]' is invalid for input of size E", and channels is the number of filter channels from the previous convolution layer.

I guess that the formula here is:

height = width = sqrt( E / batch_size / n_channels)
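
In code, that rule could look like this (a sketch; infer_spatial_size is a hypothetical helper name, and it assumes the activation is square):

import math

def infer_spatial_size(total_elements, batch_size, n_channels):
    """Recover the square spatial size of a conv activation from the
    element count E reported in the view() error message."""
    side = math.sqrt(total_elements / batch_size / n_channels)
    assert side == int(side), "activation is not square, or inputs are wrong"
    return int(side)

print(infer_spatial_size(87616, 4, 16))  # 37, as in the first post of this thread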

Hi! When I first met this same problem, I fixed it according to the reply above. But when I run the program, the same error appears again after it finishes one epoch, and I don't know the cause or what I should do. Please help. The program is below:

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def loadtraindata():
    """Load the training data."""
    path = r"D:/MNIST_test/train"
    trainset = torchvision.datasets.ImageFolder(path,
        transform=transforms.Compose([transforms.Resize((32, 32)),  # scale images to the given size
                                      transforms.CenterCrop(32),    # center crop
                                      transforms.ToTensor(),        # convert to tensor
                                      transforms.Normalize((0.1307,), (0.3081,))
                                      ]))
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                              shuffle=True, num_workers=0)
    return trainloader

class Net(nn.Module):
    """Build the network."""
    def __init__(self):
        super(Net, self).__init__()
        # self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # self.fc1 = nn.Linear(320, 50)
        self.fc1 = nn.Linear(500, 32)
        # self.fc2 = nn.Linear(50, 10)
        self.fc2 = nn.Linear(32, 10)

        # Spatial transformer localization-network
        self.localization = nn.Sequential(
            # nn.Conv2d(1, 8, kernel_size=7),
            nn.Conv2d(3, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )

        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            # nn.Linear(10 * 3 * 3, 32),
            nn.Linear(10 * 4 * 4, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )

        # Initialize the weights/bias with identity transformation
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    # Spatial transformer network forward function
    def stn(self, x):
        xs = self.localization(x)
        # print(xs.size(0))  # 4
        # xs = xs.view(-1, 10 * 3 * 3)
        xs = xs.view(xs.size(0), 10 * 4 * 4)   # flatten the localization features
        theta = self.fc_loc(xs)                # [batch, 6]
        theta = theta.view(-1, 2, 3)           # reshape to a batch of 2x3 transform matrices

        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)

        return x

    def forward(self, x):
        # transform the input
        x = self.stn(x)

        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        # print(x.size(0))  # 4
        x = x.view(4, 500)
        print("OK")
        x = F.relu(self.fc1(x))
        print(1)
        x = F.dropout(x, training=self.training)
        print(2)
        x = self.fc2(x)
        print(3)
        return F.log_softmax(x, dim=1)

model = Net().to(device)
print(4)

classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')

def train(epoch):
    """Train the model."""
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    train_loader = loadtraindata()
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 500 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    print('Finished Training')
    torch.save(model, 'model.pkl')  # save the whole network structure and parameters
    torch.save(model.state_dict(), 'net_params.pkl')  # save only the model parameters

for epoch in range(1, 14):
    train(epoch)

def _compute_unrolled_model(self, input_source, input_target, domain_label_source, domain_label_target, eta, network_optimizer):
    loss_source = self.model._loss(input_source, domain_label_source)
    loss_target = self.model._loss(input_target, domain_label_target)
    loss = loss_source + loss_target

    theta = _concat(self.model.parameters()).data
    try:
        moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum)
    except:
        moment = torch.zeros_like(theta)

    dtheta = _concat(torch.autograd.grad(loss, self.model.parameters(), retain_graph=True)).data + self.network_weight_decay * theta
    unrolled_model = self._construct_model_from_theta(theta.sub(eta, moment + dtheta))
    return unrolled_model

def _construct_model_from_theta(self, theta):
    model_new = self.model.new()
    model_dict = self.model.state_dict()

    params, offset = {}, 0
    for k, v in self.model.named_parameters():
        v_length = np.prod(v.size())
        params[k] = theta[offset: offset + v_length].view(v.size())
        offset += v_length

    assert offset == len(theta)
    model_dict.update(params)
    model_new.load_state_dict(model_dict)
    return model_new.cuda()

File "/voyager-volume/code_1_test/code_1_test/code_1_test/code_1_test/code_1_test/architect.py", line 158, in _construct_model_from_theta
    params[k] = theta[offset: offset+v_length].view(v.size())
RuntimeError: shape '[48]' is invalid for input of size 0

@ptrblck, could you tell me why I am facing this issue?

It seems that the offset and v_length calculation, combined with the slicing of theta, is wrong.
As you can see here:

torch.randn([10])[10:20].view(48)
> RuntimeError: shape '[48]' is invalid for input of size 0

you are most likely creating an empty tensor in theta[offset: offset+v_length], while the view operation expects to be used on 48 values.
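
Here is a small reproduction, plus a defensive check you could add before the slicing (a sketch; the assert is an assumed addition, not part of your original code):

import torch

theta = torch.randn(10)

# Slicing past the end of a tensor silently returns an empty tensor...
chunk = theta[10:20]
print(chunk.numel())  # 0

# ...so a subsequent view has nothing to reshape and raises:
# chunk.view(48)  ->  RuntimeError: shape '[48]' is invalid for input of size 0

# Inside _construct_model_from_theta you could guard against this:
# assert offset + v_length <= theta.numel(), "theta has fewer values than the model's parameters"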
