raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 256, 1, 1])

Why did I get this error? My image and label are the same size (166×190, 24-bit RGB). I also get:

RuntimeError: stack expects each tensor to be equal size, but got [3, 185, 204] at entry 0 and [3, 190, 166] at entry 1

The following is my Resize transform; where is it wrong?
import cv2
import torch


class Resize(object):
    """Resize image and/or masks."""

    def __init__(self, imageresize, maskresize):
        self.imageresize = imageresize
        self.maskresize = maskresize

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        # move channels last so cv2.resize sees (H, W, C)
        if len(image.shape) == 3:
            image = image.transpose(1, 2, 0)
        if len(mask.shape) == 3:
            mask = mask.transpose(1, 2, 0)
        # cv2.resize takes dsize as (width, height); the third positional
        # argument is dst, so interpolation must be passed by keyword
        mask = cv2.resize(mask, self.maskresize, interpolation=cv2.INTER_AREA)
        image = cv2.resize(image, self.imageresize, interpolation=cv2.INTER_AREA)
        # back to channels-first (C, H, W) for PyTorch
        if len(image.shape) == 3:
            image = image.transpose(2, 0, 1)
        if len(mask.shape) == 3:
            mask = mask.transpose(2, 0, 1)
        return {'image': image, 'mask': mask}

class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        # add a channel dimension to 2-D arrays: (H, W) -> (1, H, W)
        if len(mask.shape) == 2:
            mask = mask.reshape((1,) + mask.shape)
        if len(image.shape) == 2:
            image = image.reshape((1,) + image.shape)
        return {'image': torch.from_numpy(image),
                'mask': torch.from_numpy(mask)}

class Normalize(object):
    """Scale image and mask values to [0, 1]."""

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        return {'image': image.type(torch.FloatTensor) / 255,
                'mask': mask.type(torch.FloatTensor) / 255}
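For reference, I chain the transforms roughly like this (a sketch; the dummy arrays stand in for my actual loading code):

import numpy as np
from torchvision import transforms

composed = transforms.Compose([
    Resize((166, 190), (166, 190)),  # cv2.resize takes dsize as (width, height)
    ToTensor(),
    Normalize(),
])

# dummy arrays standing in for one loaded image/mask pair
sample = {'image': np.zeros((3, 190, 166), dtype=np.uint8),
          'mask': np.zeros((190, 166), dtype=np.uint8)}
out = composed(sample)
print(out['image'].shape, out['mask'].shape)
# torch.Size([3, 190, 166]) torch.Size([1, 190, 166])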

This is probably due to BatchNorm: it computes per-channel statistics across the batch, so with batch_size=1 and a 1×1 feature map there is only one value per channel and training-mode BatchNorm cannot estimate a variance. You cannot use BatchNorm with batch_size=1; you should use InstanceNorm instead.
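For example, a minimal sketch of the difference (the layer width is illustrative, not taken from your model):

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(256)
try:
    bn(torch.randn(1, 256, 1, 1))  # training mode is the default
except ValueError as e:
    print(e)  # Expected more than 1 value per channel when training...

# InstanceNorm normalizes each sample on its own, so batch_size=1 works
# (it still needs more than one spatial element while training):
inorm = nn.InstanceNorm2d(256)
out = inorm(torch.randn(1, 256, 8, 8))
print(out.shape)  # torch.Size([1, 256, 8, 8])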

Thanks, I made the change, but now I get this error:
RuntimeError: stack expects each tensor to be equal size, but got [3, 530, 500] at entry 0 and [3, 500, 530] at entry 3
The image (32-bit) and label (4-bit) are the same size (500×530). Is there anything wrong with my transpose process?

class Resize(object):
    """Resize image and/or masks."""

    def __init__(self, imageresize, maskresize):
        self.imageresize = imageresize
        self.maskresize = maskresize

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        if len(image.shape) == 3:
            image = image.transpose(1, 2, 0)
        if len(mask.shape) == 3:
            mask = mask.transpose(1, 2, 0)
        mask = cv2.resize(mask, self.maskresize, interpolation=cv2.INTER_AREA)
        image = cv2.resize(image, self.imageresize, interpolation=cv2.INTER_AREA)
        if len(image.shape) == 3:
            image = image.transpose(2, 0, 1)
        if len(mask.shape) == 3:
            mask = mask.transpose(2, 0, 1)
        return {'image': image, 'mask': mask}

You probably have some image for which the shape condition fails. This is a general error, and you have to debug where an image is not getting transposed.
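For reference, the exception itself comes from the DataLoader's default collate function, which stacks the per-sample tensors, so any single mismatched sample triggers it. A minimal sketch that reproduces it:

import torch

# torch.stack requires identical shapes, so one sample with swapped
# height/width reproduces the reported error
torch.stack([torch.zeros(3, 530, 500), torch.zeros(3, 500, 530)])
# RuntimeError: stack expects each tensor to be equal size, ...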

I am confused… could you suggest some debug code for me?

Just use print statements in the resize and check manually. The resize method looks correct, so there must be something wrong elsewhere (maybe image loading).
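Something like this, for example (a sketch; pass in your own Dataset instance):

def check_shapes(dataset):
    """Print every sample whose image shape differs from the first one."""
    expected = None
    for i in range(len(dataset)):
        shape = tuple(dataset[i]['image'].shape)
        if expected is None:
            expected = shape
        elif shape != expected:
            print('sample {}: image {}, expected {}'.format(i, shape, expected))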

Thanks a lot. I cannot use tensors in PyTorch well yet; I will keep trying.
