ValueError:Expected input batch_size(3) to match target' batch_size(9)

my code as:
def CrossEntropyLoss(self, logit, target):
n, c, h, w = logit.size()
criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,
size_average=self.size_average)
if self.cuda:
criterion = criterion.cuda()
target=target.reshape(-1,513,513)
loss = criterion(logit, target.long())

    if self.batch_average:
        loss /= n

    return loss

def training(self, epoch):
train_loss = 0.0
self.model.train()
tbar = tqdm(self.train_loader)
num_img_tr = len(self.train_loader)
for i, sample in enumerate(tbar):
image, target = sample[‘image’], sample[‘label’]
if self.config[‘network’][‘use_cuda’]:
image, target = image.cuda(), target.cuda()
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
output = self.model(image)
loss = self.criterion(output, target) #
loss.backward()
self.optimizer.step()
train_loss += loss.item()
tbar.set_description(‘Train loss: %.3f’ % (train_loss / (i + 1)))
self.writer.add_scalar(‘train/total_loss_iter’, loss.item(), i + num_img_tr * epoch)

        # Show 10 * 3 inference results each epoch
        if i % (num_img_tr // 10) == 0:
            global_step = i + num_img_tr * epoch
            self.summary.visualize_image(self.writer, self.config['dataset']['dataset_name'], image, target, output, global_step)

    self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
    print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.config['training']['batch_size'] + image.data.shape[0]))
    print('Loss: %.3f' % train_loss)

    #save last checkpoint
    self.saver.save_checkpoint({
        'epoch': epoch + 1,

‘state_dict’: self.model.module.state_dict(),

        'state_dict': self.model.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'best_pred': self.best_pred,
    }, is_best = False, filename='checkpoint_last.pth.tar')

    #if training on a subset reshuffle the data 
    if self.config['training']['train_on_subset']['enabled']:
        self.train_loader.dataset.shuffle_dataset()

Could you print the shape of target before the reshape operation as well as the output for the failing batch?

yep,before reshape:torchsize[3,21,513,513],output:torchsize[3,21,513,513]

In this case, target = target.reshape(-1,513,513) will create a target tensor of [63, 513, 513], which won’t match the batch size of the output of your model.

Is your target one-hot encoded?
If so, create class indices via target = torch.argmax(target, dim=1) and rerun your code.

i did,but RuntimeError:input and target batch or spatial batch sizes dont match:target[3×513×3],input[3×21×513×513] at /pytorch/aten/src/THCUNN/generic/SpaticialClassNLLcriterion.cu:23

If you call target = torch.argmax(target, dim=1) on a target tensor of the shape [3, 21, 513, 513], the result will have a shape of [3, 513, 513].
I’m not sure, why your target has now the shape [3, 513, 3].

Could you recheck the shape of the target before applying the torch.argmax operation, please?

new error:
NotImplementedError

trainer.py
if i % (num_img_tr // 10) == 0:
global_step = i + num_img_tr * epoch
self.summary.visualize_image(self.writer, self.config[‘dataset’][‘dataset_name’], image, target, output, global_step)

summaries.py
def visualize_image(self, writer, dataset, image, target, output, global_step):
grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
writer.add_image(‘Image’, grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
** dataset=dataset), 3, normalize=False, range=(0, 255))**
** writer.add_image(‘Predicted label’, grid_image, global_step)**
grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
dataset=dataset), 3, normalize=False, range=(0, 255))
writer.add_image(‘Groundtruth label’, grid_image, global_step)

datagen_utils.py
def decode_segmap(label_mask, dataset, plot=False):
“”“Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
“””
if dataset == ‘pascal’ or dataset == ‘coco’:
n_classes = 21
label_colours = get_pascal_labels()
elif dataset == ‘cityscapes’:
n_classes = 19
label_colours = get_cityscapes_labels()

elif dataset == 'deepfashion':
    n_classes = 13
    label_colours = get_deepfashion_labels()

elif dataset == 'braintumor':
    n_classes = 3
    label_colours = get_braintumor_labels()


**else:**

** raise NotImplementedError**

It seems that your specified dataset is missing in the visualize_image method and you might need to write the function manually.