ValueError: Expected input batch_size (16) to match target batch_size (8)

import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from torchvision.utils import make_grid


def main():
    # 3 Define hyperparameters
    BATCH_SIZE = 8  # number of samples per batch
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    EPOCH = 10
    # 4 Image transforms
    data_transforms = {
        'train':
            transforms.Compose([
                transforms.Resize(224),
                transforms.RandomResizedCrop(300),
                transforms.RandomHorizontalFlip(),
                transforms.CenterCrop(256),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
        'val':
            transforms.Compose([
                transforms.Resize(224),
                transforms.CenterCrop(256),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ])
    }
    # 5 Prepare the dataset
    # 5.1 Dataset path
    data_path = r'E:\myjupyter\chest_xray'
    # 5.2 Load the datasets
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_path, x),
                                              data_transforms[x]) for x in ['train', 'val']}
    # 5.3 Wrap each dataset in a DataLoader for batched reading
    data_loaders = {x: DataLoader(image_datasets[x], shuffle=True,
                                  batch_size=BATCH_SIZE) for x in ['train', 'val']}
    # 5.4 Sizes (number of images) of the training and validation sets
    data_size = {x: len(image_datasets[x]) for x in ['train', 'val']}

    # 5.5 Class names of the labels: NORMAL / PNEUMONIA
    target_names = image_datasets['train'].classes

    # 6 Show one batch of images (8 images)
    # 6.1 Read one batch of 8 images
    datas, targets = next(iter(data_loaders['train']))

    # 6.2 Stitch the images into a single grid
    out = make_grid(datas, nrow=4, padding=10)
    # 6.3 Display the grid
    # image_show(out, title=[target_names[x] for x in targets])

    # 7 Build the model, optimizer and loss, then train and evaluate
    model = get_model().to(DEVICE)
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()

    train(model, DEVICE, data_loaders['train'], criterion, optimizer, EPOCH)
    test(model, DEVICE, data_loaders['val'], criterion, EPOCH)
def get_model():
    model_pre = models.resnet50(pretrained=True)  # load the pretrained model
    # Freeze all parameters of the pretrained model
    for param in model_pre.parameters():
        param.requires_grad = False
    # Fine-tuning: replace the last two layers of the ResNet and return the new model
    model_pre.avgpool = AdaptiveConcatPool2d()  # swap in the custom pooling layer
    model_pre.fc = nn.Sequential(
        nn.Flatten(),           # flatten all dimensions except the batch
        nn.BatchNorm1d(2048),   # batch norm over the pooled features
        nn.Dropout(0.5),        # drop some neurons
        nn.Linear(2048, 512),   # linear layer
        nn.ReLU(),              # activation
        nn.BatchNorm1d(512),    # batch norm
        nn.Linear(512, 2),
        nn.LogSoftmax(dim=1),   # log-softmax over the two classes
    )
    return model_pre
def train(model, device, train_loader, criterion, optimizer, epoch, writer=None):
    model.train()
    total_loss = 0  # running training loss, initialised to 0
    # Iterate over the training set and update the model parameters
    for batch_id, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()             # reset gradients
        output = model(data)              # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()                   # backpropagation
        optimizer.step()                  # update parameters
        total_loss += loss.item()         # accumulate the training loss
    if writer is not None:
        writer.add_scalar('Train Loss', total_loss / len(train_loader), epoch)
        writer.flush()
    return total_loss / len(train_loader)  # return the average loss
class AdaptiveConcatPool2d(nn.Module):
    def __init__(self, size=None):
        super().__init__()
        size = size or (1, 1)                       # output size of the adaptive pools, defaults to (1, 1)
        self.pool_one = nn.AdaptiveAvgPool2d(size)  # pooling layer 1
        self.pool_two = nn.AdaptiveAvgPool2d(size)  # pooling layer 2

    def forward(self, x):
        return torch.cat([self.pool_one(x), self.pool_two(x)])  # concatenate the two pooled outputs

When I run the code, I get "ValueError: Expected input batch_size (16) to match target batch_size (8)".
PyCharm shows that the error is raised at "loss = criterion(output, target)" in the train function,
so I set a breakpoint there, and this is what I see:

The output is a 16x2 tensor, while the target has only 8 elements. I set batch_size=8 in the DataLoader, so I don't understand where the 16 comes from.
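In code terms, the shapes right before the loss call look like this (the input shape is what I would expect from CenterCrop(256) in my transforms, so that part is an assumption):

    # inside train(), at the breakpoint
    output = model(data)                # forward pass
    print(data.shape)                   # torch.Size([8, 3, 256, 256])  -- batch of 8, as configured
    print(output.shape)                 # torch.Size([16, 2])           -- batch dimension is 16?!
    print(target.shape)                 # torch.Size([8])
    loss = criterion(output, target)    # ValueError raised here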
Thanks for any suggestions :)

I have found the solution.

    def forward(self, x):
        return torch.cat([self.pool_one(x), self.pool_two(x)])

I didn't pass the dim argument, so torch.cat defaulted to dim=0 and concatenated along the batch dimension, which doubled the batch from 8 to 16. That is what caused the mismatch.
The correct code is:

    def forward(self, x):
        return torch.cat([self.pool_one(x), self.pool_two(x)], dim=1)
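Here is a minimal shape check that shows the difference (assuming the 2048-channel feature map that ResNet50 produces before pooling):

    import torch

    x = torch.randn(8, 2048, 8, 8)             # e.g. ResNet50 features for a batch of 8
    p = torch.nn.AdaptiveAvgPool2d((1, 1))(x)  # torch.Size([8, 2048, 1, 1])

    print(torch.cat([p, p]).shape)          # torch.Size([16, 2048, 1, 1]) -- dim=0 doubles the batch
    print(torch.cat([p, p], dim=1).shape)   # torch.Size([8, 4096, 1, 1])  -- dim=1 doubles the channels

One thing to keep in mind: with dim=1 the flattened features going into fc are 4096-dimensional, so the BatchNorm1d(2048) and Linear(2048, 512) at the start of fc also need to be changed to expect 4096 inputs.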

Sorry for the trouble.