ValueError: Expected input batch_size (6) to match target batch_size (16).

import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
import itertools

device=torch.device("cuda" if torch.cuda.is_available() else "cpu")

"""
class TrainSet(Dataset):
    def __init__(self, data):
        # 定义好 image 的路径
        self.data1, self.data2, self.label = data[:, 0:512].float(), data[:, 512:768].float(), data[:, -1].long()

    def __getitem__(self, index):
        return self.data1[index], self.data2[index], self.label[index]

    def __len__(self):
        return len(self.data)
"""

class TrainSet(Dataset):
    def __init__(self, data):
        # all columns except the last are features; the last column is the class label
        self.data, self.label = data[:, :-1].float(), data[:, -1].long()

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def __len__(self):
        return len(self.data)



df = pd.read_csv("C:\\科研\\人工喉EMG\\峰值窗口\\Process_A\\train.csv",header=None)

df= np.array(df)
df= torch.Tensor(df)

#print(df.shape)

"""
df_numpy_mean = np.mean(df[:-1])
df_numpy_std = np.std(df[:-1])
"""
#df_numpy = (df[:, :-1] - 4.77E+01) / 432.0403024
#df_numpy2 = df[:, -1]
#df_numpy2 = df_numpy2.unsqueeze(1)
#print(df_numpy.shape,df_numpy2.shape)
#df_numpy = torch.cat((df_numpy,df_numpy2), 1)

#df = torch.Tensor(df_numpy)
#print(df.shape)
#print(df)

tf = pd.read_csv("C:\\科研\\人工喉EMG\\峰值窗口\\Process_A\\test.csv", header=None)

tf= np.array(tf)
tf= torch.Tensor(tf)

"""
df_numpy_mean = np.mean(df[:-1])
df_numpy_std = np.std(df[:-1])
"""
#tf_numpy = (tf[:, :-1] - 4.77E+01) / 432.0403024
#tf_numpy2 = tf[:, -1]
#tf_numpy2 = tf_numpy2.unsqueeze(1)
#print(tf_numpy.shape,tf_numpy2.shape)
#tf_numpy = torch.cat((tf_numpy,tf_numpy2), 1)

#tf = torch.Tensor(tf_numpy)

#print(tf)
#print(tf.shape)


attack_types = ['0', '1', '2', '3', '4', '5']

trainset = TrainSet(df)
trainloader = DataLoader(trainset, batch_size=16, shuffle=True)

testset = TrainSet(tf)
testloader = DataLoader(testset, batch_size=16, shuffle=True)
#print(tf)

class myNet(nn.Module):
      def __init__(self):
          super(myNet,self).__init__()
          self.layer1 = nn.Sequential(
                      nn.BatchNorm1d(1),
                      nn.Conv1d(1,5,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(2),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(5),
                      nn.Conv1d(5,10,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(2),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(10),
                      nn.Conv1d(10,20,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(3),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(20),
                      nn.Conv1d(20,40,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(5),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(40),
                      nn.Conv1d(40,80,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(5),
                      nn.Dropout(0.5))
          self.layer2 = nn.Sequential(
                      nn.BatchNorm1d(1),
                      nn.Conv1d(1,10,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(2),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(10),
                      nn.Conv1d(10,20,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(3),
                      nn.Dropout(0.5),
                      nn.BatchNorm1d(20),
                      nn.Conv1d(20,80,3,1,1),
                      nn.ReLU(),
                      nn.MaxPool1d(5),
                      nn.Dropout(0.5))
          self.fc1 = nn.Sequential(
                     nn.BatchNorm1d(400),
                     nn.Linear(400, 200),
                     nn.Dropout(0.5))
          self.fc2 = nn.Sequential(
                     nn.BatchNorm1d(200),
                     nn.Linear(200, 50),
                     nn.Dropout(0.5))
          self.fc3 = nn.Sequential(
                     nn.BatchNorm1d(50),
                     nn.Linear(50, 14))
      def forward(self, x):
          print(x.shape)
          out = x.view(x.size(0), 1, -1)  # reshape to (batch, 1, length), keeping a single channel dimension
          out = self.layer1(out)
          out = out.view(x.size(0), -1)
          out = self.fc1(out)
          out = self.fc2(out)
          out = self.fc3(out)
          return out


def confusion_matrix(preds, labels, conf_matrix):
    for p, t in zip(preds, labels):
        conf_matrix[p, t] += 1
    return conf_matrix

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
   if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
   else:
        print("Confusion matrix, without normalization")
   print(cm)
   plt.imshow(cm, interpolation='nearest', cmap=cmap)
   plt.title(title)
   plt.colorbar()
   tick_marks = np.arange(len(classes))
   plt.xticks(tick_marks, classes, rotation=90,fontsize=10)
   plt.yticks(tick_marks, classes,fontsize=10)
   plt.axis("equal")
   ax = plt.gca()  # get the current axes
   left, right = plt.xlim()  # get the x-axis limits
   ax.spines['left'].set_position(('data', left))
   ax.spines['right'].set_position(('data', right))
   for edge_i in ['top', 'bottom', 'right', 'left']:
        ax.spines[edge_i].set_edgecolor("white")
#for first_index in range(len(classes)):
#       for second_index in range(len(classes)):
#           plt.text(first_index, second_index, conf_matrix[first_index][second_index])
   thresh = cm.max() / 2.
   for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        num = '{:.2f}'.format(cm[i, j]) if normalize else int(cm[i, j])
        plt.text(j, i, num,
                verticalalignment='center',
                horizontalalignment="center",
                color="white" if num > thresh else "black", fontsize=20)
   plt.tight_layout()
   plt.ylabel('True label',fontsize=10)
   plt.xlabel('Predicted label',fontsize=10)
   plt.show()

net=myNet()

#net=torch.load('smallerEEG5.pkl')

net=net.to(device)

s=[]
speed = 0.00005
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=speed)
for epoch in range(20):
   train_loss = 0.
   train_acc  = 0.
   if epoch > 49:
       speed=0.00001
   if epoch > 499:
       speed=0.000005
   if epoch > 999:
       speed=0.000001
   for t, (x, y) in enumerate(trainloader):
#     print(x.shape[0])
#     print(x.shape)
     x, y=x.to(device),y.to(device)
     # out = net(x[: , 0:3000],x[: , 3000:3150])
     out = net(x[: , 0:1500])
#     print(out)
#     print(y)
#     print(t)
     loss = loss_func(out, y)  # compute the loss
     train_loss += loss.item()
     _, pred = torch.max(out.data, dim=1)
     train_correct = (pred == y).sum()
     train_acc += train_correct.item()
     optimizer.zero_grad()  # clear accumulated gradients
     loss.backward()
     optimizer.step()
     if epoch % 2 == 1 and t==36:
         print('Epoch: {}, Train Loss: {:.6f}, Acc: {:.6f}'.format(epoch + 1, train_loss / (df.shape[0]), train_acc / (df.shape[0])))
         s.append(epoch + 1)
         s.append(train_loss / (df.shape[0]))
         s.append(train_acc / (df.shape[0]))
         np.savetxt('Trainloss.csv', s, delimiter=',')

net=net.to("cpu")
net.eval()
for epoch in range(1):
 eval_loss = 0.
 eval_acc = 0.
 Loss_list = []
 Accuracy_list = []
 conf_matrix = torch.zeros(6,6)
 for batch_x, batch_y in testloader:
   with torch.no_grad():
      out = net(x)
      loss = loss_func(out, batch_y)
      eval_loss += loss.item()
      _, pred = torch.max(out.data, dim=1)
      conf_matrix = confusion_matrix(pred, labels=batch_y, conf_matrix=conf_matrix)
      num_correct = (pred == batch_y).sum()
      eval_acc += num_correct.item()
      print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (tf.shape[0]), eval_acc / (tf.shape[0])))
plot_confusion_matrix(conf_matrix.numpy(), classes=attack_types, normalize=False, title='Confusion matrix')
np.savetxt('CM.csv', conf_matrix, delimiter=',')

torch.save(net, 'CATAT.pkl')

There is an error in my code: it raises "ValueError: Expected input batch_size (6) to match target batch_size (16)". What is causing this mismatch?

Since you are specifying batch_size=16, it seems the input tensor in the failing operation has a wrong shape. I don't see anything obviously wrong in your code, so could you post the stack trace and add the missing parts needed to make your code executable, so we can reproduce the issue?
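
In the meantime, a quick way to narrow it down on your side is to compare the batch dimension of the model output and of the target right before every loss_func call, in both the training and the test loop; whichever call reports 6 against 16 is the one raising the error. A minimal sketch of such a check (the checked_loss helper and the where labels are only illustrative, not part of your code):

# Debugging sketch: report a batch-size mismatch and its location
# before nn.CrossEntropyLoss raises the ValueError.
def checked_loss(out, target, loss_fn, where=""):
    # out and target must agree in their batch dimension (dim 0)
    if out.size(0) != target.size(0):
        print("batch mismatch in {}: output {} vs target {}".format(
            where, tuple(out.shape), tuple(target.shape)))
    return loss_fn(out, target)

# usage: loss = checked_loss(out, y, loss_func, where="train") in the training
# loop, and loss = checked_loss(out, batch_y, loss_func, where="test") in the
# test loop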