Where is the failure?

import os
import string
import time

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils import data
from torch.utils.data import DataLoader
from torchvision import transforms
from numpy import array
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
batch_size = 64
step = 40

path = r'E:\31data\data'
pathlist = os.listdir(path)
print(np.load(os.path.join(path, pathlist[0])).shape)
class Dataset(data.Dataset):
    def __init__(self, root_dir):
        self.root = root_dir
        self.root_dir = os.listdir(self.root)

    def __getitem__(self, item):
        sample1 = []
        path_status = os.path.join(self.root, self.root_dir[item])
        Data = np.load(path_status)
        for index in range(0, Data.shape[0], 9116):
            sample_single = Data[0 + index:243 + index]
            sample1.append(sample_single)
        path = r'E:\31data\data'
        samples = os.listdir(path)
        characters = string.printable  # all printable ASCII characters
        # map every printable ASCII character to an integer index for later lookup
        token_index = dict(zip(characters, range(1, len(characters) + 1)))
        max_length = 243
        results = np.zeros((64, max_length, max(token_index.values()) - 69))
        for i, sample in enumerate(samples):
            for j, character in enumerate(sample):
                index = token_index.get(character)  # numeric index for this character
                results[i, j, index] = 1
        results = torch.tensor(results)
        Data_tensor = torch.tensor(sample1)
        return Data_tensor, results

    def __len__(self):
        return len(self.root_dir)
Mydataset = Dataset(path)
traindata = np.stack([Mydataset[i][0] for i in range(31)])
traindata = np.reshape(traindata, (-1, 243, 58))
print(traindata.shape)
traindata = torch.tensor(traindata)
traindata = traindata.permute(0, 2, 1)  # (N, 243, 58) -> (N, 58, 243) for Conv1d
print(traindata.shape)
labeldata = np.stack([Mydataset[i][1] for i in range(31)])
print(labeldata.shape)
labeldata = np.reshape(labeldata, (-1, 243, 31))
labeldata = torch.tensor(labeldata)
trainset = list(zip(traindata, labeldata))

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv = torch.nn.Sequential(  # output = (input + 2*padding - kernel_size) / stride + 1
            torch.nn.Conv1d(in_channels=58, out_channels=128, kernel_size=5, stride=3, padding=1),
            torch.nn.ReLU(),  # input (64, 58, 243) -> (64, 128, 81)
            torch.nn.MaxPool1d(stride=2, kernel_size=2),  # (64, 128, 40)

            torch.nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5, stride=3, padding=1),  # (128,64,13)
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(stride=2, kernel_size=2),  # (64,64,6)

            torch.nn.Conv1d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),  # (64,32,3)
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(stride=2, kernel_size=2),  # (64,32,1)
        )
        self.fc = torch.nn.Linear(32, 31)

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size()[0], -1)
        x = self.fc(x)
        y_hat = torch.nn.functional.softmax(x, dim=1)
        return y_hat

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
batch_size = 64
num_epochs = 10
learning_rate = 5e-5

# Load the dataset

# Define the model

train_model = Model().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(train_model.parameters(), lr=learning_rate)

train_size = int(len(trainset) * 0.8)  # len(trainset) = 9372
print(train_size)  # 7497
valid_size = len(trainset) - train_size

train_dataset, valid_dataset = torch.utils.data.random_split(trainset, [train_size, valid_size])

# use the train split here, not the full trainset
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
print(len(train_loader))

valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, drop_last=True)

def test():
    train_model.load_state_dict(torch.load('E:/cwru/pytorch-cnn-cwru-master/data/raw/data.npy'))
    # torch.save(net, 'data/best_model.pt')
    train_model.eval()
    valid_acc = get_valid_acc(train_model)
    print(valid_acc)

def get_valid_acc(valid_model):
    valid_model.eval()
    li = list()
    for x_valid, y_valid in valid_loader:
        x_valid = x_valid.to(device)
        y_valid = y_valid.to(device)
        y_hat = valid_model(x_valid.float())  # model weights are float32, so cast the input
        y_hat = torch.argmax(y_hat, 1)
        y = torch.argmax(y_valid, 1)
        res = y_hat == y
        res2 = res.int()
        acc = res2.sum().item() / batch_size
        li.append(acc)
    valid_model.train()
    return sum(li) / len(li)

def train():
    total_step = len(train_loader)  # len(train_loader) = 58, len(valid_loader) = 14
    acc_valid_old = 0
    a = time.time()
    for epoch in range(num_epochs):
        # li = list()
        for i, (trains, labels) in enumerate(train_loader):
            trains = trains.to(device)
            labels = labels.to(device, torch.float)
            # forward pass and loss
            outputs = train_model(trains)
            print(outputs)
            loss = criterion(outputs, labels)
            print(loss)
            # compute accuracy
            # y_hat = torch.argmax(outputs, 1)
            # y = torch.argmax(labels, 1)
            # res = y_hat == y
            # res2 = res.int()
            # acc = res2.sum().item() / batch_size
            # li.append(acc)
            # backpropagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 10 == 0:
                # acc_valid = get_valid_acc(train_model)
                print('Epoch: [{}/{}], Step: [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

                # if acc_valid > acc_valid_old:
                #     torch.save(train_model.state_dict(), 'data/best.pt')
                #     acc_valid_old = acc_valid
        #     li = list()
        # b = time.time()
        # print(b - a)
        # a = time.time()

if __name__ == '__main__':
    train()
    # test()

Traceback (most recent call last):
  File "D:\pythonProject\fast.py", line 101, in <module>
    optimizer = torch.optim.Adam(train_model.parameters(), lr=learning_rate)
  File "D:\anaconda\envs\pytorch\lib\site-packages\torch\optim\adam.py", line 137, in __init__
    super(Adam, self).__init__(params, defaults)
  File "D:\anaconda\envs\pytorch\lib\site-packages\torch\optim\optimizer.py", line 61, in __init__
    raise ValueError("optimizer got an empty parameter list")
ValueError: optimizer got an empty parameter list

I cannot reproduce the issue using this minimal code snippet taken from your post:

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels=58, out_channels=128, kernel_size=5, stride=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(stride=2, kernel_size=2),
            torch.nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5, stride=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(stride=2, kernel_size=2),
            torch.nn.Conv1d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(stride=2, kernel_size=2),
        )
        self.fc = torch.nn.Linear(32, 31)

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size()[0], -1)
        x = self.fc(x)
        y_hat = torch.nn.functional.softmax(x, dim=1)
        return y_hat

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

train_model = Model().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(train_model.parameters(), lr=1.)

so I don’t know what might be causing the issue.
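That said, one common way to trigger this exact error, in case it applies to your actual script, is a misnamed `__init__` (for example, the double underscores getting swallowed, as happened in your unformatted post): `nn.Module`'s default constructor runs instead, no submodules are ever registered, and `parameters()` yields nothing. A minimal sketch that does reproduce the error (the misspelled `init` is deliberate):

```python
import torch

class BrokenModel(torch.nn.Module):
    def init(self):  # deliberately misspelled; should be __init__
        super().__init__()
        self.fc = torch.nn.Linear(32, 31)

model = BrokenModel()            # nn.Module's default __init__ runs; self.fc is never created
print(list(model.parameters()))  # [] -- nothing was registered
optimizer = torch.optim.Adam(model.parameters(), lr=1.)
# ValueError: optimizer got an empty parameter list
```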

You can post code snippets by wrapping them into three backticks ```, which makes debugging easier.
Feel free to update the code to a minimal and executable code snippet which would reproduce the issue, so that I could debug it further.
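In the meantime, a quick sanity check you could run right before creating the optimizer (just a diagnostic sketch using the names from your post):

```python
# both should be non-empty / non-zero before parameters() is passed to Adam
print(train_model)  # lists the registered submodules
print(sum(p.numel() for p in train_model.parameters()))  # total parameter count
```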