I can't get good results from my LSTM model

Hello everyone — I really need some help.

I am trying to train an LSTM model on some audio data (3-D tensors) to classify whether the speaker in the audio is happy or not. I have tried reducing the number of timesteps from 10000 down to 20, since shorter sequences are more suitable for an LSTM.

But when I use a learning rate between 0.0001 and 0.001, the loss may go down at first, but then it rises continuously.
With a lower learning rate it seems to work at first, but later it degrades again.

This is my code. Could anyone give me some suggestions? Thanks a lot.

MODEL:

# neural network
import time

import numpy as np
import torch
from torch import nn

class neural_network(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Runs a batch of sequences through a multi-layer bidirectional LSTM,
    keeps the output at the last timestep, and maps it to class logits
    with a single linear layer.

    Args:
        input_size:  number of features per timestep.
        hidden_dim:  LSTM hidden size (per direction).
        output_size: number of output classes.
        num_layers:  number of stacked LSTM layers.
        dropout:     inter-layer LSTM dropout probability.
    """

    def __init__(self, input_size, hidden_dim, output_size, num_layers, dropout=0.5):
        # BUG FIX: the method was named `init` instead of `__init__`, so
        # neither this constructor nor nn.Module's setup ever ran when the
        # model was instantiated.
        super(neural_network, self).__init__()

        self.hidden_dim = hidden_dim
        self.output_size = output_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_dim, num_layers,
                            dropout=dropout, batch_first=True,
                            bidirectional=True)
        # bidirectional => LSTM outputs are 2 * hidden_dim wide
        self.fc1 = nn.Linear(hidden_dim * 2, output_size)
        # NOTE: kept for interface compatibility but intentionally NOT applied
        # in forward(): CrossEntropyLoss expects raw logits, not probabilities.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x, hidden):
        """Return (logits, new_hidden) for a batch x of shape (batch, seq, features)."""
        output, hidden = self.lstm(x, hidden)
        # Keep only the last timestep -> (batch, 2 * hidden_dim).
        # NOTE(review): for a bidirectional LSTM the backward direction's
        # summary of the sequence lives at timestep 0; concatenating the final
        # hidden states of both directions would use both — worth trying.
        output = output[:, -1, :]
        output = self.fc1(torch.relu(output))
        return output, hidden

    def init_hidden(self, batch_size):
        """Return zeroed (h0, c0), each of shape (num_layers * 2, batch, hidden_dim)."""
        weight = next(self.parameters()).data
        # `device` is expected to be defined at module level by the caller.
        hidden = (weight.new(self.num_layers * 2, batch_size, self.hidden_dim).zero_().to(device),
                  weight.new(self.num_layers * 2, batch_size, self.hidden_dim).zero_().to(device))
        return hidden

(end of model code)

some parameters

# Hyperparameters for the audio-emotion classifier.
input_size = 79        # features per timestep
output_size = 2        # binary classification: happy / not happy
hidden_dim = 128       # LSTM hidden size per direction
num_layers = 3         # stacked LSTM layers
dropout = 0.2          # inter-layer LSTM dropout
batch_size = 128

# CrossEntropyLoss expects raw logits, so no softmax in the model's forward().
criterion_audio = torch.nn.CrossEntropyLoss() #
# NOTE(review): `model_audio` is assumed to be constructed earlier — not shown here.
# lr=1e-5 is very small for Adam; 1e-4 to 1e-3 is the usual starting range.
optimizer_audio = torch.optim.Adam(model_audio.parameters(),lr=0.00001)
#optimizer_audio = torch.optim.SGD(model_audio.parameters(), lr=1e-6, momentum=0.8)
num_epochs = 300

TRAIN:

Define the training function

def train(model, device, train_loader, criterion, optimizer, num_epochs, batch_size):
    """Train `model` for `num_epochs` epochs over `train_loader`.

    Args:
        model:        module exposing forward(x, hidden) -> (logits, hidden)
                      and init_hidden(batch_size).
        device:       torch.device to move batches to.
        train_loader: iterable of (data, target) batches.
        criterion:    loss function taking (logits, target).
        optimizer:    torch optimizer over model.parameters().
        num_epochs:   number of passes over the loader.
        batch_size:   used to size the initial hidden state.

    Returns:
        List of the mean training loss of each epoch.
    """
    history = []
    for epoch in range(num_epochs):
        t1 = time.time()
        hs = model.init_hidden(batch_size)  # fresh hidden state each epoch
        train_loss = []        # per-batch losses this epoch
        accuracy_scores = []   # per-batch accuracies this epoch
        model.train()
        for data, target in train_loader:
            data = data.to(device)
            target = target.to(device)
            output, hs = model(data, hs)
            # Detach the hidden state so gradients do not flow across batches
            # (otherwise backward() would try to traverse freed graphs).
            hs = tuple(h.data for h in hs)
            loss = criterion(output, target.long())
            train_loss.append(loss.item())
            # BUG FIX: gradients were never cleared, so they accumulated
            # across every batch and epoch — which is exactly why the loss
            # dropped at first and then climbed. Zero them before backward().
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            preds = output.argmax(dim=1)  # predicted class per sample
            # Pure-torch accuracy; equivalent to sklearn's accuracy_score.
            accuracy_scores.append((preds == target).float().mean().item())
        epoch_loss = float(np.mean(train_loss))
        history.append(epoch_loss)
        print(f'Epoch {epoch}/{num_epochs} --- train loss {np.round(epoch_loss, 5)}')
        print(f'---  准确率{np.mean(accuracy_scores)}')
        print(f'--- time_consume {np.round((time.time() - t1), 5)}s ')
    return history