Hello ptrblck, thanks for your reply.
I removed the accuracy calculation from the training loop because it gave an unrealistic result. If there is a correct way to compute the accuracy during training, please let me know.
def train(epoch):
    """Run one training epoch and log loss + running accuracy.

    Args:
        epoch: 1-based epoch number, used only for logging.

    Relies on module-level globals: model, optimizer, criterion,
    train_loader, args (cuda, log_interval).
    """
    model.train()
    correct_train = 0
    total_train = 0
    for batch_idx, (data, target, length) in enumerate(train_loader):
        # Add a channel dimension: (N, L) -> (N, 1, L).
        # assumes data arrives as a numpy-convertible batch — TODO confirm loader output
        data = torch.FloatTensor(np.expand_dims(data, axis=1))
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        optimizer.zero_grad()
        output = model(data)
        # target is one-hot; the criterion wants class indices.
        target_idx = torch.max(target, 1)[1]
        loss = criterion(output, target_idx)
        loss.backward()
        optimizer.step()

        # Accuracy bookkeeping.
        # BUG FIX: correct_train was never incremented, so the reported
        # training accuracy was always 0 (the "unrealistic result").
        pred = output.max(1)[1]  # index of the max log-probability per sample
        correct_train += (pred == target_idx).sum().item()
        # BUG FIX: target.nelement() counted batch * num_classes one-hot
        # entries; count samples instead.
        total_train += target_idx.size(0)

        if batch_idx % args.log_interval == 0:
            train_accuracy = 100. * correct_train / total_train
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAcc: {:.2f}%'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(),
                train_accuracy))
def test():
    """Evaluate the model on the validation set and print loss/accuracy.

    Relies on module-level globals: model, criterion, validation_loader,
    args (cuda).
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    # BUG FIX: evaluation previously ran with autograd enabled, so every
    # accumulated loss tensor kept its graph alive (wasted memory).
    with torch.no_grad():
        for data, target, length in validation_loader:
            # Add a channel dimension: (N, L) -> (N, 1, L).
            data = torch.FloatTensor(np.expand_dims(data, axis=1))
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # target is one-hot; the criterion wants class indices.
            target_idx = torch.max(target, 1)[1]
            test_loss += criterion(output, target_idx).item()
            pred = output.max(1)[1]  # index of the max log-probability
            correct += (pred == target_idx).sum().item()

    # BUG FIX: the summed loss was printed as "Average loss" without
    # dividing; average it over the number of batches.
    test_loss /= len(validation_loader)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(validation_loader.dataset),
            100. * correct / len(validation_loader.dataset)))
# Train for the configured number of epochs, validating after each one.
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test()