Getting no validation accuracy

Hello,
I'm training a CNN-LSTM model for video classification. I get accuracy during training, but on the test set I get no accuracy at all, even though the test loss is lower than the training loss. Does anyone have an idea what this means?

This is the model code:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# args, criterion, optimizer, train_loader and validation_loader are
# defined elsewhere in the script.

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 5, kernel_size=5, padding=1)
        self.conv2 = nn.Conv2d(5, 10, kernel_size=5)
        #self.conv2_drop = nn.Dropout2d()
        #self.fc1 = nn.Linear(32 * 193 * 7, args.output_dim * 2)
        #self.fc2 = nn.Linear(args.output_dim * 2, args.output_dim)

    def forward(self, x):
        # conv -> 2x2 max pool -> ReLU, twice; no classifier head here,
        # the raw feature maps are returned instead.
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        #x = x.view(-1, 1369720)
        #x = F.relu(self.fc1(x))
        #x = F.dropout(x, p=0.4, training=self.training)
        #x = self.fc2(x)
        #return F.log_softmax(x, dim=1)
        return x

class Combine(nn.Module):
    def __init__(self):
        super(Combine, self).__init__()
        self.cnn = CNN()
        # input_size has to match C * H of the CNN output after the
        # reshape in forward() below.
        self.rnn = nn.LSTM(
            input_size=31130,
            hidden_size=args.unit_dim,
            num_layers=args.layer_dim,
            batch_first=True)
        self.linear = nn.Linear(args.unit_dim, args.output_dim)

    def forward(self, x):
        # x arrives as (batch, channels, H, W); run it through the CNN first.
        c_out = self.cnn(x)
        batch_size, C, H, W = c_out.size()

        # Use the width axis as the time axis. Note that view() only
        # reinterprets contiguous memory; to really make W the sequence
        # dimension you would normally permute first, e.g.
        # c_out.permute(0, 3, 1, 2).contiguous().view(batch_size, W, -1).
        timesteps = W
        r_in = c_out.view(batch_size, timesteps, -1)

        # Initialize hidden and cell states with zeros.
        h0 = torch.zeros(args.layer_dim, batch_size, args.unit_dim).cuda()
        c0 = torch.zeros(args.layer_dim, batch_size, args.unit_dim).cuda()

        r_out, (h_n, c_n) = self.rnn(r_in, (h0, c0))
        # Classify from the output of the last time step.
        r_out2 = self.linear(r_out[:, -1, :])

        return F.log_softmax(r_out2, dim=1)
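
Since the LSTM expects input_size=31130, which has to equal C * H of the CNN output after the reshape in forward(), a quick sanity check is to push a dummy tensor through the CNN and inspect the shapes. A minimal sketch (the 50x100 input size is made up; use your real frame size):

import torch

dummy = torch.randn(2, 1, 50, 100)   # (batch, channels, H, W)
feat = CNN()(dummy)
print(feat.shape)                    # torch.Size([2, 10, 10, 22]) for this input

batch_size, C, H, W = feat.shape
print(C * H)                         # this must equal the LSTM input_size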

And here's the result:


Train Epoch: 1 [0/148 (0%)]	Loss: 2.717366, Accuracy: 8/148 (5%)
Train Epoch: 1 [10/148 (8%)]	Loss: 2.718801, Accuracy: 8/148 (5%)
Train Epoch: 1 [20/148 (17%)]	Loss: 2.718715, Accuracy: 8/148 (5%)
Train Epoch: 1 [30/148 (25%)]	Loss: 2.686563, Accuracy: 19/148 (12%)
Train Epoch: 1 [40/148 (33%)]	Loss: 2.703117, Accuracy: 24/148 (16%)
Train Epoch: 1 [50/148 (42%)]	Loss: 2.713950, Accuracy: 34/148 (22%)
Train Epoch: 1 [60/148 (50%)]	Loss: 2.720159, Accuracy: 44/148 (29%)
Train Epoch: 1 [70/148 (58%)]	Loss: 2.680100, Accuracy: 54/148 (36%)
Train Epoch: 1 [80/148 (67%)]	Loss: 2.696464, Accuracy: 64/148 (43%)
Train Epoch: 1 [90/148 (75%)]	Loss: 2.705050, Accuracy: 74/148 (50%)
Train Epoch: 1 [100/148 (83%)]	Loss: 2.696478, Accuracy: 84/148 (56%)
Train Epoch: 1 [99/148 (92%)]	Loss: 2.744526, Accuracy: 84/148 (56%)

Test set: Average loss: 0.0550, Accuracy: 0/148 (0%)

Train Epoch: 2 [0/148 (0%)]	Loss: 2.695786, Accuracy: 20/148 (13%)
Train Epoch: 2 [10/148 (8%)]	Loss: 2.703209, Accuracy: 30/148 (20%)
Train Epoch: 2 [20/148 (17%)]	Loss: 2.720705, Accuracy: 30/148 (20%)
Train Epoch: 2 [30/148 (25%)]	Loss: 2.679636, Accuracy: 50/148 (33%)
Train Epoch: 2 [40/148 (33%)]	Loss: 2.698552, Accuracy: 50/148 (33%)
Train Epoch: 2 [50/148 (42%)]	Loss: 2.717396, Accuracy: 50/148 (33%)
Train Epoch: 2 [60/148 (50%)]	Loss: 2.736416, Accuracy: 50/148 (33%)
Train Epoch: 2 [70/148 (58%)]	Loss: 2.707628, Accuracy: 50/148 (33%)
Train Epoch: 2 [80/148 (67%)]	Loss: 2.713427, Accuracy: 58/148 (39%)
Train Epoch: 2 [90/148 (75%)]	Loss: 2.688998, Accuracy: 58/148 (39%)
Train Epoch: 2 [100/148 (83%)]	Loss: 2.727160, Accuracy: 63/148 (42%)
Train Epoch: 2 [99/148 (92%)]	Loss: 2.706110, Accuracy: 90/148 (60%)

Test set: Average loss: 0.0550, Accuracy: 0/148 (0%)

Train Epoch: 3 [0/148 (0%)]	Loss: 2.719501, Accuracy: 0/148 (0%)
Train Epoch: 3 [10/148 (8%)]	Loss: 2.714300, Accuracy: 30/148 (20%)
Train Epoch: 3 [20/148 (17%)]	Loss: 2.707472, Accuracy: 50/148 (33%)
Train Epoch: 3 [30/148 (25%)]	Loss: 2.699172, Accuracy: 60/148 (40%)
Train Epoch: 3 [40/148 (33%)]	Loss: 2.707073, Accuracy: 60/148 (40%)
Train Epoch: 3 [50/148 (42%)]	Loss: 2.726472, Accuracy: 60/148 (40%)
Train Epoch: 3 [60/148 (50%)]	Loss: 2.692422, Accuracy: 70/148 (47%)
Train Epoch: 3 [70/148 (58%)]	Loss: 2.712257, Accuracy: 70/148 (47%)
Train Epoch: 3 [80/148 (67%)]	Loss: 2.706228, Accuracy: 90/148 (60%)
Train Epoch: 3 [90/148 (75%)]	Loss: 2.697551, Accuracy: 100/148 (67%)
Train Epoch: 3 [100/148 (83%)]	Loss: 2.710042, Accuracy: 100/148 (67%)
Train Epoch: 3 [99/148 (92%)]	Loss: 2.698733, Accuracy: 100/148 (67%)

Test set: Average loss: 0.0550, Accuracy: 0/148 (0%)

Could you post the code you use to calculate the training and test accuracy?
Do both datasets have the same length? 148 seems to be the total number of samples in both cases.

Hello ptrblck, thanks for your reply!

I removed the accuracy calculation from the training loop because it gave unrealistic results, so if there is a correct way to compute the training accuracy, please pass it to me :slight_smile: (a minimal sketch follows after the train function below). Here are the train and test functions:

def train(epoch):
    model.train()
    correct = 0
    for batch_idx, (data, target, length) in enumerate(train_loader):
        # Add a channel dimension: (batch, H, W) -> (batch, 1, H, W).
        data = np.expand_dims(data, axis=1)
        data = torch.FloatTensor(data)
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        optimizer.zero_grad()
        output = model(data)

        # Targets are one-hot, so convert them to class indices for the loss.
        loss = criterion(output, torch.max(target, 1)[1])
        loss.backward()
        optimizer.step()

        # Index of the max log-probability per sample, shape (batch, 1).
        pred = output.data.max(1, keepdim=True)[1]
        # view_as keeps both sides (batch, 1); comparing a (batch, 1) tensor
        # against a (batch,) one broadcasts to (batch, batch) and gives a
        # meaningless count.
        correct += pred.eq(torch.max(target, 1)[1].view_as(pred)).sum().item()

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
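
Since the training accuracy was asked about, here is a minimal sketch of how to report a running accuracy (the name train_with_accuracy is made up; it assumes one-hot targets and the same model, criterion, optimizer and train_loader as above):

def train_with_accuracy(epoch):
    # Hypothetical variant of train() that also reports a running accuracy.
    model.train()
    correct = 0
    total = 0
    for batch_idx, (data, target, length) in enumerate(train_loader):
        data = torch.FloatTensor(np.expand_dims(data, axis=1))
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        optimizer.zero_grad()
        output = model(data)
        target_idx = torch.max(target, 1)[1]   # one-hot -> class indices
        loss = criterion(output, target_idx)
        loss.backward()
        optimizer.step()

        pred = output.argmax(dim=1)            # (batch,) predicted classes
        correct += pred.eq(target_idx).sum().item()
        total += target_idx.size(0)            # samples seen so far

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {}\tLoss: {:.6f}\tAccuracy: {}/{} ({:.0f}%)'.format(
                epoch, loss.item(), correct, total, 100. * correct / total))

The accuracy is a running average over the samples seen so far in the epoch, so it fluctuates in the first batches and stabilizes toward the end.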


def test():
    model.eval()
    test_loss = 0
    correct = 0
    # No gradients are needed for evaluation.
    with torch.no_grad():
        for data, target, length in validation_loader:
            # Add a channel dimension, as in train().
            data = np.expand_dims(data, axis=1)
            data = torch.FloatTensor(data)
            if args.cuda:
                data, target = data.cuda(), target.cuda()

            output = model(data)
            test_loss += criterion(output, torch.max(target, 1)[1]).item()

            # Index of the max log-probability per sample, shape (batch, 1).
            pred = output.data.max(1, keepdim=True)[1]
            # Same shape fix as in train(): keep both sides (batch, 1) so
            # the comparison does not broadcast.
            correct += pred.eq(torch.max(target, 1)[1].view_as(pred)).sum().item()

    # Average the summed batch losses so the printed value really is a mean.
    test_loss /= len(validation_loader)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(validation_loader.dataset),
            100. * correct / len(validation_loader.dataset)))

  
    
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test()
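
One likely culprit for both the 0/148 test accuracy and the "unrealistic" training accuracy is the shape of pred: it is computed with keepdim=True, so it has shape (batch, 1), while torch.max(target, 1)[1] has shape (batch,). Comparing the two broadcasts to a (batch, batch) boolean matrix before the .sum(), so the count has little to do with the number of correct predictions. A self-contained demonstration with made-up numbers:

import torch

pred = torch.tensor([[1], [0], [2]])      # (3, 1), as with keepdim=True
target_idx = torch.tensor([1, 2, 2])      # (3,)

# Broadcast comparison: (3, 1) vs (3,) -> a (3, 3) boolean matrix.
print((pred == target_idx).sum().item())               # 3, a meaningless count

# Shape-matched comparison, as in the corrected functions above.
print(pred.eq(target_idx.view_as(pred)).sum().item())  # 2, the true count

With the comparison fixed in both train() and test(), the reported accuracies should become meaningful. The test loss that stays at exactly 0.0550 after every epoch is also worth checking; a completely constant validation loss usually means the evaluated outputs are not changing.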