Too many indices for tensor of dimension 2

Hi, just wondering, can anyone explain what this means?
I tried to test my model straight after training and this error popped up. I'm not sure how I can fix it or what to do with it. Can anyone guide me in the right direction?
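Just so it's clear what I'm asking about, here is a tiny standalone snippet (not from my project) that reproduces the same message by indexing a 2-D tensor with three indices:

import torch

x = torch.zeros(4, 10)   # a 2-D tensor
print(x[0, 3])           # fine: two indices for two dimensions
print(x[0, 3, 1])        # IndexError: too many indices for tensor of dimension 2

My actual code is below.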

import os
import time

import numpy as np
import scipy.io
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from easydict import EasyDict as edict

# project-specific helpers (load_net, LFW1, process_dataset) come from my own modules

if __name__ == '__main__':

    #set_trace()
    args = edict({
        'operation': 'train',
        'feature_file': None,
        'result_sample_path': None,
        'gpu': 'GPU',
        'path_image_query': None,
        'query_label': 'Query label',
        'dataset': None,
        'specific_dataset_folder_name': 'lfw',
        'img_extension': 'jpg',
        'preprocessing_method': 'sphereface',
        'model_name': 'mobiface',
        'batch_size': 3,
        'image_query': '/content/drive/My Drive/recfaces13/recfaces/datasets/LFW/try',
        'train': True,
        'device': 'cuda'
    })
    print(args)

    # selecting the size of the crop based on the network
    if args.model_name == 'mobilefacenet' or args.model_name == 'sphereface':
        crop_size = (96, 112)
    elif args.model_name == 'mobiface' or args.model_name == 'shufflefacenet':
        crop_size = (112, 112)
    elif args.model_name == 'openface':
        crop_size = (96, 96)
    elif args.model_name == 'facenet':
        crop_size = (160, 160)
    else:
        raise NotImplementedError("Model " + args.model_name + " not implemented")

    if args.dataset is not None:
        # process whole dataset
        assert args.specific_dataset_folder_name is not None, 'To process a dataset, ' \
                                                              'the flag --specific_dataset_folder_name is required.'
        process_dataset(args.operation, args.model_name, args.batch_size,
                        args.dataset, args.specific_dataset_folder_name,
                        args.img_extension, args.preprocessing_method, crop_size,
                        args.result_sample_path, args.feature_file)

    elif args.operation == 'train':
      #set_trace()

      net = load_net('mobilefacenet', 'gpu')
      net = net.cuda()
      model_name = args.model_name
      
      datasettrain = LFW1(args.image_query, args.specific_dataset_folder_name, args.img_extension, args.preprocessing_method, crop_size, train=True)
      dataloadertrain = torch.utils.data.DataLoader(datasettrain, batch_size=1, shuffle=False, num_workers=2, drop_last=False)

      datasettest = LFW1(args.image_query, args.specific_dataset_folder_name, args.img_extension, args.preprocessing_method, crop_size, train=False)
      dataloadertest = torch.utils.data.DataLoader(datasettest, batch_size=1, shuffle=False, num_workers=2, drop_last=False)
      

      features = None

      if args.feature_file is not None and os.path.isfile(args.feature_file):
            features = scipy.io.loadmat(args.feature_file)      
      epoch = 2
      criterion = nn.CrossEntropyLoss()
      optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
      train_loss = list()
      total = 0
      correct = 0
      since_test = time.time()
      #set_trace()
      for i, data in enumerate(dataloadertrain):
        
        inps, labs = data
        inps, labs = inps.cuda(args['device']), labs.cuda(args['device'])

        inps = Variable(inps).cuda(args['device'])
        labs = Variable(labs).cuda(args['device'])
        optimizer.zero_grad()
        outs = net(inps.permute(0, 3, 1, 2).float())
        
        for j in range(len(outs['out'])):
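          # predicted index (argmax) minus the true label; zeros in diff are correct
          # predictions and are counted below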
          diff = outs['out'][j].argmax(0) - labs[j]
          diff = diff.int().cpu().numpy()
          unique, counts = np.unique(diff,return_counts=True)
          print(unique, counts)
          total += np.sum(counts)
          correct += np.sum(counts[unique == 0])
          print(total,correct)
      time_elapsed = time.time() - since_test ##Edit
      print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) # Edit
      print(f"accuracy is {(correct/total)*100}%")

Could you post the complete stack trace, please?

PS: probably unrelated to this issue, but Variables are deprecated since 0.4, so you can use tensors in newer versions. :wink:
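For example, the same loop without the Variable wrapper would look roughly like this (just a sketch, assuming device is 'cuda' as in your args):

device = torch.device('cuda')
for i, data in enumerate(dataloadertrain):
    inps, labs = data
    # tensors can be moved to the GPU directly; no Variable wrapper is needed
    inps = inps.to(device).permute(0, 3, 1, 2).float()
    labs = labs.to(device)
    optimizer.zero_grad()
    outs = net(inps)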