<built-in method float of Tensor object at 0x7fc85283d640>

Hello… I need some help; I'm pretty new to PyTorch. :slight_smile:

I have a CNN model and I want to evaluate it:

def evaluate_model(model, test_loader, criterion, best_model_name):
    model.load_state_dict(torch.load(best_model_name))

    # Specify the heartbeat classes from above
    classes = {
        0: 'N - Normal Beat',
        1: 'S - Supraventricular premature or ectopic beat',
        2: 'V - Premature ventricular contraction',
        3: 'F - Fusion of ventricular and normal beat',
        4: 'Q - Unclassified beat'}

    # track test loss
    test_loss = 0.0
    class_correct = list(0. for i in range(5))
    class_total = list(0. for i in range(5))

    model.eval()
    # iterate over test data
    for data, target in test_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        print(data.float)
        output = model(data.float())
        # calculate the batch loss
        loss = criterion(output, target.long())
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct_tensor = pred.eq(target.data.view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        # calculate test accuracy for each object class
        for i in range(batch_size):
            label = target.data[i].int()
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # average test loss
    test_loss = test_loss/len(test_loader.dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))

    for i in range(5):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                classes[i], 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))

When I run this code, instead of the desired per-class accuracies, I get output like the line in the title:

<built-in method float of Tensor object at 0x7fc85283d640>

How can I correct this? Please help!

Hi,

When you do print(data.float), you are only accessing the method, not calling it. You should do print(data.float())
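
For illustration, a minimal example of the difference (the tensor here is just a stand-in):

import torch

x = torch.zeros(3)
print(x.float)    # the bound method itself: <built-in method float of Tensor object at 0x...>
print(x.float())  # actually calls it: tensor([0., 0., 0.])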


@albanD Thanks!!
But I have one more query!
This is my code to evaluate the model:

def evaluate_model(model, test_loader, criterion, best_model_name):
    model.load_state_dict(torch.load(best_model_name))

    # Specify the heartbeat classes from above
    classes = {
        0: 'N - Normal Beat',
        1: 'S - Supraventricular premature or ectopic beat',
        2: 'V - Premature ventricular contraction',
        3: 'F - Fusion of ventricular and normal beat',
        4: 'Q - Unclassified beat'}

    # track test loss
    test_loss = 0.0
    class_correct = list(0. for i in range(5))
    class_total = list(0. for i in range(5))

    model.eval()
    # iterate over test data
    for data, target in test_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        #print(data.float())
        output = model(data.float())
        # calculate the batch loss
        loss = criterion(output, target.long())
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct_tensor = pred.eq(target.data.view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        # calculate test accuracy for each object class
        for i in range(batch_size):
            label = target.data[i].int()
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # average test loss
    test_loss = test_loss/len(test_loader.dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))

    for i in range(5):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                classes[i], 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))

When I run:
evaluate_model(model_1, test_loader_1, criterion, 'model_ecg_heartbeat_categorization_1.pt')

I get an error.

The criterion is NLLLoss and the batch size is 32.
How do I correct this? Please help!

Looks like your target Tensor is too small as it has a batch size of 8. You should check why.
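
This usually happens because the last batch produced by a DataLoader is smaller than batch_size (here apparently 8 leftover samples instead of 32), so for i in range(batch_size) indexes past the end of target. Assuming that is the cause, here is a minimal sketch of two common fixes; the variable names match the code above, and test_dataset_1 is just a placeholder for whatever Dataset your test_loader_1 wraps:

# Option 1: loop over the actual size of the current batch, not the configured batch_size
for i in range(data.size(0)):
    label = target.data[i].int()
    class_correct[label] += correct[i].item()
    class_total[label] += 1

# Option 2: drop the incomplete final batch when building the test loader
test_loader_1 = torch.utils.data.DataLoader(test_dataset_1, batch_size=32, drop_last=True)

Option 1 keeps every test sample in the accuracy counts, so it is usually preferable to dropping the last batch.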