I am trying to to evaluate a model that I have already trained for two classes in pytorch. I have the checkpoint and extracted its values. However when I try to predict, the length of predicted bounding boxes are more than the length true bounding boxes. I have tried different ways but they were not helpful.
Here is my code,
checkpoint = torch.load(checkpoint_path,map_location=torch.device('cpu'))
model = torchvision.models.detection.retinanet_resnet50_fpn(
pretrained=False, num_classes=2
).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
after loading the data, I have
with torch.no_grad():
for i,(images, targets) in enumerate(tqdm(test_loader)):
if i ==0: # try the first iteration just for test
outputs = model(images,targets) # get the predictions on the image
#print(outputs[0].keys())
# predictions
scores = outputs[0]['scores'].cpu().numpy()
pboxes = outputs[0]['boxes'].cpu().numpy()
plabels = outputs[0]['labels'].cpu().numpy()
# true BB and labels
oboxes = targets[0]['boxes'].cpu().numpy()
olabels = targets[0]['labels'].cpu().numpy()
print(' len oboxes-->',len(oboxes)) #56
print(' len olabels-->',len(olabels)) #56
print(' len scores-->',len(scores)) #231
print(' len pboxes-->',len(pboxes))#231
print(' len plabels-->',len(plabels))#231
predictions and true values dont have same length. Any help is appreciated.