from pprint import pprint

from torch import tensor
from torchmetrics.detection import MeanAveragePrecision
# Two predictions on the same region: a high-confidence class-0 box and a
# low-confidence class-1 box shifted by one pixel.
preds = [
    dict(
        boxes=tensor([[215.0, 41.0, 562.0, 285.0], [214.0, 41.0, 562.0, 285.0]]),
        scores=tensor([0.99, 0.09]),
        labels=tensor([0, 1]),
    )
]
# A single ground-truth box, labeled class 0 only.
target = [
    dict(
        boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
        labels=tensor([0]),
    )
]
metric = MeanAveragePrecision(iou_type="bbox", class_metrics=True, iou_thresholds=[0.5, 0.75], average="micro")
metric.update(preds, target)
pprint(metric.compute())
This code gives:
{'classes': tensor([0, 1], dtype=torch.int32),
'map': tensor(1.),
'map_50': tensor(1.),
'map_75': tensor(1.),
'map_large': tensor(1.),
'map_medium': tensor(-1.),
'map_per_class': tensor([ 1., -1.]),
'map_small': tensor(-1.),
'mar_1': tensor(1.),
'mar_10': tensor(1.),
'mar_100': tensor(1.),
'mar_100_per_class': tensor([ 1., -1.]),
'mar_large': tensor(1.),
'mar_medium': tensor(-1.),
'mar_small': tensor(-1.)}
But according to my understanding, shouldn't this give a mAP of 0.5? I am simulating two predicted boxes of two separate classes, where both boxes have ~100% overlap with a single ground-truth box that belongs to only one of those classes, so I expected the unmatched class-1 prediction to pull the score down.
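For comparison, here is the same computation with average="macro", reusing preds and target from above (assuming, per my reading of the docs, that "macro" averages the per-class APs rather than pooling all detections into one set). If the unmatched class-1 prediction were penalized as a false positive for class 1, I would expect this variant to report something closer to 0.5:

macro_metric = MeanAveragePrecision(
    iou_type="bbox",
    class_metrics=True,
    iou_thresholds=[0.5, 0.75],
    average="macro",  # my assumption: average per-class APs instead of pooling
)
macro_metric.update(preds, target)
pprint(macro_metric.compute())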