[Feat] Add KeypointAUC metric #91

Merged 13 commits (Feb 27, 2023)
3 changes: 2 additions & 1 deletion mmeval/metrics/__init__.py
@@ -12,6 +12,7 @@
from .f1_score import F1Score
from .gradient_error import GradientError
from .hmean_iou import HmeanIoU
from .keypoint_auc import KeypointAUC
from .mae import MeanAbsoluteError
from .matting_mse import MattingMeanSquaredError
from .mean_iou import MeanIoU
@@ -36,7 +37,7 @@
'StructuralSimilarity', 'SignalNoiseRatio', 'MultiLabelMetric',
'AveragePrecision', 'AVAMeanAP', 'BLEU', 'DOTAMeanAP',
'SumAbsoluteDifferences', 'GradientError', 'MattingMeanSquaredError',
'ConnectivityError', 'ROUGE'
'ConnectivityError', 'ROUGE', 'KeypointAUC'
]

_deprecated_msg = (
119 changes: 119 additions & 0 deletions mmeval/metrics/keypoint_auc.py
@@ -0,0 +1,119 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import numpy as np
from collections import OrderedDict
from typing import Dict, List

from mmeval.core.base_metric import BaseMetric
from .pck_accuracy import keypoint_pck_accuracy

logger = logging.getLogger(__name__)


def keypoint_auc_accuracy(pred: np.ndarray,
                          gt: np.ndarray,
                          mask: np.ndarray,
                          norm_factor: float,
                          num_thrs: int = 20) -> float:
"""Calculate the Area under curve (AUC) of keypoint PCK accuracy.
Note:
- instance number: N
- keypoint number: K
Args:
LareinaM marked this conversation as resolved.
Show resolved Hide resolved
pred (np.ndarray[N, K, 2]): Predicted keypoint location.
gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
LareinaM marked this conversation as resolved.
Show resolved Hide resolved
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
norm_factor (float): Normalization factor.
num_thrs (int): number of thresholds to calculate auc.
Returns:
LareinaM marked this conversation as resolved.
Show resolved Hide resolved
float: Area under curve (AUC) of keypoint PCK accuracy.
"""
    nor = np.tile(np.array([[norm_factor, norm_factor]]), (pred.shape[0], 1))
    thrs = [1.0 * i / num_thrs for i in range(num_thrs)]
    avg_accs = []
    for thr in thrs:
        _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor)
        avg_accs.append(avg_acc)

    auc = 0
    for i in range(num_thrs):
        auc += 1.0 / num_thrs * avg_accs[i]
    return auc
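
The helper averages PCK over num_thrs evenly spaced normalized thresholds, i.e. AUC = (1/T) * sum over i of PCK(i/T) for i = 0..T-1. A minimal sketch of calling it directly; the expected value assumes keypoint_pck_accuracy counts a keypoint as correct when its normalized distance is strictly below the threshold:

import numpy as np
from mmeval.metrics.keypoint_auc import keypoint_auc_accuracy

# Two instances, one visible keypoint each.
pred = np.array([[[1.0, 0.0]], [[0.0, 2.0]]])  # [N=2, K=1, 2]
gt = np.zeros((2, 1, 2))
mask = np.ones((2, 1), dtype=bool)

# With norm_factor=4, the normalized errors are 0.25 and 0.5, so the
# 20-threshold PCK curve averages to roughly 0.575 under the strict
# `distance < thr` convention.
auc = keypoint_auc_accuracy(pred, gt, mask, norm_factor=4.0, num_thrs=20)
print(auc)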


class KeypointAUC(BaseMetric):
    """AUC evaluation metric.

    Calculate the Area Under Curve (AUC) of keypoint PCK accuracy.

    By altering the threshold percentage in the calculation of PCK accuracy,
    AUC can be generated to further evaluate the pose estimation algorithms.

    Note:
        - length of dataset: N
        - num_keypoints: K
        - number of keypoint dimensions: D (typically D = 2)

    Args:
        norm_factor (float): AUC normalization factor. Default: 30 (pixels).
        num_thrs (int): Number of thresholds used to calculate the AUC.
            Default: 20.
        collect_device (str): Device name used for collecting results from
            different ranks during distributed training. Must be ``'cpu'`` or
            ``'gpu'``. Default: ``'cpu'``.
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, ``self.default_prefix``
            will be used instead. Default: ``None``.
    """

    def __init__(self,
                 norm_factor: float = 30,
                 num_thrs: int = 20,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.norm_factor = norm_factor
        self.num_thrs = num_thrs

    def add(self, predictions: List[Dict], groundtruths: List[Dict]) -> None:  # type: ignore # yapf: disable # noqa: E501
        """Process one batch of predictions and groundtruths and add the
        intermediate results to ``self._results``.

        Args:
            predictions (Sequence[dict]): Predictions from the model. Each
                dict must contain the key ``'coords'`` with a predicted
                keypoint array of shape [N, K, D].
            groundtruths (Sequence[dict]): The ground truth labels. Each dict
                must contain the key ``'coords'`` ([N, K, D] keypoint array)
                and ``'mask'`` ([N, K] visibility array).
        """
        for prediction, groundtruth in zip(predictions, groundtruths):
            self._results.append((prediction, groundtruth))

    def compute_metric(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        # split gt and prediction list
        preds, gts = zip(*results)

        # pred_coords: [N, K, D]
        pred_coords = np.concatenate([pred['coords'] for pred in preds])
        # gt_coords: [N, K, D]
        gt_coords = np.concatenate([gt['coords'] for gt in gts])
        # mask: [N, K]
        mask = np.concatenate([gt['mask'] for gt in gts])

        logger.info(f'Evaluating {self.__class__.__name__}...')

        auc = keypoint_auc_accuracy(pred_coords, gt_coords, mask,
                                    self.norm_factor, self.num_thrs)

        metric_results: OrderedDict = OrderedDict()
        metric_results[f'AUC@{self.num_thrs}'] = auc

        return metric_results
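
For context, a minimal usage sketch of the new metric; the add/compute pattern assumes mmeval's stateful BaseMetric API, and the dict keys and array shapes mirror compute_metric above:

import numpy as np
from mmeval.metrics import KeypointAUC

auc_metric = KeypointAUC(norm_factor=30, num_thrs=20)

# One batch of two instances with 17 keypoints each.
prediction = {'coords': np.random.rand(2, 17, 2)}  # [N, K, D]
groundtruth = {
    'coords': np.random.rand(2, 17, 2),  # [N, K, D]
    'mask': np.ones((2, 17), dtype=bool),  # [N, K]
}

auc_metric.add([prediction], [groundtruth])
print(auc_metric.compute())  # e.g. {'AUC@20': ...}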
47 changes: 47 additions & 0 deletions tests/test_metrics/test_keypoint_auc.py
@@ -0,0 +1,47 @@
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from unittest import TestCase

from mmeval.metrics import KeypointAUC


class TestKeypointAUC(TestCase):

    def setUp(self):
        """Set up some variables which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> doCleanups()
        """
        output = np.zeros((1, 5, 2))
        target = np.zeros((1, 5, 2))
        # first keypoint
        output[0, 0] = [10, 4]
        target[0, 0] = [10, 0]
        # second keypoint
        output[0, 1] = [10, 18]
        target[0, 1] = [10, 10]
        # third keypoint
        output[0, 2] = [0, 0]
        target[0, 2] = [0, -1]
        # fourth keypoint
        output[0, 3] = [40, 40]
        target[0, 3] = [30, 30]
        # fifth keypoint
        output[0, 4] = [20, 10]
        target[0, 4] = [0, 10]

        keypoints_visible = np.array([[True, True, False, True, True]])

        prediction = {'coords': output}
        groundtruth = {'coords': target, 'mask': keypoints_visible}

        self.predictions = [prediction]
        self.groundtruths = [groundtruth]

    def test_auc_evaluate(self):
        """Test the AUC evaluation metric."""
        auc_metric = KeypointAUC(norm_factor=20, num_thrs=4)
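        # Expected value: with norm_factor=20 the visible normalized errors
        # are 0.2, 0.4, ~0.707 and 1.0 (the third keypoint is masked out).
        # The num_thrs=4 thresholds are [0, 0.25, 0.5, 0.75], giving PCK
        # values [0, 0.25, 0.5, 0.75] assuming the strict `distance < thr`
        # convention, whose mean is 0.375.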
        auc_results = auc_metric(self.predictions, self.groundtruths)
        target = {'AUC@4': 0.375}
        self.assertDictEqual(auc_results, target)