fix unpredictable panoptic_quality output when return_per_class=True #2548

Merged · 5 commits · May 30, 2024
CHANGELOG.md (1 addition, 2 deletions)
@@ -27,8 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Fixed

-

- Fixed class order of `panoptic_quality(..., return_per_class=True)` output ([#2548](https://github.com/Lightning-AI/torchmetrics/pull/2548))


## [1.4.0] - 2024-05-03
src/torchmetrics/detection/panoptic_qualities.py (5 additions, 3 deletions)
@@ -66,9 +66,11 @@ class PanopticQuality(Metric):
single scalar tensor is returned with average panoptic quality over all classes. If ``return_sq_and_rq=True``
and ``return_per_class=False`` a tensor of length 3 is returned with panoptic, segmentation and recognition
quality (in that order). If ``return_sq_and_rq=False`` and ``return_per_class=True`` a tensor of length
-equal to the number of classes are returned, with panoptic quality for each class. Finally, if both arguments
-are ``True`` a tensor of shape ``(3, C)`` is returned with individual panoptic, segmentation and recognition
-quality for each class.
+equal to the number of classes are returned, with panoptic quality for each class. The order of classes is
+``things`` first and then ``stuffs``, and numerically sorted within each.
+(ex. with ``things=[4, 1], stuffs=[3, 2]``, the output classes are ordered by ``[1, 4, 2, 3]``)
+Finally, if both arguments are ``True`` a tensor of shape ``(3, C)`` is returned with individual panoptic,
+segmentation and recognition quality for each class.

Args:
things:
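A minimal usage sketch, not part of this diff, illustrating the ordering documented above; it assumes only the public `torchmetrics.detection.PanopticQuality` API and hypothetical inputs:

# Sketch only: illustrates the documented class ordering, not code from this PR.
import torch
from torchmetrics.detection import PanopticQuality

# With things={4, 1} and stuffs={3, 2}, per-class scores are ordered as classes [1, 4, 2, 3]:
# things first, then stuffs, each numerically sorted.
metric = PanopticQuality(things={4, 1}, stuffs={3, 2}, return_per_class=True)

# Each point is a (category_id, instance_id) pair; preds == target, so every class matches perfectly.
points = [[4, 0], [1, 0], [3, 0], [2, 0]]
preds = torch.tensor([points])  # shape (1, 4, 2)
target = torch.tensor([points])

print(metric(preds, target))  # one panoptic-quality score per class, in the order [1, 4, 2, 3]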
@@ -148,9 +148,9 @@ def _get_category_id_to_continuous_id(things: Set[int], stuffs: Set[int]) -> Dic

"""
# things metrics are stored with a continuous id in [0, len(things)[,
-thing_id_to_continuous_id = {thing_id: idx for idx, thing_id in enumerate(things)}
+thing_id_to_continuous_id = {thing_id: idx for idx, thing_id in enumerate(sorted(things))}
# stuff metrics are stored with a continuous id in [len(things), len(things) + len(stuffs)[
-stuff_id_to_continuous_id = {stuff_id: idx + len(things) for idx, stuff_id in enumerate(stuffs)}
+stuff_id_to_continuous_id = {stuff_id: idx + len(things) for idx, stuff_id in enumerate(sorted(stuffs))}
cat_id_to_continuous_id = {}
cat_id_to_continuous_id.update(thing_id_to_continuous_id)
cat_id_to_continuous_id.update(stuff_id_to_continuous_id)
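For clarity, the fixed mapping behaviour can be sketched standalone; this mirrors the two changed lines above and is illustration only, not a torchmetrics API:

# Illustrative sketch mirroring the diff above (not a torchmetrics API).
def category_id_to_continuous_id(things: set, stuffs: set) -> dict:
    # things occupy continuous ids [0, len(things)), stuffs follow; both numerically sorted
    mapping = {thing_id: idx for idx, thing_id in enumerate(sorted(things))}
    mapping.update({stuff_id: idx + len(things) for idx, stuff_id in enumerate(sorted(stuffs))})
    return mapping

# Set iteration order is not guaranteed to be numeric, so without sorted() the continuous ids
# (and therefore the per-class output order) could vary with the particular category ids used.
assert category_id_to_continuous_id({4, 1}, {3, 2}) == {1: 0, 4: 1, 2: 2, 3: 3}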
tests/unittests/detection/test_panoptic_quality.py (27 additions, 0 deletions)
@@ -61,10 +61,26 @@
.reshape((1, 1, 5, 2))
.repeat(2, 1, 1, 1),
)

_ARGS_0 = {"things": {0, 1}, "stuffs": {6, 7}}
_ARGS_1 = {"things": {2}, "stuffs": {3}, "allow_unknown_preds_category": True}
_ARGS_2 = {"things": {0, 1}, "stuffs": {10, 11}}


def _get_class_order_test_input_args(class_type, class1, class2, class3) -> (np.ndarray, dict):
    a = [class1, 0]
    b = [class2, 0]
    c = [class3, 0]
    _input = _Input(
        # Shape of input tensors is (num_batches, batch_size, num_points, 2).
        preds=torch.tensor([a, a, b, b, b, c]).reshape((1, 1, 6, 2)).repeat(2, 1, 1, 1),
        target=torch.tensor([a, a, b, b, c, c]).reshape((1, 1, 6, 2)).repeat(2, 1, 1, 1),
    )
    _args = {"things": [], "stuffs": [], "return_per_class": True}
    _args[class_type] = [class1, class2, class3]
    return _input, _args


# TODO: Improve _reference_fn by calling https://github.com/cocodataset/panopticapi/blob/master/panopticapi/evaluation.py
# directly and compare at runtime on multiple examples.

@@ -84,6 +100,11 @@ def _reference_fn_1_2(preds, target) -> np.ndarray:
return np.array([(2 / 3 + 1 + 2 / 3) / 3])


def _reference_fn_class_order(preds, target) -> np.ndarray:
"""Baseline result for the result of _get_class_order_test_input_args."""
return np.array([1, 0, 2 / 3])


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_12, reason="PanopticQuality metric only supports PyTorch >= 1.12")
class TestPanopticQuality(MetricTester):
"""Test class for `PanopticQuality` metric."""
@@ -95,6 +116,12 @@ class TestPanopticQuality(MetricTester):
(_INPUTS_0, _ARGS_0, _reference_fn_0_0),
(_INPUTS_0, _ARGS_1, _reference_fn_0_1),
(_INPUTS_1, _ARGS_2, _reference_fn_1_2),
(*_get_class_order_test_input_args("stuffs", 0, 2, 1), _reference_fn_class_order),
(*_get_class_order_test_input_args("stuffs", 0, 3, 2), _reference_fn_class_order),
(*_get_class_order_test_input_args("stuffs", 0, 10, 2), _reference_fn_class_order),
(*_get_class_order_test_input_args("things", 0, 2, 1), _reference_fn_class_order),
(*_get_class_order_test_input_args("things", 0, 3, 2), _reference_fn_class_order),
(*_get_class_order_test_input_args("things", 0, 10, 2), _reference_fn_class_order),
],
)
def test_panoptic_quality_class(self, ddp, inputs, args, reference_metric):
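Not part of the PR, but a hand check of why the baseline above is [1, 0, 2 / 3] for every parametrization of _get_class_order_test_input_args:

# Hand check of _reference_fn_class_order (illustration only, not part of the PR).
def pq_single_segment(iou: float) -> float:
    # One predicted and one target segment per class: matched only if IoU > 0.5,
    # then PQ = IoU / (TP + FP/2 + FN/2) with TP=1; otherwise TP=0, FP=FN=1 and PQ = 0.
    return iou if iou > 0.5 else 0.0

# preds = [a, a, b, b, b, c] vs target = [a, a, b, b, c, c]:
iou_class1 = 2 / 2  # perfect overlap on 2 points
iou_class2 = 2 / 3  # 2 shared points, union of 3
iou_class3 = 1 / 2  # 1 shared point, union of 2: not strictly greater than 0.5

# class1 < class3 < class2 numerically in every parametrization above, so the
# numerically sorted per-class output is [PQ(class1), PQ(class3), PQ(class2)].
assert [pq_single_segment(iou_class1), pq_single_segment(iou_class3), pq_single_segment(iou_class2)] == [1, 0, 2 / 3]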