Misc - Tracking output format and missing requirements (nutonomy#258)
* Updated results table

* Increment version, add authors, reformat table

* Add missing requirement

* Additional comments and assertions

* Align tracking return type to detection return type

* Increment version

* Rewording

* Drop unnecessary argument from deserialize

* Added comment, corrected syntax
holger-motional authored Nov 19, 2019
1 parent e73cda8 commit 7149db1
Showing 12 changed files with 50 additions and 46 deletions.
4 changes: 2 additions & 2 deletions python-sdk/nuscenes/eval/detection/data_classes.py
@@ -417,9 +417,9 @@ def serialize(self) -> dict:
         return {key[0] + ':' + str(key[1]): value.serialize() for key, value in self.md.items()}
 
     @classmethod
-    def deserialize(cls, content: dict, metric_data_cls):
+    def deserialize(cls, content: dict):
         mdl = cls()
         for key, md in content.items():
             name, distance = key.split(':')
-            mdl.set(name, float(distance), metric_data_cls.deserialize(md))
+            mdl.set(name, float(distance), DetectionMetricData.deserialize(md))
         return mdl
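Not part of the commit: a minimal round-trip sketch of the simplified API. `random_md()` is the dummy-data helper used in the devkit's unit tests; everything else follows the diff above.

    from nuscenes.eval.detection.data_classes import DetectionMetricData, DetectionMetricDataList

    mdl = DetectionMetricDataList()
    mdl.set('car', 0.5, DetectionMetricData.random_md())  # random dummy metric data
    recovered = DetectionMetricDataList.deserialize(mdl.serialize())  # no metric_data_cls argument anymore
    assert recovered == mdl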
3 changes: 1 addition & 2 deletions python-sdk/nuscenes/eval/detection/evaluate.py
@@ -16,8 +16,7 @@
 from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, \
     DetectionMetricDataList
 from nuscenes.eval.common.data_classes import EvalBoxes
-from nuscenes.eval.common.loaders import load_prediction, load_gt
-from nuscenes.eval.common.loaders import add_center_dist, filter_eval_boxes
+from nuscenes.eval.common.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
 from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
 from nuscenes.eval.common.config import config_factory
18 changes: 9 additions & 9 deletions python-sdk/nuscenes/eval/detection/render.py
@@ -293,7 +293,7 @@ def detailed_results_table_tex(metrics_path: str, output_path: str) -> None:
           '\\textbf{AAE} \\\\ \\hline ' \
           '\\hline\n'
     for name in DETECTION_NAMES:
-        ap = np.mean(metrics['label_aps'][name].values())
+        ap = np.mean(metrics['label_aps'][name].values()) * 100
         ate = metrics['label_tp_errors'][name]['trans_err']
         ase = metrics['label_tp_errors'][name]['scale_err']
         aoe = metrics['label_tp_errors'][name]['orient_err']
@@ -322,14 +322,14 @@ def detailed_results_table_tex(metrics_path: str, output_path: str) -> None:
     tex += '\\end{tabular}\n'
 
     # All one line
-    tex += '\\caption{Detailed detection performance. '
-    tex += 'AP: average precision, '
-    tex += 'ATE: average translation error (${}$), '.format(TP_METRICS_UNITS['trans_err'])
-    tex += 'ASE: average scale error (${}$), '.format(TP_METRICS_UNITS['scale_err'])
-    tex += 'AOE: average orientation error (${}$), '.format(TP_METRICS_UNITS['orient_err'])
-    tex += 'AVE: average velocity error (${}$), '.format(TP_METRICS_UNITS['vel_err'])
-    tex += 'AAE: average attribute error (${}$). '.format(TP_METRICS_UNITS['attr_err'])
-    tex += 'nuScenes Detection Score (NDS) = {:.1f} '.format(metrics['nd_score'] * 100)
+    tex += '\\caption{Detailed detection performance on the val set. \n'
+    tex += 'AP: average precision averaged over distance thresholds (%), \n'
+    tex += 'ATE: average translation error (${}$), \n'.format(TP_METRICS_UNITS['trans_err'])
+    tex += 'ASE: average scale error (${}$), \n'.format(TP_METRICS_UNITS['scale_err'])
+    tex += 'AOE: average orientation error (${}$), \n'.format(TP_METRICS_UNITS['orient_err'])
+    tex += 'AVE: average velocity error (${}$), \n'.format(TP_METRICS_UNITS['vel_err'])
+    tex += 'AAE: average attribute error (${}$). \n'.format(TP_METRICS_UNITS['attr_err'])
+    tex += 'nuScenes Detection Score (NDS) = {:.1f} \n'.format(metrics['nd_score'] * 100)
     tex += '}\n'
 
     tex += '\\end{table}\n'
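Not part of the commit: a worked sketch of the new AP cell, assuming `label_aps[name]` maps matching-distance thresholds to per-threshold AP values, as in the serialized metrics (numbers illustrative).

    import numpy as np

    label_aps = {'0.5': 0.60, '1.0': 0.70, '2.0': 0.75, '4.0': 0.79}  # per-threshold APs for one class
    ap = np.mean(list(label_aps.values())) * 100  # averaged over thresholds, reported in percent
    print('{:.1f}'.format(ap))  # 71.0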
@@ -68,7 +68,7 @@ def test_serialization(self):
         mdl = DetectionMetricDataList()
         for i in range(10):
             mdl.set('name', 0.1, DetectionMetricData.random_md())
-        recovered = DetectionMetricDataList.deserialize(json.loads(json.dumps(mdl.serialize())), DetectionMetricData)
+        recovered = DetectionMetricDataList.deserialize(json.loads(json.dumps(mdl.serialize())))
         self.assertEqual(mdl, recovered)
4 changes: 4 additions & 0 deletions python-sdk/nuscenes/eval/tracking/data_classes.py
@@ -225,6 +225,9 @@ def serialize(self) -> Dict[str, Any]:
         metrics['label_metrics'] = self.label_metrics
         metrics['eval_time'] = self.eval_time
         metrics['cfg'] = self.cfg.serialize()
+        for metric_name in self.label_metrics.keys():
+            metrics[metric_name] = self.compute_metric(metric_name)
+
         return metrics
 
     @classmethod
@@ -239,6 +242,7 @@ def deserialize(cls, content: dict) -> 'TrackingMetrics':
 
     def __eq__(self, other):
         eq = True
+        eq = eq and self.label_metrics == other.label_metrics
         eq = eq and self.eval_time == other.eval_time
         eq = eq and self.cfg == other.cfg
 
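Not part of the commit: with `serialize()` now writing each aggregated metric under its own key, downstream code can read the summary as a plain dict. A hedged sketch, assuming `tm` is a populated TrackingMetrics instance and that `label_metrics` nests per-class values under each metric name.

    summary = tm.serialize()
    print(summary['amota'])                  # aggregated value, was tm.compute_metric('amota')
    print(summary['label_metrics']['mota'])  # per-class breakdown remains available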
8 changes: 4 additions & 4 deletions python-sdk/nuscenes/eval/tracking/evaluate.py
@@ -5,7 +5,7 @@
 import json
 import os
 import time
-from typing import Tuple, List
+from typing import Tuple, List, Dict, Any
 
 import numpy as np
 
@@ -195,11 +195,11 @@ def savepath(name):
             recall_metric_curve(md_list, metric_name,
                                 self.cfg.min_recall, savepath=savepath('%s' % metric_name))
 
-    def main(self, render_curves: bool = True) -> TrackingMetrics:
+    def main(self, render_curves: bool = True) -> Dict[str, Any]:
         """
         Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
         :param render_curves: Whether to render PR and TP curves to disk.
-        :return: The TrackingMetrics computed during evaluation.
+        :return: The serialized TrackingMetrics computed during evaluation.
         """
         # Run evaluation.
         metrics, metric_data_list = self.evaluate()
@@ -222,7 +222,7 @@ def main(self, render_curves: bool = True) -> TrackingMetrics:
         if render_curves:
             self.render(metric_data_list)
 
-        return metrics
+        return metrics_summary
 
 
 if __name__ == "__main__":
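Not part of the commit: a minimal caller sketch for the new return type. The constructor arguments are placeholders following the devkit's usual pattern and are not verified against this version.

    nusc_eval = TrackingEval(config=cfg, result_path='tracking_results.json', eval_set='mini_val',
                             output_dir='~/nuscenes-metrics', nusc_version='v1.0-mini',
                             nusc_dataroot='/data/sets/nuscenes')
    summary = nusc_eval.main(render_curves=False)  # now a serialized dict, not a TrackingMetrics object
    print(summary['amota'], summary['amotp'])      # plain key access replaces compute_metric()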
43 changes: 21 additions & 22 deletions python-sdk/nuscenes/eval/tracking/tests/test_evaluate.py
@@ -7,7 +7,7 @@
 import random
 import shutil
 import unittest
-from typing import Dict, Optional
+from typing import Dict, Optional, Any
 
 import numpy as np
 from tqdm import tqdm
@@ -18,7 +18,6 @@
 from nuscenes.eval.tracking.evaluate import TrackingEval
 from nuscenes.eval.tracking.utils import category_to_tracking_name
 from nuscenes.eval.tracking.constants import TRACKING_NAMES
-from nuscenes.eval.tracking.data_classes import TrackingMetrics
 
 
 class TestMain(unittest.TestCase):
@@ -137,7 +136,7 @@ def random_id(instance_token: str, _add_errors: bool = False) -> str:
     def basic_test(self,
                    eval_set: str = 'mini_val',
                    add_errors: bool = False,
-                   render_curves: bool = False) -> TrackingMetrics:
+                   render_curves: bool = False) -> Dict[str, Any]:
         """
         Run the evaluation with fixed randomness on the specified subset, with or without introducing errors in the
         submission.
@@ -184,11 +183,11 @@ def test_delta_mock(self,
 
         # Compare metrics to known solution.
         if eval_set == 'mini_val':
-            self.assertAlmostEqual(metrics.compute_metric('amota'), 0.5383961573989436)
-            self.assertAlmostEqual(metrics.compute_metric('amotp'), 1.5275400961369252)
-            self.assertAlmostEqual(metrics.compute_metric('motar'), 0.8261827096838301)
-            self.assertAlmostEqual(metrics.compute_metric('mota'), 0.25003943918566174)
-            self.assertAlmostEqual(metrics.compute_metric('motp'), 1.2976508610883917)
+            self.assertAlmostEqual(metrics['amota'], 0.5383961573989436)
+            self.assertAlmostEqual(metrics['amotp'], 1.5275400961369252)
+            self.assertAlmostEqual(metrics['motar'], 0.8261827096838301)
+            self.assertAlmostEqual(metrics['mota'], 0.25003943918566174)
+            self.assertAlmostEqual(metrics['motp'], 1.2976508610883917)
         else:
             print('Skipping checks due to choice of custom eval_set: %s' % eval_set)
 
@@ -210,20 +209,20 @@ def test_delta_gt(self,
         # - MT/TP (hard to figure out here).
        # - AMOTA/AMOTP (unachieved recall values lead to hard unintuitive results).
         if eval_set == 'mini_val':
-            self.assertAlmostEqual(metrics.compute_metric('amota'), 1.0)
-            self.assertAlmostEqual(metrics.compute_metric('amotp'), 0.0, delta=1e-5)
-            self.assertAlmostEqual(metrics.compute_metric('motar'), 1.0)
-            self.assertAlmostEqual(metrics.compute_metric('recall'), 1.0)
-            self.assertAlmostEqual(metrics.compute_metric('mota'), 1.0)
-            self.assertAlmostEqual(metrics.compute_metric('motp'), 0.0, delta=1e-5)
-            self.assertAlmostEqual(metrics.compute_metric('faf'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('ml'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('fp'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('fn'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('ids'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('frag'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('tid'), 0.0)
-            self.assertAlmostEqual(metrics.compute_metric('lgd'), 0.0)
+            self.assertAlmostEqual(metrics['amota'], 1.0)
+            self.assertAlmostEqual(metrics['amotp'], 0.0, delta=1e-5)
+            self.assertAlmostEqual(metrics['motar'], 1.0)
+            self.assertAlmostEqual(metrics['recall'], 1.0)
+            self.assertAlmostEqual(metrics['mota'], 1.0)
+            self.assertAlmostEqual(metrics['motp'], 0.0, delta=1e-5)
+            self.assertAlmostEqual(metrics['faf'], 0.0)
+            self.assertAlmostEqual(metrics['ml'], 0.0)
+            self.assertAlmostEqual(metrics['fp'], 0.0)
+            self.assertAlmostEqual(metrics['fn'], 0.0)
+            self.assertAlmostEqual(metrics['ids'], 0.0)
+            self.assertAlmostEqual(metrics['frag'], 0.0)
+            self.assertAlmostEqual(metrics['tid'], 0.0)
+            self.assertAlmostEqual(metrics['lgd'], 0.0)
         else:
             print('Skipping checks due to choice of custom eval_set: %s' % eval_set)
 
Empty file.
6 changes: 4 additions & 2 deletions python-sdk/nuscenes/scripts/export_2d_annotations_as_json.py
@@ -98,14 +98,15 @@ def generate_record(ann_rec: dict,
 def get_2d_boxes(sample_data_token: str, visibilities: List[str]) -> List[OrderedDict]:
     """
     Get the 2D annotation records for a given `sample_data_token`.
-    :param sample_data_token: Sample data token belonging to a keyframe.
+    :param sample_data_token: Sample data token belonging to a camera keyframe.
     :param visibilities: Visibility filter.
     :return: List of 2D annotation record that belongs to the input `sample_data_token`
     """
 
     # Get the sample data and the sample corresponding to that sample data.
     sd_rec = nusc.get('sample_data', sample_data_token)
 
+    assert sd_rec['sensor_modality'] == 'camera', 'Error: get_2d_boxes only works for camera sample_data!'
     if not sd_rec['is_key_frame']:
         raise ValueError('The 2D re-projections are available only for keyframes.')
 
@@ -123,10 +124,11 @@ def get_2d_boxes(sample_data_token: str, visibilities: List[str]) -> List[OrderedDict]:
     repro_recs = []
 
     for ann_rec in ann_recs:
-
         # Augment sample_annotation with token information.
         ann_rec['sample_annotation_token'] = ann_rec['token']
         ann_rec['sample_data_token'] = sample_data_token
 
+        # Get the box in global coordinates.
         box = nusc.get_box(ann_rec['token'])
 
         # Move them to the ego-pose frame.
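Not part of the commit: the new assertion fails fast on non-camera tokens. A sketch of the intended call, with a hypothetical token variable; the visibility values are the nuScenes visibility bins.

    # OK: a CAM_FRONT keyframe sample_data token (hypothetical variable).
    records = get_2d_boxes(cam_front_sd_token, visibilities=['', '1', '2', '3', '4'])

    # Raises AssertionError ('Error: get_2d_boxes only works for camera sample_data!')
    # when given e.g. a LIDAR_TOP sample_data token.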
3 changes: 1 addition & 2 deletions python-sdk/nuscenes/scripts/export_kitti.py
@@ -46,8 +46,7 @@
 import matplotlib.pyplot as plt
 
 from nuscenes.nuscenes import NuScenes
-from nuscenes.utils.geometry_utils import transform_matrix
-from nuscenes.utils.geometry_utils import BoxVisibility
+from nuscenes.utils.geometry_utils import BoxVisibility, transform_matrix
 from nuscenes.utils.data_classes import LidarPointCloud, Box
 from nuscenes.utils.splits import create_splits_logs
 from nuscenes.utils.kitti import KittiDB
1 change: 1 addition & 0 deletions setup/requirements.txt
@@ -1,5 +1,6 @@
 cachetools
 descartes
+fire
 jupyter
 matplotlib
 motmetrics
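Not part of the commit: `fire` is the CLI library behind entry points such as `fire.Fire(...)` in the devkit's export scripts; a minimal stand-alone sketch of the pattern (script and function names illustrative).

    import fire

    def export(version: str = 'v1.0-mini', dataroot: str = '/data/sets/nuscenes') -> None:
        """Toy stand-in for a devkit export entry point."""
        print('Exporting %s from %s' % (version, dataroot))

    if __name__ == '__main__':
        fire.Fire(export)  # enables: python export.py --version v1.0-trainval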
4 changes: 2 additions & 2 deletions setup/setup.py
@@ -31,9 +31,9 @@ def get_dirlist(_rootdir):
 
 setuptools.setup(
     name='nuscenes-devkit',
-    version='1.0.2',
+    version='1.0.4',
     author='Holger Caesar, Oscar Beijbom, Qiang Xu, Varun Bankiti, Alex H. Lang, Sourabh Vora, Venice Erin Liong, '
-           'Chris Li, Sergi Widjaja, Kiwoo Shin et al.',
+           'Sergi Widjaja, Kiwoo Shin, Caglayan Dicle et al.',
     author_email='nuscenes@nutonomy.com',
     description='The official devkit of the nuScenes dataset (www.nuscenes.org).',
     long_description=long_description,
