Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Evaluation code for nuScenes-lidarseg challenge #480

Merged
merged 47 commits into from
Oct 26, 2020
Merged
Changes from 1 commit
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
cd3650a
Class to map classes from nuScenes-lidarseg to challenge
Oct 19, 2020
c74bd14
Add method to convert a label
Oct 19, 2020
05cd0f6
Add methods to ensure labels are converted correctly
Oct 19, 2020
09d5023
Class for lidarseg evaluation
Oct 19, 2020
1ba0131
Add docstrings for evaluate.py
Oct 19, 2020
71eab3a
Add argparse to evaluate.py
Oct 19, 2020
e46feaa
Improve verbose behavior in evaluate.py
Oct 19, 2020
c72b5d3
Tidy up some docstrings in evaluate.py
Oct 19, 2020
0a690ee
Tidy up some docstrings for utils.py
Oct 19, 2020
669dd4c
More docstrings for utils.py
Oct 19, 2020
d1eae79
Add init file for unit tests
Oct 20, 2020
e86a727
Add leaderboard header to TOC for detection and lidarseg challenges
Oct 20, 2020
d3ff642
Clarify definition of external data
Oct 20, 2020
561b847
Tidy up language for challenge tracks
Oct 20, 2020
ebabc55
Change from void_ignore to ignore
Oct 20, 2020
3ba7525
Add method to get sample tokens for evaluation set among those in split.
Oct 20, 2020
a06ad41
Reformat dict output
Oct 20, 2020
ee532a2
Deal with IOU case when there are no points belonging to a particular…
Oct 20, 2020
52eb198
Make confusion matrix an object instead
Oct 21, 2020
dd33008
Tidy up some docstrings in utils.py
Oct 21, 2020
9544142
Add method to get frequency-weighted IOU
Oct 21, 2020
667defd
Add in readme that predictions should be saved as uint8
Oct 21, 2020
fcf4a5d
Add docstring for ConfusionMatrix class
Oct 21, 2020
c100825
Add FWIOU to readme
Oct 21, 2020
067ecdb
Tidy up calculation for confusion matrix
Oct 21, 2020
a9d700f
Add banner link to readme
Oct 21, 2020
45e9dda
Shift miou method into ConfusionMatrix class
Oct 21, 2020
2422db5
Remove need to specify ignore idx in both eval and adaptor class; do …
Oct 21, 2020
67bb2cb
Shift get_samples_in_eval_set method to utils.py
Oct 21, 2020
58f5230
Disable progress bar if verbose=False
Oct 21, 2020
143b2bb
Add script to let user validate results folder
Oct 21, 2020
9e3c8c7
Tidy up docstring in validate_submission.py
Oct 21, 2020
0f21cad
Improve error msg when number of preds do not match number of points …
Oct 21, 2020
41ce9af
Add verbose param for validate_submission.py, remove redundant import…
Oct 21, 2020
54a12fa
Check len of preds against pcl rather than labels
Oct 22, 2020
07e0dc6
Amend docstring for get_per_class_iou method
Oct 22, 2020
e115e17
Print class even if metric is NaN
Oct 22, 2020
0a345ff
Specify in readme that preds should not contain the ignored class
Oct 22, 2020
e7f2a66
Zero out row and col for ignored class, assert >0 for preds, better e…
Oct 22, 2020
e19ac34
Address comments for readme
Oct 22, 2020
02717b6
Address comments for evaluate.py
Oct 22, 2020
616ec84
Address comments for validate_submission.py
Oct 22, 2020
5ecc55e
Address comments for utils.py
Oct 22, 2020
9f87bb4
Address comments for readme
Oct 24, 2020
8747c4d
Check if preds are between 1 and num_classes-1 in validate_submission
Oct 24, 2020
9c9fd01
Change name to fine2coarse
Oct 24, 2020
2f69668
Address typos in readme
Oct 26, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Tidy up some docstrings for utils.py
  • Loading branch information
whyekit-aptiv committed Oct 19, 2020
commit 0a690ee28ff037f1b676d59d437f0668b6191c5c
37 changes: 29 additions & 8 deletions python-sdk/nuscenes/eval/lidarseg/utils.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,18 @@
from typing import Dict

import numpy as np

from nuscenes import NuScenes


class LidarsegChallengeAdaptor:
whyekit-motional marked this conversation as resolved.
Show resolved Hide resolved
"""

"""
def __init__(self, nusc: NuScenes):
"""

"""
self.nusc = nusc

self.raw_name_2_merged_name_mapping = self.get_raw2merged()
Expand All @@ -15,7 +23,11 @@ def __init__(self, nusc: NuScenes):
self.raw_idx_2_merged_idx_mapping = self.get_raw_idx_2_merged_idx()

@staticmethod
def get_raw2merged():
def get_raw2merged() -> Dict:
"""
Returns the mapping from
:return:
"""
return {'noise': 'void_ignore',
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
Expand Down Expand Up @@ -50,8 +62,10 @@ def get_raw2merged():
'vehicle.ego': 'void_ignore'}

@staticmethod
def get_merged2idx():
def get_merged2idx() -> Dict:
"""
Returns the mapping from the merged class names to the merged class indices.
:return: A dictionary containing the mapping from the merged class names to the merged class indices.
"""
return {'void_ignore': 0,
whyekit-motional marked this conversation as resolved.
Show resolved Hide resolved
'barrier': 1,
Expand All @@ -71,14 +85,20 @@ def get_merged2idx():
'manmade': 15,
'vegetation': 16}

def get_raw_idx_2_merged_idx(self) -> Dict:
    """
    Returns the mapping from the raw class indices to the merged class indices, by composing the
    raw-name -> merged-name mapping with the merged-name -> merged-index mapping.
    :return: A dictionary containing the mapping from the raw class indices to the merged class indices.
    """
    raw_idx_2_merged_idx_mapping = dict()
    # For each raw class, look up its merged class name, then that name's merged index.
    for raw_name, raw_idx in self.nusc.lidarseg_name2idx_mapping.items():
        raw_idx_2_merged_idx_mapping[raw_idx] = self.merged_name_2_merged_idx_mapping[
            self.raw_name_2_merged_name_mapping[raw_name]]
    return raw_idx_2_merged_idx_mapping

def check_mapping(self):
def check_mapping(self) -> None:
"""

"""
merged_set = set()
for raw_name, merged_name in self.raw_name_2_merged_name_mapping.items():
merged_set.add(merged_name)
Expand All @@ -88,7 +108,7 @@ def check_mapping(self):

def convert_label(self, points_label: np.ndarray) -> np.ndarray:
"""
Convert the labels in a single .bin file according to the provided mapping
Convert the labels in a single .bin file according to the provided mapping.
:param points_label: The .bin to be converted (e.g. './i_contain_the_labels_for_a_pointcloud.bin')
"""
counter_before = self.get_stats(points_label) # get stats before conversion
Expand All @@ -112,7 +132,7 @@ def compare_stats(self, counter_before: np.array, counter_after: np.array) -> bo
no points, class 1 has 1 point, class 2 has 34 points, etc.
:param counter_after: A numPy array which contains the counts of each class (the index of the array corresponds
whyekit-motional marked this conversation as resolved.
Show resolved Hide resolved
to the class label) after conversion
:returns: True or False; True if the stats before and after conversion are the same, and False if otherwise.
:return: True or False; True if the stats before and after conversion are the same, and False if otherwise.
"""
counter_check = [0] * len(counter_after)
for i, count in enumerate(counter_before): # Note that it is expected that the class labels are 0-indexed.
whyekit-motional marked this conversation as resolved.
Show resolved Hide resolved
Expand All @@ -125,8 +145,9 @@ def compare_stats(self, counter_before: np.array, counter_after: np.array) -> bo
def get_stats(self, points_label: np.array) -> np.ndarray:
"""
Get frequency of each label in a point cloud.
:param points_label: A numPy array which contains the labels of the point cloud; e.g. np.array([2, 1, 34, ..., 38])
:returns: An array which contains the counts of each label in the point cloud. The index of the point cloud
:param points_label: A numPy array which contains the labels of the point cloud;
e.g. np.array([2, 1, 34, ..., 38])
:return: An array which contains the counts of each label in the point cloud. The index of the point cloud
corresponds to the index of the class label. E.g. [0, 2345, 12, 451] means that there are no points
in class 0, there are 2345 points in class 1, there are 12 points in class 2 etc.
"""
Expand Down