
Evaluation Fixes #51


Merged: 3 commits, Mar 4, 2019
python-sdk/nuscenes/eval/eval_utils.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -130,7 +130,7 @@ def filter_boxes(sample_boxes: List[Dict], pose_record: Dict, cs_record: Dict, e
     result = []
     ego_dists = []
     for box_sensor, box_global in zip(sample_boxes_sensor, sample_boxes):
-        dist = np.sqrt(np.sum(box_sensor.center ** 2))
+        dist = np.sqrt(np.sum(box_sensor.center[:2] ** 2))
         if dist <= eval_range:
             result.append(box_global)  # Add the sample_box, not the box.
             ego_dists.append(dist)
```
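For context, a minimal sketch of what the one-line change does, using a hypothetical box center: the eval-range filter now measures ego distance in the x-y ground plane (bird's-eye view) instead of the full 3D norm, so a box's height offset no longer counts against the range cutoff.

```python
import numpy as np

center = np.array([30.0, 40.0, 1.8])  # Hypothetical box center (x, y, z) in sensor coordinates.

dist_old = np.sqrt(np.sum(center ** 2))      # Before: 3D norm, z included (about 50.03).
dist_new = np.sqrt(np.sum(center[:2] ** 2))  # After: ground-plane distance only (50.0).
```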
python-sdk/nuscenes/eval/nuscenes_eval.py (24 changes: 19 additions & 5 deletions)
```diff
@@ -421,10 +421,6 @@ def average_precision(self, all_annotations: dict, all_results: dict, class_name
         else:
             rec = 0 * tp
 
-        # Store original values.
-        metrics['rec'] = rec
-        metrics['prec'] = prec
-
         # IF there are no data points, add a point at (rec, prec) of (0.01, 0) such that the AP equals 0.
         if len(prec) == 0:
             rec = np.array([0.01])
```
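A minimal illustration (not part of the patch) of why the early store is dropped here and re-added after the modifications in the next hunk: np.append returns a new array, so values stored into metrics before the (0.0, prec[0]) point is prepended go stale.

```python
import numpy as np

metrics = {}
rec = np.array([0.1, 0.5])
metrics['rec'] = rec       # Stores a reference to the current array.
rec = np.append(0.0, rec)  # np.append returns a new array; the stored one is unchanged.
print(metrics['rec'])      # [0.1 0.5] -- stale: missing the prepended 0.0.
print(rec)                 # [0.  0.1 0.5]
```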
```diff
@@ -434,14 +430,32 @@ def average_precision(self, all_annotations: dict, all_results: dict, class_name
         if rec[0] != 0:
             rec = np.append(0.0, rec)
             prec = np.append(prec[0], prec)
+            metrics['trans_err'].insert(0, np.nan)
+            metrics['vel_err'].insert(0, np.nan)
+            metrics['scale_err'].insert(0, np.nan)
+            metrics['orient_err'].insert(0, np.nan)
+            metrics['attr_err'].insert(0, np.nan)
+            metrics['conf'].insert(0, 1)
+
+            # For debugging only.
+            metrics['ego_dist'].insert(0, np.nan)
+            metrics['vel_magn'].insert(0, np.nan)
+
+        # Store modified rec and prec values.
+        metrics['rec'] = rec
+        metrics['prec'] = prec
+
+        # If the max recall is below the minimum recall range, return the maximum error
+        if max(rec) < min(score_range):
+            return np.nan, dict()
+
         # Find indices of rec that are close to the interpolated recall thresholds.
         assert all(rec == sorted(rec))  # np.searchsorted requires sorted inputs.
         thresh_count = int((score_range[1] - score_range[0]) * 100 + 1)
         rec_interp = np.linspace(score_range[0], score_range[1], thresh_count)
         threshold_inds = np.searchsorted(rec, rec_interp, side='left').astype(np.float32)
         threshold_inds[threshold_inds == len(rec)] = np.nan  # Mark unachieved recall values as such.
-        assert np.nanmax(threshold_inds) < len(sortind)  # Check that threshold indices are not out of bounds.
+        assert np.nanmax(threshold_inds) < len(rec)  # Check that threshold indices are not out of bounds.
         metrics['threshold_inds'] = threshold_inds
```
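The np.nan insertions above keep the per-detection error lists (trans_err, vel_err, etc.) aligned with the synthetic (0.0, prec[0]) point, which has no matched detection to take errors from. To make the threshold lookup concrete, here is a hedged sketch with hypothetical rec values that mirrors the patched code. np.searchsorted returns indices into rec, which is why the corrected assert bounds them by len(rec) rather than len(sortind): after the 0.0 prepend, rec can be longer than the sorted detection list that sortind indexes.

```python
import numpy as np

# Hypothetical recall curve and recall range, mirroring the patched code.
rec = np.array([0.0, 0.2, 0.35, 0.6, 0.9])  # Sorted, as the assert in the hunk requires.
score_range = (0.1, 1.0)

thresh_count = int((score_range[1] - score_range[0]) * 100 + 1)  # 91 thresholds, 0.01 apart.
rec_interp = np.linspace(score_range[0], score_range[1], thresh_count)

# Index of the first achieved recall >= each threshold; float so it can hold np.nan.
threshold_inds = np.searchsorted(rec, rec_interp, side='left').astype(np.float32)
threshold_inds[threshold_inds == len(rec)] = np.nan  # Thresholds the curve never reaches.

# Finite entries index into rec, so len(rec) is the correct upper bound.
assert np.nanmax(threshold_inds) < len(rec)
```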