Eval refactor step2 #53

Merged · 26 commits · Mar 14, 2019

Changes from 1 commit

Commits (26)
edf0b94
reorganized repo to allow for easier injection of tests. Moved d…
oscar-nutonomy Mar 6, 2019
b696a06
added README, minor renames and cleanup
oscar-nutonomy Mar 6, 2019
cd99b1f
Merge remote-tracking branch 'origin/release_v0.2' into eval_refactor
oscar-nutonomy Mar 6, 2019
548df59
removed v0.1 from the nuscenes asserts
oscar-nutonomy Mar 6, 2019
b8a41df
renamed main detection eval file
oscar-nutonomy Mar 6, 2019
e4b7120
fixed typo
oscar-nutonomy Mar 6, 2019
3d0d01a
Added verify and test instructions to main README
oscar-nutonomy Mar 6, 2019
8efede2
added more thorough regression-type test. This seems to have triggere…
oscar-nutonomy Mar 6, 2019
6d76c3d
Update README.md
oscar-nutonomy Mar 7, 2019
3fe86dd
Minor cleanups, added more descriptive error messages
holger-motional Mar 7, 2019
d379353
Standardized dataroot to /data/sets/nuscenes
holger-motional Mar 7, 2019
873b30b
Catch case when obligatory attributes are missing, changed attribute_…
holger-motional Mar 7, 2019
e2e4d01
Merge branch 'release_v0.2' into eval_refactor_step2
holger-motional Mar 7, 2019
64d9a7d
fixed random seed. Passes unit-tests now
oscar-nutonomy Mar 8, 2019
f73fcfe
started re-factor
oscar-nutonomy Mar 8, 2019
9ddd59e
Added data-classes for the EvalBoxes. Cleaned up loading. Same result…
oscar-nutonomy Mar 8, 2019
481488a
updated all tests to use the EvalBox data class. All tests passing now.
oscar-nutonomy Mar 8, 2019
4cfe6c1
More cleanup. Still no changes to algorithm
oscar-nutonomy Mar 9, 2019
025c93b
changes to calculating distance from ego_vehicle (#57)
oscar-nutonomy Mar 9, 2019
e33051e
Typing and comments
holger-motional Mar 9, 2019
d21123b
Merge branch 'eval_refactor_step2' of github.com:nutonomy/nuscenes-de…
holger-motional Mar 9, 2019
273ff7a
Changed result format to accept at most one attribute (#60)
holger-motional Mar 11, 2019
ab1b69a
minor attr_acc cleanup
oscar-nutonomy Mar 11, 2019
e779114
reviewer comments
oscar-nutonomy Mar 11, 2019
fb9c4dc
merged conflict with v0.2 branch
oscar-nutonomy Mar 14, 2019
7a9a4d5
final conflict and reviewer comments
oscar-nutonomy Mar 14, 2019
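
Several commits above replace raw result dictionaries with a typed container ("Added data-classes for the EvalBoxes", "updated all tests to use the EvalBox data class"). As a minimal sketch of what such a container could look like, using only the result-format fields visible in the diff below; the class body is illustrative and is not the devkit's actual EvalBox:

```python
from dataclasses import dataclass
from typing import List


@dataclass
class EvalBox:
    """Illustrative container for one detection result. Field names follow
    the result entries in the diff below, not the devkit source."""
    sample_token: str
    translation: List[float]       # x, y, z in meters
    size: List[float]              # width, length, height in meters
    rotation: List[float]          # quaternion (w, x, y, z)
    velocity: List[float]          # vx, vy, vz in m/s
    detection_name: str            # e.g. 'vehicle.car'
    detection_score: float
    attribute_scores: List[float]  # per-attribute scores, the format at this commit

    def __post_init__(self) -> None:
        # Validation in the spirit of the "more descriptive error messages" commit.
        assert len(self.translation) == 3, 'Error: translation must be (x, y, z).'
        assert len(self.rotation) == 4, 'Error: rotation must be a quaternion (w, x, y, z).'
```

Loading a JSON result file then reduces to `EvalBox(**entry)` per entry, which matches the "Cleaned up loading" direction of the later commits.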
fixed random seed. Passes unit-tests now
oscar-nutonomy committed Mar 8, 2019
commit 64d9a7d4df30171b6dd1f72f3ba5585344459fe6
60 changes: 7 additions & 53 deletions python-sdk/nuscenes/eval/detection/tests/test_main.py
```diff
@@ -43,7 +43,6 @@ def random_class(category_name):
             else:
                 return class_names[np.random.randint(0, 9)]
 
-        random.seed(43)
         mock_results = {}
         splits = create_splits_scenes(nusc)
         val_samples = []
@@ -59,7 +58,7 @@ def random_class(category_name):
                 sample_res.append(
                     {
                         'sample_token': sample['token'],
-                        'translation': list(np.array(ann['translation']) + 10 * (np.random.rand(3) - 0.5)),
+                        'translation': list(np.array(ann['translation']) + 5 * (np.random.rand(3) - 0.5)),
                         'size': list(np.array(ann['size']) * 2 * (np.random.rand(3) + 0.5)),
                         'rotation': list(np.array(ann['rotation']) + ((np.random.rand(4) - 0.5) * .1)),
                         'velocity': list(nusc.box_velocity(ann_token) * (np.random.rand(3) + 0.5)),
```

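The one-line change in the hunk above halves the translation perturbation: `5 * (np.random.rand(3) - 0.5)` draws each axis offset uniformly from [-2.5, 2.5) meters, where the previous factor of 10 gave [-5, 5). A quick standalone check (the seed here is arbitrary, purely for reproducibility):

```python
import numpy as np

np.random.seed(0)  # arbitrary seed, only to make this check reproducible
noise = 5 * (np.random.rand(3) - 0.5)  # per-axis offsets in [-2.5, 2.5)
assert np.all(noise >= -2.5) and np.all(noise < 2.5)
print(noise)  # [0.24406752 1.07594683 0.51381688]
```
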
```diff
@@ -71,70 +70,25 @@ def random_class(category_name):
             mock_results[sample['token']] = sample_res
         return mock_results
 
-    @unittest.skip("TODO unskip once done with the others")
-    def test_simple(self):
-        """
-        Creates a dummy result file and runs NuScenesEval.
-        This is intended to simply exercise a large part of the code to catch typos and syntax errors.
-        """
-
-        random.seed(43)
-        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
-        nusc = NuScenes(version='v0.2', dataroot=os.environ['NUSCENES'], verbose=False)
-
-        splits = create_splits_scenes(nusc)
-        one_scene_token = nusc.field2token('scene', 'name', splits['val'][0])
-        one_scene = nusc.get('scene', one_scene_token[0])
-
-        def make_mock_entry(sample_token):
-            return {
-                'sample_token': sample_token,
-                'translation': [1.0, 2.0, 3.0],
-                'size': [1.0, 2.0, 3.0],
-                'rotation': [1.0, 2.0, 2.0, 3.0],
-                'velocity': [1.0, 2.0, 3.0],
-                'detection_name': 'vehicle.car',
-                'detection_score': random.random(),
-                'attribute_scores': [.1, .2, .3, .4, .5, .6, .7, .8]
-            }
-
-        pred = {
-            one_scene['first_sample_token']: [
-                make_mock_entry(one_scene['first_sample_token']),
-                make_mock_entry(one_scene['first_sample_token'])],
-            one_scene['last_sample_token']: [
-                make_mock_entry(one_scene['last_sample_token']),
-                make_mock_entry(one_scene['last_sample_token'])],
-        }
-
-        with open(self.res_mockup, 'w') as f:
-            json.dump(pred, f)
-
-        nusc_eval = NuScenesEval(nusc, self.res_mockup, eval_set='val', output_dir=self.res_eval_folder, verbose=True)
-        nusc_eval.run_eval()
-
-        # Trivial assert statement
-        self.assertEqual(nusc_eval.output_dir, self.res_eval_folder)
-
     def test_delta(self):
         """
-        This test evaluates the score of a plausible, arbitrary and random set of predictions.
-        The goal is to get some reasonable score.
+        This test runs the evaluation for an arbitrary random set of predictions.
+        This score is then captured in this very test such that if we change the eval code,
+        this test will trigger if the results changed.
         """
 
+        random.seed(42)
+        np.random.seed(42)
         assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
         nusc = NuScenes(version='v0.2', dataroot=os.environ['NUSCENES'], verbose=False)
 
         with open(self.res_mockup, 'w') as f:
             json.dump(self._mock_results(nusc), f, indent=2)
 
         nusc_eval = NuScenesEval(nusc, self.res_mockup, eval_set='val', output_dir=self.res_eval_folder, verbose=True)
-        nusc_eval.run_eval()
+        metrics = nusc_eval.run_eval()
 
-        # Trivial assert statement
-        self.assertEqual(nusc_eval.output_dir, self.res_eval_folder)
+        # Score of 0.22082865720221012 was measured on the branch "release_v0.2" on March 7 2019.
+        self.assertAlmostEqual(metrics['weighted_sum'], 0.22082865720221012)
 
 
 if __name__ == '__main__':
```
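
The pattern this commit settles on is: seed every RNG stream the pipeline consumes, run the evaluation once, and pin the resulting scalar inside the test itself, so any behavioural change in the eval code fails the build. Below is a self-contained sketch of that pattern; `toy_eval` and its pinned value are invented stand-ins, and only the seeding-and-pinning structure mirrors the diff above:

```python
import random
import unittest

import numpy as np


def toy_eval() -> float:
    # Invented stand-in for NuScenesEval.run_eval(): a scalar "metric"
    # that consumes both RNG streams, like the mocked results above.
    return random.random() + np.random.rand()


class ToyRegressionTest(unittest.TestCase):

    def test_delta_style(self):
        # Seed every RNG the code under test touches; missing either
        # stream would make the pinned value below flaky.
        random.seed(42)
        np.random.seed(42)

        score = toy_eval()

        # Value measured once on a trusted run, then pinned, mirroring the
        # pinned weighted_sum of 0.22082865720221012 in the diff above.
        self.assertAlmostEqual(score, 0.6394267984578837 + 0.3745401188473625)


if __name__ == '__main__':
    unittest.main()
```

The cost of this approach is that any intentional change to the metric (or to how randomness is consumed) requires re-measuring and updating the pinned constant, which is exactly why the comment in the diff records where and when the value was measured.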