Structure re-org towards more tests. #52

Merged · 6 commits · Mar 7, 2019
8 changes: 6 additions & 2 deletions README.md
@@ -24,10 +24,10 @@ To download nuScenes you need to go to the [Download page](https://www.nuscenes.
create an account and confirm the nuScenes [Terms of Use](https://www.nuscenes.org/terms-of-use).
After logging in you will see multiple archives for images, point clouds and metadata.
For the devkit to work you will need to download *all* archives.
Please unpack the archives to the `/data/nuscenes` folder \*without\* overwriting folders that occur in multiple archives.
Please unpack the archives to the `/data/sets/nuscenes` folder \*without\* overwriting folders that occur in multiple archives.
Eventually you should have the following folder structure:
```
/data/nuscenes
/data/sets/nuscenes
maps - Large image files (~500 Gigapixel) that depict the drivable surface and sidewalks in the scene.
samples - Sensor data for keyframes.
sweeps - Sensor data for intermediate frames.
@@ -51,6 +51,10 @@ following to your `~/.virtualenvs/nuscenes/bin/postactivate` (virtual environmen
```
export PYTHONPATH="${PYTHONPATH}:$HOME/nuscenes-devkit/python-sdk"
```
Finally, set the NUSCENES environment variable to point to your data folder:
```
export NUSCENES="/data/sets/nuscenes"
```
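A quick way to confirm the layout matches what is described above is a small check against the `NUSCENES` variable. This is only a sketch, not part of the devkit: it assumes the folder names listed earlier (`maps`, `samples`, `sweeps`) and falls back to the default path if the variable is unset.
```
import os

# Sanity check: NUSCENES should point at a folder with the layout shown above.
dataroot = os.environ.get('NUSCENES', '/data/sets/nuscenes')
for subdir in ('maps', 'samples', 'sweeps'):
    path = os.path.join(dataroot, subdir)
    assert os.path.isdir(path), 'Missing expected folder: {}'.format(path)
print('Found the expected sub-folders under {}.'.format(dataroot))
```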

## Tutorial
To get started with the nuScenes devkit, please run the tutorial as an IPython notebook:
@@ -14,9 +14,9 @@
import matplotlib.pyplot as plt

from nuscenes.nuscenes import NuScenes
from nuscenes.eval.eval_utils import center_distance, category_to_detection_name, filter_boxes, \
from nuscenes.eval.detection.utils import center_distance, category_to_detection_name, filter_boxes, \
visualize_sample, scale_iou, yaw_diff, velocity_l2, attr_acc
from nuscenes.eval.create_splits_logs import create_splits_logs
from nuscenes.utils.splits import create_splits_logs


class NuScenesEval:
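For downstream code, the net effect of this re-org is that the evaluation helpers now live in `nuscenes.eval.detection.utils` and the split helpers in `nuscenes.utils.splits`. A minimal sketch of the updated imports (old locations kept as comments for comparison):
```
# Old locations, removed by this re-org:
#   from nuscenes.eval.eval_utils import center_distance, scale_iou, yaw_diff
#   from nuscenes.eval.create_splits_logs import create_splits_logs

# New locations:
from nuscenes.eval.detection.utils import center_distance, scale_iou, yaw_diff
from nuscenes.utils.splits import create_splits_logs
```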
Empty file.
70 changes: 70 additions & 0 deletions python-sdk/nuscenes/eval/detection/tests/test_main.py
@@ -0,0 +1,70 @@
# nuScenes dev-kit.
# Code written by Oscar Beijbom, 2019.
# Licensed under the Creative Commons [see licence.txt]

import unittest
import random
import json
import os
import shutil

from nuscenes.eval.detection.main import NuScenesEval
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes


class TestEndToEnd(unittest.TestCase):

def test_simple(self):
"""
Creates a dummy result file and runs NuScenesEval.
        This is intended simply to exercise a large part of the code to catch typos and syntax errors.
"""

random.seed(43)
assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v0.2', dataroot=os.environ['NUSCENES'], verbose=False)

splits = create_splits_scenes(nusc)
one_scene_token = nusc.field2token('scene', 'name', splits['val'][0])
one_scene = nusc.get('scene', one_scene_token[0])

def make_mock_entry(sample_token):
return {
'sample_token': sample_token,
'translation': [1.0, 2.0, 3.0],
'size': [1.0, 2.0, 3.0],
'rotation': [1.0, 2.0, 2.0, 3.0],
'velocity': [1.0, 2.0, 3.0],
'detection_name': 'vehicle.car',
'detection_score': random.random(),
'attribute_scores': [.1, .2, .3, .4, .5, .6, .7, .8]
}

pred = {
one_scene['first_sample_token']: [
make_mock_entry(one_scene['first_sample_token']),
make_mock_entry(one_scene['first_sample_token'])],
one_scene['last_sample_token']: [
make_mock_entry(one_scene['last_sample_token']),
make_mock_entry(one_scene['last_sample_token'])],
}

res_mockup = 'nsc_eval.json'
res_eval_folder = 'tmp'

with open(res_mockup, 'w') as f:
json.dump(pred, f)

nusc_eval = NuScenesEval(nusc, res_mockup, eval_set='val', output_dir=res_eval_folder, verbose=True)
nusc_eval.run_eval()

# Trivial assert statement
self.assertEqual(nusc_eval.output_dir, res_eval_folder)

os.remove(res_mockup)
shutil.rmtree(res_eval_folder)


if __name__ == '__main__':
unittest.main()
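To run the new test modules locally, something along these lines should work. This is a sketch rather than an official entry point: it assumes `NUSCENES` is exported and `python-sdk` is on the `PYTHONPATH` as described in the README, and that discovery is started from the `python-sdk` directory.
```
import os
import unittest

# Mirror the guard used inside the tests: they require the dataset to be present.
assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

# Discover all test_*.py modules under the nuscenes package and run them.
suite = unittest.TestLoader().discover('nuscenes', pattern='test_*.py')
unittest.TextTestRunner(verbosity=2).run(suite)
```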
@@ -7,7 +7,7 @@
import numpy as np
from pyquaternion import Quaternion

from nuscenes.eval.eval_utils import scale_iou, quaternion_yaw, yaw_diff
from nuscenes.eval.detection.utils import scale_iou, quaternion_yaw, yaw_diff


class TestEval(unittest.TestCase):
@@ -127,3 +127,7 @@ def test_yaw_diff(self):
sr = {'rotation': Quaternion(axis=(0, 0, 1), angle=0.9 * np.pi).elements}
diff = yaw_diff(sa, sr)
self.assertAlmostEqual(diff, 0.2 * np.pi)


if __name__ == '__main__':
unittest.main()
1 change: 1 addition & 0 deletions python-sdk/nuscenes/scripts/README.md
@@ -0,0 +1 @@
Misc scripts not part of the core code-base.
Empty file.
@@ -9,10 +9,9 @@
from nuscenes.nuscenes import NuScenes


def test_dataset_complete(nusc: NuScenes):
def verify_setup(nusc: NuScenes):
"""
Script to verify that the nuScenes installation is complete.
Note: This script is not a unit test, as the dataset may be stored in another folder.
"""

# Check that each sample_data file exists.
@@ -29,13 +28,14 @@ def test_dataset_complete(nusc: NuScenes):


if __name__ == "__main__":

# Settings.
parser = argparse.ArgumentParser(description='Test that the installed dataset is complete.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataroot', type=str, default='/data/exp/nuScenes-blurring-data/nuscenes-v0.5',
parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
help='Default nuScenes data directory.')
parser.add_argument('--version', type=str, default='v0.5',
help='Which version of the nuScenes dataset to evaluate on, e.g. v0.5.')
parser.add_argument('--version', type=str, default='full',
help='Which version of the nuScenes dataset to evaluate on, e.g. full.')
parser.add_argument('--verbose', type=int, default=1,
help='Whether to print to stdout.')
args = parser.parse_args()
@@ -44,7 +44,7 @@ def test_dataset_complete(nusc: NuScenes):
verbose = bool(args.verbose)

# Init.
nusc = NuScenes(version=version, verbose=verbose, dataroot=dataroot)
nusc_ = NuScenes(version=version, verbose=verbose, dataroot=dataroot)

# Run tests.
test_dataset_complete(nusc)
# Verify data blobs.
verify_setup(nusc_)
Empty file.
27 changes: 27 additions & 0 deletions python-sdk/nuscenes/tests/test_nuscenes.py
@@ -0,0 +1,27 @@
# nuScenes dev-kit.
# Code written by Oscar Beijbom, 2019.
# Licensed under the Creative Commons [see licence.txt]

import unittest
import os

from nuscenes.nuscenes import NuScenes


class TestNuScenes(unittest.TestCase):

def test_load(self):
"""
Loads up NuScenes.
This is intended to simply run the NuScenes class to check for import errors, typos, etc.
"""

assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v0.2', dataroot=os.environ['NUSCENES'], verbose=False)

# Trivial assert statement
self.assertEqual(nusc.table_root, os.path.join(os.environ['NUSCENES'], 'v0.2'))


if __name__ == '__main__':
unittest.main()