Skip to content

Commit

Permalink
Update NuScenesExplorer (nutonomy#99)
Browse files Browse the repository at this point in the history
* added out_path args to all render methods

* Assert video extensions are .avi; bugfix in cv2.VideoWriter.
  • Loading branch information
sourabh-nutonomy authored and oscar-nutonomy committed Apr 8, 2019
1 parent d969a17 commit 3e1f93e
Showing 1 changed file with 71 additions and 24 deletions.
95 changes: 71 additions & 24 deletions python-sdk/nuscenes/nuscenes.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,36 +379,37 @@ def list_sample(self, sample_token: str) -> None:
self.explorer.list_sample(sample_token)

def render_pointcloud_in_image(self, sample_token: str, dot_size: int = 5, pointsensor_channel: str = 'LIDAR_TOP',
camera_channel: str = 'CAM_FRONT') -> None:
camera_channel: str = 'CAM_FRONT', out_path: str = None) -> None:
self.explorer.render_pointcloud_in_image(sample_token, dot_size, pointsensor_channel=pointsensor_channel,
camera_channel=camera_channel)
camera_channel=camera_channel, out_path=out_path)

def render_sample(self, sample_token: str, box_vis_level: BoxVisibility = BoxVisibility.ANY, nsweeps: int = 1)\
-> None:
self.explorer.render_sample(sample_token, box_vis_level, nsweeps=nsweeps)
def render_sample(self, sample_token: str, box_vis_level: BoxVisibility = BoxVisibility.ANY, nsweeps: int = 1,
out_path: str = None) -> None:
self.explorer.render_sample(sample_token, box_vis_level, nsweeps=nsweeps, out_path=out_path)

def render_sample_data(self, sample_data_token: str, with_anns: bool = True,
box_vis_level: BoxVisibility = BoxVisibility.ANY, axes_limit: float = 40, ax: Axes = None,
nsweeps: int = 1) -> None:
self.explorer.render_sample_data(sample_data_token, with_anns, box_vis_level, axes_limit, ax, nsweeps=nsweeps)
nsweeps: int = 1, out_path: str = None) -> None:
self.explorer.render_sample_data(sample_data_token, with_anns, box_vis_level, axes_limit, ax, nsweeps=nsweeps,
out_path=out_path)

def render_annotation(self, sample_annotation_token: str, margin: float = 10, view: np.ndarray = np.eye(4),
box_vis_level: BoxVisibility = BoxVisibility.ANY) -> None:
self.explorer.render_annotation(sample_annotation_token, margin, view, box_vis_level)
box_vis_level: BoxVisibility = BoxVisibility.ANY, out_path: str = None) -> None:
self.explorer.render_annotation(sample_annotation_token, margin, view, box_vis_level, out_path)

def render_instance(self, instance_token: str) -> None:
self.explorer.render_instance(instance_token)
def render_instance(self, instance_token: str, out_path: str = None) -> None:
self.explorer.render_instance(instance_token, out_path=out_path)

def render_scene(self, scene_token: str, freq: float = 10, imsize: Tuple[float, float] = (640, 360),
out_path: str = None) -> None:
self.explorer.render_scene(scene_token, freq, imsize, out_path)

def render_scene_channel(self, scene_token: str, channel: str = 'CAM_FRONT',
imsize: Tuple[float, float] = (640, 360)) -> None:
self.explorer.render_scene_channel(scene_token, channel=channel, imsize=imsize)
def render_scene_channel(self, scene_token: str, channel: str = 'CAM_FRONT', freq: float=10,
imsize: Tuple[float, float] = (640, 360), out_path: str = None) -> None:
self.explorer.render_scene_channel(scene_token, channel=channel, freq=freq, imsize=imsize, out_path=out_path)

def render_egoposes_on_map(self, log_location: str, scene_tokens: List = None) -> None:
self.explorer.render_egoposes_on_map(log_location, scene_tokens)
def render_egoposes_on_map(self, log_location: str, scene_tokens: List = None, out_path: str = None) -> None:
self.explorer.render_egoposes_on_map(log_location, scene_tokens, out_path=out_path)


class NuScenesExplorer:
Expand Down Expand Up @@ -571,13 +572,15 @@ def render_pointcloud_in_image(self,
sample_token: str,
dot_size: int = 5,
pointsensor_channel: str = 'LIDAR_TOP',
camera_channel: str = 'CAM_FRONT') -> None:
camera_channel: str = 'CAM_FRONT',
out_path: str = None) -> None:
"""
Scatter-plots a point cloud on top of an image.
:param sample_token: Sample token.
:param dot_size: Scatter plot dot size.
:param pointsensor_channel: RADAR or LIDAR channel name, e.g. 'LIDAR_TOP'.
:param camera_channel: Camera channel name, e.g. 'CAM_FRONT'.
:param out_path: Optional path to save the rendered figure to disk.
"""
sample_record = self.nusc.get('sample', sample_token)

Expand All @@ -591,15 +594,20 @@ def render_pointcloud_in_image(self,
plt.scatter(points[0, :], points[1, :], c=coloring, s=dot_size)
plt.axis('off')

if out_path is not None:
plt.savefig(out_path)

def render_sample(self,
token: str,
box_vis_level: BoxVisibility = BoxVisibility.ANY,
nsweeps: int = 1) -> None:
nsweeps: int = 1,
out_path: str = None) -> None:
"""
Render all LIDAR and camera sample_data in sample along with annotations.
:param token: Sample token.
:param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
:param nsweeps: Number of sweeps for lidar and radar.
:param out_path: Optional path to save the rendered figure to disk.
"""
record = self.nusc.get('sample', token)

Expand Down Expand Up @@ -633,13 +641,17 @@ def render_sample(self,
plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0)

if out_path is not None:
plt.savefig(out_path)

def render_sample_data(self,
sample_data_token: str,
with_anns: bool = True,
box_vis_level: BoxVisibility = BoxVisibility.ANY,
axes_limit: float = 40,
ax: Axes = None,
nsweeps: int = 1) -> None:
nsweeps: int = 1,
out_path: str = None) -> None:
"""
Render sample data onto axis.
:param sample_data_token: Sample_data token.
Expand All @@ -648,6 +660,7 @@ def render_sample_data(self,
:param axes_limit: Axes limit for lidar and radar (measured in meters).
:param ax: Axes onto which to render.
:param nsweeps: Number of sweeps for lidar and radar.
:param out_path: Optional path to save the rendered figure to disk.
"""

# Get sensor modality.
Expand Down Expand Up @@ -773,17 +786,22 @@ def render_sample_data(self,
ax.set_title(sd_record['channel'])
ax.set_aspect('equal')

if out_path is not None:
plt.savefig(out_path)

def render_annotation(self,
anntoken: str,
margin: float = 10,
view: np.ndarray = np.eye(4),
box_vis_level: BoxVisibility = BoxVisibility.ANY) -> None:
box_vis_level: BoxVisibility = BoxVisibility.ANY,
out_path: str = None) -> None:
"""
Render selected annotation.
:param anntoken: Sample_annotation token.
:param margin: How many meters in each direction to include in LIDAR view.
:param view: LIDAR view point.
:param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
:param out_path: Optional path to save the rendered figure to disk.
"""

ann_record = self.nusc.get('sample_annotation', anntoken)
Expand Down Expand Up @@ -829,10 +847,14 @@ def render_annotation(self,
c = np.array(self.get_color(box.name)) / 255.0
box.render(axes[1], view=camera_intrinsic, normalize=True, colors=(c, c, c))

def render_instance(self, instance_token: str) -> None:
if out_path is not None:
plt.savefig(out_path)

def render_instance(self, instance_token: str, out_path: str = None) -> None:
"""
Finds the annotation of the given instance that is closest to the vehicle, and then renders it.
:param instance_token: The instance token.
:param out_path: Optional path to save the rendered figure to disk.
"""
ann_tokens = self.nusc.field2token('sample_annotation', 'instance_token', instance_token)
closest = [np.inf, None]
Expand All @@ -845,7 +867,7 @@ def render_instance(self, instance_token: str) -> None:
if dist < closest[0]:
closest[0] = dist
closest[1] = ann_token
self.render_annotation(closest[1])
self.render_annotation(closest[1], out_path=out_path)

def render_scene(self,
scene_token: str,
Expand All @@ -862,6 +884,9 @@ def render_scene(self,

assert imsize[0] / imsize[1] == 16 / 9, "Aspect ratio should be 16/9."

if out_path is not None:
assert osp.splitext(out_path)[-1] == '.avi'

# Get records from DB.
scene_rec = self.nusc.get('scene', scene_token)
first_sample_rec = self.nusc.get('sample', scene_rec['first_sample_token'])
Expand Down Expand Up @@ -959,12 +984,16 @@ def render_scene(self,
def render_scene_channel(self,
scene_token: str,
channel: str = 'CAM_FRONT',
imsize: Tuple[float, float] = (640, 360)) -> None:
freq: float = 10,
imsize: Tuple[float, float] = (640, 360),
out_path: str = None) -> None:
"""
Renders a full scene for a particular camera channel.
:param scene_token: Unique identifier of scene to render.
:param channel: Channel to render.
:param freq: Display frequency (Hz).
:param imsize: Size of image to render. The larger the image, the slower this will run.
:param out_path: Optional path to write a video file of the rendered frames.
"""

valid_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
Expand All @@ -973,6 +1002,9 @@ def render_scene_channel(self,
assert imsize[0] / imsize[1] == 16 / 9, "Aspect ratio should be 16/9."
assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)

if out_path is not None:
assert osp.splitext(out_path)[-1] == '.avi'

# Get records from DB
scene_rec = self.nusc.get('scene', scene_token)
sample_rec = self.nusc.get('sample', scene_rec['first_sample_token'])
Expand All @@ -983,6 +1015,12 @@ def render_scene_channel(self,
cv2.namedWindow(name)
cv2.moveWindow(name, 0, 0)

if out_path is not None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(out_path, fourcc, freq, imsize)
else:
out = None

has_more_frames = True
while has_more_frames:

Expand All @@ -1001,6 +1039,8 @@ def render_scene_channel(self,
# Render
im = cv2.resize(im, imsize)
cv2.imshow(name, im)
if out_path is not None:
out.write(im)

key = cv2.waitKey(10) # Images stored at approx 10 Hz, so wait 10 ms.
if key == 32: # If space is pressed, pause.
Expand All @@ -1016,12 +1056,15 @@ def render_scene_channel(self,
has_more_frames = False

cv2.destroyAllWindows()
if out_path is not None:
out.release()

def render_egoposes_on_map(self, log_location: str,
scene_tokens: List = None,
close_dist: float = 100,
color_fg: Tuple[int, int, int] = (167, 174, 186),
color_bg: Tuple[int, int, int] = (255, 255, 255)) -> None:
color_bg: Tuple[int, int, int] = (255, 255, 255),
out_path: str = None) -> None:
"""
Renders ego poses on the map. These can be filtered by location or scene.
:param log_location: Name of the location, e.g. "singapore-onenorth", "singapore-hollandvillage",
Expand All @@ -1030,6 +1073,7 @@ def render_egoposes_on_map(self, log_location: str,
:param close_dist: Distance in meters for an ego pose to be considered within range of another ego pose.
:param color_fg: Color of the semantic prior in RGB format.
:param color_bg: Color of the non-semantic prior in RGB format.
:param out_path: Optional path to save the rendered figure to disk.
"""
# Get logs by location
log_tokens = [l['token'] for l in self.nusc.log if l['location'] == log_location]
Expand Down Expand Up @@ -1098,3 +1142,6 @@ def render_egoposes_on_map(self, log_location: str,
color_bar_ticklabels = plt.getp(color_bar.ax.axes, 'yticklabels')
plt.setp(color_bar_ticklabels, color='k')
plt.rcParams['figure.facecolor'] = 'white' # Reset for future plots

if out_path is not None:
plt.savefig(out_path)

0 comments on commit 3e1f93e

Please sign in to comment.