Skip to content

Commit

Permalink
fix test and format
Browse files Browse the repository at this point in the history
  • Loading branch information
pascal-roth committed Sep 28, 2024
1 parent 0289eee commit aafa60d
Show file tree
Hide file tree
Showing 5 changed files with 124 additions and 25 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ class TiledCamera(Camera):
accurately captured in the RGB images. We are currently working on improving the fidelity of the RGB images.
.. note::
Compared to other cameras, the depth clipping behavior cannot be altered as the backend implementation
Compared to other cameras, the depth clipping behavior cannot be altered as the backend implementation
automatically clips the values exceeding the maximum range to zero.
.. versionadded:: v1.0.0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -294,8 +294,10 @@ def _update_buffers_impl(self, env_ids: Sequence[int]):
# apply the maximum distance after the transformation
if self.cfg.depth_clipping_behavior == "max":
distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance)
distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance
elif self.cfg.depth_clipping_behavior == "zero":
distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0
distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0
self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view(-1, *self.image_shape)

if "distance_to_camera" in self.cfg.data_types:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ class OffsetCfg:
- ``"max"``: Values are clipped to the maximum value.
- ``"zero"``: Values are clipped to zero.
- ``"none``: No clipping is applied. Values will be returned as ``nan``.
- ``"none``: No clipping is applied. Values will be returned as ``inf`` for ``distance_to_camera`` and ``nan``
for ``distance_to_image_plane`` data type.
"""

def __post_init__(self):
Expand Down
88 changes: 74 additions & 14 deletions source/extensions/omni.isaac.lab/test/sensors/test_camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -384,7 +384,7 @@ def test_depth_clipping(self):
depth_clipping_behavior="zero",
)
camera_zero = Camera(camera_cfg_zero)

camera_cfg_none = copy.deepcopy(camera_cfg_zero)
camera_cfg_none.prim_path = "/World/CameraNone"
camera_cfg_none.depth_clipping_behavior = "none"
Expand All @@ -410,26 +410,86 @@ def test_depth_clipping(self):
# none clipping should contain inf values
self.assertTrue(torch.isinf(camera_none.data.output["distance_to_camera"]).any())
self.assertTrue(torch.isinf(camera_none.data.output["distance_to_image_plane"]).any())
self.assertTrue(camera_none.data.output["distance_to_camera"][~torch.isinf(camera_none.data.output["distance_to_camera"])].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(camera_none.data.output["distance_to_camera"][~torch.isinf(camera_none.data.output["distance_to_camera"])].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(camera_none.data.output["distance_to_image_plane"][~torch.isinf(camera_none.data.output["distance_to_image_plane"])].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(camera_none.data.output["distance_to_image_plane"][~torch.isinf(camera_none.data.output["distance_to_camera"])].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(
camera_none.data.output["distance_to_camera"][
~torch.isinf(camera_none.data.output["distance_to_camera"])
].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
self.assertTrue(
camera_none.data.output["distance_to_camera"][
~torch.isinf(camera_none.data.output["distance_to_camera"])
].max()
<= camera_cfg_zero.spawn.clipping_range[1]
)
self.assertTrue(
camera_none.data.output["distance_to_image_plane"][
~torch.isinf(camera_none.data.output["distance_to_image_plane"])
].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
self.assertTrue(
camera_none.data.output["distance_to_image_plane"][
~torch.isinf(camera_none.data.output["distance_to_camera"])
].max()
<= camera_cfg_zero.spawn.clipping_range[1]
)

# zero clipping should result in zero values
self.assertTrue(torch.all(camera_zero.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])] == 0.0))
self.assertTrue(torch.all(camera_zero.data.output["distance_to_image_plane"][torch.isinf(camera_none.data.output["distance_to_image_plane"])] == 0.0))
self.assertTrue(camera_zero.data.output["distance_to_camera"][camera_zero.data.output["distance_to_camera"] != 0.0].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(
torch.all(
camera_zero.data.output["distance_to_camera"][
torch.isinf(camera_none.data.output["distance_to_camera"])
]
== 0.0
)
)
self.assertTrue(
torch.all(
camera_zero.data.output["distance_to_image_plane"][
torch.isinf(camera_none.data.output["distance_to_image_plane"])
]
== 0.0
)
)
self.assertTrue(
camera_zero.data.output["distance_to_camera"][camera_zero.data.output["distance_to_camera"] != 0.0].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
self.assertTrue(camera_zero.data.output["distance_to_camera"].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(camera_zero.data.output["distance_to_image_plane"][camera_zero.data.output["distance_to_image_plane"] != 0.0].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(camera_zero.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(
camera_zero.data.output["distance_to_image_plane"][
camera_zero.data.output["distance_to_image_plane"] != 0.0
].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
self.assertTrue(
camera_zero.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1]
)

# max clipping should result in max values
self.assertTrue(torch.all(camera_max.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])] == camera_cfg_zero.spawn.clipping_range[1]))
self.assertTrue(torch.all(camera_max.data.output["distance_to_image_plane"][torch.isinf(camera_none.data.output["distance_to_image_plane"])] == camera_cfg_zero.spawn.clipping_range[1]))
self.assertTrue(
torch.all(
camera_max.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])]
== camera_cfg_zero.spawn.clipping_range[1]
)
)
self.assertTrue(
torch.all(
camera_max.data.output["distance_to_image_plane"][
torch.isinf(camera_none.data.output["distance_to_image_plane"])
]
== camera_cfg_zero.spawn.clipping_range[1]
)
)
self.assertTrue(camera_max.data.output["distance_to_camera"].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(camera_max.data.output["distance_to_camera"].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(camera_max.data.output["distance_to_image_plane"].min() >= camera_cfg_zero.spawn.clipping_range[0])
self.assertTrue(camera_max.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1])
self.assertTrue(
camera_max.data.output["distance_to_image_plane"].min() >= camera_cfg_zero.spawn.clipping_range[0]
)
self.assertTrue(
camera_max.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1]
)

def test_camera_resolution_all_colorize(self):
"""Test camera resolution is correctly set for all types with colorization enabled."""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -160,13 +160,13 @@ def test_depth_clipping(self):
prim_utils.create_prim("/World/CameraZero", "Xform")
prim_utils.create_prim("/World/CameraNone", "Xform")
prim_utils.create_prim("/World/CameraMax", "Xform")

# get camera cfgs
camera_cfg_zero = RayCasterCameraCfg(
prim_path="/World/CameraZero",
mesh_prim_paths=["/World/defaultGroundPlane"],
offset=RayCasterCameraCfg.OffsetCfg(
pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"
pos=(2.5, 2.5, 6.0), rot=(0.9914449, 0.0, 0.1305, 0.0), convention="world"
),
pattern_cfg=patterns.PinholeCameraPatternCfg().from_intrinsic_matrix(
focal_length=38.0,
Expand Down Expand Up @@ -198,20 +198,56 @@ def test_depth_clipping(self):
camera_max.update(self.dt)

# none clipping should contain inf values for distance_to_camera and nan values for distance_to_image_plane
self.assertTrue(torch.isnan(camera_none.data.output["distance_to_camera"]).any())
self.assertTrue(torch.isinf(camera_none.data.output["distance_to_camera"]).any())
self.assertTrue(torch.isnan(camera_none.data.output["distance_to_image_plane"]).any())
self.assertTrue(camera_none.data.output["distance_to_camera"][~torch.isnan(camera_none.data.output["distance_to_camera"])].max() <= camera_cfg_zero.max_distance)
self.assertTrue(camera_none.data.output["distance_to_image_plane"][~torch.isnan(camera_none.data.output["distance_to_camera"])].max() <= camera_cfg_zero.max_distance)
self.assertTrue(
camera_none.data.output["distance_to_camera"][
~torch.isinf(camera_none.data.output["distance_to_camera"])
].max()
> camera_cfg_zero.max_distance
)
self.assertTrue(
camera_none.data.output["distance_to_image_plane"][
~torch.isnan(camera_none.data.output["distance_to_image_plane"])
].max()
> camera_cfg_zero.max_distance
)

# zero clipping should result in zero values
self.assertTrue(torch.all(camera_zero.data.output["distance_to_camera"][torch.isnan(camera_none.data.output["distance_to_camera"])] == 0.0))
self.assertTrue(torch.all(camera_zero.data.output["distance_to_image_plane"][torch.isnan(camera_none.data.output["distance_to_image_plane"])] == 0.0))
self.assertTrue(
torch.all(
camera_zero.data.output["distance_to_camera"][
torch.isinf(camera_none.data.output["distance_to_camera"])
]
== 0.0
)
)
self.assertTrue(
torch.all(
camera_zero.data.output["distance_to_image_plane"][
torch.isnan(camera_none.data.output["distance_to_image_plane"])
]
== 0.0
)
)
self.assertTrue(camera_zero.data.output["distance_to_camera"].max() <= camera_cfg_zero.max_distance)
self.assertTrue(camera_zero.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.max_distance)

# max clipping should result in max values
self.assertTrue(torch.all(camera_max.data.output["distance_to_camera"][torch.isnan(camera_none.data.output["distance_to_camera"])] == camera_cfg_zero.max_distance))
self.assertTrue(torch.all(camera_max.data.output["distance_to_image_plane"][torch.isnan(camera_none.data.output["distance_to_image_plane"])] == camera_cfg_zero.max_distance))
self.assertTrue(
torch.all(
camera_max.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])]
== camera_cfg_zero.max_distance
)
)
self.assertTrue(
torch.all(
camera_max.data.output["distance_to_image_plane"][
torch.isnan(camera_none.data.output["distance_to_image_plane"])
]
== camera_cfg_zero.max_distance
)
)
self.assertTrue(camera_max.data.output["distance_to_camera"].max() <= camera_cfg_zero.max_distance)
self.assertTrue(camera_max.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.max_distance)

Expand Down

0 comments on commit aafa60d

Please sign in to comment.