diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 801bf513e..1c37a21bc 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -796,7 +796,8 @@ def _toggle(self, obj, value):
         toggle_position = toggle_state.get_link_position()
         yield from self._navigate_if_needed(obj, toggle_position)
 
-        hand_orientation = self.robot.eef_links[self.arm].get_orientation()  # Just keep the current hand orientation.
+        # Just keep the current hand orientation.
+        hand_orientation = self.robot.eef_links[self.arm].get_position_orientation()[1]
         desired_hand_pose = (toggle_position, hand_orientation)
 
         yield from self._move_hand(desired_hand_pose)
@@ -1641,7 +1642,7 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False):
             if th.norm(body_target_pose[0][:2]) < dist_threshold:
                 break
 
-            diff_pos = end_pose[0] - self.robot.get_position()
+            diff_pos = end_pose[0] - self.robot.get_position_orientation()[0]
             intermediate_pose = (
                 end_pose[0],
                 T.euler2quat(th.tensor([0, 0, math.atan2(diff_pos[1], diff_pos[0])], dtype=th.float32)),
@@ -1783,7 +1784,11 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs):
             raise ActionPrimitiveError(
                 ActionPrimitiveError.Reason.SAMPLING_ERROR,
                 "Could not find valid position near object.",
-                {"target object": obj.name, "target pos": obj.get_position(), "pose on target": pose_on_obj},
+                {
+                    "target object": obj.name,
+                    "target pos": obj.get_position_orientation()[0],
+                    "pose on target": pose_on_obj,
+                },
             )
 
     @staticmethod
diff --git a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
index 4db909166..4db065c55 100644
--- a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
@@ -167,8 +167,8 @@ def _grasp(self, obj: DatasetObject):
         # yield from self._navigate_if_needed(obj)
 
         # Perform forced assisted grasp
-        obj.set_position(self.robot.get_eef_position(self.arm))
-        self.robot._establish_grasp(self.arm, (obj, obj.root_link), obj.get_position())
+        obj.set_position_orientation(position=self.robot.get_eef_position(self.arm))
+        self.robot._establish_grasp(self.arm, (obj, obj.root_link), obj.get_position_orientation()[0])
 
         # Execute for a moment
         yield from self._settle_robot()
@@ -554,7 +554,10 @@ def _place_near_heating_element(self, heat_source_obj):
 
         # Get the position of the heat source on the thing we're placing near
         heating_element_positions = th.tensor(
-            [link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()]
+            [
+                link.get_position_orientation()[0]
+                for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()
+            ]
         )
         heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py
index b94cc6aa2..26e5337bd 100644
--- a/omnigibson/controllers/null_joint_controller.py
+++ b/omnigibson/controllers/null_joint_controller.py
@@ -55,7 +55,7 @@ def __init__(
                 applied
         """
         # Store values
-        self._default_command = th.zeros(len(dof_idx)) if default_command is None else th.tensor(default_command)
+        self._default_command = th.zeros(len(dof_idx)) if default_command is None else default_command
 
         # Run super init
         super().__init__(
diff --git a/omnigibson/envs/env_base.py b/omnigibson/envs/env_base.py
index 2be5d40a4..6474e4cab 100644
--- a/omnigibson/envs/env_base.py
+++ b/omnigibson/envs/env_base.py
@@ -268,6 +268,13 @@ def _load_robots(self):
                     robot_config["name"] = f"robot{i}"
 
                 position, orientation = robot_config.pop("position", None), robot_config.pop("orientation", None)
+                if position is not None:
+                    position = position if isinstance(position, th.Tensor) else th.tensor(position, dtype=th.float32)
+                if orientation is not None:
+                    orientation = (
+                        orientation if isinstance(orientation, th.Tensor) else th.tensor(orientation, dtype=th.float32)
+                    )
+
                 # Make sure robot exists, grab its corresponding kwargs, and create / import the robot
                 robot = create_class_from_registry_and_config(
                     cls_name=robot_config["type"],
@@ -277,8 +284,7 @@
                 )
                 # Import the robot into the simulator
                 self.scene.add_object(robot)
-                # TODO: Fix this after scene_local_position_orientation API is fixed
-                robot.set_local_pose(position=position, orientation=orientation)
+                robot.set_position_orientation(position=position, orientation=orientation, frame="scene")
 
         assert og.sim.is_stopped(), "Simulator must be stopped after loading robots!"
 
@@ -302,7 +308,7 @@ def _load_objects(self):
                 )
                 # Import the object into the simulator and set the pose
                 self.scene.add_object(obj)
-                obj.set_local_pose(position=position, orientation=orientation)
+                obj.set_position_orientation(position=position, orientation=orientation, frame="scene")
 
         assert og.sim.is_stopped(), "Simulator must be stopped after loading objects!"
 
@@ -333,7 +339,7 @@ def _load_external_sensors(self):
                 # Load and initialize this sensor
                 sensor.load(self.scene)
                 sensor.initialize()
-                sensor.set_local_pose(local_position, local_orientation)
+                sensor.set_position_orientation(position=local_position, orientation=local_orientation, frame="scene")
                 self._external_sensors[sensor.name] = sensor
                 self._external_sensors_include_in_obs[sensor.name] = include_in_obs
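Note on the API this patch migrates to: the position-only helpers and `set_local_pose`/`get_local_pose` give way to a single getter/setter pair with an explicit `frame` argument. A minimal usage sketch (assumes a loaded OmniGibson `Environment` bound to `env`; the values are illustrative):

```python
import torch as th

# Old: robot.get_position() / robot.set_local_pose(...)
# New: one unified getter/setter, with an explicit frame.
robot = env.robots[0]
position, orientation = robot.get_position_orientation()  # world frame by default
robot.set_position_orientation(
    position=th.tensor([0.0, 0.0, 0.0]),
    orientation=th.tensor([0.0, 0.0, 0.0, 1.0]),
    frame="scene",  # interpret the pose relative to the scene, as _load_robots now does
)
```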
diff --git a/omnigibson/examples/object_states/attachment_demo.py b/omnigibson/examples/object_states/attachment_demo.py
index e4f8ebd9e..129e520f6 100644
--- a/omnigibson/examples/object_states/attachment_demo.py
+++ b/omnigibson/examples/object_states/attachment_demo.py
@@ -119,12 +119,12 @@ def main(random_selection=False, headless=False, short_exec=False):
         env.step([])
 
     shelf_baseboard = env.scene.object_registry("name", "shelf_baseboard")
-    shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1])
+    shelf_baseboard.set_position_orientation(position=[0, -0.979, 0.26], orientation=[0, 0, 0, 1])
     shelf_baseboard.keep_still()
    shelf_baseboard.set_linear_velocity(th.tensor([-0.2, 0, 0]))
 
     shelf_side_left = env.scene.object_registry("name", "shelf_side_left")
-    shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1])
+    shelf_side_left.set_position_orientation(position=[-0.4, 0.0, 0.2], orientation=[0, 0, 0, 1])
     shelf_side_left.keep_still()
 
     input(
diff --git a/omnigibson/examples/object_states/dicing_demo.py b/omnigibson/examples/object_states/dicing_demo.py
index 1105f59b7..6141e8fee 100644
--- a/omnigibson/examples/object_states/dicing_demo.py
+++ b/omnigibson/examples/object_states/dicing_demo.py
@@ -91,7 +91,7 @@ def main(random_selection=False, headless=False, short_exec=False):
     knife.keep_still()
     knife.set_position_orientation(
-        position=apple.get_position() + th.tensor([-0.15, 0.0, 0.2]),
+        position=apple.get_position_orientation()[0] + th.tensor([-0.15, 0.0, 0.2]),
         orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0])),
     )
diff --git a/omnigibson/examples/object_states/heat_source_or_sink_demo.py b/omnigibson/examples/object_states/heat_source_or_sink_demo.py
index 5d2a4c56f..91242545f 100644
--- a/omnigibson/examples/object_states/heat_source_or_sink_demo.py
+++ b/omnigibson/examples/object_states/heat_source_or_sink_demo.py
@@ -82,7 +82,7 @@ def main(random_selection=False, headless=False, short_exec=False):
     # Move stove, notify user
     if not short_exec:
         input("Heat source is now moving: Press ENTER to continue.")
-    stove.set_position(th.tensor([0, 1.0, 0.61]))
+    stove.set_position_orientation(position=th.tensor([0, 1.0, 0.61]))
     for i in range(100):
         env.step(th.empty(0))
diff --git a/omnigibson/examples/object_states/onfire_demo.py b/omnigibson/examples/object_states/onfire_demo.py
index 057dcbd29..1dba249cb 100644
--- a/omnigibson/examples/object_states/onfire_demo.py
+++ b/omnigibson/examples/object_states/onfire_demo.py
@@ -86,10 +86,16 @@ def main(random_selection=False, headless=False, short_exec=False):
     stove.states[object_states.ToggledOn].set_value(True)
 
     # The first apple will be affected by the stove
-    apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0.11, 0, 0.1]))
+    apples[0].set_position_orientation(
+        position=stove.states[object_states.HeatSourceOrSink].link.get_position_orientation()[0]
+        + th.tensor([0.11, 0, 0.1])
+    )
 
     # The second apple will NOT be affected by the stove, but will be affected by the first apple once it's on fire.
-    apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0.32, 0, 0.1]))
+    apples[1].set_position_orientation(
+        position=stove.states[object_states.HeatSourceOrSink].link.get_position_orientation()[0]
+        + th.tensor([0.32, 0, 0.1])
+    )
 
     steps = 0
     max_steps = -1 if not short_exec else 1000
diff --git a/omnigibson/examples/object_states/sample_kinematics_demo.py b/omnigibson/examples/object_states/sample_kinematics_demo.py
index 9150392af..f5a60a864 100644
--- a/omnigibson/examples/object_states/sample_kinematics_demo.py
+++ b/omnigibson/examples/object_states/sample_kinematics_demo.py
@@ -128,8 +128,8 @@ def sample_microwave_plates_apples(env):
     og.log.info("Placing cabinet on the floor...")
     cabinet.set_orientation([0, 0, 0, 1.0])
     env.step(th.empty(0))
-    offset = cabinet.get_position()[2] - cabinet.aabb_center[2]
-    cabinet.set_position(th.tensor([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
+    offset = cabinet.get_position_orientation()[0][2] - cabinet.aabb_center[2]
+    cabinet.set_position_orientation(position=th.tensor([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
     env.step(th.empty(0))
 
     # Set microwave on top of the cabinet, open it, and step 100 times
@@ -172,8 +172,8 @@ def sample_boxes_on_shelf(env):
     og.log.info("Placing shelf on the floor...")
     shelf.set_orientation([0, 0, 0, 1.0])
     env.step(th.empty(0))
-    offset = shelf.get_position()[2] - shelf.aabb_center[2]
-    shelf.set_position(th.tensor([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
+    offset = shelf.get_position_orientation()[0][2] - shelf.aabb_center[2]
+    shelf.set_position_orientation(position=th.tensor([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
     env.step(th.empty(0))  # One step is needed for the object to be fully initialized
     og.log.info("Shelf placed.")
diff --git a/omnigibson/examples/object_states/slicing_demo.py b/omnigibson/examples/object_states/slicing_demo.py
index 242b74e4d..3067b895f 100644
--- a/omnigibson/examples/object_states/slicing_demo.py
+++ b/omnigibson/examples/object_states/slicing_demo.py
@@ -89,7 +89,7 @@ def main(random_selection=False, headless=False, short_exec=False):
     knife.keep_still()
     knife.set_position_orientation(
-        position=apple.get_position() + th.tensor([-0.15, 0.0, 0.2], dtype=th.float32),
+        position=apple.get_position_orientation()[0] + th.tensor([-0.15, 0.0, 0.2], dtype=th.float32),
         orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)),
     )
diff --git a/omnigibson/examples/object_states/temperature_demo.py b/omnigibson/examples/object_states/temperature_demo.py
index b38e5d2c4..a3d3a917e 100644
--- a/omnigibson/examples/object_states/temperature_demo.py
+++ b/omnigibson/examples/object_states/temperature_demo.py
@@ -149,7 +149,10 @@ def main(random_selection=False, headless=False, short_exec=False):
     for apple in apples:
         apple.states[object_states.Temperature].set_value(-50)
     apples[0].states[object_states.Inside].set_value(oven, True)
-    apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0, 0, 0.1]))
+    apples[1].set_position_orientation(
+        position=stove.states[object_states.HeatSourceOrSink].link.get_position_orientation()[0]
+        + th.tensor([0, 0, 0.1])
+    )
     apples[2].states[object_states.OnTop].set_value(tray, True)
     apples[3].states[object_states.Inside].set_value(fridge, True)
     apples[4].states[object_states.Inside].set_value(microwave, True)
diff --git a/omnigibson/examples/objects/load_object_selector.py b/omnigibson/examples/objects/load_object_selector.py
index 8469cb0aa..075415135 100644
--- a/omnigibson/examples/objects/load_object_selector.py
+++ b/omnigibson/examples/objects/load_object_selector.py
@@ -61,8 +61,8 @@ def main(random_selection=False, headless=False, short_exec=False):
     # Place the object so it rests on the floor
     obj = env.scene.object_registry("name", "obj")
-    center_offset = obj.get_position() - obj.aabb_center + th.tensor([0, 0, obj.aabb_extent[2] / 2.0])
-    obj.set_position(center_offset)
+    center_offset = obj.get_position_orientation()[0] - obj.aabb_center + th.tensor([0, 0, obj.aabb_extent[2] / 2.0])
+    obj.set_position_orientation(position=center_offset)
 
     # Step through the environment
     max_steps = 100 if short_exec else 10000
diff --git a/omnigibson/examples/objects/visualize_object.py b/omnigibson/examples/objects/visualize_object.py
index ba41009a9..c5cd95650 100644
--- a/omnigibson/examples/objects/visualize_object.py
+++ b/omnigibson/examples/objects/visualize_object.py
@@ -111,8 +111,8 @@ def main(random_selection=False, headless=False, short_exec=False):
         env.step(th.empty(0))
 
     # Move the object so that its center is at [0, 0, 1]
-    center_offset = obj.get_position() - obj.aabb_center + th.tensor([0, 0, 1.0])
-    obj.set_position(center_offset)
+    center_offset = obj.get_position_orientation()[0] - obj.aabb_center + th.tensor([0, 0, 1.0])
+    obj.set_position_orientation(position=center_offset)
 
     # Allow the user to easily move the camera around
     og.sim.enable_viewer_camera_teleoperation()
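The demos above share one idiom worth calling out: resting an object on the floor by comparing its origin against its AABB. A sketch of that pattern under the new API (`obj` is any loaded OmniGibson object; the target x/y values are illustrative):

```python
import torch as th

pos = obj.get_position_orientation()[0]  # world-frame position
offset = pos[2] - obj.aabb_center[2]     # origin height above the AABB center
# Place the AABB bottom at z=0; note the scalar offset shifts all three axes,
# matching the demo code verbatim.
obj.set_position_orientation(position=th.tensor([1.0, 0.0, obj.aabb_extent[2] / 2]) + offset)
```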
diff --git a/omnigibson/examples/robots/advanced/ik_example.py b/omnigibson/examples/robots/advanced/ik_example.py
index c6e38eceb..72d0a6434 100644
--- a/omnigibson/examples/robots/advanced/ik_example.py
+++ b/omnigibson/examples/robots/advanced/ik_example.py
@@ -66,7 +66,7 @@ def main(random_selection=False, headless=False, short_exec=False):
     robot = env.robots[0]
 
     # Set robot base at the origin
-    robot.set_position_orientation(th.tensor([0, 0, 0]), th.tensor([0, 0, 0, 1]))
+    robot.set_position_orientation(position=th.tensor([0, 0, 0]), orientation=th.tensor([0, 0, 0, 1]))
     # At least one simulation step while the simulator is playing must occur for the robot (or in general, any object)
     # to be fully initialized after it is imported into the simulator
     og.sim.play()
@@ -128,7 +128,7 @@ def execute_ik(pos, quat=None, max_iter=100):
 
     # Get initial EE position and set marker to that location
     command = robot.get_eef_position()
-    marker.set_position(command)
+    marker.set_position_orientation(position=command)
     og.sim.step()
 
     # Setup callbacks for grabbing keyboard inputs from omni
@@ -154,7 +154,7 @@ def keyboard_event_handler(event, *args, **kwargs):
             delta_cmd = input_to_xyz_delta_command(inp=event.input)
             if delta_cmd is not None:
                 command = command + delta_cmd
-                marker.set_position(command)
+                marker.set_position_orientation(position=command)
                 og.sim.step()
 
         # Callback must return True if valid
diff --git a/omnigibson/examples/robots/all_robots_visualizer.py b/omnigibson/examples/robots/all_robots_visualizer.py
index 9beed651f..a9516b886 100644
--- a/omnigibson/examples/robots/all_robots_visualizer.py
+++ b/omnigibson/examples/robots/all_robots_visualizer.py
@@ -55,7 +55,7 @@ def main(random_selection=False, headless=False, short_exec=False):
 
         # Then apply random actions for a bit
         for _ in range(30):
-            action_lo, action_hi = -1, 1
+            action_lo, action_hi = -0.1, 0.1
             action = th.rand(robot.action_dim) * (action_hi - action_lo) + action_lo
             if robot_name == "Tiago":
                 tiago_lo, tiago_hi = -0.1, 0.1
diff --git a/omnigibson/examples/robots/grasping_mode_example.py b/omnigibson/examples/robots/grasping_mode_example.py
index 417a814c1..89b5baf37 100644
--- a/omnigibson/examples/robots/grasping_mode_example.py
+++ b/omnigibson/examples/robots/grasping_mode_example.py
@@ -80,7 +80,7 @@ def main(random_selection=False, headless=False, short_exec=False):
 
     # Reset the robot
     robot = env.robots[0]
-    robot.set_position([0, 0, 0])
+    robot.set_position_orientation(position=[0, 0, 0])
     robot.reset()
     robot.keep_still()
diff --git a/omnigibson/examples/scenes/scene_tour_demo.py b/omnigibson/examples/scenes/scene_tour_demo.py
index 532dc95bc..4bcc57992 100644
--- a/omnigibson/examples/scenes/scene_tour_demo.py
+++ b/omnigibson/examples/scenes/scene_tour_demo.py
@@ -61,7 +61,7 @@ def main(random_selection=False, headless=False, short_exec=False):
 
     def add_waypoint():
         nonlocal waypoints
-        pos = cam_mover.cam.get_position()
+        pos = cam_mover.cam.get_position_orientation()[0]
         print(f"Added waypoint at {pos}")
         waypoints.append(pos)
diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py
index 709b3c38f..2a532a42e 100644
--- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py
+++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py
@@ -118,7 +118,7 @@ def main():
     env = og.Environment(configs=cfg)
     env.reset()
     # update viewer camera pose
-    og.sim.viewer_camera.set_position_orientation([-0.22, 0.99, 1.09], [-0.14, 0.47, 0.84, -0.23])
+    og.sim.viewer_camera.set_position_orientation(position=[-0.22, 0.99, 1.09], orientation=[-0.14, 0.47, 0.84, -0.23])
 
     # Start teleoperation system
     robot = env.robots[0]
diff --git a/omnigibson/maps/map_base.py b/omnigibson/maps/map_base.py
index d8a50739f..45b57fe43 100644
--- a/omnigibson/maps/map_base.py
+++ b/omnigibson/maps/map_base.py
@@ -55,5 +55,7 @@ def world_to_map(self, xy):
             xy: 2D location in world reference frame (metric)
 
         :return: 2D location in map reference frame (image)
        """
+
+        xy = th.tensor(xy) if not isinstance(xy, th.Tensor) else xy
         point_wrt_map = xy / self.map_resolution + self.map_size / 2.0
         return th.flip(point_wrt_map, dims=tuple(range(point_wrt_map.dim()))).int()
diff --git a/omnigibson/object_states/attached_to.py b/omnigibson/object_states/attached_to.py
index 1d47292d0..41b355629 100644
--- a/omnigibson/object_states/attached_to.py
+++ b/omnigibson/object_states/attached_to.py
@@ -224,10 +224,12 @@ def _find_attachment_links(
                 if other.states[AttachedTo].children[parent_link_name] is None:
                     if bypass_alignment_checking:
                         return child_link, parent_link
-                    pos_diff = th.norm(child_link.get_position() - parent_link.get_position())
-                    orn_diff = T.get_orientation_diff_in_radian(
-                        child_link.get_orientation(), parent_link.get_orientation()
-                    )
+
+                    child_pos, child_orn = child_link.get_position_orientation()
+                    parent_pos, parent_orn = parent_link.get_position_orientation()
+                    pos_diff = th.norm(child_pos - parent_pos)
+                    orn_diff = T.get_orientation_diff_in_radian(child_orn, parent_orn)
+
                     if pos_diff < pos_thresh and orn_diff < orn_thresh:
                         return child_link, parent_link
@@ -301,7 +303,7 @@ def _attach(self, other, child_link, parent_link, joint_type=None, can_joint_bre
             new_child_root_quat = child_root_quat
 
         # Actually move the object and also keep it still for stability purposes.
-        self.obj.set_position_orientation(new_child_root_pos, new_child_root_quat)
+        self.obj.set_position_orientation(position=new_child_root_pos, orientation=new_child_root_quat)
         self.obj.keep_still()
         other.keep_still()
diff --git a/omnigibson/object_states/heat_source_or_sink.py b/omnigibson/object_states/heat_source_or_sink.py
index 0017d448b..b3e3f32fd 100644
--- a/omnigibson/object_states/heat_source_or_sink.py
+++ b/omnigibson/object_states/heat_source_or_sink.py
@@ -235,7 +235,7 @@ def overlap_callback(hit):
             if n_cloth_objs > 0:
                 cloth_positions = th.zeros((n_cloth_objs, 3))
                 for i, obj in enumerate(cloth_objs):
-                    cloth_positions[i] = obj.get_position()
+                    cloth_positions[i] = obj.get_position_orientation()[0]
                 for idx in th.where(
                     th.all(
                         (aabb_lower.reshape(1, 3) < cloth_positions) & (cloth_positions < aabb_upper.reshape(1, 3)),
@@ -253,7 +253,11 @@ def overlap_callback(hit):
         else:
             # Position is either the AABB center of the default link or the metalink position itself
-            heat_source_pos = self.link.aabb_center if self.link == self._default_link else self.link.get_position()
+            heat_source_pos = (
+                self.link.aabb_center
+                if self.link == self._default_link
+                else self.link.get_position_orientation()[0]
+            )
 
             # Use overlap_sphere check!
             og.sim.psqi.overlap_sphere(
@@ -268,7 +272,7 @@ def overlap_callback(hit):
             if n_cloth_objs > 0:
                 cloth_positions = th.zeros((n_cloth_objs, 3))
                 for i, obj in enumerate(cloth_objs):
-                    cloth_positions[i] = obj.get_position()
+                    cloth_positions[i] = obj.get_position_orientation()[0]
                 for idx in th.where(
                     th.norm(heat_source_pos.reshape(1, 3) - cloth_positions, dim=-1) <= self.distance_threshold
                 )[0]:
diff --git a/omnigibson/object_states/overlaid.py b/omnigibson/object_states/overlaid.py
index 96e64990c..12f6369c5 100644
--- a/omnigibson/object_states/overlaid.py
+++ b/omnigibson/object_states/overlaid.py
@@ -62,10 +62,7 @@ def _get_value(self, other):
         # Compute the base aligned bounding box of the rigid object.
         bbox_center, bbox_orn, bbox_extent, _ = other.get_base_aligned_bbox(xy_aligned=True)
         vertices_local = th.tensor(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
-        vertices = th.tensor(
-            T.transform_points(vertices_local, T.pose2mat((bbox_center, bbox_orn))),
-            dtype=th.float32,
-        )
+        vertices = T.transform_points(vertices_local, T.pose2mat((bbox_center, bbox_orn)))
         rigid_hull = ConvexHull(vertices[:, :2])
 
         # The goal is to find the intersection of the convex hull and the bounding box.
diff --git a/omnigibson/object_states/particle_modifier.py b/omnigibson/object_states/particle_modifier.py
index 82a1a902b..d0c63dfe1 100644
--- a/omnigibson/object_states/particle_modifier.py
+++ b/omnigibson/object_states/particle_modifier.py
@@ -419,9 +419,10 @@ def overlap_callback(hit):
                 else self._projection_mesh_params["extents"][2] / 2
             )
 
-            self.projection_mesh.set_local_pose(
+            self.projection_mesh.set_position_orientation(
                 position=th.tensor([0, 0, -z_offset]),
                 orientation=T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)),
+                frame="parent",
             )
 
         # Generate the function for checking whether points are within the projection mesh
@@ -514,7 +515,8 @@ def condition(obj) --> bool
             cond = (
                 lambda obj: (
                     th.dot(
-                        T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ th.tensor([0, 0, 1]),
+                        T.quat2mat(obj.states[self.__class__].link.get_position_orientation()[1])
+                        @ th.tensor([0, 0, 1]),
                         th.tensor([0, 0, 1]),
                     )
                     > 0
@@ -1081,14 +1083,14 @@ def _initialize(self):
         self.projection_source_sphere.initialize()
         self.projection_source_sphere.visible = False
         # Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh
-        self.projection_source_sphere.set_local_pose(
-            orientation=T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32))
-        )
+        self.projection_source_sphere.set_position_orientation(
+            orientation=T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)), frame="parent"
+        )
 
         # Make sure the meta mesh is aligned with the meta link if visualizing
         # This corresponds to checking (a) position of tip of projection mesh should align with origin of
         # metalink, and (b) zero relative orientation between the metalink and the projection mesh
-        local_pos, local_quat = self.projection_mesh.get_local_pose()
+        local_pos, local_quat = self.projection_mesh.get_position_orientation(frame="parent")
         assert th.all(
             th.isclose(local_pos + th.tensor([0, 0, height / 2.0]), th.zeros_like(local_pos))
         ), "Projection mesh tip should align with metalink position!"
@@ -1441,7 +1443,7 @@ def _sample_particle_locations_from_adjacency_area(self, system):
         lower_upper = th.cat([lower, upper], dim=0)
 
         # Sample in all directions, shooting from the center of the link / object frame
-        pos = self.link.get_position()
+        pos = self.link.get_position_orientation()[0]
         start_points = th.ones((n_samples, 3)) * pos.reshape(1, 3)
         end_points = th.rand(n_samples, 3) * (upper - lower) + lower
         sides, axes = th.randint(2, size=(n_samples,)), th.randint(3, size=(n_samples,))
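Across the object-state code, every `get_local_pose`/`set_local_pose` call becomes a `frame="parent"` call on the unified API. An equivalent before/after, sketched for any XForm-derived prim (`mesh` and `z_offset` here are illustrative stand-ins, not names from this diff):

```python
import torch as th
import omnigibson.utils.transform_utils as T

z_offset = 0.5  # illustrative value

# Before:
#   mesh.set_local_pose(position=..., orientation=...)
#   local_pos, local_quat = mesh.get_local_pose()
# After:
mesh.set_position_orientation(
    position=th.tensor([0, 0, -z_offset]),
    orientation=T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)),
    frame="parent",
)
local_pos, local_quat = mesh.get_position_orientation(frame="parent")
```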
diff --git a/omnigibson/object_states/robot_related_states.py b/omnigibson/object_states/robot_related_states.py
index ab21b8f51..3871aad30 100644
--- a/omnigibson/object_states/robot_related_states.py
+++ b/omnigibson/object_states/robot_related_states.py
@@ -30,8 +30,8 @@ def _get_value(self, obj):
 #         if not robot:
 #             return False
 
-#         robot_pos = robot.get_position()
-#         object_pos = self.obj.get_position()
+#         robot_pos = robot.get_position_orientation()[0]
+#         object_pos = self.obj.get_position_orientation()[0]
 
 #         return th.norm(object_pos - th.tensor(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
diff --git a/omnigibson/objects/dataset_object.py b/omnigibson/objects/dataset_object.py
index a38ed97b4..5237995d0 100644
--- a/omnigibson/objects/dataset_object.py
+++ b/omnigibson/objects/dataset_object.py
@@ -312,7 +312,7 @@ def set_bbox_center_position_orientation(self, position=None, orientation=None):
                 None means it will not be changed
         """
         if orientation is None:
-            orientation = self.get_orientation()
+            orientation = self.get_position_orientation()[1]
         if position is not None:
             rotated_offset = T.pose_transform(
                 th.tensor([0, 0, 0], dtype=th.float32),
@@ -321,7 +321,7 @@
                 th.tensor([0, 0, 0, 1], dtype=th.float32),
             )[0]
             position = position + rotated_offset
-        self.set_position_orientation(position, orientation)
+        self.set_position_orientation(position=position, orientation=orientation)
 
     @property
     def model(self):
diff --git a/omnigibson/objects/object_base.py b/omnigibson/objects/object_base.py
index 646b52167..54845b4a4 100644
--- a/omnigibson/objects/object_base.py
+++ b/omnigibson/objects/object_base.py
@@ -397,7 +397,7 @@ def get_base_aligned_bbox(self, link_name=None, visual=False, xy_aligned=False):
             desired_frame_to_world = xy_aligned_base_com_to_world
         else:
             # Default desired frame is base CoM frame.
-            desired_frame_to_world = th.tensor(base_frame_to_world, dtype=th.float32)
+            desired_frame_to_world = base_frame_to_world
 
         # Compute the world-to-base frame transform.
         world_to_desired_frame = th.linalg.inv_ex(desired_frame_to_world).inverse
diff --git a/omnigibson/objects/stateful_object.py b/omnigibson/objects/stateful_object.py
index 6d20059ef..a1bd5fc1e 100644
--- a/omnigibson/objects/stateful_object.py
+++ b/omnigibson/objects/stateful_object.py
@@ -1,5 +1,6 @@
 import sys
 from collections import defaultdict
+from typing import Literal
 
 import torch as th
 from bddl.object_taxonomy import ObjectTaxonomy
@@ -603,8 +604,19 @@ def clear_states_cache(self):
         for _, obj_state in self._states.items():
             obj_state.clear_cache()
 
-    def set_position_orientation(self, position=None, orientation=None):
-        super().set_position_orientation(position=position, orientation=orientation)
+    def set_position_orientation(
+        self, position=None, orientation=None, frame: Literal["world", "parent", "scene"] = "world"
+    ):
+        """
+        Set the position and orientation of the stateful object.
+
+        Args:
+            position (None or 3-array): The position to set the object to. If None, the position is not changed.
+            orientation (None or 4-array): The orientation to set the object to. If None, the orientation is not changed.
+            frame (Literal): The frame in which to set the position and orientation. Defaults to world. parent frame
+                sets the pose relative to the object's parent; scene frame sets it relative to the scene.
+        """
+        super().set_position_orientation(position=position, orientation=orientation, frame=frame)
         self.clear_states_cache()
 
     @classproperty
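Subclasses that override the setter now need to accept and forward `frame`, as `StatefulObject` does above; a hypothetical subclass following the same pattern:

```python
from typing import Literal

from omnigibson.objects.stateful_object import StatefulObject


class TrackedObject(StatefulObject):  # hypothetical example class, not part of this diff
    def set_position_orientation(
        self, position=None, orientation=None, frame: Literal["world", "parent", "scene"] = "world"
    ):
        # Forward `frame` so callers can keep using world-, parent-, or scene-relative
        # poses; StatefulObject then clears its pose-dependent state caches.
        super().set_position_orientation(position=position, orientation=orientation, frame=frame)
```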
diff --git a/omnigibson/prims/entity_prim.py b/omnigibson/prims/entity_prim.py
index 55aa66eee..7849ab279 100644
--- a/omnigibson/prims/entity_prim.py
+++ b/omnigibson/prims/entity_prim.py
@@ -1,4 +1,6 @@
+import math
 from functools import cached_property
+from typing import Literal
 
 import networkx as nx
 import torch as th
@@ -322,8 +324,10 @@ def _update_joint_limits(self):
                 ), "Cannot update joint limits for a non-uniformly scaled object when already initialized."
                 for link in self.links.values():
                     if joint.body0 == link.prim_path:
-                        # Find the parent link frame orientation in the object frame
-                        _, link_local_orn = link.get_local_pose()
+                        # Find the parent link frame orientation in the object frame. Note that we
+                        # are OK getting this from XFormPrim since we actually want it relative to
+                        # the object frame, notwithstanding the physics.
+                        _, link_local_orn = XFormPrim.get_position_orientation(link, frame="parent")
 
                         # Find the joint frame orientation in the parent link frame
                         joint_local_orn = th.tensor(
@@ -989,86 +993,109 @@ def get_relative_linear_velocity(self):
         Returns:
             3-array: (x,y,z) Linear velocity of root link in its own frame
         """
-        return T.quat2mat(self.get_orientation()).T @ self.get_linear_velocity()
+        return T.quat2mat(self.get_position_orientation()[1]).T @ self.get_linear_velocity()
 
     def get_relative_angular_velocity(self):
         """
         Returns:
             3-array: (ax,ay,az) angular velocity of root link in its own frame
         """
-        return T.quat2mat(self.get_orientation()).T @ self.get_angular_velocity()
+        return T.quat2mat(self.get_position_orientation()[1]).T @ self.get_angular_velocity()
 
-    def set_position_orientation(self, position=None, orientation=None):
-        position = (
-            position
-            if isinstance(position, th.Tensor)
-            else th.tensor(position, dtype=th.float32) if position is not None else None
-        )
-        orientation = (
-            orientation
-            if isinstance(orientation, th.Tensor)
-            else th.tensor(orientation, dtype=th.float32) if orientation is not None else None
-        )
-        # If kinematic only, clear cache for the root link
-        if self.kinematic_only:
-            self.root_link.clear_kinematic_only_cache()
-        # If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
-        if og.sim.is_stopped():
-            XFormPrim.set_position_orientation(self, position=position, orientation=orientation)
-        # Delegate to RigidPrim if we are not articulated
-        elif self._articulation_view is None:
-            self.root_link.set_position_orientation(position=position, orientation=orientation)
-        # Sim is running and articulation view exists, so use that physx API backend
-        else:
-            if position is not None:
-                position = th.asarray(position)[None, :]
-            if orientation is not None:
-                orientation = th.asarray(orientation)[None, [3, 0, 1, 2]]
-            self._articulation_view.set_world_poses(position, orientation)
-            PoseAPI.invalidate()
+    def set_position_orientation(self, position=None, orientation=None, frame: Literal["world", "scene"] = "world"):
+        """
+        Set the position and orientation of the entity prim.
 
-    def get_position_orientation(self, clone=True):
-        # If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
-        if og.sim.is_stopped():
-            return XFormPrim.get_position_orientation(self, clone=clone)
-        # Delegate to RigidPrim if we are not articulated
-        elif self._articulation_view is None:
-            return self.root_link.get_position_orientation(clone=clone)
-        # Sim is running and articulation view exists, so use that physx API backend
-        else:
-            positions, orientations = self._articulation_view.get_world_poses(clone=clone)
-            return positions[0], orientations[0][[1, 2, 3, 0]]
+        Args:
+            position (None or 3-array): The position to set the object to. If None, the position is not changed.
+            orientation (None or 4-array): The orientation to set the object to. If None, the orientation is not changed.
+            frame (Literal): The frame in which to set the position and orientation. Defaults to world.
+                scene frame sets position relative to the scene.
+        """
+        assert frame in ["world", "scene"], f"Invalid frame '{frame}'. Must be 'world' or 'scene'."
 
-    def set_local_pose(self, position=None, orientation=None):
         # If kinematic only, clear cache for the root link
         if self.kinematic_only:
             self.root_link.clear_kinematic_only_cache()
+
         # If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
         if og.sim.is_stopped():
-            return XFormPrim.set_local_pose(self, position, orientation)
-        # Delegate to RigidPrim if we are not articulated
-        elif self._articulation_view is None:
-            self.root_link.set_local_pose(position=position, orientation=orientation)
-        # Sim is running and articulation view exists, so use that physx API backend
-        else:
-            if position is not None:
-                position = th.asarray(position)[None, :]
-            if orientation is not None:
-                orientation = th.asarray(orientation)[None, [3, 0, 1, 2]]
-            self._articulation_view.set_local_poses(position, orientation)
-            PoseAPI.invalidate()
+            return XFormPrim.set_position_orientation(self, position=position, orientation=orientation, frame=frame)
+
+        # Otherwise, we need to set our pose through PhysX.
+        # If we are not articulated, we can use the RigidPrim API.
+        if self._articulation_view is None:
+            return self.root_link.set_position_orientation(position=position, orientation=orientation, frame=frame)
+
+        # Otherwise, we use the articulation view.
+        # If no position or no orientation are given, get the current position and orientation of the object
+        if position is None or orientation is None:
+            current_position, current_orientation = self.get_position_orientation(frame=frame)
+            position = current_position if position is None else position
+            orientation = current_orientation if orientation is None else orientation
+
+        # Convert to th.Tensor if necessary
+        position = th.as_tensor(position, dtype=th.float32)
+        orientation = th.as_tensor(orientation, dtype=th.float32)
+
+        # Convert from scene-relative to world if necessary
+        if frame == "scene":
+            assert self.scene is not None, "cannot set position and orientation relative to scene without a scene"
+            position, orientation = self.scene.convert_scene_relative_pose_to_world(position, orientation)
+
+        # Assert validity of the orientation
+        assert math.isclose(
+            th.norm(orientation).item(), 1, abs_tol=1e-3
+        ), f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
+
+        # Actually set the pose.
+        self._articulation_view.set_world_poses(
+            positions=position[None, :], orientations=orientation[None, [3, 0, 1, 2]]
+        )
+
+        # Invalidate the pose cache.
+        PoseAPI.invalidate()
+
+    def get_position_orientation(self, frame: Literal["world", "scene"] = "world", clone=True):
+        """
+        Gets prim's pose with respect to the specified frame.
+
+        Args:
+            frame (Literal): frame to get the pose with respect to. Defaults to world.
+                scene frame gets position relative to the scene.
+            clone (bool): Whether to clone the underlying tensor buffer or not
+
+        Returns:
+            2-tuple:
+                - th.Tensor: (x,y,z) position in the specified frame
+                - th.Tensor: (x,y,z,w) quaternion orientation in the specified frame
+        """
+        assert frame in ["world", "scene"], f"Invalid frame '{frame}'. Must be 'world' or 'scene'."
 
-    def get_local_pose(self):
         # If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
         if og.sim.is_stopped():
-            return XFormPrim.get_local_pose(self)
+            return XFormPrim.get_position_orientation(self, frame=frame, clone=clone)
+
         # Delegate to RigidPrim if we are not articulated
-        elif self._articulation_view is None:
-            return self.root_link.get_local_pose()
-        # Sim is running and articulation view exists, so use that physx API backend
-        else:
-            positions, orientations = self._articulation_view.get_local_poses()
-            return positions[0], orientations[0][[1, 2, 3, 0]]
+        if self._articulation_view is None:
+            return self.root_link.get_position_orientation(frame=frame, clone=clone)
+
+        # Otherwise, get the pose from the articulation view and convert to our format
+        positions, orientations = self._articulation_view.get_world_poses(clone=clone)
+        position = positions[0]
+        orientation = orientations[0][[1, 2, 3, 0]]
+
+        # Assert that the orientation is a unit quaternion
+        assert math.isclose(
+            th.norm(orientations).item(), 1, abs_tol=1e-3
+        ), f"{self.prim_path} orientation {orientations} is not a unit quaternion."
+
+        # If requested, compute the scene-local transform
+        if frame == "scene":
+            assert self.scene is not None, "Cannot get position and orientation relative to scene without a scene"
+            position, orientation = self.scene.convert_world_pose_to_scene_relative(position, orientation)
+
+        return position, orientation
 
     # TODO: Is the omni joint damping (used for driving motors) same as dissipative joint damping (what we had in pb)?
     @property
@@ -1474,7 +1501,7 @@ def get_relative_jacobian(self, clone=True):
             the world frame)
         """
         jac = self.get_jacobian(clone=clone)
-        ori_t = T.quat2mat(self.get_orientation()).T
+        ori_t = T.quat2mat(self.get_position_orientation()[1]).T
         tf = th.zeros((1, 6, 6), dtype=th.float32)
         tf[:, :3, :3] = ori_t
         tf[:, 3:, 3:] = ori_t
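One detail worth flagging in the `EntityPrim` rewrite: OmniGibson's public API uses (x, y, z, w) quaternions while the PhysX views expect (w, x, y, z), so every boundary crossing reindexes the quaternion. A self-contained check of that round trip:

```python
import torch as th

orientation_xyzw = th.tensor([0.0, 0.0, 0.0, 1.0])  # identity, OmniGibson convention
orientation_wxyz = orientation_xyzw[[3, 0, 1, 2]]   # what set_world_poses expects
assert th.equal(orientation_wxyz[[1, 2, 3, 0]], orientation_xyzw)  # round-trips cleanly
```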
diff --git a/omnigibson/prims/geom_prim.py b/omnigibson/prims/geom_prim.py
index 3e8e35092..23ec97259 100644
--- a/omnigibson/prims/geom_prim.py
+++ b/omnigibson/prims/geom_prim.py
@@ -139,7 +139,7 @@ def points_in_parent_frame(self):
         points = self.points
         if points is None:
             return None
-        position, orientation = self.get_local_pose()
+        position, orientation = self.get_position_orientation(frame="parent")
         scale = self.scale
         points_scaled = points * scale
         points_rotated = (T.quat2mat(orientation) @ points_scaled.T).T
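The `points_in_parent_frame` change is pure renaming, but the scale-rotate-translate chain it feeds is easy to get backwards; a sketch with an identity pose (only `T.quat2mat` is assumed from the library's transform utils):

```python
import torch as th
import omnigibson.utils.transform_utils as T

points = th.rand(8, 3)  # mesh-frame points
scale = th.ones(3)
position, orientation = th.zeros(3), th.tensor([0.0, 0.0, 0.0, 1.0])
# Scale in the mesh frame first, then rotate into the parent frame, then translate.
points_in_parent = (T.quat2mat(orientation) @ (points * scale).T).T + position
```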
diff --git a/omnigibson/prims/rigid_prim.py b/omnigibson/prims/rigid_prim.py
index d58a45e1e..00ac105c4 100644
--- a/omnigibson/prims/rigid_prim.py
+++ b/omnigibson/prims/rigid_prim.py
@@ -1,5 +1,6 @@
 import math
 from functools import cached_property
+from typing import Literal
 
 import torch as th
 from scipy.spatial import ConvexHull, QhullError
@@ -76,7 +77,6 @@ def __init__(
         # Caches for kinematic-only objects
         # This exists because RigidPrimView uses USD pose read, which is very slow
         self._kinematic_world_pose_cache = None
-        self._kinematic_local_pose_cache = None
 
         # Run super init
         super().__init__(
@@ -212,7 +212,7 @@ def update_meshes(self):
                 volume, com = get_mesh_volume_and_com(mesh_prim)
                 # We need to transform the volume and CoM from the mesh's local frame to the link's local frame
-                local_pos, local_orn = mesh.get_local_pose()
+                local_pos, local_orn = mesh.get_position_orientation(frame="parent")
                 vols.append(volume * th.prod(mesh.scale))
                 coms.append(T.quat2mat(local_orn) @ (com * mesh.scale) + local_pos)
                 # If the ratio between the max extent and min radius is too large (i.e. shape too oblong), use
@@ -315,69 +315,90 @@ def get_angular_velocity(self, clone=True):
         """
         return self._rigid_prim_view.get_angular_velocities(clone=clone)[0]
 
-    def set_position_orientation(self, position=None, orientation=None):
+    def set_position_orientation(self, position=None, orientation=None, frame: Literal["world", "scene"] = "world"):
+        """
+        Set the position and orientation of the rigid prim.
+
+        Args:
+            position (None or 3-array): The position to set the object to. If None, the position is not changed.
+            orientation (None or 4-array): The orientation to set the object to. If None, the orientation is not changed.
+            frame (Literal): The frame in which to set the position and orientation. Defaults to world.
+                scene frame sets position relative to the scene.
+        """
+        assert frame in ["world", "scene"], f"Invalid frame '{frame}'. Must be 'world' or 'scene'."
+
+        # If no position or no orientation are given, get the current position and orientation of the object
+        if position is None or orientation is None:
+            current_position, current_orientation = self.get_position_orientation(frame=frame)
+            position = current_position if position is None else position
+            orientation = current_orientation if orientation is None else orientation
+
+        # Convert to th.Tensor if necessary
+        position = th.as_tensor(position, dtype=th.float32)
+        orientation = th.as_tensor(orientation, dtype=th.float32)
+
+        # Convert from scene-relative to world if necessary
+        if frame == "scene":
+            assert self.scene is not None, "cannot set position and orientation relative to scene without a scene"
+            position, orientation = self.scene.convert_scene_relative_pose_to_world(position, orientation)
+
+        # Assert validity of the orientation
+        assert math.isclose(
+            th.norm(orientation).item(), 1, abs_tol=1e-3
+        ), f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
+
+        # Actually set the pose.
+        self._rigid_prim_view.set_world_poses(positions=position[None, :], orientations=orientation[None, [3, 0, 1, 2]])
+
         # Invalidate kinematic-only object pose caches when new pose is set
         if self.kinematic_only:
             self.clear_kinematic_only_cache()
-        if position is not None:
-            position = th.asarray(position)[None, :]
-        if orientation is not None:
-            assert math.isclose(
-                th.norm(orientation), 1, abs_tol=1e-3
-            ), f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
-            orientation = th.asarray(orientation)[None, [3, 0, 1, 2]]
-        self._rigid_prim_view.set_world_poses(positions=position, orientations=orientation)
         PoseAPI.invalidate()
 
-    def get_position_orientation(self, clone=True):
+    def get_position_orientation(self, frame: Literal["world", "scene"] = "world", clone=True):
         """
-        Gets prim's pose with respect to the world's frame.
+        Gets prim's pose with respect to the specified frame.
 
         Args:
+            frame (Literal): frame to get the pose with respect to. Defaults to world.
+                scene frame gets position relative to the scene.
             clone (bool): Whether to clone the internal buffer or not when grabbing data
 
         Returns:
             2-tuple:
-                - 3-array: (x,y,z) position in the world frame
-                - 4-array: (x,y,z,w) quaternion orientation in the world frame
+                - th.Tensor: (x,y,z) position in the specified frame
+                - th.Tensor: (x,y,z,w) quaternion orientation in the specified frame
         """
-        # Return cached pose if we're kinematic-only
-        if self.kinematic_only and self._kinematic_world_pose_cache is not None:
-            return self._kinematic_world_pose_cache
-
-        pos, ori = self._rigid_prim_view.get_world_poses(clone=clone)
-
-        # Make sure we have a valid orientation
-        assert -1e-3 < (th.sum(ori * ori) - 1) < 1e-3, f"{self.prim_path} orientation {ori} is not a unit quaternion."
-
-        pos = pos[0]
-        ori = ori[0][[1, 2, 3, 0]]
-        if self.kinematic_only:
-            self._kinematic_world_pose_cache = (pos, ori)
-        return pos, ori
+        assert frame in ["world", "scene"], f"Invalid frame '{frame}'. Must be 'world' or 'scene'."
 
-    def set_local_pose(self, position=None, orientation=None):
-        # Invalidate kinematic-only object pose caches when new pose is set
+        # Try to use caches for kinematic-only objects
+        if self.kinematic_only and self._kinematic_world_pose_cache is not None:
+            position, orientation = self._kinematic_world_pose_cache
+            if frame == "scene":
+                assert self.scene is not None, "Cannot get position and orientation relative to scene without a scene"
+                position, orientation = self.scene.convert_world_pose_to_scene_relative(position, orientation)
+            return position, orientation
+
+        # Otherwise, get the pose from the rigid prim view and convert to our format
+        positions, orientations = self._rigid_prim_view.get_world_poses(clone=clone)
+        position = positions[0]
+        orientation = orientations[0][[1, 2, 3, 0]]
+
+        # Assert that the orientation is a unit quaternion
+        assert math.isclose(
+            th.norm(orientations).item(), 1, abs_tol=1e-3
+        ), f"{self.prim_path} orientation {orientations} is not a unit quaternion."
+
+        # Cache world pose if we're kinematic-only
         if self.kinematic_only:
-            self.clear_kinematic_only_cache()
-        if position is not None:
-            position = th.asarray(position)[None, :]
-        if orientation is not None:
-            orientation = th.asarray(orientation)[None, [3, 0, 1, 2]]
-        self._rigid_prim_view.set_local_poses(position, orientation)
-        PoseAPI.invalidate()
+            self._kinematic_world_pose_cache = (position, orientation)
 
-    def get_local_pose(self):
-        # Return cached pose if we're kinematic-only
-        if self.kinematic_only and self._kinematic_local_pose_cache is not None:
-            return self._kinematic_local_pose_cache
+        # If requested, compute the scene-local transform
+        if frame == "scene":
+            assert self.scene is not None, "Cannot get position and orientation relative to scene without a scene"
+            position, orientation = self.scene.convert_world_pose_to_scene_relative(position, orientation)
 
-        positions, orientations = self._rigid_prim_view.get_local_poses()
-        positions = positions[0]
-        orientations = orientations[0][[1, 2, 3, 0]]
-        if self.kinematic_only:
-            self._kinematic_local_pose_cache = (positions, orientations)
-        return positions, orientations
+        return position, orientation
 
     @property
     def _rigid_prim_view(self):
@@ -809,7 +830,6 @@ def clear_kinematic_only_cache(self):
             changes without explicitly calling this prim's pose setter
         """
         assert self.kinematic_only
-        self._kinematic_local_pose_cache = None
         self._kinematic_world_pose_cache = None
 
     def _dump_state(self):
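Reading poses back works the same way: world is the default (and the kinematic-only cache path), while `frame="scene"` runs the world pose through the scene converter. A usage sketch (assumes a loaded `env` whose scene contains an object registered as "obj"):

```python
obj = env.scene.object_registry("name", "obj")

world_pos, world_orn = obj.get_position_orientation()               # world frame
scene_pos, scene_orn = obj.get_position_orientation(frame="scene")  # scene-relative
```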
respect to the world frame + Sets prim's pose with respect to the specified frame Args: position (None or 3-array): if specified, (x,y,z) position in the world frame Default is None, which means left unchanged. orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the world frame. Default is None, which means left unchanged. + frame (Literal): frame to set the pose with respect to, defaults to "world". parent frame + set position relative to the object parent. scene frame set position relative to the scene. """ + assert frame in ["world", "parent", "scene"], f"Invalid frame '{frame}'. Must be 'world', 'parent', or 'scene'." + + # If no position or no orientation are given, get the current position and orientation of the object if position is None or orientation is None: - current_position, current_orientation = self.get_position_orientation() - position = current_position if position is None else position - orientation = current_orientation if orientation is None else orientation + current_position, current_orientation = self.get_position_orientation(frame=frame) + position = current_position if position is None else position + orientation = current_orientation if orientation is None else orientation + + # Convert to th.Tensor if necessary + position = th.as_tensor(position, dtype=th.float32) + orientation = th.as_tensor(orientation, dtype=th.float32) - position = position if isinstance(position, th.Tensor) else th.tensor(position, dtype=th.float32) - orientation = orientation if isinstance(orientation, th.Tensor) else th.tensor(orientation, dtype=th.float32) + # Convert to from scene-relative to world if necessary + if frame == "scene": + assert self.scene is not None, "cannot set position and orientation relative to scene without a scene" + position, orientation = self.scene.convert_scene_relative_pose_to_world(position, orientation) + + # If the current pose is not in parent frame, convert to parent frame since that's what we can set. + if frame != "parent": + position, orientation = PoseAPI.convert_world_pose_to_local(self._prim, position, orientation) + + # Assert validity of the orientation assert math.isclose( th.norm(orientation).item(), 1, abs_tol=1e-3 ), f"{self.prim_path} desired orientation {orientation} is not a unit quaternion." - my_world_transform = T.pose2mat((position, orientation)) - - parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(self._prim) - parent_path = str(parent_prim.GetPath()) - parent_world_transform = PoseAPI.get_world_pose_with_scale(parent_path) - - local_transform = th.linalg.inv_ex(parent_world_transform).inverse @ my_world_transform - # unscale local transform's rotation - local_transform[:3, :3] /= th.linalg.norm(local_transform[:3, :3], dim=0) - product = local_transform[:3, :3] @ local_transform[:3, :3].T - assert th.allclose( - product, th.diag(th.diag(product)), atol=1e-3 - ), f"{self.prim_path} local transform is not diagonal." - self.set_local_pose(*T.mat2pose(local_transform)) + # Actually set the local pose now. 
+ properties = self.prim.GetPropertyNames() + position = lazy.pxr.Gf.Vec3d(*position.tolist()) + if "xformOp:translate" not in properties: + logger.error("Translate property needs to be set for {} before setting its position".format(self.name)) + self.set_attribute("xformOp:translate", position) + orientation = orientation[[3, 0, 1, 2]].tolist() + if "xformOp:orient" not in properties: + logger.error("Orient property needs to be set for {} before setting its orientation".format(self.name)) + xform_op = self._prim.GetAttribute("xformOp:orient") + if xform_op.GetTypeName() == "quatf": + rotq = lazy.pxr.Gf.Quatf(*orientation) + else: + rotq = lazy.pxr.Gf.Quatd(*orientation) + xform_op.Set(rotq) + PoseAPI.invalidate() + if gm.ENABLE_FLATCACHE: + # If flatcache is on, make sure the USD local pose is synced to the fabric local pose. + # Ideally we should call usdrt's set local pose directly, but there is no such API. + # The only available API is SetLocalXformFromUsd, so we update USD first, and then sync to fabric. + xformable_prim = lazy.usdrt.Rt.Xformable( + lazy.omni.isaac.core.utils.prims.get_prim_at_path(self.prim_path, fabric=True) + ) + assert ( + not xformable_prim.HasWorldXform() + ), "Fabric's world pose is set for a non-rigid prim which is unexpected. Please report this." + xformable_prim.SetLocalXformFromUsd() - def get_position_orientation(self, clone=True): + def get_position_orientation(self, frame: Literal["world", "scene", "parent"] = "world", clone=True): """ - Gets prim's pose with respect to the world's frame. + Gets prim's pose with respect to the specified frame. + + Args: + frame (Literal): frame to get the pose with respect to. Default to world. parent frame + get position relative to the object parent. scene frame get position relative to the scene. + clone (bool): Whether to clone the internal buffer or not when grabbing data Args: clone (bool): Whether to clone the internal buffer or not when grabbing data Returns: 2-tuple: - - 3-array: (x,y,z) position in the world frame - - 4-array: (x,y,z,w) quaternion orientation in the world frame - """ - return PoseAPI.get_world_pose(self.prim_path) + - th.Tensor: (x,y,z) position in the specified frame + - th.Tensor: (x,y,z,w) quaternion orientation in the specified frame + """ + assert frame in ["world", "parent", "scene"], f"Invalid frame '{frame}'. Must be 'world', 'parent', or 'scene'." + if frame == "world": + return PoseAPI.get_world_pose(self.prim_path) + elif frame == "scene": + assert self.scene is not None, "Cannot get position and orientation relative to scene without a scene" + return self.scene.convert_world_pose_to_scene_relative(*PoseAPI.get_world_pose(self.prim_path)) + else: + position, orientation = lazy.omni.isaac.core.utils.xforms.get_local_pose(self.prim_path) + return th.as_tensor(position, dtype=th.float32), th.as_tensor(orientation[[1, 2, 3, 0]], dtype=th.float32) def set_position(self, position): """ @@ -225,7 +275,10 @@ def set_position(self, position): Args: position (3-array): (x,y,z) global cartesian position to set """ - self.set_position_orientation(position=position) + logger.warning( + "set_position is deprecated and will be removed in a future release. Use set_position_orientation(position=position) instead" + ) + return self.set_position_orientation(position=position) def get_position(self): """ @@ -234,6 +287,9 @@ def get_position(self): Returns: 3-array: (x,y,z) global cartesian position of this prim """ + logger.warning( + "get_position is deprecated and will be removed in a future release. 
Use get_position_orientation()[0] instead." + ) return self.get_position_orientation()[0] def set_orientation(self, orientation): @@ -243,6 +299,9 @@ def set_orientation(self, orientation): Args: orientation (4-array): (x,y,z,w) global quaternion orientation to set """ + logger.warning( + "set_orientation is deprecated and will be removed in a future release. Use set_position_orientation(orientation=orientation) instead" + ) self.set_position_orientation(orientation=orientation) def get_orientation(self): @@ -252,6 +311,9 @@ def get_orientation(self): Returns: 4-array: (x,y,z,w) global quaternion orientation of this prim """ + logger.warning( + "get_orientation is deprecated and will be removed in a future release. Use get_position_orientation()[1] instead" + ) return self.get_position_orientation()[1] def get_rpy(self): @@ -261,14 +323,14 @@ def get_rpy(self): Returns: 3-array: (roll, pitch, yaw) global euler orientation of this prim """ - return quat2euler(self.get_orientation()) + return quat2euler(self.get_position_orientation()[1]) def get_xy_orientation(self): """ Get this prim's orientation on the XY plane of the world frame. This is obtained by projecting the forward vector onto the XY plane and then computing the angle. """ - return T.calculate_xy_plane_angle(self.get_orientation()) + return T.calculate_xy_plane_angle(self.get_position_orientation()[1]) def get_local_pose(self): """ @@ -279,8 +341,10 @@ def get_local_pose(self): - 3-array: (x,y,z) position in the local frame - 4-array: (x,y,z,w) quaternion orientation in the local frame """ - pos, ori = lazy.omni.isaac.core.utils.xforms.get_local_pose(self.prim_path) - return th.tensor(pos, dtype=th.float32), th.tensor(ori[[1, 2, 3, 0]], dtype=th.float32) + logger.warning( + 'get_local_pose is deprecated and will be removed in a future release. Use get_position_orientation(frame="parent") instead' + ) + return self.get_position_orientation(self.prim_path, frame="parent") def set_local_pose(self, position=None, orientation=None): """ @@ -292,44 +356,10 @@ def set_local_pose(self, position=None, orientation=None): orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the local frame of the prim (with respect to its parent prim). Default is None, which means left unchanged. """ - properties = self.prim.GetPropertyNames() - if position is not None: - position = position.tolist() if isinstance(position, th.Tensor) else position - position = lazy.pxr.Gf.Vec3d(*position) - if "xformOp:translate" not in properties: - lazy.carb.log_error( - "Translate property needs to be set for {} before setting its position".format(self.name) - ) - self.set_attribute("xformOp:translate", position) - if orientation is not None: - orientation = ( - orientation[[3, 0, 1, 2]].tolist() - if isinstance(orientation, th.Tensor) - else [float(orientation[i]) for i in [3, 0, 1, 2]] - ) - if "xformOp:orient" not in properties: - lazy.carb.log_error( - "Orient property needs to be set for {} before setting its orientation".format(self.name) - ) - xform_op = self._prim.GetAttribute("xformOp:orient") - if xform_op.GetTypeName() == "quatf": - rotq = lazy.pxr.Gf.Quatf(*orientation) - else: - rotq = lazy.pxr.Gf.Quatd(*orientation) - xform_op.Set(rotq) - PoseAPI.invalidate() - if gm.ENABLE_FLATCACHE: - # If flatcache is on, make sure the USD local pose is synced to the fabric local pose. - # Ideally we should call usdrt's set local pose directly, but there is no such API. 
- # The only available API is SetLocalXformFromUsd, so we update USD first, and then sync to fabric. - xformable_prim = lazy.usdrt.Rt.Xformable( - lazy.omni.isaac.core.utils.prims.get_prim_at_path(self.prim_path, fabric=True) - ) - assert ( - not xformable_prim.HasWorldXform() - ), "Fabric's world pose is set for a non-rigid prim which is unexpected. Please report this." - xformable_prim.SetLocalXformFromUsd() - return + logger.warning( + 'set_local_pose is deprecated and will be removed in a future release. Use set_position_orientation(position=position, orientation=orientation, frame="parent") instead' + ) + return self.set_position_orientation(self.prim_path, position, orientation, frame="parent") def get_world_scale(self): """ @@ -384,7 +414,7 @@ def scale(self, scale): scale = lazy.pxr.Gf.Vec3d(*scale.tolist()) properties = self.prim.GetPropertyNames() if "xformOp:scale" not in properties: - lazy.carb.log_error("Scale property needs to be set for {} before setting its scale".format(self.name)) + logger.error("Scale property needs to be set for {} before setting its scale".format(self.name)) self.set_attribute("xformOp:scale", scale) @property @@ -431,22 +461,21 @@ def remove_filtered_collision_pair(self, prim): prim._collision_filter_api.GetFilteredPairsRel().RemoveTarget(self.prim_path) def _dump_state(self): - pos, ori = self.get_position_orientation() - - # If we are in a scene, compute the scene-local transform (and save this as the world transform - # for legacy compatibility) - if self.scene is not None: - pos, ori = T.mat2pose(self.scene.pose_inv @ T.pose2mat((pos, ori))) + if self.scene is None: + # this is a special case for objects (e.g. viewer_camera) that are not in a scene. Default to world frame + pos, ori = self.get_position_orientation() + else: + pos, ori = self.get_position_orientation(frame="scene") return dict(pos=pos, ori=ori) def _load_state(self, state): - pos, ori = state["pos"], state["ori"] - pos = pos if isinstance(pos, th.Tensor) else th.tensor(pos, dtype=th.float32) - ori = ori if isinstance(ori, th.Tensor) else th.tensor(ori, dtype=th.float32) - if self.scene is not None: - pos, ori = T.mat2pose(self.scene.pose @ T.pose2mat((pos, ori))) - self.set_position_orientation(pos, ori) + pos, orn = state["pos"], state["ori"] + if self.scene is None: + # this is a special case for objects (e.g. viewer_camera) that are not in a scene. 
Default to world frame + self.set_position_orientation(pos, orn) + else: + self.set_position_orientation(pos, orn, frame="scene") def serialize(self, state): return th.cat([state["pos"], state["ori"]]) diff --git a/omnigibson/reward_functions/grasp_reward.py b/omnigibson/reward_functions/grasp_reward.py index e1caf46dc..b3eea461f 100644 --- a/omnigibson/reward_functions/grasp_reward.py +++ b/omnigibson/reward_functions/grasp_reward.py @@ -117,7 +117,7 @@ def _step(self, task, env, action): info["postgrasp_dist_reward"] = 0.0 if not current_grasping: # TODO: If we dropped the object recently, penalize for that - obj_center = self.obj.get_position() + obj_center = self.obj.get_position_orientation()[0] dist = T.l2_distance(eef_pos, obj_center) dist_reward = math.exp(-dist) * self.dist_coeff reward += dist_reward @@ -131,8 +131,8 @@ def _step(self, task, env, action): info["grasp_reward"] = self.grasp_reward # Then apply a distance reward to take us to a tucked position - robot_center = robot.links["torso_lift_link"].get_position() - obj_center = self.obj.get_position() + robot_center = robot.links["torso_lift_link"].get_position_orientation()[0] + obj_center = self.obj.get_position_orientation()[0] dist = T.l2_distance(robot_center, obj_center) dist_reward = math.exp(-dist) * self.dist_coeff reward += dist_reward diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 480ae0aaa..8c5369e16 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -3,7 +3,7 @@ import os from abc import ABC from collections import OrderedDict -from typing import Iterable, List, Tuple +from typing import Iterable, List, Literal, Tuple import torch as th @@ -359,11 +359,37 @@ def base_footprint_link_name(self): """ return "base" - def get_position_orientation(self, clone=True): - return self.base_footprint_link.get_position_orientation(clone=clone) + def get_position_orientation(self, frame: Literal["world", "scene"] = "world", clone=True): + """ + Gets robot's pose with respect to the specified frame. + + Args: + frame (Literal): frame to get the pose with respect to. Defaults to "world". + "scene" returns the pose relative to the scene frame. + clone (bool): Whether to clone the internal buffer or not when grabbing data + + Returns: + 2-tuple: + - th.Tensor: (x,y,z) position in the specified frame + - th.Tensor: (x,y,z,w) quaternion orientation in the specified frame + """ + return self.base_footprint_link.get_position_orientation(frame=frame, clone=clone) + + def set_position_orientation( + self, position=None, orientation=None, frame: Literal["world", "parent", "scene"] = "world" + ): + """ + Sets behavior robot's pose with respect to the specified frame. - def set_position_orientation(self, position=None, orientation=None): - super().set_position_orientation(position, orientation) + Args: + position (None or 3-array): if specified, (x,y,z) position in the specified frame. + Default is None, which means left unchanged. + orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the specified frame. + Default is None, which means left unchanged. + frame (Literal): frame to set the pose with respect to, defaults to "world". + "parent" sets the pose relative to the object's parent prim; "scene" relative to the scene frame.
+ """ + super().set_position_orientation(position, orientation, frame=frame) # Move the joint frame for the world_base_joint if self._world_base_fixed_joint_prim is not None: if position is not None: @@ -474,7 +500,7 @@ def teleop_data_to_action(self, teleop_action) -> th.Tensor: action[self.controller_action_idx[controller_name]] = th.cat((des_local_part_pos, des_part_rpy)) # If we reset, teleop the robot parts to the desired pose if part_name in self.arm_names and teleop_action.reset[part_name]: - self.parts[part_name].set_position_orientation(des_local_part_pos, des_part_rpy) + self.parts[part_name].set_position_orientation(position=des_local_part_pos, orientation=des_part_rpy) return action @@ -561,7 +587,7 @@ def update_ghost_hands(self, pos: Iterable[float], orn: Iterable[float]) -> None """ assert self.eef_type == "hand", "ghost hand is only valid for BR hand!" # Ghost hand tracks real hand whether it is hidden or not - self.ghost_hand.set_position_orientation(pos, orn) + self.ghost_hand.set_position_orientation(position=pos, orientation=orn) # If distance between hand and controller is greater than threshold, # ghost hand appears diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index 7b5a727c5..b613d7ef4 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -123,8 +123,8 @@ def move_by(self, delta): Args: delta (float):float], (x,y,z) cartesian delta base position """ - new_pos = th.tensor(delta) + self.get_position() - self.set_position(position=new_pos) + new_pos = th.tensor(delta) + self.get_position_orientation()[0] + self.set_position_orientation(position=new_pos) def move_forward(self, delta=0.05): """ @@ -133,7 +133,7 @@ def move_forward(self, delta=0.05): Args: delta (float): delta base position forward """ - self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([delta, 0, 0]))) + self.move_by(quat2mat(self.get_position_orientation()[1]).dot(th.tensor([delta, 0, 0]))) def move_backward(self, delta=0.05): """ @@ -142,7 +142,7 @@ def move_backward(self, delta=0.05): Args: delta (float): delta base position backward """ - self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([-delta, 0, 0]))) + self.move_by(quat2mat(self.get_position_orientation()[1]).dot(th.tensor([-delta, 0, 0]))) def move_left(self, delta=0.05): """ @@ -151,7 +151,7 @@ def move_left(self, delta=0.05): Args: delta (float): delta base position left """ - self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([0, -delta, 0]))) + self.move_by(quat2mat(self.get_position_orientation()[1]).dot(th.tensor([0, -delta, 0]))) def move_right(self, delta=0.05): """ @@ -160,7 +160,7 @@ def move_right(self, delta=0.05): Args: delta (float): delta base position right """ - self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([0, delta, 0]))) + self.move_by(quat2mat(self.get_position_orientation()[1]).dot(th.tensor([0, delta, 0]))) def turn_left(self, delta=0.03): """ @@ -169,9 +169,9 @@ def turn_left(self, delta=0.03): Args: delta (float): delta angle to rotate the base left """ - quat = self.get_orientation() + quat = self.get_position_orientation()[1] quat = quat_multiply((euler2quat(-delta, 0, 0)), quat) - self.set_orientation(quat) + self.set_position_orientation(orientation=quat) def turn_right(self, delta=0.03): """ @@ -180,9 +180,9 @@ def turn_right(self, delta=0.03): Args: delta (float): angle to rotate the base right """ - quat = self.get_orientation() + quat = self.get_position_orientation()[1] quat = 
quat_multiply((euler2quat(delta, 0, 0)), quat) - self.set_orientation(quat) + self.set_position_orientation(orientation=quat) @property def base_action_idx(self): diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 890199665..b4d76f5c7 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1,6 +1,7 @@ import math from abc import abstractmethod from collections import namedtuple +from typing import Literal import networkx as nx import torch as th @@ -304,14 +305,28 @@ def _find_gripper_contacts(self, arm="default", return_contact_positions=False): return contact_data, robot_contact_links - def set_position_orientation(self, position=None, orientation=None): + def set_position_orientation( + self, position=None, orientation=None, frame: Literal["world", "parent", "scene"] = "world" + ): + """ + Sets manipulation robot's pose with respect to the specified frame. + + Args: + position (None or 3-array): if specified, (x,y,z) position in the specified frame. + Default is None, which means left unchanged. + orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the specified frame. + Default is None, which means left unchanged. + frame (Literal): frame to set the pose with respect to, defaults to "world". + "parent" sets the pose relative to the object's parent prim; "scene" relative to the scene frame. + """ + # Store the original EEF poses. original_poses = {} for arm in self.arm_names: original_poses[arm] = (self.get_eef_position(arm), self.get_eef_orientation(arm)) # Run the super method - super().set_position_orientation(position=position, orientation=orientation) + super().set_position_orientation(position=position, orientation=orientation, frame=frame) # Now for each hand, if it was holding an AG object, teleport it.
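Note: the EEF poses are captured into original_poses before the base moves because assisted-grasp objects are not rigidly parented to the robot prim in USD; after the super call relocates the base, each held object is presumably re-teleported so it keeps the same pose relative to the end effector (the loop body itself is elided from this hunk).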
for arm in self.arm_names: @@ -727,7 +742,7 @@ def get_eef_position(self, arm="default"): to arm @arm """ arm = self.default_arm if arm == "default" else arm - return self._links[self.eef_link_names[arm]].get_position() + return self._links[self.eef_link_names[arm]].get_position_orientation()[0] def get_eef_orientation(self, arm="default"): """ @@ -740,7 +755,7 @@ def get_eef_orientation(self, arm="default"): to arm @arm """ arm = self.default_arm if arm == "default" else arm - return self._links[self.eef_link_names[arm]].get_orientation() + return self._links[self.eef_link_names[arm]].get_position_orientation()[1] def get_relative_eef_pose(self, arm="default", mat=False): """ @@ -795,7 +810,7 @@ def get_relative_eef_lin_vel(self, arm="default"): 3-array: (x,y,z) Linear velocity of end-effector relative to robot base frame """ arm = self.default_arm if arm == "default" else arm - base_link_quat = self.get_orientation() + base_link_quat = self.get_position_orientation()[1] return T.quat2mat(base_link_quat).T @ self.eef_links[arm].get_linear_velocity() def get_relative_eef_ang_vel(self, arm="default"): @@ -808,7 +823,7 @@ def get_relative_eef_ang_vel(self, arm="default"): 3-array: (ax,ay,az) angular velocity of end-effector relative to robot base frame """ arm = self.default_arm if arm == "default" else arm - base_link_quat = self.get_orientation() + base_link_quat = self.get_position_orientation()[1] return T.quat2mat(base_link_quat).T @ self.eef_links[arm].get_angular_velocity() def _calculate_in_hand_object_rigid(self, arm="default"): @@ -842,7 +857,7 @@ def _calculate_in_hand_object_rigid(self, arm="default"): return None # Find the closest object to the gripper center - gripper_center_pos = self.eef_links[arm].get_position() + gripper_center_pos = self.eef_links[arm].get_position_orientation()[0] candidate_data = [] for prim_path in candidates_set: @@ -852,7 +867,7 @@ def _calculate_in_hand_object_rigid(self, arm="default"): if candidate_obj is None or link_name not in candidate_obj.links: continue candidate_link = candidate_obj.links[link_name] - dist = th.norm(th.tensor(candidate_link.get_position()) - th.tensor(gripper_center_pos)) + dist = th.norm(th.tensor(candidate_link.get_position_orientation()[0]) - th.tensor(gripper_center_pos)) candidate_data.append((prim_path, dist)) if not candidate_data: @@ -1382,7 +1397,7 @@ def _calculate_in_hand_object_cloth(self, arm="default"): # TODO (eric): Only AG one cloth at any given moment. 
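Note: the cloth check below reads each cloth's attachment point in world coordinates via get_position_orientation()[0] and tests it against the gripper's precomputed "ghost" volume (self._ag_check_in_volume); the first cloth whose attachment point falls inside that volume becomes the grasp candidate.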
# Returns the first cloth that overlaps with the "ghost" box volume for cloth_obj in cloth_objs: - attachment_point_pos = cloth_obj.links["attachment_point"].get_position() + attachment_point_pos = cloth_obj.links["attachment_point"].get_position_orientation()[0] particles_in_volume = self._ag_check_in_volume[arm]([attachment_point_pos]) if particles_in_volume.sum() > 0: return cloth_obj, cloth_obj.links["attachment_point"], attachment_point_pos @@ -1471,11 +1486,11 @@ def _dump_state(self): # Include AG_state ag_params = self._ag_obj_constraint_params.copy() for arm in ag_params.keys(): - if len(ag_params[arm]) > 0 and self.scene is not None: - ag_params[arm]["contact_pos"], _ = T.relative_pose_transform( + if len(ag_params[arm]) > 0: + assert self.scene is not None, "Cannot get position and orientation relative to scene without a scene" + ag_params[arm]["contact_pos"], _ = self.scene.convert_world_pose_to_scene_relative( ag_params[arm]["contact_pos"], th.tensor([0, 0, 0, 1], dtype=th.float32), - *self.scene.get_position_orientation(), ) state["ag_obj_constraint_params"] = ag_params return state @@ -1499,12 +1514,11 @@ def _load_state(self, state): obj = self.scene.object_registry("prim_path", data["ag_obj_prim_path"]) link = obj.links[data["ag_link_prim_path"].split("/")[-1]] contact_pos_global = data["contact_pos"] - if self.scene is not None: - contact_pos_global, _ = T.pose_transform( - *self.scene.get_position_orientation(), - contact_pos_global, - th.tensor([0, 0, 0, 1], dtype=th.float32), - ) + assert self.scene is not None, "Cannot set position and orientation relative to scene without a scene" + contact_pos_global, _ = self.scene.convert_scene_relative_pose_to_world( + contact_pos_global, + th.tensor([0, 0, 0, 1], dtype=th.float32), + ) self._establish_grasp(arm=arm, ag_data=(obj, link), contact_pos=contact_pos_global) def serialize(self, state): diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index e4a1b86d5..2a8862458 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,5 +1,6 @@ import math import os +from typing import Literal import torch as th @@ -13,7 +14,7 @@ from omnigibson.robots.manipulation_robot import GraspingPoint from omnigibson.robots.untucked_arm_pose_robot import UntuckedArmPoseRobot from omnigibson.utils.python_utils import assert_valid_key, classproperty -from omnigibson.utils.usd_utils import ControllableObjectViewAPI, JointType +from omnigibson.utils.usd_utils import ControllableObjectViewAPI class Tiago(HolonomicBaseRobot, ArticulatedTrunkRobot, UntuckedArmPoseRobot, ActiveCameraRobot): diff --git a/omnigibson/scene_graphs/graph_builder.py b/omnigibson/scene_graphs/graph_builder.py index 81d668309..1f987748c 100644 --- a/omnigibson/scene_graphs/graph_builder.py +++ b/omnigibson/scene_graphs/graph_builder.py @@ -4,7 +4,6 @@ import torch as th from matplotlib import pyplot as plt from PIL import Image -from torchvision.transforms import ToPILImage, ToTensor from omnigibson import object_states from omnigibson.object_states.factory import get_state_name @@ -12,6 +11,7 @@ from omnigibson.robots import BaseRobot from omnigibson.sensors import VisionSensor from omnigibson.utils import transform_utils as T +from omnigibson.utils.numpy_utils import pil_to_tensor def _formatted_aabb(obj): @@ -289,14 +289,10 @@ def _draw_graph(): robot_view = (robot_camera_sensor.get_obs()[0]["rgb"][..., :3]).to(th.uint8) imgheight, imgwidth, _ = robot_view.shape - pil_transform = ToPILImage() - torch_transform = ToTensor() - - # check 
imgheight and imgwidth; if they are too small, we need to upsample the image to 1280x1280 + # check imgheight and imgwidth; if they are too small, we need to upsample the image to 640x640 if imgheight < 640 or imgwidth < 640: - robot_view = torch_transform( - pil_transform((robot_view.permute(2, 0, 1).cpu())).resize((640, 640), Image.BILINEAR) - ).permute(1, 2, 0) + # Convert to PIL Image to upsample, then write back to tensor + robot_view = pil_to_tensor(Image.fromarray(robot_view.cpu().numpy()).resize((640, 640), Image.BILINEAR)) imgheight, imgwidth, _ = robot_view.shape figheight = 4.8 diff --git a/omnigibson/scenes/scene_base.py b/omnigibson/scenes/scene_base.py index a44a3bd28..81e1d7636 100644 --- a/omnigibson/scenes/scene_base.py +++ b/omnigibson/scenes/scene_base.py @@ -354,14 +354,16 @@ def _load_scene_prim_with_objects(self, last_scene_edge, initial_scene_prim_z_of log.warning(f"System {system_name} is not supported without GPU dynamics! Skipping...") # Position the scene prim initially at a z offset to avoid collision - self._scene_prim.set_position(th.tensor([0, 0, initial_scene_prim_z_offset if self.idx != 0 else 0])) + self._scene_prim.set_position_orientation( + position=th.tensor([0, 0, initial_scene_prim_z_offset if self.idx != 0 else 0]) + ) # Now load the objects with their own logic for obj_name, obj in self._init_objs.items(): # Import into the simulator self.add_object(obj) # Set the init pose accordingly - obj.set_local_pose( + obj.set_position_orientation( position=self._init_state[obj_name]["root_link"]["pos"], orientation=self._init_state[obj_name]["root_link"]["ori"], ) @@ -370,7 +372,9 @@ def _load_scene_prim_with_objects(self, last_scene_edge, initial_scene_prim_z_of if self.idx != 0: aabb_min, aabb_max = lazy.omni.usd.get_context().compute_path_world_bounding_box(scene_absolute_path) left_edge_to_center = -aabb_min[0] - self._scene_prim.set_position([last_scene_edge + scene_margin + left_edge_to_center, 0, 0]) + self._scene_prim.set_position_orientation( + position=[last_scene_edge + scene_margin + left_edge_to_center, 0, 0] + ) new_scene_edge = last_scene_edge + scene_margin + (aabb_max[0] - aabb_min[0]) else: aabb_min, aabb_max = lazy.omni.usd.get_context().compute_path_world_bounding_box(scene_absolute_path) @@ -448,6 +452,7 @@ def load(self, idx, **kwargs): # Cache this scene's pose self._pose = T.pose2mat(self._scene_prim.get_position_orientation()) + assert self._pose is not None self._pose_inv = th.linalg.inv_ex(self._pose).inverse if gm.ENABLE_TRANSITION_RULES: @@ -697,9 +702,10 @@ def set_position_orientation(self, position=None, orientation=None): position (th.Tensor): (3,) position of the scene orientation (th.Tensor): (4,) orientation of the scene """ - self._scene_prim.set_position_orientation(position, orientation) + self._scene_prim.set_position_orientation(position=position, orientation=orientation) # Update the cached pose and inverse pose self._pose = T.pose2mat(self.get_position_orientation()) + assert self._pose is not None self._pose_inv = th.linalg.inv_ex(self._pose).inverse @property @@ -757,6 +763,36 @@ def pose_inv(self): """ return self._pose_inv + def convert_world_pose_to_scene_relative(self, position, orientation): + """ + Convert a world pose to a scene-relative pose. 
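For reference, the math implemented below: with T_ws = self._pose (the scene's cached world transform) and self._pose_inv its inverse, a world pose T_wp maps into the scene frame as T_sp = T_ws^-1 @ T_wp, and convert_scene_relative_pose_to_world applies the inverse, T_wp = T_ws @ T_sp. A hypothetical round trip, with pos_w/orn_w standing for any world-frame pose:

    pos_s, orn_s = scene.convert_world_pose_to_scene_relative(pos_w, orn_w)
    pos_w2, orn_w2 = scene.convert_scene_relative_pose_to_world(pos_s, orn_s)  # recovers pos_w, orn_w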
+ + Args: + position (th.Tensor): (3,) position in world frame + orientation (th.Tensor): (4,) orientation in world frame + + Returns: + 2-tuple: + - th.Tensor: (3,) position in scene frame + - th.Tensor: (4,) orientation in scene frame + """ + return T.mat2pose(self.pose_inv @ T.pose2mat((position, orientation))) + + def convert_scene_relative_pose_to_world(self, position, orientation): + """ + Convert a scene-relative pose to a world pose. + + Args: + position (th.Tensor): (3,) position in scene frame + orientation (th.Tensor): (4,) orientation in scene frame + + Returns: + 2-tuple: + - th.Tensor: (3,) position in world frame + - th.Tensor: (4,) orientation in world frame + """ + return T.mat2pose(self.pose @ T.pose2mat((position, orientation))) + def is_system_active(self, system_name): return self.get_system(system_name, force_init=False).initialized diff --git a/omnigibson/scenes/static_traversable_scene.py b/omnigibson/scenes/static_traversable_scene.py index ff942db68..79416ca97 100644 --- a/omnigibson/scenes/static_traversable_scene.py +++ b/omnigibson/scenes/static_traversable_scene.py @@ -96,9 +96,9 @@ def _load(self): # Move the first floor to be at the floor level by default. default_floor = 0 floor_height = self.floor_heights[default_floor] + m.FLOOR_Z_OFFSET - scene_position = self._scene_prim.get_position() + scene_position = self._scene_prim.get_position_orientation()[0] scene_position[2] = floor_height - self._scene_prim.set_position(scene_position) + self._scene_prim.set_position_orientation(position=scene_position) # Filter the collision between the scene mesh and the floor plane self._scene_mesh.add_filtered_collision_pair(prim=og.sim.floor_plane) @@ -119,8 +119,10 @@ def move_floor_plane(self, floor=0, additional_elevation=0.02, height=None): if height is not None: height_adjustment = height - self.floor_heights[floor] else: - height_adjustment = self.floor_heights[floor] - self._scene_prim.get_position()[2] + additional_elevation - self._scene_prim.set_position(th.tensor([0, 0, height_adjustment])) + height_adjustment = ( + self.floor_heights[floor] - self._scene_prim.get_position_orientation()[0][2] + additional_elevation + ) + self._scene_prim.set_position_orientation(position=th.tensor([0, 0, height_adjustment])) def get_floor_height(self, floor=0): """ diff --git a/omnigibson/systems/macro_particle_system.py b/omnigibson/systems/macro_particle_system.py index 041e3666a..80f866807 100644 --- a/omnigibson/systems/macro_particle_system.py +++ b/omnigibson/systems/macro_particle_system.py @@ -684,7 +684,7 @@ def _compute_batch_particles_position_orientation(self, particles, local=False): if local: poses = th.zeros((n_particles, 4, 4)) for i, name in enumerate(particles): - poses[i] = T.pose2mat(self.particles[name].get_local_pose()) + poses[i] = T.pose2mat(self.particles[name].get_position_orientation(frame="parent")) else: # Iterate over all particles and compute link tfs programmatically, then batch the matrix transform link_tfs = dict() @@ -697,9 +697,9 @@ def _compute_batch_particles_position_orientation(self, particles, local=False): if obj not in link_tfs: # We want World --> obj transform, NOT the World --> root_link transform, since these particles # do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly - # get the transform, and not obj.get_local_pose() which will give us the local pose of the + # get the transform, and not obj.get_position_orientation(frame="parent") which will give us the local pose of the # root link! 
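Note: XFormPrim.get_position_orientation(obj, frame="parent") is deliberately an unbound call (the base-class getter runs with obj as self), which sidesteps any subclass override that would return the root link's pose rather than the object prim's own parent-relative transform.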
- link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj)) + link_tfs[obj] = T.pose2mat(XFormPrim.get_position_orientation(obj, frame="parent")) link = obj else: link = self._particles_info[name]["link"] @@ -734,7 +734,7 @@ def get_particle_position_orientation(self, idx): is_cloth = self._is_cloth_obj(obj=parent_obj) local_mat = self._particles_local_mat[name] link_tf = ( - T.pose2mat(XFormPrim.get_local_pose(parent_obj)) + T.pose2mat(XFormPrim.get_position_orientation(parent_obj, frame="parent")) if is_cloth else T.pose2mat(self._particles_info[name]["link"].get_position_orientation()) ) @@ -744,7 +744,7 @@ def get_particle_position_orientation(self, idx): def get_particle_local_pose(self, idx): name = list(self.particles.keys())[idx] - return self.particles[name].get_local_pose() + return self.particles[name].get_position_orientation(frame="parent") def _modify_batch_particles_position_orientation(self, particles, positions=None, orientations=None, local=False): """ @@ -788,9 +788,9 @@ def _modify_batch_particles_position_orientation(self, particles, positions=None if obj not in link_tfs: # We want World --> obj transform, NOT the World --> root_link transform, since these particles # do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly - # get the transform, and not obj.get_local_pose() which will give us the local pose of the + # get the transform, and not obj.get_position_orientation(frame="parent") which will give us the local pose of the # root link! - link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj)) + link_tfs[obj] = T.pose2mat(XFormPrim.get_position_orientation(obj, frame="parent")) link_tf = link_tfs[obj] else: link = self._particles_info[name]["link"] @@ -831,6 +831,9 @@ def set_particle_position_orientation(self, idx, position=None, orientation=None position = pos if position is None else position orientation = ori if orientation is None else orientation + position = position if isinstance(position, th.Tensor) else th.tensor(position, dtype=th.float32) + orientation = orientation if isinstance(orientation, th.Tensor) else th.tensor(orientation, dtype=th.float32) + name = list(self.particles.keys())[idx] global_mat = th.zeros((4, 4)) global_mat[-1, -1] = 1.0 @@ -840,7 +843,7 @@ def set_particle_position_orientation(self, idx, position=None, orientation=None parent_obj = self._particles_info[name]["obj"] is_cloth = self._is_cloth_obj(obj=parent_obj) link_tf = ( - T.pose2mat(XFormPrim.get_local_pose(parent_obj)) + T.pose2mat(XFormPrim.get_position_orientation(parent_obj, frame="parent")) if is_cloth else T.pose2mat(self._particles_info[name]["link"].get_position_orientation()) ) @@ -854,6 +857,9 @@ def set_particle_local_pose(self, idx, position=None, orientation=None): position = pos if position is None else position orientation = ori if orientation is None else orientation + position = position if isinstance(position, th.Tensor) else th.tensor(position, dtype=th.float32) + orientation = orientation if isinstance(orientation, th.Tensor) else th.tensor(orientation, dtype=th.float32) + name = list(self.particles.keys())[idx] local_mat = th.zeros((4, 4)) local_mat[-1, -1] = 1.0 @@ -888,7 +894,7 @@ def _compute_particle_local_mat(self, name, ignore_scale=False): parent_obj = self._particles_info[name]["obj"] is_cloth = self._is_cloth_obj(obj=parent_obj) scale = th.ones(3) if is_cloth else self._particles_info[name]["link"].scale - local_pos, local_quat = particle.get_local_pose() + local_pos, local_quat = 
particle.get_position_orientation(frame="parent") local_pos = local_pos if ignore_scale else local_pos * scale return T.pose2mat((local_pos, local_quat)) @@ -907,7 +913,7 @@ def _modify_particle_local_mat(self, name, mat, ignore_scale=False): scale = th.ones(3) if is_cloth else self._particles_info[name]["link"].scale local_pos, local_quat = T.mat2pose(mat) local_pos = local_pos if ignore_scale else local_pos / scale - particle.set_local_pose(local_pos, local_quat) + particle.set_position_orientation(position=local_pos, orientation=local_quat, frame="parent") # Store updated value self._particles_local_mat[name] = mat diff --git a/omnigibson/systems/micro_particle_system.py b/omnigibson/systems/micro_particle_system.py index 8b8a228ff..20b98ac51 100644 --- a/omnigibson/systems/micro_particle_system.py +++ b/omnigibson/systems/micro_particle_system.py @@ -348,8 +348,9 @@ def _dump_state(self): local_positions = [] local_orientations = [] for global_pos, global_ori in zip(self.particle_positions, self.particle_orientations): - local_pos, local_ori = T.relative_pose_transform( - global_pos, global_ori, *self.scene.get_position_orientation() + local_pos, local_ori = self.scene.convert_world_pose_to_scene_relative( + global_pos, + global_ori, ) local_positions.append(local_pos) local_orientations.append(local_ori) @@ -386,7 +387,7 @@ def _load_state(self, state): else: global_positions, global_orientations = zip( *[ - T.pose_transform(*self.scene.get_position_orientation(), local_pos, local_ori) + self.scene.convert_scene_relative_pose_to_world(local_pos, local_ori) for local_pos, local_ori in zip(local_positions, local_orientations) ] ) diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index 081690a73..40198119c 100644 --- a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -59,8 +59,7 @@ def _load(self, env): obj_pos = [0.0, 0.0, 0.0] if "position" not in obj_config else obj_config["position"] obj_orn = [0.0, 0.0, 0.0, 1.0] if "orientation" not in obj_config else obj_config["orientation"] - obj_pos, obj_orn = T.pose_transform(*env.scene.get_position_orientation(), obj_pos, obj_orn) - obj.set_position_orientation(obj_pos, obj_orn) + obj.set_position_orientation(position=obj_pos, orientation=obj_orn, frame="scene") def _create_termination_conditions(self): terminations = dict() @@ -90,9 +89,7 @@ def _reset_agent(self, env): robot.set_joint_positions(robot_pose["joint_pos"], joint_control_idx) robot_pos = th.tensor(robot_pose["base_pos"]) robot_orn = th.tensor(robot_pose["base_ori"]) - # Move it to the appropriate scene. TODO: The scene should provide a function for this. - robot_pos, robot_orn = T.pose_transform(*robot.scene.get_position_orientation(), robot_pos, robot_orn) - robot.set_position_orientation(robot_pos, robot_orn) + robot.set_position_orientation(position=robot_pos, orientation=robot_orn, frame="scene") # Otherwise, reset using the primitive controller. 
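Note: throughout the task files, the frame="scene" setter replaces manual scene-transform composition; a sketch of the before/after pattern (variable names mirror the removed lines above):

    # Before: compose the scene transform by hand
    robot_pos, robot_orn = T.pose_transform(*robot.scene.get_position_orientation(), robot_pos, robot_orn)
    robot.set_position_orientation(robot_pos, robot_orn)
    # After: the setter performs the scene-to-world conversion internally
    robot.set_position_orientation(position=robot_pos, orientation=robot_orn, frame="scene")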
else: @@ -147,7 +144,7 @@ raise ValueError("Robot could not settle") # Check if the robot has toppled - robot_up = T.quat_apply(robot.get_orientation(), th.tensor([0, 0, 1], dtype=th.float32)) + robot_up = T.quat_apply(robot.get_position_orientation()[1], th.tensor([0, 0, 1], dtype=th.float32)) if robot_up[2] < 0.75: raise ValueError("Robot has toppled over") @@ -166,8 +163,7 @@ def _reset_scene(self, env): # Set object pose obj_pos = [0.0, 0.0, 0.0] if "position" not in obj_config else obj_config["position"] obj_orn = [0.0, 0.0, 0.0, 1.0] if "orientation" not in obj_config else obj_config["orientation"] - obj_pos, obj_orn = T.pose_transform(*env.scene.get_position_orientation(), obj_pos, obj_orn) - obj.set_position_orientation(obj_pos, obj_orn) + obj.set_position_orientation(position=obj_pos, orientation=obj_orn, frame="scene") # Overwrite reset by removing only the scene reset def reset(self, env): diff --git a/omnigibson/tasks/point_navigation_task.py b/omnigibson/tasks/point_navigation_task.py index a26b1f7bd..dba27ccb2 100644 --- a/omnigibson/tasks/point_navigation_task.py +++ b/omnigibson/tasks/point_navigation_task.py @@ -344,8 +344,8 @@ def _reset_agent(self, env): # Update visuals if requested if self._visualize_goal: - self._initial_pos_marker.set_position(self._initial_pos) - self._goal_pos_marker.set_position(self._goal_pos) + self._initial_pos_marker.set_position_orientation(position=self._initial_pos) + self._goal_pos_marker.set_position_orientation(position=self._goal_pos) def _reset_variables(self, env): # Run super first @@ -376,10 +376,10 @@ def _global_pos_to_robot_frame(self, env, pos): Args: env (TraversableEnv): Environment instance - pos (3-array): global (x,y,z) position + pos (th.Tensor): global (x,y,z) position Returns: - 3-array: (x,y,z) position in self._robot_idn agent's local frame + th.Tensor: (x,y,z) position in self._robot_idn agent's local frame """ delta_pos_global = pos - env.robots[self._robot_idn].states[Pose].get_value()[0] return T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T @ delta_pos_global @@ -458,11 +458,11 @@ def _step_visualization(self, env): floor_height = env.scene.get_floor_height(self._floor) num_nodes = min(self._n_vis_waypoints, shortest_path.shape[0]) for i in range(num_nodes): - self._waypoint_markers[i].set_position( + self._waypoint_markers[i].set_position_orientation( position=th.tensor([shortest_path[i][0], shortest_path[i][1], floor_height]) ) for i in range(num_nodes, self._n_vis_waypoints): - self._waypoint_markers[i].set_position(position=th.tensor([0.0, 0.0, 100.0])) + self._waypoint_markers[i].set_position_orientation(position=th.tensor([0.0, 0.0, 100.0])) def step(self, env, action): # Run super method first diff --git a/omnigibson/termination_conditions/falling.py b/omnigibson/termination_conditions/falling.py index 88f860ede..08dde7536 100644 --- a/omnigibson/termination_conditions/falling.py +++ b/omnigibson/termination_conditions/falling.py @@ -31,14 +31,14 @@ def __init__(self, robot_idn=0, fall_height=0.03, topple=True, tilt_tolerance=0.
def _step(self, task, env, action): # Terminate if the specified robot is falling out of the scene - robot_z = env.scene.robots[self._robot_idn].get_position()[2] + robot_z = env.scene.robots[self._robot_idn].get_position_orientation()[0][2] if robot_z < (env.scene.get_floor_height() - self._fall_height): return True # Terminate if the robot has toppled over if self._topple: robot_up = T.quat_apply( - env.scene.robots[self._robot_idn].get_orientation(), th.tensor([0, 0, 1], dtype=th.float32) + env.scene.robots[self._robot_idn].get_position_orientation()[1], th.tensor([0, 0, 1], dtype=th.float32) ) if robot_up[2] < self._tilt_tolerance: return True diff --git a/omnigibson/utils/bddl_utils.py b/omnigibson/utils/bddl_utils.py index 63b805cf2..368b94dfd 100644 --- a/omnigibson/utils/bddl_utils.py +++ b/omnigibson/utils/bddl_utils.py @@ -1186,7 +1186,7 @@ def _import_sampleable_objects(self): # Move the robot object frame to a far away location, similar to other newly imported objects below self._agent.set_position_orientation( - th.tensor([300, 300, 300], dtype=th.float32), th.tensor([0, 0, 0, 1], dtype=th.float32) + position=th.tensor([300, 300, 300], dtype=th.float32), orientation=th.tensor([0, 0, 0, 1], dtype=th.float32) ) self._sampled_objects = set() @@ -1283,7 +1283,9 @@ def _import_sampleable_objects(self): self._env.scene.add_object(simulator_obj) # Set these objects to be far-away locations - simulator_obj.set_position(th.tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0) + simulator_obj.set_position_orientation( + position=th.tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0 + ) self._sampled_objects.add(simulator_obj) self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=simulator_obj) diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index 49d260c1a..dd44f04f5 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -200,7 +200,7 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint center_of_selected_surface_along_push_axis, _, ) = _get_closest_point_to_point_in_world_frame( - points_along_push_axis, (bbox_center_in_world, bbox_quat_in_world), robot.get_position() + points_along_push_axis, (bbox_center_in_world, bbox_quat_in_world), robot.get_position_orientation()[0] ) push_axis_closer_side_sign = 1 if push_axis_closer_side_idx == 0 else -1 @@ -333,7 +333,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint, link_name=link_name, visual=False ) - bbox_quat_in_world = link.get_orientation() + bbox_quat_in_world = link.get_position_orientation()[1] bbox_extent_in_link_frame = th.tensor( target_obj.native_link_bboxes[link_name]["collision"]["axis_aligned"]["extent"] ) @@ -371,7 +371,9 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint, for point in points_along_open_axis ] open_axis_closer_side_idx, _, _ = _get_closest_point_to_point_in_world_frame( - points_along_open_axis_after_rotation, (bbox_center_in_world, bbox_quat_in_world), robot.get_position() + points_along_open_axis_after_rotation, + (bbox_center_in_world, bbox_quat_in_world), + robot.get_position_orientation()[0], ) open_axis_closer_side_sign = 1 if open_axis_closer_side_idx == 0 else -1 center_of_selected_surface_along_push_axis = points_along_open_axis[open_axis_closer_side_idx] @@ -472,7 +474,7 @@ def _get_orientation_facing_vector_with_random_yaw(vector): side = 
th.linalg.cross(rand_vec, forward) side /= th.norm(3) up = th.linalg.cross(forward, side) - # assert th.isclose(th.norm(up), 1, atol=1e-3) + # assert math.isclose(th.norm(up).item(), 1, abs_tol=1e-3) rotmat = th.tensor([forward, side, up]).T return T.mat2quat(rotmat) diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py index 4f9698bbf..3cfe392c9 100644 --- a/omnigibson/utils/motion_planning_utils.py +++ b/omnigibson/utils/motion_planning_utils.py @@ -159,8 +159,8 @@ def remove_unnecessary_rotations(path): return new_path - pos = robot.get_position() - yaw = T.quat2euler(robot.get_orientation())[2] + pos, orn = robot.get_position_orientation() + yaw = T.quat2euler(orn)[2] start_conf = (pos[0], pos[1], yaw) # create an SE(2) state space diff --git a/omnigibson/utils/numpy_utils.py b/omnigibson/utils/numpy_utils.py index 1d540c27b..fda0a3164 100644 --- a/omnigibson/utils/numpy_utils.py +++ b/omnigibson/utils/numpy_utils.py @@ -15,3 +15,7 @@ def vtarray_to_torch(vtarray, dtype=th.float32, device="cpu"): else: assert device.startswith("cuda") return th.tensor(np.array(vtarray), dtype=dtype, device=device) + + +def pil_to_tensor(pil_image): + return th.tensor(np.array(pil_image), dtype=th.uint8) diff --git a/omnigibson/utils/object_state_utils.py b/omnigibson/utils/object_state_utils.py index 3b27e4dfe..9e6852bba 100644 --- a/omnigibson/utils/object_state_utils.py +++ b/omnigibson/utils/object_state_utils.py @@ -140,7 +140,7 @@ def sample_kinematics( # Position needs to be set to be very far away because the object's # original position might be blocking rays (use_ray_casting_method=True) old_pos = th.tensor([100, 100, 10]) - objA.set_position_orientation(old_pos, orientation) + objA.set_position_orientation(position=old_pos, orientation=orientation) objA.keep_still() # We also need to step physics to make sure the pose propagates downstream (e.g.: to Bounding Box computations) og.sim.step_physics() @@ -186,7 +186,7 @@ def sample_kinematics( success = False else: pos[2] += z_offset - objA.set_position_orientation(pos, orientation) + objA.set_position_orientation(position=pos, orientation=orientation) objA.keep_still() og.sim.step_physics() @@ -208,14 +208,14 @@ def sample_kinematics( # until it settles aabb_lower_a, aabb_upper_a = objA.states[AABB].get_value() aabb_lower_b, aabb_upper_b = objB.states[AABB].get_value() - bbox_to_obj = objA.get_position() - (aabb_lower_a + aabb_upper_a) / 2.0 + bbox_to_obj = objA.get_position_orientation()[0] - (aabb_lower_a + aabb_upper_a) / 2.0 desired_bbox_pos = (aabb_lower_b + aabb_upper_b) / 2.0 desired_bbox_pos[2] = aabb_upper_b[2] + (aabb_upper_a[2] - aabb_lower_a[2]) / 2.0 pos = desired_bbox_pos + bbox_to_obj success = True if success and not skip_falling: - objA.set_position_orientation(pos, orientation) + objA.set_position_orientation(position=pos, orientation=orientation) objA.keep_still() # Step until either (a) max steps is reached (total of 0.5 second in sim time) or (b) contact is made, then @@ -294,7 +294,7 @@ def sample_cloth_on_rigid(obj, other, max_trials=40, z_offset=0.05, randomize_xy z_lo, z_hi = 0, math.pi * 2 orn = T.euler2quat(th.tensor([0.0, 0.0, (th.rand(1) * (z_hi - z_lo) + z_lo).item()])) - obj.set_position_orientation(pos, orn) + obj.set_position_orientation(position=pos, orientation=orn) obj.root_link.reset() obj.keep_still() diff --git a/omnigibson/utils/object_utils.py b/omnigibson/utils/object_utils.py index e8fee653a..296cd6252 100644 --- a/omnigibson/utils/object_utils.py +++ 
b/omnigibson/utils/object_utils.py @@ -28,17 +28,17 @@ def sample_stable_orientations(obj, n_samples=10, drop_aabb_offset=0.1): aabb_extent = obj.aabb_extent radius = th.norm(aabb_extent) / 2.0 drop_pos = th.tensor([0, 0, radius + drop_aabb_offset]) - center_offset = obj.get_position() - obj.aabb_center + center_offset = obj.get_position_orientation()[0] - obj.aabb_center drop_orientations = T.random_quaternion(n_samples) stable_orientations = th.zeros_like(drop_orientations) for i, drop_orientation in enumerate(drop_orientations): # Sample orientation, drop, wait to stabilize, then record pos = drop_pos + T.quat2mat(drop_orientation) @ center_offset - obj.set_position_orientation(pos, drop_orientation) + obj.set_position_orientation(position=pos, orientation=drop_orientation) obj.keep_still() for j in range(25): og.sim.step() - stable_orientations[i] = obj.get_orientation() + stable_orientations[i] = obj.get_position_orientation()[1] return stable_orientations @@ -56,8 +56,8 @@ def compute_bbox_offset(obj): """ og.sim.stop() assert th.all(obj.scale == 1.0) - obj.set_position_orientation(th.zeros(3), th.tensor([0, 0, 0, 1.0])) - return obj.aabb_center - obj.get_position() + obj.set_position_orientation(position=th.zeros(3), orientation=th.tensor([0, 0, 0, 1.0])) + return obj.aabb_center - obj.get_position_orientation()[0] def compute_native_bbox_extent(obj): @@ -73,7 +73,7 @@ def compute_native_bbox_extent(obj): """ og.sim.stop() assert th.all(obj.scale == 1.0) - obj.set_position_orientation(th.zeros(3), th.tensor([0, 0, 0, 1.0])) + obj.set_position_orientation(position=th.zeros(3), orientation=th.tensor([0, 0, 0, 1.0])) return obj.aabb_extent @@ -85,7 +85,7 @@ def compute_base_aligned_bboxes(obj): pts_in_link_frame = [] for mesh_name, mesh in mesh_list.items(): pts = mesh.get_attribute("points") - local_pos, local_orn = mesh.get_local_pose() + local_pos, local_orn = mesh.get_position_orientation(frame="parent") pts_in_link_frame.append(get_particle_positions_from_frame(local_pos, local_orn, mesh.scale, pts)) pts_in_link_frame = th.cat(pts_in_link_frame, dim=0) max_pt = th.max(pts_in_link_frame, dim=0).values diff --git a/omnigibson/utils/sim_utils.py b/omnigibson/utils/sim_utils.py index fda089f3c..1a0265c56 100644 --- a/omnigibson/utils/sim_utils.py +++ b/omnigibson/utils/sim_utils.py @@ -306,9 +306,11 @@ def place_base_pose(obj, pos, quat=None, z_offset=None): from omnigibson.object_states import AABB lower, _ = obj.states[AABB].get_value() - cur_pos = obj.get_position() + cur_pos = obj.get_position_orientation()[0] z_diff = cur_pos[2] - lower[2] - obj.set_position_orientation(pos + th.tensor([0, 0, z_diff if z_offset is None else z_diff + z_offset]), quat) + obj.set_position_orientation( + position=pos + th.tensor([0, 0, z_diff if z_offset is None else z_diff + z_offset]), orientation=quat + ) def test_valid_pose(obj, pos, quat=None, z_offset=None): diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 1835fed22..9a48eec31 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -95,7 +95,7 @@ def get_action(self, robot_obs: TeleopObservation) -> th.Tensor: rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) base_pos, base_orn = self.robot.get_position_orientation() target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) - self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) + 
self.control_markers[arm_name].set_position_orientation(position=target_pos, orientation=target_orn) return self.robot.teleop_data_to_action(self.teleop_action) def reset(self) -> None: @@ -336,8 +336,10 @@ def reset_transform_mapping(self, arm: str = "right") -> None: Args: arm(str): name of the arm, one of "left" or "right". Default is "right". """ - robot_base_orn = self.robot.get_orientation() - robot_eef_pos = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position() + robot_base_orn = self.robot.get_position_orientation()[1] + robot_eef_pos = self.robot.eef_links[ + self.robot.arm_names[self.robot_arms.index(arm)] + ].get_position_orientation()[0] target_transform = self.og2xr(pos=robot_eef_pos, orn=robot_base_orn) self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( target_transform, self.controllers[arm] diff --git a/omnigibson/utils/ui_utils.py b/omnigibson/utils/ui_utils.py index d8b0ccf47..4be556d63 100644 --- a/omnigibson/utils/ui_utils.py +++ b/omnigibson/utils/ui_utils.py @@ -559,9 +559,10 @@ def _sub_keyboard_event(self, event, *args, **kwargs): if command is not None: # Convert to world frame to move the camera - transform = T.quat2mat(self.cam.get_orientation()) + pos, orn = self.cam.get_position_orientation() + transform = T.quat2mat(orn) delta_pos_global = transform @ command - self.cam.set_position(self.cam.get_position() + delta_pos_global) + self.cam.set_position_orientation(position=pos + delta_pos_global) return True diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 086470400..37dea68ab 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -633,10 +633,10 @@ def sync_raw_object_transforms_in_usd(cls, prim): from omnigibson.prims.xform_prim import XFormPrim # 1. For every link, update its xformOp properties based on the delta_tf between object frame and link frame - obj_pos, obj_quat = XFormPrim.get_local_pose(prim) + obj_pos, obj_quat = XFormPrim.get_position_orientation(prim, frame="parent") for link in prim.links.values(): rel_pos, rel_quat = T.relative_pose_transform(*link.get_position_orientation(), obj_pos, obj_quat) - XFormPrim.set_local_pose(link, rel_pos, rel_quat) + XFormPrim.set_position_orientation(link, position=rel_pos, orientation=rel_quat, frame="parent") # 2. For every joint, update its linear / angular joint state if prim.n_joints > 0: joints_pos = prim.get_joint_positions() @@ -680,7 +680,9 @@ def reset_raw_object_transforms_in_usd(cls, prim): # 1. For every link, update its xformOp properties to be 0 for link in prim.links.values(): - XFormPrim.set_local_pose(link, th.zeros(3), th.tensor([0, 0, 0, 1.0])) + XFormPrim.set_position_orientation( + link, position=th.zeros(3), orientation=th.tensor([0, 0, 0, 1.0]), frame="parent" + ) # 2. 
For every joint, update its linear / angular joint state to be 0 if prim.n_joints > 0: for joint in prim.joints.values(): @@ -745,6 +747,15 @@ def _refresh(cls): @classmethod def get_world_pose(cls, prim_path): + """ + Gets pose of the prim object with respect to the world frame. + Args: + prim_path (str): path of the prim object + Returns: + 2-tuple: + - torch.Tensor: (x,y,z) position in the world frame + - torch.Tensor: (x,y,z,w) quaternion orientation in the world frame + """ # Add to stored prims if not already existing if prim_path not in cls.PRIMS: cls.PRIMS[prim_path] = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=prim_path, fabric=True) @@ -773,6 +784,25 @@ def get_world_pose_with_scale(cls, prim_path): return th.tensor(_get_world_pose_transform_w_scale(cls.PRIMS[prim_path]), dtype=th.float32).T + @classmethod + def convert_world_pose_to_local(cls, prim, position, orientation): + """Converts a world pose to a local pose under a prim's parent.""" + world_transform = T.pose2mat((position, orientation)) + parent_path = str(lazy.omni.isaac.core.utils.prims.get_prim_parent(prim).GetPath()) + parent_world_transform = cls.get_world_pose_with_scale(parent_path) + + local_transform = th.linalg.inv_ex(parent_world_transform).inverse @ world_transform + local_transform[:3, :3] /= th.linalg.norm(local_transform[:3, :3], dim=0) # unscale local transform's rotation + + # Check that the local transform consists only of a position, scale and rotation + product = local_transform[:3, :3] @ local_transform[:3, :3].T + assert th.allclose( + product, th.diag(th.diag(product)), atol=1e-3 + ), f"{prim.GetPath()} local transform is not orthogonal." + + # Return the local pose + return T.mat2pose(local_transform) + class BatchControlViewAPIImpl: """ diff --git a/setup.py b/setup.py index f69a32992..82eae318b 100644 --- a/setup.py +++ b/setup.py @@ -22,8 +22,8 @@ packages=find_packages(), install_requires=[ "gymnasium>=0.28.1", - "numpy~=1.23.5", - "scipy~=1.10.1", + "numpy>=1.23.5", + "scipy>=1.10.1", "GitPython~=3.1.40", "transforms3d~=0.4.1", "networkx~=3.2.1", @@ -35,7 +35,7 @@ "h5py~=3.10.0", "cryptography~=41.0.7", "bddl~=3.5.0", - "opencv-python~=4.8.1", + "opencv-python>=4.8.1", "nest_asyncio~=1.5.8", "imageio~=2.33.1", "imageio-ffmpeg~=0.4.9", @@ -46,8 +46,7 @@ "aenum~=3.1.15", "rtree~=1.2.0", "graphviz~=0.20", - "numba~=0.60.0", - "torchvision~=0.18.1", + "numba>=0.60.0", ], extras_require={ "isaac": ["isaacsim-for-omnigibson>=4.1.0"], diff --git a/tests/benchmark/benchmark_interactive_scene.py b/tests/benchmark/benchmark_interactive_scene.py index b1f2103ff..5f887f10b 100644 --- a/tests/benchmark/benchmark_interactive_scene.py +++ b/tests/benchmark/benchmark_interactive_scene.py @@ -42,7 +42,7 @@ def benchmark_scene(scene_name, non_rigid_simulation=False, import_robot=True): og.sim.import_scene(scene) if gm.RENDER_VIEWER_CAMERA: - og.sim.viewer_camera.set_position_orientation([0, 0, 0.2], [0.5, -0.5, -0.5, 0.5]) + og.sim.viewer_camera.set_position_orientation(position=[0, 0, 0.2], orientation=[0.5, -0.5, -0.5, 0.5]) print(time.time() - start) if import_robot: @@ -69,7 +69,7 @@ def benchmark_scene(scene_name, non_rigid_simulation=False, import_robot=True): og.sim.play() if non_rigid_simulation: - cloth.set_position([1, 0, 1]) + cloth.set_position_orientation(position=[1, 0, 1]) og.sim.step() fps = [] physics_fps = [] diff --git a/tests/benchmark/benchmark_object_count.py b/tests/benchmark/benchmark_object_count.py index 960a41927..041148944 100644 --- 
a/tests/benchmark/benchmark_object_count.py +++ b/tests/benchmark/benchmark_object_count.py @@ -65,7 +65,7 @@ def benchmark_scene(sim): # x, y, z = _get_position(obj_idx, RAND_POSITION) x, y = 0, 0 z = 0.5 + j * OBJ_SCALE * 2.25 - obj.set_position(position=th.tensor([x, y, z])) + obj.set_position_orientation(position=th.tensor([x, y, z])) new_objs.append(obj) # Take a step to initialize the new objects (done in _non_physics_step()). diff --git a/tests/benchmark/profiling.py b/tests/benchmark/profiling.py index 24ed6dc8a..eb449d9f2 100644 --- a/tests/benchmark/profiling.py +++ b/tests/benchmark/profiling.py @@ -143,7 +143,7 @@ def main(): for n, knife in enumerate(knifes): knife.set_position_orientation( - position=apples[n].get_position() + th.tensor([-0.15, 0.0, 0.1 * (n + 2)]), + position=apples[n].get_position_orientation()[0] + th.tensor([-0.15, 0.0, 0.1 * (n + 2)]), orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)), ) knife.keep_still() @@ -153,7 +153,9 @@ def main(): output, results = [], [] # Update the simulator's viewer camera's pose so it points towards the robot - og.sim.viewer_camera.set_position([SCENE_OFFSET[args.scene][0], -3 + SCENE_OFFSET[args.scene][1], 1]) + og.sim.viewer_camera.set_position_orientation( + position=[SCENE_OFFSET[args.scene][0], -3 + SCENE_OFFSET[args.scene][1], 1] + ) # record total load time total_load_time = time.time() - load_start diff --git a/tests/test_controllers.py b/tests/test_controllers.py index 07142882c..ec7ff9732 100644 --- a/tests/test_controllers.py +++ b/tests/test_controllers.py @@ -168,7 +168,7 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1): for i, robot in enumerate(env.robots): controller_config = {f"arm_{arm}": {"name": controller, **controller_kwargs} for arm in robot.arm_names} robot.set_position_orientation( - th.tensor([0.0, i * 5.0, 0.0]), T.euler2quat(th.tensor([0.0, 0.0, np.pi / 3])) + position=th.tensor([0.0, i * 5.0, 0.0]), orientation=T.euler2quat(th.tensor([0.0, 0.0, np.pi / 3])) ) robot.reset() robot.keep_still() diff --git a/tests/test_multiple_envs.py b/tests/test_multiple_envs.py index 2ff76d009..3a9740218 100644 --- a/tests/test_multiple_envs.py +++ b/tests/test_multiple_envs.py @@ -1,12 +1,16 @@ +import pytest import torch as th import omnigibson as og +import omnigibson.lazy as lazy +import omnigibson.utils.transform_utils as T from omnigibson import object_states from omnigibson.macros import gm from omnigibson.utils.constants import ParticleModifyCondition +from omnigibson.utils.transform_utils import quat_multiply -def setup_multi_environment(num_of_envs, additional_objects_cfg=[]): +def setup_multi_environment(num_of_envs, robot="Fetch", additional_objects_cfg=[]): cfg = { "scene": { "type": "InteractiveTraversableScene", @@ -15,7 +19,7 @@ def setup_multi_environment(num_of_envs, additional_objects_cfg=[]): }, "robots": [ { - "type": "Fetch", + "type": robot, "obs_modalities": [], } ], @@ -38,49 +42,142 @@ def setup_multi_environment(num_of_envs, additional_objects_cfg=[]): return vec_env -def test_multi_scene_dump_and_load(): +def test_multi_scene_dump_load_states(): vec_env = setup_multi_environment(3) - robot_displacement = th.tensor([1.0, 0.0, 0.0], dtype=th.float32) - scene_three_robot = vec_env.envs[2].scene.robots[0] - robot_new_pos = scene_three_robot.get_position() + robot_displacement - scene_three_robot.set_position(robot_new_pos) - scene_three_state = vec_env.envs[2].scene._dump_state() + robot_0 = vec_env.envs[0].scene.robots[0] + robot_1 = 
vec_env.envs[1].scene.robots[0] + robot_2 = vec_env.envs[2].scene.robots[0] + + robot_0_pos = robot_0.get_position_orientation()[0] + robot_1_pos = robot_1.get_position_orientation()[0] + robot_2_pos = robot_2.get_position_orientation()[0] + + dist_0_1 = robot_1_pos - robot_0_pos + dist_1_2 = robot_2_pos - robot_1_pos + + assert th.allclose(dist_0_1, dist_1_2, atol=1e-3) + + # Set a different pose for the robot in each environment + pose_1 = (th.tensor([1, 1, 1], dtype=th.float32), th.tensor([0, 0, 0, 1], dtype=th.float32)) + pose_2 = (th.tensor([0, 2, 1], dtype=th.float32), th.tensor([0, 0, 0.7071, 0.7071], dtype=th.float32)) + pose_3 = (th.tensor([-1, -1, 0.5], dtype=th.float32), th.tensor([0.5, 0.5, 0.5, 0.5], dtype=th.float32)) + + robot_0.set_position_orientation(*pose_1, frame="scene") + robot_1.set_position_orientation(*pose_2, frame="scene") + robot_2.set_position_orientation(*pose_3, frame="scene") + + # Run simulation for a bit + for _ in range(10): + og.sim.step() + + initial_robot_pos_scene_1 = robot_1.get_position_orientation(frame="scene") + initial_robot_pos_scene_2 = robot_2.get_position_orientation(frame="scene") + initial_robot_pos_scene_0 = robot_0.get_position_orientation(frame="scene") + + # Save states + robot_0_state = vec_env.envs[0].scene._dump_state() + robot_1_state = vec_env.envs[1].scene._dump_state() + robot_2_state = vec_env.envs[2].scene._dump_state() og.clear() + # recreate the environments vec_env = setup_multi_environment(3) - initial_robot_pos_scene_one = vec_env.envs[0].scene.robots[0].get_position() - vec_env.envs[0].scene._load_state(scene_three_state) - new_robot_pos_scene_one = vec_env.envs[0].scene.robots[0].get_position() - assert th.allclose(new_robot_pos_scene_one - initial_robot_pos_scene_one, robot_displacement, atol=1e-3) + + # Load the states in a different order + vec_env.envs[1].scene._load_state(robot_1_state) + vec_env.envs[2].scene._load_state(robot_2_state) + vec_env.envs[0].scene._load_state(robot_0_state) + + post_robot_pos_scene_1 = vec_env.envs[1].scene.robots[0].get_position_orientation(frame="scene") + post_robot_pos_scene_2 = vec_env.envs[2].scene.robots[0].get_position_orientation(frame="scene") + post_robot_pos_scene_0 = vec_env.envs[0].scene.robots[0].get_position_orientation(frame="scene") + + # Check that the poses are the same + assert th.allclose(initial_robot_pos_scene_0[0], post_robot_pos_scene_0[0], atol=1e-3) + assert th.allclose(initial_robot_pos_scene_1[0], post_robot_pos_scene_1[0], atol=1e-3) + assert th.allclose(initial_robot_pos_scene_2[0], post_robot_pos_scene_2[0], atol=1e-3) + + assert th.allclose(initial_robot_pos_scene_0[1], post_robot_pos_scene_0[1], atol=1e-3) + assert th.allclose(initial_robot_pos_scene_1[1], post_robot_pos_scene_1[1], atol=1e-3) + assert th.allclose(initial_robot_pos_scene_2[1], post_robot_pos_scene_2[1], atol=1e-3) og.clear() -def test_multi_scene_displacement(): +def test_multi_scene_get_local_position(): vec_env = setup_multi_environment(3) - robot_0_pos = vec_env.envs[0].scene.robots[0].get_position() - robot_1_pos = vec_env.envs[1].scene.robots[0].get_position() - robot_2_pos = vec_env.envs[2].scene.robots[0].get_position() - dist_0_1 = robot_1_pos - robot_0_pos - dist_1_2 = robot_2_pos - robot_1_pos - assert th.allclose(dist_0_1, dist_1_2, atol=1e-3) + robot_1_pos_local = vec_env.envs[1].scene.robots[0].get_position_orientation(frame="scene")[0] + robot_1_pos_global = vec_env.envs[1].scene.robots[0].get_position_orientation()[0] + + pos_scene = 
vec_env.envs[1].scene.get_position_orientation()[0] + + assert th.allclose(robot_1_pos_global, pos_scene + robot_1_pos_local, atol=1e-3) + og.clear() + + +def test_multi_scene_set_local_position(): + vec_env = setup_multi_environment(3) + + # Get the robot from the second environment + robot = vec_env.envs[1].scene.robots[0] + + # Get the initial global position of the robot + initial_global_pos = robot.get_position_orientation()[0] + + # Define a new global position + new_global_pos = initial_global_pos + th.tensor([1.0, 0.5, 0.0], dtype=th.float32) + + # Set the new global position + robot.set_position_orientation(position=new_global_pos) + + # Get the updated global position + updated_global_pos = robot.get_position_orientation()[0] + + # Get the scene's global position + scene_pos = vec_env.envs[1].scene.get_position_orientation()[0] + + # Get the updated local position + updated_local_pos = robot.get_position_orientation(frame="scene")[0] + + # Calculate expected local position + expected_local_pos = new_global_pos - scene_pos + + # Assert that the global position has been updated correctly + assert th.allclose( + updated_global_pos, new_global_pos, atol=1e-3 + ), f"Updated global position {updated_global_pos} does not match expected {new_global_pos}" + + # Assert that the local position has been updated correctly + assert th.allclose( + updated_local_pos, expected_local_pos, atol=1e-3 + ), f"Updated local position {updated_local_pos} does not match expected {expected_local_pos}" + + # Assert that the change in global position is correct + global_pos_change = updated_global_pos - initial_global_pos + expected_change = th.tensor([1.0, 0.5, 0.0], dtype=th.float32) + assert th.allclose( + global_pos_change, expected_change, atol=1e-3 + ), f"Global position change {global_pos_change} does not match expected change {expected_change}" + og.clear() def test_multi_scene_scene_prim(): vec_env = setup_multi_environment(1) - original_robot_pos = vec_env.envs[0].scene.robots[0].get_position() + original_robot_pos = vec_env.envs[0].scene.robots[0].get_position_orientation()[0] scene_state = vec_env.envs[0].scene._dump_state() scene_prim_displacement = th.tensor([10.0, 0.0, 0.0], dtype=th.float32) - original_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position() + original_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position_orientation()[0] vec_env.envs[0].scene.set_position_orientation(position=original_scene_prim_pos + scene_prim_displacement) vec_env.envs[0].scene._load_state(scene_state) - new_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position() - new_robot_pos = vec_env.envs[0].scene.robots[0].get_position() + new_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position_orientation()[0] + new_robot_pos = vec_env.envs[0].scene.robots[0].get_position_orientation()[0] assert th.allclose(new_scene_prim_pos - original_scene_prim_pos, scene_prim_displacement, atol=1e-3) assert th.allclose(new_robot_pos - original_robot_pos, scene_prim_displacement, atol=1e-3) + og.clear() + def test_multi_scene_particle_source(): sink_cfg = dict( @@ -116,3 +213,278 @@ def test_multi_scene_particle_source(): for _ in range(50): og.sim.step() + + og.clear() + + +def test_multi_scene_position_orientation_relative_to_scene(): + vec_env = setup_multi_environment(3) + + # Get the robot from the second environment + robot = vec_env.envs[1].scene.robots[0] + + # Define a new position and orientation relative to the scene + new_relative_pos = th.tensor([1.0, 2.0, 0.5]) + 
+    new_relative_ori = th.tensor([0, 0, 0.7071, 0.7071])  # 90-degree rotation about the z-axis
+
+    # Set the new position and orientation relative to the scene
+    robot.set_position_orientation(position=new_relative_pos, orientation=new_relative_ori, frame="scene")
+
+    # Get the updated position and orientation relative to the scene
+    updated_relative_pos, updated_relative_ori = robot.get_position_orientation(frame="scene")
+
+    # Assert that the relative position has been updated correctly
+    assert th.allclose(
+        updated_relative_pos, new_relative_pos, atol=1e-3
+    ), f"Updated relative position {updated_relative_pos} does not match expected {new_relative_pos}"
+
+    # Assert that the relative orientation has been updated correctly
+    assert th.allclose(
+        updated_relative_ori, new_relative_ori, atol=1e-3
+    ), f"Updated relative orientation {updated_relative_ori} does not match expected {new_relative_ori}"
+
+    # Get the scene's global position and orientation
+    scene_pos, scene_ori = vec_env.envs[1].scene.get_position_orientation()
+
+    # Get the robot's global position and orientation
+    global_pos, global_ori = robot.get_position_orientation()
+
+    # Calculate the expected global position (the scene prim is not rotated, so this is a pure translation)
+    expected_global_pos = scene_pos + updated_relative_pos
+
+    # Assert that the global position is correct
+    assert th.allclose(
+        global_pos, expected_global_pos, atol=1e-3
+    ), f"Global position {global_pos} does not match expected {expected_global_pos}"
+
+    # Calculate the expected global orientation
+    expected_global_ori = quat_multiply(scene_ori, new_relative_ori)
+
+    # Assert that the global orientation is correct
+    assert th.allclose(
+        global_ori, expected_global_ori, atol=1e-3
+    ), f"Global orientation {global_ori} does not match expected {expected_global_ori}"
+
+    og.clear()
+
+
+def test_tiago_getter():
+    vec_env = setup_multi_environment(2, robot="Tiago")
+    robot1 = vec_env.envs[0].scene.robots[0]
+
+    robot1_world_position, robot1_world_orientation = robot1.get_position_orientation()
+    robot1_scene_position, robot1_scene_orientation = robot1.get_position_orientation(frame="scene")
+
+    # Compare the getter in the world and scene frames:
+    # since the first scene sits at the world origin, the two poses should match
+    assert th.allclose(robot1_world_position, robot1_scene_position, atol=1e-3)
+    assert th.allclose(robot1_world_orientation, robot1_scene_orientation, atol=1e-3)
+
+    # For a scene with a non-zero origin, composing the scene pose with the
+    # scene-frame pose must recover the world-frame pose
+    robot2 = vec_env.envs[1].scene.robots[0]
+    scene_position, scene_orientation = vec_env.envs[1].scene.get_position_orientation()
+
+    robot2_world_position, robot2_world_orientation = robot2.get_position_orientation()
+    robot2_scene_position, robot2_scene_orientation = robot2.get_position_orientation(frame="scene")
+
+    combined_position, combined_orientation = T.pose_transform(
+        scene_position, scene_orientation, robot2_scene_position, robot2_scene_orientation
+    )
+    assert th.allclose(robot2_world_position, combined_position, atol=1e-3)
+    assert th.allclose(robot2_world_orientation, combined_orientation, atol=1e-3)
+
+    # Clean up
+    og.clear()
+
+
+def test_tiago_setter():
+    vec_env = setup_multi_environment(2, robot="Tiago")
+
+    # Use a robot with a non-zero scene position
+    robot = vec_env.envs[1].scene.robots[0]
+
+    # Test setting position and orientation in the world frame
+    new_world_pos = th.tensor([1.0, 2.0, 0.5])
+    new_world_ori = T.euler2quat(th.tensor([0, 0, th.pi / 2]))
+    robot.set_position_orientation(position=new_world_pos, orientation=new_world_ori)
+
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Test setting position and orientation in the scene frame
+    new_scene_pos = th.tensor([0.5, 1.0, 0.25])
+    new_scene_ori = T.euler2quat(th.tensor([0, th.pi / 4, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Set a second, different pose in the scene frame
+    new_scene_pos = th.tensor([-1.0, -2.0, 0.1])
+    new_scene_ori = T.euler2quat(th.tensor([th.pi / 6, 0, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    # Verify that the world-frame pose has changed after setting in the scene frame
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert not th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert not th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Clean up
+    og.clear()
+
+    # Verify that the getter/setter behavior is unaffected while the simulator is stopped
+    vec_env = setup_multi_environment(2, robot="Tiago")
+    og.sim.stop()
+
+    # Use a robot with a non-zero scene position
+    robot = vec_env.envs[1].scene.robots[0]
+
+    # Test setting position and orientation in the world frame
+    new_world_pos = th.tensor([1.0, 2.0, 0.5])
+    new_world_ori = T.euler2quat(th.tensor([0, 0, th.pi / 2]))
+    robot.set_position_orientation(position=new_world_pos, orientation=new_world_ori)
+
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Test setting position and orientation in the scene frame
+    new_scene_pos = th.tensor([0.5, 1.0, 0.25])
+    new_scene_ori = T.euler2quat(th.tensor([0, th.pi / 4, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Set a second, different pose in the scene frame
+    new_scene_pos = th.tensor([-1.0, -2.0, 0.1])
+    new_scene_ori = T.euler2quat(th.tensor([th.pi / 6, 0, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Verify that the world-frame pose has changed after setting in the scene frame
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert not th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert not th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    og.clear()
+
+
+@pytest.mark.skip("Behavior getter is currently broken")
+def test_behavior_getter():
+    vec_env = setup_multi_environment(2, robot="BehaviorRobot")
+    robot1 = vec_env.envs[0].scene.robots[0]
+
+    robot1_world_position, robot1_world_orientation = robot1.get_position_orientation()
+    robot1_scene_position, robot1_scene_orientation = robot1.get_position_orientation(frame="scene")
+
+    # Compare the getter in the world and scene frames:
+    # since the first scene sits at the world origin, the two poses should match
+    assert th.allclose(robot1_world_position, robot1_scene_position, atol=1e-3)
+    assert th.allclose(robot1_world_orientation, robot1_scene_orientation, atol=1e-3)
+
+    # For a scene with a non-zero origin, composing the scene pose with the
+    # scene-frame pose must recover the world-frame pose
+    robot2 = vec_env.envs[1].scene.robots[0]
+    scene_position, scene_orientation = vec_env.envs[1].scene.get_position_orientation()
+    robot2_world_position, robot2_world_orientation = robot2.get_position_orientation()
+    robot2_scene_position, robot2_scene_orientation = robot2.get_position_orientation(frame="scene")
+
+    combined_position, combined_orientation = T.pose_transform(
+        scene_position, scene_orientation, robot2_scene_position, robot2_scene_orientation
+    )
+    assert th.allclose(robot2_world_position, combined_position, atol=1e-3)
+    assert th.allclose(robot2_world_orientation, combined_orientation, atol=1e-3)
+
+    # Clean up
+    og.clear()
+
+
+@pytest.mark.skip("Behavior setter is currently broken")
+def test_behavior_setter():
+    vec_env = setup_multi_environment(2, robot="BehaviorRobot")
+
+    # Use a robot with a non-zero scene position
+    robot = vec_env.envs[1].scene.robots[0]
+
+    # Test setting position and orientation in the world frame
+    new_world_pos = th.tensor([1.0, 2.0, 0.5])
+    new_world_ori = T.euler2quat(th.tensor([0, 0, th.pi / 2]))
+
+    robot.set_position_orientation(position=new_world_pos, orientation=new_world_ori)
+
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Test setting position and orientation in the scene frame
+    new_scene_pos = th.tensor([0.5, 1.0, 0.25])
+    new_scene_ori = T.euler2quat(th.tensor([0, th.pi / 4, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Set a second, different pose in the scene frame
+    new_scene_pos = th.tensor([-1.0, -2.0, 0.1])
+    new_scene_ori = T.euler2quat(th.tensor([th.pi / 6, 0, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Verify that the world-frame pose has changed after setting in the scene frame
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert not th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert not th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Clean up
+    og.clear()
+
+    # Verify that the getter/setter behavior is unaffected while the simulator is stopped
+    vec_env = setup_multi_environment(2, robot="BehaviorRobot")
+    og.sim.stop()
+
+    # Use a robot with a non-zero scene position
+    robot = vec_env.envs[1].scene.robots[0]
+
+    # Test setting position and orientation in the world frame
+    new_world_pos = th.tensor([1.0, 2.0, 0.5])
+    new_world_ori = T.euler2quat(th.tensor([0, 0, th.pi / 2]))
+    robot.set_position_orientation(position=new_world_pos, orientation=new_world_ori)
+
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert th.allclose(got_world_ori, new_world_ori, atol=1e-3)
+
+    # Test setting position and orientation in the scene frame
+    new_scene_pos = th.tensor([0.5, 1.0, 0.25])
+    new_scene_ori = T.euler2quat(th.tensor([0, th.pi / 4, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Set a second, different pose in the scene frame
+    new_scene_pos = th.tensor([-1.0, -2.0, 0.1])
+    new_scene_ori = T.euler2quat(th.tensor([th.pi / 6, 0, 0]))
+    robot.set_position_orientation(position=new_scene_pos, orientation=new_scene_ori, frame="scene")
+
+    got_scene_pos, got_scene_ori = robot.get_position_orientation(frame="scene")
+    assert th.allclose(got_scene_pos, new_scene_pos, atol=1e-3)
+    assert th.allclose(got_scene_ori, new_scene_ori, atol=1e-3)
+
+    # Verify that the world-frame pose has changed after setting in the scene frame
+    got_world_pos, got_world_ori = robot.get_position_orientation()
+    assert not th.allclose(got_world_pos, new_world_pos, atol=1e-3)
+    assert not th.allclose(got_world_ori, new_world_ori, atol=1e-3)
diff --git a/tests/test_object_states.py b/tests/test_object_states.py
index b50293a71..57f6f204d 100644
--- a/tests/test_object_states.py
+++ b/tests/test_object_states.py
@@ -27,7 +27,7 @@ def test_on_top(env):
 
         assert obj.states[OnTop].get_value(breakfast_table)
 
-        obj.set_position(th.ones(3) * 10 * (i + 1))
+        obj.set_position_orientation(position=th.ones(3) * 10 * (i + 1))
         og.sim.step()
 
         assert not obj.states[OnTop].get_value(breakfast_table)
@@ -46,8 +46,8 @@ def test_inside(env):
     dishtowel = env.scene.object_registry("name", "dishtowel")
 
     place_obj_on_floor_plane(bottom_cabinet)
-    bowl.set_position([0.0, 0.0, 0.08])
-    dishtowel.set_position([0, 0.0, 0.5])
+    bowl.set_position_orientation(position=[0.0, 0.0, 0.08])
+    dishtowel.set_position_orientation(position=[0, 0.0, 0.5])
 
     for _ in range(5):
         og.sim.step()
@@ -55,8 +55,8 @@
     assert bowl.states[Inside].get_value(bottom_cabinet)
     assert dishtowel.states[Inside].get_value(bottom_cabinet)
 
-    bowl.set_position([10.0, 10.0, 1.0])
-    dishtowel.set_position([20.0, 20.0, 1.0])
+    bowl.set_position_orientation(position=[10.0, 10.0, 1.0])
+    dishtowel.set_position_orientation(position=[20.0, 20.0, 1.0])
 
     for _ in range(5):
         og.sim.step()
@@ -85,7 +85,7 @@ def test_under(env):
 
         assert obj.states[Under].get_value(breakfast_table)
 
-        obj.set_position(th.ones(3) * 10 * (i + 1))
+        obj.set_position_orientation(position=th.ones(3) * 10 * (i + 1))
         og.sim.step()
 
         assert not obj.states[Under].get_value(breakfast_table)
@@ -112,7 +112,7 @@ def test_touching(env):
         assert obj.states[Touching].get_value(breakfast_table)
         assert breakfast_table.states[Touching].get_value(obj)
 
-        obj.set_position(th.ones(3) * 10 * (i + 1))
+        obj.set_position_orientation(position=th.ones(3) * 10 * (i + 1))
         og.sim.step()
 
         assert not obj.states[Touching].get_value(breakfast_table)
@@ -139,7 +139,7 @@ def test_contact_bodies(env):
         assert obj.root_link in
breakfast_table.states[ContactBodies].get_value() assert breakfast_table.root_link in obj.states[ContactBodies].get_value() - obj.set_position(th.ones(3) * 10 * (i + 1)) + obj.set_position_orientation(position=th.ones(3) * 10 * (i + 1)) og.sim.step() assert obj.root_link not in breakfast_table.states[ContactBodies].get_value() @@ -164,7 +164,7 @@ def test_next_to(env): assert obj.states[NextTo].get_value(bottom_cabinet) assert bottom_cabinet.states[NextTo].get_value(obj) - obj.set_position(th.ones(3) * 10 * (i + 1)) + obj.set_position_orientation(position=th.ones(3) * 10 * (i + 1)) og.sim.step() assert not obj.states[NextTo].get_value(bottom_cabinet) @@ -187,7 +187,7 @@ def test_overlaid(env): assert carpet.states[Overlaid].get_value(breakfast_table) - carpet.set_position(th.ones(3) * 20.0) + carpet.set_position_orientation(position=th.ones(3) * 20.0) og.sim.step() assert not carpet.states[Overlaid].get_value(breakfast_table) @@ -204,10 +204,10 @@ def test_pose(env): dishtowel = env.scene.object_registry("name", "dishtowel") pos1, orn1 = get_random_pose() - breakfast_table.set_position_orientation(pos1, orn1) + breakfast_table.set_position_orientation(position=pos1, orientation=orn1) pos2, orn2 = get_random_pose() - dishtowel.set_position_orientation(pos2, orn2) + dishtowel.set_position_orientation(position=pos2, orientation=orn2) assert th.allclose(breakfast_table.states[Pose].get_value()[0], pos1) assert th.allclose(breakfast_table.states[Pose].get_value()[1], orn1) or th.allclose( @@ -228,10 +228,10 @@ def test_aabb(env): dishtowel = env.scene.object_registry("name", "dishtowel") pos1, orn1 = get_random_pose() - breakfast_table.set_position_orientation(pos1, orn1) + breakfast_table.set_position_orientation(position=pos1, orientation=orn1) pos2, orn2 = get_random_pose() - dishtowel.set_position_orientation(pos2, orn2) + dishtowel.set_position_orientation(position=pos2, orientation=orn2) # Need to take one sim step og.sim.step() @@ -274,8 +274,8 @@ def test_adjacency(env): ) ) - bowl.set_position([0.0, 0.0, 1.0]) - dishtowel.set_position([0.0, 0.0, 2.0]) + bowl.set_position_orientation(position=[0.0, 0.0, 1.0]) + dishtowel.set_position_orientation(position=[0.0, 0.0, 2.0]) # Need to take one sim step og.sim.step() @@ -323,8 +323,8 @@ def test_temperature(env): microwave.joints["j_link_0"].set_pos(math.pi / 2) # Set the objects to be inside the microwave - bagel.set_position_orientation([0, 0, 0.11], [0, 0, 0, 1]) - dishtowel.set_position_orientation([-0.15, 0, 0.11], [0, 0, 0, 1]) + bagel.set_position_orientation(position=[0, 0, 0.11], orientation=[0, 0, 0, 1]) + dishtowel.set_position_orientation(position=[-0.15, 0, 0.11], orientation=[0, 0, 0, 1]) for _ in range(5): og.sim.step() @@ -370,8 +370,8 @@ def test_temperature(env): assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE # Set the objects to be on top of the stove - bagel.set_position_orientation([0.71, 0.11, 0.88], [0, 0, 0, 1]) - dishtowel.set_position_orientation([0.84, 0.11, 0.88], [0, 0, 0, 1]) + bagel.set_position_orientation(position=[0.71, 0.11, 0.88], orientation=[0, 0, 0, 1]) + dishtowel.set_position_orientation(position=[0.84, 0.11, 0.88], orientation=[0, 0, 0, 1]) for _ in range(5): og.sim.step() @@ -394,8 +394,8 @@ def test_temperature(env): assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) # Set the objects to be inside the fridge - bagel.set_position_orientation([1.9, 0, 0.89], [0, 0, 0, 1]) - 
dishtowel.set_position_orientation([2.1, 0, 0.89], [0, 0, 0, 1]) + bagel.set_position_orientation(position=[1.9, 0, 0.89], orientation=[0, 0, 0, 1]) + dishtowel.set_position_orientation(position=[2.1, 0, 0.89], orientation=[0, 0, 0, 1]) for _ in range(5): og.sim.step() @@ -650,7 +650,7 @@ def test_toggled_on(env): stove.set_position_orientation( [1.48, 0.3, 0.443], T.euler2quat(th.tensor([0, 0, -math.pi / 2.0], dtype=th.float32)) ) - robot.set_position_orientation([0.0, 0.38, 0.0], [0, 0, 0, 1]) + robot.set_position_orientation(position=[0.0, 0.38, 0.0], orientation=[0, 0, 0, 1]) assert not stove.states[ToggledOn].get_value() @@ -706,9 +706,9 @@ def test_attached_to(env): shelf_shelf = env.scene.object_registry("name", "shelf_shelf") shelf_baseboard = env.scene.object_registry("name", "shelf_baseboard") - shelf_back_panel.set_position_orientation([0, 0, 0.01], [0, 0, 0, 1]) + shelf_back_panel.set_position_orientation(position=[0, 0, 0.01], orientation=[0, 0, 0, 1]) shelf_back_panel.keep_still() - shelf_shelf.set_position_orientation([0, 0.03, 0.17], [0, 0, 0, 1]) + shelf_shelf.set_position_orientation(position=[0, 0.03, 0.17], orientation=[0, 0, 0, 1]) shelf_shelf.keep_still() # The shelf should not be attached to the back panel (no contact yet) @@ -736,17 +736,17 @@ def test_attached_to(env): force_dir = th.tensor([0, 0, 1]) # A small force will not break the attachment force_mag = 10 - apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position()) + apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position_orientation()[0]) og.sim.step() assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) # A large force will break the attachment force_mag = 1000 - apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position()) + apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position_orientation()[0]) og.sim.step() assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) - shelf_shelf.set_position_orientation([0, 0, 10], [0, 0, 0, 1]) + shelf_shelf.set_position_orientation(position=[0, 0, 10], orientation=[0, 0, 0, 1]) assert not shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True) # The shelf should not be attached to the back panel because the alignment is wrong assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) @@ -756,7 +756,7 @@ def test_attached_to(env): assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) # The shelf baseboard should NOT be attached because the attachment has the wrong type - shelf_baseboard.set_position_orientation([0.37, -0.93, 0.03], [0, 0, 0, 1]) + shelf_baseboard.set_position_orientation(position=[0.37, -0.93, 0.03], orientation=[0, 0, 0, 1]) assert not shelf_baseboard.states[AttachedTo].set_value(shelf_back_panel, True, bypass_alignment_checking=True) assert not shelf_baseboard.states[AttachedTo].get_value(shelf_back_panel) @@ -800,7 +800,7 @@ def test_particle_sink(env): # There should be no water particles. assert water_system.n_particles == 0 - sink_pos = sink.states[ParticleSink].link.get_position() + sink_pos = sink.states[ParticleSink].link.get_position_orientation()[0] water_system.generate_particles(positions=[(sink_pos + th.tensor([0, 0, 0.05])).tolist()]) # There should be exactly 1 water particle. 
assert water_system.n_particles == 1 @@ -918,7 +918,7 @@ def test_particle_remover(env): # Test adjacency - vacuum.set_position(th.ones(3) * 50.0) + vacuum.set_position_orientation(position=th.ones(3) * 50.0) place_objA_on_objB_bbox(remover_dishtowel, breakfast_table, z_offset=0.03) og.sim.step() # Place single particle of water on middle of table @@ -1091,7 +1091,7 @@ def test_draped(env): assert carpet.states[Draped].get_value(breakfast_table) - carpet.set_position([20.0, 20.0, 1.0]) + carpet.set_position_orientation(position=[20.0, 20.0, 1.0]) for _ in range(5): og.sim.step() diff --git a/tests/test_primitives.py b/tests/test_primitives.py index 55eaac812..7cbb26966 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -86,7 +86,7 @@ def execute_controller(ctrl_gen, env): def primitive_tester(env, objects, primitives, primitives_args): for obj in objects: env.scene.add_object(obj["object"]) - obj["object"].set_position_orientation(obj["position"], obj["orientation"]) + obj["object"].set_position_orientation(position=obj["position"], orientation=obj["orientation"]) og.sim.step() controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index 1b770fd8d..8e7f7f606 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -68,10 +68,10 @@ def camera_pose_test(flatcache): assert quaternions_close(sensor_world_ori, sensor_world_ori_gt, atol=1e-3) # Now, we want to move the robot and check if the sensor pose has been updated - old_camera_local_pose = vision_sensor.get_local_pose() + old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent") robot.set_position_orientation(position=[100, 100, 100]) - new_camera_local_pose = vision_sensor.get_local_pose() + new_camera_local_pose = vision_sensor.get_position_orientation(frame="parent") new_camera_world_pose = vision_sensor.get_position_orientation() robot_pose_mat = pose2mat(robot.get_position_orientation()) expected_camera_world_pos, expected_camera_world_ori = mat2pose(robot_pose_mat @ robot_to_sensor_mat) @@ -81,8 +81,8 @@ def camera_pose_test(flatcache): # Then, we want to move the local pose of the camera and check # 1) if the world pose is updated 2) if the robot stays in the same position - old_camera_local_pose = vision_sensor.get_local_pose() - vision_sensor.set_local_pose(position=[10, 10, 10], orientation=[0, 0, 0, 1]) + old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent") + vision_sensor.set_position_orientation(position=[10, 10, 10], orientation=[0, 0, 0, 1], frame="parent") new_camera_world_pose = vision_sensor.get_position_orientation() camera_parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(vision_sensor.prim) camera_parent_path = str(camera_parent_prim.GetPath()) @@ -93,22 +93,24 @@ def camera_pose_test(flatcache): ) assert th.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3) assert quaternions_close(new_camera_world_pose[1], expected_new_camera_world_ori, atol=1e-3) - assert th.allclose(robot.get_position(), th.tensor([100, 100, 100], dtype=th.float32), atol=1e-3) + assert th.allclose(robot.get_position_orientation()[0], th.tensor([100, 100, 100], dtype=th.float32), atol=1e-3) # Finally, we want to move the world pose of the camera and check # 1) if the local pose is updated 2) if the robot stays in the same position robot.set_position_orientation(position=[150, 150, 100]) - 
old_camera_local_pose = vision_sensor.get_local_pose() - vision_sensor.set_position_orientation([150, 150, 101.36912537], [-0.29444987, 0.29444981, 0.64288363, -0.64288352]) - new_camera_local_pose = vision_sensor.get_local_pose() + old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent") + vision_sensor.set_position_orientation( + position=[150, 150, 101.36912537], orientation=[-0.29444987, 0.29444981, 0.64288363, -0.64288352] + ) + new_camera_local_pose = vision_sensor.get_position_orientation(frame="parent") assert not th.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3) assert not quaternions_close(old_camera_local_pose[1], new_camera_local_pose[1], atol=1e-3) - assert th.allclose(robot.get_position(), th.tensor([150, 150, 100], dtype=th.float32), atol=1e-3) + assert th.allclose(robot.get_position_orientation()[0], th.tensor([150, 150, 100], dtype=th.float32), atol=1e-3) # Another test we want to try is setting the camera's parent scale and check if the world pose is updated camera_parent_prim.GetAttribute("xformOp:scale").Set(lazy.pxr.Gf.Vec3d([2.0, 2.0, 2.0])) camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path) - camera_local_pose = vision_sensor.get_local_pose() + camera_local_pose = vision_sensor.get_position_orientation(frame="parent") expected_mat = camera_parent_world_transform @ pose2mat(camera_local_pose) expected_mat[:3, :3] = expected_mat[:3, :3] / th.norm(expected_mat[:3, :3], dim=0, keepdim=True) expected_new_camera_world_pos, _ = mat2pose(expected_mat) diff --git a/tests/test_scene_graph.py b/tests/test_scene_graph.py index 9cfc444e5..50227a613 100644 --- a/tests/test_scene_graph.py +++ b/tests/test_scene_graph.py @@ -79,7 +79,7 @@ def create_robot_config(name, position): breakfast_table = scene.object_registry("name", "breakfast_table") bowl = scene.object_registry("name", "bowl") place_obj_on_floor_plane(breakfast_table) - bowl.set_position_orientation([0.0, -0.8, 0.1], [0, 0, 0, 1]) + bowl.set_position_orientation(position=[0.0, -0.8, 0.1], orientation=[0, 0, 0, 1]) # Test single robot scene graph scene_graph_builder_single = SceneGraphBuilder( diff --git a/tests/test_sensors.py b/tests/test_sensors.py index c9b95fe7f..0595be0a3 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -15,8 +15,10 @@ def test_segmentation_modalities(env): dishtowel = env.scene.object_registry("name", "dishtowel") robot = env.scene.robots[0] place_obj_on_floor_plane(breakfast_table) - dishtowel.set_position_orientation([-0.4, 0.0, 0.55], [0, 0, 0, 1]) - robot.set_position_orientation([0.0, 0.8, 0.0], T.euler2quat(th.tensor([0, 0, -math.pi / 2], dtype=th.float32))) + dishtowel.set_position_orientation(position=[-0.4, 0.0, 0.55], orientation=[0, 0, 0, 1]) + robot.set_position_orientation( + position=[0.0, 0.8, 0.0], orientation=T.euler2quat(th.tensor([0, 0, -math.pi / 2], dtype=th.float32)) + ) robot.reset() modalities_required = ["seg_semantic", "seg_instance", "seg_instance_id"] @@ -108,8 +110,10 @@ def test_bbox_modalities(env): dishtowel = env.scene.object_registry("name", "dishtowel") robot = env.scene.robots[0] place_obj_on_floor_plane(breakfast_table) - dishtowel.set_position_orientation([-0.4, 0.0, 0.55], [0, 0, 0, 1]) - robot.set_position_orientation([0, 0.8, 0.0], T.euler2quat(th.tensor([0, 0, -math.pi / 2], dtype=th.float32))) + dishtowel.set_position_orientation(position=[-0.4, 0.0, 0.55], orientation=[0, 0, 0, 1]) + robot.set_position_orientation( + position=[0, 0.8, 0.0], 
orientation=T.euler2quat(th.tensor([0, 0, -math.pi / 2], dtype=th.float32)) + ) robot.reset() modalities_required = ["bbox_2d_tight", "bbox_2d_loose", "bbox_3d"] diff --git a/tests/test_transition_rules.py b/tests/test_transition_rules.py index c2c1bf8c9..7421496a8 100644 --- a/tests/test_transition_rules.py +++ b/tests/test_transition_rules.py @@ -33,8 +33,10 @@ def test_dryer_rule(env): og.sim.step() # Place the two objects inside the dryer - remover_dishtowel.set_position_orientation([0.06, 0, 0.2], [0.0311883, -0.23199339, -0.06849886, 0.96980107]) - bowl.set_position_orientation([0.0, 0.0, 0.2], [0, 0, 0, 1]) + remover_dishtowel.set_position_orientation( + position=[0.06, 0, 0.2], orientation=[0.0311883, -0.23199339, -0.06849886, 0.96980107] + ) + bowl.set_position_orientation(position=[0.0, 0.0, 0.2], orientation=[0, 0, 0, 1]) og.sim.step() assert remover_dishtowel.states[Saturated].set_value(water, True) @@ -95,9 +97,11 @@ def test_washer_rule(env): # Place the two objects inside the washer # (Hacky) use baking_sheet as a stepping stone to elevate the objects so that they are inside the container volume. - baking_sheet.set_position_orientation([0.0, 0.0, 0.04], T.euler2quat(th.tensor([math.pi, 0, 0], dtype=th.float32))) - remover_dishtowel.set_position_orientation([0.0, 0.0, 0.05], [0, 0, 0, 1]) - bowl.set_position_orientation([0.10, 0.0, 0.08], [0, 0, 0, 1]) + baking_sheet.set_position_orientation( + position=[0.0, 0.0, 0.04], orientation=T.euler2quat(th.tensor([math.pi, 0, 0], dtype=th.float32)) + ) + remover_dishtowel.set_position_orientation(position=[0.0, 0.0, 0.05], orientation=[0, 0, 0, 1]) + bowl.set_position_orientation(position=[0.10, 0.0, 0.08], orientation=[0, 0, 0, 1]) og.sim.step() assert bowl.states[Covered].set_value(dust, True) @@ -161,7 +165,7 @@ def test_slicing_rule(env): og.sim.step() table_knife.set_position_orientation( - [-0.05, 0.0, 0.15], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.15], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() assert not table_knife.states[Touching].get_value(apple) @@ -171,7 +175,7 @@ def test_slicing_rule(env): assert env.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation( - [-0.05, 0.0, 0.10], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.10], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() final_half_apples = env.scene.object_registry("category", "half_apple", set()).copy() @@ -216,7 +220,7 @@ def test_dicing_rule_cooked(env): assert cooked_diced_apple.n_particles == 0 table_knife.set_position_orientation( - [-0.05, 0.0, 0.15], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.15], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -226,7 +230,7 @@ def test_dicing_rule_cooked(env): assert env.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation( - [-0.05, 0.0, 0.07], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.07], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -236,7 +240,7 @@ def test_dicing_rule_cooked(env): # Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back table_knife.set_position_orientation( - [-0.05, 0.0, 1.15], T.euler2quat(th.tensor([-math.pi / 2, 
0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 1.15], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -266,7 +270,7 @@ def test_dicing_rule_uncooked(env): assert diced_apple.n_particles == 0 table_knife.set_position_orientation( - [-0.05, 0.0, 0.15], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.15], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -276,7 +280,7 @@ def test_dicing_rule_uncooked(env): assert env.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation( - [-0.05, 0.0, 0.07], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 0.07], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -286,7 +290,7 @@ def test_dicing_rule_uncooked(env): # Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back table_knife.set_position_orientation( - [-0.05, 0.0, 1.15], T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) + position=[-0.05, 0.0, 1.15], orientation=T.euler2quat(th.tensor([-math.pi / 2, 0, 0], dtype=th.float32)) ) og.sim.step() @@ -313,11 +317,11 @@ def test_melting_rule(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) - swiss_cheese.set_position_orientation([-0.24, 0.11, 0.92], [0, 0, 0, 1]) + swiss_cheese.set_position_orientation(position=[-0.24, 0.11, 0.92], orientation=[0, 0, 0, 1]) og.sim.step() assert swiss_cheese.states[Inside].get_value(stockpot) @@ -362,7 +366,7 @@ def test_cooking_physical_particle_rule_failure_recipe_systems(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) @@ -402,7 +406,7 @@ def test_cooking_physical_particle_rule_success(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) @@ -462,7 +466,7 @@ def test_mixing_rule_failure_recipe_systems(env): assert lemonade.n_particles == 0 assert sludge.n_particles == 0 - tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) + tablespoon.set_position_orientation(position=[0.04, 0.0, 0.11], orientation=[0, 0, 0, 1]) og.sim.step() assert tablespoon.states[Touching].get_value(bowl) @@ -506,7 +510,7 @@ def test_mixing_rule_failure_nonrecipe_systems(env): assert lemonade.n_particles == 0 assert sludge.n_particles == 0 - tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) + tablespoon.set_position_orientation(position=[0.04, 0.0, 0.11], orientation=[0, 0, 0, 1]) og.sim.step() assert tablespoon.states[Touching].get_value(bowl) @@ -546,7 +550,7 @@ def test_mixing_rule_success(env): assert lemonade.n_particles == 0 - tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) + tablespoon.set_position_orientation(position=[0.04, 0.0, 0.11], orientation=[0, 0, 0, 1]) og.sim.step() assert 
tablespoon.states[Touching].get_value(bowl) @@ -577,11 +581,11 @@ def test_cooking_system_rule_failure_recipe_systems(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) - chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) + chicken.set_position_orientation(position=[-0.24, 0.11, 0.86], orientation=[0, 0, 0, 1]) # This fails the recipe because chicken broth (recipe system) is not in the stockpot chicken_broth.generate_particles(positions=[[-0.33, 0.05, 1.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) @@ -633,11 +637,11 @@ def test_cooking_system_rule_failure_nonrecipe_systems(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) - chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) + chicken.set_position_orientation(position=[-0.24, 0.11, 0.86], orientation=[0, 0, 0, 1]) # This fails the recipe because water (nonrecipe system) is inside the stockpot water.generate_particles(positions=[[-0.24, 0.11, 0.93]]) chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]]) @@ -692,13 +696,13 @@ def test_cooking_system_rule_failure_nonrecipe_objects(env): place_obj_on_floor_plane(stove) og.sim.step() - stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) + stockpot.set_position_orientation(position=[-0.24, 0.11, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) - chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) + chicken.set_position_orientation(position=[-0.24, 0.11, 0.86], orientation=[0, 0, 0, 1]) # This fails the recipe because the bowl (nonrecipe object) is inside the stockpot - bowl.set_position_orientation([-0.20, 0.15, 1], [0, 0, 0, 1]) + bowl.set_position_orientation(position=[-0.20, 0.15, 1], orientation=[0, 0, 0, 1]) chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]]) @@ -856,13 +860,13 @@ def test_cooking_object_rule_failure_recipe_objects(env): place_obj_on_floor_plane(oven) og.sim.step() - baking_sheet.set_position_orientation([0.0, 0.05, 0.455], [0, 0, 0, 1]) + baking_sheet.set_position_orientation(position=[0.0, 0.05, 0.455], orientation=[0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) # This fails the recipe because it requires the bagel dough to be on top of the baking sheet - bagel_dough.set_position_orientation([1, 0, 0.5], [0, 0, 0, 1]) - raw_egg.set_position_orientation([1.02, 0, 0.55], [0, 0, 0, 1]) + bagel_dough.set_position_orientation(position=[1, 0, 0.5], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[1.02, 0, 0.55], orientation=[0, 0, 0, 1]) og.sim.step() assert not bagel_dough.states[OnTop].get_value(baking_sheet) @@ -898,12 +902,12 @@ def test_cooking_object_rule_failure_unary_states(env): place_obj_on_floor_plane(oven) og.sim.step() - baking_sheet.set_position_orientation([0.0, 0.05, 0.455], [0, 0, 0, 1]) + baking_sheet.set_position_orientation(position=[0.0, 0.05, 0.455], orientation=[0, 0, 0, 
1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) - bagel_dough.set_position_orientation([0, 0, 0.492], [0, 0, 0, 1]) - raw_egg.set_position_orientation([0.02, 0, 0.534], [0, 0, 0, 1]) + bagel_dough.set_position_orientation(position=[0, 0, 0.492], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[0.02, 0, 0.534], orientation=[0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) @@ -941,12 +945,12 @@ def test_cooking_object_rule_failure_binary_system_states(env): place_obj_on_floor_plane(oven) og.sim.step() - baking_sheet.set_position_orientation([0.0, 0.05, 0.455], [0, 0, 0, 1]) + baking_sheet.set_position_orientation(position=[0.0, 0.05, 0.455], orientation=[0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) - bagel_dough.set_position_orientation([0, 0, 0.492], [0, 0, 0, 1]) - raw_egg.set_position_orientation([0.02, 0, 0.534], [0, 0, 0, 1]) + bagel_dough.set_position_orientation(position=[0, 0, 0.492], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[0.02, 0, 0.534], orientation=[0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) @@ -984,12 +988,12 @@ def test_cooking_object_rule_failure_binary_object_states(env): place_obj_on_floor_plane(oven) og.sim.step() - baking_sheet.set_position_orientation([0.0, 0.05, 0.455], [0, 0, 0, 1]) + baking_sheet.set_position_orientation(position=[0.0, 0.05, 0.455], orientation=[0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) - bagel_dough.set_position_orientation([0, 0, 0.492], [0, 0, 0, 1]) - raw_egg.set_position_orientation([0.12, 0.15, 0.47], [0, 0, 0, 1]) + bagel_dough.set_position_orientation(position=[0, 0, 0.492], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[0.12, 0.15, 0.47], orientation=[0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) # This fails the recipe because it requires the raw egg to be on top of the bagel dough @@ -1028,12 +1032,12 @@ def test_cooking_object_rule_failure_wrong_heat_source(env): place_obj_on_floor_plane(stove) og.sim.step() - heat_source_position = stove.states[HeatSourceOrSink].link.get_position() - baking_sheet.set_position_orientation([-0.20, 0, 0.80], [0, 0, 0, 1]) + heat_source_position = stove.states[HeatSourceOrSink].link.get_position_orientation()[0] + baking_sheet.set_position_orientation(position=[-0.20, 0, 0.80], orientation=[0, 0, 0, 1]) og.sim.step() - bagel_dough.set_position_orientation([-0.20, 0, 0.84], [0, 0, 0, 1]) - raw_egg.set_position_orientation([-0.18, 0, 0.89], [0, 0, 0, 1]) + bagel_dough.set_position_orientation(position=[-0.20, 0, 0.84], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[-0.18, 0, 0.89], orientation=[0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) @@ -1076,12 +1080,12 @@ def test_cooking_object_rule_success(env): place_obj_on_floor_plane(oven) og.sim.step() - baking_sheet.set_position_orientation([0.0, 0.05, 0.455], [0, 0, 0, 1]) + baking_sheet.set_position_orientation(position=[0.0, 0.05, 0.455], orientation=[0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) - bagel_dough.set_position_orientation([0, 0, 0.492], [0, 0, 0, 1]) - raw_egg.set_position_orientation([0.02, 0, 0.534], [0, 0, 0, 1]) + 
bagel_dough.set_position_orientation(position=[0, 0, 0.492], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[0.02, 0, 0.534], orientation=[0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) @@ -1147,7 +1151,7 @@ def test_single_toggleable_machine_rule_output_system_failure_wrong_container(en milk.generate_particles(positions=th.tensor([[0.02, 0.06, 0.22]])) chocolate_sauce.generate_particles(positions=th.tensor([[-0.05, -0.04, 0.22]])) - ice_cream.set_position_orientation([0.03, -0.02, 0.23], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0.03, -0.02, 0.23], orientation=[0, 0, 0, 1]) og.sim.step() @@ -1197,7 +1201,7 @@ def test_single_toggleable_machine_rule_output_system_failure_recipe_systems(env # This fails the recipe because it requires the milk to be in the blender milk.generate_particles(positions=th.tensor([[0.02, 0, 1.57]], dtype=th.float32)) chocolate_sauce.generate_particles(positions=th.tensor([[0, -0.02, 0.57]], dtype=th.float32)) - ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0, 0, 0.51], orientation=[0, 0, 0, 1]) og.sim.step() assert not blender.states[Contains].get_value(milk) @@ -1242,7 +1246,7 @@ def test_single_toggleable_machine_rule_output_system_failure_recipe_objects(env milk.generate_particles(positions=th.tensor([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=th.tensor([[0, -0.02, 0.57]])) # This fails the recipe because it requires the ice cream to be inside the blender - ice_cream.set_position_orientation([0, 0, 1.51], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0, 0, 1.51], orientation=[0, 0, 0, 1]) og.sim.step() @@ -1287,7 +1291,7 @@ def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_systems( chocolate_sauce.generate_particles(positions=th.tensor([[0, -0.02, 0.57]])) # This fails the recipe because water (nonrecipe system) is in the blender water.generate_particles(positions=th.tensor([[0, 0, 0.57]])) - ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0, 0, 0.51], orientation=[0, 0, 0, 1]) og.sim.step() @@ -1336,9 +1340,9 @@ def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_objects( milk.generate_particles(positions=th.tensor([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=th.tensor([[0, -0.02, 0.57]])) - ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0, 0, 0.51], orientation=[0, 0, 0, 1]) # This fails the recipe because the bowl (nonrecipe object) is in the blender - bowl.set_position_orientation([0, 0, 0.58], [0, 0, 0, 1]) + bowl.set_position_orientation(position=[0, 0, 0.58], orientation=[0, 0, 0, 1]) og.sim.step() @@ -1385,7 +1389,7 @@ def test_single_toggleable_machine_rule_output_system_success(env): milk.generate_particles(positions=th.tensor([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=th.tensor([[0, -0.02, 0.57]])) - ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) + ice_cream.set_position_orientation(position=[0, 0, 0.51], orientation=[0, 0, 0, 1]) og.sim.step() @@ -1438,8 +1442,8 @@ def test_single_toggleable_machine_rule_output_object_failure_unary_states(env): place_obj_on_floor_plane(electric_mixer) og.sim.step() - another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1]) - 
raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1]) + another_raw_egg.set_position_orientation(position=[-0.01, -0.14, 0.50], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[-0.01, -0.14, 0.47], orientation=[0, 0, 0, 1]) flour.generate_particles(positions=th.tensor([[-0.01, -0.15, 0.43]])) granulated_sugar.generate_particles(positions=th.tensor([[0.01, -0.15, 0.43]])) vanilla.generate_particles(positions=th.tensor([[0.03, -0.15, 0.43]])) @@ -1512,8 +1516,8 @@ def test_single_toggleable_machine_rule_output_object_success(env): place_obj_on_floor_plane(electric_mixer) og.sim.step() - another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1]) - raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1]) + another_raw_egg.set_position_orientation(position=[-0.01, -0.14, 0.50], orientation=[0, 0, 0, 1]) + raw_egg.set_position_orientation(position=[-0.01, -0.14, 0.47], orientation=[0, 0, 0, 1]) flour.generate_particles(positions=th.tensor([[-0.01, -0.15, 0.43]])) granulated_sugar.generate_particles(positions=th.tensor([[0.01, -0.15, 0.43]])) vanilla.generate_particles(positions=th.tensor([[0.03, -0.15, 0.43]])) diff --git a/tests/utils.py b/tests/utils.py index 9b265a571..0d4aeb0a4 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -41,7 +41,7 @@ def retrieve_obj_cfg(obj): "category": obj.category, "model": obj.model, "prim_type": obj.prim_type, - "position": obj.get_position(), + "position": obj.get_position_orientation()[0], "scale": obj.scale, "abilities": obj.abilities, "visual_only": obj.visual_only, @@ -220,14 +220,14 @@ def place_objA_on_objB_bbox(objA, objB, x_offset=0.0, y_offset=0.0, z_offset=0.0 objA_aabb_center, objA_aabb_extent = objA.aabb_center, objA.aabb_extent objB_aabb_center, objB_aabb_extent = objB.aabb_center, objB.aabb_extent - objA_aabb_offset = objA.get_position() - objA_aabb_center + objA_aabb_offset = objA.get_position_orientation()[0] - objA_aabb_center target_objA_aabb_pos = ( objB_aabb_center + th.tensor([0, 0, (objB_aabb_extent[2] + objA_aabb_extent[2]) / 2.0]) + th.tensor([x_offset, y_offset, z_offset]) ) - objA.set_position(target_objA_aabb_pos + objA_aabb_offset) + objA.set_position_orientation(position=target_objA_aabb_pos + objA_aabb_offset) def place_obj_on_floor_plane(obj, x_offset=0.0, y_offset=0.0, z_offset=0.01): @@ -237,10 +237,10 @@ def place_obj_on_floor_plane(obj, x_offset=0.0, y_offset=0.0, z_offset=0.01): obj.root_link.reset() obj_aabb_center, obj_aabb_extent = obj.aabb_center, obj.aabb_extent - obj_aabb_offset = obj.get_position() - obj_aabb_center + obj_aabb_offset = obj.get_position_orientation()[0] - obj_aabb_center target_obj_aabb_pos = th.tensor([0, 0, obj_aabb_extent[2] / 2.0]) + th.tensor([x_offset, y_offset, z_offset]) - obj.set_position(target_obj_aabb_pos + obj_aabb_offset) + obj.set_position_orientation(position=target_obj_aabb_pos + obj_aabb_offset) def remove_all_systems(scene):
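The multi-environment tests above repeatedly assert the frame-composition identity world_pose = scene_pose composed with the scene-frame pose, via T.pose_transform and quat_multiply. As a reference for reviewers, here is a minimal self-contained sketch of that composition in plain PyTorch. The helper names mirror the ones used in the tests, but these standalone definitions are illustrative assumptions, not OmniGibson's actual transform_utils implementations:

    import torch as th

    def quat_multiply(q1, q0):
        # Hamilton product of (x, y, z, w) quaternions: rotate by q0, then by q1
        x0, y0, z0, w0 = q0
        x1, y1, z1, w1 = q1
        return th.stack([
            w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0,
            w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0,
            w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0,
            w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0,
        ])

    def quat_apply(q, v):
        # Rotate a 3-vector v by the quaternion q = (x, y, z, w)
        qvec, w = q[:3], q[3]
        t = 2.0 * th.linalg.cross(qvec, v)
        return v + w * t + th.linalg.cross(qvec, t)

    def pose_transform(scene_pos, scene_quat, local_pos, local_quat):
        # World-frame pose of an object whose scene-frame pose is (local_pos, local_quat)
        world_pos = scene_pos + quat_apply(scene_quat, local_pos)
        world_quat = quat_multiply(scene_quat, local_quat)
        return world_pos, world_quat

For an un-rotated scene prim (scene_quat = [0, 0, 0, 1]) the rotation terms drop out, which is why the local-position tests can use the simple addition robot_1_pos_global == pos_scene + robot_1_pos_local.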