|
| 1 | +""" |
| 2 | +Stitch multiple point clouds captured with a robot mounted camera. |
| 3 | +
|
| 4 | +The sample simulates a camera capturing point clouds at different robot poses. |
| 5 | +The point clouds are pre-aligned, i.e., transformed into the robot base frame using the robot's pose and the hand-eye |
| 6 | +calibration transform. The resulting stitched point cloud is displayed and saved to a PLY file. |
| 7 | +
|
| 8 | +The sample demonstrates stitching of a small and a big object. |
| 9 | +
|
| 10 | +For the small object that fully fits within the camera's field of view in every capture, the result of stitching multiple |
| 11 | +captures is a complete and detailed point cloud of the object. Since the object remains fully visible in each frame from |
| 12 | +different perspectives (front, back, left, and right), the stitching produces data for all sides of the object. |
| 13 | +
|
| 14 | +For the large object that does not fit within the camera's field of view, stitching is used to expand the effective |
| 15 | +field of view. By capturing different sections of the object and combining them, the result is a complete view of the |
| 16 | +object, but from a more limited number of perspectives (e.g., mostly front-facing), depending on the capture setup. |
| 17 | +
|
| 18 | +The resulting stitched point cloud is voxel downsampled if the `--full-resolution` flag is not set. |
| 19 | +
|
| 20 | +The dataset with ZDF and YML files (StitchingPointClouds.zip) for this sample can be found at |
| 21 | +https://support.zivid.com/en/latest/academy/applications/stitching.html. |
| 22 | +
|
| 23 | +Extract the content into %ProgramData%/Zivid: |
| 24 | + StitchingPointClouds/ |
| 25 | + ├── SmallObject/ |
| 26 | + └── BigObject/ |
| 27 | +
|
Each of these folders must contain point clouds in ZDF format and robot poses and a hand-eye transform in YML format.
| 29 | +
|
| 30 | +Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. |
| 31 | +""" |
| 32 | + |
| 33 | +import argparse |
| 34 | +from pathlib import Path |
| 35 | + |
| 36 | +import numpy as np |
| 37 | +import zivid |
| 38 | +from zivid.experimental.point_cloud_export import export_unorganized_point_cloud |
| 39 | +from zivid.experimental.point_cloud_export.file_format import PLY |
| 40 | +from zivid.experimental.toolbox.point_cloud_registration import ( |
| 41 | + LocalPointCloudRegistrationParameters, |
| 42 | + local_point_cloud_registration, |
| 43 | +) |
| 44 | +from zividsamples.display import display_pointcloud |
| 45 | +from zividsamples.paths import get_sample_data_path |
| 46 | +from zividsamples.save_load_matrix import load_and_assert_affine_matrix |
| 47 | + |
| 48 | + |
| 49 | +def _options() -> argparse.Namespace: |
| 50 | + """Parse command-line arguments. |
| 51 | +
|
| 52 | + Returns: |
| 53 | + argparse.Namespace: Parsed command-line arguments. |
| 54 | +
|
| 55 | + """ |
| 56 | + parser = argparse.ArgumentParser() |
| 57 | + parser.add_argument( |
| 58 | + "--full-resolution", |
| 59 | + action="store_true", |
| 60 | + help="Use full resolution for stitching. If not set, downsampling is applied.", |
| 61 | + ) |
| 62 | + |
| 63 | + return parser.parse_args() |
| 64 | + |
| 65 | + |
def _stitch_point_clouds(directory: Path, full_resolution: bool) -> zivid.UnorganizedPointCloud:
    """Stitch multiple point clouds captured at different robot poses.

    Each capture is transformed into the robot base frame using the corresponding robot pose and the
    hand-eye calibration transform, then refined against the accumulated result with local point
    cloud registration.

    Args:
        directory (Path): Path to directory containing point clouds in ZDF and robot poses and a hand-eye transform in YML format.
        full_resolution (bool): If True, use full resolution (no downsampling). Otherwise, voxel downsample.

    Returns:
        zivid.UnorganizedPointCloud: The stitched point cloud.

    Raises:
        FileNotFoundError: If required files (ZDFs, robot poses, or hand-eye calibration) are missing in the directory.
        ValueError: If the number of ZDF files and robot pose files do not match.

    """
    # Sort so capture_*.zdf and robot_pose_*.yaml pair up by index: rglob yields files in
    # arbitrary filesystem order, which could otherwise match a capture with the wrong pose.
    zdf_files = sorted(directory.rglob("capture_*.zdf"))
    pose_files = sorted(directory.rglob("robot_pose_*.yaml"))

    if not zdf_files or not pose_files or not (directory / "hand_eye_transform.yaml").exists():
        raise FileNotFoundError("Required files are missing in the directory.")

    if len(zdf_files) != len(pose_files):
        raise ValueError("Number of ZDF files and robot pose files do not match.")

    hand_eye_transform = load_and_assert_affine_matrix(directory / "hand_eye_transform.yaml")

    stitched_point_clouds_to_current_point_cloud_transform = np.eye(4)
    unorganized_stitched_point_cloud_bucket = zivid.UnorganizedPointCloud()
    registration_params = LocalPointCloudRegistrationParameters(max_correspondence_distance=2)

    # Each entry: (zdf_path, base-to-camera transform, registration correction), recorded only for
    # captures that were successfully aligned. Storing the ZDF path keeps entries self-contained,
    # so skipping a non-converged capture cannot misalign the full-resolution pass below.
    list_of_transforms = []
    stitched_count = 1

    for index, zdf in enumerate(zdf_files):
        robot_pose = load_and_assert_affine_matrix(pose_files[index])
        frame = zivid.Frame(zdf)

        base_to_camera_transform = np.matmul(robot_pose, hand_eye_transform)
        # Downsample before registration for speed, and pre-align into the robot base frame.
        unorganized_point_cloud_in_base_frame = (
            frame.point_cloud()
            .to_unorganized_point_cloud()
            .voxel_downsampled(voxel_size=1.0, min_points_per_voxel=2)
            .transformed(base_to_camera_transform)
        )

        if index != 0:
            # Aligning the new point cloud to the existing stitched result using local point cloud registration
            local_point_cloud_registration_result = local_point_cloud_registration(
                target=unorganized_stitched_point_cloud_bucket,
                source=unorganized_point_cloud_in_base_frame,
                parameters=registration_params,
            )

            if not local_point_cloud_registration_result.converged():
                print("Registration did not converge...")
                continue
            stitched_point_clouds_to_current_point_cloud_transform = (
                local_point_cloud_registration_result.transform().to_matrix()
            )
            # Move the accumulated cloud into the new capture's frame instead of moving the new capture.
            unorganized_stitched_point_cloud_bucket.transform(
                np.linalg.inv(stitched_point_clouds_to_current_point_cloud_transform)
            )
            stitched_count += 1

        if full_resolution:
            print(f"{stitched_count} out of {len(zdf_files)} Point clouds aligned.")
        else:
            print(f"{stitched_count} out of {len(zdf_files)} Point clouds stitched.")

        list_of_transforms.append(
            (zdf, base_to_camera_transform, stitched_point_clouds_to_current_point_cloud_transform)
        )

        unorganized_stitched_point_cloud_bucket.extend(unorganized_point_cloud_in_base_frame)

    final_point_cloud = zivid.UnorganizedPointCloud()

    if full_resolution:
        # Re-load each successfully aligned capture at full resolution and replay the transforms
        # recorded during the downsampled alignment pass above.
        for index, (zdf_path, base_to_camera, registration_correction) in enumerate(list_of_transforms):
            frame = zivid.Frame(zdf_path)
            frame.point_cloud().transform(base_to_camera)
            final_point_cloud.transform(np.linalg.inv(registration_correction))
            final_point_cloud.extend(frame.point_cloud().to_unorganized_point_cloud())

            if index > 0:
                print(f"{index + 1} out of {len(list_of_transforms)} point clouds stitched.")
    else:
        # Downsampling the final result for efficiency
        final_point_cloud = unorganized_stitched_point_cloud_bucket.voxel_downsampled(
            voxel_size=1.0, min_points_per_voxel=2
        )

    return final_point_cloud
| 157 | + |
| 158 | + |
def _main() -> None:
    """Stitch, display, and export the small- and big-object sample datasets."""
    args = _options()
    app = zivid.Application()  # noqa: F841 # pylint: disable=unused-variable

    # The dataset (StitchingPointClouds.zip) must be extracted to %ProgramData%\Zivid:
    #   StitchingPointClouds/
    #   ├── SmallObject/
    #   └── BigObject/
    # Each folder must include capture_*.zdf, robot_pose_*.yaml, and hand_eye_transform.yaml.
    dataset_root = get_sample_data_path() / "StitchingPointClouds"
    small_object_dir = dataset_root / "SmallObject"
    big_object_dir = dataset_root / "BigObject"

    if not small_object_dir.exists() or not big_object_dir.exists():
        raise FileNotFoundError(
            f"Missing dataset folders.\n"
            f"Make sure 'StitchingPointClouds/SmallObject' and 'StitchingPointClouds/BigObject' exist at {get_sample_data_path()}.\n\n"
            f"You can download the dataset (StitchingPointClouds.zip) from:\n"
            f"https://support.zivid.com/en/latest/academy/applications/stitching.html"
        )

    def stitch_display_and_export(label: str, directory: Path, output_file_name: str) -> None:
        # Stitch one dataset, show the result, and save it next to this script as a PLY file.
        print(f"Stitching {label} object...")
        stitched = _stitch_point_clouds(directory, args.full_resolution)
        display_pointcloud(
            xyz=stitched.copy_data("xyz"),
            rgb=stitched.copy_data("rgba_srgb")[:, 0:3],
        )
        output_path = Path(__file__).parent / output_file_name
        export_unorganized_point_cloud(stitched, PLY(str(output_path), layout=PLY.Layout.unordered))

    stitch_display_and_export("small", small_object_dir, "StitchedPointCloudSmallObject.ply")
    stitch_display_and_export("big", big_object_dir, "StitchedPointCloudBigObject.ply")
| 200 | + |
| 201 | + |
# Run the sample only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    _main()
0 commit comments