|
| 1 | +import blenderproc as bproc |
| 2 | +import argparse |
| 3 | +import math |
| 4 | +import os |
| 5 | +import numpy as np |
| 6 | +import time |
| 7 | +import urllib.request |
| 8 | +from matplotlib import pyplot as plt |
| 9 | +import blenderproc.python.renderer.RendererUtility as RendererUtility |
| 10 | +from blenderproc.scripts.saveAsImg import save_array_as_image |
| 11 | +from blenderproc.scripts.visHdf5Files import vis_data |
| 12 | + |
| 13 | +# blenderproc run --custom-blender-path={BLD_DIR} --blender-install-path={BLD_DIR} render_single_glb.py --object_path {glb_path} \ |
| 14 | +# --output_dir {out_folder} --engine CYCLES --num_images 12 --camera_dist 1.2 |
| 15 | + |
| 16 | +import bpy |
| 17 | +from mathutils import Vector |
| 18 | + |
# Command-line interface.
# NOTE: this script is launched through `blenderproc run` (see the usage
# comment above), which strips its own arguments before sys.argv reaches us.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--object_path",
    type=str,
    required=True,
    help="Path to the object file",
)
parser.add_argument("--output_dir", type=str, default="./views")
parser.add_argument(
    "--engine",
    type=str,
    default="BLENDER_EEVEE",
    choices=["CYCLES", "BLENDER_EEVEE"],
)
parser.add_argument("--num_images", type=int, default=12)
parser.add_argument("--camera_dist", type=float, default=1.5)

args = parser.parse_args()
| 35 | + |
# Initialize BlenderProc before touching any Blender state.
bproc.init()

context = bpy.context
scene = context.scene
render = scene.render

# Output: 512x512 RGBA PNGs (alpha needed for the transparent film below).
render.engine = args.engine
render.image_settings.file_format = "PNG"
render.image_settings.color_mode = "RGBA"
render.resolution_x = 512
render.resolution_y = 512
render.resolution_percentage = 100

# Cycles settings: GPU device with a small sample/bounce budget, trading
# some quality for render speed; denoising compensates for the low samples.
cycles = scene.cycles
cycles.device = "GPU"
cycles.samples = 32
cycles.diffuse_bounces = 1
cycles.glossy_bounces = 1
cycles.transparent_max_bounces = 3
cycles.transmission_bounces = 3
cycles.filter_width = 0.01
cycles.use_denoising = True
# Transparent background instead of the world color.
render.film_transparent = True
def add_lighting() -> None:
    """Add a single huge overhead area light for soft, near-uniform lighting.

    Bug fix: the light was previously configured via name lookups on
    ``bpy.data.lights["Area"]`` / ``bpy.data.objects["Area"]``.  If any
    datablock named "Area" already exists, Blender names the new one
    "Area.001" and the lookups silently configure the wrong object.
    The add operator makes the new light the active object, so we use
    ``bpy.context.object`` instead.
    """
    bpy.ops.object.light_add(type="AREA")
    light_obj = bpy.context.object  # the object just created by the operator
    light_obj.data.energy = 30000
    # Slightly above the normalized (unit-sized, origin-centered) object;
    # scaled very large so shadows are soft and illumination is even.
    light_obj.location[2] = 0.5
    light_obj.scale[0] = 100
    light_obj.scale[1] = 100
    light_obj.scale[2] = 100
| 71 | + |
| 72 | + |
def reset_scene() -> None:
    """Resets the scene to a clean state."""
    # Remove every object that is not a camera or a light.  Snapshot the
    # collections into lists first so we never remove while iterating.
    doomed = [obj for obj in bpy.data.objects if obj.type not in {"CAMERA", "LIGHT"}]
    for obj in doomed:
        bpy.data.objects.remove(obj, do_unlink=True)
    # Purge all materials, textures and images in one sweep.
    for collection in (bpy.data.materials, bpy.data.textures, bpy.data.images):
        for datablock in list(collection):
            collection.remove(datablock, do_unlink=True)
| 88 | + |
| 89 | + |
# load the glb model
def load_object(object_path: str) -> None:
    """Load a .glb or .fbx model file into the current scene.

    Args:
        object_path: Path to the model file.  The importer is chosen by the
            file extension, matched case-insensitively (fixes rejection of
            e.g. ``MODEL.GLB``).

    Raises:
        ValueError: If the extension is neither .glb nor .fbx.
    """
    suffix = os.path.splitext(object_path)[1].lower()
    if suffix == ".glb":
        # merge_vertices joins duplicate vertices so shading normals are smooth
        bpy.ops.import_scene.gltf(filepath=object_path, merge_vertices=True)
    elif suffix == ".fbx":
        bpy.ops.import_scene.fbx(filepath=object_path)
    else:
        raise ValueError(f"Unsupported file type: {object_path}")
| 99 | + |
def scene_bbox(single_obj=None, ignore_matrix=False):
    """Compute the axis-aligned bounding box of the scene meshes.

    Args:
        single_obj: If given, measure only this object; otherwise every
            mesh object in the scene.
        ignore_matrix: If True, use the object-space bound_box corners
            without applying matrix_world.

    Returns:
        (min_corner, max_corner) as mathutils Vectors.

    Raises:
        RuntimeError: If there are no objects to measure.
    """
    targets = [single_obj] if single_obj is not None else list(scene_meshes())
    if not targets:
        raise RuntimeError("no objects in scene to compute bounding box for")
    lo = [math.inf] * 3
    hi = [-math.inf] * 3
    for obj in targets:
        for corner in obj.bound_box:
            point = Vector(corner)
            if not ignore_matrix:
                point = obj.matrix_world @ point
            for axis in range(3):
                lo[axis] = min(lo[axis], point[axis])
                hi[axis] = max(hi[axis], point[axis])
    return Vector(lo), Vector(hi)
| 115 | + |
def scene_root_objects():
    """Yield every top-level (parentless) object in the current scene."""
    yield from (obj for obj in bpy.context.scene.objects.values() if not obj.parent)
| 120 | + |
def scene_meshes():
    """Yield every object in the current scene whose data block is a mesh."""
    yield from (
        obj
        for obj in bpy.context.scene.objects.values()
        if isinstance(obj.data, bpy.types.Mesh)
    )
| 125 | + |
def normalize_scene(target_size: float = 0.8) -> None:
    """Rescale and recenter the scene so it fits around the world origin.

    The largest bounding-box dimension is scaled to ``target_size`` and the
    scene is translated so its bounding-box center sits at the origin.

    Args:
        target_size: Edge length the largest dimension is scaled to.  The
            default 0.8 preserves the previous hard-coded behavior
            (generalized from a magic constant).
    """
    bbox_min, bbox_max = scene_bbox()
    # Scale so max(width, height, depth) becomes target_size.
    scale = target_size / max(bbox_max - bbox_min)
    for obj in scene_root_objects():
        obj.scale = obj.scale * scale
    # Apply scale to matrix_world before re-measuring the bounding box.
    bpy.context.view_layer.update()
    bbox_min, bbox_max = scene_bbox()
    offset = -(bbox_min + bbox_max) / 2
    for obj in scene_root_objects():
        obj.matrix_world.translation += offset
    bpy.ops.object.select_all(action="DESELECT")
| 138 | + |
def setup_camera():
    """Configure the default camera and attach a TRACK_TO constraint.

    Returns:
        (camera_object, track_to_constraint) — the constraint's target is
        assigned later by the caller.
    """
    camera = scene.objects["Camera"]
    camera.location = (0, 1.2, 0)
    camera.data.lens = 35
    camera.data.sensor_width = 32
    # Make the camera always look at its (to-be-assigned) target.
    constraint = camera.constraints.new(type="TRACK_TO")
    constraint.track_axis = "TRACK_NEGATIVE_Z"
    constraint.up_axis = "UP_Y"
    return camera, constraint
| 148 | + |
def sample_camera_loc(phi=None, theta=None, r=3.5):
    """Convert spherical camera coordinates to a Cartesian location.

    Bug fix: the defaults were ``None`` but were fed straight into
    ``np.sin``/``np.cos``, so calling without explicit angles crashed with a
    TypeError.  Omitted angles are now sampled randomly, restoring the
    intent of the previously commented-out sampling code.

    Args:
        phi: Polar angle in radians; sampled uniformly from
            [pi/3, 2*pi/3] when None.
        theta: Azimuthal angle in radians; sampled uniformly from
            [0, 2*pi) when None.
        r: Sphere radius (camera distance from the origin).

    Returns:
        np.ndarray of shape (3,): the [x, y, z] location.
    """
    if phi is None:
        phi = np.random.uniform(np.pi / 3, np.pi / 3 * 2)
    if theta is None:
        theta = np.random.uniform(0, np.pi * 2)
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)
    return np.array([x, y, z])
| 156 | + |
def save_images(object_file: str) -> None:
    """Render color, normal and depth images of the object from fixed views.

    Loads the model, normalizes it to the origin, registers up to 12
    predefined camera poses on a sphere of radius ``args.camera_dist``,
    renders with BlenderProc, and writes ``<idx>.png``,
    ``<idx>_normal.png`` and ``<idx>_depth.png`` into ``args.output_dir``.

    Bug fix: the pose loop was ``range(args.num_images)`` indexing a
    12-entry view list, so ``--num_images`` greater than 12 raised an
    IndexError; the count is now clamped to the available views.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    load_object(object_file)
    normalize_scene()
    add_lighting()
    cam, cam_constraint = setup_camera()
    # Track an empty at the origin so every pose looks at the object center.
    empty = bpy.data.objects.new("Empty", None)
    scene.collection.objects.link(empty)
    cam_constraint.target = empty

    # 12 fixed (phi, theta) viewpoints: three latitude rings (60, 90 and
    # 120 degrees from the pole), four azimuths each, offset per ring.
    views = [[np.pi / 3, np.pi / 6],
             [np.pi / 3, np.pi / 6 * 4],
             [np.pi / 3, np.pi / 6 * 7],
             [np.pi / 3, np.pi / 6 * 10],
             [np.pi / 2, np.pi / 6 * 2],
             [np.pi / 2, np.pi / 6 * 5],
             [np.pi / 2, np.pi / 6 * 8],
             [np.pi / 2, np.pi / 6 * 11],
             [np.pi / 3 * 2, 0],
             [np.pi / 3 * 2, np.pi / 2],
             [np.pi / 3 * 2, np.pi],
             [np.pi / 3 * 2, np.pi / 2 * 3]]

    for phi, theta in views[: args.num_images]:
        location = sample_camera_loc(phi, theta, args.camera_dist)
        # Rotation from a forward vector pointing at the origin.
        rotation_matrix = bproc.camera.rotation_from_forward_vec([0, 0, 0] - location)
        # Homogeneous cam2world pose from location and rotation.
        cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
        bproc.camera.add_camera_pose(cam2world_matrix)

    RendererUtility.set_cpu_threads(10)
    bproc.renderer.enable_normals_output()
    bproc.renderer.enable_depth_output(activate_antialiasing=False)
    bproc.renderer.set_output_format(enable_transparency=True)

    data = bproc.renderer.render(verbose=True)
    for index, image in enumerate(data["colors"]):
        render_path = os.path.join(args.output_dir, f"{index:03d}.png")
        save_array_as_image(image, "colors", render_path)
    for index, image in enumerate(data["normals"]):
        render_path = os.path.join(args.output_dir, f"{index:03d}_normal.png")
        save_array_as_image(image, "normals", render_path)
    for index, image in enumerate(data["depth"]):
        render_path = os.path.join(args.output_dir, f"{index:03d}_depth.png")
        # Background depth is effectively infinite; normalize the grayscale
        # to the largest foreground depth (values < 100) so the object is
        # visible in the saved image.
        plt.imsave(render_path, image, cmap='gray', vmax=image[image < 100].max())
| 222 | + |
| 223 | + |
def download_object(object_url: str) -> str:
    """Download the object and return its absolute local path."""
    # Use the file's basename (sans extension) as a stable identifier.
    filename = object_url.rsplit("/", 1)[-1]
    uid = filename.split(".")[0]
    local_path = os.path.join("tmp-objects", f"{uid}.glb")
    tmp_local_path = local_path + ".tmp"
    os.makedirs(os.path.dirname(tmp_local_path), exist_ok=True)
    # Fetch into a .tmp file first, then rename, so an interrupted download
    # never leaves a half-written .glb behind.
    urllib.request.urlretrieve(object_url, tmp_local_path)
    os.rename(tmp_local_path, local_path)
    return os.path.abspath(local_path)
| 237 | + |
| 238 | + |
if __name__ == "__main__":
    try:
        start_time = time.time()
        is_remote = args.object_path.startswith("http")
        local_path = download_object(args.object_path) if is_remote else args.object_path
        try:
            save_images(local_path)
        finally:
            # Bug fix: the downloaded temp file was only removed on success,
            # leaking files in tmp-objects whenever rendering failed.
            # Clean up regardless of the render outcome.
            if is_remote:
                os.remove(local_path)
        print("Finished", local_path, "in", time.time() - start_time, "seconds")
    except Exception as e:
        # Best-effort batch rendering: report the failure and exit cleanly so
        # a driving script can continue with the next object.
        print("Failed to render", args.object_path)
        print(e)
0 commit comments