Spaces:
Paused
Paused
Add code/cube3d/render/blender_script.py
Browse files
code/cube3d/render/blender_script.py
ADDED
|
@@ -0,0 +1,723 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Blender script to render images of 3D models.
|
| 3 |
+
|
| 4 |
+
This script is adopted from the Trellis rendering script:
|
| 5 |
+
https://github.com/microsoft/TRELLIS/blob/main/dataset_toolkits/render.py
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import argparse
|
| 10 |
+
import math
|
| 11 |
+
import os
|
| 12 |
+
import platform
|
| 13 |
+
import random
|
| 14 |
+
import sys
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Any, Callable, Dict, Generator, Literal, Optional, Tuple
|
| 17 |
+
|
| 18 |
+
import bpy
|
| 19 |
+
import numpy as np
|
| 20 |
+
from mathutils import Vector
|
| 21 |
+
|
| 22 |
+
# Make sibling modules importable when Blender runs this file as a standalone script.
pathdir = Path(__file__).parent
sys.path.append(pathdir.as_posix())

# Debug: confirm which bpy module/build was picked up at import time.
print(dir(bpy), bpy.__path__)

# Maps a file extension to the Blender operator that imports that format.
# Keys are lowercase extensions including the leading dot.
IMPORT_FUNCTIONS: Dict[str, Callable] = {
    ".obj": bpy.ops.wm.obj_import,
    ".glb": bpy.ops.import_scene.gltf,
    ".gltf": bpy.ops.import_scene.gltf,
}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def center_and_scale_mesh(scale_value: float = 1.0) -> Optional[float]:
    """Center the scene's meshes at the origin and scale them into a cube.

    For example,
        scale_value = 1.0 ==> coordinates in [-0.5, 0.5]
        scale_value = 2.0 ==> coordinates in [-1.0, 1.0]

    Args:
        scale_value: Edge length of the target cube.

    Returns:
        The uniform scale factor applied, or None when the scene contains
        no mesh objects.  (BUG FIX: the original annotated the return type
        as ``None`` even though it returns ``scale``.)
    """
    # Get all mesh objects
    mesh_objects = [obj for obj in bpy.context.scene.objects if obj.type == "MESH"]
    if not mesh_objects:
        return None

    # Calculate world-space bounds over every vertex of every mesh.
    min_coords = Vector((float("inf"),) * 3)
    max_coords = Vector((float("-inf"),) * 3)

    for obj in mesh_objects:
        for vertex in obj.data.vertices:
            world_coord = obj.matrix_world @ vertex.co
            min_coords.x = min(min_coords.x, world_coord.x)
            min_coords.y = min(min_coords.y, world_coord.y)
            min_coords.z = min(min_coords.z, world_coord.z)
            max_coords.x = max(max_coords.x, world_coord.x)
            max_coords.y = max(max_coords.y, world_coord.y)
            max_coords.z = max(max_coords.z, world_coord.z)

    # Calculate center and dimensions.
    center = (min_coords + max_coords) / 2
    dimensions = max_coords - min_coords
    # Scale to fit in the [-scale_value/2, scale_value/2] cube.
    scale = scale_value / max(dimensions.x, dimensions.y, dimensions.z)

    # Create an empty to serve as the parent so a single transform moves
    # every mesh at once.
    empty = bpy.data.objects.new("Parent_Empty", None)
    bpy.context.scene.collection.objects.link(empty)

    # Parent all mesh objects to the empty.
    for obj in mesh_objects:
        obj.parent = empty

    # Move empty to center everything, then scale it.
    empty.location = -center
    empty.scale = (scale, scale, scale)

    # Bake the empty's transform into world matrices.
    bpy.context.view_layer.update()
    bpy.ops.object.select_all(action="DESELECT")
    empty.select_set(True)
    bpy.context.view_layer.objects.active = empty
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    print(f"Empty location: {empty.location}")
    print(f"Empty scale: {empty.scale}")

    return scale
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def normalize_scene() -> Optional[bpy.types.Object]:
    """Normalize the scene to fit in a unit cube centered at the origin.

    Mostly taken from the Point-E / Shap-E rendering script
    (https://github.com/openai/point-e/blob/main/point_e/evals/scripts/blender_script.py#L97-L112),
    but fix for multiple root objects: (see bug report here:
    https://github.com/openai/shap-e/pull/60).

    Returns:
        The new parent empty when one was created (multiple root objects),
        otherwise None.
    """
    # BUG FIX: the original unconditionally returned `parent_empty`, which
    # raised NameError whenever the scene had a single root object.
    parent_empty = None
    if len(list(get_scene_root_objects())) > 1:
        # create an empty object to be used as a parent for all root objects
        parent_empty = bpy.data.objects.new("ParentEmpty", None)
        bpy.context.scene.collection.objects.link(parent_empty)

        # parent all root objects to the empty object
        for obj in get_scene_root_objects():
            if obj != parent_empty:
                obj.parent = parent_empty

    # Uniformly scale so the largest bounding-box extent becomes 1.
    bbox_min, bbox_max = scene_bbox()
    scale = 1 / max(bbox_max - bbox_min)
    for obj in get_scene_root_objects():
        obj.scale = obj.scale * scale

    # Apply scale to matrix_world, then translate the center to the origin.
    bpy.context.view_layer.update()
    bbox_min, bbox_max = scene_bbox()
    offset = -(bbox_min + bbox_max) / 2
    for obj in get_scene_root_objects():
        obj.matrix_world.translation += offset
    bpy.ops.object.select_all(action="DESELECT")
    bbox_min, bbox_max = scene_bbox()
    print(f"After normalize_scene: bbox_min: {bbox_min}, bbox_max: {bbox_max}")

    # unparent the camera so it is not affected by the scene transform
    bpy.data.objects["Camera"].parent = None

    return parent_empty
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def reset_cameras() -> None:
    """Reset the cameras in the scene to a single default camera named "Camera".

    NOTE(review): relies on the module-global ``scene`` assigned in the
    ``__main__`` block, so this is only valid when the file is run as a
    Blender script — confirm before reusing as a library function.
    """
    # Delete all existing cameras
    bpy.ops.object.select_all(action="DESELECT")
    bpy.ops.object.select_by_type(type="CAMERA")
    bpy.ops.object.delete()

    # Create a new camera with default properties
    bpy.ops.object.camera_add()

    # Rename the new camera so the rest of the script can find it by name
    new_camera = bpy.context.active_object
    new_camera.name = "Camera"

    # Set the new camera as the active camera for the scene
    scene.camera = new_camera
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def get_camera_with_position(x, y, z, fov_degrees=40):
    """Place the scene camera at (x, y, z) and aim it at the origin.

    Sets the camera's field of view to *fov_degrees* and returns the
    camera object.
    """
    cam = bpy.data.objects["Camera"]
    cam.data.angle = math.radians(fov_degrees)
    cam.location = np.array([x, y, z])
    # Point the -Z axis (view direction) toward the origin, Y up.
    look_dir = -cam.location
    cam.rotation_euler = look_dir.to_track_quat("-Z", "Y").to_euler()
    return cam
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def reset_scene() -> None:
    """Reset the scene to a clean state.

    Deletes every object that is not a camera or a light, then purges all
    materials, textures, and images from the blend data.

    Returns:
        None
    """
    # BUG FIX: iterate over snapshots (list(...)) — the original removed
    # entries from bpy.data collections while iterating them, which can
    # skip elements or misbehave.
    for obj in list(bpy.data.objects):
        if obj.type not in {"CAMERA", "LIGHT"}:
            bpy.data.objects.remove(obj, do_unlink=True)

    # delete all the materials
    for material in list(bpy.data.materials):
        bpy.data.materials.remove(material, do_unlink=True)

    # delete all the textures
    for texture in list(bpy.data.textures):
        bpy.data.textures.remove(texture, do_unlink=True)

    # delete all the images
    for image in list(bpy.data.images):
        bpy.data.images.remove(image, do_unlink=True)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def load_object(object_path: str) -> None:
    """Load a model with a supported file extension into the scene.

    Args:
        object_path (str): Path to the model file (.obj, .glb, or .gltf).

    Raises:
        ValueError: If the file extension is not supported.

    Returns:
        None
    """
    # Lowercase so ".GLB" etc. are also accepted.
    file_extension = Path(object_path).suffix.lower()
    # BUG FIX: the original only rejected an *empty* suffix and let any
    # other unknown extension escape as a KeyError from the dict lookup;
    # raise the documented ValueError for every unsupported extension.
    if file_extension not in IMPORT_FUNCTIONS:
        raise ValueError(f"Unsupported file type: {object_path}")

    import_function = IMPORT_FUNCTIONS[file_extension]

    if file_extension in {".glb", ".gltf"}:
        # merge_vertices collapses duplicated vertices produced by glTF export
        import_function(filepath=object_path, merge_vertices=True)
    else:
        import_function(filepath=object_path)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def clear_lights():
    """Delete every light object currently in the scene."""
    bpy.ops.object.select_all(action="DESELECT")
    light_objects = [
        scene_obj
        for scene_obj in bpy.context.scene.objects.values()
        if isinstance(scene_obj.data, bpy.types.Light)
    ]
    for scene_obj in light_objects:
        scene_obj.select_set(True)
    bpy.ops.object.delete()
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def create_light(
    location,
    energy=1.0,
    angle=0.5 * math.pi / 180,
    light_type: Literal["POINT", "SUN", "SPOT", "AREA"] = "SUN",
):
    """Create a new light at *location*, aimed at the origin.

    Args:
        location: World-space position of the light; must be a mathutils
            Vector (negation and ``to_track_quat`` are called on it).
        energy: Light intensity passed to ``light_data.energy``.
        angle: Angular diameter in radians (default 0.5 degrees).
        light_type: Blender light type to create.

    NOTE(review): ``angle`` is set for every non-AREA/non-POINT type, which
    includes SPOT — confirm SPOT lights accept ``.angle`` in the target
    Blender version.
    """
    # https://blender.stackexchange.com/questions/215624/how-to-create-a-light-with-the-python-api-in-blender-2-92
    light_data = bpy.data.lights.new(name="Light", type=light_type)
    light_data.energy = energy
    if light_type != "AREA" and light_type != "POINT":
        light_data.angle = angle
    light_object = bpy.data.objects.new(name="Light", object_data=light_data)

    # Orient the light's -Z axis toward the origin before linking it.
    direction = -location
    rot_quat = direction.to_track_quat("-Z", "Y")
    light_object.rotation_euler = rot_quat.to_euler()
    bpy.context.view_layer.update()

    bpy.context.collection.objects.link(light_object)
    light_object.location = location
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def create_uniform_lights(
    distance=2.0,
    energy=3.0,
    light_type: Literal["POINT", "SUN", "SPOT", "AREA"] = "SUN",
):
    """Replace all lights with six lights on the +/- X, Y, Z axes.

    Each light sits *distance* units from the origin and points at it.
    """
    clear_lights()
    for axis_index in range(3):
        components = [0.0, 0.0, 0.0]
        components[axis_index] = 1.0
        axis = Vector(components)
        create_light(axis * distance, energy=energy, light_type=light_type)
        create_light(-axis * distance, energy=energy, light_type=light_type)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def create_light_at_camera_position(
    camera_position: Vector,
    energy=1.5,
    use_shadow=False,
    light_type: Literal["POINT", "SUN", "SPOT", "AREA"] = "SUN",
):
    """Replace all lights with a single light at the camera position.

    When *use_shadow* is False (default), shadow casting is disabled on
    every light in the blend data.
    """
    clear_lights()
    create_light(camera_position, energy=energy, light_type=light_type)
    if use_shadow:
        return
    # disable shadows
    for light in bpy.data.lights:
        light.use_shadow = False
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def set_world_background_color(
    color: Tuple[float, float, float, float] = (1.0, 1.0, 1.0, 1.0),
) -> None:
    """Set a flat RGBA world background and a standard view transform."""
    world = bpy.context.scene.world
    world.use_nodes = True
    background_node = world.node_tree.nodes["Background"]
    background_node.inputs[0].default_value = color
    # "Standard" avoids Filmic tone-mapping altering the background color.
    bpy.context.scene.view_settings.view_transform = "Standard"
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def scene_bbox(
    single_obj: Optional[bpy.types.Object] = None, ignore_matrix: bool = False
) -> Tuple[Vector, Vector]:
    """Returns the bounding box of the scene.

    Taken from Shap-E rendering script
    (https://github.com/openai/shap-e/blob/main/shap_e/rendering/blender/blender_script.py#L68-L82)

    Args:
        single_obj (Optional[bpy.types.Object], optional): If not None, only computes
            the bounding box for the given object. Defaults to None.
        ignore_matrix (bool, optional): Whether to ignore the object's matrix. Defaults
            to False.

    Raises:
        RuntimeError: If there are no objects in the scene.

    Returns:
        Tuple[Vector, Vector]: The minimum and maximum coordinates of the bounding box.
    """
    lo = [math.inf] * 3
    hi = [-math.inf] * 3
    targets = [single_obj] if single_obj is not None else get_scene_meshes()

    seen_any = False
    for obj in targets:
        seen_any = True
        for corner in obj.bound_box:
            point = Vector(corner)
            if not ignore_matrix:
                point = obj.matrix_world @ point
            for axis in range(3):
                lo[axis] = min(lo[axis], point[axis])
                hi[axis] = max(hi[axis], point[axis])

    if not seen_any:
        raise RuntimeError("no objects in scene to compute bounding box for")

    return Vector(lo), Vector(hi)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def get_scene_root_objects() -> Generator[bpy.types.Object, None, None]:
    """Returns all root objects in the scene.

    A root object is one with no parent; lights are excluded.

    Yields:
        Generator[bpy.types.Object, None, None]: Generator of all root objects in the
        scene.
    """
    yield from (
        obj
        for obj in bpy.context.scene.objects.values()
        if not obj.parent and not isinstance(obj.data, bpy.types.Light)
    )
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def get_scene_meshes() -> Generator[bpy.types.Object, None, None]:
    """Returns all meshes in the scene.

    Yields:
        Generator[bpy.types.Object, None, None]: Generator of all meshes in the scene.
    """
    yield from (
        obj
        for obj in bpy.context.scene.objects.values()
        if isinstance(obj.data, bpy.types.Mesh)
    )
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def delete_missing_textures() -> Dict[str, Any]:
    """Deletes all missing textures in the scene.

    A texture is "missing" when its image node references a file path that
    does not exist on disk.  Each missing texture connected to a Principled
    BSDF is replaced by a random flat base color (one color per unique
    missing path) and the texture node is removed.

    Returns:
        Dict[str, Any]: Dictionary with keys "count", "files", and "file_path_to_color".
            "count" is the number of missing textures, "files" is a list of the missing
            texture file paths, and "file_path_to_color" is a dictionary mapping the
            missing texture file paths to a random color.
    """
    missing_file_count = 0
    out_files = []
    file_path_to_color = {}

    # Check all materials in the scene
    for material in bpy.data.materials:
        if not material.use_nodes:
            continue
        # BUG FIX: snapshot the node collection — the original removed nodes
        # while iterating material.node_tree.nodes directly.
        for node in list(material.node_tree.nodes):
            if node.type != "TEX_IMAGE":
                continue
            image = node.image
            if image is None:
                continue
            file_path = bpy.path.abspath(image.filepath)
            if file_path == "":
                # empty path means the image is embedded in the file
                continue
            if os.path.exists(file_path):
                continue

            # BUG FIX: an unconnected texture node has no outgoing links;
            # the original indexed links[0] unconditionally (IndexError).
            links = node.outputs[0].links
            if not links:
                continue

            # Find the connected Principled BSDF node
            connected_node = links[0].to_node
            if connected_node.type == "BSDF_PRINCIPLED":
                if file_path not in file_path_to_color:
                    # Set a random color for the unique missing file path
                    random_color = [random.random() for _ in range(3)]
                    file_path_to_color[file_path] = random_color + [1]

                connected_node.inputs[
                    "Base Color"
                ].default_value = file_path_to_color[file_path]

                # Delete the TEX_IMAGE node
                material.node_tree.nodes.remove(node)
                missing_file_count += 1
                out_files.append(image.filepath)
    return {
        "count": missing_file_count,
        "files": out_files,
        "file_path_to_color": file_path_to_color,
    }
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def setup_environment_lighting(envmap_path):
    """Light the scene with an equirectangular environment map.

    Rebuilds the world node tree as: Environment Texture -> Background ->
    World Output.

    Args:
        envmap_path: Path to the environment image to load.
    """
    world = bpy.context.scene.world
    world.use_nodes = True
    nodes = world.node_tree.nodes
    links = world.node_tree.links

    # Clear existing nodes.
    # BUG FIX: iterate a snapshot — the original removed nodes while
    # iterating the live collection.
    for node in list(nodes):
        nodes.remove(node)

    # Create Background node
    bg_node = nodes.new(type="ShaderNodeBackground")
    bg_node.location = (0, 0)

    # Create Environment Texture node
    env_tex_node = nodes.new(type="ShaderNodeTexEnvironment")
    env_tex_node.location = (-300, 0)

    # Load the environment texture from the given path
    env_tex_node.image = bpy.data.images.load(envmap_path)

    # Create World Output node
    world_output_node = nodes.new(type="ShaderNodeOutputWorld")
    world_output_node.location = (300, 0)

    # Link nodes
    links.new(env_tex_node.outputs["Color"], bg_node.inputs["Color"])
    links.new(bg_node.outputs["Background"], world_output_node.inputs["Surface"])
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def create_solid_color_material(name, color):
    """Build a node-based material with a single diffuse BSDF of *color*."""
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    tree = mat.node_tree

    diffuse = tree.nodes.new("ShaderNodeBsdfDiffuse")
    diffuse.inputs["Color"].default_value = color

    output_node = tree.nodes["Material Output"]
    tree.links.new(diffuse.outputs["BSDF"], output_node.inputs["Surface"])
    return mat
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def create_phong_material(name, color):
    """Build a shiny (metallic, mid-roughness) Principled BSDF material.

    Args:
        name: Name for the new material datablock.
        color: RGBA base color.

    Returns:
        The new bpy material.
    """
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    node_tree = mat.node_tree
    spec_node = node_tree.nodes.new("ShaderNodeBsdfPrincipled")
    # BUG FIX: removed leftover debug print of spec_node.inputs.keys().
    spec_node.inputs["Base Color"].default_value = color
    spec_node.inputs["Roughness"].default_value = 0.5
    spec_node.inputs["Metallic"].default_value = 1.0
    mat_output = node_tree.nodes["Material Output"]
    node_tree.links.new(spec_node.outputs["BSDF"], mat_output.inputs["Surface"])
    return mat
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
def render_object(
    object_file: str,
    num_renders: int,
    output_dir: str,
    transparent_background: bool = False,
    environment_map: Optional[str] = None,
) -> None:
    """Saves rendered images for given asset to specified output directory.

    Loads the asset, normalizes it to a unit cube, orbits the camera around
    it in `num_renders` equal steps, and writes one PNG per step named
    ``{i:03d}_textured.png``.

    NOTE(review): relies on the module-globals ``scene`` and ``context``
    assigned in the ``__main__`` block; only valid when run as a script.

    Args:
        object_file (str): Path to the object file.
        num_renders (int): Number of renders to save of the object.
        output_dir (str): Path to the directory where the rendered images and metadata
            will be saved.
        transparent_background (bool): Whether to use transparent background,
            otherwise the background is a flat dark gray.
        environment_map (Optional[str]): If given, light with this environment
            map instead of six uniform sun lights.
    Returns:
        None
    """
    os.makedirs(output_dir, exist_ok=True)

    # load the object
    reset_scene()
    load_object(object_file)

    if transparent_background:
        scene.render.film_transparent = True
    else:
        scene.render.film_transparent = False

    set_world_background_color([0.2, 0.2, 0.2, 1.0])

    # normalize the scene
    _ = normalize_scene()

    # Set up cameras
    cam = scene.objects["Camera"]
    fov_degrees = 40.0
    cam.data.angle = np.radians(fov_degrees)

    # Set up camera constraints: track an empty at the origin so rotating
    # the empty orbits the camera around the object.
    cam_constraint = cam.constraints.new(type="TRACK_TO")
    cam_constraint.track_axis = "TRACK_NEGATIVE_Z"
    cam_constraint.up_axis = "UP_Y"
    empty = bpy.data.objects.new("Empty", None)
    empty.location = (0, 0, 0)
    scene.collection.objects.link(empty)
    cam_constraint.target = empty
    cam.parent = empty

    # Replace textures whose image files are missing with flat colors.
    delete_missing_textures()

    if environment_map:
        setup_environment_lighting(environment_map)
    else:
        create_uniform_lights(energy=1.0, light_type="SUN")

    camera_position = [0, -2, 0]

    # determine how much to orbit camera by.
    stepsize = 360.0 / num_renders

    def render_views(name):
        # Render num_renders frames, rotating the parent empty (and thus
        # the camera) by `stepsize` degrees after each frame.
        for i in range(num_renders):
            # set camera
            _ = get_camera_with_position(
                camera_position[0],
                camera_position[1],
                camera_position[2],
                fov_degrees=fov_degrees,
            )

            # Set output paths with absolute paths
            render_path = os.path.abspath(
                os.path.join(output_dir, f"{i:03d}_{name}.png")
            )

            # Set file output paths
            scene.render.filepath = render_path

            # Make sure the output directory exists
            os.makedirs(output_dir, exist_ok=True)

            # Render
            bpy.ops.render.render(write_still=True)

            context.view_layer.objects.active = empty
            empty.rotation_euler[2] += math.radians(stepsize)

    # ensure that all objects have materials, if not then add a default
    # one.
    textured_mat = create_solid_color_material("default texture", [0.6, 0.6, 0.6, 1])

    for obj in get_scene_meshes():
        if obj.active_material is None:
            obj.active_material = textured_mat

    render_views("textured")
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def enable_gpus(device_type, use_cpus=False):
    """Enable Cycles rendering devices of the given type.

    Args:
        device_type: One of "CUDA", "METAL", "OPENCL" (a "CPU"/"NONE" value
            passes the assert but is rejected by the final dispatch, as in
            the original control flow).
        use_cpus: Whether CPU devices are also enabled.

    Returns:
        List of activated GPU device names.

    Raises:
        RuntimeError: If no devices are detected (non-CPU request) or the
            device type cannot be dispatched.
    """
    preferences = bpy.context.preferences
    cycles_preferences = preferences.addons["cycles"].preferences
    cycles_preferences.refresh_devices()
    try:
        devices = cycles_preferences.devices
    # BUG FIX: narrowed the original bare `except:` (which also swallowed
    # SystemExit/KeyboardInterrupt).
    except Exception:
        print("No devices detected")
        if device_type == "CPU":
            return []
        # BUG FIX: dropped the pointless f-string prefix on a literal.
        raise RuntimeError("No devices detected, set use_cpus to True")

    assert device_type in [
        "CUDA",
        "METAL",
        "OPENCL",
        "CPU",
        "NONE",
    ], f"Unsupported device type: {device_type}"

    try:
        iter(devices)
    except TypeError:
        # A single device comes back as a bare object; wrap it so the
        # loop below works either way.
        devices = [devices]

    activated_gpus = []
    for device in devices:
        if device.type == "CPU":
            device.use = use_cpus
        else:
            device.use = True
            activated_gpus.append(device.name)

    if device_type == "CUDA":
        cycles_preferences.compute_device_type = "CUDA"
        bpy.context.scene.cycles.device = "GPU"
    elif device_type == "METAL":
        cycles_preferences.compute_device_type = "METAL"
        bpy.context.scene.cycles.device = "GPU"
    elif device_type == "OPENCL":
        cycles_preferences.compute_device_type = "OPENCL"
        bpy.context.scene.cycles.device = "GPU"
    else:
        raise RuntimeError(f"Unsupported device type: {device_type}")

    return activated_gpus
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def set_render_settings(engine, resolution):
    """Configure output, Cycles, and EEVEE settings for square PNG renders.

    NOTE(review): relies on the module-globals ``render``, ``scene`` and
    ``world`` assigned in the ``__main__`` block; only valid when this file
    is run as a Blender script.

    Args:
        engine: Render engine identifier ("CYCLES" or "BLENDER_EEVEE_NEXT").
        resolution: Square output resolution in pixels.
    """
    # Output settings: square RGBA PNGs at full resolution.
    render.engine = engine
    render.image_settings.file_format = "PNG"
    render.image_settings.color_mode = "RGBA"
    render.resolution_x = resolution
    render.resolution_y = resolution
    render.resolution_percentage = 100

    # Cycles settings: GPU + adaptive sampling to keep renders fast.
    scene.cycles.device = "GPU"
    scene.cycles.use_adaptive_sampling = True
    scene.cycles.adaptive_threshold = 0.1
    scene.cycles.samples = 64
    scene.cycles.adaptive_min_samples = 1
    scene.cycles.filter_width = 2
    scene.cycles.use_fast_gi = True
    scene.cycles.fast_gi_method = "REPLACE"
    world.light_settings.ao_factor = 1.0
    world.light_settings.distance = 10
    scene.cycles.use_denoising = True  # ML denoising
    scene.cycles.denoising_use_gpu = True

    # bake existing frames for faster future renders
    scene.render.use_persistent_data = True

    # EEVEE settings (used when engine is BLENDER_EEVEE_NEXT).
    scene.eevee.use_shadows = True
    scene.eevee.use_raytracing = True
    scene.eevee.ray_tracing_options.use_denoise = True
    scene.eevee.use_fast_gi = True
    scene.eevee.fast_gi_method = "GLOBAL_ILLUMINATION"
    scene.eevee.ray_tracing_options.trace_max_roughness = 0.5
    scene.eevee.fast_gi_resolution = "2"
    scene.eevee.fast_gi_ray_count = 2
    scene.eevee.fast_gi_step_count = 8
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def print_devices():
    """Print every Cycles device plus the active compute configuration."""
    print("Devices:")
    prefs = bpy.context.preferences.addons["cycles"].preferences
    prefs.refresh_devices()

    for device in prefs.devices:
        print(f' [{device.id}]<{device.type}> "{device.name}" Using: {device.use}')

    print(f"Compute device type: {prefs.compute_device_type}")
    print(f"Cycles device: {bpy.context.scene.cycles.device}")
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--object_path",
        type=str,
        required=False,
        help="Path to the object file",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Path to the directory where the rendered images and metadata will be saved.",
    )
    parser.add_argument(
        "--engine",
        type=str,
        default="BLENDER_EEVEE_NEXT",  # rasterization; CYCLES is the path-traced alternative
        choices=["CYCLES", "BLENDER_EEVEE_NEXT"],
    )
    parser.add_argument(
        "--num_renders",
        type=int,
        default=12,
        help="Number of renders to save of the object.",
    )
    parser.add_argument(
        "--render_resolution",
        type=int,
        default=512,
        help="Resolution of the rendered images.",
    )
    parser.add_argument(
        "--transparent_background",
        action="store_true",
        help="Whether to use transparent background",
    )
    parser.add_argument(
        "--environment_map",
        default=None,
        type=str,
        help="Use the given environment map for lighting",
    )

    # Blender passes script arguments after a "--" separator.
    argv = sys.argv[sys.argv.index("--") + 1 :]
    args = parser.parse_args(argv)

    # Module-level handles used by several functions above
    # (reset_cameras, render_object, set_render_settings).
    context = bpy.context
    scene = context.scene
    render = scene.render
    world = bpy.data.worlds["World"]

    set_render_settings(args.engine, args.render_resolution)

    # detect platform and activate GPUs
    # BUG FIX: the original rebound the name `platform` to the result of
    # platform.system(), shadowing the imported `platform` module.
    system_name = platform.system()
    if system_name == "Darwin":
        activated_gpus = enable_gpus("METAL", use_cpus=True)
    elif system_name == "Linux":
        activated_gpus = enable_gpus("CUDA", use_cpus=False)
    else:
        raise RuntimeError("Unsupported platform")
    print(f"Activated GPUs: {activated_gpus}")

    print_devices()

    render_object(
        object_file=args.object_path,
        num_renders=args.num_renders,
        output_dir=args.output_dir,
        transparent_background=args.transparent_background,
        environment_map=args.environment_map,
    )
|