Spaces:
Sleeping
Sleeping
| import numpy as np | |
| from ..preprocessing import simplify_texture | |
# Fallback RGB channel value used when a mesh provides no color source (mid gray).
DEFAULT_GRAY_VALUE = 128
# Number of faces processed per batch when sampling colors from a texture;
# bounds peak memory for large meshes.
TEXTURE_SAMPLING_CHUNK_SIZE = 10000
def get_face_colors(mesh, simplify_details=True, detail_sensitivity=None):
    """Resolve per-face RGB colors for *mesh*, trying sources in priority order.

    Sources are tried in order: material (texture or main color), explicit
    per-face colors, and finally a uniform gray fallback.

    Returns an (n_faces, 3) uint8 array.
    """
    candidate_sources = (
        lambda: try_extract_from_material(mesh, simplify_details, detail_sensitivity),
        lambda: try_extract_from_face_colors(mesh),
    )
    for source in candidate_sources:
        colors = source()
        if colors is not None:
            return colors
    # Nothing usable on the mesh: degrade to a neutral gray per face.
    return create_default_gray_colors(len(mesh.faces))
def try_extract_from_material(mesh, simplify_details=True, detail_sensitivity=None):
    """Extract per-face colors from the mesh's material, if possible.

    Prefers sampling the material's texture via the mesh UV coordinates,
    optionally running the texture through ``simplify_texture`` first; falls
    back to the material's uniform ``main_color``.

    ``detail_sensitivity``, when given, is a ``(d, sigma_color, sigma_space)``
    tuple forwarded to ``simplify_texture``.

    Returns an (n_faces, 3) uint8 array, or None when the material provides
    no usable color information.
    """
    if not hasattr(mesh.visual, "material"):
        return None
    material = mesh.visual.material
    texture_image = get_texture_image(material)
    # Explicit None check: image-like objects have ambiguous truthiness
    # (a numpy-array texture would raise ValueError under `if texture_image`).
    if texture_image is not None and has_valid_uv_coordinates(mesh):
        # Always work on a copy to avoid mutating textures shared between meshes.
        if hasattr(texture_image, "copy"):
            texture_image = texture_image.copy()
        if simplify_details:
            if detail_sensitivity is not None:
                d, sigma_color, sigma_space = detail_sensitivity
                texture_image = simplify_texture(
                    texture_image,
                    enabled=True,
                    d=d,
                    sigma_color=sigma_color,
                    sigma_space=sigma_space,
                )
            else:
                texture_image = simplify_texture(texture_image)
        return sample_colors_from_texture(mesh, texture_image)
    if has_main_color(material):
        return create_uniform_color_array(material.main_color, len(mesh.faces))
    return None
def get_texture_image(material):
    """Return the material's texture image, or None when it carries none.

    Probes ``baseColorTexture`` (PBR-style materials) before ``image``
    (simple materials); the first non-None attribute wins.
    """
    for attribute in ("baseColorTexture", "image"):
        image = getattr(material, attribute, None)
        if image is not None:
            return image
    return None
def has_valid_uv_coordinates(mesh):
    """True when the mesh's visual carries a non-None UV coordinate array."""
    return getattr(mesh.visual, "uv", None) is not None
def has_main_color(material):
    """True when the material defines a non-None uniform ``main_color``."""
    return getattr(material, "main_color", None) is not None
def create_uniform_color_array(color, face_count):
    """Repeat the RGB part of *color* once per face.

    Any alpha channel in *color* is dropped. Returns a (face_count, 3)
    uint8 array.
    """
    rgb = np.asarray(color[:3], dtype=np.uint8)
    return np.repeat(rgb[np.newaxis, :], face_count, axis=0)
def try_extract_from_face_colors(mesh):
    """Return the mesh's explicit per-face colors as (n_faces, 3) uint8, or None.

    Any alpha channel stored in ``face_colors`` is dropped.
    """
    face_colors = getattr(mesh.visual, "face_colors", None)
    if face_colors is None:
        return None
    return face_colors[:, :3].astype(np.uint8)
def create_default_gray_colors(face_count):
    """Build a (face_count, 3) uint8 array filled with the neutral fallback gray."""
    gray = np.empty((face_count, 3), dtype=np.uint8)
    gray[...] = DEFAULT_GRAY_VALUE
    return gray
def sample_colors_from_texture(mesh, texture_image):
    """Average the texture color at each face's three UV-mapped corners.

    Faces are processed in fixed-size chunks to bound peak memory. On any
    indexing/shape failure (malformed UVs or texture) the whole mesh falls
    back to uniform gray rather than raising.

    Returns an (n_faces, 3) uint8 array.
    """
    try:
        texture = convert_to_rgb_array(texture_image)
        height, width = texture.shape[:2]
        uv = mesh.visual.uv
        faces = mesh.faces
        n_faces = len(faces)
        face_colors = np.zeros((n_faces, 3), dtype=np.uint8)
        for start in range(0, n_faces, TEXTURE_SAMPLING_CHUNK_SIZE):
            chunk = faces[start : start + TEXTURE_SAMPLING_CHUNK_SIZE]
            # (faces_in_chunk, 3 corners, 2 uv components)
            corner_uvs = uv[chunk].reshape(-1, 3, 2)
            xs = convert_u_to_pixel_x(corner_uvs[:, :, 0], width)
            ys = convert_v_to_pixel_y(corner_uvs[:, :, 1], height)
            corner_colors = texture[ys.ravel(), xs.ravel(), :3].reshape(
                len(chunk), 3, 3
            )
            # Mean over the three corners gives one color per face.
            face_colors[start : start + len(chunk)] = np.mean(
                corner_colors, axis=1
            ).astype(np.uint8)
        return face_colors
    except (IndexError, ValueError):
        return create_default_gray_colors(len(mesh.faces))
def convert_to_rgb_array(image):
    """Coerce a PIL-like image or array-like into a uint8 RGB numpy array.

    Objects exposing ``convert`` (PIL images) are converted to RGB mode
    first; everything else is passed straight to ``np.array``.
    """
    convert = getattr(image, "convert", None)
    if convert is not None:
        image = convert("RGB")
    return np.array(image, dtype=np.uint8)
def convert_u_to_pixel_x(u_values, width):
    """Map U texture coordinates onto valid pixel column indices.

    Values are scaled by *width*, truncated toward zero, then clamped into
    ``[0, width - 1]`` so out-of-range UVs cannot index past the texture.
    """
    columns = (u_values * width).astype(int)
    return np.clip(columns, 0, width - 1)
def convert_v_to_pixel_y(v_values, height):
    """Map V texture coordinates onto valid pixel row indices.

    The vertical axis is flipped (``1 - v``) because UV origin is
    bottom-left while image rows run top-down; the result is truncated
    toward zero and clamped into ``[0, height - 1]``.
    """
    rows = ((1 - v_values) * height).astype(int)
    return np.clip(rows, 0, height - 1)