# Hugging Face Space app (page captured while the Space showed status: "Runtime error").
import os
import tempfile

# BUG FIX: the captured file used torch / numpy / open3d / gradio / transformers
# throughout but never imported them, so the module raised NameError on load —
# the most likely cause of the Space's "Runtime error" status.
import numpy as np
import torch

import gradio as gr
import open3d as o3d
from transformers import AutoImageProcessor, AutoModelForDepthEstimation

# --- DA3 SETTINGS ---
# Run on GPU when available; everything below works on CPU as well.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
CHECKPOINT = "depth-anything/da3-small"

# Loaded once at import time and shared by every request.
processor = AutoImageProcessor.from_pretrained(CHECKPOINT)
model = AutoModelForDepthEstimation.from_pretrained(CHECKPOINT).to(DEVICE)
def _grid_faces(rows, cols):
    """Return an (N, 3) int array of triangle indices for a rows x cols vertex grid.

    Two triangles per grid cell, emitted cell-by-cell with the same winding
    as the original Python double loop: (v0, v2, v1) then (v1, v2, v3).
    """
    i, j = np.meshgrid(np.arange(rows - 1), np.arange(cols - 1), indexing="ij")
    v0 = (i * cols + j).ravel()          # top-left corner of each cell
    v1 = v0 + 1                          # top-right
    v2 = v0 + cols                       # bottom-left
    v3 = v2 + 1                          # bottom-right
    # Interleave the two triangles of each cell to preserve the loop's order.
    return np.stack((v0, v2, v1, v1, v2, v3), axis=1).reshape(-1, 3)


def process_textured_mesh(input_image):
    """Turn one RGB image into a textured 3-D relief mesh (OBJ + PNG texture).

    Pipeline: DA3 monocular depth -> regular vertex grid displaced along Z ->
    triangulated mesh with per-triangle-corner UVs -> OBJ written to the temp
    directory, with the input image saved alongside as ``texture.png``.

    Args:
        input_image: PIL image, or None (no-op).

    Returns:
        ``(mesh_path, mesh_path)`` — the OBJ path duplicated so the same file
        feeds both the 3-D preview and the download button, or ``(None, None)``
        when no image was provided.
    """
    if input_image is None:
        return None, None

    # 1. GENERATE DEPTH (uses the module-level `processor` / `model`).
    inputs = processor(images=input_image, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        outputs = model(**inputs)
    # PIL .size is (W, H) while interpolate wants (H, W), hence the [::-1].
    depth = torch.nn.functional.interpolate(
        outputs.predicted_depth.unsqueeze(1),
        size=input_image.size[::-1],
        mode="bicubic",
    ).squeeze().cpu().numpy()

    # 2. CREATE TEXTURED GRID — sample every `step`-th pixel to keep the
    # mesh lightweight for the browser.
    width, height = input_image.size
    step = 2
    x, y = np.meshgrid(np.arange(0, width, step), np.arange(0, height, step))

    # Normalize Z and center X/Y in a 10-unit box, preserving aspect ratio.
    # NOTE(review): this divides by max only, assuming depth >= 0 — TODO
    # confirm DA3's predicted depth is non-negative relative depth.
    z = (depth[::step, ::step] / (depth.max() + 1e-5)) * 3.0
    x_centered = ((x / width) - 0.5) * 10.0 * (width / height)
    y_centered = (0.5 - (y / height)) * 10.0
    points = np.stack((x_centered, y_centered, z), axis=-1)
    rows, cols, _ = points.shape

    # 3. VERTICES & UV MAPPING — one UV per vertex, image mapped to [0, 1].
    vertices = points.reshape(-1, 3)
    uvs = np.stack((x / width, 1.0 - (y / height)), axis=-1).reshape(-1, 2)
    faces = _grid_faces(rows, cols)

    # 4. CONSTRUCT MESH
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(vertices)
    mesh.triangles = o3d.utility.Vector3iVector(faces)
    # BUG FIX: Open3D expects one UV per triangle *corner*, in triangle order
    # (length 3 * num_triangles). The original np.tile(uvs, (3, 1)) merely
    # repeated the per-vertex UV list three times — wrong length and wrong
    # alignment. Index the vertex UVs by the flattened face array instead.
    mesh.triangle_uvs = o3d.utility.Vector2dVector(uvs[faces.reshape(-1)])

    # 5. EXPORT
    temp_dir = tempfile.gettempdir()
    mesh_path = os.path.join(temp_dir, "model.obj")
    texture_path = os.path.join(temp_dir, "texture.png")
    # Save the input image as the texture (loaded manually in e.g. Blender).
    input_image.save(texture_path)
    # BUG FIX: without write_triangle_uvs=True Open3D omits the `vt` records,
    # so the exported OBJ carried no UVs at all.
    o3d.io.write_triangle_mesh(mesh_path, mesh, write_triangle_uvs=True)
    return mesh_path, mesh_path
# --- UI ---
# Two-column layout: image input + trigger on the left, 3-D preview and
# download button on the right.
with gr.Blocks() as demo:
    gr.Markdown("# 🎭 DA3 Textured 3D Mesh")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Input")
            generate_btn = gr.Button("🔨 Generate Mesh", variant="primary")
        with gr.Column():
            # Gradio 5.0+ auto-focuses the camera on the origin (0,0,0).
            preview = gr.Model3D(label="3D Preview", camera_position=(0, 90, 15))
            download_btn = gr.DownloadButton("💾 Download OBJ + PNG")
    generate_btn.click(
        fn=process_textured_mesh,
        inputs=[image_input],
        outputs=[preview, download_btn],
    )
demo.launch()