Update app.py
Browse files
app.py
CHANGED
|
@@ -1,290 +1,137 @@
|
|
| 1 |
import os
|
| 2 |
-
import
|
| 3 |
-
import subprocess
|
| 4 |
-
from pathlib import Path
|
| 5 |
-
|
| 6 |
-
# Download models before importing anything else
|
| 7 |
-
def download_models_if_needed():
|
| 8 |
-
"""Download models if they don't exist"""
|
| 9 |
-
checkpoint_path = Path("checkpoints/spatial_order.pt")
|
| 10 |
-
|
| 11 |
-
if not checkpoint_path.exists():
|
| 12 |
-
print("=" * 70)
|
| 13 |
-
print("π First run: Downloading model checkpoints...")
|
| 14 |
-
print("=" * 70)
|
| 15 |
-
|
| 16 |
-
try:
|
| 17 |
-
# Run the download script
|
| 18 |
-
result = subprocess.run(
|
| 19 |
-
[sys.executable, "download_models.py"],
|
| 20 |
-
check=True,
|
| 21 |
-
capture_output=True,
|
| 22 |
-
text=True
|
| 23 |
-
)
|
| 24 |
-
print(result.stdout)
|
| 25 |
-
|
| 26 |
-
if result.returncode != 0:
|
| 27 |
-
print(f"Error downloading models: {result.stderr}")
|
| 28 |
-
raise RuntimeError("Failed to download model checkpoints")
|
| 29 |
-
|
| 30 |
-
except Exception as e:
|
| 31 |
-
print(f"β Error during model download: {e}")
|
| 32 |
-
print("\nPlease ensure:")
|
| 33 |
-
print("1. You have internet connectivity")
|
| 34 |
-
print("2. Hugging Face Hub is accessible")
|
| 35 |
-
print("3. The download_models.py script is present")
|
| 36 |
-
raise
|
| 37 |
-
else:
|
| 38 |
-
print("β
Model checkpoints already exist, skipping download")
|
| 39 |
-
|
| 40 |
-
# Download models before importing heavy dependencies
|
| 41 |
-
download_models_if_needed()
|
| 42 |
-
|
| 43 |
-
# Now import the rest
|
| 44 |
import torch
|
| 45 |
import trimesh
|
| 46 |
-
import numpy as np
|
| 47 |
-
import gradio as gr
|
| 48 |
import tempfile
|
| 49 |
-
|
| 50 |
-
from
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
return
|
| 73 |
-
|
| 74 |
-
print(f"Initializing MagicArticulate model (hierarchical={use_hierarchical})...")
|
| 75 |
-
|
| 76 |
-
# Create a simple args object with default parameters
|
| 77 |
-
class Args:
|
| 78 |
-
def __init__(self, hier_order=False):
|
| 79 |
-
self.input_pc_num = 8192
|
| 80 |
-
self.num_beams = 1
|
| 81 |
-
self.llm = "facebook/opt-350m"
|
| 82 |
-
self.pad_id = -1
|
| 83 |
-
self.n_discrete_size = 128
|
| 84 |
-
self.n_max_bones = 100
|
| 85 |
-
self.seed = 0
|
| 86 |
-
self.precision = "fp16"
|
| 87 |
-
self.hier_order = hier_order
|
| 88 |
-
# Select checkpoint based on ordering type
|
| 89 |
-
if hier_order:
|
| 90 |
-
self.pretrained_weights = "checkpoints/hier_order.pt"
|
| 91 |
-
else:
|
| 92 |
-
self.pretrained_weights = "checkpoints/spatial_order.pt"
|
| 93 |
-
|
| 94 |
-
args_config = Args(hier_order=use_hierarchical)
|
| 95 |
-
|
| 96 |
-
# Determine device
|
| 97 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 98 |
-
print(f"Using device: {device}")
|
| 99 |
-
|
| 100 |
-
# Load model
|
| 101 |
-
model = SkeletonGPT(args_config).to(device)
|
| 102 |
-
|
| 103 |
-
# Load pretrained weights
|
| 104 |
-
if os.path.exists(args_config.pretrained_weights):
|
| 105 |
-
print(f"Loading checkpoint: {args_config.pretrained_weights}")
|
| 106 |
-
pkg = torch.load(args_config.pretrained_weights, map_location=device)
|
| 107 |
-
model.load_state_dict(pkg["model"])
|
| 108 |
-
model.eval()
|
| 109 |
-
print("β
Model loaded successfully!")
|
| 110 |
-
else:
|
| 111 |
-
error_msg = f"Checkpoint not found: {args_config.pretrained_weights}"
|
| 112 |
-
print(f"β {error_msg}")
|
| 113 |
-
raise FileNotFoundError(error_msg)
|
| 114 |
|
| 115 |
def process_mesh(
|
| 116 |
input_file,
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
|
|
|
| 120 |
):
|
| 121 |
"""
|
| 122 |
-
Process
|
| 123 |
-
|
| 124 |
Args:
|
| 125 |
-
input_file: Uploaded mesh file (.obj, .ply,
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
Returns:
|
| 131 |
-
Tuple of (skeleton obj file, rig txt file, normalized mesh file, status message)
|
| 132 |
"""
|
| 133 |
try:
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
# Create temporary
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
args_config.input_pc_num,
|
| 161 |
-
apply_marching_cubes=apply_marching_cubes,
|
| 162 |
-
octree_depth=octree_depth
|
| 163 |
-
)
|
| 164 |
-
pc_normal = pc_list[0]
|
| 165 |
-
|
| 166 |
-
# Normalize point cloud
|
| 167 |
-
pc_coor = pc_normal[:, :3]
|
| 168 |
-
normals = pc_normal[:, 3:]
|
| 169 |
-
pc_coor, center, scale = normalize_to_unit_cube(pc_coor, scale_factor=0.9995)
|
| 170 |
-
|
| 171 |
-
pc_coor = pc_coor.astype(np.float32)
|
| 172 |
-
normals = normals.astype(np.float32)
|
| 173 |
-
|
| 174 |
-
# Calculate transform parameters
|
| 175 |
-
bounds = np.array([pc_coor.min(axis=0), pc_coor.max(axis=0)])
|
| 176 |
-
pc_center = (bounds[0] + bounds[1])[None, :] / 2
|
| 177 |
-
pc_scale = ((bounds[1] - bounds[0]).max() + 1e-5)
|
| 178 |
-
|
| 179 |
-
transform_params = torch.tensor([
|
| 180 |
-
center[0], center[1], center[2],
|
| 181 |
-
scale,
|
| 182 |
-
pc_center[0][0], pc_center[0][1], pc_center[0][2],
|
| 183 |
-
pc_scale
|
| 184 |
-
], dtype=torch.float32)
|
| 185 |
-
|
| 186 |
-
# Prepare batch data
|
| 187 |
-
pc_normal_tensor = torch.from_numpy(
|
| 188 |
-
np.concatenate([pc_coor, normals], axis=-1).astype(np.float16)
|
| 189 |
-
).unsqueeze(0).to(device)
|
| 190 |
-
|
| 191 |
-
batch_data = {
|
| 192 |
-
'pc_normal': pc_normal_tensor,
|
| 193 |
-
'file_name': [file_name],
|
| 194 |
-
'transform_params': transform_params.unsqueeze(0).to(device),
|
| 195 |
-
'vertices': torch.from_numpy(mesh.vertices).unsqueeze(0).to(device),
|
| 196 |
-
'faces': torch.from_numpy(mesh.faces).unsqueeze(0).to(device)
|
| 197 |
-
}
|
| 198 |
-
|
| 199 |
-
# Generate skeleton
|
| 200 |
-
print("Generating skeleton...")
|
| 201 |
-
with torch.no_grad():
|
| 202 |
-
pred_bone_coords = model.generate(batch_data)
|
| 203 |
-
|
| 204 |
-
# Process predictions
|
| 205 |
-
skeleton = pred_bone_coords[0].cpu().numpy()
|
| 206 |
-
pred_joints, pred_bones = pred_joints_and_bones(skeleton.squeeze())
|
| 207 |
-
|
| 208 |
-
print(f"Generated {len(pred_joints)} joints and {len(pred_bones)} bones")
|
| 209 |
-
|
| 210 |
-
# Post-process: merge duplicate joints
|
| 211 |
-
if hier_order:
|
| 212 |
-
pred_root_index = pred_bones[0][0]
|
| 213 |
-
pred_joints, pred_bones, pred_root_index = merge_duplicate_joints_and_fix_bones(
|
| 214 |
-
pred_joints, pred_bones, root_index=pred_root_index
|
| 215 |
)
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
# Save normalized mesh
|
| 260 |
-
vertices_norm = (vertices_np - trans) * scale_val
|
| 261 |
-
vertices_norm = (vertices_norm - pc_trans) / pc_scale_val
|
| 262 |
-
save_mesh(vertices_norm, mesh.faces, mesh_obj_path)
|
| 263 |
-
print(f"Saved mesh: {mesh_obj_path}")
|
| 264 |
-
|
| 265 |
-
status_msg = f"β
Success! Generated skeleton with {len(pred_joints)} joints and {len(pred_bones)} bones."
|
| 266 |
-
|
| 267 |
-
return skel_obj_path, rig_txt_path, mesh_obj_path, status_msg
|
| 268 |
-
|
| 269 |
except Exception as e:
|
|
|
|
|
|
|
| 270 |
import traceback
|
| 271 |
-
|
| 272 |
-
print(error_msg)
|
| 273 |
return None, None, None, error_msg
|
| 274 |
|
| 275 |
# Create Gradio interface
|
| 276 |
def create_interface():
|
| 277 |
-
"
|
| 278 |
-
|
| 279 |
-
with gr.Blocks(title="MagicArticulate - 3D Model Rigging", theme=gr.themes.Soft()) as demo:
|
| 280 |
gr.Markdown("""
|
| 281 |
-
#
|
| 282 |
-
|
| 283 |
-
Upload a 3D mesh
|
| 284 |
-
|
| 285 |
-
|
| 286 |
""")
|
| 287 |
-
|
| 288 |
with gr.Row():
|
| 289 |
with gr.Column(scale=1):
|
| 290 |
gr.Markdown("### π€ Input")
|
|
@@ -293,120 +140,99 @@ def create_interface():
|
|
| 293 |
file_types=[".obj", ".ply", ".stl"],
|
| 294 |
type="filepath"
|
| 295 |
)
|
| 296 |
-
|
| 297 |
-
gr.
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
|
|
|
| 321 |
with gr.Column(scale=1):
|
| 322 |
gr.Markdown("### π₯ Output")
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
)
|
| 344 |
-
|
| 345 |
gr.Markdown("""
|
| 346 |
-
### π
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
**
|
| 354 |
-
- **
|
| 355 |
-
- **
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
- For furniture/objects: Use spatial ordering (default)
|
| 360 |
-
- For characters/animals: Use hierarchical ordering
|
| 361 |
-
- Marching Cubes: Better for noisy meshes but slower
|
| 362 |
-
|
| 363 |
-
**Citation**:
|
| 364 |
-
```bibtex
|
| 365 |
@inproceedings{song2025magicarticulate,
|
| 366 |
title={MagicArticulate: Make Your 3D Models Articulation-Ready},
|
| 367 |
-
author={Song
|
| 368 |
booktitle={CVPR},
|
| 369 |
year={2025}
|
| 370 |
}
|
| 371 |
```
|
|
|
|
|
|
|
| 372 |
""")
|
| 373 |
-
|
| 374 |
-
# Connect
|
| 375 |
-
|
| 376 |
fn=process_mesh,
|
| 377 |
-
inputs=[input_file,
|
| 378 |
-
outputs=[
|
| 379 |
)
|
| 380 |
-
|
| 381 |
return demo
|
| 382 |
|
| 383 |
if __name__ == "__main__":
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
print(f"CUDA available: {torch.cuda.is_available()}")
|
| 391 |
-
if torch.cuda.is_available():
|
| 392 |
-
print(f"CUDA device: {torch.cuda.get_device_name(0)}")
|
| 393 |
-
print(f"CUDA memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
|
| 394 |
-
|
| 395 |
-
# Initialize model at startup (spatial ordering by default)
|
| 396 |
-
try:
|
| 397 |
-
print("\nInitializing model...")
|
| 398 |
-
initialize_model(use_hierarchical=False)
|
| 399 |
-
print("β
Model ready!")
|
| 400 |
-
except Exception as e:
|
| 401 |
-
print(f"β οΈ Warning: Could not initialize model at startup: {e}")
|
| 402 |
-
print("Model will be initialized on first request.")
|
| 403 |
-
|
| 404 |
-
# Launch Gradio app
|
| 405 |
-
print("\nLaunching Gradio interface...")
|
| 406 |
demo = create_interface()
|
| 407 |
-
demo.queue(max_size=
|
| 408 |
demo.launch(
|
| 409 |
server_name="0.0.0.0",
|
| 410 |
server_port=7860,
|
| 411 |
share=False
|
| 412 |
-
)
|
|
|
|
import gc
import os
import subprocess
import sys
import tempfile
from pathlib import Path

import gradio as gr
import numpy as np
import torch
import trimesh
|
| 9 |
+
|
| 10 |
+
# Import after installing dependencies
|
| 11 |
+
from inference import SkeletonInferencer
|
| 12 |
+
|
| 13 |
+
# Initialize model globally to avoid reloading
|
| 14 |
+
MODEL = None
|
| 15 |
+
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
| 16 |
+
|
| 17 |
+
def initialize_model():
    """Lazily build the process-wide SkeletonInferencer singleton.

    The inferencer is created on the first call only and cached in the
    module-level MODEL global, so repeated requests reuse one loaded model.
    Returns the cached SkeletonInferencer instance.
    """
    global MODEL
    # Guard clause: a previous call already loaded the weights.
    if MODEL is not None:
        return MODEL
    print(f"Initializing model on {DEVICE}...")
    MODEL = SkeletonInferencer(
        pretrained_weights="skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
        device=DEVICE,
        # fp16 only pays off on GPU; CPU inference stays in full precision.
        precision="fp16" if DEVICE == "cuda" else "fp32",
    )
    print("Model initialized successfully!")
    return MODEL
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
def process_mesh(
    input_file,
    num_points=8192,
    use_marching_cubes=False,
    sequence_type="spatial",
    progress=gr.Progress()
):
    """
    Process an uploaded mesh and generate skeleton rigging.

    Args:
        input_file: Uploaded mesh file (.obj, .ply, .stl). Gradio may pass
            either a filepath string (File component with type="filepath")
            or a file-like object exposing ``.name`` — both are handled.
        num_points: Number of points for point cloud sampling.
        use_marching_cubes: Apply marching cubes preprocessing.
        sequence_type: "spatial" or "hierarchical" ordering.
        progress: Gradio progress tracker.

    Returns:
        Tuple of (output file path list, skeleton obj path, rig txt path,
        markdown status message). The first three are None on failure.
    """
    try:
        if input_file is None:
            return None, None, None, "β Please upload a mesh file"

        progress(0.1, desc="Loading model...")
        model = initialize_model()

        # BUGFIX: the previous `with tempfile.TemporaryDirectory()` deleted
        # the generated files when the function returned, before Gradio could
        # serve them. mkdtemp() persists the directory for the download links.
        temp_path = Path(tempfile.mkdtemp())

        progress(0.2, desc="Processing mesh...")

        # BUGFIX: gr.File(type="filepath") passes a plain str, which has no
        # `.name`; older Gradio versions pass a tempfile wrapper that does.
        input_path = input_file if isinstance(input_file, str) else input_file.name

        # Validate the extension before spending time on inference.
        file_ext = Path(input_path).suffix.lower()
        if file_ext not in ['.obj', '.ply', '.stl']:
            return None, None, None, f"β Unsupported file format: {file_ext}"

        progress(0.3, desc="Generating skeleton...")

        # Run inference
        results = model.infer(
            input_path=input_path,
            output_dir=str(temp_path),
            input_pc_num=num_points,
            apply_marching_cubes=use_marching_cubes,
            sequence_type=sequence_type
        )

        progress(0.7, desc="Saving results...")

        # Output names follow the inferencer's convention: <stem>_skel.obj,
        # <stem>_pred.txt, <stem>_mesh.obj — TODO confirm against inference.py.
        file_name = Path(input_path).stem
        skeleton_file = temp_path / f"{file_name}_skel.obj"
        rig_file = temp_path / f"{file_name}_pred.txt"
        mesh_file = temp_path / f"{file_name}_mesh.obj"

        # Collect whichever outputs were actually produced.
        output_files = []
        info_text = "β **Processing Complete!**\n\n"

        if skeleton_file.exists():
            output_files.append(str(skeleton_file))
            info_text += f"- Skeleton: `{skeleton_file.name}`\n"

        if rig_file.exists():
            output_files.append(str(rig_file))
            with open(rig_file, 'r') as f:
                rig_data = f.read()
            info_text += f"- Rig data: `{rig_file.name}` ({len(rig_data.splitlines())} lines)\n"

        if mesh_file.exists():
            output_files.append(str(mesh_file))
            info_text += f"- Normalized mesh: `{mesh_file.name}`\n"

        info_text += f"\n**Statistics:**\n"
        info_text += f"- Joints: {results.get('num_joints', 'N/A')}\n"
        info_text += f"- Bones: {results.get('num_bones', 'N/A')}\n"
        # BUGFIX: the old f"{results.get('time', 'N/A'):.2f}" raised
        # ValueError whenever the inferencer reported no 'time' key.
        elapsed = results.get('time')
        if isinstance(elapsed, (int, float)):
            info_text += f"- Processing time: {elapsed:.2f}s\n"
        else:
            info_text += "- Processing time: N/A\n"

        progress(1.0, desc="Done!")

        # Memory cleanup between requests; the Python-level collect is
        # useful on CPU too, only the CUDA cache flush is device-specific.
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        gc.collect()

        return output_files, str(skeleton_file) if skeleton_file.exists() else None, \
               str(rig_file) if rig_file.exists() else None, info_text

    except Exception as e:
        error_msg = f"β **Error:** {str(e)}"
        print(f"Error processing mesh: {e}")
        import traceback
        traceback.print_exc()
        return None, None, None, error_msg
|
| 123 |
|
| 124 |
# Create Gradio interface
|
| 125 |
def create_interface():
|
| 126 |
+
with gr.Blocks(title="MagicArticulate - 3D Mesh Rigging") as demo:
|
|
|
|
|
|
|
| 127 |
gr.Markdown("""
|
| 128 |
+
# π¨ MagicArticulate: Make Your 3D Models Articulation-Ready
|
| 129 |
+
|
| 130 |
+
**CVPR 2025** | Upload a 3D mesh and automatically generate skeletal rigging for animation.
|
| 131 |
+
|
| 132 |
+
Supported formats: `.obj`, `.ply`, `.stl`
|
| 133 |
""")
|
| 134 |
+
|
| 135 |
with gr.Row():
|
| 136 |
with gr.Column(scale=1):
|
| 137 |
gr.Markdown("### π€ Input")
|
|
|
|
| 140 |
file_types=[".obj", ".ply", ".stl"],
|
| 141 |
type="filepath"
|
| 142 |
)
|
| 143 |
+
|
| 144 |
+
with gr.Accordion("βοΈ Advanced Settings", open=False):
|
| 145 |
+
num_points = gr.Slider(
|
| 146 |
+
minimum=2048,
|
| 147 |
+
maximum=16384,
|
| 148 |
+
value=8192,
|
| 149 |
+
step=1024,
|
| 150 |
+
label="Point Cloud Size",
|
| 151 |
+
info="Higher values = more accurate but slower"
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
use_marching_cubes = gr.Checkbox(
|
| 155 |
+
label="Use Marching Cubes",
|
| 156 |
+
value=False,
|
| 157 |
+
info="Apply marching cubes for better point cloud (slower)"
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
sequence_type = gr.Radio(
|
| 161 |
+
choices=["spatial", "hierarchical"],
|
| 162 |
+
value="spatial",
|
| 163 |
+
label="Sequence Ordering",
|
| 164 |
+
info="Spatial is faster, hierarchical preserves hierarchy"
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
process_btn = gr.Button("π Generate Rigging", variant="primary", size="lg")
|
| 168 |
+
|
| 169 |
with gr.Column(scale=1):
|
| 170 |
gr.Markdown("### π₯ Output")
|
| 171 |
+
info_output = gr.Markdown()
|
| 172 |
+
|
| 173 |
+
with gr.Tabs():
|
| 174 |
+
with gr.Tab("π¦ Download Files"):
|
| 175 |
+
output_files = gr.File(
|
| 176 |
+
label="Generated Files",
|
| 177 |
+
file_count="multiple"
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
with gr.Tab("𦴠Skeleton"):
|
| 181 |
+
skeleton_viewer = gr.Model3D(
|
| 182 |
+
label="Generated Skeleton",
|
| 183 |
+
height=400
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
with gr.Tab("π Rig Data"):
|
| 187 |
+
rig_text = gr.File(
|
| 188 |
+
label="Rig Text File"
|
| 189 |
+
)
|
| 190 |
+
|
|
|
|
|
|
|
| 191 |
gr.Markdown("""
|
| 192 |
+
### π How to Use
|
| 193 |
+
1. Upload your 3D mesh file (`.obj`, `.ply`, or `.stl`)
|
| 194 |
+
2. Adjust settings if needed (default works well)
|
| 195 |
+
3. Click "Generate Rigging" and wait for processing
|
| 196 |
+
4. Download the generated skeleton and rigging files
|
| 197 |
+
|
| 198 |
+
### π‘ Tips
|
| 199 |
+
- **Memory limited?** Use fewer points (e.g., 4096) for large meshes
|
| 200 |
+
- **Better quality?** Enable Marching Cubes (slower but more accurate)
|
| 201 |
+
- **Preserve hierarchy?** Use hierarchical ordering
|
| 202 |
+
|
| 203 |
+
### π Citation
|
| 204 |
+
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 205 |
@inproceedings{song2025magicarticulate,
|
| 206 |
title={MagicArticulate: Make Your 3D Models Articulation-Ready},
|
| 207 |
+
author={Chaoyue Song and Jianfeng Zhang and others},
|
| 208 |
booktitle={CVPR},
|
| 209 |
year={2025}
|
| 210 |
}
|
| 211 |
```
|
| 212 |
+
|
| 213 |
+
**Links:** [Paper](https://chaoyuesong.github.io/MagicArticulate/) | [GitHub](https://github.com/Seed3D/MagicArticulate)
|
| 214 |
""")
|
| 215 |
+
|
| 216 |
+
# Connect components
|
| 217 |
+
process_btn.click(
|
| 218 |
fn=process_mesh,
|
| 219 |
+
inputs=[input_file, num_points, use_marching_cubes, sequence_type],
|
| 220 |
+
outputs=[output_files, skeleton_viewer, rig_text, info_output]
|
| 221 |
)
|
| 222 |
+
|
| 223 |
return demo
|
| 224 |
|
| 225 |
if __name__ == "__main__":
    # Fetch model checkpoints on first run so a cold container can start.
    if not os.path.exists("skeleton_ckpt"):
        print("Downloading model checkpoints...")
        # BUGFIX: os.system("python download_models.py") silently ignored a
        # non-zero exit status and could pick up a different interpreter than
        # the one running this app. Run the same interpreter and fail loudly.
        subprocess.run([sys.executable, "download_models.py"], check=True)

    # Create and launch interface
    demo = create_interface()
    demo.queue(max_size=5)  # Limit queue to manage memory
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
|