|
|
import os |
|
|
import gradio as gr |
|
|
import torch |
|
|
import trimesh |
|
|
import tempfile |
|
|
import threading |
|
|
from pathlib import Path |
|
|
import gc |
|
|
|
|
|
from inference import SkeletonInferencer |
|
|
|
|
|
|
|
|
# Global SkeletonInferencer singleton; created lazily on first request
# (see initialize_inferencer) so app startup stays fast.
INFERENCER = None

# Flipped to True once the checkpoint files are known to exist on disk.
MODELS_READY = False

# Serializes the one-time model download across concurrent Gradio requests.
DOWNLOAD_LOCK = threading.Lock()

# Prefer GPU when available; fall back to CPU otherwise.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
def check_models_exist():
    """Return True when every required checkpoint file is present on disk."""
    checkpoints = (
        "skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
        "skeleton_ckpt/checkpoint_trainonv2_hier.pth",
    )
    for ckpt in checkpoints:
        if not Path(ckpt).exists():
            return False
    return True
|
|
|
|
|
def download_models_once():
    """Thread-safe model download; the actual download runs at most once.

    Returns:
        bool: True when all checkpoints are available (already on disk or
        freshly downloaded), False when the download failed.
    """
    global MODELS_READY

    # The lock ensures only one request performs the download; all others
    # block here and then take the fast path below.
    with DOWNLOAD_LOCK:
        if MODELS_READY or check_models_exist():
            MODELS_READY = True
            return True

        print("π₯ Downloading models (first time only)...")
        try:
            # Imported lazily so the app can start without huggingface_hub
            # being needed until the first download.
            from huggingface_hub import hf_hub_download

            models_to_download = [
                {
                    "repo_id": "Seed3D/MagicArticulate",
                    "filename": "skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
                    "local_dir": "."
                },
                {
                    "repo_id": "Seed3D/MagicArticulate",
                    "filename": "skeleton_ckpt/checkpoint_trainonv2_hier.pth",
                    "local_dir": "."
                }
            ]

            for i, model_info in enumerate(models_to_download, 1):
                print(f"[{i}/{len(models_to_download)}] Downloading {model_info['filename']}...")
                hf_hub_download(
                    repo_id=model_info["repo_id"],
                    filename=model_info["filename"],
                    local_dir=model_info["local_dir"]
                )
                print(f"β Downloaded: {model_info['filename']}")

            MODELS_READY = True
            # NOTE: original literal was split across two lines by a stray
            # control character (syntax error); rejoined here.
            print("β All models ready!")
            return True

        except Exception as e:
            # Broad catch is deliberate: any download failure (network, auth,
            # disk) should surface as a friendly False, not crash the worker.
            print(f"β Download failed: {e}")
            return False
|
|
|
|
|
def initialize_inferencer():
    """Create the global SkeletonInferencer on first call and return it.

    Returns:
        SkeletonInferencer: the shared, lazily-constructed inferencer.
    """
    global INFERENCER
    if INFERENCER is None:
        print("π§ Initializing inferencer...")
        # fp16 halves GPU memory use; CPU inference needs full precision.
        INFERENCER = SkeletonInferencer(
            device=DEVICE,
            precision="fp16" if DEVICE == "cuda" else "fp32"
        )
        # Original literal was split across two lines (syntax error); rejoined.
        print("β Inferencer ready!")
    return INFERENCER
|
|
|
|
|
def process_mesh(
    input_file,
    num_points=8192,
    use_marching_cubes=False,
    sequence_type="spatial",
    progress=gr.Progress()
):
    """Run skeleton inference on an uploaded mesh, with lazy model loading.

    Args:
        input_file: Uploaded mesh — a str path (gr.File(type="filepath"))
            or an object exposing ``.name`` (older Gradio tempfile wrapper).
        num_points: Point-cloud size sampled from the mesh.
        use_marching_cubes: Remesh via marching cubes before inference.
        sequence_type: "spatial" or "hierarchical" token ordering.
        progress: Gradio progress reporter.

    Returns:
        Tuple of (output_files, skeleton_path, rig_path, info_markdown);
        the first three are None on error, with the message in the last slot.
    """
    try:
        if input_file is None:
            return None, None, None, "β Please upload a mesh file"

        # BUG FIX: gr.File(type="filepath") hands us a plain string, which
        # has no .name attribute; accept both string paths and file wrappers.
        input_path = input_file if isinstance(input_file, str) else input_file.name

        progress(0.0, desc="π Checking models...")
        if not MODELS_READY:
            progress(0.05, desc="π₯ Downloading models (this may take 20-30s, only happens once)...")
            if not download_models_once():
                return None, None, None, "β Failed to download models. Please try again."

        progress(0.15, desc="βοΈ Loading model...")
        inferencer = initialize_inferencer()

        # BUG FIX: the original used `with tempfile.TemporaryDirectory()`,
        # which deleted the output files as soon as this function returned,
        # before Gradio could serve them. mkdtemp() persists the outputs
        # (the OS/platform reaps temp dirs; Spaces containers are ephemeral).
        temp_path = Path(tempfile.mkdtemp())

        progress(0.25, desc="π Processing mesh...")

        file_ext = Path(input_path).suffix.lower()
        if file_ext not in ['.obj', '.ply', '.stl']:
            return None, None, None, f"β Unsupported file format: {file_ext}"

        progress(0.35, desc="𦴠Generating skeleton...")

        results = inferencer.infer(
            input_path=input_path,
            output_dir=str(temp_path),
            input_pc_num=num_points,
            apply_marching_cubes=use_marching_cubes,
            sequence_type=sequence_type
        )

        progress(0.8, desc="πΎ Preparing outputs...")

        # Output naming convention of SkeletonInferencer.infer:
        # <stem>_skel.obj, <stem>_pred.txt, <stem>_mesh.obj
        file_name = Path(input_path).stem
        skeleton_file = temp_path / f"{file_name}_skel.obj"
        rig_file = temp_path / f"{file_name}_pred.txt"
        mesh_file = temp_path / f"{file_name}_mesh.obj"

        output_files = []
        # Original literal was split across two lines (syntax error); rejoined.
        info_text = "β **Processing Complete!**\n\n"

        if skeleton_file.exists():
            output_files.append(str(skeleton_file))
            info_text += f"- Skeleton: `{skeleton_file.name}`\n"

        if rig_file.exists():
            output_files.append(str(rig_file))
            with open(rig_file, 'r') as f:
                rig_data = f.read()
            info_text += f"- Rig data: `{rig_file.name}` ({len(rig_data.splitlines())} lines)\n"

        if mesh_file.exists():
            output_files.append(str(mesh_file))
            info_text += f"- Normalized mesh: `{mesh_file.name}`\n"

        info_text += "\n**Statistics:**\n"
        info_text += f"- Joints: {results.get('num_joints', 'N/A')}\n"
        info_text += f"- Bones: {results.get('num_bones', 'N/A')}\n"
        info_text += f"- Processing time: {results.get('time', 0):.2f}s\n"

        progress(1.0, desc="β Done!")

        # Release GPU memory between requests so queued jobs don't OOM.
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        gc.collect()

        return output_files, \
            str(skeleton_file) if skeleton_file.exists() else None, \
            str(rig_file) if rig_file.exists() else None, \
            info_text

    except Exception as e:
        # Top-level UI boundary: report the error to the user and log the
        # traceback server-side instead of crashing the request.
        error_msg = f"β **Error:** {str(e)}"
        print(f"Error processing mesh: {e}")
        import traceback
        traceback.print_exc()
        return None, None, None, error_msg
|
|
|
|
|
|
|
|
def create_interface():
    """Build and return the Gradio Blocks UI for the rigging demo."""
    with gr.Blocks(title="MagicArticulate - 3D Mesh Rigging") as demo:
        gr.Markdown("""
        # π¨ MagicArticulate: Make Your 3D Models Articulation-Ready

        **CVPR 2025** | Upload a 3D mesh and automatically generate skeletal rigging for animation.

        β οΈ **First inference will download models (~8GB, takes 20-30s)**. Subsequent inferences are fast!

        Supported formats: `.obj`, `.ply`, `.stl`
        """)

        # Surface model-readiness status so users know what to expect.
        if check_models_exist():
            # Original literal was split across two lines (syntax error); rejoined.
            gr.Markdown("β **Models ready!** You can start immediately.")
        else:
            gr.Markdown("π₯ **Models will download on first use** (only happens once)")

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### π€ Input")
                input_file = gr.File(
                    label="Upload 3D Mesh",
                    file_types=[".obj", ".ply", ".stl"],
                    type="filepath"
                )

                with gr.Accordion("βοΈ Advanced Settings", open=False):
                    num_points = gr.Slider(
                        minimum=2048,
                        maximum=16384,
                        value=8192,
                        step=1024,
                        label="Point Cloud Size",
                        info="Higher values = more accurate but slower"
                    )

                    use_marching_cubes = gr.Checkbox(
                        label="Use Marching Cubes",
                        value=False,
                        info="Better quality but slower"
                    )

                    sequence_type = gr.Radio(
                        choices=["spatial", "hierarchical"],
                        value="spatial",
                        label="Sequence Ordering",
                        info="Spatial is faster, hierarchical preserves hierarchy"
                    )

                process_btn = gr.Button("π Generate Rigging", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### π₯ Output")
                info_output = gr.Markdown()

                with gr.Tabs():
                    with gr.Tab("π¦ Download Files"):
                        output_files = gr.File(
                            label="Generated Files",
                            file_count="multiple"
                        )

                    with gr.Tab("𦴠Skeleton"):
                        skeleton_viewer = gr.Model3D(
                            label="Generated Skeleton",
                            height=400
                        )

                    with gr.Tab("π Rig Data"):
                        rig_text = gr.File(
                            label="Rig Text File"
                        )

        gr.Markdown("""
        ### π How to Use
        1. Upload your 3D mesh file (`.obj`, `.ply`, or `.stl`)
        2. (First time only) Wait for model download (~20-30s)
        3. Click "Generate Rigging" and wait for processing
        4. Download the generated skeleton and rigging files

        ### π‘ Tips
        - **Memory limited?** Use fewer points (e.g., 4096)
        - **Better quality?** Enable Marching Cubes (slower)

        ### π Citation
        ```
        @inproceedings{song2025magicarticulate,
            title={MagicArticulate: Make Your 3D Models Articulation-Ready},
            author={Song, Chaoyue and Zhang, Jianfeng and others},
            booktitle={CVPR},
            year={2025}
        }
        ```
        """)

        # Wire the button to the inference pipeline; outputs map 1:1 to the
        # four return values of process_mesh.
        process_btn.click(
            fn=process_mesh,
            inputs=[input_file, num_points, use_marching_cubes, sequence_type],
            outputs=[output_files, skeleton_viewer, rig_text, info_output]
        )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    print("π Starting MagicArticulate Space...")

    # Informational only — the actual download happens lazily on first use.
    if check_models_exist():
        # Original literal was split across two lines (syntax error); rejoined.
        print("β Models found, ready to serve!")
    else:
        print("β³ Models will be downloaded on first inference")

    demo = create_interface()
    # Small queue keeps GPU memory pressure bounded under concurrent users.
    demo.queue(max_size=3)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
|
|
|