"""Gradio Space for MagicArticulate (CVPR 2025).

Serves a web UI that accepts a 3D mesh (.obj/.ply/.stl), lazily downloads
the skeleton checkpoints from the Hugging Face Hub on first use, runs
``SkeletonInferencer`` and returns the generated skeleton / rig files.
"""

import gc
import os  # NOTE(review): unused here — kept in case of external expectations
import tempfile
import threading
from pathlib import Path

import gradio as gr
import torch
import trimesh  # NOTE(review): unused here — kept in case of external expectations

from inference import SkeletonInferencer

# Global state (module-level singletons shared across requests)
INFERENCER = None            # lazily created SkeletonInferencer
MODELS_READY = False         # set True once checkpoints are on disk
DOWNLOAD_LOCK = threading.Lock()  # serializes first-time model download
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def check_models_exist():
    """Return True if all required checkpoint files are already on disk."""
    required_files = [
        "skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
        "skeleton_ckpt/checkpoint_trainonv2_hier.pth",
    ]
    return all(Path(f).exists() for f in required_files)


def download_models_once():
    """Download the skeleton checkpoints from the HF Hub exactly once.

    Thread-safe: concurrent callers block on DOWNLOAD_LOCK and the first
    one to enter performs the download. Returns True on success, False if
    the download failed (the error is printed, not raised).
    """
    global MODELS_READY

    with DOWNLOAD_LOCK:
        # Double-check under the lock: another thread may have finished,
        # or the files may already be baked into the image.
        if MODELS_READY or check_models_exist():
            MODELS_READY = True
            return True

        print("📥 Downloading models (first time only)...")
        try:
            # Imported lazily so the app can start before huggingface_hub
            # is needed.
            from huggingface_hub import hf_hub_download

            # Download only the essential models (skip Michelangelo for now)
            models_to_download = [
                {
                    "repo_id": "Seed3D/MagicArticulate",
                    "filename": "skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
                    "local_dir": ".",
                },
                {
                    "repo_id": "Seed3D/MagicArticulate",
                    "filename": "skeleton_ckpt/checkpoint_trainonv2_hier.pth",
                    "local_dir": ".",
                },
            ]

            for i, model_info in enumerate(models_to_download, 1):
                print(f"[{i}/{len(models_to_download)}] Downloading {model_info['filename']}...")
                hf_hub_download(
                    repo_id=model_info["repo_id"],
                    filename=model_info["filename"],
                    local_dir=model_info["local_dir"],
                )
                print(f"✓ Downloaded: {model_info['filename']}")

            MODELS_READY = True
            print("✅ All models ready!")
            return True

        except Exception as e:
            # Best-effort: report and let the caller surface a UI error.
            print(f"❌ Download failed: {e}")
            return False


def initialize_inferencer():
    """Create the global SkeletonInferencer on first call and return it.

    Must only be called after the checkpoints exist on disk. Uses fp16 on
    CUDA and fp32 on CPU.
    """
    global INFERENCER

    if INFERENCER is None:
        print("🔧 Initializing inferencer...")
        INFERENCER = SkeletonInferencer(
            device=DEVICE,
            precision="fp16" if DEVICE == "cuda" else "fp32",
        )
        print("✅ Inferencer ready!")

    return INFERENCER


def process_mesh(
    input_file,
    num_points=8192,
    use_marching_cubes=False,
    sequence_type="spatial",
    progress=gr.Progress(),
):
    """Run skeleton inference on an uploaded mesh (lazy model loading).

    Args:
        input_file: path (str) or file wrapper from the gr.File component.
        num_points: point-cloud sample size fed to the inferencer.
        use_marching_cubes: whether to remesh via marching cubes first.
        sequence_type: "spatial" or "hierarchical" token ordering.
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        (downloadable files list, skeleton .obj path, rig .txt path,
        status markdown). On error the first three are None.
    """
    try:
        if input_file is None:
            return None, None, None, "❌ Please upload a mesh file"

        # BUG FIX: gr.File(type="filepath") passes a plain str; older Gradio
        # versions pass a tempfile wrapper exposing .name. Accept both.
        input_path = input_file if isinstance(input_file, str) else input_file.name

        # Download models on first use
        progress(0.0, desc="🔍 Checking models...")
        if not MODELS_READY:
            progress(0.05, desc="📥 Downloading models (this may take 20-30s, only happens once)...")
            if not download_models_once():
                return None, None, None, "❌ Failed to download models. Please try again."

        progress(0.15, desc="⚙️ Loading model...")
        inferencer = initialize_inferencer()

        # BUG FIX: the original wrapped this in `with tempfile.TemporaryDirectory()`,
        # which deleted the outputs before Gradio could serve the returned
        # paths. mkdtemp() persists until the OS cleans /tmp.
        temp_path = Path(tempfile.mkdtemp(prefix="magicarticulate_"))

        progress(0.25, desc="📐 Processing mesh...")

        # Validate the file extension before doing any heavy work.
        file_ext = Path(input_path).suffix.lower()
        if file_ext not in ['.obj', '.ply', '.stl']:
            return None, None, None, f"❌ Unsupported file format: {file_ext}"

        progress(0.35, desc="🦴 Generating skeleton...")

        # Run inference
        results = inferencer.infer(
            input_path=input_path,
            output_dir=str(temp_path),
            input_pc_num=num_points,
            apply_marching_cubes=use_marching_cubes,
            sequence_type=sequence_type,
        )

        progress(0.8, desc="💾 Preparing outputs...")

        # Output files follow the inferencer's "<stem>_<suffix>" convention.
        file_name = Path(input_path).stem
        skeleton_file = temp_path / f"{file_name}_skel.obj"
        rig_file = temp_path / f"{file_name}_pred.txt"
        mesh_file = temp_path / f"{file_name}_mesh.obj"

        output_files = []
        info_text = "✅ **Processing Complete!**\n\n"

        if skeleton_file.exists():
            output_files.append(str(skeleton_file))
            info_text += f"- Skeleton: `{skeleton_file.name}`\n"

        if rig_file.exists():
            output_files.append(str(rig_file))
            rig_data = rig_file.read_text()
            info_text += f"- Rig data: `{rig_file.name}` ({len(rig_data.splitlines())} lines)\n"

        if mesh_file.exists():
            output_files.append(str(mesh_file))
            info_text += f"- Normalized mesh: `{mesh_file.name}`\n"

        info_text += "\n**Statistics:**\n"
        info_text += f"- Joints: {results.get('num_joints', 'N/A')}\n"
        info_text += f"- Bones: {results.get('num_bones', 'N/A')}\n"
        info_text += f"- Processing time: {results.get('time', 0):.2f}s\n"

        progress(1.0, desc="✅ Done!")

        # Free GPU memory between requests.
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        gc.collect()

        return (
            output_files,
            str(skeleton_file) if skeleton_file.exists() else None,
            str(rig_file) if rig_file.exists() else None,
            info_text,
        )

    except Exception as e:
        # Top-level UI boundary: report the error to the user, log the
        # traceback server-side, never crash the worker.
        error_msg = f"❌ **Error:** {str(e)}"
        print(f"Error processing mesh: {e}")
        import traceback
        traceback.print_exc()
        return None, None, None, error_msg


def create_interface():
    """Build and return the Gradio Blocks UI (no launching here)."""
    with gr.Blocks(title="MagicArticulate - 3D Mesh Rigging") as demo:
        gr.Markdown("""
        # 🎨 MagicArticulate: Make Your 3D Models Articulation-Ready

        **CVPR 2025** | Upload a 3D mesh and automatically generate skeletal rigging for animation.

        ⚠️ **First inference will download models (~8GB, takes 20-30s)**. Subsequent inferences are fast!

        Supported formats: `.obj`, `.ply`, `.stl`
        """)

        # Surface the model-download status up front.
        if check_models_exist():
            gr.Markdown("✅ **Models ready!** You can start immediately.")
        else:
            gr.Markdown("📥 **Models will download on first use** (only happens once)")

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 📤 Input")

                input_file = gr.File(
                    label="Upload 3D Mesh",
                    file_types=[".obj", ".ply", ".stl"],
                    type="filepath",
                )

                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    num_points = gr.Slider(
                        minimum=2048,
                        maximum=16384,
                        value=8192,
                        step=1024,
                        label="Point Cloud Size",
                        info="Higher values = more accurate but slower",
                    )
                    use_marching_cubes = gr.Checkbox(
                        label="Use Marching Cubes",
                        value=False,
                        info="Better quality but slower",
                    )
                    sequence_type = gr.Radio(
                        choices=["spatial", "hierarchical"],
                        value="spatial",
                        label="Sequence Ordering",
                        info="Spatial is faster, hierarchical preserves hierarchy",
                    )

                process_btn = gr.Button("🚀 Generate Rigging", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### 📥 Output")

                info_output = gr.Markdown()

                with gr.Tabs():
                    with gr.Tab("📦 Download Files"):
                        output_files = gr.File(
                            label="Generated Files",
                            file_count="multiple",
                        )

                    with gr.Tab("🦴 Skeleton"):
                        skeleton_viewer = gr.Model3D(
                            label="Generated Skeleton",
                            height=400,
                        )

                    with gr.Tab("📄 Rig Data"):
                        rig_text = gr.File(
                            label="Rig Text File",
                        )

        gr.Markdown("""
        ### 📖 How to Use
        1. Upload your 3D mesh file (`.obj`, `.ply`, or `.stl`)
        2. (First time only) Wait for model download (~20-30s)
        3. Click "Generate Rigging" and wait for processing
        4. Download the generated skeleton and rigging files

        ### 💡 Tips
        - **Memory limited?** Use fewer points (e.g., 4096)
        - **Better quality?** Enable Marching Cubes (slower)

        ### 📚 Citation
        ```
        @inproceedings{song2025magicarticulate,
            title={MagicArticulate: Make Your 3D Models Articulation-Ready},
            author={Song, Chaoyue and Zhang, Jianfeng and others},
            booktitle={CVPR},
            year={2025}
        }
        ```
        """)

        # Connect components
        process_btn.click(
            fn=process_mesh,
            inputs=[input_file, num_points, use_marching_cubes, sequence_type],
            outputs=[output_files, skeleton_viewer, rig_text, info_output],
        )

    return demo


if __name__ == "__main__":
    print("🚀 Starting MagicArticulate Space...")

    # Report model availability at startup (download is deferred to first use).
    if check_models_exist():
        print("✅ Models found, ready to serve!")
    else:
        print("⏳ Models will be downloaded on first inference")

    # Create and launch interface immediately
    demo = create_interface()
    demo.queue(max_size=3)  # Limit concurrent requests
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )