# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
ActionMesh Gradio Demo

A complete demo for video-to-4D mesh generation using ActionMesh.
Input: Video file or list of images
Output: Animated GLB mesh with shape key animation
"""

import glob
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path

import gradio as gr
import spaces
import torch

# Configure logging for actionmesh modules
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Path to examples directory
EXAMPLES_DIR = Path(__file__).parent / "assets"


# --- Setup functions ---
def setup_blender() -> Path:
    """
    Download and setup Blender 3.5.1 for Linux x64.

    First checks if the Blender archive exists in the home directory.
    If found, extracts it from there. Otherwise, downloads from the
    official release page. Returns the path to the blender executable.

    Returns:
        Path to the blender executable.
    """
    # Local imports: only needed when Blender is not yet installed.
    import tarfile
    import urllib.request

    # Define paths
    repo_dir = Path(__file__).parent.parent
    demo_dir = Path(__file__).parent
    third_party_dir = repo_dir / "third_party"
    blender_archive_name = "blender-3.5.1-linux-x64.tar.xz"
    blender_archive = third_party_dir / blender_archive_name
    local_blender_archive = demo_dir / blender_archive_name
    blender_dir = third_party_dir / "blender-3.5.1-linux-x64"
    blender_executable = blender_dir / "blender"

    # Create third_party directory if it doesn't exist
    third_party_dir.mkdir(parents=True, exist_ok=True)

    # Check if Blender is already installed
    if blender_executable.exists():
        print(f"Blender already installed at {blender_executable}")
        return blender_executable

    # Determine which archive to use for extraction: a bundled archive next to
    # this file wins, then a previously downloaded one, else download fresh.
    archive_to_extract = None
    if local_blender_archive.exists():
        print(f"Found Blender archive at: {local_blender_archive}")
        archive_to_extract = local_blender_archive
    elif blender_archive.exists():
        archive_to_extract = blender_archive
    else:
        # Download URL
        blender_url = (
            "https://download.blender.org/release/Blender3.5/"
            "blender-3.5.1-linux-x64.tar.xz"
        )
        print(f"Downloading Blender from {blender_url}...")
        try:
            urllib.request.urlretrieve(blender_url, blender_archive)
            print("Blender downloaded successfully.")
            archive_to_extract = blender_archive
        except Exception as e:
            raise RuntimeError(f"Failed to download Blender: {e}") from e

    # Extract the archive
    print(f"Extracting Blender from {archive_to_extract}...")
    try:
        with tarfile.open(archive_to_extract, "r:xz") as tar:
            tar.extractall(path=third_party_dir)
        print("Blender extracted successfully.")
    except Exception as e:
        # Clean up partial extraction so a retry starts from a clean state.
        if blender_dir.exists():
            shutil.rmtree(blender_dir)
        raise RuntimeError(f"Failed to extract Blender: {e}") from e

    # Verify installation
    if not blender_executable.exists():
        raise RuntimeError(f"Blender executable not found: {blender_executable}")

    print(f"Blender installed successfully at {blender_executable}")
    return blender_executable


def setup_actionmesh():
    """Clone and install ActionMesh if not already installed.

    Returns:
        Path to the ActionMesh cache directory (~/.cache/actionmesh).
    """
    cache_dir = Path.home() / ".cache" / "actionmesh"

    try:
        import actionmesh  # noqa: F401

        print("ActionMesh already installed.")
        # Still need to add paths for current process
        actionmesh_path = str(cache_dir.resolve())
        if actionmesh_path not in sys.path:
            sys.path.insert(0, actionmesh_path)
        triposg_path = str((cache_dir / "third_party" / "TripoSG").resolve())
        if triposg_path not in sys.path:
            sys.path.insert(0, triposg_path)
        return cache_dir
    except ImportError:
        pass

    print("Cloning ActionMesh...")
    # Start from a clean checkout; a stale/partial clone would break install.
    if cache_dir.exists():
        shutil.rmtree(cache_dir)
    cache_dir.parent.mkdir(parents=True, exist_ok=True)

    subprocess.run(
        [
            "git",
            "clone",
            "https://github.com/facebookresearch/actionmesh.git",
            str(cache_dir),
        ],
        check=True,
    )
    print("ActionMesh cloned successfully.")

    # Configure git to use HTTPS instead of SSH (for submodules)
    subprocess.run(
        [
            "git",
            "config",
            "--global",
            "url.https://github.com/.insteadOf",
            "git@github.com:",
        ],
        check=True,
    )

    # Initialize submodules
    print("Initializing submodules...")
    subprocess.run(
        ["git", "submodule", "update", "--init", "--recursive"],
        cwd=cache_dir,
        check=True,
    )
    print("Submodules initialized successfully.")

    # Install actionmesh in editable mode (ignore Python version requirement)
    print("Installing ActionMesh...")
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "-e", ".", "--ignore-requires-python"],
        cwd=cache_dir,
        check=True,
    )
    print("ActionMesh installed successfully.")

    # Add actionmesh to Python path for current process
    actionmesh_path = str(cache_dir.resolve())
    if actionmesh_path not in sys.path:
        sys.path.insert(0, actionmesh_path)

    # Add TripoSG (submodule) to Python path for current process
    triposg_path = str((cache_dir / "third_party" / "TripoSG").resolve())
    if triposg_path not in sys.path:
        sys.path.insert(0, triposg_path)

    return cache_dir


def setup_environment():
    """Setup the complete environment for ActionMesh.

    Returns:
        Path to the Blender executable installed by :func:`setup_blender`.
    """
    print("=" * 50)
    print("Setting up ActionMesh environment...")
    print("=" * 50)

    # Clone and install ActionMesh if needed
    setup_actionmesh()
    blender_path = setup_blender()

    print("=" * 50)
    print("Environment setup complete!")
    print("=" * 50)
    return blender_path


# Run setup on import
blender_path = setup_environment()

# --- Import ActionMesh modules after setup ---
from actionmesh.io.glb_export import create_animated_glb
from actionmesh.io.mesh_io import save_deformation
from actionmesh.io.video_input import load_frames
from actionmesh.render.utils import save_rgba_video

from gradio_pipeline import GradioPipeline

# Global pipeline instance (loaded on CPU at startup)
pipeline: GradioPipeline | None = None


def get_available_examples() -> list[tuple[str, str]]:
    """
    Get available examples from the assets directory.

    Returns:
        List of tuples (display_name, example_dir_path) for each example.
    """
    examples = []
    if EXAMPLES_DIR.exists():
        for example_dir in sorted(EXAMPLES_DIR.iterdir()):
            if example_dir.is_dir():
                # Get the first image as a thumbnail
                images = sorted(glob.glob(str(example_dir / "*.png")))
                if images:
                    display_name = example_dir.name.replace("_", " ").title()
                    examples.append((display_name, str(example_dir)))
    return examples


def get_example_thumbnails() -> list[str]:
    """
    Get thumbnail images/GIFs for all available examples.

    Looks for a GIF file named "{folder_name}.gif" in the same parent
    directory as the example folder. Falls back to the first PNG image
    if no GIF is found.

    Returns:
        List of paths to the GIF or first image of each example.
    """
    thumbnails = []
    if EXAMPLES_DIR.exists():
        for example_dir in sorted(EXAMPLES_DIR.iterdir()):
            if example_dir.is_dir():
                # Try to find a GIF with the same name as the folder
                gif_path = example_dir.parent / f"{example_dir.name}.gif"
                if gif_path.exists():
                    thumbnails.append(str(gif_path))
                else:
                    # Fall back to first PNG image
                    images = sorted(glob.glob(str(example_dir / "*.png")))
                    if images:
                        thumbnails.append(images[0])
    return thumbnails


def load_example_images(evt: gr.SelectData) -> list[str]:
    """
    Load images from the selected example.

    Args:
        evt: Gradio SelectData event containing the selected index.

    Returns:
        List of image paths from the selected example.
    """
    examples = get_available_examples()
    if evt.index < len(examples):
        _, example_dir = examples[evt.index]
        images = sorted(glob.glob(os.path.join(example_dir, "*.png")))
        return images
    return []


def load_pipeline_cpu() -> GradioPipeline:
    """Load the ActionMesh pipeline on CPU (called once at module load)."""
    global pipeline
    if pipeline is None:
        print("Loading ActionMesh pipeline on CPU...")
        # Get config path from actionmesh cache directory
        cache_dir = Path.home() / ".cache" / "actionmesh"
        config_dir = str(cache_dir / "actionmesh" / "configs")
        pipeline = GradioPipeline(
            config_name="actionmesh.yaml",
            config_dir=config_dir,
        )
        print("Pipeline loaded on CPU successfully.")
    return pipeline


# Initialize pipeline on CPU at module load (outside GPU time)
print("Initializing pipeline on CPU...")
load_pipeline_cpu()
print("Pipeline ready (on CPU).")


def _run_actionmesh_impl(
    video_input: str | None,
    image_files: list[str] | None,
    seed: int,
    reference_frame: int,
    quality_mode: str,
    progress: gr.Progress = gr.Progress(),
) -> tuple[str | None, str | None, str | None, str]:
    """
    Internal implementation of ActionMesh pipeline.

    Args:
        video_input: Path to input video file.
        image_files: List of paths to input image files.
        seed: Random seed for generation.
        reference_frame: Reference frame index (1-indexed).
        quality_mode: Quality mode string.
        progress: Gradio progress tracker.

    Returns:
        Tuple of (animated_glb_path, animated_glb_path, input_video_path,
        status_message).
    """
    # Create temporary output directory
    output_dir = tempfile.mkdtemp(prefix="actionmesh_")

    try:
        # Determine input source
        progress(0.0, desc="Loading input...")
        if video_input is not None:
            input_path = video_input
        elif image_files is not None and len(image_files) > 0:
            # Create temp directory with images, named so frame order is kept
            img_dir = os.path.join(output_dir, "input_images")
            os.makedirs(img_dir, exist_ok=True)
            for i, img_path in enumerate(image_files):
                ext = Path(img_path).suffix
                shutil.copy(img_path, os.path.join(img_dir, f"{i:04d}{ext}"))
            input_path = img_dir
        else:
            return None, None, None, "Error: Please provide a video or images."

        # Load input (pipeline operates on exactly 16 frames)
        input_data = load_frames(path=input_path, max_frames=16)
        if input_data.n_frames < 16:
            return None, None, None, "Error: At least 16 frames are required."

        # Get pipeline and move to GPU
        progress(0.0, desc="Moving pipeline to GPU...")
        pipe = load_pipeline_cpu()
        pipe.to("cuda")

        # Clear GPU cache before inference
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        # Run inference
        progress(0.0, desc="Starting pipeline...")

        # Set steps based on quality mode
        if quality_mode == "⚡ Fast":
            stage_0_steps = 50
            stage_1_steps = 15
        else:  # High Quality
            stage_0_steps = 100
            stage_1_steps = 30

        # Create progress callback for the pipeline
        def pipeline_progress_callback(value: float, desc: str) -> None:
            progress(value, desc=desc)

        meshes = pipe(
            input=input_data,
            anchor_idx=reference_frame - 1,  # Convert from 1-indexed UI to 0-indexed
            stage_0_steps=stage_0_steps,
            stage_1_steps=stage_1_steps,
            seed=seed,
            progress_callback=pipeline_progress_callback,
        )

        # Save input video
        input_video_path = f"{output_dir}/input_video.mp4"
        save_rgba_video(input_data.frames, output_path=input_video_path)

        if not meshes:
            return None, None, None, "Error: No meshes generated."

        # Save deformations and create animated GLB
        progress(1.0, desc="Creating animated GLB...")
        vertices_path, faces_path = save_deformation(
            meshes, path=f"{output_dir}/deformations"
        )
        animated_glb_path = f"{output_dir}/animated_mesh.glb"
        create_animated_glb(
            blender_path=blender_path,
            vertices_npy=vertices_path,
            faces_npy=faces_path,
            output_glb=animated_glb_path,
            fps=8,
        )

        progress(1.0, desc="Done!")
        status = f"Success! Generated animated mesh with {len(meshes)} frames."
        return animated_glb_path, animated_glb_path, input_video_path, status

    except Exception as e:
        # Keep the demo alive on failure, but record the full traceback so
        # the error is not silently reduced to a one-line status message.
        logger.exception("ActionMesh pipeline failed")
        return None, None, None, f"Error: {str(e)}"


@spaces.GPU(duration=120)
@torch.no_grad()
def _run_actionmesh_fast(
    video_input: str | None,
    image_files: list[str] | None,
    seed: int,
    reference_frame: int,
    quality_mode: str,
    progress: gr.Progress = gr.Progress(),
) -> tuple[str | None, str | None, str | None, str]:
    """Fast mode wrapper with 120s GPU duration."""
    return _run_actionmesh_impl(
        video_input, image_files, seed, reference_frame, quality_mode, progress
    )


@spaces.GPU(duration=240)
@torch.no_grad()
def _run_actionmesh_hq(
    video_input: str | None,
    image_files: list[str] | None,
    seed: int,
    reference_frame: int,
    quality_mode: str,
    progress: gr.Progress = gr.Progress(),
) -> tuple[str | None, str | None, str | None, str]:
    """High quality mode wrapper with 240s GPU duration."""
    return _run_actionmesh_impl(
        video_input, image_files, seed, reference_frame, quality_mode, progress
    )


def run_actionmesh(
    video_input: str | None,
    image_files: list[str] | None,
    seed: int,
    reference_frame: int,
    quality_mode: str,
    progress: gr.Progress = gr.Progress(),
) -> tuple[str | None, str | None, str | None, str]:
    """
    Run ActionMesh pipeline on input video or images.

    Dispatches to the appropriate GPU-decorated function based on quality
    mode.

    Args:
        video_input: Path to input video file.
        image_files: List of paths to input image files.
        seed: Random seed for generation.
        reference_frame: Reference frame index (1-indexed).
        quality_mode: Quality mode string.
        progress: Gradio progress tracker.

    Returns:
        Tuple of (animated_glb_path, animated_glb_path, input_video_path,
        status_message).
    """
    if quality_mode == "⚡ Fast":
        return _run_actionmesh_fast(
            video_input, image_files, seed, reference_frame, quality_mode, progress
        )
    else:
        return _run_actionmesh_hq(
            video_input, image_files, seed, reference_frame, quality_mode, progress
        )


def create_demo() -> gr.Blocks:
    """Create the Gradio demo interface."""
    with gr.Blocks(
        title="ActionMesh - Video to 4D Mesh",
        theme=gr.themes.Soft(),
    ) as demo:
        gr.Markdown(
            """
# 🎬 ActionMesh: Video to Animated 3D Mesh

[**Project Page**](https://remysabathier.github.io/actionmesh/) · [**GitHub**](https://github.com/facebookresearch/ActionMesh)

[Remy Sabathier](https://remysabathier.github.io/RemySabathier/), [David Novotny](https://d-novotny.github.io/), [Niloy J. Mitra](http://www0.cs.ucl.ac.uk/staff/n.mitra/), [Tom Monnier](https://tmonnier.com/)

**[Meta Reality Labs](https://ai.facebook.com/research/)** · **[SpAItial](https://www.spaitial.ai/)** · **[University College London](https://geometry.cs.ucl.ac.uk/)**

Generate animated 3D meshes from video input using ActionMesh.

**Instructions:**
1. Upload a video OR multiple images ⚠️ *Input is limited to exactly 16 frames. Extra frames will be discarded.*
2. Click "Generate"
3. View the animated 4D mesh in the viewer
4. Download the animated GLB mesh (ready for Blender)

⏱️ **Performance:** Inference on HuggingFace Space (ZeroGPU) is 2x slower than running locally. We recommend **Fast mode** (90s). For faster inference, run [locally via GitHub](https://github.com/facebookresearch/ActionMesh).
            """
        )

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")
                gr.Markdown(
                    """
ℹ️ **Input should have a uniform background**. See our [SAM2 tutorial](https://github.com/facebookresearch/actionmesh/blob/main/assets/docs/sam2_extraction_guide.md) to preprocess any video with background removal.
                    """
                )
                with gr.Tab("Video"):
                    video_input = gr.Video(
                        label="Upload Video",
                        sources=["upload"],
                    )
                with gr.Tab("Images"):
                    image_input = gr.File(
                        label="Upload Images (multiple frames)",
                        file_count="multiple",
                        file_types=["image"],
                    )

                # Examples gallery
                example_thumbnails = get_example_thumbnails()
                if example_thumbnails:
                    gr.Markdown("### 📁 Example videos")
                    gr.Markdown("*Click a video example to load it*")
                    example_labels = [e[0] for e in get_available_examples()]
                    examples_gallery = gr.Gallery(
                        value=[
                            (thumb, label)
                            for thumb, label in zip(example_thumbnails, example_labels)
                        ],
                        columns=3,
                        rows=2,
                        height=350,
                        allow_preview=False,
                        object_fit="cover",
                    )

                gr.Markdown("### Parameters")
                quality_mode = gr.Radio(
                    label="Generation Mode",
                    choices=["⚡ Fast", "✨ High Quality"],
                    value="⚡ Fast",
                    interactive=True,
                    info="⚡ Fast: ~90s, ✨ High Quality: ~3min30s",
                )
                reference_frame = gr.Slider(
                    minimum=1,
                    maximum=16,
                    value=1,
                    step=1,
                    label="Reference Frame",
                    info="Frame used as reference for 3D generation (1 recommended)",
                )
                seed = gr.Slider(
                    minimum=0,
                    maximum=100,
                    value=44,
                    step=1,
                    label="Random Seed",
                )
                generate_btn = gr.Button("🎬 Generate", variant="primary", size="lg")

            with gr.Column(scale=2):
                gr.Markdown("### Output")
                status_text = gr.Textbox(
                    label="Status",
                    interactive=False,
                    value="Ready",
                    lines=2,
                )
                gr.Markdown("### 4D Viewer")
                # Toggle between input video and 4D mesh viewer
                viewer_toggle = gr.Radio(
                    label="Display Mode",
                    choices=["4D Mesh Viewer", "Input Video"],
                    value="4D Mesh Viewer",
                    interactive=True,
                )
                # 4D mesh display showing animated GLB
                mesh_display = gr.Model3D(
                    label="4D Mesh Viewer",
                    clear_color=[0.9, 0.9, 0.9, 1.0],
                    height=500,
                    visible=True,
                )
                # Input video display
                input_video_display = gr.Video(
                    label="Input Video",
                    height=500,
                    visible=False,
                    interactive=False,
                )
                # Interaction legend for 3D viewer
                gr.Markdown(
                    """
🖱️ Drag to rotate · Scroll to zoom · Right-click drag to pan
                    """,
                    visible=True,
                )
                # Download button for the animated GLB
                download_glb = gr.DownloadButton(
                    label="Download Animated GLB",
                    visible=True,
                )

        # State to store input video path
        input_video_state = gr.State(value=None)

        # Toggle handler to switch between mesh viewer and input video
        def toggle_display(choice: str, video_path: str | None):
            if choice == "4D Mesh Viewer":
                return gr.update(visible=True), gr.update(visible=False)
            else:
                return gr.update(visible=False), gr.update(
                    visible=True, value=video_path
                )

        viewer_toggle.change(
            fn=toggle_display,
            inputs=[viewer_toggle, input_video_state],
            outputs=[mesh_display, input_video_display],
        )

        # Generate button click - runs pipeline and shows animated GLB
        generate_btn.click(
            fn=run_actionmesh,
            inputs=[video_input, image_input, seed, reference_frame, quality_mode],
            outputs=[
                mesh_display,
                download_glb,
                input_video_state,
                status_text,
            ],
        )

        # Example gallery click - loads example images into the image input
        if example_thumbnails:
            examples_gallery.select(
                fn=load_example_images,
                inputs=None,
                outputs=image_input,
            )

        gr.Markdown(
            """
---
**Note:** This demo requires a GPU with sufficient VRAM.
            """
        )

    return demo


if __name__ == "__main__":
    demo = create_demo()
    demo.queue()
    demo.launch(share=True)