# omnihuman_generator.py
import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import tempfile
import logging

# Set up logging so errors show up clearly
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def create_gradio_interface():
    """
    Build the Gradio interface for human motion video generation.
    Uses AnimateDiff-Lightning as a placeholder until OmniHuman 1.5 is released.
    """
    # Use a lightweight base model that works on both CPU and GPU
    MODEL_ID = "emilianJR/epiCRealism"
    # AnimateDiff-Lightning ships as safetensors checkpoints in the ByteDance
    # repo; the 2-step variant is faster than the 4-step one.
    LIGHTNING_REPO = "ByteDance/AnimateDiff-Lightning"
    LIGHTNING_CKPT = "animatediff_lightning_2step_diffusers.safetensors"

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    pipe = None
    try:
        logger.info("📦 Loading AnimateDiff-Lightning model...")
        # The Lightning motion module is loaded as a state dict into a bare
        # MotionAdapter, not via MotionAdapter.from_pretrained()
        adapter = MotionAdapter().to(device, dtype)
        adapter.load_state_dict(
            load_file(hf_hub_download(LIGHTNING_REPO, LIGHTNING_CKPT), device=device)
        )
        pipe = AnimateDiffPipeline.from_pretrained(
            MODEL_ID,
            motion_adapter=adapter,
            torch_dtype=dtype,
        )
        # Lightning checkpoints are distilled for trailing-timestep Euler sampling
        pipe.scheduler = EulerDiscreteScheduler.from_config(
            pipe.scheduler.config,
            timestep_spacing="trailing",
            beta_schedule="linear",
        )
        if device == "cuda":
            pipe.enable_vae_slicing()
            pipe.enable_model_cpu_offload()
            logger.info("✅ Model loaded on GPU with CPU offload")
        else:
            pipe.to("cpu")
            logger.info("✅ Model loaded on CPU")
    except Exception as e:
        logger.error(f"❌ Failed to load model: {e}")
        pipe = None
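    # Sketch (assumption, not in the original code): the same repo also ships
    # 1-, 4-, and 8-step distilled checkpoints; selecting one only changes the
    # checkpoint filename, e.g.:
    #   step = 4
    #   ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"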
    def generate_human_video(prompt, steps=2, seed=42, progress=gr.Progress()):
        if pipe is None:
            raise gr.Error("Model failed to load. Please refresh or try later.")
        if not prompt.strip():
            prompt = "A person walking confidently, cinematic, slow motion"
        generator = torch.Generator(device="cpu").manual_seed(int(seed))
        try:
            progress(0.3, desc="Generating frames...")
            output = pipe(
                prompt=prompt,
                negative_prompt="blurry, low quality, distorted, deformed, bad anatomy",
                num_inference_steps=int(steps),
                guidance_scale=1.0,  # Lightning models are distilled for CFG-free sampling
                generator=generator,
            )
            video_frames = output.frames[0]
            progress(0.8, desc="Exporting video...")
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmpfile:
                output_path = tmpfile.name
            export_to_video(video_frames, output_path, fps=8)
            progress(1.0, desc="✅ Done!")
            return output_path
        except Exception as e:
            logger.error(f"Generation error: {e}")
            # Raise a gr.Error instead of returning a string, since the output
            # component is a gr.Video and expects a file path
            raise gr.Error(f"Generation failed: {e}")
    # === BUILD INTERFACE ===
    with gr.Blocks(title="OmniHuman Video Generator") as demo:
        gr.Markdown("# 🎬 OmniHuman 1.5 Demo (Community)")
        gr.Markdown("""
        > ⚠️ This is a **placeholder demo** using `AnimateDiff-Lightning`.
        > Official **OmniHuman 1.5** is not released yet.
        > Visit [OmniHuman Lab](https://omnihuman-lab.github.io/) for updates.
        """)
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(
                    label="Prompt (describe a human action)",
                    value="A dancer spinning gracefully under spotlight, slow motion, cinematic"
                )
                with gr.Row():
                    steps = gr.Slider(1, 4, value=2, step=1, label="Steps (2 recommended)")
                    seed = gr.Number(value=42, label="Seed", precision=0)
                btn = gr.Button("Generate Video 🚀", variant="primary")
            with gr.Column():
                output = gr.Video(label="Result")
        gr.Examples(
            [
                ["A martial artist performing flying kick, slow motion, sweat particles"],
                ["An old man walking through autumn leaves, emotional, film grain"],
                ["A woman laughing while twirling in summer dress, golden hour lighting"]
            ],
            inputs=prompt
        )
        btn.click(
            fn=generate_human_video,
            inputs=[prompt, steps, seed],
            outputs=output,
            show_progress="full"
        )
        gr.Markdown("💡 Use motion verbs: dancing, running, jumping, walking, spinning, etc.")

    return demo
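
# Minimal entry point (a sketch; the original listing stops at
# create_gradio_interface, so the launch code below is an assumption).
# A Space running this file needs the interface built and launched at module level.
if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.queue()
    demo.launch()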