Spaces:
Sleeping
Sleeping
"""
Gradio UI that calls the remote animation server.

Set the ANIM_API_URL environment variable to point the client at the server:
    ANIM_API_URL=http://211.233.58.201:7788/
"""
| import os, logging | |
| from datetime import datetime | |
| import gradio as gr | |
| import httpx | |
| from gradio_client import Client, handle_file | |
# ------------------------ logging ------------------------ #
# Single root-level config; every message carries a timestamp and level.
_LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
log = logging.getLogger(__name__)
# ------------------ resolve server URL ------------------- #
DEFAULT_URL = "http://127.0.0.1:7862/"
REMOTE_URL = "http://211.233.58.201:7862/"

# The remote box is the default; operators override via ANIM_API_URL.
API_URL = os.getenv("ANIM_API_URL", REMOTE_URL)

# Inside an HF Space container a loopback address can never reach the
# animation server, so fail fast instead of timing out later.
_inside_hf_space = os.getenv("HF_SPACE") == "true"
if _inside_hf_space and "127.0.0.1" in API_URL:
    raise RuntimeError(
        "HF Space 컨ν μ΄λ μμμλ κ³΅μΈ IPλ λλ©μΈμ ANIM_API_URL λ‘ μ§μ ν΄μΌ ν©λλ€."
    )
# ------------------- health-check setup ------------------ #
# Generous read/write budgets: the animation endpoint can take minutes.
TIMEOUT = httpx.Timeout(connect=30.0, pool=30.0, read=120.0, write=120.0)
def test_api_connection():
    """Ping the server's /healthz endpoint.

    Returns a ``(ok, message)`` tuple where *ok* is a bool and *message*
    is a timestamped, human-readable status line (also logged).
    """
    stamp = datetime.now().strftime("%H:%M:%S")
    health_url = f"{API_URL.rstrip('/')}/healthz"
    try:
        response = httpx.get(health_url, timeout=TIMEOUT)
        ready = response.json().get("ready", False)
    except Exception as e:
        # Any failure (connect, HTTP, JSON decode) is reported, not raised.
        message = f"[{stamp}] μλ² μ°κ²° μ€ν¨ β : {e}"
        log.error(message)
        return False, message
    message = f"[{stamp}] μλ² μ°κ²° μ±κ³΅ β (ready={ready})"
    log.info(message)
    return True, message
# ------------------ animation generation ----------------- #
def _extract_video_path(obj):
    """Best-effort extraction of a video path/URL from a gradio_client result item.

    Accepts a plain string path or the dict shapes Gradio returns
    ({'video': ..., 'subtitles': ...} or FileData-style 'path'/'url'/'name').
    Returns None when no usable path is found (a warning is logged).
    """
    if isinstance(obj, str):
        return obj
    if isinstance(obj, dict):
        # Priority order matches the shapes observed from the server:
        # the {'video': ...} wrapper first, then generic FileData keys.
        for key in ("video", "path", "url", "name"):
            if key in obj:
                return obj[key]
        log.warning(f"Unexpected dict structure: {obj.keys()}")
        return None
    log.warning(f"Unexpected type: {type(obj)}")
    return None


def generate_animation(image, audio, guidance_scale, steps, progress=gr.Progress()):
    """Call the remote /generate_animation endpoint.

    Parameters
    ----------
    image, audio : str | None
        Filepaths from the Gradio inputs; both are required.
    guidance_scale : float
    steps : int
    progress : gr.Progress
        Default-instantiated on purpose — this is Gradio's progress-injection
        idiom, not an accidental mutable default.

    Returns ``(animation_path, side_by_side_path, log_text)``; on any failure
    returns ``(None, None, log_text)`` so the UI shows the error log instead
    of crashing.
    """
    start = datetime.now().strftime("%H:%M:%S")
    logs = [f"[{start}] μμ² μμ"]
    try:
        if image is None or audio is None:
            raise ValueError("μ΄λ―Έμ§μ μ€λμ€λ₯Ό λͺ¨λ μ λ‘λνμΈμ.")
        progress(0.05, desc="νμΌ μ€λΉ")
        client = Client(API_URL)
        progress(0.15, desc="μλ² νΈμΆ μ€β¦ (μ λΆ μμ κ°λ₯)")
        result = client.predict(
            image_path=handle_file(image),
            audio_path=handle_file(audio),
            guidance_scale=guidance_scale,
            steps=steps,
            api_name="/generate_animation"
        )
        progress(0.95, desc="κ²°κ³Ό μ 리")
        # The server is expected to return (animation, side_by_side, ...).
        if not (isinstance(result, (list, tuple)) and len(result) >= 2):
            raise RuntimeError(f"μμμΉ λͺ»ν λ°ν νμ: {type(result)}")
        anim_path = _extract_video_path(result[0])
        comp_path = _extract_video_path(result[1])
        if not (anim_path and comp_path):
            raise RuntimeError(f"λΉλμ€ κ²½λ‘ μΆμΆ μ€ν¨: {result}")
        logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] μ±κ³΅")
        return anim_path, comp_path, "\n".join(logs)
    except Exception as e:
        logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] μ€λ₯: {e}")
        log.error(f"Animation generation error: {e}", exc_info=True)
        return None, None, "\n".join(logs)
# ----------------------- UI definition ------------------- #
def _connection_status_text():
    """Run the health check and return only the message for display."""
    # test_api_connection returns (ok, message); the textbox shows the message.
    _ok, message = _connection_status_text_impl()
    return message


# Thin indirection kept separate so the click handler stays a zero-arg callable.
def _connection_status_text_impl():
    return test_api_connection()


with gr.Blocks(title="Animation Generator Client") as demo:
    gr.Markdown("# π¬ Animation Generator β Client UI")

    # Server status check.
    status_box = gr.Textbox(label="API μν", interactive=False)
    test_btn = gr.Button("μλ² μ°κ²° ν μ€νΈ")
    # BUG FIX: the handler returns (bool, str) but the original wired it to
    # outputs=[status_box, status_box] — the same component listed twice,
    # with a raw bool routed into a Textbox. Route only the message.
    test_btn.click(_connection_status_text, outputs=status_box)

    gr.Markdown("---")
    with gr.Row():
        with gr.Column():
            img_in = gr.Image(type="filepath", label="Portrait Image")
            aud_in = gr.Audio(type="filepath", label="Driving Audio")
            scale = gr.Slider(1, 10, value=3.0, step=0.1, label="Guidance Scale")
            steps = gr.Slider(5, 30, value=10, step=1, label="Inference Steps")
            gen_btn = gr.Button("π Generate")
        with gr.Column():
            anim_out = gr.Video(label="Animation Result")
            comp_out = gr.Video(label="Side-by-Side")
    with gr.Accordion("μ€ν λ‘κ·Έ", open=False):
        log_out = gr.Textbox(label="Logs", lines=12, max_lines=20, interactive=False)
    gen_btn.click(
        generate_animation,
        inputs=[img_in, aud_in, scale, steps],
        outputs=[anim_out, comp_out, log_out]
    )
# ------------------------- run --------------------------- #
if __name__ == "__main__":
    # Bounded queue so long-running generations don't pile up unbounded.
    app = demo.queue(max_size=4)
    app.launch(server_name="0.0.0.0", server_port=7860, show_api=False)