from main import run_app, run_train, run_inference
import spaces  # HF Spaces integration; imported for its side effects
from PIL import Image
import cv2
import os
import gradio as gr

# Gradio front-end: character images + a mocap video in, an animated
# render (plus per-frame galleries and pose data) out. All heavy lifting
# is delegated to main.run_app / main.run_train / main.run_inference.
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: every user-supplied input and the action buttons.
        with gr.Column():
            char_imgs = gr.Gallery(type="pil", label="Images of the Character")
            mocap = gr.Video(label="Motion-Capture Video")
            tr_steps = gr.Number(label="Training steps", value=10)
            inf_steps = gr.Number(label="Inference steps", value=10)
            fps = gr.Number(label="Output frame rate", value=12)
            modelId = gr.Text(label="Model Id", value="fine_tuned_pcdms")
            remove_bg = gr.Checkbox(label="Remove background", value=False)
            resize_inputs = gr.Checkbox(
                label="Resize images to match video", value=True
            )
            img_width = gr.Number(label="Output width", value=1920)
            img_height = gr.Number(label="Output height", value=1080)
            train_btn = gr.Button(value="Train")
            inference_btn = gr.Button(value="Inference")
            submit_btn = gr.Button(value="Generate")
        # Right column: generated outputs.
        with gr.Column():
            animation = gr.Video(label="Result")
            frames = gr.Gallery(type="pil", label="Frames", format="png")
            frames_thumb = gr.Gallery(
                type="pil", label="Thumbnails", format="png"
            )
            pose_coords = gr.JSON(label="Pose Coordinates")

    # "Generate" runs the full train+infer pipeline in one shot.
    submit_btn.click(
        run_app,
        inputs=[
            char_imgs, mocap, tr_steps, inf_steps,
            fps, remove_bg, resize_inputs,
        ],
        outputs=[animation, frames],
    )
    # "Train" only fine-tunes the model; it produces no UI outputs.
    train_btn.click(
        run_train,
        inputs=[char_imgs, tr_steps, modelId, remove_bg, resize_inputs],
        outputs=[],
    )
    # "Inference" runs generation against an already fine-tuned model.
    inference_btn.click(
        run_inference,
        inputs=[
            char_imgs, mocap, tr_steps, inf_steps, fps,
            modelId, img_width, img_height, remove_bg, resize_inputs,
        ],
        outputs=[animation, frames, frames_thumb, pose_coords],
    )

demo.launch(share=True)