# NOTE(review): removed Hugging Face Spaces file-viewer residue (pause-status
# lines, git blame hashes, and the line-number gutter) that was accidentally
# captured along with the source; it is not part of the program.
from main import run_app, run_train, run_inference, run_generate_frame, run_interpolate_frames
import spaces
from PIL import Image
import cv2
import os
import gradio as gr
# Gradio UI for the character-animation pipeline implemented in main.py.
# Layout: left column collects all inputs and action buttons; right column
# shows all outputs. Each button is wired to one of the run_* entry points.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # --- Input media ---
            char_imgs = gr.Gallery(type="pil", label="Images of the Character")
            mocap = gr.Video(label="Motion-Capture Video")
            frame_imgs = gr.Gallery(type="pil", label="Reference Images of Each Frame")
            poses = gr.JSON(label="Pose Coordinates")
            # --- Run parameters ---
            # NOTE(review): gr.Number yields floats; presumably the run_*
            # functions coerce step counts / fps to int — confirm in main.py.
            tr_steps = gr.Number(label="Training steps", value=10)
            inf_steps = gr.Number(label="Inference steps", value=10)
            fps = gr.Number(label="Output frame rate", value=12)
            modelId = gr.Text(label="Model Id", value="fine_tuned_pcdms")
            remove_bg = gr.Checkbox(label="Remove background", value=False)
            resize_inputs = gr.Checkbox(label="Resize images to match video", value=True)
            img_width = gr.Number(label="Output width", value=1920)
            img_height = gr.Number(label="Output height", value=1080)
            # --- Frame-interpolation inputs ---
            interp_frame1 = gr.Image(type="pil", label="Interpolation Start Frame")
            interp_frame2 = gr.Image(type="pil", label="Interpolation End Frame")
            times_to_interp = gr.Number(label="Times to Interpolate", value=1)
            # --- Actions ---
            train_btn = gr.Button(value="Train")
            inference_btn = gr.Button(value="Inference")
            generate_frame_btn = gr.Button(value="Generate Frame")
            submit_btn = gr.Button(value="Generate")
            interp_btn = gr.Button(value="Interpolate Frames")
        with gr.Column():
            # --- Outputs ---
            animation = gr.Video(label="Result")
            frames = gr.Gallery(type="pil", label="Frames", format="png")
            frames_thumb = gr.Gallery(type="pil", label="Thumbnails", format="png")
            pose_coords = gr.JSON(label="Pose Coordinates")
            reference = gr.Gallery(type="pil", label="Reference Images", format="png")

    # End-to-end: train + infer in one shot, producing the animation and frames.
    submit_btn.click(
        run_app,
        inputs=[char_imgs, mocap, tr_steps, inf_steps, fps, remove_bg, resize_inputs],
        outputs=[animation, frames],
    )
    # Training only — produces no UI output (side effect: fine-tuned model on disk).
    train_btn.click(
        run_train,
        inputs=[char_imgs, tr_steps, modelId, remove_bg, resize_inputs],
        outputs=[],
    )
    # Inference with a previously trained model identified by modelId.
    inference_btn.click(
        run_inference,
        inputs=[char_imgs, mocap, frame_imgs, tr_steps, inf_steps, fps, modelId,
                img_width, img_height, remove_bg, resize_inputs],
        outputs=[animation, frames, frames_thumb, pose_coords, reference],
    )
    # Single-frame generation from explicit pose coordinates.
    generate_frame_btn.click(
        run_generate_frame,
        inputs=[char_imgs, poses, tr_steps, inf_steps, modelId, img_width,
                img_height, remove_bg, resize_inputs],
        outputs=[frames, frames_thumb],
    )
    # In-between frame synthesis from two endpoint frames.
    interp_btn.click(
        run_interpolate_frames,
        inputs=[interp_frame1, interp_frame2, times_to_interp],
        outputs=[frames, frames_thumb],
    )

if __name__ == "__main__":
    # Guard so importing this module (e.g. from tests or tooling) does not
    # start the web server; Spaces runs this file as a script, so launch
    # behavior there is unchanged.
    demo.launch(share=True)