import os
# PyTorch 2.8 (temporary hack)
os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
# --- 1. Model Download and Setup (Diffusers Backend) ---
try:
    import spaces
except ImportError:
    # Stub so that @spaces.GPU is a no-op when running outside Hugging Face Spaces
    class spaces():
        def GPU(*args, **kwargs):
            def decorator(function):
                return function
            return decorator
import torch
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import imageio_ffmpeg
import tempfile
import shutil
import subprocess
import time
from datetime import datetime
import numpy as np
from PIL import Image
import random
import math
import traceback
import gc
from gradio_client import Client, handle_file # Import for API call
import zipfile
# Import the optimization module (it also exposes the compiled artifacts)
import optimization
# Import the pipeline optimization entry point from the same file
from optimization import optimize_pipeline_
# --- Constants and Model Loading ---
MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
# --- NEW: Flexible Dimension Constants ---
MAX_DIMENSION = 832
MIN_DIMENSION = 480
DIMENSION_MULTIPLE = 16
SQUARE_SIZE = 480
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
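# Example: the default 2.1 s at 24 fps requests round(2.1 * 24) = 50 frames, and the
# frame count is clamped to [MIN_FRAMES_MODEL, MAX_FRAMES_MODEL] = [8, 81], which is
# where the 0.3 s - 3.4 s duration range above comes from.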
# One-slot holders for values coming from the hidden debug UI (see bottom of file);
# when set, they override the user-facing inputs in generate_video().
input_image_debug_value = [None]
end_image_debug_value = [None]
prompt_debug_value = [None]
total_second_length_debug_value = [None]
resolution_debug_value = [None]
factor_debug_value = [None]
allocation_time_debug_value = [None]
default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "
transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
subfolder='transformer',
torch_dtype=torch.bfloat16,
device_map='cuda',
)
transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
subfolder='transformer_2',
torch_dtype=torch.bfloat16,
device_map='cuda',
)
pipe = WanImageToVideoPipeline.from_pretrained(
MODEL_ID,
transformer = transformer,
transformer_2 = transformer_2,
torch_dtype=torch.bfloat16,
)
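# Increasing the scheduler `shift` (8.0 here) skews the flow-matching timesteps toward
# the high-noise end of the schedule, which is assumed to suit the few-step
# Lightning-LoRA setup this demo runs at (8 inference steps by default).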
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
pipe.to('cuda')
# Reclaim memory freed during model loading before compiling
for i in range(3):
    gc.collect()
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
optimize_pipeline_(pipe,
image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
prompt='prompt',
height=MIN_DIMENSION,
width=MAX_DIMENSION,
num_frames=MAX_FRAMES_MODEL,
)
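# optimize_pipeline_ warms up / compiles the transformers ahead of time with the largest
# shapes this app can request (MAX_DIMENSION x MIN_DIMENSION at MAX_FRAMES_MODEL frames),
# so later calls with equal or smaller inputs should avoid recompilation.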
def _escape_html(s: str) -> str:
return (s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;"))
def _error_to_html(err: BaseException) -> str:
tb = traceback.format_exc()
return (
"<div style='padding:12px;border:1px solid #ff4d4f;background:#fff1f0;color:#a8071a;border-radius:8px;'>"
"<b>Generation failed</b><br/>"
f"<b>{_escape_html(type(err).__name__)}</b>: {_escape_html(str(err))}"
"<details style='margin-top:8px;'>"
"<summary>Show traceback</summary>"
f"<pre style='white-space:pre-wrap;margin-top:8px;'>{_escape_html(tb)}</pre>"
"</details>"
"</div>"
)
# 20250508 pftq: for saving prompt to mp4 metadata comments
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
try:
# Get the path to the bundled FFmpeg binary from imageio-ffmpeg
ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
# Check if input file exists
if not os.path.exists(input_file):
#print(f"Error: Input file {input_file} does not exist")
return False
# Create a temporary file path
temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
# FFmpeg command using the bundled binary
command = [
ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
'-i', input_file, # input file
'-metadata', f'comment={comments}', # set comment metadata
'-c:v', 'copy', # copy video stream without re-encoding
'-c:a', 'copy', # copy audio stream without re-encoding
'-y', # overwrite output file if it exists
temp_file # temporary output file
]
# Run the FFmpeg command
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode == 0:
# Replace the original file with the modified one
shutil.move(temp_file, input_file)
#print(f"Successfully added comments to {input_file}")
return True
else:
# Clean up temp file if FFmpeg fails
if os.path.exists(temp_file):
os.remove(temp_file)
#print(f"Error: FFmpeg failed with message:\n{result.stderr}")
return False
except Exception as e:
# Clean up temp file in case of other errors
if 'temp_file' in locals() and os.path.exists(temp_file):
os.remove(temp_file)
print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
return False
# --- 2. Image Processing and Application Logic ---
def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
"""Calls an external Gradio API to generate an image."""
if start_img is None:
raise gr.Error("Please provide a Start Frame first.")
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
start_img.save(tmpfile.name)
tmp_path = tmpfile.name
progress(0.1, desc="Connecting to image generation API...")
client = Client("multimodalart/nano-banana-private")
progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
try:
result = client.predict(
prompt=gen_prompt,
images=[
{"image": handle_file(tmp_path)}
],
manual_token=hf_token,
api_name="/unified_image_generator"
)
finally:
os.remove(tmp_path)
progress(1.0, desc="Done!")
print(result)
return result
def switch_to_upload_tab():
"""Returns a gr.Tabs update to switch to the first tab."""
return gr.Tabs(selected="upload_tab")
def process_image_for_video(image: Image.Image, resolution: int) -> Image.Image:
    """
    Resizes an image for video generation:
    - downscales when the pixel count exceeds the requested `resolution` budget,
    - upscales when the image is smaller than MIN_DIMENSION squared,
    - otherwise keeps the original size,
    snapping both sides to a multiple of DIMENSION_MULTIPLE in every case.
    """
width, height = image.size
if resolution < width * height:
scale = ((width * height) / resolution)**(.5)
new_width = width / scale
new_height = height / scale
final_width = int(math.floor(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
final_height = int(math.floor(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
elif width * height < (MIN_DIMENSION**2):
scale = ((MIN_DIMENSION**2) / (width * height))**(.5)
new_width = width * scale
new_height = height * scale
final_width = int(math.ceil(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
final_height = int(math.ceil(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
else:
final_width = int(round(width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
final_height = int(round(height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
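# Worked example (illustrative): a 1920x1080 input with resolution=500000 is downscaled
# by sqrt(1920*1080 / 500000) ≈ 2.036 to ≈ 943x530, then floored to multiples of 16,
# giving 928x528 (= 489,984 px, just under the requested budget).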
def resize_and_crop_to_match(target_image, reference_image):
    """Stretches the target image to the reference image's exact dimensions (no cropping, despite the name)."""
    ref_width, ref_height = reference_image.size
    return target_image.resize((ref_width, ref_height), Image.Resampling.LANCZOS)
def crop_to_match(target_image, reference_image):
"""Resizes and center-crops the target image to match the reference image's dimensions."""
ref_width, ref_height = reference_image.size
target_width, target_height = target_image.size
scale = max(ref_width / target_width, ref_height / target_height)
new_width, new_height = int(target_width * scale), int(target_height * scale)
resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
return resized.crop((left, top, left + ref_width, top + ref_height))
def init_view():
return gr.update(interactive = True)
def output_video_change(output_video):
print('Log output: ' + str(output_video))
return [gr.update(visible = True)] * 2
def generate_video(
start_image_pil,
end_image_pil,
prompt,
negative_prompt=default_negative_prompt,
resolution=500000,
duration_seconds=2.1,
steps=8,
guidance_scale=1,
guidance_scale_2=1,
seed=42,
randomize_seed=True,
progress=gr.Progress(track_tqdm=True)
):
start = time.time()
allocation_time = 120
factor = 1
    if (input_image_debug_value[0] is not None or end_image_debug_value[0] is not None
            or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None
            or resolution_debug_value[0] is not None or factor_debug_value[0] is not None
            or allocation_time_debug_value[0] is not None):
start_image_pil = input_image_debug_value[0]
end_image_pil = end_image_debug_value[0]
prompt = prompt_debug_value[0]
duration_seconds = total_second_length_debug_value[0]
resolution = resolution_debug_value[0]
factor = factor_debug_value[0]
allocation_time = allocation_time_debug_value[0]
if start_image_pil is None or end_image_pil is None:
raise gr.Error("Please upload both a start and an end image.")
    progress(0.1, desc="Preprocessing images...")
    # Step 1: Process the start image to get our target dimensions based on the resizing rules.
    processed_start_image = process_image_for_video(start_image_pil, resolution)
    # Step 2: Make the end image match the *exact* dimensions of the processed start image.
    processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
    target_height, target_width = processed_start_image.height, processed_start_image.width
    # Handle seed and frame count
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
    progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
print("Generate a video with the prompt: " + prompt)
output_frames_list = None
caught_error = None
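    # Retry loop: every failed attempt shrinks the frame budget slightly (factor -= .003),
    # except for ZeroGPU queue timeouts which retry at the same size, and always burns one
    # second of the allocation budget so the loop eventually gives up.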
    used_num_frames = num_frames
    while factor >= 1 and int(allocation_time) > 0:
        # The Wan pipeline expects frame counts of the form 4k + 1
        used_num_frames = (int(((num_frames * factor) - 1) / 4) * 4) + 1
        try:
            output_frames_list = generate_video_on_gpu(
                start_image_pil,
                end_image_pil,
                prompt,
                negative_prompt,
                int(steps),
                float(guidance_scale),
                float(guidance_scale_2),
                progress,
                allocation_time,
                target_height,
                target_width,
                current_seed,
                used_num_frames,
                processed_start_image,
                processed_end_image
            )
            caught_error = None
            break
        except BaseException as err:
            print("An exception occurred: " + str(err))
            caught_error = err
            try:
                print('err.message: ' + err.message)  # e.g. "No GPU is currently available for you after 60s"
            except Exception:
                print('Failure')
            # A GPU queue timeout is retried at the same size; any other error shrinks the workload.
            if not str(err).startswith("No GPU is currently available for you after 60s"):
                factor -= .003
            allocation_time = int(allocation_time) - 1
if caught_error is not None:
return [gr.skip(), gr.skip(), gr.skip(), gr.update(value=_error_to_html(caught_error), visible=True), gr.skip()]
    input_image_debug_value[0] = end_image_debug_value[0] = prompt_debug_value[0] = total_second_length_debug_value[0] = resolution_debug_value[0] = factor_debug_value[0] = allocation_time_debug_value[0] = None
progress(0.9, desc="Encoding and saving video...")
video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'
export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    set_mp4_comments_imageio_ffmpeg(video_path, f"Prompt: {prompt} | Negative Prompt: {negative_prompt}")
print("Video exported: " + video_path)
progress(1.0, desc="Done!")
end = time.time()
secondes = int(end - start)
minutes = math.floor(secondes / 60)
secondes = secondes - (minutes * 60)
hours = math.floor(minutes / 60)
minutes = minutes - (hours * 60)
    information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
        "The video has been generated in " + \
        ((str(hours) + " h, ") if hours != 0 else "") + \
        ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
        str(secondes) + " sec (including " + str(allocation_time) + " seconds of GPU). " + \
        "The video has " + str(used_num_frames) + " frames. " + \
        "The video is " + str(target_width) + \
        " pixels wide and " + str(target_height) + \
        " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels." + \
        " Your prompt is saved into the metadata of the video."
return [video_path, gr.update(value = video_path, visible = True, interactive = True), current_seed, gr.update(value = information, visible = True), gr.update(interactive = False)]
def get_duration(
start_image_pil,
end_image_pil,
prompt,
negative_prompt,
steps,
guidance_scale,
guidance_scale_2,
progress,
allocation_time,
target_height,
target_width,
current_seed,
num_frames,
processed_start_image,
processed_end_image
):
return allocation_time
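# Note: @spaces.GPU also accepts a callable as `duration`; it is invoked with the same
# arguments as the decorated function, so get_duration() above turns the user-provided
# allocation_time into the requested ZeroGPU allocation.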
@torch.no_grad()
@spaces.GPU(duration=get_duration)
def generate_video_on_gpu(
start_image_pil,
end_image_pil,
prompt,
negative_prompt,
steps,
guidance_scale,
guidance_scale_2,
progress,
allocation_time,
target_height,
target_width,
current_seed,
num_frames,
processed_start_image,
processed_end_image
):
"""
Generates a video by interpolating between a start and end image, guided by a text prompt,
using the diffusers Wan2.2 pipeline.
"""
output_frames_list = pipe(
image=processed_start_image,
last_image=processed_end_image,
prompt=prompt,
negative_prompt=negative_prompt,
height=target_height,
width=target_width,
num_frames=num_frames,
guidance_scale=guidance_scale,
guidance_scale_2=guidance_scale_2,
num_inference_steps=steps,
generator=torch.Generator(device="cuda").manual_seed(current_seed),
).frames[0]
return output_frames_list
def export_compiled_transformers_to_zip() -> str:
"""
Bundle compiled_transformer_1 and compiled_transformer_2 into a zip file and return the file path.
"""
ct1 = getattr(optimization, "COMPILED_TRANSFORMER_1", None)
ct2 = getattr(optimization, "COMPILED_TRANSFORMER_2", None)
if ct1 is None or ct2 is None:
raise gr.Error("Compiled transformers are not available yet (compilation may have failed).")
payload_1 = ct1.to_serializable_dict()
payload_2 = ct2.to_serializable_dict()
tmp_zip = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
tmp_zip.close()
with zipfile.ZipFile(tmp_zip.name, "w", compression=zipfile.ZIP_DEFLATED) as zf:
# store with torch.save so users can load easily with torch.load()
buf1 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
buf1.close()
torch.save(payload_1, buf1.name)
buf2 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
buf2.close()
torch.save(payload_2, buf2.name)
zf.write(buf1.name, arcname="compiled_transformer_1.pt")
zf.write(buf2.name, arcname="compiled_transformer_2.pt")
# cleanup intermediate .pt
    try:
        os.remove(buf1.name)
        os.remove(buf2.name)
    except OSError:
        pass
return tmp_zip.name
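# Illustrative client-side usage (a sketch; how the payloads are re-attached to a
# pipeline depends on optimization.py's serialization format, which is not shown here):
#     import torch
#     payload_1 = torch.load("compiled_transformer_1.pt")
#     payload_2 = torch.load("compiled_transformer_2.pt")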
# --- 3. Gradio User Interface ---
js = """
function createGradioAnimation() {
window.addEventListener("beforeunload", function(e) {
if (document.getElementById('dummy_button_id') && !document.getElementById('dummy_button_id').disabled) {
var confirmationMessage = 'A process is still running. '
+ 'If you leave before saving, your changes will be lost.';
(e || window.event).returnValue = confirmationMessage;
}
return confirmationMessage;
});
return 'Animation created';
}
"""
# Gradio interface
with gr.Blocks(js=js) as app:
gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")
with gr.Row(elem_id="general_items"):
with gr.Column():
with gr.Group(elem_id="group_all"):
with gr.Row():
start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
# Capture the Tabs component in a variable and assign IDs to tabs
with gr.Tabs(elem_id="group_tabs") as tabs:
with gr.TabItem("Upload", id="upload_tab"):
end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
with gr.TabItem("Generate", id="generate_tab"):
generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images", placeholder="The creature starts to move")
with gr.Accordion("Advanced Settings", open=False):
duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
resolution = gr.Dropdown([
["400,000 px (working)", 400000],
["465,920 px (working)", 465920],
["495,616 px (working)", 495616],
["500,000 px (working)", 500000],
["600,000 px (working)", 600000],
["700,000 px (working)", 700000],
["800,000 px (working)", 800000],
["900,000 px (working)", 900000],
["1,000,000 px (working)", 1000000],
["1,100,000 px (untested)", 1100000],
["1,200,000 px (untested)", 1200000],
["1,300,000 px (untested)", 1300000],
["1,400,000 px (untested)", 1400000],
["1,500,000 px (untested)", 1500000]
], value=465920, label="Resolution (width x height)", info="Less if the image is smaller")
steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
with gr.Row():
seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
generate_button = gr.Button("🚀 Generate Video", variant="primary")
dummy_button = gr.Button(elem_id = "dummy_button_id", visible = False, interactive = False)
with gr.Column():
output_video = gr.Video(label="Generated Video", autoplay = True, loop = True)
download_button = gr.DownloadButton(elem_id="download_btn", interactive = True)
video_information = gr.HTML(value = "")
with gr.Accordion("🔧 Compilation artifacts (advanced)", open=False):
            gr.Markdown(
                "Download the compiled AOTInductor artifacts generated at startup (transformer + transformer_2)."
            )
            export_btn = gr.Button("📦 Prepare the compiled transformers archive")
            compiled_download = gr.DownloadButton(label="⬇️ Download compiled_transformers.zip", interactive=False)
def _build_and_enable_download():
path = export_compiled_transformers_to_zip()
return gr.update(value=path, interactive=True)
export_btn.click(fn=_build_and_enable_download, inputs=None, outputs=compiled_download)
# Main video generation button
ui_inputs = [
start_image,
end_image,
prompt,
negative_prompt_input,
resolution,
duration_seconds_input,
steps_slider,
guidance_scale_input,
guidance_scale_2_input,
seed_input,
randomize_seed_checkbox
]
ui_outputs = [output_video, download_button, seed_input, video_information, dummy_button]
generate_button.click(fn = init_view, inputs = [], outputs = [dummy_button], queue = False, show_progress = False).success(
fn = generate_video,
inputs = ui_inputs,
outputs = ui_outputs
)
generate_5seconds.click(
fn=switch_to_upload_tab,
inputs=None,
outputs=[tabs]
).then(
fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
inputs=[start_image],
outputs=[end_image]
).success(
fn=generate_video,
inputs=ui_inputs,
outputs=ui_outputs
)
output_video.change(
fn=output_video_change,
inputs=[output_video],
outputs=[download_button, video_information],
js="document.getElementById('download_btn').click()"
)
with gr.Row(visible=False):
gr.Examples(
examples=[["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."]],
inputs=[start_image, end_image, prompt],
outputs=ui_outputs,
fn=generate_video,
run_on_click=True,
cache_examples=True,
cache_mode='lazy',
)
prompt_debug=gr.Textbox(label="Prompt Debug")
input_image_debug=gr.Image(type="pil", label="Image Debug")
end_image_debug=gr.Image(type="pil", label="End Image Debug")
total_second_length_debug=gr.Slider(label="Duration Debug", minimum=1, maximum=120, value=5, step=0.1)
resolution_debug = gr.Dropdown([
["400,000 px", 400000],
["465,920 px", 465920],
["495,616 px", 495616],
["500,000 px", 500000],
["600,000 px", 600000],
["700,000 px", 700000],
["800,000 px", 800000],
["900,000 px", 900000],
["1,000,000 px", 1000000],
["1,100,000 px", 1100000],
["1,200,000 px", 1200000],
["1,300,000 px", 1300000],
["1,400,000 px", 1400000],
["1,500,000 px", 1500000]
], value=500000, label="Resolution Debug")
factor_debug=gr.Slider(label="Factor Debug", minimum=1, maximum=100, value=3.2, step=0.1)
allocation_time_debug=gr.Slider(label="Allocation Debug", minimum=1, maximum=60 * 20, value=720, step=1)
def handle_field_debug_change(
input_image_debug_data,
end_image_debug_data,
prompt_debug_data,
total_second_length_debug_data,
resolution_debug_data,
factor_debug_data,
allocation_time_debug_data
):
input_image_debug_value[0] = input_image_debug_data
end_image_debug_value[0] = end_image_debug_data
prompt_debug_value[0] = prompt_debug_data
total_second_length_debug_value[0] = total_second_length_debug_data
resolution_debug_value[0] = resolution_debug_data
factor_debug_value[0] = factor_debug_data
allocation_time_debug_value[0] = allocation_time_debug_data
return []
inputs_debug=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, resolution_debug, factor_debug, allocation_time_debug]
input_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
end_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
prompt_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
total_second_length_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
resolution_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
factor_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
allocation_time_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
gr.Examples(
label = "Examples from demo",
examples = [
["poli_tower.png", "tower_takes_off.png", "The man turns around."],
["ugly_sonic.jpeg", "squatting_sonic.png", "पात्रं क्षेपणास्त्रं चकमाति।"],
["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."],
],
inputs = [start_image, end_image, prompt],
outputs = ui_outputs,
fn = generate_video,
cache_examples = False,
)
if __name__ == "__main__":
app.launch(mcp_server=True, share=True)