# Deforum_Soonr / dev/app2.py
# (Hugging Face Space file-listing header preserved as a comment so the
#  module parses — author: AlekseyCalvin; commit b91cd3e verified:
#  "Rename app2.py to dev/app2.py")
import ast
import os
import uuid

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

import animation_logic as anim
import video_utils as vid
# --- Model Config (SDXS Optimized) ---
# SDXS is presumably a distilled SD variant tuned for single-step inference
# (the generation loop uses num_inference_steps=1, guidance_scale=0.0),
# which is why CPU-only float32 execution is viable here — TODO confirm.
device = "cpu"
model_id = "IDKiro/sdxs-512-dreamshaper"
# Loaded once at module import; reused by run_deforum for every frame.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe.to(device)
def run_deforum(
    prompt_list_str, neg_prompt, max_frames,
    zoom_str, angle_str, tx_str, ty_str,
    cadence, fps
):
    """Render a Deforum-style animation and stream progress to the UI.

    Generator used as a Gradio event handler: yields
    (preview_frame, video_path, zip_path) tuples. Intermediate yields carry
    only the live preview frame; the final yield adds the rendered MP4 and
    a ZIP of all frames.

    Args:
        prompt_list_str: Python dict literal mapping frame index -> prompt,
            e.g. '{0: "a snowy mountain", 15: "a fiery volcano"}'.
        neg_prompt: negative prompt applied to every generation.
        max_frames: total number of animation frames to produce.
        zoom_str, angle_str, tx_str, ty_str: Deforum keyframe schedule
            strings (e.g. "0:(1.04)") parsed by anim.parse_keyframe_string.
        cadence: run diffusion only every Nth frame; the gap is filled by
            linear interpolation between the surrounding generated frames.
        fps: frame rate of the output video.

    Raises:
        gr.Error: if the prompt dictionary is invalid or no frames are made.
    """
    width, height = 256, 256

    # Gradio Number/Slider components may deliver floats; range() and the
    # cadence modulo below need ints. Clamp cadence >= 1 to avoid a
    # ZeroDivisionError in `f % cadence`.
    max_frames = int(max_frames)
    cadence = max(1, int(cadence))

    # ast.literal_eval accepts only Python literals — unlike eval(), it
    # cannot execute arbitrary code typed into the UI textbox.
    try:
        prompts = ast.literal_eval(prompt_list_str)
        if not isinstance(prompts, dict) or not prompts:
            raise ValueError("not a non-empty dict")
    except (ValueError, SyntaxError):
        # A plain `return` of an error tuple is swallowed inside a
        # generator; gr.Error surfaces the message in the Gradio UI.
        raise gr.Error("Error: Prompt dictionary format invalid.")

    # Parse the motion schedules into per-frame value sequences.
    zoom_s = anim.parse_keyframe_string(zoom_str, max_frames)
    angle_s = anim.parse_keyframe_string(angle_str, max_frames)
    tx_s = anim.parse_keyframe_string(tx_str, max_frames)
    ty_s = anim.parse_keyframe_string(ty_str, max_frames)

    all_frames = []
    prev_gen_frame = None

    # Generation loop: diffusion runs only on cadence frames.
    for f in range(max_frames):
        if f % cadence != 0:
            continue

        # Most recent keyframe prompt at or before frame f; if the schedule
        # starts after frame 0, fall back to the earliest keyframe instead
        # of crashing on max() of an empty sequence.
        active_keys = [k for k in prompts if k <= f]
        current_prompt = prompts[max(active_keys) if active_keys else min(prompts)]

        if prev_gen_frame is not None:
            # Warp the previous frame by this frame's scheduled motion.
            init_image = anim.anim_frame_warp(prev_gen_frame, angle_s[f], zoom_s[f], tx_s[f], ty_s[f])
            # SDXS inference (1 step, no CFG).
            # NOTE(review): StableDiffusionPipeline is the txt2img pipeline;
            # the `image=` kwarg mimics img2img 'strength' logic here, but an
            # Img2Img pipeline is presumably intended — confirm the kwarg is
            # actually consumed rather than rejected/ignored.
            new_frame = pipe(
                current_prompt,
                image=init_image,
                negative_prompt=neg_prompt,
                num_inference_steps=1,
                guidance_scale=0.0,
                width=width, height=height
            ).images[0]
        else:
            # First frame: plain txt2img, no warp source yet.
            new_frame = pipe(
                current_prompt,
                negative_prompt=neg_prompt,
                num_inference_steps=1,
                guidance_scale=0.0,
                width=width, height=height
            ).images[0]

        # Fill the cadence gap behind us by blending the two generated frames.
        if cadence > 1 and prev_gen_frame is not None:
            for i in range(1, cadence):
                all_frames.append(anim.lerp_frames(prev_gen_frame, new_frame, i / cadence))

        all_frames.append(new_frame)
        prev_gen_frame = new_frame
        yield new_frame, None, None  # live preview; video/zip not ready yet

    # Guard: max_frames <= 0 would leave all_frames empty and crash below.
    if not all_frames:
        raise gr.Error("Error: Max Frames must be at least 1.")

    # Finalize: encode the video and zip the raw frames for download.
    video_file = vid.frames_to_video(all_frames, f"output_{uuid.uuid4().hex[:6]}.mp4", fps)
    zip_file = vid.export_to_zip(all_frames, f"frames_{uuid.uuid4().hex[:6]}.zip")
    yield all_frames[-1], video_file, zip_file
# --- Gradio Interface ---
# Layout order follows statement order: left column holds all generation
# controls, right column shows the streamed preview and final outputs.
with gr.Blocks(theme=gr.themes.Glass()) as demo:
    gr.Markdown("# 🎨 Deforum Soonr Variant 2")
    with gr.Row():
        # Left column: generation controls.
        with gr.Column(scale=1):
            # Frame-indexed prompt schedule, entered as a Python dict literal.
            prompts = gr.Textbox(label="Prompts (Frame: Prompt Dict)",
                value='{0: "a snowy mountain", 15: "a fiery volcano"}', lines=3)
            neg_p = gr.Textbox(label="Negative Prompt", value="blur, lowres, text")
            with gr.Row():
                frames_n = gr.Number(label="Max Frames", value=20)
                # Cadence N: diffusion runs every Nth frame; in-between
                # frames are interpolated inside run_deforum.
                cadence_n = gr.Slider(1, 4, value=2, step=1, label="Cadence (Skip Steps)")
                fps_n = gr.Number(label="FPS", value=10)
            with gr.Accordion("Motion Parameters", open=False):
                # Deforum keyframe schedules, "frame:(expression)" — parsed
                # by anim.parse_keyframe_string in run_deforum.
                zoom = gr.Textbox(label="Zoom", value="0:(1.04)")
                angle = gr.Textbox(label="Angle", value="0:(2*sin(t/5))")
                tx = gr.Textbox(label="Translation X", value="0:(0)")
                ty = gr.Textbox(label="Translation Y", value="0:(0)")
            btn = gr.Button("Generate", variant="primary")
        # Right column: live preview and downloadable results.
        with gr.Column(scale=1):
            preview = gr.Image(label="Live Frame Preview")
            video_out = gr.Video(label="Rendered Animation")
            file_out = gr.File(label="Download Batch (ZIP)")
    # run_deforum is a generator, so Gradio streams each yielded tuple
    # into (preview, video_out, file_out) as it arrives.
    btn.click(
        fn=run_deforum,
        inputs=[prompts, neg_p, frames_n, zoom, angle, tx, ty, cadence_n, fps_n],
        outputs=[preview, video_out, file_out]
    )
demo.launch()