# Deforum_Soonr — dev/app3.py
# Author: AlekseyCalvin. History: renamed app3.py -> dev/app3.py (commit 81b935a, verified).
import ast
import os
import uuid

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

import animation_logic as anim
import video_utils as vid
# --- Model Config (SDXS Optimized) ---
# SDXS is a 1-step distilled Stable Diffusion model; float32 keeps
# inference numerically stable on CPU (no half-precision support there).
device = "cpu"
model_id = "IDKiro/sdxs-512-dreamshaper"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float32,
).to(device)  # DiffusionPipeline.to() returns the pipeline itself
def run_deforum(
    prompt_list_str, neg_prompt, max_frames,
    zoom_str, angle_str, tx_str, ty_str,
    cadence, fps, color_match
):
    """Stream a Deforum-style 2D animation built from keyframed prompts and motion.

    Parameters
    ----------
    prompt_list_str : str
        Python dict literal mapping a start frame to a prompt,
        e.g. '{0: "forest", 10: "burning forest"}'.
    neg_prompt : str
        Negative prompt applied to every generation.
    max_frames : int | float
        Total number of frames to produce.
    zoom_str, angle_str, tx_str, ty_str : str
        Deforum keyframe schedule strings (e.g. "0:(1.03)").
    cadence : int
        A diffusion frame is generated every `cadence` frames; the frames
        in between are linearly interpolated.
    fps : int | float
        Playback rate of the final video.
    color_match : bool
        When True, match every frame's colors to the first generated frame.

    Yields
    ------
    tuple
        (preview_image, video_path | None, zip_path | None); the final
        yield carries the rendered video file and the zipped PNG frames.
    """
    width, height = 256, 256
    max_frames = int(max_frames)
    cadence = max(1, int(cadence))  # guard: cadence <= 0 would crash `f % cadence`

    # Expected format: {0: "prompt", 10: "prompt"}
    try:
        # literal_eval instead of eval(): the string comes straight from the
        # UI and must never be able to execute arbitrary code.
        prompts = ast.literal_eval(prompt_list_str)
        if not isinstance(prompts, dict) or not prompts:
            raise ValueError("prompt map must be a non-empty dict")
    except Exception as e:
        # This function is a generator, so a bare `return value` is swallowed
        # as StopIteration and Gradio shows nothing; yield the error instead.
        yield None, None, f"Error parsing prompts: {str(e)}"
        return

    # Expand keyframe schedule strings into one value per frame.
    zoom_s = anim.parse_keyframe_string(zoom_str, max_frames)
    angle_s = anim.parse_keyframe_string(angle_str, max_frames)
    tx_s = anim.parse_keyframe_string(tx_str, max_frames)
    ty_s = anim.parse_keyframe_string(ty_str, max_frames)

    all_frames = []
    prev_gen_frame = None
    first_frame = None

    for f in range(max_frames):
        if f % cadence == 0:
            # Most recent prompt whose start frame is <= f. If the map does
            # not start at frame 0, fall back to its earliest entry instead
            # of raising ValueError on an empty max().
            applicable = [k for k in prompts if k <= f]
            current_prompt = prompts[max(applicable) if applicable else min(prompts)]

            if prev_gen_frame is not None:
                # Warp the previous frame according to the 2D motion schedules.
                init_image = anim.anim_frame_warp(
                    prev_gen_frame, angle_s[f], zoom_s[f], tx_s[f], ty_s[f]
                )
                # SDXS 1-step inference.
                # NOTE(review): StableDiffusionPipeline is txt2img and is not
                # documented to accept `image=`; an img2img pipeline is likely
                # needed for this warp feedback loop — confirm against diffusers.
                new_frame = pipe(
                    current_prompt,
                    image=init_image,
                    negative_prompt=neg_prompt,
                    num_inference_steps=1,
                    guidance_scale=0.0,
                    width=width, height=height
                ).images[0]
                if color_match and first_frame is not None:
                    new_frame = anim.maintain_colors(first_frame, new_frame)
            else:
                # First frame: plain txt2img base generation.
                new_frame = pipe(
                    current_prompt,
                    negative_prompt=neg_prompt,
                    num_inference_steps=1,
                    guidance_scale=0.0,
                    width=width, height=height
                ).images[0]
                first_frame = new_frame

            # Cadence interpolation: fill the gap since the last real frame.
            if cadence > 1 and prev_gen_frame is not None:
                for i in range(1, cadence):
                    alpha = i / cadence
                    all_frames.append(anim.lerp_frames(prev_gen_frame, new_frame, alpha))

            all_frames.append(new_frame)
            prev_gen_frame = new_frame
            yield new_frame, None, None

    # Post-processing: encode the video and zip the individual frames.
    unique_id = uuid.uuid4().hex[:6]
    video_file = vid.frames_to_video(all_frames, f"deforum_{unique_id}.mp4", fps)
    zip_file = vid.export_to_zip(all_frames, f"frames_{unique_id}.zip")
    yield all_frames[-1], video_file, zip_file
# --- UI Setup ---
# Fix: `theme` is a gr.Blocks() constructor argument, not a launch() argument —
# Blocks.launch() has no `theme` parameter, so passing it there raises TypeError.
with gr.Blocks(theme='SebastianBravo/simci_css') as demo:
    gr.Markdown("# 🚀 Deforum SOON® Animator")
    with gr.Row():
        with gr.Column():
            p_input = gr.Textbox(label="Prompt Map", value='{0: "hyperrealistic forest", 10: "burning forest"}')
            n_input = gr.Textbox(label="Negative Prompt", value="blurry, text, watermark")
            with gr.Row():
                f_count = gr.Slider(5, 100, value=20, step=1, label="Frames")
                c_val = gr.Slider(1, 5, value=1, step=1, label="Cadence")
            fps_val = gr.Number(label="FPS", value=10)
            color_check = gr.Checkbox(label="Color Match", value=True)
            with gr.Accordion("2D Motion (Keyframes)", open=False):
                z_in = gr.Textbox(label="Zoom", value="0:(1.03)")
                a_in = gr.Textbox(label="Angle", value="0:(0)")
                tx_in = gr.Textbox(label="TX", value="0:(0)")
                ty_in = gr.Textbox(label="TY", value="0:(0)")
            run_btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            preview = gr.Image(label="Live Stream")
            video_out = gr.Video(label="Final Render")
            file_out = gr.File(label="Batch PNGs")
    # run_deforum is a generator, so each yield streams to `preview` and the
    # final yield fills in the video and the PNG zip.
    run_btn.click(
        fn=run_deforum,
        inputs=[p_input, n_input, f_count, z_in, a_in, tx_in, ty_in, c_val, fps_val, color_check],
        outputs=[preview, video_out, file_out]
    )
demo.launch()