# Sora_2.11 / app.py
import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
# `spaces` is only available on Hugging Face Spaces, so guard the import
# to keep the script runnable on a local machine as well.
try:
    import spaces
except ImportError:
    spaces = None
# 1. Hardware detection
# Use the GPU with float16 if one is available; otherwise fall back to CPU with float32.
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.float16
    print("✅ GPU detected: running in fast mode (float16)")
else:
    device = "cpu"
    dtype = torch.float32
    print("⚠️ No GPU detected: running in slow mode (float32)")
# 2. Load the Model Components
print("Loading AnimateDiff-Lightning...")
# STEP A: Load the base AnimateDiff motion adapter
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2",
    torch_dtype=dtype,  # use the detected dtype
)
# STEP B: Download the Lightning weights
print("Downloading Lightning weights...")
file_path = hf_hub_download(
    repo_id="ByteDance/AnimateDiff-Lightning",
    filename="animatediff_lightning_4step_diffusers.safetensors",
)
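# hf_hub_download returns the local path of the cached file, so restarting
# the app reuses the download instead of fetching the weights again.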
# STEP C: Apply the Lightning update
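# Loading this state dict replaces the base motion-module weights with the
# distilled 4-step Lightning weights.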
adapter.load_state_dict(load_file(file_path))
# STEP D: Load the base model
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism",
    motion_adapter=adapter,
    torch_dtype=dtype,  # use the detected dtype
)
# Set up the scheduler
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config,
    timestep_spacing="trailing",
    beta_schedule="linear",
)
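# AnimateDiff-Lightning is distilled against this scheduler configuration:
# "trailing" spacing keeps the 4 sampling steps aligned with the timesteps
# used during distillation, and the linear beta schedule matches its training.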
# Move to the detected device
pipe.to(device)
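# Optional alternative (not in the original script): on small GPUs,
# pipe.enable_model_cpu_offload() (requires `accelerate`) can be used instead
# of pipe.to(device) to trade speed for lower VRAM use.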
# 3. Define the generation function
# On Spaces, @spaces.GPU requests a GPU worker for up to 60 seconds per call.
# Without the spaces package (e.g. running locally), use a plain function.
if spaces is not None:
    @spaces.GPU(duration=60)
    def generate_video(prompt, negative_prompt):
        return run_inference(prompt, negative_prompt)
else:
    def generate_video(prompt, negative_prompt):
        return run_inference(prompt, negative_prompt)
def run_inference(prompt, negative_prompt):
    print(f"Generating video for: {prompt}")
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=4,  # Lightning is distilled for 4-step sampling
        guidance_scale=1.5,     # low guidance scale suited to the distilled model
        num_frames=16,
    )
    frames = output.frames[0]  # list of PIL images for the first (only) video
    output_path = "output.mp4"
    export_to_video(frames, output_path)
    return output_path
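# Note: every request writes to the same "output.mp4"; if requests can run
# concurrently, a unique filename (e.g. from tempfile) would avoid collisions.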
# 4. Build the UI
with gr.Blocks() as demo:
    gr.Markdown("# ⚡ AnimateDiff Lightning")
    gr.Markdown("If this is running on CPU, it will take about 3-5 minutes per video.")
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", lines=3)
            neg_prompt_input = gr.Textbox(label="Negative Prompt", value="bad quality, deformed", lines=2)
            generate_btn = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Result")
    generate_btn.click(
        fn=generate_video,
        inputs=[prompt_input, neg_prompt_input],
        outputs=video_output,
    )

demo.launch()
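# Assumed dependencies (e.g. in requirements.txt on a Space): gradio, torch,
# diffusers, transformers, safetensors, huggingface_hub.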