|
|
import gradio as gr |
|
|
import torch |
|
|
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler |
|
|
from diffusers.utils import export_to_video |
|
|
import uuid |
|
|
|
|
|
|
|
|
# Hugging Face model id for the zeroscope text-to-video diffusion model.
model_id = "vdo/zeroscope_v2_576w"


# Load the full pipeline in float32 (CPU has no benefit from float16).
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)


# Swap in the multistep DPM solver so fewer inference steps still converge.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)


# Run on CPU — NOTE(review): this is very slow for video generation; move to
# "cuda" if a GPU is available.
pipe.to("cpu")
|
|
|
|
|
def generate_video(prompt):
    """Generate a short video clip from a text prompt.

    Runs the module-level zeroscope diffusion pipeline (``pipe``) for 20
    inference steps at 576x320 producing 16 frames, then writes the frames
    to a uniquely named .mp4 file at 8 fps.

    Args:
        prompt: Text description of the video to generate.

    Returns:
        The output .mp4 filename on success, or ``None`` on any failure
        (Gradio renders a ``None`` video output as empty rather than
        showing a stack trace to the user).
    """
    try:
        # Keep the try body limited to the two operations that can
        # realistically raise: the pipeline call and the video export.
        result = pipe(
            prompt,
            num_inference_steps=20,
            height=320,
            width=576,
            num_frames=16,
        )
        frames = result.frames

        # uuid4-based name avoids collisions between concurrent requests.
        output_filename = f"viral_{uuid.uuid4()}.mp4"
        export_to_video(frames[0], output_filename, fps=8)
    except Exception as e:
        # Top-level boundary handler: log the error ("HATA" = error) and
        # signal failure to the Gradio frontend via None instead of crashing.
        print(f"HATA: {e}")
        return None
    return output_filename
|
|
|
|
|
|
|
|
# Minimal Gradio UI: one text box in, one video player out. api_name="predict"
# exposes the function at the /predict API endpoint for programmatic clients.
demo = gr.Interface(fn=generate_video, inputs="text", outputs="video", api_name="predict")


# Start the Gradio web server (blocks until the process is stopped).
demo.launch()