import os

import streamlit as st
import torch
from diffusers import WanPipeline
from diffusers.utils import export_to_video

# Page config must be the first Streamlit call in the script —
# Streamlit raises if any other st.* command runs before it.
st.set_page_config(page_title="Text-to-Video Generator", page_icon="🎬", layout="wide")
|
@st.cache_resource
def load_model():
    """Load the Wan text-to-video pipeline once and cache it across reruns.

    Returns:
        WanPipeline: the pipeline in fp16, with CPU offload enabled when
        CUDA is available, otherwise resident on the CPU.
    """
    pipe = WanPipeline.from_pretrained(
        "alibaba-pai/Wan2.2-Fun-A14B-Control",
        torch_dtype=torch.float16,
        variant="fp16",
    )

    if torch.cuda.is_available():
        # enable_model_cpu_offload() manages device placement itself:
        # moving the whole pipeline to CUDA first (as `pipe.to("cuda")`)
        # defeats the offloading and can double VRAM usage, so we only
        # enable offload here. It also requires CUDA, hence the guard.
        pipe.enable_model_cpu_offload()
    else:
        pipe = pipe.to("cpu")

    # Decode the VAE in slices to reduce peak memory during video decode.
    pipe.enable_vae_slicing()
    return pipe
|
st.title("🎬 Text-to-Video Generator")
st.markdown("### Powered by Wan2.2-Fun-A14B-Control")

# Generation settings live in the sidebar; slider defaults match the
# model's native output (1360x768, 81 frames).
with st.sidebar:
    st.header("⚙️ Settings")

    num_frames = st.slider("Number of Frames", 16, 81, 81)
    height = st.slider("Height", 256, 1024, 768, step=64)
    width = st.slider("Width", 256, 1536, 1360, step=64)
    num_inference_steps = st.slider("Inference Steps", 20, 100, 50)
    guidance_scale = st.slider("Guidance Scale", 1.0, 20.0, 7.5, step=0.5)
    seed = st.number_input("Seed (-1 for random)", value=-1, step=1)

prompt = st.text_area(
    "Prompt",
    value="A cat playing piano in a sunlit room, cinematic lighting, 4k",
    height=100,
)

negative_prompt = st.text_area(
    "Negative Prompt",
    value="blurry, low quality, distorted",
    height=80,
)
|
if st.button("🎬 Generate Video", type="primary"):
    with st.spinner("Loading model..."):
        pipe = load_model()

    with st.spinner("Generating video... This may take a few minutes."):
        try:
            # Seed the generator only when the user asked for a
            # reproducible run; -1 means "use a random seed".
            generator = None
            if seed != -1:
                device = "cuda" if torch.cuda.is_available() else "cpu"
                generator = torch.Generator(device=device).manual_seed(int(seed))

            output = pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_frames=num_frames,
                height=height,
                width=width,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                generator=generator,
            )

            # The pipeline returns a batch of videos; we requested one.
            frames = output.frames[0]
            output_path = "output_video.mp4"
            export_to_video(frames, output_path, fps=8)

            st.success("✅ Video generated successfully!")
            st.video(output_path)

            # Offer the rendered file for download.
            with open(output_path, "rb") as file:
                st.download_button(
                    label="📥 Download Video",
                    data=file,
                    file_name="generated_video.mp4",
                    mime="video/mp4",
                )

        except Exception as e:
            # Top-level UI boundary: surface the failure to the user
            # instead of crashing the Streamlit script run.
            st.error(f"❌ Error: {e}")