Spaces:
Paused
Paused
| import gradio as gr | |
| import torch | |
| from diffusers import LTXPipeline | |
| from diffusers.utils import export_to_video | |
| import tempfile | |
| import random | |
# Load the LTX-Video text-to-video pipeline once at module import time,
# then move its weights onto the GPU. bfloat16 halves memory vs. fp32.
pipe = LTXPipeline.from_pretrained(
    "Lightricks/LTX-Video",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")
def generate_video(prompt, negative_prompt, height, width, num_frames, num_inference_steps, seed):
    """Run the LTX-Video pipeline on a text prompt and return a path to an MP4.

    Args:
        prompt: Text description of the desired video.
        negative_prompt: Concepts to steer the generation away from.
        height: Output height in pixels (multiple of 32 per the UI slider).
        width: Output width in pixels (multiple of 32 per the UI slider).
        num_frames: Number of frames to generate.
        num_inference_steps: Number of denoising steps.
        seed: RNG seed; -1 picks a random seed.

    Returns:
        Filesystem path of the exported MP4 (24 fps). The temp file is kept
        (delete=False) so Gradio can serve it after this function returns.
    """
    # Gradio's gr.Number / gr.Slider deliver floats; torch.Generator.manual_seed
    # raises TypeError on a float, and the pipeline expects integer dimensions,
    # so coerce every numeric input to int before use.
    seed = int(seed)
    if seed == -1:
        seed = random.randint(0, 2**32 - 1)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    video = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=int(height),
        width=int(width),
        num_frames=int(num_frames),
        num_inference_steps=int(num_inference_steps),
        generator=generator,
    ).frames[0]

    # Export to a temp file that survives the context manager (delete=False)
    # so the caller (Gradio) can read it back.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        export_to_video(video, tmpfile.name, fps=24)
        return tmpfile.name
# ----- Gradio interface -----------------------------------------------------
title = "LTX-Video Generator"
description = "Generate high-quality videos from text using the Lightricks LTX-Video model."

with gr.Blocks(title=title) as demo:
    gr.Markdown(f"## {title}\n{description}")

    # Prompt inputs side by side.
    with gr.Row():
        prompt_box = gr.Textbox(
            label="Prompt",
            value="A woman with long brown hair and light skin smiles at another woman...",
            lines=5,
        )
        negative_box = gr.Textbox(
            label="Negative Prompt",
            value="worst quality, inconsistent motion, blurry, jittery, distorted",
            lines=5,
        )

    # Generation parameters; slider steps match the model's size constraints.
    with gr.Row():
        height_slider = gr.Slider(minimum=64, maximum=720, step=32, value=480, label="Height")
        width_slider = gr.Slider(minimum=64, maximum=1280, step=32, value=704, label="Width")
        frames_slider = gr.Slider(minimum=9, maximum=257, step=8, value=161, label="Number of Frames")
        steps_slider = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Inference Steps")
        seed_input = gr.Number(value=-1, label="Seed (set -1 for random)")

    run_button = gr.Button("Generate Video")
    video_output = gr.Video(label="Generated Video")

    # Wire the button to the generator; input order must match the
    # generate_video signature.
    run_button.click(
        fn=generate_video,
        inputs=[
            prompt_box,
            negative_box,
            height_slider,
            width_slider,
            frames_slider,
            steps_slider,
            seed_input,
        ],
        outputs=video_output,
    )

demo.launch()