| import gradio as gr |
| import torch |
| import imageio |
| from diffusers import DiffusionPipeline |
|
|
# Hugging Face model repository for the 720p Linum v2 text-to-video model.
MODEL_ID = "Linum-AI/linum-v2-720p"


# Prefer the GPU when available; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Half precision (and the fp16 weight variant) only works reliably on CUDA;
# most pipeline ops raise "not implemented for 'Half'" on CPU, so load full
# precision weights there instead.
if device == "cuda":
    _dtype_kwargs = {"torch_dtype": torch.float16, "variant": "fp16"}
else:
    _dtype_kwargs = {"torch_dtype": torch.float32}

# Load the pipeline once at import time and move it to the target device.
pipe = DiffusionPipeline.from_pretrained(
    MODEL_ID,
    **_dtype_kwargs,
).to(device)
|
|
def generate_video(prompt, steps, cfg, seed):
    """Generate a video from a text prompt and write it to an MP4 file.

    Args:
        prompt: Text description of the desired video.
        steps: Number of denoising steps (slider value; int-valued).
        cfg: Classifier-free guidance scale.
        seed: RNG seed. gr.Number delivers a float, so it is cast to int.

    Returns:
        Path to the saved MP4 file (consumed by the gr.Video output).
    """
    # torch.Generator.manual_seed requires an int; gr.Number passes a float,
    # which would raise a TypeError without the cast.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    result = pipe(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=cfg,
        generator=generator,
    )
    # Pipeline output frames are batched (one entry per prompt). Index the
    # first — and only — video; passing the batched container straight to
    # imageio would not produce a valid frame sequence.
    frames = result.frames[0]

    output_path = "output.mp4"
    imageio.mimsave(output_path, frames, fps=8)

    return output_path
|
|
# --- Gradio UI -------------------------------------------------------------
# Components are declared in render order inside the Blocks context; the
# button click routes (prompt, steps, cfg, seed) through generate_video and
# shows the resulting MP4 in the video component.
with gr.Blocks(title="Linum v2 — Text to Video") as demo:
    gr.Markdown("# 🎬 Linum v2 — Text to Video (720p)")
    gr.Markdown("Masukkan prompt → generate video AI")

    # Free-text prompt with an illustrative placeholder.
    prompt = gr.Textbox(
        label="Prompt",
        placeholder="A cinematic POV walking through misty mountains at sunrise",
    )

    # Sampling controls.
    steps = gr.Slider(minimum=10, maximum=50, value=25, step=1, label="Inference Steps")
    cfg = gr.Slider(minimum=1, maximum=15, value=7, step=0.5, label="CFG Scale")
    seed = gr.Number(value=42, label="Seed")

    # Trigger + result display.
    run = gr.Button("Generate Video 🚀")
    output = gr.Video(label="Result")

    run.click(fn=generate_video, inputs=[prompt, steps, cfg, seed], outputs=output)

demo.launch()
|
|