Quantumbraid committed on
Commit
e0b7065
·
verified ·
1 Parent(s): 39747ad

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import gradio as gr
4
+ import spaces # Critical for ZeroGPU support in Spaces
5
+ from diffusers import DiffusionPipeline # Common for HF video models
6
+ from PIL import Image
7
+
8
# =============================================================
# INITIALIZATION & PIPELINE CONFIGURATION
# =============================================================
# Hugging Face Hub repository id passed to DiffusionPipeline.from_pretrained().
# NOTE(review): "MiniMaxAI/MiniMax-M1" is a text LLM repository, not a diffusers
# video pipeline — confirm this id actually loads with DiffusionPipeline.
MODEL_ID = "MiniMaxAI/MiniMax-M1"
12
+
13
def load_pipeline():
    """Download and configure the diffusion pipeline used by this app.

    Returns the pipeline loaded in bfloat16 with CPU offload and VAE
    slicing enabled, so it fits on a 24GB card or a ZeroGPU Space.
    """
    # bfloat16 halves weight memory on modern GPUs [17].
    # Note: ensure the model is available on the HF Hub or adjust the path.
    pipeline = DiffusionPipeline.from_pretrained(
        MODEL_ID,
        use_safetensors=True,
        torch_dtype=torch.bfloat16,
    )

    # Critical VRAM optimizations for deployment on 24GB or ZeroGPU [2]:
    # weights stream to the GPU on demand, and the VAE decodes in slices.
    pipeline.enable_model_cpu_offload()
    pipeline.enable_vae_slicing()
    return pipeline
26
+
27
# Pipeline is initialized globally to avoid reloads on every click.
# NOTE: this runs at import time, so model download/loading happens
# before the UI is served.
pipe = load_pipeline()
29
+
30
# =============================================================
# GENERATION LOGIC
# =============================================================
@spaces.GPU(duration=300)  # Allocated GPU time for complex generation [16]
def generate_video(prompt, negative_prompt, steps, guidance_scale, seed):
    """Run the diffusion pipeline and return the path to an MP4 file.

    Args:
        prompt: Positive text prompt describing the scene.
        negative_prompt: Concepts to steer the sampler away from.
        steps: Number of denoising steps (coerced to int; comes from a Slider).
        guidance_scale: Classifier-free guidance strength (coerced to float).
        seed: RNG seed for reproducible output (coerced to int).

    Returns:
        Filesystem path of the exported .mp4, suitable for gr.Video.
    """
    # Standard 5-second clip: 81 frames at 16 FPS [18-20]
    num_frames = 81

    # Seeded CUDA generator for deterministic results [21, 22]
    generator = torch.Generator("cuda").manual_seed(int(seed))

    # Diffusers video pipelines return frames batched per prompt;
    # take the first (and only) video's frame list for export.
    # (Passing the whole batch to export_to_video would be wrong.)
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance_scale),
        num_frames=num_frames,
        generator=generator,
    ).frames[0]

    # Exporting generated frames to a video file [24, 25]
    from diffusers.utils import export_to_video
    import tempfile

    # NamedTemporaryFile replaces the deprecated, race-prone tempfile.mktemp;
    # delete=False keeps the file around for Gradio to serve.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        temp_path = tmp.name
    export_to_video(output, temp_path, fps=16)
    return temp_path
59
+
60
# =============================================================
# UI DESIGN (Using gr.Blocks for Professional Layout)
# =============================================================
# CSS for custom styling to improve "Joyful Experience" [26, 27].
# .container centers and bounds the layout; .gen_btn styles the
# generate button (applied via elem_classes below).
css = """
.container { max-width: 1000px; margin: auto; }
.gen_btn { background-color: #7224f2 !important; color: white !important; }
"""
68
+
69
# Build the two-column Blocks layout; `demo` is launched from __main__.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    # Page header
    gr.Markdown(f"# {MODEL_ID} High-Fidelity Video Generator")
    gr.Markdown("Leveraging ZeroGPU and VRAM offloading for cinematic AI video [2, 28].")

    with gr.Row():
        # Left column: prompts and generation controls
        with gr.Column(scale=1):
            prompt_box = gr.Textbox(
                label="Prompt",
                placeholder="Describe the action and scene details...",
                lines=4,
            )
            negative_box = gr.Textbox(
                label="Negative Prompt",
                value="blurry, distorted, low quality, watermark, text",
            )

            # Sampler knobs tucked away so the default view stays simple
            with gr.Accordion("Advanced Settings", open=False):
                steps_slider = gr.Slider(20, 50, value=30, step=1, label="Inference Steps")
                guidance_slider = gr.Slider(1.0, 15.0, value=7.0, label="Guidance Scale")
                seed_box = gr.Number(value=42, label="Seed")

            run_button = gr.Button("Generate Video", variant="primary", elem_classes="gen_btn")

        # Right column: rendered result
        with gr.Column(scale=1):
            result_video = gr.Video(label="Generated Output")

    # Wire the button to the generation function [29, 30]
    run_button.click(
        fn=generate_video,
        inputs=[prompt_box, negative_box, steps_slider, guidance_slider, seed_box],
        outputs=result_video,
    )
101
+
102
# =============================================================
# LAUNCH
# =============================================================
if __name__ == "__main__":
    # Ensure app.py is at the repo root for automatic HF Spaces detection [14, 15]
    demo.launch()