Account00a committed on
Commit
ac9a9f2
·
verified ·
1 Parent(s): 60f0957

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -11
app.py CHANGED
@@ -7,26 +7,47 @@ from diffusers.utils import export_to_video
7
  import tempfile
8
  import time
9
 
10
- pipe = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  @spaces.GPU(duration=240)
13
  def generate_video(prompt, negative_prompt, num_frames, height, width, num_inference_steps, guidance_scale):
14
- global pipe
15
- if pipe is None:
16
- print("📦 Loading Wan2.1-1.3B to GPU...")
17
- pipe = WanPipeline.from_pretrained("Wan-AI/Wan2.1-T2V-1.3B-Diffusers", torch_dtype=torch.float16, low_cpu_mem_usage=True)
18
- pipe.to("cuda")
19
- pipe.vae.enable_tiling()
20
- print("✅ Loaded!")
21
-
22
  with torch.inference_mode():
23
- result = pipe(prompt=prompt, negative_prompt=negative_prompt, num_frames=int(num_frames), height=int(height), width=int(width), num_inference_steps=int(num_inference_steps), guidance_scale=float(guidance_scale)).frames[0]
24
-
 
 
 
 
 
 
 
 
 
25
  output_path = tempfile.mktemp(suffix=".mp4")
26
  export_to_video(result, output_path, fps=16)
27
  gc.collect(); torch.cuda.empty_cache()
28
  return output_path
29
 
 
30
  with gr.Blocks(title="Shotarch Video Gen", theme=gr.themes.Soft()) as demo:
31
  gr.Markdown("# 🎬 Shotarch Video Generator\n### Wan2.1-1.3B on ZeroGPU")
32
  with gr.Row():
 
7
  import tempfile
8
  import time
9
 
10
# ------------------------------------------------------------------
# Eager model load at import time: the Space container boots on CPU,
# so the slow download + weight load runs exactly once, before any
# GPU allocation — none of it counts against the ZeroGPU time budget.
# ------------------------------------------------------------------
print("📦 Loading Wan2.1-1.3B on CPU (startup - no GPU timer)...")
_boot_t0 = time.time()

# float16 weights + low_cpu_mem_usage keep the CPU-resident footprint small.
pipe = WanPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)
# Tiled VAE decode trades speed for a much lower peak-memory decode.
pipe.vae.enable_tiling()
gc.collect()

print(f"✅ Model ready in {time.time()-_boot_t0:.0f}s | GPU time saved for generation only!")
26
+
27
 
28
@spaces.GPU(duration=240)
def generate_video(prompt, negative_prompt, num_frames, height, width, num_inference_steps, guidance_scale):
    """Generate a short video from a text prompt using the preloaded Wan2.1 pipeline.

    The pipeline (module-level ``pipe``) is loaded on CPU at startup; this
    function moves it to CUDA inside the ``@spaces.GPU`` window so only the
    actual generation consumes GPU quota.

    Args:
        prompt: Text description of the desired video.
        negative_prompt: Text describing content to avoid.
        num_frames: Number of frames to generate (coerced to int).
        height: Output height in pixels (coerced to int).
        width: Output width in pixels (coerced to int).
        num_inference_steps: Diffusion denoising steps (coerced to int).
        guidance_scale: Classifier-free guidance strength (coerced to float).

    Returns:
        Path to the exported .mp4 file (16 fps).
    """
    # Move to GPU lazily — the weights live on CPU between calls.
    pipe.to("cuda")

    start = time.time()
    with torch.inference_mode():
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_frames=int(num_frames),
            height=int(height),
            width=int(width),
            num_inference_steps=int(num_inference_steps),
            guidance_scale=float(guidance_scale),
        ).frames[0]

    print(f"✅ Generated in {time.time()-start:.1f}s")

    # FIX: tempfile.mktemp() is deprecated and race-prone (the name can be
    # claimed by another process before we open it). NamedTemporaryFile with
    # delete=False atomically creates the file; we close it immediately and
    # hand the path to the video exporter.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    output_path = tmp.name
    tmp.close()
    export_to_video(result, output_path, fps=16)
    # Free host/device memory between requests on the shared GPU.
    gc.collect(); torch.cuda.empty_cache()
    return output_path
49
 
50
+
51
  with gr.Blocks(title="Shotarch Video Gen", theme=gr.themes.Soft()) as demo:
52
  gr.Markdown("# 🎬 Shotarch Video Generator\n### Wan2.1-1.3B on ZeroGPU")
53
  with gr.Row():