orderlymirror committed on
Commit
5a00166
·
verified ·
1 Parent(s): 4c6c800

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -8
app.py CHANGED
@@ -13,30 +13,37 @@ pipe.enable_model_cpu_offload()
13
  pipe.vae.enable_slicing()
14
 
15
  # 2. GPU-decorated generation function
16
- @spaces.GPU(duration=180)
17
- def generate_video(prompt: str, steps: int, frames: int, fps: int) -> str:
18
  """
19
- Generates video from text with adjustable steps, frames, and fps.
20
  Returns path to the saved MP4 file.
21
  """
 
 
 
 
 
22
  # Run pipeline; offload handles device placement
23
  output = pipe(
24
  prompt=prompt,
25
  num_inference_steps=steps,
26
- num_frames=frames
 
 
27
  )
28
  frame_list = output.frames[0]
29
 
30
  # Export to MP4 for browser playback
31
  return export_to_video(frame_list, "generated.mp4", fps=fps)
32
 
33
- # 3. Build the Gradio interface with sliders
34
  with gr.Blocks(title="CogVideoX Interactive Text-to-Video") as demo:
35
  gr.Markdown(
36
  """
37
  # 🎞️ Interactive Text‑to‑Video Demo
38
- Adjust the sliders below to control the number of diffusion steps,
39
- total frames (length), and frames per second (fps) for your video.
40
  """
41
  )
42
  with gr.Column():
@@ -57,12 +64,17 @@ with gr.Blocks(title="CogVideoX Interactive Text-to-Video") as demo:
57
  minimum=1, maximum=60, step=1, value=16,
58
  label="Frames per Second (fps)"
59
  )
 
 
 
 
 
60
  gen_button = gr.Button("Generate Video")
61
  video_output = gr.Video(label="Generated Video", format="mp4")
62
 
63
  gen_button.click(
64
  fn=generate_video,
65
- inputs=[prompt_input, steps_slider, frames_slider, fps_slider],
66
  outputs=video_output
67
  )
68
 
 
13
  pipe.vae.enable_slicing()
14
 
15
  # 2. GPU-decorated generation function
16
+ @spaces.GPU(duration=600)
17
+ def generate_video(prompt: str, steps: int, frames: int, fps: int, resolution: str) -> str:
18
  """
19
+ Generates video from text with adjustable steps, frames, fps, and resolution.
20
  Returns path to the saved MP4 file.
21
  """
22
+ # Parse resolution string (e.g., "720p" -> height=720)
23
+ height = int(resolution.rstrip('p'))
24
+ # Width is computed to maintain the model's aspect ratio (assumed 1360x768 -> 16:9)
25
+ width = int(height * (16/9))
26
+
27
  # Run pipeline; offload handles device placement
28
  output = pipe(
29
  prompt=prompt,
30
  num_inference_steps=steps,
31
+ num_frames=frames,
32
+ height=height,
33
+ width=width
34
  )
35
  frame_list = output.frames[0]
36
 
37
  # Export to MP4 for browser playback
38
  return export_to_video(frame_list, "generated.mp4", fps=fps)
39
 
40
+ # 3. Build the Gradio interface with sliders and resolution dropdown
41
  with gr.Blocks(title="CogVideoX Interactive Text-to-Video") as demo:
42
  gr.Markdown(
43
  """
44
  # 🎞️ Interactive Text‑to‑Video Demo
45
+ Adjust the sliders and select resolution to control the diffusion steps,
46
+ total frames (length), fps, and video resolution.
47
  """
48
  )
49
  with gr.Column():
 
64
  minimum=1, maximum=60, step=1, value=16,
65
  label="Frames per Second (fps)"
66
  )
67
+ resolution_dropdown = gr.Dropdown(
68
+ choices=["360p", "480p", "720p", "1080p"],
69
+ value="480p",
70
+ label="Resolution"
71
+ )
72
  gen_button = gr.Button("Generate Video")
73
  video_output = gr.Video(label="Generated Video", format="mp4")
74
 
75
  gen_button.click(
76
  fn=generate_video,
77
+ inputs=[prompt_input, steps_slider, frames_slider, fps_slider, resolution_dropdown],
78
  outputs=video_output
79
  )
80