fffiloni commited on
Commit
c088540
·
verified ·
1 Parent(s): 8a1c704

optimize for shared UI

Browse files
Files changed (1) hide show
  1. app.py +15 -4
app.py CHANGED
@@ -1,5 +1,9 @@
1
  import spaces
2
  import gradio as gr
 
 
 
 
3
  import torch
4
  import time
5
  from PIL import Image
@@ -74,6 +78,8 @@ def generate_video(
74
  ip_image = face_processor.process(pil_image)
75
  print("Face processing completed.")
76
 
 
 
77
  print("Generating video...")
78
  start_time = time.time()
79
  video = pipe(
@@ -101,6 +107,7 @@ with gr.Blocks() as demo:
101
  """
102
  )
103
  gr.Markdown("A Lightweight and Plug-and-Play Identity Control for Video Generation")
 
104
  gr.HTML("""
105
  <div style="display:flex;column-gap:4px;">
106
  <a href="https://github.com/WeChatCV/Stand-In">
@@ -149,6 +156,7 @@ with gr.Blocks() as demo:
149
  input_negative_prompt = gr.Textbox(
150
  label="Negative Prompt",
151
  lines=3,
 
152
  value="Vibrant colors, overexposure, static, blurred details, subtitles, style, artwork, painting, still image, overall grayness, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, crowded background, walking backwards",
153
  )
154
  input_steps = gr.Slider(
@@ -156,14 +164,17 @@ with gr.Blocks() as demo:
156
  minimum=10,
157
  maximum=50,
158
  step=1,
159
- value=20,
 
160
  info="More steps may improve details but will take longer to generate.",
161
  )
162
  output_fps = gr.Slider(
163
- label="Video FPS", minimum=10, maximum=30, step=1, value=25
 
164
  )
165
  output_quality = gr.Slider(
166
- label="Video Quality", minimum=1, maximum=10, step=1, value=9
 
167
  )
168
 
169
  generate_btn = gr.Button("Generate Video", variant="primary")
@@ -173,7 +184,7 @@ with gr.Blocks() as demo:
173
  gr.Markdown("### 3. View Generated Result")
174
  output_video = gr.Video(
175
  label="Generated Video",
176
- height=480,
177
  )
178
 
179
  examples = gr.Examples(
 
1
  import spaces
2
  import gradio as gr
3
+ import os
4
+
5
+ is_shared_ui = "fffiloni/Stand-In" in os.environ.get("SPACE_ID", "")
6
+
7
  import torch
8
  import time
9
  from PIL import Image
 
78
  ip_image = face_processor.process(pil_image)
79
  print("Face processing completed.")
80
 
81
+ if is_shared_ui: num_steps = 10
82
+
83
  print("Generating video...")
84
  start_time = time.time()
85
  video = pipe(
 
107
  """
108
  )
109
  gr.Markdown("A Lightweight and Plug-and-Play Identity Control for Video Generation")
110
+ gr.Markdown("On fffiloni's shared UI, advanced settings are disabled to optimize best results on ZeroGPU.")
111
  gr.HTML("""
112
  <div style="display:flex;column-gap:4px;">
113
  <a href="https://github.com/WeChatCV/Stand-In">
 
156
  input_negative_prompt = gr.Textbox(
157
  label="Negative Prompt",
158
  lines=3,
159
+ interactive=not is_shared_ui,
160
  value="Vibrant colors, overexposure, static, blurred details, subtitles, style, artwork, painting, still image, overall grayness, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, crowded background, walking backwards",
161
  )
162
  input_steps = gr.Slider(
 
164
  minimum=10,
165
  maximum=50,
166
  step=1,
167
+ value=10,
168
+ interactive=not is_shared_ui,
169
  info="More steps may improve details but will take longer to generate.",
170
  )
171
  output_fps = gr.Slider(
172
+ label="Video FPS", minimum=10, maximum=30, step=1, value=25,
173
+ interactive=not is_shared_ui,
174
  )
175
  output_quality = gr.Slider(
176
+ label="Video Quality", minimum=1, maximum=10, step=1, value=9,
177
+ interactive=not is_shared_ui,
178
  )
179
 
180
  generate_btn = gr.Button("Generate Video", variant="primary")
 
184
  gr.Markdown("### 3. View Generated Result")
185
  output_video = gr.Video(
186
  label="Generated Video",
187
+ #height=480,
188
  )
189
 
190
  examples = gr.Examples(