hexware committed
Commit 17a64d0 · verified · 1 Parent(s): f2ca4be

Update app.py

Files changed (1):
  app.py  +43 -7
app.py CHANGED
@@ -18,6 +18,7 @@ MAX_SEED = np.iinfo(np.int32).max
 
 # Optional HF login (works in Spaces if you set HF token as secret env var "hf")
 from huggingface_hub import login
+
 login(token=os.environ.get("hf"))
 
 dtype = torch.bfloat16
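Aside on this hunk: the comment calls the login optional, but login(token=None), which is what runs when the "hf" secret is unset, falls through to huggingface_hub's interactive path and can fail at Space startup. A guarded variant, a minimal sketch that is not part of this commit, would skip the call entirely:

import os
from huggingface_hub import login

token = os.environ.get("hf")
if token:  # only attempt login when the secret is actually configured
    login(token=token)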
@@ -67,7 +68,34 @@ def imagelist_to_pptx(img_files):
     return tmp.name
 
 
-@spaces.GPU(duration=1000)
+def _clamp_int(x, default: int, lo: int, hi: int) -> int:
+    try:
+        v = int(x)
+    except Exception:
+        v = default
+    return max(lo, min(hi, v))
+
+
+# Dynamic duration callable: must accept the same args as infer(). It returns seconds.
+def get_duration(
+    input_image,
+    seed=777,
+    randomize_seed=False,
+    prompt=None,
+    neg_prompt=" ",
+    true_guidance_scale=4.0,
+    num_inference_steps=50,
+    layer=4,
+    cfg_norm=True,
+    use_en_prompt=True,
+    resolution=640,
+    gpu_duration=1000,  # <-- NEW
+):
+    # Allow user override via UI (text field), but keep it sane
+    return _clamp_int(gpu_duration, default=1000, lo=20, hi=1500)
+
+
+@spaces.GPU(duration=get_duration)
 def infer(
     input_image,
     seed=777,
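The heart of the change: on ZeroGPU Spaces, spaces.GPU accepts a callable for duration, and the callable is invoked with the same arguments as the decorated function and returns the seconds to reserve; this is why get_duration (and, below, infer) must carry the extra gpu_duration parameter. A toy decorator, purely illustrative and not the real spaces implementation, shows the contract:

import functools

# Toy stand-in for spaces.GPU's dynamic-duration contract (assumption: the real
# scheduler does much more; only the call shape is modeled here).
def gpu(duration):
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            # A callable duration sees exactly the args the wrapped fn receives.
            seconds = duration(*args, **kwargs) if callable(duration) else duration
            print(f"[sketch] reserving GPU for {seconds}s")
            return fn(*args, **kwargs)
        return inner
    return wrap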
@@ -80,16 +108,14 @@ def infer(
     cfg_norm=True,
     use_en_prompt=True,
     resolution=640,
+    gpu_duration=1000,  # <-- NEW (must match get_duration signature)
 ):
     # Seed
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     # Normalize resolution input
-    try:
-        resolution = int(resolution)
-    except Exception:
-        resolution = 640
+    resolution = _clamp_int(resolution, default=640, lo=640, hi=1024)
     if resolution not in (640, 1024):
         resolution = 640
 
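Worth spelling out: _clamp_int clamps into the continuous range [640, 1024], so an in-range but unsupported value such as 800 survives the clamp; the not in (640, 1024) guard that follows is what snaps it back to 640. For example:

assert _clamp_int("800", default=640, lo=640, hi=1024) == 800    # clamped, not snapped
assert _clamp_int("4096", default=640, lo=640, hi=1024) == 1024  # above hi -> 1024, kept
assert _clamp_int("abc", default=640, lo=640, hi=1024) == 640    # unparsable -> default

Only the first case still needs the membership check.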
@@ -117,12 +143,13 @@ def infer(
         "num_inference_steps": num_inference_steps,
         "num_images_per_prompt": 1,
         "layers": layer,
-        "resolution": resolution,  # <-- 640 or 1024
+        "resolution": resolution,  # 640 or 1024
         "cfg_normalize": cfg_norm,
         "use_en_prompt": use_en_prompt,
     }
 
     print("INFER INPUTS:", inputs)
+    print("REQUESTED GPU DURATION:", gpu_duration)
 
     with torch.inference_mode():
         out = pipeline(**inputs)
@@ -244,6 +271,14 @@ The text prompt is intended to describe the overall content of the input image
                 value=True,
             )
 
+            # NEW: text field for GPU duration override (seconds)
+            gpu_duration = gr.Textbox(
+                label="GPU duration override (seconds, 20..1500)",
+                value="1000",
+                lines=1,
+                placeholder="e.g. 60, 120, 300, 1000, 1500",
+            )
+
            run_button = gr.Button("Decompose!", variant="primary")
 
        with gr.Column(scale=2):
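Design note: gr.Textbox hands the override to infer as a string, which _clamp_int already tolerates ("1000" parses; garbage falls back to the 1000 default). If stricter UI-side validation were preferred, a gr.Number would be an alternative; a sketch only, assuming a Gradio version where gr.Number supports minimum/maximum, and not what this commit does:

# Alternative sketch: numeric input with bounds enforced by the component itself.
gpu_duration = gr.Number(
    label="GPU duration override (seconds, 20..1500)",
    value=1000,
    minimum=20,
    maximum=1500,
    precision=0,  # integers only
)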
@@ -275,7 +310,8 @@ The text prompt is intended to describe the overall content of the input image
             layer,
             cfg_norm,
             use_en_prompt,
-            resolution,  # <-- NEW
+            resolution,
+            gpu_duration,  # <-- NEW
         ],
         outputs=[gallery, export_file, export_zip_file],
     )
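Gradio maps the inputs list onto infer() positionally, so the two new entries must sit in the same order as the two new trailing parameters in the signature. The equivalent direct call, with placeholder values for illustration only, makes the mapping explicit:

# img is a placeholder for the uploaded image; gpu_duration arrives as a string
# from the Textbox, and get_duration clamps it to 20..1500 seconds.
infer(
    input_image=img, seed=777, randomize_seed=False,
    prompt="a scene", neg_prompt=" ",
    true_guidance_scale=4.0, num_inference_steps=50,
    layer=4, cfg_norm=True, use_en_prompt=True,
    resolution=640,
    gpu_duration="300",
)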
 