Bobby committed on
Commit
10d60cf
·
1 Parent(s): 2111e59
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -235,7 +235,7 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
235
  step=1,
236
  )
237
  num_steps = gr.Slider(
238
- label="Number of steps", minimum=1, maximum=100, value=15, step=1
239
  ) # 20/4.5 or 12 without lora, 4 with lora
240
  guidance_scale = gr.Slider(
241
  label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
@@ -340,7 +340,7 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
340
  def turn_buttons_on():
341
  return gr.update(visible=True), gr.update(visible=True)
342
 
343
- @spaces.GPU(duration=12)
344
  @torch.inference_mode()
345
  def process_image(
346
  image,
@@ -356,16 +356,16 @@ def process_image(
356
  seed,
357
  progress=gr.Progress(track_tqdm=True)
358
  ):
 
359
  print("processing image")
360
- start = time.time()
361
  preprocessor.load("NormalBae")
362
  # preprocessor.load("Canny") #20 steps, 9 guidance, 512, 512
363
-
364
  global compiled
365
  if not compiled:
366
  print("Not Compiled")
367
  compiled = True
368
-
369
  seed = random.randint(0, MAX_SEED)
370
  generator = torch.cuda.manual_seed(seed)
371
  control_image = preprocessor(
@@ -373,12 +373,14 @@ def process_image(
373
  image_resolution=image_resolution,
374
  detect_resolution=preprocess_resolution,
375
  )
 
376
  if style_selection is not None or style_selection != "None":
377
  prompt = "Photo from Pinterest of " + apply_style(style_selection) + " " + prompt + " " + a_prompt
378
  else:
379
  prompt=str(get_prompt(prompt, a_prompt))
380
  negative_prompt=str(n_prompt)
381
  print(prompt)
 
382
  results = pipe(
383
  prompt=prompt,
384
  negative_prompt=negative_prompt,
@@ -388,7 +390,9 @@ def process_image(
388
  generator=generator,
389
  image=control_image,
390
  ).images[0]
391
- print(f"\n-------------------------Processed in: {time.time() - start:.2f} seconds-------------------------")
 
 
392
 
393
  # timestamp = int(time.time())
394
  #if not os.path.exists("./outputs"):
 
235
  step=1,
236
  )
237
  num_steps = gr.Slider(
238
+ label="Number of steps", minimum=1, maximum=100, value=12, step=1
239
  ) # 20/4.5 or 12 without lora, 4 with lora
240
  guidance_scale = gr.Slider(
241
  label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
 
340
  def turn_buttons_on():
341
  return gr.update(visible=True), gr.update(visible=True)
342
 
343
+ @spaces.GPU(duration=6)
344
  @torch.inference_mode()
345
  def process_image(
346
  image,
 
356
  seed,
357
  progress=gr.Progress(track_tqdm=True)
358
  ):
359
+ preprocess_start = time.time()
360
  print("processing image")
 
361
  preprocessor.load("NormalBae")
362
  # preprocessor.load("Canny") #20 steps, 9 guidance, 512, 512
363
+
364
  global compiled
365
  if not compiled:
366
  print("Not Compiled")
367
  compiled = True
368
+
369
  seed = random.randint(0, MAX_SEED)
370
  generator = torch.cuda.manual_seed(seed)
371
  control_image = preprocessor(
 
373
  image_resolution=image_resolution,
374
  detect_resolution=preprocess_resolution,
375
  )
376
+ preprocess_time = time.time() - preprocess_start
377
  if style_selection is not None or style_selection != "None":
378
  prompt = "Photo from Pinterest of " + apply_style(style_selection) + " " + prompt + " " + a_prompt
379
  else:
380
  prompt=str(get_prompt(prompt, a_prompt))
381
  negative_prompt=str(n_prompt)
382
  print(prompt)
383
+ start = time.time()
384
  results = pipe(
385
  prompt=prompt,
386
  negative_prompt=negative_prompt,
 
390
  generator=generator,
391
  image=control_image,
392
  ).images[0]
393
+ torch.cuda.empty_cache()
394
+ print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
395
+ print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
396
 
397
  # timestamp = int(time.time())
398
  #if not os.path.exists("./outputs"):