Bobby committed on
Commit
50cbabf
·
1 Parent(s): 6ab8222

simplified it

Browse files
Files changed (1) hide show
  1. app.py +4 -31
app.py CHANGED
@@ -19,7 +19,7 @@ from diffusers import (
19
  StableDiffusionControlNetPipeline,
20
  AutoencoderKL,
21
  )
22
- from diffusers.models.attention_processor import AttnProcessor2_0
23
  MAX_SEED = np.iinfo(np.int32).max
24
  API_KEY = os.environ.get("API_KEY", None)
25
 
@@ -86,8 +86,6 @@ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Nun
86
  pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
87
  pipe.to("cuda")
88
 
89
- pipe.enable_model_cpu_offload()
90
-
91
  print("loading preprocessor")
92
  from preprocess import Preprocessor
93
  preprocessor = Preprocessor()
@@ -386,7 +384,7 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
386
  def turn_buttons_on():
387
  return gr.update(visible=True), gr.update(visible=True)
388
 
389
- @spaces.GPU(duration=8)
390
  @torch.inference_mode()
391
  def process_image(
392
  image,
@@ -437,37 +435,12 @@ def process_image(
437
  image=control_image,
438
  ).images[0]
439
  torch.cuda.synchronize()
440
- # torch.cuda.empty_cache()
441
  print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
442
  print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
443
-
444
- # timestamp = int(time.time())
445
- #if not os.path.exists("./outputs"):
446
- # os.makedirs("./outputs")
447
- # img_path = f"./{timestamp}.jpg"
448
- # results_path = f"./{timestamp}_out_{prompt}.jpg"
449
- # imageio.imsave(img_path, image)
450
- # results.save(results_path)
451
  results.save("temp_image.jpg")
452
-
453
- # api.upload_file(
454
- # path_or_fileobj=img_path,
455
- # path_in_repo=img_path,
456
- # repo_id="broyang/anime-ai-outputs",
457
- # repo_type="dataset",
458
- # token=API_KEY,
459
- # run_as_future=True,
460
- # )
461
- # api.upload_file(
462
- # path_or_fileobj=results_path,
463
- # path_in_repo=results_path,
464
- # repo_id="broyang/anime-ai-outputs",
465
- # repo_type="dataset",
466
- # token=API_KEY,
467
- # run_as_future=True,
468
- # )
469
-
470
  return results
 
471
  if prod:
472
  demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
473
  else:
 
19
  StableDiffusionControlNetPipeline,
20
  AutoencoderKL,
21
  )
22
+ # from diffusers.models.attention_processor import AttnProcessor2_0
23
  MAX_SEED = np.iinfo(np.int32).max
24
  API_KEY = os.environ.get("API_KEY", None)
25
 
 
86
  pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
87
  pipe.to("cuda")
88
 
 
 
89
  print("loading preprocessor")
90
  from preprocess import Preprocessor
91
  preprocessor = Preprocessor()
 
384
  def turn_buttons_on():
385
  return gr.update(visible=True), gr.update(visible=True)
386
 
387
+ @spaces.GPU(duration=30)
388
  @torch.inference_mode()
389
  def process_image(
390
  image,
 
435
  image=control_image,
436
  ).images[0]
437
  torch.cuda.synchronize()
438
+ torch.cuda.empty_cache()
439
  print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
440
  print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
 
 
 
 
 
 
 
 
441
  results.save("temp_image.jpg")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
442
  return results
443
+
444
  if prod:
445
  demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
446
  else: