arxivgpt kim committed on
Commit 7001deb · verified · 1 Parent(s): 5c4a99f

Update app.py

Files changed (1)
app.py +16 -32
app.py CHANGED
@@ -1,9 +1,6 @@
-#!/usr/bin/env python
-
 import os
 import random
 import uuid
-
 import gradio as gr
 import numpy as np
 from PIL import Image
@@ -13,18 +10,20 @@ from diffusers import DiffusionPipeline
 
 DESCRIPTION = """# Playground v2.5"""
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
 
+# General settings
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
+# Device setup
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-NUM_IMAGES_PER_PROMPT = 1
-
+# Load the pipeline globally when CUDA is available
+pipe = None
 if torch.cuda.is_available():
     pipe = DiffusionPipeline.from_pretrained(
         "playgroundai/playground-v2.5-1024px-aesthetic",
@@ -32,49 +31,34 @@ if torch.cuda.is_available():
         use_safetensors=True,
         add_watermarker=False,
         variant="fp16"
-    )
+    ).to(device)
+
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
-    else:
-        pipe.to(device)
-        print("Loaded on Device!")
 
     if USE_TORCH_COMPILE:
         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-        print("Model Compiled!")
-
 
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name)
     return unique_name
 
-
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-
 @spaces.GPU(enable_queue=True)
-def generate(
-    prompt: str,
-    negative_prompt: str = "",
-    use_negative_prompt: bool = False,
-    seed: int = 0,
-    width: int = 1024,
-    height: int = 1024,
-    guidance_scale: float = 3,
-    randomize_seed: bool = False,
-    use_resolution_binning: bool = True,
-    progress=gr.Progress(track_tqdm=True),
-):
-    global pipe  # Refer to pipe as a global variable.
-
-    if not torch.cuda.is_available():
-        return [], "No GPU is available. This demo can only run on a GPU."
-
-    # Continue with the logic for the GPU-available case.
+def generate(prompt: str, negative_prompt: str = "", use_negative_prompt: bool = False,
+             seed: int = 0, width: int = 1024, height: int = 1024, guidance_scale: float = 7.0,
+             randomize_seed: bool = False, use_resolution_binning: bool = True,
+             progress=gr.Progress(track_tqdm=True)):
+    global pipe
+    if pipe is None:
+        raise Exception("The model pipeline has not been loaded. This demo requires a GPU.")
+
+    # Make sure pipe is on the correct device
     pipe.to(device)
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
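Taken together, the change replaces the per-call GPU check inside generate with a module-level pipeline loaded once at import time: pipe starts as None, is populated only when CUDA is present, and the handler fails fast otherwise. Below is a minimal standalone sketch of that pattern, assuming the usual diffusers API; the torch_dtype argument and the final pipe(...) call do not appear in the hunks above and are illustrative assumptions.

import torch
from diffusers import DiffusionPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Module-level pipeline: stays None on CPU-only hosts, loaded once when CUDA exists.
pipe = None
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        "playgroundai/playground-v2.5-1024px-aesthetic",
        torch_dtype=torch.float16,  # assumption: fp16 weights, matching variant="fp16"
        use_safetensors=True,
        variant="fp16",
    ).to(device)

def generate(prompt: str, seed: int = 0, guidance_scale: float = 7.0):
    # Fail fast instead of silently attempting CPU inference.
    if pipe is None:
        raise RuntimeError("The model pipeline has not been loaded. This demo requires a GPU.")
    generator = torch.Generator().manual_seed(seed)
    # Standard text-to-image call; .images holds the list of generated PIL images.
    return pipe(prompt=prompt, guidance_scale=guidance_scale, generator=generator).images[0]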