KaiShin1885 committed on
Commit
68ac071
·
verified ·
1 Parent(s): b8b4dab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -9
app.py CHANGED
@@ -8,15 +8,14 @@ import transformers
8
  import os
9
 
10
  # HF_TOKEN 설정
11
- os.environ["HF_TOKEN"] = "YOUR_TOKEN_HERE"
 
12
 
13
  # xformers ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์„ค์น˜
14
  try:
15
  import xformers
16
  except ImportError:
17
- print("xformers 라이브러리를 설치합니다.")
18
- os.system("pip install xformers")
19
- import xformers
20
 
21
  transformers.utils.move_cache() # 캐시 업데이트를 강제로 진행
22
 
@@ -25,14 +24,20 @@ torch_device = torch.device(device)
25
 
26
  if torch.cuda.is_available():
27
  torch.cuda.max_memory_allocated(device=device, max_memory_allocated=1024*1024*2) # 2GB 메모리 할당량 설정
28
- pipe = DiffusionPipeline.from_pretrained("stable-diffusion-3-medium", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
 
 
 
29
  try:
30
  pipe.enable_xformers_memory_efficient_attention()
31
  except ImportError:
32
  print("xformers 라이브러리가 설치되지 않았습니다.")
33
  pipe = pipe.to(device)
34
  else:
35
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
 
 
 
36
  pipe = pipe.to(device)
37
 
38
  MAX_SEED = np.iinfo(np.int32).max
@@ -56,9 +61,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
56
 
57
  return image
58
 
59
- client = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", token=os.getenv("HF_TOKEN"))
60
- if client is None:
61
- print("HF_TOKEN이 설정되지 않았습니다.")
 
62
 
63
  def respond(input):
64
  return client.chat_completion(
 
8
  import os
9
 
10
  # HF_TOKEN 설정
11
+ if os.getenv("HF_TOKEN") is None:
12
+ raise ValueError("HF_TOKEN is not set")
13
 
14
  # xformers ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์„ค์น˜
15
  try:
16
  import xformers
17
  except ImportError:
18
+ raise ImportError("xformers is not installed. Please install it using pip install xformers")
 
 
19
 
20
  transformers.utils.move_cache() # 캐시 업데이트를 강제로 진행
21
 
 
24
 
25
  if torch.cuda.is_available():
26
  torch.cuda.max_memory_allocated(device=device, max_memory_allocated=1024*1024*2) # 2GB 메모리 할당량 설정
27
+ try:
28
+ pipe = DiffusionPipeline.from_pretrained("stable-diffusion-3-medium", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
29
+ except Exception as e:
30
+ raise ValueError("Failed to load DiffusionPipeline: {}".format(e))
31
  try:
32
  pipe.enable_xformers_memory_efficient_attention()
33
  except ImportError:
34
  print("xformers 라이브러리가 설치되지 않았습니다.")
35
  pipe = pipe.to(device)
36
  else:
37
+ try:
38
+ pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
39
+ except Exception as e:
40
+ raise ValueError("Failed to load DiffusionPipeline: {}".format(e))
41
  pipe = pipe.to(device)
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
 
61
 
62
  return image
63
 
64
+ try:
65
+ client = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", token=os.getenv("HF_TOKEN"))
66
+ except Exception as e:
67
+ raise ValueError("Failed to create InferenceClient: {}".format(e))
68
 
69
  def respond(input):
70
  return client.chat_completion(