oldmonk69 committed on
Commit
b8152ef
·
verified ·
1 Parent(s): 5fa6e05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -9
app.py CHANGED
@@ -16,11 +16,11 @@ from diffusers import (
16
  PNDMScheduler,
17
  )
18
 
19
- # ----------------- Config (set in Space Secrets if private) -----------------
20
  MODEL_REPO_ID = os.getenv("MODEL_REPO_ID", "DB2169/CyberPony_Lora").strip()
21
  CHECKPOINT_FILENAME = os.getenv("CHECKPOINT_FILENAME", "SAFETENSORS_FILENAME.safetensors").strip()
22
  HF_TOKEN = os.getenv("HF_TOKEN", None)
23
- DO_WARMUP = os.getenv("WARMUP", "1") == "1" # set WARMUP=0 to skip the first warmup call
24
 
25
  # Optional override: JSON string for LoRA manifest (same shape as loras.json)
26
  LORAS_JSON = os.getenv("LORAS_JSON", "").strip()
@@ -44,10 +44,8 @@ IS_SDXL = True
44
  LORA_MANIFEST: Dict[str, Dict[str, str]] = {}
45
  INIT_ERROR: Optional[str] = None
46
 
47
- # ----------------- Helpers -----------------
48
  def load_lora_manifest(repo_dir: str) -> Dict[str, Dict[str, str]]:
49
- """
50
- Manifest load order:
51
  1) Environment variable LORAS_JSON (if provided)
52
  2) loras.json inside the downloaded model repo
53
  3) loras.json at the Space root (next to app.py)
@@ -93,7 +91,6 @@ def load_lora_manifest(repo_dir: str) -> Dict[str, Dict[str, str]]:
93
  }
94
  }
95
 
96
- # ----------------- Bootstrap (download + load on CPU) -----------------
97
  def bootstrap_model():
98
  """
99
  Downloads MODEL_REPO_ID into REPO_DIR and loads the single-file checkpoint,
@@ -179,7 +176,6 @@ def apply_loras(selected: List[str], scale: float, repo_dir: str):
179
  except Exception as e:
180
  print(f"[WARN] set_adapters failed: {e}")
181
 
182
- # ----------------- Generation (ZeroGPU) -----------------
183
  @spaces.GPU
184
  def txt2img(
185
  prompt: str,
@@ -236,7 +232,7 @@ def warmup():
236
  except Exception as e:
237
  print(f"[WARN] Warmup failed: {e}")
238
 
239
- # ----------------- UI -----------------
240
  with gr.Blocks(title="SDXL Space (ZeroGPU, single-file, LoRA-ready)") as demo:
241
  status = gr.Markdown("")
242
 
@@ -286,4 +282,4 @@ with gr.Blocks(title="SDXL Space (ZeroGPU, single-file, LoRA-ready)") as demo:
286
  concurrency_id="gpu_queue",
287
  )
288
 
289
- demo.queue(max_size=32, default_concurrency_limit=1).launch()
 
16
  PNDMScheduler,
17
  )
18
 
19
+ # Config (set in Space Secrets if private)
20
  MODEL_REPO_ID = os.getenv("MODEL_REPO_ID", "DB2169/CyberPony_Lora").strip()
21
  CHECKPOINT_FILENAME = os.getenv("CHECKPOINT_FILENAME", "SAFETENSORS_FILENAME.safetensors").strip()
22
  HF_TOKEN = os.getenv("HF_TOKEN", None)
23
+ DO_WARMUP = os.getenv("WARMUP", "1") == "1" # set WARMUP=0 to skip the first warmup call
24
 
25
  # Optional override: JSON string for LoRA manifest (same shape as loras.json)
26
  LORAS_JSON = os.getenv("LORAS_JSON", "").strip()
 
44
  LORA_MANIFEST: Dict[str, Dict[str, str]] = {}
45
  INIT_ERROR: Optional[str] = None
46
 
 
47
  def load_lora_manifest(repo_dir: str) -> Dict[str, Dict[str, str]]:
48
+ """Manifest load order:
 
49
  1) Environment variable LORAS_JSON (if provided)
50
  2) loras.json inside the downloaded model repo
51
  3) loras.json at the Space root (next to app.py)
 
91
  }
92
  }
93
 
 
94
  def bootstrap_model():
95
  """
96
  Downloads MODEL_REPO_ID into REPO_DIR and loads the single-file checkpoint,
 
176
  except Exception as e:
177
  print(f"[WARN] set_adapters failed: {e}")
178
 
 
179
  @spaces.GPU
180
  def txt2img(
181
  prompt: str,
 
232
  except Exception as e:
233
  print(f"[WARN] Warmup failed: {e}")
234
 
235
+ # UI
236
  with gr.Blocks(title="SDXL Space (ZeroGPU, single-file, LoRA-ready)") as demo:
237
  status = gr.Markdown("")
238
 
 
282
  concurrency_id="gpu_queue",
283
  )
284
 
285
+ demo.queue(max_size=32, default_concurrency_limit=1).launch()