JS6969 committed on
Commit
ac631ab
·
verified ·
1 Parent(s): 24e0155

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -49
app.py CHANGED
@@ -183,7 +183,7 @@ else:
183
  # Helpers
184
  # ─────────────────────────────────────────────────────────────
185
 
186
- # Map UI model names (demo) to our internal model IDs
187
  def map_ui_model_to_internal(ui_name: str) -> str:
188
  mapping = {
189
  "RealESRGAN_x4plus": "x4plus",
@@ -202,26 +202,6 @@ def clamp_scale_for_model(outscale: int, model_id: str) -> int:
202
  # For x4plus / x4plus-anime, force 4 (ignore 5–6)
203
  return 4
204
 
205
-
206
- def sample_paths(paths: List[Path] | List[str], n: int = 30) -> List[str]:
207
- """Evenly sample up to n items across the entire list (deterministic), in natural numeric order."""
208
- if not paths:
209
- return []
210
- paths = sorted(paths, key=_natural_key) # ensure numeric order first
211
- total = len(paths)
212
- n = max(1, min(n, total))
213
- if n == total:
214
- return [str(p) for p in paths]
215
- step = (total - 1) / (n - 1) # cover both ends
216
- idxs = [round(i * step) for i in range(n)]
217
- out, seen = [], set()
218
- for i in idxs:
219
- if i not in seen:
220
- out.append(str(paths[int(i)]))
221
- seen.add(int(i))
222
- return out
223
-
224
-
225
  def sanitize_prefix(txt: str) -> str:
226
  txt = (txt or "").strip()
227
  if not txt:
@@ -594,14 +574,33 @@ def step2_process_next_batch(
594
  ):
595
  if not up_src_paths or not up_out_dir:
596
  return None, None, "Load sources first.", render_progress(0.0, "Idle"), up_done_idx, up_out_dir
 
597
  model_id = map_ui_model_to_internal(ui_model_name)
598
  scale = clamp_scale_for_model(int(outscale or 4), model_id)
599
  device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"
600
  half = (precision == "half") and (device == "cuda")
601
  tile = int(tile or 256)
602
  batch_size = max(1, int(batch_size or 8))
 
 
603
  upsampler = get_realesrganer(model_id, scale, tile, half, device=device)
604
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
  start = int(up_done_idx or 0)
606
  end = min(start + batch_size, int(up_total or 0))
607
  out_dir = Path(up_out_dir)
@@ -619,16 +618,26 @@ def step2_process_next_batch(
619
  try:
620
  with Image.open(fp) as im:
621
  img = im.convert("RGB")
622
- output, _ = upsampler.enhance(np.array(img), outscale=scale)
623
- Image.fromarray(output).save(out_dir / (Path(fp).stem + ".jpg"), quality=95)
624
- except Exception:
625
- pass
 
 
 
 
 
 
 
 
 
626
  processed_now += 1
627
 
628
  next_idx = end
629
  pct = int(round((next_idx / up_total) * 100)) if up_total else 0
630
  label = (f"Processed {processed_now} image(s) this batch. "
631
- f"{next_idx}/{up_total} done (x{scale}, model={ui_model_name}).")
 
632
  prog = render_progress(pct, f"Upscaling… {pct}%")
633
 
634
  gallery = _build_gallery_from_dir(out_dir, 30)
@@ -638,6 +647,7 @@ def step2_process_next_batch(
638
 
639
 
640
 
 
641
  def save_uploaded_images(files: List[gr.File] | None, prefix: str = "upload") -> Tuple[List[Path], Path]:
642
  tmp = Path(tempfile.mkdtemp(prefix="imgup_"))
643
  in_dir = tmp / "input"; in_dir.mkdir(parents=True, exist_ok=True)
@@ -654,19 +664,8 @@ def save_uploaded_images(files: List[gr.File] | None, prefix: str = "upload") ->
654
 
655
 
656
  # Map UI model names to internal IDs (make sure MODEL_MAP exists above)
657
- MODEL_MAP = {
658
- "RealESRGAN_x4plus": "x4plus",
659
- "RealESRGAN_x4plus_anime_6B": "x4plus-anime",
660
- "RealESRGAN_x2plus": "x2plus",
661
- # fallbacks for entries we don't wire separately here:
662
- "RealESRNet_x4plus": "x4plus",
663
- "realesr-general-x4v3": "x4plus",
664
- }
665
-
666
- def _clamp_scale_for_model(outscale: int, model_id: str) -> int:
667
- if model_id == "x2plus":
668
- return 2
669
- return 4 # x4 models
670
 
671
  def step2_upscale(
672
  frames_list: List[str] | None,
@@ -717,17 +716,9 @@ def step2_upscale(
717
  batch_size = 16
718
 
719
  # Map UI model -> internal id; clamp scale to model
720
- MODEL_MAP = {
721
- "RealESRGAN_x4plus": "x4plus",
722
- "RealESRNet_x4plus": "x4plus", # fallback
723
- "RealESRGAN_x4plus_anime_6B": "x4plus-anime",
724
- "RealESRGAN_x2plus": "x2plus",
725
- "realesr-general-x4v3": "x4plus", # fallback
726
- }
727
- model_id = MODEL_MAP.get(ui_model_name, "x4plus")
728
 
729
- def _clamp_scale_for_model(s: int, mid: str) -> int:
730
- return 2 if mid == "x2plus" else 4
731
 
732
  scale = _clamp_scale_for_model(int(outscale or 4), model_id)
733
  device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"
 
183
  # Helpers
184
  # ─────────────────────────────────────────────────────────────
185
 
186
+ # Map UI model names (demo) to our internal model IDs
187
  def map_ui_model_to_internal(ui_name: str) -> str:
188
  mapping = {
189
  "RealESRGAN_x4plus": "x4plus",
 
202
  # For x4plus / x4plus-anime, force 4 (ignore 5–6)
203
  return 4
204
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
  def sanitize_prefix(txt: str) -> str:
206
  txt = (txt or "").strip()
207
  if not txt:
 
574
  ):
575
  if not up_src_paths or not up_out_dir:
576
  return None, None, "Load sources first.", render_progress(0.0, "Idle"), up_done_idx, up_out_dir
577
+
578
  model_id = map_ui_model_to_internal(ui_model_name)
579
  scale = clamp_scale_for_model(int(outscale or 4), model_id)
580
  device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"
581
  half = (precision == "half") and (device == "cuda")
582
  tile = int(tile or 256)
583
  batch_size = max(1, int(batch_size or 8))
584
+
585
+ # Build upsampler
586
  upsampler = get_realesrganer(model_id, scale, tile, half, device=device)
587
 
588
+ # Optional: GFPGAN face enhancer
589
+ face_enhancer = None
590
+ if face_enhance:
591
+ try:
592
+ from gfpgan import GFPGANer
593
+ face_enhancer = GFPGANer(
594
+ model_path="https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth",
595
+ upscale=scale,
596
+ arch="clean",
597
+ channel_multiplier=2,
598
+ bg_upsampler=upsampler
599
+ )
600
+ except Exception as e:
601
+ print("GFPGAN load failed:", e)
602
+ face_enhancer = None
603
+
604
  start = int(up_done_idx or 0)
605
  end = min(start + batch_size, int(up_total or 0))
606
  out_dir = Path(up_out_dir)
 
618
  try:
619
  with Image.open(fp) as im:
620
  img = im.convert("RGB")
621
+ cv_img = np.array(img)
622
+
623
+ if face_enhancer:
624
+ _, _, output = face_enhancer.enhance(
625
+ cv_img, has_aligned=False, only_center_face=False, paste_back=True
626
+ )
627
+ else:
628
+ output, _ = upsampler.enhance(cv_img, outscale=scale, denoise_strength=float(denoise_strength or 0.5))
629
+
630
+ Image.fromarray(output).save(out_dir / (Path(fp).stem + ".jpg"), quality=95)
631
+
632
+ except Exception as e:
633
+ print("Upscale error:", e)
634
  processed_now += 1
635
 
636
  next_idx = end
637
  pct = int(round((next_idx / up_total) * 100)) if up_total else 0
638
  label = (f"Processed {processed_now} image(s) this batch. "
639
+ f"{next_idx}/{up_total} done (x{scale}, model={ui_model_name}, "
640
+ f"denoise={denoise_strength}, face={face_enhance}).")
641
  prog = render_progress(pct, f"Upscaling… {pct}%")
642
 
643
  gallery = _build_gallery_from_dir(out_dir, 30)
 
647
 
648
 
649
 
650
+
651
  def save_uploaded_images(files: List[gr.File] | None, prefix: str = "upload") -> Tuple[List[Path], Path]:
652
  tmp = Path(tempfile.mkdtemp(prefix="imgup_"))
653
  in_dir = tmp / "input"; in_dir.mkdir(parents=True, exist_ok=True)
 
664
 
665
 
666
  # Map UI model names to internal IDs (make sure MODEL_MAP exists above)
667
+ model_id = map_ui_model_to_internal(ui_model_name)
668
+ scale = clamp_scale_for_model(int(outscale or 4), model_id)
 
 
 
 
 
 
 
 
 
 
 
669
 
670
  def step2_upscale(
671
  frames_list: List[str] | None,
 
716
  batch_size = 16
717
 
718
  # Map UI model -> internal id; clamp scale to model
719
+ model_id = map_ui_model_to_internal(ui_model_name)
720
+ scale = clamp_scale_for_model(int(outscale or 4), model_id)
 
 
 
 
 
 
721
 
 
 
722
 
723
  scale = _clamp_scale_for_model(int(outscale or 4), model_id)
724
  device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"