James040 committed on
Commit
9b2c620
·
verified ·
1 Parent(s): c32e0c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -11
app.py CHANGED
@@ -45,16 +45,31 @@ def upscale_bg_tiled(frame, tile_size=128, overlap=16):
45
  upscaled_img[py1:py2, px1:px2] = tile_out[oy : oy+(py2-py1), ox : ox+(px2-px1)]
46
  return upscaled_img
47
 
48
def restore_face_core(img):
    """CodeFormer Face Restoration.

    Resizes the input to the model's fixed 512x512 resolution, runs the
    ONNX session, and returns the restored image at the original size.
    """
    # Pre-process (CodeFormer expects 512x512)
    resized = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    tensor = np.transpose(rgb, (2, 0, 1))[np.newaxis, :]

    # AI inference — single image input, name taken from the session itself
    feed_name = face_session.get_inputs()[0].name
    out = face_session.run(None, {feed_name: tensor})[0]

    # Post-process: CHW float [0,1] -> HWC uint8 BGR, back to original size
    chw = np.clip(np.squeeze(out), 0, 1)
    hwc = (chw.transpose(1, 2, 0) * 255.0).astype(np.uint8)
    bgr = cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR)
    return cv2.resize(bgr, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
60
 
@@ -67,13 +82,12 @@ def hybrid_enhancer(img_data, mode, face_strength, progress=gr.Progress()):
67
  progress(0, desc="Stage 1: Upscaling Background...")
68
  bg_upscaled = upscale_bg_tiled(img)
69
  progress(0.5, desc="Stage 2: Restoring Face Details...")
70
- # In a CPU-only "Noob" version, we treat the whole crop as a face for simplicity
71
- face_restored = restore_face_core(bg_upscaled)
72
- # Blend based on strength
73
  return cv2.addWeighted(face_restored, face_strength, bg_upscaled, 1 - face_strength, 0)
74
  else:
75
- # Just Face Restoration (No background upscale)
76
- return restore_face_core(img)
77
 
78
  # --- UI ---
79
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo")) as demo:
 
45
  upscaled_img[py1:py2, px1:px2] = tile_out[oy : oy+(py2-py1), ox : ox+(px2-px1)]
46
  return upscaled_img
47
 
48
def restore_face_core(img, fidelity=0.5):
    """CodeFormer Face Restoration with dynamic ONNX input mapping.

    Args:
        img: BGR uint8 image of any size; resized to the model's fixed
            512x512 input and the result is resized back to img's size.
        fidelity: CodeFormer fidelity weight in [0, 1] — presumably lower
            favors restored detail, higher favors faithfulness (TODO confirm
            against the exported model's convention).

    Returns:
        Restored BGR uint8 image with the same height/width as `img`.
    """
    # 1. Pre-process (CodeFormer expects a 1x3x512x512 float32 RGB tensor in [0, 1])
    img_512 = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
    img_in = np.transpose(cv2.cvtColor(img_512, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0, (2, 0, 1))[np.newaxis, :]

    # 2. Map inputs dynamically. Feed the image to the first input that is
    #    NOT 'weight' — some exports list 'weight' first, and blindly using
    #    input_names[0] would assign the image there and then clobber it,
    #    leaving the real image input unfed.
    input_names = [i.name for i in face_session.get_inputs()]
    weight = np.array([fidelity], dtype=np.float32)  # CodeFormer weights are 1-D arrays

    image_name = next((n for n in input_names if n != 'weight'), input_names[0])
    input_feed = {image_name: img_in}

    if 'weight' in input_names:
        input_feed['weight'] = weight
    else:
        # Fallback: treat the first remaining input (other name) as the fidelity scalar
        for name in input_names:
            if name not in input_feed:
                input_feed[name] = weight
                break

    # 3. AI inference
    out = face_session.run(None, input_feed)[0]

    # 4. Post-process: CHW float [0,1] -> HWC uint8 BGR, restore original resolution
    res = cv2.cvtColor((np.clip(np.squeeze(out), 0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8), cv2.COLOR_RGB2BGR)
    return cv2.resize(res, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
75
 
 
82
  progress(0, desc="Stage 1: Upscaling Background...")
83
  bg_upscaled = upscale_bg_tiled(img)
84
  progress(0.5, desc="Stage 2: Restoring Face Details...")
85
+ # Use the strength slider as the internal model weight too
86
+ face_restored = restore_face_core(bg_upscaled, fidelity=face_strength)
 
87
  return cv2.addWeighted(face_restored, face_strength, bg_upscaled, 1 - face_strength, 0)
88
  else:
89
+ # Just Face Restoration
90
+ return restore_face_core(img, fidelity=face_strength)
91
 
92
  # --- UI ---
93
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo")) as demo: