mastari committed on
Commit
e004dc4
·
1 Parent(s): 2917c28

Add extreme logging to GFPGAN handler for debugging

Browse files
Files changed (2) hide show
  1. handler.py +145 -86
  2. requirements.txt +2 -5
handler.py CHANGED
@@ -1,115 +1,174 @@
1
  import os
2
  import io
3
- import torch
4
  import base64
 
5
  import requests
6
  import numpy as np
7
  from PIL import Image
8
  from gfpgan import GFPGANer
9
- from realesrgan import RealESRGANer
10
- from basicsr.archs.rrdbnet_arch import RRDBNet
11
 
12
 
13
class EndpointHandler:
    """Face-restoration inference handler.

    Combines GFPGAN v1.4 (face restoration) with Real-ESRGAN x4plus as the
    background upsampler.  Accepts raw image bytes, a base64 string, or a
    dict payload, and returns ``{"image": <base64 PNG>}`` on success or
    ``{"error": <message>}`` on failure.
    """

    def __init__(self, path="."):
        # `path` is the directory where downloaded model weights are cached.
        print("🚀 Initializing GFPGANv1 Face Restoration Pipeline...")

        # ------------------------------------------------------------
        # Load Real-ESRGAN (background upscaler)
        # ------------------------------------------------------------
        self.esrgan_url = (
            "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/"
            "RealESRGAN_x4plus.pth"
        )
        self.esrgan_path = os.path.join(path, "RealESRGAN_x4plus.pth")

        if not os.path.exists(self.esrgan_path):
            print("📥 Downloading RealESRGAN_x4plus.pth...")
            # NOTE(review): no timeout, and the whole checkpoint is buffered
            # in memory via r.content before writing — worth hardening.
            r = requests.get(self.esrgan_url)
            r.raise_for_status()
            with open(self.esrgan_path, "wb") as f:
                f.write(r.content)
            print("✅ Downloaded Real-ESRGAN model.")

        # RRDBNet backbone whose hyperparameters match the x4plus checkpoint.
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)

        device = "cuda" if torch.cuda.is_available() else "cpu"

        self.bg_upsampler = RealESRGANer(
            scale=4,
            model_path=self.esrgan_path,
            model=model,
            half=False,  # full precision; half=True would require CUDA
            device=device,
        )

        # ------------------------------------------------------------
        # Load GFPGAN model
        # ------------------------------------------------------------
        # The GFPGANv1.4.pth asset lives under the repo's v1.3.0 release tag.
        self.gfpgan_url = (
            "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
        )
        self.gfpgan_path = os.path.join(path, "GFPGANv1.4.pth")

        if not os.path.exists(self.gfpgan_path):
            print("📥 Downloading GFPGANv1.4.pth...")
            r = requests.get(self.gfpgan_url)
            r.raise_for_status()
            with open(self.gfpgan_path, "wb") as f:
                f.write(r.content)
            print("✅ Downloaded GFPGANv1.4.pth.")

        self.face_enhancer = GFPGANer(
            model_path=self.gfpgan_path,
            upscale=4,
            arch="clean",
            channel_multiplier=2,
            bg_upsampler=self.bg_upsampler,
        )

        print("✅ GFPGANv1 Face Restoration Pipeline Ready!")

    # ------------------------------------------------------------------
    # Main callable
    # ------------------------------------------------------------------
    def __call__(self, data):
        # Decode → restore → encode.  Never raises: any failure is
        # converted into an {"error": ...} response for the caller.
        try:
            image = self._load_image(data)
            restored = self._restore_face(image)
            return self._encode_image(restored)
        except Exception as e:
            print("💥 Error:", str(e))
            return {"error": str(e)}

    # ------------------------------------------------------------------
    # Helper functions
    # ------------------------------------------------------------------
    def _load_image(self, data):
        # Handles raw bytes, base64 JSON, or dict
        if isinstance(data, (bytes, bytearray)):
            return Image.open(io.BytesIO(data)).convert("RGB")

        if isinstance(data, dict):
            # Accept either {"inputs": ...} or {"image": ...}; the field may
            # itself be raw bytes or a base64-encoded string.
            field = data.get("inputs") or data.get("image")
            if isinstance(field, str):
                field = base64.b64decode(field)
            return Image.open(io.BytesIO(field)).convert("RGB")

        if isinstance(data, str):
            # Bare base64 string payload.
            decoded = base64.b64decode(data)
            return Image.open(io.BytesIO(decoded)).convert("RGB")

        raise ValueError("Expected image bytes or base64 string.")

    def _restore_face(self, image):
        # GFPGAN/OpenCV expect BGR channel order, PIL delivers RGB.
        img_np = np.array(image)[:, :, ::-1]  # RGB -> BGR
        cropped_faces, restored_faces, restored_img = self.face_enhancer.enhance(
            img_np, has_aligned=False, only_center_face=False, paste_back=True
        )
        return Image.fromarray(restored_img[:, :, ::-1])  # back to RGB

    def _encode_image(self, pil_img):
        # PNG-encode in memory and base64-wrap for a JSON-safe response.
        buf = io.BytesIO()
        pil_img.save(buf, format="PNG")
        encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
        return {"image": encoded}
 
1
  import os
2
  import io
 
3
  import base64
4
+ import torch
5
  import requests
6
  import numpy as np
7
  from PIL import Image
8
  from gfpgan import GFPGANer
 
 
9
 
10
 
11
class EndpointHandler:
    """Hugging Face Inference Endpoints handler for GFPGAN face restoration.

    Accepts an image as raw bytes, a bare base64 string, or a JSON dict with
    an ``inputs``/``image`` field, and returns ``{"image": <base64 PNG>}``
    on success or ``{"error": <message>}`` on failure.  The verbose print
    logging is intentional (debugging instrumentation).
    """

    def __init__(self, path="."):
        """Download the GFPGANv1.4 weights (if absent) and build the restorer.

        path: directory where the checkpoint is cached.
        Raises on download or model-initialization failure.
        """
        print("🚀 [INIT] Starting GFPGAN EndpointHandler initialization...")
        print(f"📁 Working directory: {os.getcwd()}")
        print(f"📂 Handler path argument: {path}")

        # ----------------------------
        # MODEL DOWNLOAD
        # ----------------------------
        # FIX: GFPGANv1.4.pth is published under the *v1.3.0* release tag of
        # the GFPGAN repo; a "v1.4.0" tag does not exist, so the previous URL
        # 404'd and raise_for_status() aborted initialization.
        self.model_url = (
            "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
        )
        self.model_path = os.path.join(path, "GFPGANv1.4.pth")
        print(f"🔗 [MODEL] Model URL: {self.model_url}")
        print(f"📦 [MODEL] Local model path: {self.model_path}")

        if not os.path.exists(self.model_path):
            print("📥 [DOWNLOAD] Model not found locally — starting download...")
            try:
                # Stream the ~350 MB checkpoint straight to disk instead of
                # buffering it whole in memory via r.content.
                with requests.get(self.model_url, timeout=60, stream=True) as r:
                    r.raise_for_status()
                    with open(self.model_path, "wb") as f:
                        for chunk in r.iter_content(chunk_size=1 << 20):
                            f.write(chunk)
                print(f"✅ [DOWNLOAD] Model successfully saved to {self.model_path}")
            except Exception as e:
                print(f"💥 [ERROR] Failed to download GFPGAN weights: {e}")
                raise  # bare raise preserves the original traceback
        else:
            print("📚 [CACHE] Found existing model file, skipping download.")

        # ----------------------------
        # INITIALIZE RESTORER
        # ----------------------------
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"💻 [DEVICE] Using device: {device}")

        try:
            print("🧠 [MODEL INIT] Initializing GFPGANer...")
            self.restorer = GFPGANer(
                model_path=self.model_path,
                upscale=2,
                arch="clean",
                channel_multiplier=2,
                bg_upsampler=None,  # no background upscaling in this variant
                device=device,
            )
            print("✅ [MODEL INIT] GFPGANer successfully initialized.")
        except Exception as e:
            print(f"💥 [ERROR] Failed to initialize GFPGANer: {e}")
            raise

        print("🎉 [INIT DONE] GFPGAN model ready for inference!")

    # ----------------------------
    # CALLABLE ENTRY POINT
    # ----------------------------
    def __call__(self, data):
        """Entry point: decode → restore → encode.

        Never raises; exceptions are converted into {"error": message}.
        """
        print("\n🛰️ [CALL] ======= Incoming request =======")
        print(f"🔍 [CALL] Input type: {type(data)}")
        print(f"🔍 [CALL] Raw preview: {str(data)[:300]}")

        try:
            image = self.preprocess(data)
            print(f"📸 [PREPROCESS DONE] Type: {type(image)}, Size: {image.size}")

            restored = self.inference(image)
            print(f"🎨 [INFERENCE DONE] Output type: {type(restored)}, Size: {restored.size}")

            response = self.postprocess(restored)
            print(f"📦 [POSTPROCESS DONE] Keys: {list(response.keys())}")
            print("✅ [CALL COMPLETE] Successfully processed request.\n")
            return response

        except Exception as e:
            print("💥 [CALL ERROR] Exception occurred during inference!")
            print(f"❗ [TRACEBACK]: {repr(e)}")
            return {"error": str(e)}

    # ----------------------------
    # PREPROCESS STEP
    # ----------------------------
    def preprocess(self, data):
        """Decode the incoming payload into an RGB PIL image.

        Supported payloads: raw bytes; a dict with 'inputs'/'image' holding
        bytes or a base64 string; or a bare base64 string (restored from the
        previous handler version for backward compatibility).
        Raises ValueError on anything else.
        """
        print("🔧 [PREPROCESS] Starting preprocessing step...")
        print(f"🔧 [PREPROCESS] Received type: {type(data)}")

        # Case 1: raw bytes directly
        if isinstance(data, (bytes, bytearray)):
            print("📥 [PREPROCESS] Detected raw bytes input.")
            img = Image.open(io.BytesIO(data)).convert("RGB")
            print(f"📸 [PREPROCESS] Loaded image — size: {img.size}, mode: {img.mode}")
            return img

        # Case 2: dict payload (like {'inputs': base64_string})
        if isinstance(data, dict):
            print("📦 [PREPROCESS] Detected dict input.")
            keys = list(data.keys())
            print(f"📦 [PREPROCESS] Dict keys: {keys}")
            img_field = data.get("inputs") or data.get("image") or None

            if img_field is None:
                raise ValueError("Missing 'inputs' or 'image' key in JSON payload.")

            # Base64 string case
            if isinstance(img_field, str):
                print("🧬 [PREPROCESS] Input is a base64 string — decoding...")
                img_field = base64.b64decode(img_field)

            # Raw bytes inside dict
            if isinstance(img_field, (bytes, bytearray)):
                print("📥 [PREPROCESS] Input is bytes inside dict — converting to image...")
                img = Image.open(io.BytesIO(img_field)).convert("RGB")
                print(f"📸 [PREPROCESS] Image loaded from dict — size: {img.size}, mode: {img.mode}")
                return img

        # Case 3: bare base64 string (the pre-refactor handler accepted this;
        # the refactor dropped it, breaking existing callers).
        if isinstance(data, str):
            print("🧬 [PREPROCESS] Detected bare base64 string — decoding...")
            decoded = base64.b64decode(data)
            return Image.open(io.BytesIO(decoded)).convert("RGB")

        raise ValueError("Unsupported input format — expected bytes or base64 data")

    # ----------------------------
    # INFERENCE STEP
    # ----------------------------
    def inference(self, image):
        """Run GFPGAN restoration on an RGB PIL image; return an RGB PIL image.

        Raises RuntimeError if the restorer returns None.
        """
        print("🧠 [INFERENCE] Starting restoration process...")
        print(f"🧠 [INFERENCE] Input image type: {type(image)}, size: {image.size}, mode: {image.mode}")

        try:
            np_img = np.array(image)
            print(f"🔢 [INFERENCE] NumPy array shape: {np_img.shape}, dtype: {np_img.dtype}")

            # Convert RGB → BGR: GFPGAN follows the OpenCV channel convention.
            np_img = np_img[:, :, ::-1]
            print("🎨 [INFERENCE] Converted RGB to BGR for OpenCV-style processing.")

            # Run the GFPGAN restoration
            print("🚀 [INFERENCE] Running GFPGANer.enhance()...")
            _, _, restored_img = self.restorer.enhance(
                np_img, has_aligned=False, only_center_face=False, paste_back=True
            )

            if restored_img is None:
                raise RuntimeError("Restoration failed — GFPGAN returned None")

            print("✅ [INFERENCE] Restoration successful, converting to PIL.Image...")
            restored_pil = Image.fromarray(restored_img[:, :, ::-1])  # Convert BGR → RGB
            print(f"🖼️ [INFERENCE] Restored image size: {restored_pil.size}, mode: {restored_pil.mode}")
            return restored_pil

        except Exception as e:
            print(f"💥 [INFERENCE ERROR] {e}")
            raise  # keep the original traceback intact

    # ----------------------------
    # POSTPROCESS STEP
    # ----------------------------
    def postprocess(self, restored_img):
        """PNG-encode a PIL image and wrap it as {'image': <base64 str>}."""
        print("📦 [POSTPROCESS] Starting encoding...")
        try:
            buf = io.BytesIO()
            restored_img.save(buf, format="PNG")
            encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
            print(f"✅ [POSTPROCESS] Image successfully encoded to base64 (len={len(encoded)})")
            return {"image": encoded}
        except Exception as e:
            print(f"💥 [POSTPROCESS ERROR] {e}")
            raise
requirements.txt CHANGED
@@ -1,9 +1,6 @@
1
- torch==2.1.2
2
- torchvision==0.16.2
3
  gfpgan==1.3.8
4
- realesrgan==0.3.0
5
- basicsr==1.4.2
6
- facexlib==0.3.0
7
  numpy==1.26.4
8
  Pillow>=10.0.0
9
  opencv-python
 
1
+ torch>=2.1.0
2
+ torchvision>=0.16.0
3
  gfpgan==1.3.8
 
 
 
4
  numpy==1.26.4
5
  Pillow>=10.0.0
6
  opencv-python