mastari committed on
Commit
f787e04
·
1 Parent(s): 325d9a5
Files changed (1) hide show
  1. handler.py +32 -27
handler.py CHANGED
@@ -6,15 +6,22 @@ import numpy as np
6
  from PIL import Image, ImageOps
7
  from diffusers import StableDiffusionXLInpaintPipeline
8
 
 
9
  # ==========================================================
10
- # 🧠 EndpointHandler β€” Hugging Face Inference Entrypoint
11
  # ==========================================================
12
  class EndpointHandler:
13
- def __init__(self):
 
 
 
 
 
14
  print("[HANDLER] πŸš€ Initializing model...")
 
15
  model_id = "stabilityai/stable-diffusion-xl-base-1.0"
16
 
17
- # Load SDXL inpainting model (supports masks + expansion)
18
  self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
19
  model_id,
20
  torch_dtype=torch.float16,
@@ -24,18 +31,18 @@ class EndpointHandler:
24
  print("[HANDLER] βœ… Model loaded successfully on cuda")
25
 
26
  # -------------------------------------------------------
27
- # Main inference function called by Hugging Face Endpoint
28
  # -------------------------------------------------------
29
  def __call__(self, data: dict) -> dict:
30
  print("[HANDLER] πŸ“© Received request")
31
 
32
  try:
33
- # Hugging Face sends { "inputs": {...} }
34
  inputs = data.get("inputs", data)
35
  if not isinstance(inputs, dict):
36
  raise ValueError("Invalid input payload")
37
 
38
- # Extract fields
39
  b64_image = inputs.get("image")
40
  top = int(inputs.get("top", 0))
41
  bottom = int(inputs.get("bottom", 0))
@@ -47,42 +54,42 @@ class EndpointHandler:
47
  guidance = float(inputs.get("guidance_scale", 7.0))
48
  seed = int(inputs.get("seed", 42))
49
 
50
- print(f"[HANDLER] Params β†’ top={top}, bottom={bottom}, left={left}, right={right}, prompt='{prompt}'")
 
 
51
 
52
- # Decode image
53
  image_bytes = base64.b64decode(b64_image)
54
  image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
55
  width, height = image.size
56
  print(f"[HANDLER] Original image size: {width}x{height}")
57
 
58
- # -------------------------------------------------
59
- # Expand the canvas with transparent borders
60
- # -------------------------------------------------
61
  new_w = width + left + right
62
  new_h = height + top + bottom
63
  canvas = Image.new("RGB", (new_w, new_h), (128, 128, 128))
64
  canvas.paste(image, (left, top))
65
  print(f"[HANDLER] Canvas created: {canvas.size}")
66
 
67
- # Create a mask: white for new areas, black for existing
68
  mask = Image.new("L", (new_w, new_h), color=255)
69
- mask_draw = ImageOps.expand(Image.new("L", (width, height), color=0), (left, top, right, bottom), fill=255)
 
 
 
 
70
  mask.paste(mask_draw, (0, 0))
71
  print(f"[HANDLER] Mask created: {mask.size}")
72
 
73
- # -------------------------------------------------
74
- # Convert PIL to NumPy arrays (important fix)
75
- # -------------------------------------------------
76
  canvas_np = np.array(canvas.convert("RGB"))
77
  mask_np = np.array(mask.convert("L"))
78
-
79
  print(f"[HANDLER] Converted to NumPy β†’ canvas={canvas_np.shape}, mask={mask_np.shape}")
80
 
81
- # -------------------------------------------------
82
- # Diffusion process
83
- # -------------------------------------------------
84
  print("[HANDLER] πŸš€ Running diffusion process...")
85
  generator = torch.Generator(device="cuda").manual_seed(seed)
 
86
  result = self.pipe(
87
  prompt=prompt,
88
  negative_prompt=negative_prompt,
@@ -95,14 +102,12 @@ class EndpointHandler:
95
 
96
  print("[HANDLER] βœ… Diffusion complete")
97
 
98
- # -------------------------------------------------
99
- # Encode result
100
- # -------------------------------------------------
101
- buffered = io.BytesIO()
102
- result.save(buffered, format="PNG")
103
- img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
104
-
105
  print("[HANDLER] βœ… Returning base64 image")
 
106
  return {"image_base64": img_b64}
107
 
108
  except Exception as e:
 
6
  from PIL import Image, ImageOps
7
  from diffusers import StableDiffusionXLInpaintPipeline
8
 
9
+
10
  # ==========================================================
11
+ # 🧠 EndpointHandler β€” main entry for Hugging Face Endpoint
12
  # ==========================================================
13
  class EndpointHandler:
14
+ def __init__(self, model_dir: str = None):
15
+ """
16
+ Hugging Face automatically passes model_dir when starting the endpoint.
17
+ We don't need to use it, but we must accept it in the signature to avoid
18
+ a TypeError on initialization.
19
+ """
20
  print("[HANDLER] πŸš€ Initializing model...")
21
+
22
  model_id = "stabilityai/stable-diffusion-xl-base-1.0"
23
 
24
+ # Load model
25
  self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
26
  model_id,
27
  torch_dtype=torch.float16,
 
31
  print("[HANDLER] βœ… Model loaded successfully on cuda")
32
 
33
  # -------------------------------------------------------
34
+ # Called automatically for each request
35
  # -------------------------------------------------------
36
  def __call__(self, data: dict) -> dict:
37
  print("[HANDLER] πŸ“© Received request")
38
 
39
  try:
40
+ # Handle both { "inputs": {...} } and {...}
41
  inputs = data.get("inputs", data)
42
  if not isinstance(inputs, dict):
43
  raise ValueError("Invalid input payload")
44
 
45
+ # Extract user parameters
46
  b64_image = inputs.get("image")
47
  top = int(inputs.get("top", 0))
48
  bottom = int(inputs.get("bottom", 0))
 
54
  guidance = float(inputs.get("guidance_scale", 7.0))
55
  seed = int(inputs.get("seed", 42))
56
 
57
+ print(
58
+ f"[HANDLER] Params β†’ top={top}, bottom={bottom}, left={left}, right={right}, prompt='{prompt}'"
59
+ )
60
 
61
+ # Decode base64 β†’ PIL
62
  image_bytes = base64.b64decode(b64_image)
63
  image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
64
  width, height = image.size
65
  print(f"[HANDLER] Original image size: {width}x{height}")
66
 
67
+ # Create expanded canvas
 
 
68
  new_w = width + left + right
69
  new_h = height + top + bottom
70
  canvas = Image.new("RGB", (new_w, new_h), (128, 128, 128))
71
  canvas.paste(image, (left, top))
72
  print(f"[HANDLER] Canvas created: {canvas.size}")
73
 
74
+ # Create mask (white = new area)
75
  mask = Image.new("L", (new_w, new_h), color=255)
76
+ mask_draw = ImageOps.expand(
77
+ Image.new("L", (width, height), color=0),
78
+ (left, top, right, bottom),
79
+ fill=255,
80
+ )
81
  mask.paste(mask_draw, (0, 0))
82
  print(f"[HANDLER] Mask created: {mask.size}")
83
 
84
+ # Convert to NumPy arrays (diffusers requires .shape)
 
 
85
  canvas_np = np.array(canvas.convert("RGB"))
86
  mask_np = np.array(mask.convert("L"))
 
87
  print(f"[HANDLER] Converted to NumPy β†’ canvas={canvas_np.shape}, mask={mask_np.shape}")
88
 
89
+ # Run diffusion
 
 
90
  print("[HANDLER] πŸš€ Running diffusion process...")
91
  generator = torch.Generator(device="cuda").manual_seed(seed)
92
+
93
  result = self.pipe(
94
  prompt=prompt,
95
  negative_prompt=negative_prompt,
 
102
 
103
  print("[HANDLER] βœ… Diffusion complete")
104
 
105
+ # Encode output as base64
106
+ buffer = io.BytesIO()
107
+ result.save(buffer, format="PNG")
108
+ img_b64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
 
 
 
109
  print("[HANDLER] βœ… Returning base64 image")
110
+
111
  return {"image_base64": img_b64}
112
 
113
  except Exception as e: