Gjm1234 committed on
Commit
fdb21d9
·
verified ·
1 Parent(s): e60d5f7

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +19 -14
handler.py CHANGED
@@ -1,6 +1,7 @@
1
  import io
2
  import base64
3
  import torch
 
4
  from PIL import Image
5
  from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
6
 
@@ -9,30 +10,36 @@ CONTROLNET = "lllyasviel/controlnet-depth-sdxl-1.0"
9
 
10
  class EndpointHandler:
11
  def __init__(self, path=""):
12
- print("🔧 Initializing handler — loading remote models...")
13
 
14
- print("🔧 Loading ControlNet...")
 
 
 
 
15
  controlnet = ControlNetModel.from_pretrained(
16
  CONTROLNET,
17
- torch_dtype=torch.float16
 
18
  )
19
 
20
- print("🚀 Loading Juggernaut XL main model...")
21
  self.pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
22
  BASE_MODEL,
23
  controlnet=controlnet,
24
  torch_dtype=torch.float16,
25
- use_safetensors=True
 
26
  ).to("cuda")
27
 
28
  self.pipe.enable_xformers_memory_efficient_attention()
29
- print("✅ Pipeline ready")
30
 
31
  def __call__(self, data):
32
  prompt = data.get("inputs", "")
33
  img_b64 = data.get("image", None)
34
 
35
- # Decode input image OR generate blank white one
36
  if img_b64:
37
  img_bytes = base64.b64decode(img_b64)
38
  init = Image.open(io.BytesIO(img_bytes)).convert("RGB")
@@ -40,18 +47,16 @@ class EndpointHandler:
40
  init = Image.new("RGB", (1024, 1024), "white")
41
 
42
  outputs = []
43
- for _ in range(10): # always 10 variations
44
  result = self.pipe(
45
  prompt=prompt,
46
  image=init,
47
- num_inference_steps=25,
48
  guidance_scale=6.0,
49
- width=1024,
50
- height=1024,
51
  ).images[0]
52
 
53
- buf = io.BytesIO()
54
- result.save(buf, format="PNG")
55
- outputs.append(base64.b64encode(buf.getvalue()).decode())
56
 
57
  return { "images": outputs }
 
1
  import io
2
  import base64
3
  import torch
4
+ import os
5
  from PIL import Image
6
  from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
7
 
 
10
 
11
class EndpointHandler:
    """Inference endpoint wrapper around an SDXL + ControlNet pipeline.

    Loads the base model and the depth ControlNet once at startup
    (``__init__``) and serves image-generation requests via ``__call__``.
    """

    def __init__(self, path=""):
        """Load the ControlNet and SDXL pipeline onto the GPU.

        Args:
            path: Unused; present to satisfy the Hugging Face Inference
                Endpoints handler interface.

        Raises:
            RuntimeError: If the ``HF_TOKEN`` environment variable is not set.
        """
        print("🔧 Initializing handler")

        # The model repos require authentication, so a token is mandatory.
        # NOTE(review): presumably BASE_MODEL/CONTROLNET are gated repos —
        # confirm, otherwise the hard failure here is stricter than needed.
        HF_TOKEN = os.environ.get("HF_TOKEN")
        if not HF_TOKEN:
            raise RuntimeError("❌ HF_TOKEN not found in environment variables")

        print("🔧 Loading ControlNet with token…")
        controlnet = ControlNetModel.from_pretrained(
            CONTROLNET,
            torch_dtype=torch.float16,
            token=HF_TOKEN,
        )

        print("🚀 Loading Juggernaut XL model with token…")
        self.pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
            BASE_MODEL,
            controlnet=controlnet,
            torch_dtype=torch.float16,
            use_safetensors=True,
            token=HF_TOKEN,
        ).to("cuda")

        self.pipe.enable_xformers_memory_efficient_attention()
        print("✅ Pipeline loaded successfully!")

    def _load_init_image(self, img_b64):
        """Decode a base64 conditioning image, or build a blank white canvas.

        Args:
            img_b64: Base64-encoded image bytes, or a falsy value for none.

        Returns:
            A PIL ``Image`` in RGB mode (1024x1024 white when no input given).
        """
        if img_b64:
            img_bytes = base64.b64decode(img_b64)
            return Image.open(io.BytesIO(img_bytes)).convert("RGB")
        return Image.new("RGB", (1024, 1024), "white")

    def _png_b64(self, image):
        """Serialize an image to a base64-encoded PNG string."""
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode()

    def __call__(self, data):
        """Generate image variations for a request payload.

        Args:
            data: Request dict with keys:
                ``inputs`` (str): text prompt (defaults to "").
                ``image`` (str, optional): base64-encoded conditioning image;
                    a blank white canvas is used when absent.
                ``num_images`` (int, optional): number of variations to
                    generate. Defaults to 10, preserving the previous
                    hard-coded behavior.

        Returns:
            dict: ``{"images": [<base64 PNG>, ...]}``
        """
        prompt = data.get("inputs", "")
        img_b64 = data.get("image", None)
        # Generalized: callers may override the variation count; the default
        # keeps the original always-10 behavior.
        num_images = int(data.get("num_images", 10))

        init = self._load_init_image(img_b64)

        outputs = []
        for _ in range(num_images):
            result = self.pipe(
                prompt=prompt,
                image=init,
                num_inference_steps=20,
                guidance_scale=6.0,
            ).images[0]
            outputs.append(self._png_b64(result))

        return {"images": outputs}