Update handler.py
Browse files- handler.py +46 -88
handler.py
CHANGED
|
@@ -1,91 +1,49 @@
|
|
| 1 |
-
#
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
from typing import Any, Dict, Optional, Union
|
| 5 |
-
import requests
|
| 6 |
from PIL import Image
|
| 7 |
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
class
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
@staticmethod
|
| 29 |
-
def _encode_image_to_b64(img: Image.Image) -> str:
|
| 30 |
-
buf = io.BytesIO(); img.save(buf, format="PNG")
|
| 31 |
-
return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode("utf-8")
|
| 32 |
-
|
| 33 |
-
def _image_to_payload(self, image: Union[str, Image.Image]) -> str:
|
| 34 |
-
if isinstance(image, str):
|
| 35 |
-
low = image.lower()
|
| 36 |
-
if low.startswith("http://") or low.startswith("https://"):
|
| 37 |
-
return image # URL ise aynen gönder
|
| 38 |
-
if not os.path.isfile(image):
|
| 39 |
-
raise FileNotFoundError(f"Görsel bulunamadı: {image}")
|
| 40 |
-
return self._encode_image_to_b64(Image.open(image).convert("RGB"))
|
| 41 |
-
elif isinstance(image, Image.Image):
|
| 42 |
-
return self._encode_image_to_b64(image.convert("RGB"))
|
| 43 |
else:
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
payload = self.build_payload(query, image, **gen_kwargs)
|
| 68 |
-
headers = {
|
| 69 |
-
"Accept": "application/json",
|
| 70 |
-
"Authorization": f"Bearer {self.hf_token}",
|
| 71 |
-
"Content-Type": "application/json",
|
| 72 |
-
}
|
| 73 |
-
t0 = time.time()
|
| 74 |
-
resp = self.session.post(self.endpoint_url, headers=headers, json=payload, timeout=self.timeout)
|
| 75 |
-
latency_ms = (time.time() - t0) * 1000.0
|
| 76 |
-
status = resp.status_code
|
| 77 |
-
try:
|
| 78 |
-
data = resp.json()
|
| 79 |
-
except Exception as e:
|
| 80 |
-
raise HFEndpointError(f"JSON olmayan cevap (status={status}): {resp.text[:500]}") from e
|
| 81 |
-
|
| 82 |
-
text = ""
|
| 83 |
-
if isinstance(data, list) and data:
|
| 84 |
-
text = data[0].get("generated_text", "") or data[0].get("text", "")
|
| 85 |
-
elif isinstance(data, dict):
|
| 86 |
-
text = data.get("generated_text", "") or data.get("text", "")
|
| 87 |
-
|
| 88 |
-
if status >= 400:
|
| 89 |
-
raise HFEndpointError(f"Endpoint hatası {status}: {json.dumps(data)[:500]}")
|
| 90 |
-
|
| 91 |
-
return {"text": text, "raw": data, "latency_ms": latency_ms, "status_code": status}
|
|
|
|
| 1 |
+
# /repository/handler.py
|
| 2 |
+
import base64, io, os, json
|
| 3 |
+
from typing import Any, Dict, List
|
|
|
|
|
|
|
| 4 |
from PIL import Image
|
| 5 |
|
| 6 |
+
# (If needed: from transformers import ...  # model loading would go here)
|
| 7 |
+
|
| 8 |
+
class EndpointHandler:
    """Entry point for a Hugging Face Inference Endpoints custom handler.

    The Inference Toolkit instantiates this class once per deployment and
    invokes ``__call__`` for every request.  Expected request payload::

        {"inputs": {"query": "...", "image": "<url | data-URL | local path>"},
         "parameters": {...}}

    The conventional response shape is ``[{"generated_text": "..."}]``.
    """

    def __init__(self, path: str = "") -> None:
        """Load the model/processor/tokenizer here.

        Args:
            path: Directory of the deployed repository (supplied by the
                toolkit); weights would normally be loaded from it.
        """
        # e.g. self.model = ...
        #      self.processor = ...
        pass

    def _load_image(self, img_field: str) -> Image.Image:
        """Resolve *img_field* to an RGB PIL image.

        Accepts a base64 ``data:image/...`` URL, an ``http(s)://`` URL,
        or a local file path (inside the container).

        Raises:
            ValueError: malformed data URL (no comma separator / bad base64).
            requests.HTTPError: remote fetch returned an error status.
            OSError: unreadable or non-image local file.
        """
        if img_field.startswith("data:image"):
            # data URL -> raw bytes (drop the "data:image/...;base64," header)
            _header, b64data = img_field.split(",", 1)
            img_bytes = base64.b64decode(b64data)
            return Image.open(io.BytesIO(img_bytes)).convert("RGB")
        elif img_field.startswith(("http://", "https://")):
            # Imported lazily so deployments that never fetch remote images
            # do not require the dependency at import time.
            import requests

            resp = requests.get(img_field, timeout=20)
            resp.raise_for_status()
            return Image.open(io.BytesIO(resp.content)).convert("RGB")
        else:
            # Local path (from inside the container).
            return Image.open(img_field).convert("RGB")

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Handle one inference request from the Hugging Face Inference Toolkit.

        Returns:
            A list with a single ``{"generated_text": ...}`` dict, the
            conventional text-generation response shape.
        """
        inputs = data.get("inputs") or {}
        params = data.get("parameters") or {}
        # Robustness: the toolkit may deliver "inputs" as a bare string for
        # text-only requests; normalize it to the dict shape used below.
        if isinstance(inputs, str):
            inputs = {"query": inputs}
        query = inputs.get("query", "")
        img_field = inputs.get("image", "")

        # Prepare the image (optional — only if your model is multimodal).
        image = None
        if img_field:
            image = self._load_image(img_field)

        # Call your own inference code here (forward `params` as generation
        # kwargs once a real model is wired in):
        # out_text = run_model(self.model, self.processor, query, image, **params)
        out_text = f"(demo) prompt='{query[:50]}...' image={'yes' if image else 'no'}"

        return [{"generated_text": out_text}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|