Update handler.py
handler.py  CHANGED  (+38 −12)
@@ -1,10 +1,10 @@
 # -*- coding: utf-8 -*-
-# handler.py — Rapid_ECG / PULSE-7B — stable version with DEBUG logging (vision tower fix)
-# -
-# -
-# -
+# handler.py — Rapid_ECG / PULSE-7B — stable version with DEBUG logging (local/hub + vision tower fix)
+# - HF Endpoint compatible (EndpointHandler.load() / __call__)
+# - Loads from a local folder (HF_MODEL_DIR) or from the hub (HF_MODEL_ID)
+# - Images are processed only via .preprocess()
 # - Vision tower check: mm_vision_tower or vision_tower
-# - Uses IMAGE_TOKEN_INDEX
+# - Uses IMAGE_TOKEN_INDEX, plus extensive [DEBUG] logging

 import os
 import io
@@ -17,7 +17,7 @@ import torch
 from PIL import Image
 import requests

-# ===== LLaVA
+# ===== LLaVA library (install if needed) =====
 def _ensure_llava(tag: str = "v1.2.0"):
     try:
         import llava  # noqa
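Note: this hunk cuts off the body of _ensure_llava. For orientation, an install-if-missing helper of this shape typically pip-installs the pinned tag; the sketch below is not the commit's code, and the repository URL is an assumption:

    import subprocess
    import sys

    def _ensure_llava(tag: str = "v1.2.0"):
        # Import first; install only if LLaVA is missing.
        try:
            import llava  # noqa: F401
            return
        except ImportError:
            pass
        # Assumed source repo; pinned to the requested tag.
        subprocess.check_call([
            sys.executable, "-m", "pip", "install",
            f"git+https://github.com/haotian-liu/LLaVA.git@{tag}",
        ])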
@@ -39,11 +39,12 @@ from llava.constants import (
     DEFAULT_IMAGE_TOKEN,
     DEFAULT_IM_START_TOKEN,
     DEFAULT_IM_END_TOKEN,
-    IMAGE_TOKEN_INDEX,
+    IMAGE_TOKEN_INDEX,
 )
 from llava.model.builder import load_pretrained_model
 from llava.mm_utils import tokenizer_image_token

+
 # ---------- helpers ----------
 def _get_env(name: str, default: Optional[str] = None) -> Optional[str]:
     v = os.getenv(name)
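Note: IMAGE_TOKEN_INDEX pairs with tokenizer_image_token when encoding a prompt that contains the "<image>" placeholder. A minimal sketch of the standard LLaVA pattern (encode_prompt and its arguments are illustrative, not from this commit):

    from llava.constants import IMAGE_TOKEN_INDEX
    from llava.mm_utils import tokenizer_image_token

    def encode_prompt(prompt, tokenizer, device):
        # Each "<image>" placeholder becomes IMAGE_TOKEN_INDEX (-200 by default)
        # rather than being tokenized as literal text.
        ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
        return ids.unsqueeze(0).to(device)  # add batch dimension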
@@ -113,6 +114,7 @@ def _get_conv_mode(model_name: str) -> str:
     return "llava_v0"

 def _build_prompt_with_image(prompt: str, model_cfg) -> str:
+    # If the user already put an image token in the prompt, don't add another
     if DEFAULT_IMAGE_TOKEN in prompt or DEFAULT_IM_START_TOKEN in prompt:
         return prompt
     if getattr(model_cfg, "mm_use_im_start_end", False):
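Note: the helper's effect, assuming LLaVA's default DEFAULT_IMAGE_TOKEN value of "<image>" (illustrative calls):

    _build_prompt_with_image("Interpret this ECG.", cfg)
    # -> "<image>\nInterpret this ECG."

    # A prompt that already contains the token is returned unchanged:
    _build_prompt_with_image("<image>\nInterpret this ECG.", cfg)
    # -> "<image>\nInterpret this ECG."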
@@ -120,6 +122,14 @@ def _build_prompt_with_image(prompt: str, model_cfg) -> str:
         return f"{token}\n{prompt}"
     return f"{DEFAULT_IMAGE_TOKEN}\n{prompt}"

+def _resolve_model_path(model_dir_hint: Optional[str], default_dir: str = "/repository") -> str:
+    # Priority order: HF_MODEL_DIR (local) -> given model_dir_hint -> default_dir
+    p = _get_env("HF_MODEL_DIR") or model_dir_hint or default_dir
+    p = os.path.abspath(p)
+    print(f"[DEBUG] resolved model path: {p}")
+    return p
+
+
 # ---------- Endpoint Handler ----------
 class EndpointHandler:
     def __init__(self, model_dir: Optional[str] = None):
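Note: resolution order of the new helper, with illustrative values:

    # With HF_MODEL_DIR=/opt/model in the environment:
    _resolve_model_path("/repository")  # -> "/opt/model"

    # HF_MODEL_DIR unset: the hint wins over the default:
    _resolve_model_path("/data/pulse")  # -> "/data/pulse"

    # Neither set: fall back to default_dir:
    _resolve_model_path(None)           # -> "/repository"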
@@ -134,14 +144,29 @@ class EndpointHandler:
         self.model_name = None

     def load(self):
-
-
+        """
+        Loading strategy:
+        - If HF_MODEL_DIR is set, or weights are present at the repo root: load LOCALLY.
+        - Otherwise load from the hub via HF_MODEL_ID.
+        """
+        local_path = _resolve_model_path(self.model_dir)
+        use_local = os.path.isdir(local_path) and any(
+            os.path.exists(os.path.join(local_path, f))
+            for f in ("config.json", "tokenizer_config.json")
+        )
         model_base = _get_env("HF_MODEL_BASE", None)
-        print(f"[DEBUG] load(): HF_MODEL_ID={model_path}, HF_MODEL_BASE={model_base}")

         os.environ.setdefault("ATTN_IMPLEMENTATION", "flash_attention_2")
         os.environ.setdefault("FLASH_ATTENTION", "1")

+        if use_local:
+            model_path = local_path
+            print(f"[DEBUG] loading model LOCALLY from: {model_path}")
+        else:
+            model_path = _get_env("HF_MODEL_ID", "PULSE-ECG/PULSE-7B")
+            print(f"[DEBUG] loading model from HUB: {model_path} (HF_MODEL_BASE={model_base})")
+
+        # Load the model
         print("[DEBUG] calling load_pretrained_model ...")
         self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(
             model_path=model_path,
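Note: the two configurations this strategy distinguishes, expressed as environment variables (values are examples, not defaults from the commit):

    import os

    # Local mode: weights already in the container/repository.
    os.environ["HF_MODEL_DIR"] = "/repository"  # must contain config.json / tokenizer_config.json

    # Hub mode: leave HF_MODEL_DIR unset and pull by repo id instead.
    # os.environ["HF_MODEL_ID"] = "PULSE-ECG/PULSE-7B"
    # os.environ["HF_MODEL_BASE"] = "..."  # only for adapter-on-base checkpoints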
@@ -164,7 +189,8 @@
             raise RuntimeError(
                 "[ERROR] Vision tower not loaded (mm_vision_tower/vision_tower None). "
                 "This model is not multimodal, or the wrong checkpoint was loaded. "
-                "
+                "For local loading, HF_MODEL_DIR must point to the correct folder; "
+                "for hub loading, set HF_MODEL_ID to a PULSE/LLaVA-based model (e.g. 'PULSE-ECG/PULSE-7B')."
             )

         # tokenizer safety
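Note: the guard that raises this error sits outside the hunk; given the header's "mm_vision_tower or vision_tower" note, it presumably reads the model config roughly like this (a reconstruction, not the commit's code):

    cfg = self.model.config
    tower = getattr(cfg, "mm_vision_tower", None) or getattr(cfg, "vision_tower", None)
    if tower is None:
        raise RuntimeError(...)  # the message shown above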
@@ -211,7 +237,7 @@
         try:
             out = self.image_processor.preprocess(image, return_tensors="pt")
             images_tensor = out["pixel_values"].to(self.device, dtype=self.dtype)
-            image_sizes = [image.size]
+            image_sizes = [image.size]  # some LLaVA versions expect image_sizes
             print(f"[DEBUG] preprocess OK; images_tensor.shape={images_tensor.shape}")
         except Exception as e:
             return {"error": f"Image preprocessing failed: {e}"}
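Note: downstream of preprocessing, LLaVA-style generation typically consumes these tensors as follows (a sketch of the common pattern; max_new_tokens and the sampling flags are illustrative):

    with torch.inference_mode():
        output_ids = self.model.generate(
            input_ids,                # encoded via tokenizer_image_token (see earlier note)
            images=images_tensor,
            image_sizes=image_sizes,  # accepted by newer LLaVA versions
            do_sample=False,
            max_new_tokens=512,
        )
    answer = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()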
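Note: end to end, a handler of this shape is exercised like this on an endpoint (a sketch; the payload keys are assumptions, since __call__ is not shown in this diff):

    handler = EndpointHandler(model_dir="/repository")
    handler.load()
    result = handler({
        "inputs": {
            "image": "https://example.com/ecg.png",  # URL or base64, per the handler's decoding
            "prompt": "Describe the rhythm and any abnormalities in this ECG.",
        }
    })
    print(result)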