CanerDedeoglu committed on
Commit
22fba5f
·
verified ·
1 Parent(s): 878edb5

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +8 -34
handler.py CHANGED
@@ -119,12 +119,7 @@ def load_image_any(image_input: Union[str, dict]) -> Image.Image:
119
  if isinstance(image_input, str):
120
  s = image_input.strip()
121
  if s.startswith(("http://", "https://")):
122
- # UA ekleyerek CDN/Cloudflare engellerini azalt
123
- r = requests.get(
124
- s,
125
- timeout=(5, 20),
126
- headers={"User-Agent": "Mozilla/5.0 (compatible; RapidECG/1.0)"}
127
- )
128
  r.raise_for_status()
129
  return Image.open(BytesIO(r.content)).convert("RGB")
130
  if os.path.exists(s):
@@ -275,8 +270,6 @@ def generate_response(
275
 
276
  # Görüntü ön-işleme → tensör
277
  try:
278
- if chatbot.image_processor is None:
279
- return {"error": "Image processing failed: image_processor is None"}
280
  processed = process_images([pil_img], chatbot.image_processor, chatbot.model.config)
281
  if isinstance(processed, (list, tuple)) and len(processed) > 0:
282
  image_tensor = processed[0]
@@ -368,7 +361,7 @@ def query(payload: dict):
368
  if not model_initialized:
369
  if not initialize_model():
370
  return {"error": "Model initialization failed"}
371
- # model_initialized artık initialize_model içinde True set ediliyor
372
 
373
  try:
374
  message = payload.get("message") or payload.get("query") or payload.get("prompt") or payload.get("istem") or ""
@@ -434,7 +427,7 @@ class _Args:
434
  self.debug = bool(int(os.getenv("DEBUG", "0")))
435
 
436
  def initialize_model():
437
- global tokenizer, model, image_processor, context_len, args, model_initialized
438
  if not LLAVA_AVAILABLE:
439
  print("[init] LLaVA not available; cannot init.")
440
  return False
@@ -444,23 +437,6 @@ def initialize_model():
444
  tokenizer_, model_, image_processor_, context_len_ = load_pretrained_model(
445
  args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit
446
  )
447
-
448
- # ✅ Fallback: image_processor None ise otomatik yükle (CLIP 336 varsayılan)
449
- if image_processor_ is None:
450
- try:
451
- from transformers import AutoProcessor, AutoImageProcessor, CLIPImageProcessor
452
- try:
453
- image_processor_ = AutoProcessor.from_pretrained(args.model_path, trust_remote_code=True)
454
- except Exception:
455
- try:
456
- image_processor_ = AutoImageProcessor.from_pretrained("openai/clip-vit-large-patch14-336")
457
- except Exception:
458
- image_processor_ = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14-336")
459
- print("[init] image_processor fallback activated.")
460
- except Exception as e:
461
- print(f"[init] image_processor fallback failed: {e}")
462
- return False
463
-
464
  # demo: model'ı genelde cuda’da çalıştırır
465
  try:
466
  _ = next(model_.parameters()).device
@@ -469,14 +445,12 @@ def initialize_model():
469
  model_ = model_.to(torch.device("cuda"))
470
  model_.eval()
471
 
472
- # Global state
473
- tokenizer = tokenizer_
474
- model = model_
475
- image_processor = image_processor_
476
- context_len = context_len_
477
 
478
  chat_manager.init_if_needed(args, args.model_path, tokenizer_, model_, image_processor_, context_len_)
479
- model_initialized = True # ✅ sağlıkta doğru raporla
480
  print("[init] model/tokenizer/image_processor loaded.")
481
  return True
482
  except Exception as e:
@@ -500,4 +474,4 @@ class EndpointHandler:
500
  return get_model_info()
501
 
502
  if __name__ == "__main__":
503
- print("Handler ready (Demo Parity + Style Hint + whitespace post-process). Use `EndpointHandler` or `query`.")
 
119
  if isinstance(image_input, str):
120
  s = image_input.strip()
121
  if s.startswith(("http://", "https://")):
122
+ r = requests.get(s, timeout=(5, 20))
 
 
 
 
 
123
  r.raise_for_status()
124
  return Image.open(BytesIO(r.content)).convert("RGB")
125
  if os.path.exists(s):
 
270
 
271
  # Görüntü ön-işleme → tensör
272
  try:
 
 
273
  processed = process_images([pil_img], chatbot.image_processor, chatbot.model.config)
274
  if isinstance(processed, (list, tuple)) and len(processed) > 0:
275
  image_tensor = processed[0]
 
361
  if not model_initialized:
362
  if not initialize_model():
363
  return {"error": "Model initialization failed"}
364
+ model_initialized = True
365
 
366
  try:
367
  message = payload.get("message") or payload.get("query") or payload.get("prompt") or payload.get("istem") or ""
 
427
  self.debug = bool(int(os.getenv("DEBUG", "0")))
428
 
429
  def initialize_model():
430
+ global tokenizer, model, image_processor, context_len, args
431
  if not LLAVA_AVAILABLE:
432
  print("[init] LLaVA not available; cannot init.")
433
  return False
 
437
  tokenizer_, model_, image_processor_, context_len_ = load_pretrained_model(
438
  args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit
439
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
440
  # demo: model'ı genelde cuda’da çalıştırır
441
  try:
442
  _ = next(model_.parameters()).device
 
445
  model_ = model_.to(torch.device("cuda"))
446
  model_.eval()
447
 
448
+ globals()["tokenizer"] = tokenizer_
449
+ globals()["model"] = model_
450
+ globals()["image_processor"] = image_processor_
451
+ globals()["context_len"] = context_len_
 
452
 
453
  chat_manager.init_if_needed(args, args.model_path, tokenizer_, model_, image_processor_, context_len_)
 
454
  print("[init] model/tokenizer/image_processor loaded.")
455
  return True
456
  except Exception as e:
 
474
  return get_model_info()
475
 
476
  if __name__ == "__main__":
477
+ print("Handler ready (Demo Parity + Style Hint + whitespace post-process). Use `EndpointHandler` or `query`.")