yipengsun committed
Commit d98a1e6 · Parent: a9ab92b

Fix MedSigLIP tokenizer loading with robust fallbacks for HF Space

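The token-handling fix is the same in all three clients: pass `token` to `from_pretrained` only when `HF_TOKEN` is actually set. As a minimal standalone sketch of the intended behavior (the env-var name and model id below are illustrative assumptions, not read from the repo):

import os

HF_TOKEN = os.environ.get("HF_TOKEN")  # may be unset on local/dev machines
MODEL_ID = "google/medsiglip-448"      # hypothetical model id, for illustration

def _token_arg() -> dict:
    # Local checkout: no auth kwarg needed at all.
    if os.path.isdir(MODEL_ID):
        return {}
    # Explicit token (e.g., an HF Space secret): pass it through.
    if HF_TOKEN:
        return {"token": HF_TOKEN}
    # Otherwise omit `token` so huggingface_hub falls back to the credentials
    # cached by `huggingface-cli login`.
    return {}

# Usage: AutoModel.from_pretrained(MODEL_ID, **_token_arg())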
models/medasr_client.py CHANGED
@@ -22,7 +22,11 @@ _load_lock = threading.Lock()
 def _token_arg() -> dict:
     if os.path.isdir(MEDASR_MODEL_ID):
         return {}
-    return {"token": HF_TOKEN}
+    # Only pass `token` when explicitly provided; omitting it lets HF Hub fall back
+    # to `huggingface-cli login` cached credentials (useful on local/dev machines).
+    if HF_TOKEN:
+        return {"token": HF_TOKEN}
+    return {}
 
 
 def load():
models/medgemma_client.py CHANGED
@@ -36,7 +36,11 @@ def _token_arg(model_id: str) -> dict:
     """Return token kwarg only when loading from HF Hub (not local path)."""
     if _is_local_path(model_id):
         return {}
-    return {"token": HF_TOKEN}
+    # Only pass `token` when explicitly provided; omitting it lets HF Hub fall back
+    # to `huggingface-cli login` cached credentials (useful on local/dev machines).
+    if HF_TOKEN:
+        return {"token": HF_TOKEN}
+    return {}
 
 
 def _get_quantization_config():
models/medsiglip_client.py CHANGED
@@ -23,7 +23,50 @@ _load_lock = threading.Lock()
 def _token_arg() -> dict:
     if os.path.isdir(MEDSIGLIP_MODEL_ID):
         return {}
-    return {"token": HF_TOKEN}
+    # Only pass `token` when explicitly provided; omitting it lets HF Hub fall back
+    # to `huggingface-cli login` cached credentials (useful on local/dev machines).
+    if HF_TOKEN:
+        return {"token": HF_TOKEN}
+    return {}
+
+
+def _load_tokenizer():
+    """Load a SigLIP-compatible tokenizer with robust fallbacks.
+
+    Some Transformers builds can end up with `AutoTokenizer` resolving the SigLIP
+    tokenizer mapping to `None` (e.g., optional deps missing), which can surface
+    as `'NoneType' object has no attribute 'replace'`. When that happens, we
+    bypass `AutoTokenizer` and load the SigLIP tokenizer class directly.
+    """
+    errors: list[str] = []
+
+    try:
+        from transformers import AutoTokenizer
+        return AutoTokenizer.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+    except Exception as e:  # noqa: BLE001 - intentional broad fallback for env-specific HF/Transformers issues
+        errors.append(f"AutoTokenizer: {e}")
+
+    # Prefer a fast tokenizer (no SentencePiece runtime dependency) when available.
+    try:
+        from transformers import SiglipTokenizerFast
+        return SiglipTokenizerFast.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+    except Exception as e:  # noqa: BLE001
+        errors.append(f"SiglipTokenizerFast: {e}")
+
+    try:
+        from transformers import SiglipTokenizer
+        return SiglipTokenizer.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+    except Exception as e:  # noqa: BLE001
+        errors.append(f"SiglipTokenizer: {e}")
+
+    # Last resort: load as a generic fast tokenizer (uses tokenizer.json).
+    try:
+        from transformers import PreTrainedTokenizerFast
+        return PreTrainedTokenizerFast.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+    except Exception as e:  # noqa: BLE001
+        errors.append(f"PreTrainedTokenizerFast: {e}")
+
+    raise RuntimeError("Failed to load MedSigLIP tokenizer. " + " | ".join(errors))
 
 
 def load():
@@ -37,7 +80,7 @@ def load():
         return _model, _processor
 
     import torch
-    from transformers import AutoModel, AutoImageProcessor, AutoTokenizer, SiglipProcessor
+    from transformers import AutoModel, AutoImageProcessor, SiglipProcessor
 
     logger.info("Loading MedSigLIP from %s...", "local" if os.path.isdir(MEDSIGLIP_MODEL_ID) else "HF Hub")
 
@@ -46,10 +89,14 @@ def load():
         from transformers import AutoProcessor
         _processor = AutoProcessor.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
     except Exception as e:
-        logger.warning("AutoProcessor failed (%s), loading components separately", e)
-        image_processor = AutoImageProcessor.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
-        tokenizer = AutoTokenizer.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
-        _processor = SiglipProcessor(image_processor=image_processor, tokenizer=tokenizer)
+        logger.warning("AutoProcessor failed (%s); trying SiglipProcessor", e)
+        try:
+            _processor = SiglipProcessor.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+        except Exception as e2:
+            logger.warning("SiglipProcessor failed (%s); loading components separately", e2)
+            image_processor = AutoImageProcessor.from_pretrained(MEDSIGLIP_MODEL_ID, **_token_arg())
+            tokenizer = _load_tokenizer()
+            _processor = SiglipProcessor(image_processor=image_processor, tokenizer=tokenizer)
 
     _model = AutoModel.from_pretrained(
         MEDSIGLIP_MODEL_ID, **_token_arg(), torch_dtype=torch.float32,
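With these fallbacks in place, processor loading should survive a broken `AutoTokenizer` resolution. A hedged smoke test, assuming `models` is an importable package and `load()` returns `(model, processor)` as in the diff above:

from models import medsiglip_client

model, processor = medsiglip_client.load()
# SigLIP text encoding conventionally pads to the tokenizer's max length (64).
batch = processor(text=["chest x-ray, frontal view"], padding="max_length", return_tensors="pt")
print(batch["input_ids"].shape)  # tokenizer was resolved by one of the fallbacks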
requirements.txt CHANGED
@@ -1,5 +1,6 @@
 torch>=2.1.0
 transformers==5.0.0
+sentencepiece>=0.2.0
 accelerate==1.12.0
 bitsandbytes==0.49.1
 huggingface_hub==1.3.4
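The new pin backs the slow `SiglipTokenizer` fallback, which needs the SentencePiece runtime that the fast-tokenizer paths do not. An optional sanity-check sketch for the Space environment:

import importlib.util

# The slow SigLIP tokenizer fallback requires sentencepiece at runtime;
# the fast-tokenizer fallbacks above do not.
if importlib.util.find_spec("sentencepiece") is None:
    raise SystemExit("sentencepiece missing: pip install 'sentencepiece>=0.2.0'")
print("sentencepiece available")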