fix(gguf): actually lazy-pull via /api/pull, fail loudly on probe
Two bugs the charon bring-up exposed:
1. Ollama's /v1/chat/completions does NOT lazy-pull missing models
When charon's cache was empty for Q4_K_M and Q8_0 (only BF16 was
pre-pulled), the first chat request returned 404. The wrapper
caught it as a generic Exception and returned empty text, which
lighteval processed as '?' extracted answers, and we committed
32 garbage rows (16 per quant, both sides '?' and zero hits)
to lthn/lemer and the lthn/LEM-benchmarks aggregator before the
wrapper bug was spotted.
2. The probe was defensive-to-a-fault
_probe() swallowed exceptions with a warning and 'continuing
anyway — Ollama may lazy-pull on first real request'. Since we
just learned Ollama doesn't lazy-pull, that assumption was
wrong, and the probe was silently covering up the failure.
Fixes:
- _ensure_cached() uses Ollama's native /api/pull endpoint to
actually pull missing models into the cache. Calls /api/tags
first to see if it's already there (cheap no-op when cached).
Streams pull progress to stdout. Raises RuntimeError on any
pull error (network, 404 on HF, disk full, etc.).
- _probe() now RAISES on any failure instead of warning. Also
rejects empty responses as an indicator that chat template or
quant integrity is broken.
- greedy_until() unchanged — still catches per-sample errors and
substitutes empty text, because a single round can legitimately
fail (e.g. timeout) without tanking the whole 8-round batch.
But since __init__ now fails fast, we never reach greedy_until
for a broken model setup.
Cleanup: garbage Q4_K_M / Q8_0 canons from the initial charon loop
are being git rm'd from both lthn/lemer and lthn/LEM-benchmarks in
separate commits.
Co-Authored-By: Virgil <virgil@lethean.io>
- gguf_wrapper.py +89 -8
|
@@ -69,7 +69,15 @@ class GGUFOllamaModel(LightevalModel):
|
|
| 69 |
self.config = config
|
| 70 |
self.model_name = config.model_name
|
| 71 |
|
|
|
|
| 72 |
self.base_url = os.environ.get("LEM_OLLAMA_URL", "http://localhost:11434/v1")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
self.api_key = os.environ.get("LEM_OLLAMA_API_KEY", "ollama")
|
| 74 |
|
| 75 |
try:
|
|
@@ -80,29 +88,102 @@ class GGUFOllamaModel(LightevalModel):
|
|
| 80 |
"dependencies in eval.py or install with `uv pip install openai`."
|
| 81 |
) from e
|
| 82 |
|
| 83 |
-
print(f"[gguf_wrapper] Ollama endpoint: {self.base_url}")
|
|
|
|
| 84 |
print(f"[gguf_wrapper] model: {self.model_name}")
|
| 85 |
self._client = OpenAI(base_url=self.base_url, api_key=self.api_key)
|
| 86 |
|
| 87 |
-
#
|
| 88 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
self._probe()
|
| 90 |
|
| 91 |
self._cache = SampleCache(config)
|
| 92 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
def _probe(self) -> None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
try:
|
| 95 |
-
|
| 96 |
model=self.model_name,
|
| 97 |
messages=[{"role": "user", "content": "ping"}],
|
| 98 |
max_tokens=1,
|
| 99 |
temperature=0.0,
|
| 100 |
)
|
| 101 |
-
print(f"[gguf_wrapper] probe OK")
|
| 102 |
except Exception as e:
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
|
| 107 |
@property
|
| 108 |
def tokenizer(self):
|
|
|
|
| 69 |
self.config = config
|
| 70 |
self.model_name = config.model_name
|
| 71 |
|
| 72 |
+
# The OpenAI-compat endpoint (chat/completions). Used for inference.
|
| 73 |
self.base_url = os.environ.get("LEM_OLLAMA_URL", "http://localhost:11434/v1")
|
| 74 |
+
# The Ollama-native endpoint (/api/pull, /api/tags). Used to ensure
|
| 75 |
+
# the model is cached BEFORE we start inferring — Ollama's
|
| 76 |
+
# /v1/chat/completions endpoint does NOT lazy-pull, it just 404s.
|
| 77 |
+
self.ollama_native_url = os.environ.get(
|
| 78 |
+
"LEM_OLLAMA_NATIVE_URL",
|
| 79 |
+
self.base_url.replace("/v1", ""),
|
| 80 |
+
)
|
| 81 |
self.api_key = os.environ.get("LEM_OLLAMA_API_KEY", "ollama")
|
| 82 |
|
| 83 |
try:
|
|
|
|
| 88 |
"dependencies in eval.py or install with `uv pip install openai`."
|
| 89 |
) from e
|
| 90 |
|
| 91 |
+
print(f"[gguf_wrapper] Ollama OpenAI endpoint: {self.base_url}")
|
| 92 |
+
print(f"[gguf_wrapper] Ollama native endpoint: {self.ollama_native_url}")
|
| 93 |
print(f"[gguf_wrapper] model: {self.model_name}")
|
| 94 |
self._client = OpenAI(base_url=self.base_url, api_key=self.api_key)
|
| 95 |
|
| 96 |
+
# Step 1: ensure the model is in Ollama's cache — pull via /api/pull
|
| 97 |
+
# if missing. Fails loudly if the pull itself errors (network, auth,
|
| 98 |
+
# repo/tag doesn't exist on HF, disk full, etc.).
|
| 99 |
+
self._ensure_cached()
|
| 100 |
+
|
| 101 |
+
# Step 2: 1-token probe via chat/completions to confirm the model
|
| 102 |
+
# actually serves. Raises on any failure — no more silent-empty.
|
| 103 |
self._probe()
|
| 104 |
|
| 105 |
self._cache = SampleCache(config)
|
| 106 |
|
| 107 |
+
def _ensure_cached(self) -> None:
|
| 108 |
+
"""Make sure `self.model_name` is present in Ollama's local cache.
|
| 109 |
+
|
| 110 |
+
Ollama's /v1/chat/completions doesn't lazy-pull — it 404s on unknown
|
| 111 |
+
models. So we explicitly call /api/pull first. If the model is
|
| 112 |
+
already cached this is a no-op (Ollama responds immediately).
|
| 113 |
+
"""
|
| 114 |
+
import urllib.request
|
| 115 |
+
import urllib.error
|
| 116 |
+
import json as _json
|
| 117 |
+
|
| 118 |
+
# Check cached models via /api/tags — cheap fast call.
|
| 119 |
+
try:
|
| 120 |
+
with urllib.request.urlopen(f"{self.ollama_native_url}/api/tags", timeout=10) as r:
|
| 121 |
+
tags = _json.loads(r.read().decode())
|
| 122 |
+
cached = {m.get("name") or m.get("model") for m in tags.get("models", [])}
|
| 123 |
+
if self.model_name in cached:
|
| 124 |
+
print(f"[gguf_wrapper] already cached: {self.model_name}")
|
| 125 |
+
return
|
| 126 |
+
except Exception as e:
|
| 127 |
+
print(f"[gguf_wrapper] /api/tags check failed ({type(e).__name__}: {e}), "
|
| 128 |
+
f"attempting pull anyway", file=sys.stderr)
|
| 129 |
+
|
| 130 |
+
# Not cached — pull via /api/pull. This is a streaming endpoint;
|
| 131 |
+
# we read the chunks and watch for a final success/error.
|
| 132 |
+
print(f"[gguf_wrapper] pulling {self.model_name} (not cached)...")
|
| 133 |
+
req = urllib.request.Request(
|
| 134 |
+
f"{self.ollama_native_url}/api/pull",
|
| 135 |
+
data=_json.dumps({"name": self.model_name, "stream": True}).encode(),
|
| 136 |
+
headers={"Content-Type": "application/json"},
|
| 137 |
+
)
|
| 138 |
+
try:
|
| 139 |
+
with urllib.request.urlopen(req, timeout=3600) as resp:
|
| 140 |
+
last_status = None
|
| 141 |
+
for line in resp:
|
| 142 |
+
try:
|
| 143 |
+
evt = _json.loads(line)
|
| 144 |
+
except _json.JSONDecodeError:
|
| 145 |
+
continue
|
| 146 |
+
if "error" in evt:
|
| 147 |
+
raise RuntimeError(
|
| 148 |
+
f"Ollama pull failed: {evt['error']}"
|
| 149 |
+
)
|
| 150 |
+
status = evt.get("status")
|
| 151 |
+
if status and status != last_status:
|
| 152 |
+
print(f"[gguf_wrapper] {status}")
|
| 153 |
+
last_status = status
|
| 154 |
+
except urllib.error.HTTPError as e:
|
| 155 |
+
raise RuntimeError(
|
| 156 |
+
f"Ollama /api/pull returned HTTP {e.code}: {e.read().decode()[:200]}"
|
| 157 |
+
) from e
|
| 158 |
+
print(f"[gguf_wrapper] pull complete: {self.model_name}")
|
| 159 |
+
|
| 160 |
def _probe(self) -> None:
    """Single-token smoke test against the chat endpoint.

    Any failure here raises RuntimeError instead of being logged and
    ignored: a model that cannot answer "ping" must halt the run before
    any benchmark rows are produced. An empty (or whitespace-only) reply
    is treated as a failure too — it means the endpoint is reachable but
    the model is not actually generating text.
    """
    request_kwargs = dict(
        model=self.model_name,
        messages=[{"role": "user", "content": "ping"}],
        max_tokens=1,
        temperature=0.0,
    )
    try:
        reply = self._client.chat.completions.create(**request_kwargs)
    except Exception as e:
        raise RuntimeError(
            f"[gguf_wrapper] probe FAILED for {self.model_name}: "
            f"{type(e).__name__}: {e}"
        ) from e

    text = reply.choices[0].message.content or ""
    if text.strip():
        print(f"[gguf_wrapper] probe OK ({len(text)} chars returned)")
        return
    raise RuntimeError(
        f"[gguf_wrapper] probe returned empty content for {self.model_name} "
        f"— model is reachable but not generating. Check chat template / quant integrity."
    )
| 187 |
|
| 188 |
@property
|
| 189 |
def tokenizer(self):
|