# SPDX-License-Identifier: EUPL-1.2
"""Lighteval custom model backend — routes inference through an Ollama
(or any OpenAI-compatible) server hosting GGUF models.
Pairs with mlx_lm_wrapper.py in the same directory. eval.py picks which
wrapper to point lighteval at based on the target's `type` field
(`mlx` → mlx_lm_wrapper.py, `gguf` → this file).
Usage (CLI, via eval.py):
eval.py automatically points lighteval at this file when the target
type is `gguf`.
Usage (lighteval direct):
lighteval custom \\
"hf.co/lthn/lemer:Q4_K_M" \\
/path/to/gguf_wrapper.py \\
"mmlu_pro" \\
--max-samples 10
Endpoint configuration (env vars, both optional):
LEM_OLLAMA_URL default: http://localhost:11434/v1
LEM_OLLAMA_API_KEY default: "ollama" (Ollama ignores the value)
config.model_name resolution:
Passed straight through as the `model` parameter to the OpenAI chat
completions endpoint. Use whatever string Ollama expects — for example:
hf.co/lthn/lemer:Q4_K_M
hf.co/lthn/lemmy:Q4_K_M
gemma3:27b
Ollama will lazy-pull models it doesn't have yet on first request.
The wrapper sends a 1-token probe during __init__ to surface pull
failures early rather than mid-eval.
Sampling policy (identical to mlx_lm_wrapper.py):
Google-calibrated Gemma 4: temp=1.0, top_p=0.95, top_k=64. enable_thinking
is signalled via `think: true` in the OpenAI extra_body — Ollama >= 0.3
with a thinking-capable model will respect this; older versions ignore
it, so our fork's LEK'd models still want the chat template to set the
think-mode anchor server-side (which Ollama modelfiles can do).
"""
import os
import sys
from typing import List
from lighteval.models.abstract_model import LightevalModel
from lighteval.models.model_output import ModelResponse
from lighteval.tasks.requests import Doc
from lighteval.utils.cache_management import SampleCache
# Sampling defaults — kept identical to mlx_lm_wrapper.py so GGUF and MLX
# runs are directly comparable (see "Sampling policy" in the module docstring).
DEFAULT_TEMPERATURE = 1.0
DEFAULT_TOP_P = 0.95
DEFAULT_TOP_K = 64
DEFAULT_MAX_TOKENS = 4096  # CoT can run long with enable_thinking=True
class GGUFOllamaModel(LightevalModel):
    """Lighteval custom backend that runs inference via an OpenAI-compatible
    server (Ollama by default) hosting GGUF quants.

    The `config.model_name` field is used as the `model` parameter of the
    chat completions request — so it must be a string Ollama recognises,
    typically `hf.co/<org>/<repo>:<quant>` or a built-in `family:tag`.
    """

    def __init__(self, config) -> None:
        """Resolve endpoints, ensure the model is cached, and probe it.

        Args:
            config: lighteval custom-model config; only `model_name` is read
                here (and the whole object is handed to SampleCache).

        Raises:
            ImportError: if the `openai` package is not installed.
            RuntimeError: if the model cannot be pulled or fails the probe.
        """
        self.config = config
        self.model_name = config.model_name
        # The OpenAI-compat endpoint (chat/completions). Used for inference.
        self.base_url = os.environ.get("LEM_OLLAMA_URL", "http://localhost:11434/v1")
        # The Ollama-native endpoint (/api/pull, /api/tags). Used to ensure
        # the model is cached BEFORE we start inferring — Ollama's
        # /v1/chat/completions endpoint does NOT lazy-pull, it just 404s.
        #
        # FIX: strip only a single trailing "/v1" path segment. The previous
        # `str.replace("/v1", "")` removed EVERY occurrence of "/v1" in the
        # URL, mangling hosts or proxy paths that happen to contain it.
        trimmed = self.base_url.rstrip("/")
        if trimmed.endswith("/v1"):
            trimmed = trimmed[: -len("/v1")]
        self.ollama_native_url = os.environ.get("LEM_OLLAMA_NATIVE_URL", trimmed)
        self.api_key = os.environ.get("LEM_OLLAMA_API_KEY", "ollama")
        try:
            from openai import OpenAI
        except ImportError as e:
            raise ImportError(
                "gguf_wrapper requires the `openai` package. Add it to PEP 723 "
                "dependencies in eval.py or install with `uv pip install openai`."
            ) from e
        print(f"[gguf_wrapper] Ollama OpenAI endpoint: {self.base_url}")
        print(f"[gguf_wrapper] Ollama native endpoint: {self.ollama_native_url}")
        print(f"[gguf_wrapper] model: {self.model_name}")
        self._client = OpenAI(base_url=self.base_url, api_key=self.api_key)
        # Step 1: ensure the model is in Ollama's cache — pull via /api/pull
        # if missing. Fails loudly if the pull itself errors (network, auth,
        # repo/tag doesn't exist on HF, disk full, etc.).
        self._ensure_cached()
        # Step 2: 1-token probe via chat/completions to confirm the model
        # actually serves. Raises on any failure — no more silent-empty.
        self._probe()
        self._cache = SampleCache(config)

    def _ensure_cached(self) -> None:
        """Make sure `self.model_name` is present in Ollama's local cache.

        Ollama's /v1/chat/completions doesn't lazy-pull — it 404s on unknown
        models. So we explicitly call /api/pull first. If the model is
        already cached this is a no-op (Ollama responds immediately).

        Raises:
            RuntimeError: if /api/pull streams an error event or returns a
                non-2xx HTTP status.
        """
        import urllib.request
        import urllib.error
        import json as _json

        # Check cached models via /api/tags — cheap fast call.
        try:
            with urllib.request.urlopen(f"{self.ollama_native_url}/api/tags", timeout=10) as r:
                tags = _json.loads(r.read().decode())
            cached = {m.get("name") or m.get("model") for m in tags.get("models", [])}
            # FIX: Ollama lists an untagged model as "<name>:latest" in
            # /api/tags; accept either spelling so we don't re-pull a model
            # that is already present.
            if self.model_name in cached or f"{self.model_name}:latest" in cached:
                print(f"[gguf_wrapper] already cached: {self.model_name}")
                return
        except Exception as e:
            # Best-effort check only — a failed tags call falls through to
            # the pull, which is a no-op if the model is already cached.
            print(f"[gguf_wrapper] /api/tags check failed ({type(e).__name__}: {e}), "
                  f"attempting pull anyway", file=sys.stderr)

        # Not cached — pull via /api/pull. This is a streaming endpoint;
        # we read the chunks and watch for a final success/error.
        print(f"[gguf_wrapper] pulling {self.model_name} (not cached)...")
        req = urllib.request.Request(
            f"{self.ollama_native_url}/api/pull",
            data=_json.dumps({"name": self.model_name, "stream": True}).encode(),
            headers={"Content-Type": "application/json"},
        )
        try:
            with urllib.request.urlopen(req, timeout=3600) as resp:
                last_status = None
                for line in resp:
                    try:
                        evt = _json.loads(line)
                    except _json.JSONDecodeError:
                        continue  # ignore partial/keep-alive chunks
                    if "error" in evt:
                        raise RuntimeError(
                            f"Ollama pull failed: {evt['error']}"
                        )
                    status = evt.get("status")
                    # De-duplicate the progress spam: only log status changes.
                    if status and status != last_status:
                        print(f"[gguf_wrapper] {status}")
                        last_status = status
        except urllib.error.HTTPError as e:
            raise RuntimeError(
                f"Ollama /api/pull returned HTTP {e.code}: {e.read().decode()[:200]}"
            ) from e
        print(f"[gguf_wrapper] pull complete: {self.model_name}")

    def _probe(self) -> None:
        """1-token sanity check. Raises on any failure — no silent empty results.

        If this wrapper is going to produce canon rows, the model MUST be
        able to generate text. A failing probe means something upstream is
        broken (Ollama down, model missing, chat template borked) and the
        right response is to halt the run, not write empty ?-rows.

        Raises:
            RuntimeError: if the request fails or returns empty content.
        """
        try:
            resp = self._client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": "ping"}],
                max_tokens=1,
                temperature=0.0,
            )
        except Exception as e:
            raise RuntimeError(
                f"[gguf_wrapper] probe FAILED for {self.model_name}: "
                f"{type(e).__name__}: {e}"
            ) from e
        content = resp.choices[0].message.content or ""
        if not content.strip():
            raise RuntimeError(
                f"[gguf_wrapper] probe returned empty content for {self.model_name} "
                f"— model is reachable but not generating. Check chat template / quant integrity."
            )
        print(f"[gguf_wrapper] probe OK ({len(content)} chars returned)")

    @property
    def tokenizer(self):
        # OpenAI-compatible endpoints don't expose a tokenizer. Lighteval's
        # generative path doesn't call this when the metric is extractive
        # regex-based (our case), so it's safe to return None and let
        # something downstream complain if it actually needs one.
        return None

    def tok_encode(self, text: str):
        # Not available via the OpenAI API. Return an empty list so
        # lighteval's length-accounting code doesn't crash.
        return []

    @property
    def add_special_tokens(self) -> bool:
        return False

    @property
    def max_length(self) -> int:
        # Gemma 4 E2B/E4B: 128K; 26B MoE / 31B: 256K. Generous default.
        return 131072

    def greedy_until(self, requests: List[Doc]) -> List[ModelResponse]:
        """Generate text responses via Ollama chat completions.

        Despite the name, this is sampling (not greedy) — matches
        mlx_lm_wrapper. Each request can ask for num_samples > 1; we send
        one request per sample so the server's PRNG produces independent
        outputs.

        Args:
            requests: lighteval Docs; `query` is the prompt, `generation_size`
                caps output tokens, `num_samples` fans out repeats.

        Returns:
            One ModelResponse per request; a failed sample yields "" so the
            run continues (the metric scores it as wrong, not a crash).
        """
        results: List[ModelResponse] = []
        for r in requests:
            max_tokens = r.generation_size or DEFAULT_MAX_TOKENS
            n_samples = getattr(r, "num_samples", 1) or 1
            messages = [{"role": "user", "content": r.query}]
            samples: List[str] = []
            for i in range(n_samples):
                try:
                    resp = self._client.chat.completions.create(
                        model=self.model_name,
                        messages=messages,
                        max_tokens=max_tokens,
                        temperature=DEFAULT_TEMPERATURE,
                        top_p=DEFAULT_TOP_P,
                        # top_k and think flags aren't in OpenAI's standard
                        # chat schema — pass through extra_body so Ollama
                        # can consume them. Ollama >= 0.3 respects `think`
                        # for thinking-capable models; earlier versions
                        # ignore unknown extras silently.
                        extra_body={
                            "top_k": DEFAULT_TOP_K,
                            "think": True,
                        },
                    )
                    text = resp.choices[0].message.content or ""
                except Exception as e:
                    print(f"[gguf_wrapper] sample {i + 1}/{n_samples} failed: "
                          f"{type(e).__name__}: {e}", file=sys.stderr)
                    text = ""
                samples.append(text)
            results.append(ModelResponse(
                text=samples,
                input_tokens=[],
                output_tokens=[[] for _ in samples],
                reasonings=[None for _ in samples],
                logprobs=[],
                argmax_logits_eq_gold=[],
            ))
        return results

    def loglikelihood(self, requests):
        raise NotImplementedError(
            "gguf_wrapper does not implement loglikelihood. "
            "Ollama's OpenAI endpoint doesn't expose per-token logprobs "
            "suitable for multiple-choice loglikelihood scoring. Use a "
            "generative task variant with num_samples + maj_at_k."
        )

    def loglikelihood_rolling(self, requests):
        raise NotImplementedError(
            "gguf_wrapper does not implement loglikelihood_rolling. "
            "Perplexity-based metrics are not in our eval set."
        )
|