"""Lighteval custom model backend — routes inference through an Ollama
(or any OpenAI-compatible) server hosting GGUF models.

Pairs with mlx_lm_wrapper.py in the same directory. eval.py picks which
wrapper to point lighteval at based on the target's `type` field
(`mlx` → mlx_lm_wrapper.py, `gguf` → this file).

Usage (CLI, via eval.py):
    eval.py automatically points lighteval at this file when the target
    type is `gguf`.

Usage (lighteval direct):
    lighteval custom \\
        "hf.co/lthn/lemer:Q4_K_M" \\
        /path/to/gguf_wrapper.py \\
        "mmlu_pro" \\
        --max-samples 10

Endpoint configuration (env vars, both optional):
    LEM_OLLAMA_URL       default: http://localhost:11434/v1
    LEM_OLLAMA_API_KEY   default: "ollama" (Ollama ignores the value)

config.model_name resolution:
    Passed straight through as the `model` parameter to the OpenAI chat
    completions endpoint. Use whatever string Ollama expects — for example:
        hf.co/lthn/lemer:Q4_K_M
        hf.co/lthn/lemmy:Q4_K_M
        gemma3:27b

    Ollama will lazy-pull models it doesn't have yet on first request.
    The wrapper sends a 1-token probe during __init__ to surface pull
    failures early rather than mid-eval.

Sampling policy (identical to mlx_lm_wrapper.py):
    Google-calibrated Gemma 4: temp=1.0, top_p=0.95, top_k=64. enable_thinking
    is signalled via `think: true` in the OpenAI extra_body — Ollama >= 0.3
    with a thinking-capable model will respect this; older versions ignore
    it, so our fork's LEK'd models still want the chat template to set the
    think-mode anchor server-side (which Ollama modelfiles can do).
"""
|
|
| import os |
| import sys |
| from typing import List |
|
|
| from lighteval.models.abstract_model import LightevalModel |
| from lighteval.models.model_output import ModelResponse |
| from lighteval.tasks.requests import Doc |
| from lighteval.utils.cache_management import SampleCache |
|
|
|
|
# Sampling defaults shared with mlx_lm_wrapper.py (see module docstring):
# Google's calibrated settings for the Gemma family. MAX_TOKENS is only a
# fallback when a lighteval Doc carries no generation_size.
DEFAULT_TEMPERATURE = 1.0
DEFAULT_TOP_P = 0.95
DEFAULT_TOP_K = 64
DEFAULT_MAX_TOKENS = 4096
|
|
|
|
class GGUFOllamaModel(LightevalModel):
    """Lighteval custom backend that runs inference via an OpenAI-compatible
    server (Ollama by default) hosting GGUF quants.

    The `config.model_name` field is used as the `model` parameter of the
    chat completions request — so it must be a string Ollama recognises,
    typically `hf.co/<org>/<repo>:<quant>` or a built-in `family:tag`.
    """

    def __init__(self, config) -> None:
        """Resolve endpoints, ensure the model is cached, and probe generation.

        Args:
            config: lighteval custom-model config; `model_name` is forwarded
                verbatim as the chat-completions `model` field.

        Raises:
            ImportError: if the `openai` package is missing.
            RuntimeError: from `_ensure_cached` / `_probe` when the model
                cannot be pulled or cannot generate text.
        """
        self.config = config
        self.model_name = config.model_name

        # OpenAI-compatible endpoint (chat completions live under /v1).
        self.base_url = os.environ.get("LEM_OLLAMA_URL", "http://localhost:11434/v1")

        # Native Ollama API root (/api/pull, /api/tags). Strip only a
        # *trailing* "/v1" (tolerating a trailing slash): the previous
        # `replace("/v1", "")` removed every occurrence and would corrupt
        # URLs that contain "/v1" anywhere else in the host or path.
        trimmed = self.base_url.rstrip("/")
        native_default = trimmed[: -len("/v1")] if trimmed.endswith("/v1") else trimmed
        self.ollama_native_url = os.environ.get(
            "LEM_OLLAMA_NATIVE_URL",
            native_default,
        )
        self.api_key = os.environ.get("LEM_OLLAMA_API_KEY", "ollama")

        try:
            from openai import OpenAI
        except ImportError as e:
            raise ImportError(
                "gguf_wrapper requires the `openai` package. Add it to PEP 723 "
                "dependencies in eval.py or install with `uv pip install openai`."
            ) from e

        print(f"[gguf_wrapper] Ollama OpenAI endpoint: {self.base_url}")
        print(f"[gguf_wrapper] Ollama native endpoint: {self.ollama_native_url}")
        print(f"[gguf_wrapper] model: {self.model_name}")
        self._client = OpenAI(base_url=self.base_url, api_key=self.api_key)

        # Pull up front: Ollama's /v1/chat/completions 404s on unknown models.
        self._ensure_cached()

        # Fail fast if the model is reachable but cannot actually generate.
        self._probe()

        self._cache = SampleCache(config)

    def _ensure_cached(self) -> None:
        """Make sure `self.model_name` is present in Ollama's local cache.

        Ollama's /v1/chat/completions doesn't lazy-pull — it 404s on unknown
        models. So we explicitly call /api/pull first. If the model is
        already cached this is a no-op (Ollama responds immediately).

        Raises:
            RuntimeError: if the pull fails (HTTP error, or an `error` event
                in the streamed pull status).
        """
        import urllib.request
        import urllib.error
        import json as _json

        # Cheap membership check first. A failing check is non-fatal — we
        # simply fall through and attempt the pull anyway.
        try:
            with urllib.request.urlopen(f"{self.ollama_native_url}/api/tags", timeout=10) as r:
                tags = _json.loads(r.read().decode())
            cached = {m.get("name") or m.get("model") for m in tags.get("models", [])}
            # Ollama lists untagged names as "<name>:latest", so check both
            # forms to avoid a redundant pull.
            if self.model_name in cached or f"{self.model_name}:latest" in cached:
                print(f"[gguf_wrapper] already cached: {self.model_name}")
                return
        except Exception as e:
            print(f"[gguf_wrapper] /api/tags check failed ({type(e).__name__}: {e}), "
                  f"attempting pull anyway", file=sys.stderr)

        print(f"[gguf_wrapper] pulling {self.model_name} (not cached)...")
        # "model" is the current /api/pull field name; "name" is the older
        # alias — send both so either server generation accepts the request.
        req = urllib.request.Request(
            f"{self.ollama_native_url}/api/pull",
            data=_json.dumps(
                {"model": self.model_name, "name": self.model_name, "stream": True}
            ).encode(),
            headers={"Content-Type": "application/json"},
        )
        try:
            # Streamed status events, one JSON object per line. Generous
            # timeout: a cold pull of a multi-GB quant can take a while.
            with urllib.request.urlopen(req, timeout=3600) as resp:
                last_status = None
                for line in resp:
                    try:
                        evt = _json.loads(line)
                    except _json.JSONDecodeError:
                        continue
                    if "error" in evt:
                        raise RuntimeError(
                            f"Ollama pull failed: {evt['error']}"
                        )
                    status = evt.get("status")
                    # Only print status transitions, not every progress event.
                    if status and status != last_status:
                        print(f"[gguf_wrapper] {status}")
                        last_status = status
        except urllib.error.HTTPError as e:
            raise RuntimeError(
                f"Ollama /api/pull returned HTTP {e.code}: {e.read().decode()[:200]}"
            ) from e
        print(f"[gguf_wrapper] pull complete: {self.model_name}")

    def _probe(self) -> None:
        """1-token sanity check. Raises on any failure — no silent empty results.

        If this wrapper is going to produce canon rows, the model MUST be
        able to generate text. A failing probe means something upstream is
        broken (Ollama down, model missing, chat template borked) and the
        right response is to halt the run, not write empty ?-rows.
        """
        try:
            resp = self._client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": "ping"}],
                max_tokens=1,
                temperature=0.0,
            )
        except Exception as e:
            raise RuntimeError(
                f"[gguf_wrapper] probe FAILED for {self.model_name}: "
                f"{type(e).__name__}: {e}"
            ) from e
        content = resp.choices[0].message.content or ""
        if not content.strip():
            raise RuntimeError(
                f"[gguf_wrapper] probe returned empty content for {self.model_name} "
                f"— model is reachable but not generating. Check chat template / quant integrity."
            )
        print(f"[gguf_wrapper] probe OK ({len(content)} chars returned)")

    @property
    def tokenizer(self):
        # No local tokenizer — tokenization happens server-side in Ollama.
        return None

    def tok_encode(self, text: str) -> list:
        # Token ids are never inspected on the generative path we support.
        return []

    @property
    def add_special_tokens(self) -> bool:
        # The server's chat template applies special tokens; never add locally.
        return False

    @property
    def max_length(self) -> int:
        # Generous upper bound; the server enforces the model's real context
        # window, so this only needs to not truncate prompts prematurely.
        return 131072

    def greedy_until(self, requests: List[Doc]) -> List[ModelResponse]:
        """Generate text responses via Ollama chat completions.

        Despite the name, this is sampling (not greedy) — matches
        mlx_lm_wrapper. Each request can ask for num_samples > 1; we send
        one request per sample so the server's PRNG produces independent
        outputs.

        Args:
            requests: lighteval Docs; `query` becomes the user message,
                `generation_size` caps max_tokens (DEFAULT_MAX_TOKENS if unset).

        Returns:
            One ModelResponse per Doc. A failed sample is recorded as ""
            (best-effort: one bad sample shouldn't abort the whole run);
            token/logprob fields are left empty since the server doesn't
            expose them here.
        """
        results: List[ModelResponse] = []

        for r in requests:
            max_tokens = r.generation_size or DEFAULT_MAX_TOKENS
            n_samples = getattr(r, "num_samples", 1) or 1

            messages = [{"role": "user", "content": r.query}]

            samples: List[str] = []
            for i in range(n_samples):
                try:
                    resp = self._client.chat.completions.create(
                        model=self.model_name,
                        messages=messages,
                        max_tokens=max_tokens,
                        temperature=DEFAULT_TEMPERATURE,
                        top_p=DEFAULT_TOP_P,
                        # top_k and think aren't part of the OpenAI schema;
                        # Ollama reads them from extra_body (older servers
                        # ignore `think` — see module docstring).
                        extra_body={
                            "top_k": DEFAULT_TOP_K,
                            "think": True,
                        },
                    )
                    text = resp.choices[0].message.content or ""
                except Exception as e:
                    print(f"[gguf_wrapper] sample {i + 1}/{n_samples} failed: "
                          f"{type(e).__name__}: {e}", file=sys.stderr)
                    text = ""
                samples.append(text)

            results.append(ModelResponse(
                text=samples,
                input_tokens=[],
                output_tokens=[[] for _ in samples],
                reasonings=[None for _ in samples],
                logprobs=[],
                argmax_logits_eq_gold=[],
            ))

        return results

    def loglikelihood(self, requests):
        raise NotImplementedError(
            "gguf_wrapper does not implement loglikelihood. "
            "Ollama's OpenAI endpoint doesn't expose per-token logprobs "
            "suitable for multiple-choice loglikelihood scoring. Use a "
            "generative task variant with num_samples + maj_at_k."
        )

    def loglikelihood_rolling(self, requests):
        raise NotImplementedError(
            "gguf_wrapper does not implement loglikelihood_rolling. "
            "Perplexity-based metrics are not in our eval set."
        )
|
|