# LEM-Eval / mlx_lm_wrapper.py
# Author: Snider
# feat: initial LEM-Eval scaffold — target-driven 8-PAC runner
# Commit: 45b798c
# SPDX-License-Identifier: EUPL-1.2
"""Lighteval custom model backend — routes inference through mlx_lm.
Usage (CLI):
lighteval custom \\
"lthn/lemer" \\
/Users/snider/Code/host-uk/core/.lek/lighteval_wrappers/mlx_lm_wrapper.py \\
"mmlu_pro" \\
--max-samples 10
Usage (Python API):
from lighteval.models.custom.custom_model import CustomModelConfig
config = CustomModelConfig(
model_name="lthn/lemer",
model_definition_file_path=".lek/lighteval_wrappers/mlx_lm_wrapper.py",
)
# Pipeline uses config.model_name as the HF repo ID passed to mlx_lm.load.
Design notes:
- Implements only ``greedy_until`` (generative). ``loglikelihood`` and
``loglikelihood_rolling`` raise NotImplementedError — generative tasks
like MMLU-Pro (in lighteval's default config) don't use them.
- Uses Google's calibrated Gemma 4 sampling by default: temp=1.0, top_p=0.95,
top_k=64. See reference_gemma4_sampling.md in the cladius memory.
- Supports ``num_samples > 1`` per Doc by generating N independent samples
with the same sampler. 8-PAC methodology sets num_samples=8 at the task
config level (via a task variant) and reads the resulting list of samples
via ModelResponse.text.
- Max tokens defaults to 4096 if the Doc's ``generation_size`` is None —
<|think|> mode CoT on MMLU-Pro can run 2000-3500 tokens. Override by
setting ``generation_size`` on the task config.
"""
from typing import List
from lighteval.data import GenerativeTaskDataset
from lighteval.models.abstract_model import LightevalModel
from lighteval.models.model_output import ModelResponse
from lighteval.tasks.requests import Doc
from lighteval.utils.cache_management import SampleCache
from mlx_lm import load as mlx_load, generate as mlx_generate
from mlx_lm.sample_utils import make_sampler
# Google's calibrated Gemma 4 sampling — DO NOT change to greedy for eval.
# Greedy decoding measurably underperforms because Gemma 4 is trained for
# stochastic sampling; see reference_gemma4_sampling.md.
DEFAULT_TEMPERATURE: float = 1.0  # sampler temperature (calibrated, not greedy)
DEFAULT_TOP_P: float = 0.95  # nucleus (top-p) sampling threshold
DEFAULT_TOP_K: int = 64  # top-k cutoff applied alongside top-p
DEFAULT_MAX_TOKENS: int = 4096 # <|think|> mode CoT on MMLU-Pro can run 2000-3500 tokens
class MLXLMModel(LightevalModel):
    """Lighteval custom backend that runs inference via mlx_lm on Apple Silicon.

    The ``config.model_name`` field is used as the HuggingFace repo ID (or
    local path) passed to ``mlx_lm.load``. Models must be in MLX format —
    use mlx-community conversions or run ``mlx_vlm.convert`` on a HF
    transformers checkpoint first.
    """

    def __init__(self, config) -> None:
        """Load the MLX model + tokenizer and build the sampler once.

        Args:
            config: Lighteval custom-model config; ``config.model_name`` is
                the HF repo ID (or local path) handed to ``mlx_lm.load``.
        """
        # LightevalModel's default __init__ takes no args (abstract base);
        # don't call super().__init__(config) — it will raise TypeError.
        self.config = config
        self.model_name = config.model_name
        print(f"[mlx_lm_wrapper] loading model: {self.model_name}")
        self._model, self._tokenizer = mlx_load(self.model_name)
        print("[mlx_lm_wrapper] model loaded")
        # Build the sampler once — Google's calibrated defaults.
        self._sampler = make_sampler(
            temp=DEFAULT_TEMPERATURE,
            top_p=DEFAULT_TOP_P,
            top_k=DEFAULT_TOP_K,
        )
        # Lighteval's pipeline expects a _cache on the model for memoisation.
        self._cache = SampleCache(config)

    @property
    def tokenizer(self):
        """The tokenizer loaded alongside the model by ``mlx_lm.load``."""
        return self._tokenizer

    def tok_encode(self, text: str):
        """Encode ``text`` to a token-id list with the model's tokenizer."""
        return self._tokenizer.encode(text)

    @property
    def add_special_tokens(self) -> bool:
        # apply_chat_template supplies the turn markers itself in
        # greedy_until, so lighteval must not inject special tokens again.
        return False

    @property
    def max_length(self) -> int:
        """Maximum context length (tokens) advertised to lighteval."""
        # Gemma 4 E2B/E4B: 128K; 31B and 26B MoE: 256K.
        # Generous default that fits all variants; tasks trim as needed.
        return 131072

    def greedy_until(self, requests: List[Doc]) -> List[ModelResponse]:
        """Generate text responses for a batch of prompts.

        Despite the name, this is NOT greedy decoding — it uses the Gemma 4
        calibrated sampler configured in __init__. Lighteval uses this method
        as the entry point for all generative tasks; actual sampling strategy
        is the model's choice.

        Note: Doc is a frozen dataclass, so we iterate requests directly
        rather than using GenerativeTaskDataset's splits_iterator (which
        mutates tokenized_context on each doc). Simple sequential loop; mlx_lm
        doesn't batch anyway.

        Args:
            requests: Docs whose ``query`` holds the raw prompt text.

        Returns:
            One ModelResponse per Doc, with ``text`` holding the list of
            generated samples (length = the Doc's num_samples, default 1).
        """
        results: List[ModelResponse] = []
        for r in requests:
            # Documented contract: fall back to the default only when
            # generation_size is None. (The previous `or` also clobbered an
            # explicit 0, silently replacing it with DEFAULT_MAX_TOKENS.)
            max_tokens = (
                DEFAULT_MAX_TOKENS if r.generation_size is None else r.generation_size
            )
            n_samples = getattr(r, "num_samples", 1) or 1
            # CRITICAL: wrap the query in Gemma 4's chat template before
            # passing to mlx_lm. Without this, the model treats r.query as
            # raw text continuation rather than a user turn, and produces
            # degenerate completions (loops, prompt-echo, no CoT). The
            # tokenizer's apply_chat_template emits the proper turn markers.
            #
            # enable_thinking=True is LOAD-BEARING for LEK'd models. All
            # four Lemma family variants were LEK-2 trained with
            # enable_thinking=True, meaning the attractor basin is
            # conditioned on the <|think|> system anchor being present.
            # Running with enable_thinking=False forces the model into a
            # distribution it wasn't trained for — score drops, CoT style
            # degrades, answer quality falls. Base Gemma 4 supports
            # thinking natively too, so enabling it is fair both ways.
            messages = [{"role": "user", "content": r.query}]
            formatted_prompt = self._tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=True,
            )
            input_tokens = self.tok_encode(formatted_prompt)
            # Generate N samples for this prompt. For n > 1, each sample
            # is independent — mlx_lm's sampler introduces stochasticity
            # via its internal PRNG state, so the same prompt produces
            # different outputs each call when sampling is active.
            #
            # Pass the pre-encoded token list so the (often long) prompt is
            # tokenized once per Doc instead of once per sample; mlx_lm's
            # generate accepts a str or a token sequence and encodes strings
            # with the same tokenizer call, so outputs are unchanged.
            # NOTE(review): requires an mlx_lm version whose generate/
            # stream_generate accept token prompts — confirm pinned version.
            samples: List[str] = []
            for _ in range(n_samples):
                output = mlx_generate(
                    self._model,
                    self._tokenizer,
                    prompt=input_tokens,
                    max_tokens=max_tokens,
                    sampler=self._sampler,
                    verbose=False,
                )
                samples.append(output)
            response = ModelResponse(
                text=samples,
                input_tokens=list(input_tokens),
                # mlx_generate's string API doesn't surface output token ids;
                # leave them empty rather than re-encoding the decoded text.
                output_tokens=[[] for _ in samples],
                reasonings=[None for _ in samples],
                logprobs=[],
                argmax_logits_eq_gold=[],
            )
            results.append(response)
        return results

    def loglikelihood(self, requests: List[Doc]) -> List[ModelResponse]:
        """Not implemented — use generative metrics (e.g., maj_at_k) instead.

        For a full implementation, we would need to run a forward pass on
        ``context + continuation`` tokens and extract per-position log-probs
        from the logits at the continuation positions. mlx_lm's top-level
        API doesn't expose raw logits, so this requires direct model access
        via ``mlx_lm.utils.load``.

        Raises:
            NotImplementedError: always.
        """
        raise NotImplementedError(
            "mlx_lm_wrapper does not implement loglikelihood. "
            "Use a generative task variant with num_samples and maj_at_k."
        )

    def loglikelihood_rolling(self, requests: List[Doc]) -> List[ModelResponse]:
        """Not implemented — used for perplexity metrics we don't run.

        Raises:
            NotImplementedError: always.
        """
        raise NotImplementedError(
            "mlx_lm_wrapper does not implement loglikelihood_rolling. "
            "Perplexity-based metrics are not in our eval set."
        )