"""Lighteval custom model backend — routes inference through mlx_lm.

Usage (CLI):
    lighteval custom \\
        "lthn/lemer" \\
        /Users/snider/Code/host-uk/core/.lek/lighteval_wrappers/mlx_lm_wrapper.py \\
        "mmlu_pro" \\
        --max-samples 10

Usage (Python API):
    from lighteval.models.custom.custom_model import CustomModelConfig
    config = CustomModelConfig(
        model_name="lthn/lemer",
        model_definition_file_path=".lek/lighteval_wrappers/mlx_lm_wrapper.py",
    )
    # Pipeline uses config.model_name as the HF repo ID passed to mlx_lm.load.

Design notes:
- Implements only ``greedy_until`` (generative). ``loglikelihood`` and
  ``loglikelihood_rolling`` raise NotImplementedError — generative tasks
  like MMLU-Pro (in lighteval's default config) don't use them.
- Uses Google's calibrated Gemma 4 sampling by default: temp=1.0, top_p=0.95,
  top_k=64. See reference_gemma4_sampling.md in the cladius memory.
- Supports ``num_samples > 1`` per Doc by generating N independent samples
  with the same sampler. 8-PAC methodology sets num_samples=8 at the task
  config level (via a task variant) and reads the resulting list of samples
  via ModelResponse.text.
- Max tokens defaults to 4096 (``DEFAULT_MAX_TOKENS``) if the Doc's
  ``generation_size`` is None. Override by setting ``generation_size`` on
  the task config.
"""
|
|
| from typing import List |
|
|
| from lighteval.data import GenerativeTaskDataset |
| from lighteval.models.abstract_model import LightevalModel |
| from lighteval.models.model_output import ModelResponse |
| from lighteval.tasks.requests import Doc |
| from lighteval.utils.cache_management import SampleCache |
|
|
| from mlx_lm import load as mlx_load, generate as mlx_generate |
| from mlx_lm.sample_utils import make_sampler |
|
|
|
|
| |
| |
| |
# Google's calibrated Gemma 4 sampling defaults (see module docstring).
DEFAULT_TEMPERATURE = 1.0
DEFAULT_TOP_P = 0.95
DEFAULT_TOP_K = 64

# Generation cap applied when a Doc carries no explicit ``generation_size``.
DEFAULT_MAX_TOKENS = 4096
|
|
|
|
class MLXLMModel(LightevalModel):
    """Lighteval custom backend that runs inference via mlx_lm on Apple Silicon.

    The ``config.model_name`` field is used as the HuggingFace repo ID (or
    local path) passed to ``mlx_lm.load``. Models must be in MLX format —
    use mlx-community conversions or run ``mlx_vlm.convert`` on a HF
    transformers checkpoint first.
    """

    def __init__(self, config) -> None:
        """Load the MLX model/tokenizer and build the default sampler.

        Args:
            config: Lighteval custom-model config; ``config.model_name`` is
                passed straight through to ``mlx_lm.load``.
        """
        self.config = config
        self.model_name = config.model_name
        print(f"[mlx_lm_wrapper] loading model: {self.model_name}")
        self._model, self._tokenizer = mlx_load(self.model_name)
        print("[mlx_lm_wrapper] model loaded")

        # One shared sampler for every request: Gemma 4 calibrated settings
        # (see module constants). All generation goes through this sampler,
        # including ``greedy_until`` — see that method's docstring.
        self._sampler = make_sampler(
            temp=DEFAULT_TEMPERATURE,
            top_p=DEFAULT_TOP_P,
            top_k=DEFAULT_TOP_K,
        )

        # Lighteval's sample-level result cache, keyed off the config.
        self._cache = SampleCache(config)

    @property
    def tokenizer(self):
        """The tokenizer returned by ``mlx_lm.load`` (HF-compatible)."""
        return self._tokenizer

    def tok_encode(self, text: str):
        """Encode ``text`` to token ids with the loaded tokenizer."""
        return self._tokenizer.encode(text)

    @property
    def add_special_tokens(self) -> bool:
        # Prompts are fully formatted via apply_chat_template in
        # greedy_until, so no extra special tokens are added at encode time.
        return False

    @property
    def max_length(self) -> int:
        # NOTE(review): hard-coded 128k context window — presumably matches
        # the models this wrapper loads; confirm before pointing it at
        # smaller-context checkpoints.
        return 131072

    def greedy_until(self, requests: List[Doc]) -> List[ModelResponse]:
        """Generate text responses for a batch of prompts.

        Despite the name, this is NOT greedy decoding — it uses the Gemma 4
        calibrated sampler configured in __init__. Lighteval uses this method
        as the entry point for all generative tasks; actual sampling strategy
        is the model's choice.

        Note: Doc is a frozen dataclass, so we iterate requests directly
        rather than using GenerativeTaskDataset's splits_iterator (which
        mutates tokenized_context on each doc). Simple sequential loop; mlx_lm
        doesn't batch anyway.
        """
        results: List[ModelResponse] = []

        for doc in requests:
            # Per-doc generation budget; fall back to the module default
            # when the task config left ``generation_size`` unset.
            max_tokens = doc.generation_size or DEFAULT_MAX_TOKENS
            # ``num_samples`` may be absent on the Doc; treat missing/falsy
            # values as a single sample.
            n_samples = getattr(doc, "num_samples", 1) or 1

            # Format the raw query through the tokenizer's chat template
            # (generation prompt + thinking enabled) so the model sees its
            # native prompt format, then encode for bookkeeping.
            messages = [{"role": "user", "content": doc.query}]
            formatted_prompt = self._tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=True,
            )
            input_tokens = self.tok_encode(formatted_prompt)

            # N independent samples with the shared sampler (8-PAC task
            # variants set num_samples=8; default is a single sample).
            samples: List[str] = [
                mlx_generate(
                    self._model,
                    self._tokenizer,
                    prompt=formatted_prompt,
                    max_tokens=max_tokens,
                    sampler=self._sampler,
                    verbose=False,
                )
                for _ in range(n_samples)
            ]

            # mlx_lm's high-level API exposes neither output token ids nor
            # logprobs, so report empty placeholders alongside the text.
            results.append(
                ModelResponse(
                    text=samples,
                    input_tokens=list(input_tokens),
                    output_tokens=[[] for _ in samples],
                    reasonings=[None for _ in samples],
                    logprobs=[],
                    argmax_logits_eq_gold=[],
                )
            )

        return results

    def loglikelihood(self, requests: List[Doc]) -> List[ModelResponse]:
        """Not implemented — use generative metrics (e.g., maj_at_k) instead.

        For a full implementation, we would need to run a forward pass on
        ``context + continuation`` tokens and extract per-position log-probs
        from the logits at the continuation positions. mlx_lm's top-level
        API doesn't expose raw logits, so this requires direct model access
        via ``mlx_lm.utils.load``.
        """
        raise NotImplementedError(
            "mlx_lm_wrapper does not implement loglikelihood. "
            "Use a generative task variant with num_samples and maj_at_k."
        )

    def loglikelihood_rolling(self, requests: List[Doc]) -> List[ModelResponse]:
        """Not implemented — used for perplexity metrics we don't run."""
        raise NotImplementedError(
            "mlx_lm_wrapper does not implement loglikelihood_rolling. "
            "Perplexity-based metrics are not in our eval set."
        )
|
|