"""
Polish LLM Leaderboard evaluation for QuIP# Bielik-Q2-Sharp Variant A.

Custom wrapper that loads a QuIP# model via quip-sharp and runs the eval
through speakleash/lm-evaluation-harness (polish3 branch).

Task groups:
- polish_generate_few (5-shot generative: polemo2, 8tags, cbd, ppc, psc)
- polish_mc (5-shot multiple-choice variants)

Usage:
    python eval_polish_quip.py \
        --model_path /dev/shm/eval/model \
        --tokenizer speakleash/Bielik-11B-v2.3-Instruct \
        --output_dir /dev/shm/eval/results_a \
        --num_fewshot 5
"""
import sys
import os
import json
import time
import argparse

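# Make the quip-sharp checkout importable (provides the lib.* modules).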
QUIP_DIR = os.environ.get('QUIP_DIR', '/dev/shm/eval/quip-sharp')
sys.path.insert(0, QUIP_DIR)

import torch

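# Newer PyTorch (>= 2.6) defaults torch.load(weights_only=True), which can
# break unpickling QuIP# checkpoints; restore the old behaviour process-wide.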
_orig_load = torch.load
def _compat_load(*a, **kw):
    kw.setdefault('weights_only', False)
    return _orig_load(*a, **kw)
torch.load = _compat_load
torch.set_grad_enabled(False)

import numpy as np
from transformers import AutoTokenizer

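# quip-sharp's model loader (resolves the custom quantized modules at load time).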
from lib.utils.unsafe_import import model_from_hf_path

import lm_eval
from lm_eval import evaluator

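# Detect which lm-eval generation is installed: 0.4+ exposes
# lm_eval.api.model.LM, older releases expose lm_eval.base.BaseLM.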
try:
    from lm_eval.api.model import LM as BaseLMClass
    API_VERSION = "new"
except ImportError:
    from lm_eval.base import BaseLM as BaseLMClass
    API_VERSION = "old"

def log(msg):
    print(f"[{time.strftime('%H:%M:%S')}] {msg}", flush=True)

class QuIPSharpLM(BaseLMClass):
    """
    lm-eval-compatible wrapper for QuIP# quantized models.

    Supports both old (BaseLM) and new (LM) lm-eval APIs.
    Old API: implements _model_call / _model_generate (batching handled by BaseLM).
    New API: implements loglikelihood / loglikelihood_rolling / generate_until directly.
    """

    def __init__(self, model_path, tokenizer_path, batch_size=1, max_length=2048):
        super().__init__()
        log(f"Loading QuIP# model from {model_path}...")
        t0 = time.time()
        self._model, _ = model_from_hf_path(model_path, use_cuda_graph=False)
        self._model.eval()
        log(f"Model loaded in {time.time()-t0:.1f}s")

        self._tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
        if self._tokenizer.pad_token is None:
            self._tokenizer.pad_token = self._tokenizer.eos_token
        log(f"Tokenizer: {tokenizer_path} (vocab={self._tokenizer.vocab_size})")

        self._batch_size = batch_size
        self._max_length = max_length
        self._device = torch.device("cuda")

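    # --- Properties queried by the harness ---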
    @property
    def eot_token_id(self):
        return self._tokenizer.eos_token_id

    @property
    def max_length(self):
        return self._max_length

    @property
    def max_gen_toks(self):
        return 64

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return 0

    @property
    def world_size(self):
        return 1

    @property
    def tokenizer_name(self):
        return self._tokenizer.name_or_path

    def tok_encode(self, string, **kwargs):
        return self._tokenizer.encode(string, add_special_tokens=False)

    def tok_decode(self, tokens, **kwargs):
        return self._tokenizer.decode(tokens)

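    # --- Old-API hooks: BaseLM handles batching and calls into these ---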
    def _model_call(self, inps):
        """Forward pass; used by BaseLM for loglikelihood."""
        with torch.no_grad():
            return self._model(inps.to(self._device)).logits

    def _model_generate(self, context, max_length, eos_token_id):
        """Generate; used by BaseLM for generate_until."""
        with torch.no_grad():
            return self._model.generate(
                context.to(self._device),
                max_length=max_length,
                eos_token_id=eos_token_id,
                do_sample=False,
            )

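    # --- New-API implementations ---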
    def _encode_pair(self, ctx, cont):
        """Encode context+continuation, return (full_tokens, cont_length)."""
        ctx_enc = self._tokenizer.encode(ctx, add_special_tokens=False)
        cont_enc = self._tokenizer.encode(cont, add_special_tokens=False)
        full = ctx_enc + cont_enc
        if len(full) > self._max_length:
            full = full[-self._max_length:]
            cont_len = min(len(cont_enc), len(full))
        else:
            cont_len = len(cont_enc)
        return full, cont_len

    def loglikelihood(self, requests):
        """Compute log-likelihood with length-sorted batching for speed."""
        if API_VERSION == "old":
            return super().loglikelihood(requests)

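        # Tokenize every (context, continuation) pair up front.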
        encoded = []
        for req in requests:
            ctx, cont = req.args if hasattr(req, 'args') else req
            full, cont_len = self._encode_pair(ctx, cont)
            encoded.append((full, cont_len))

        total = len(encoded)
        results = [None] * total
        bs = max(self._batch_size, 8)  # floor at 8: padding waste beats tiny batches
        pad_id = self._tokenizer.pad_token_id or 0

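        # Process in length-sorted order so each padded batch stays tight.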
        sorted_indices = sorted(range(total), key=lambda i: len(encoded[i][0]))

        log(f" loglikelihood: {total} requests, batch_size={bs} (length-sorted)")
        lens = [len(encoded[i][0]) for i in sorted_indices]
        log(f" sequence lengths: min={lens[0]}, max={lens[-1]}, "
            f"median={lens[len(lens)//2]}")
        t0 = time.time()
        processed = 0

        for batch_start in range(0, total, bs):
            batch_end = min(batch_start + bs, total)
            batch_indices = sorted_indices[batch_start:batch_end]
            batch = [encoded[i] for i in batch_indices]

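            # The batch is length-sorted, so its last element is the longest.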
            max_len = len(batch[-1][0])

            input_ids = torch.full(
                (len(batch), max_len), pad_id,
                dtype=torch.long, device=self._device
            )
            attention_mask = torch.zeros(
                (len(batch), max_len),
                dtype=torch.long, device=self._device
            )

            for i, (tokens, _) in enumerate(batch):
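                # Left-pad: real tokens sit flush against the right edge.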
                offset = max_len - len(tokens)
                input_ids[i, offset:] = torch.tensor(tokens, dtype=torch.long)
                attention_mask[i, offset:] = 1

            with torch.no_grad():
                logits = self._model(
                    input_ids, attention_mask=attention_mask
                ).logits

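            # Score each sequence: sum the log-probs of its continuation tokens.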
            for i, (tokens, cont_len) in enumerate(batch):
                offset = max_len - len(tokens)
                seq_logits = logits[i, offset:]
                seq_ids = input_ids[i, offset:]

                shift_logits = seq_logits[:-1]
                shift_labels = seq_ids[1:]
                log_probs = torch.nn.functional.log_softmax(shift_logits, dim=-1)

                cont_start = len(tokens) - cont_len - 1
                if cont_start < 0:
                    cont_start = 0

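                # Gather the log-prob each continuation token actually received
                # and check whether greedy decoding would reproduce it.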
                cont_labels = shift_labels[cont_start:]
                cont_lps = log_probs[cont_start:]
                cont_log_prob = cont_lps[
                    torch.arange(len(cont_labels), device=self._device),
                    cont_labels
                ].sum().item()
                is_greedy = (
                    shift_logits[cont_start:].argmax(dim=-1) == cont_labels
                ).all().item()

                results[batch_indices[i]] = (cont_log_prob, is_greedy)

            processed += len(batch)
            if processed % (bs * 50) < bs:
                elapsed = time.time() - t0
                speed = processed / elapsed
                eta = (total - processed) / speed if speed > 0 else 0
                log(f" loglikelihood: {processed}/{total} "
                    f"({speed:.1f} req/s, ETA {eta/60:.1f}min)")

        elapsed = time.time() - t0
        log(f" loglikelihood done: {total} in {elapsed:.0f}s "
            f"({total/elapsed:.1f} req/s)")
        return results

    def loglikelihood_rolling(self, requests):
        """Compute full-string log-likelihood (for perplexity)."""
        if API_VERSION == "old":
            return super().loglikelihood_rolling(requests)

        results = []
        for req in requests:
            text = req.args[0] if hasattr(req, 'args') else req[0]
            enc = self._tokenizer.encode(text, add_special_tokens=False)
            if len(enc) > self._max_length:
                enc = enc[-self._max_length:]

            inp = torch.tensor([enc], device=self._device)
            with torch.no_grad():
                logits = self._model(inp).logits

            shift_logits = logits[0, :-1]
            shift_labels = inp[0, 1:]
            log_probs = torch.nn.functional.log_softmax(shift_logits, dim=-1)
            total_lp = sum(
                log_probs[i, shift_labels[i]].item()
                for i in range(len(shift_labels))
            )
            results.append(total_lp)
        return results

    def generate_until(self, requests):
        """Generate text with batched inference for speed."""
        if API_VERSION == "old":
            return super().generate_until(requests)

        total = len(requests)
        results = [None] * total
        bs = max(self._batch_size, 8)  # floor at 8, as in loglikelihood
        pad_id = self._tokenizer.pad_token_id or 0

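        # Parse each request into (token ids, stop strings, generation budget).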
        parsed = []
        for req in requests:
            if hasattr(req, 'args'):
                ctx, gen_kwargs = req.args
            else:
                ctx, gen_kwargs = req
            until = gen_kwargs.get('until', [self._tokenizer.eos_token])
            if isinstance(until, str):  # some harness versions pass a bare string
                until = [until]
            if '\n' not in until:
                until = until + ['\n']
            max_gen = gen_kwargs.get('max_gen_toks', self.max_gen_toks)
            enc = self._tokenizer.encode(ctx, add_special_tokens=False)
            if len(enc) > self._max_length - max_gen:
                enc = enc[-(self._max_length - max_gen):]
            parsed.append((enc, until, max_gen))

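        # Length-sort by context so padded batches waste little compute.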
        sorted_indices = sorted(range(total), key=lambda i: len(parsed[i][0]))

        lens = [len(parsed[i][0]) for i in sorted_indices]
        t0 = time.time()
        log(f" generate_until: {total} requests, batch_size={bs} (length-sorted)")
        log(f" context lengths: min={lens[0]}, max={lens[-1]}, "
            f"median={lens[len(lens)//2]}, max_gen_toks={self.max_gen_toks}")
        processed = 0

        for batch_start in range(0, total, bs):
            batch_end = min(batch_start + bs, total)
            batch_indices = sorted_indices[batch_start:batch_end]
            batch = [parsed[i] for i in batch_indices]

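            # Generation budget: take the batch max so no request is cut short
            # (batches are sorted by context length, not by max_gen).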
            max_gen = max(item[2] for item in batch)

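            # Left-pad contexts to a common length.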
            max_ctx_len = max(len(enc) for enc, _, _ in batch)
            input_ids = torch.full(
                (len(batch), max_ctx_len), pad_id,
                dtype=torch.long, device=self._device
            )
            attention_mask = torch.zeros(
                (len(batch), max_ctx_len),
                dtype=torch.long, device=self._device
            )
            for i, (enc, _, _) in enumerate(batch):
                offset = max_ctx_len - len(enc)
                input_ids[i, offset:] = torch.tensor(enc, dtype=torch.long)
                attention_mask[i, offset:] = 1

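            # Greedy decode; stops at EOS or after max_gen new tokens.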
            with torch.no_grad():
                out = self._model.generate(
                    input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_gen,
                    do_sample=False,
                    eos_token_id=self._tokenizer.eos_token_id,
                )

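            # Left padding means generation starts at max_ctx_len for every row;
            # decode and truncate at the first stop string.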
            for i, (_, until, _) in enumerate(batch):
                gen_tokens = out[i, max_ctx_len:]
                text = self._tokenizer.decode(gen_tokens, skip_special_tokens=True)
                for stop in until:
                    if stop in text:
                        text = text[:text.index(stop)]
                results[batch_indices[i]] = text

            processed += len(batch)
            if processed % (bs * 10) < bs:
                elapsed = time.time() - t0
                speed = processed / elapsed * 60
                eta = (total - processed) / (processed / elapsed) if processed > 0 else 0
                log(f" generate_until: {processed}/{total} "
                    f"({speed:.1f} req/min, ETA {eta/60:.1f}min)")

        elapsed = time.time() - t0
        log(f" generate_until done: {total} in {elapsed:.0f}s "
            f"({total/elapsed*60:.1f} req/min)")
        return results


def main():
    parser = argparse.ArgumentParser(
        description="Polish LLM Leaderboard eval for QuIP# models"
    )
    parser.add_argument('--model_path', default='/dev/shm/eval/model',
                        help='Path to QuIP# model directory')
    parser.add_argument('--tokenizer', default='speakleash/Bielik-11B-v2.3-Instruct',
                        help='Tokenizer name or path')
    parser.add_argument('--output_dir', default='/dev/shm/eval/results_a',
                        help='Output directory for results')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='Batch size for eval')
    parser.add_argument('--num_fewshot', type=int, default=5,
                        help='Number of few-shot examples')
    parser.add_argument('--tasks', nargs='+',
                        default=['polish_generate_few', 'polish_mc'],
                        help='Task groups to evaluate')
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    log("=" * 60)
    log(" Polish LLM Leaderboard Eval")
    log(" Model: QuIP# Bielik-Q2-Sharp Variant A")
    log(f" lm-eval API: {API_VERSION}")
    log(f" Tasks: {args.tasks}")
    log(f" Few-shot: {args.num_fewshot}")
    log("=" * 60)

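    # Load the quantized model and tokenizer.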
    model = QuIPSharpLM(
        model_path=args.model_path,
        tokenizer_path=args.tokenizer,
        batch_size=args.batch_size,
    )

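    # Run the harness; newer lm-eval accepts log_samples/batch_size, while
    # older releases need the no_cache signature instead.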
    log(f"\nRunning {len(args.tasks)} tasks...")
    t0 = time.time()

    try:
        results = evaluator.simple_evaluate(
            model=model,
            tasks=args.tasks,
            num_fewshot=args.num_fewshot,
            log_samples=True,
            batch_size=args.batch_size,
        )
    except TypeError as e:
        log(f"simple_evaluate TypeError ({e}), trying older signature...")
        results = evaluator.simple_evaluate(
            model=model,
            tasks=args.tasks,
            num_fewshot=args.num_fewshot,
            no_cache=True,
        )

    elapsed = time.time() - t0
    log(f"\nAll tasks completed in {elapsed:.0f}s")

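    # Persist raw results; default=str handles non-JSON-serializable values.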
    out_file = os.path.join(args.output_dir, 'full_results.json')
    with open(out_file, 'w') as f:
        json.dump(results, f, indent=2, default=str)
    log(f"Saved: {out_file}")

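    # Per-task metric dump.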
    all_results = {}
    if 'results' in results:
        for task_name, metrics in results['results'].items():
            log(f"\n {task_name}:")
            for k, v in metrics.items():
                if isinstance(v, (int, float)):
                    log(f" {k}: {v:.4f}" if isinstance(v, float) else f" {k}: {v}")
            all_results[task_name] = metrics

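    # Summary: one headline metric per task, averaged across tasks.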
    log("\n" + "=" * 60)
    log(" FINAL RESULTS SUMMARY")
    log("=" * 60)
    scores = []
    # all_results maps task name -> metric dict, so iterate it directly
    # (a doubly-nested loop here would treat metric values as dicts and crash).
    for task_name, metrics in all_results.items():
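        # First metric found in preference order wins; lm-eval >= 0.4 keys
        # metrics as "<name>,<filter>" (e.g. "acc,none"), so check both forms.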
        for key in ['acc_norm', 'acc', 'f1', 'exact_match']:
            matched = key if key in metrics else f'{key},none'
            if matched in metrics:
                val = metrics[matched]
                if isinstance(val, (int, float)):
                    scores.append((task_name, key, val))
                    log(f" {task_name}: {key}={val:.4f}")
                break

    if scores:
        avg = np.mean([s[2] for s in scores])
        log(f"\n Average score: {avg:.4f} ({avg*100:.2f}%)")
        log(" Baseline (IQ2_XXS): 61.34%")
        log(" FP16 Instruct: 65.71%")
        if avg * 100 > 61.34:
            log(f" >>> BEATS BASELINE by {avg*100 - 61.34:.2f}pp <<<")
        else:
            log(f" >>> Below baseline by {61.34 - avg*100:.2f}pp <<<")

    log("=" * 60)
    log(" EVALUATION COMPLETE")
    log("=" * 60)


if __name__ == '__main__':
    main()