"""Evaluate a QuIP# E8P12 2-bit Instruct model on Open PL LLM Leaderboard tasks."""
import argparse
import json
import os
import sys
import time

import torch

sys.path.insert(0, "/workspace/quip-sharp")
torch.set_grad_enabled(False)  # inference only

from transformers import AutoTokenizer
from lm_eval import evaluator
from lm_eval.models.huggingface import HFLM
from lib.utils.unsafe_import import model_from_hf_path  # QuIP# model loader

# Multiple-choice tasks from the Open PL LLM Leaderboard.
MC_TASKS = [
    "polemo2_in_multiple_choice",
    "polemo2_out_multiple_choice",
    "polish_8tags_multiple_choice",
    "polish_belebele_mc",
    "polish_dyk_multiple_choice",
    "polish_ppc_multiple_choice",
    "polish_psc_multiple_choice",
    "polish_cbd_multiple_choice",
    "polish_klej_ner_multiple_choice",
    "polish_polqa_reranking_multiple_choice",
]
# Perplexity task: reported raw, excluded from the normalized average.
PPL_TASKS = ["polish_poleval2018_task3_test_10k"]

# Random-guess baselines per task, used to normalize MC scores:
# norm = (score - baseline) / (1 - baseline), clamped at 0.
BASELINES = {
    "polemo2_in_multiple_choice": 0.416,
    "polemo2_out_multiple_choice": 0.368,
    "polish_8tags_multiple_choice": 0.143,
    "polish_belebele_mc": 0.279,
    "polish_dyk_multiple_choice": 0.289,
    "polish_ppc_multiple_choice": 0.419,
    "polish_psc_multiple_choice": 0.466,
    "polish_cbd_multiple_choice": 0.149,
    "polish_klej_ner_multiple_choice": 0.343,
    "polish_polqa_reranking_multiple_choice": 0.534,
}

parser = argparse.ArgumentParser()
parser.add_argument("--limit", type=int, default=None)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--model_path", type=str, default="/workspace/model")
args = parser.parse_args()

ALL_TASKS = MC_TASKS + PPL_TASKS
start = time.time()
lstr = str(args.limit) if args.limit else "FULL"

print("=" * 70)
print("Open PL LLM Leaderboard - QuIP# E8P12 2-bit Instruct")
print("Batch: %d | Limit: %s" % (args.batch_size, lstr))
print("GPU: %s" % torch.cuda.get_device_name(0))
print("=" * 70)

print("Loading model...")
model, model_str = model_from_hf_path(
    args.model_path, use_cuda_graph=False, use_flash_attn=False
)
tokenizer = AutoTokenizer.from_pretrained(model_str)
tokenizer.pad_token = tokenizer.eos_token
lm = HFLM(
    pretrained=model,
    tokenizer=tokenizer,
    backend="causal",
    batch_size=args.batch_size,
    max_length=4096,
    trust_remote_code=True,
)

ekw = dict(
    model=lm,
    tasks=ALL_TASKS,
    num_fewshot=5,
    batch_size=args.batch_size,
    log_samples=False,
)
if args.limit:
    ekw["limit"] = args.limit

print("Running eval...")
results = evaluator.simple_evaluate(**ekw)
elapsed = time.time() - start

print("\n" + "=" * 70)
print("RESULTS (5-shot, limit=%s)" % lstr)
print("=" * 70)
scores = {}
nscores = {}
for t in ALL_TASKS:
    if t not in results.get("results", {}):
        print("  %-45s MISSING" % t)
        continue
    tr = results["results"][t]
    score = None
    metric = "?"
    # Take the first metric the task reports.
    for mk in ["acc,none", "f1,none", "word_perplexity,none"]:
        if mk in tr:
            score = tr[mk]
            metric = mk.split(",")[0]
            break
    if score is None:
        continue
    bl = BASELINES.get(t, 0)
    if t in PPL_TASKS:
        norm = None  # perplexity is never normalized
    elif 0 < bl < 1.0:
        norm = max(0.0, (score - bl) / (1.0 - bl))
    else:
        norm = max(0.0, score)
    scores[t] = score
    if norm is not None:
        nscores[t] = norm
    ns = ("norm=%.4f" % norm) if norm is not None else ""
    print("  %-45s %s=%.4f %s" % (t, metric, score, ns))

print("-" * 70)
avg = sum(nscores.values()) / len(nscores) if nscores else 0.0
print("  %-45s %.4f (%.2f%%)" % ("Avg MC (normalized)", avg, avg * 100))
print("=" * 70)
print("Time: %.1f min" % (elapsed / 60))

print("\nComparison:")
print("  SpeakLeash IQ2_XXS = 61.34%")
print("  FP16 baseline      = 65.71%")
print("  QuIP# E8P12 2-bit  = %.2f%%" % (avg * 100))

os.makedirs("/workspace/eval_results", exist_ok=True)
fn = "/workspace/eval_results/results_limit%s.json" % (
    str(args.limit) if args.limit else "full"
)
with open(fn, "w") as f:
    json.dump(
        {
            "avg_mc": float(avg),
            "scores": {k: float(v) for k, v in scores.items()},
            "normalized": {k: float(v) for k, v in nscores.items()},
            "full": results.get("results", {}),
        },
        f,
        indent=2,
        default=str,
    )
print("Saved to %s" % fn)
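
# ---------------------------------------------------------------------------
# Example invocations (a sketch; the filename run_pl_eval.py is an assumption,
# save the script under whatever name you prefer):
#
#   # quick smoke test: 50 examples per task, larger batch
#   python run_pl_eval.py --limit 50 --batch_size 4
#
#   # full leaderboard run against the default checkpoint path
#   python run_pl_eval.py --model_path /workspace/model --batch_size 1
#
# Results are written to /workspace/eval_results/results_limit<N>.json,
# or results_limitfull.json when --limit is not set.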