#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Hugging Face Space: LLM Benchmarking App using Gradio - Upload config.yaml and dataset.jsonl - Select task - Run benchmarking across multiple models - Compute metrics: Exact Match, F1, ROUGE-L, BLEU - Display results and allow CSV download """ import os import time import json import yaml import gradio as gr import pandas as pd from tqdm import tqdm # Optional metrics try: from rouge_score import rouge_scorer except ImportError: rouge_scorer = None try: import sacrebleu except ImportError: sacrebleu = None # ---------------- Metrics ---------------- # def exact_match(pred, ref): return float(pred.strip().lower() == ref.strip().lower()) def token_f1(pred, ref): pred_tokens = pred.lower().split() ref_tokens = ref.lower().split() if not pred_tokens and not ref_tokens: return 1.0 if not pred_tokens or not ref_tokens: return 0.0 common = sum(min(pred_tokens.count(t), ref_tokens.count(t)) for t in set(pred_tokens)) precision = common / len(pred_tokens) recall = common / len(ref_tokens) return 2 * precision * recall / (precision + recall) if precision + recall else 0.0 def rouge_l(pred, ref): if rouge_scorer: scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True) return scorer.score(ref, pred)["rougeL"].fmeasure return 0.0 def bleu(pred, ref): if sacrebleu: return sacrebleu.corpus_bleu([pred], [[ref]]).score return 0.0 def compute_metrics(task, prediction, reference): metrics = {} if task in ("qa", "classification"): metrics["exact_match"] = exact_match(prediction, reference) metrics["f1"] = token_f1(prediction, reference) elif task in ("summarization", "translation"): metrics["rougeL_f"] = rouge_l(prediction, reference) metrics["bleu"] = bleu(prediction, reference) else: metrics["f1"] = token_f1(prediction, reference) return metrics # ---------------- Hugging Face Inference ---------------- # def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2): from huggingface_hub import InferenceClient client = InferenceClient(model=model_name, token=os.getenv("HUGGINGFACE_HUB_TOKEN")) start = time.time() try: output = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature) latency = time.time() - start return output.strip(), latency except Exception as e: return f"ERROR: {e}", time.time() - start # ---------------- Benchmark Function ---------------- # def benchmark(config_text, dataset_text, task): cfg = yaml.safe_load(config_text) data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()] models = cfg.get("models", []) templates = cfg.get("prompt_templates", {}) template = templates.get(task, "{{text}}") results = [] for m in models: model_name = m["name"] max_tokens = m.get("params", {}).get("max_tokens", 256) temperature = m.get("params", {}).get("temperature", 0.2) for ex in tqdm(data, desc=model_name): variables = {k: ex.get(k, "") for k in ("question", "context", "text", "labels")} prompt = template for k, v in variables.items(): prompt = prompt.replace(f"{{{{{k}}}}}", str(v)) prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature) metrics = compute_metrics(task, prediction, ex.get("reference", "")) row = { "model": model_name, "id": ex.get("id", ""), "task": task, "prompt": prompt, "prediction": prediction, "reference": ex.get("reference", ""), "latency_seconds": latency, **metrics } results.append(row) df = pd.DataFrame(results) summary = [] for model_name in set(df["model"]): sub = df[df["model"] == model_name] summary.append(f"## {model_name}") summary.append(f"Samples: {len(sub)}") for metric in ["exact_match", "f1", "rougeL_f", "bleu"]: if metric in sub.columns: vals = [v for v in sub[metric] if isinstance(v, (int, float))] if vals: summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}") summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n") return df, "\n".join(summary) def hf_judge(model_name, prompt, candidate, reference=None, rubric=None, max_new_tokens=256): """ Calls a judge model to score candidate output. Returns a dict of scores. Rubric should instruct JSON output, e.g.: {"relevance": int, "factuality": int, "clarity": int, "overall": float} """ from huggingface_hub import InferenceClient client = InferenceClient(model=model_name, token=os.getenv("HF_TOKEN")) rubric = rubric or ( "Evaluate the candidate answer. Score 1–5 for:\n" "- Relevance: addresses the prompt\n" "- Factuality: correct and supported\n" "- Clarity: clear and well-structured\n" "Return JSON: {\"relevance\": int, \"factuality\": int, \"clarity\": int, \"overall\": float}" ) judge_prompt = ( f"{rubric}\n\nPrompt:\n{prompt}\n\n" f"Candidate:\n{candidate}\n\n" f"Reference (if available):\n{reference if reference is not None else 'N/A'}\n" ) try: text = client.text_generation(judge_prompt, max_new_tokens=max_new_tokens, temperature=0.0) # Attempt to parse JSON anywhere in the response: import re, json m = re.search(r'\{.*\}', text, re.S) return json.loads(m.group(0)) if m else {"raw": text} except Exception as e: return {"error": str(e)} # ---------------- Gradio UI ---------------- # # ---------------- Gradio UI ---------------- # with gr.Blocks() as demo: gr.Markdown("# LLM Benchmarking App (Hugging Face)") gr.Markdown("Upload config.yaml and dataset.jsonl, select task, and run benchmark.") with gr.Row(): config_file = gr.File(label="Upload Config (YAML)", type="filepath") dataset_file = gr.File(label="Upload Dataset (JSONL)", type="filepath") task = gr.Textbox(label="Task (e.g., qa, summarization, classification)") use_judge = gr.Checkbox(label="Use Judge Model?", value=False) judge_model = gr.Textbox(label="Judge Model (HF repo id)", value="mistralai/Mistral-7B-Instruct") rubric = gr.Textbox(label="Judge Rubric", value="Evaluate relevance, factuality, clarity. Return JSON.") run_btn = gr.Button("Run Benchmark") results_table = gr.Dataframe(headers=[ "model","id","task","prediction","reference","latency_seconds", "exact_match","f1","rougeL_f","bleu","judge_relevance","judge_factuality","judge_clarity","judge_overall" ], label="Results") summary_box = gr.Textbox(label="Summary", lines=10) download_csv = gr.File(label="Download CSV") def run_benchmark(config_path, dataset_path, task, use_judge=False, judge_model=None, rubric=None): if not config_path or not dataset_path: return None, "Error: Please upload both files", None config_text = open(config_path, "r", encoding="utf-8").read() dataset_text = open(dataset_path, "r", encoding="utf-8").read() cfg = yaml.safe_load(config_text) data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()] template = cfg.get("prompt_templates", {}).get(task, "{{text}}") results = [] for m in cfg.get("models", []): model_name = m["name"] max_tokens = m.get("params", {}).get("max_tokens", 256) temperature = m.get("params", {}).get("temperature", 0.2) for ex in data: variables = {k: ex.get(k, "") for k in ("question", "context", "text", "labels")} prompt = template for k, v in variables.items(): prompt = prompt.replace(f"{{{{{k}}}}}", str(v)) prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature) metrics = compute_metrics(task, prediction, ex.get("reference", "")) row = { "model": model_name, "id": ex.get("id", ""), "task": task, "prompt": prompt, "prediction": prediction, "reference": ex.get("reference", ""), "latency_seconds": latency, **metrics } if use_judge and judge_model: scores = hf_judge(judge_model, prompt, prediction, ex.get("reference", ""), rubric) for k, v in (scores.items() if isinstance(scores, dict) else []): row[f"judge_{k}"] = v results.append(row) df = pd.DataFrame(results) csv_path = "results.csv" df.to_csv(csv_path, index=False) # Summary summary = [] for model_name in set(df["model"]): sub = df[df["model"] == model_name] summary.append(f"## {model_name}") summary.append(f"Samples: {len(sub)}") for metric in ["exact_match", "f1", "rougeL_f", "bleu", "judge_overall"]: if metric in sub.columns: vals = [v for v in sub[metric] if isinstance(v, (int, float))] if vals: summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}") summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n") return df, "\n".join(summary), csv_path run_btn.click( run_benchmark, inputs=[config_file, dataset_file, task, use_judge, judge_model, rubric], outputs=[results_table, summary_box, download_csv] ) demo.launch()