#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Hugging Face Space: LLM Benchmarking App using Gradio
- Upload config.yaml and dataset.jsonl
- Select task
- Run benchmarking across multiple models
- Compute metrics: Exact Match, F1, ROUGE-L, BLEU
- Optional judge scoring
- Display results and allow CSV download
"""
import os
import time
import json
import re
import yaml
import gradio as gr
import pandas as pd
from tqdm import tqdm
from huggingface_hub import login
# ---------------- Authentication ---------------- #
HF_TOKEN = (os.environ.get("HUGGINGFACE_HUB_TOKEN", "") or "").strip()
if HF_TOKEN:
    login(token=HF_TOKEN)
else:
    print("⚠️ WARNING: HUGGINGFACE_HUB_TOKEN not set. Gated models may fail.")
# ---------------- Optional Metrics ---------------- #
try:
    from rouge_score import rouge_scorer
except ImportError:
    rouge_scorer = None

try:
    import sacrebleu
except ImportError:
    sacrebleu = None
# ---------------- Metrics ---------------- #
def exact_match(pred, ref):
    """Case-insensitive exact string match (0.0 or 1.0)."""
    return float(pred.strip().lower() == ref.strip().lower())


def token_f1(pred, ref):
    """Token-level F1 over whitespace-split, lowercased tokens."""
    pred_tokens = pred.lower().split()
    ref_tokens = ref.lower().split()
    if not pred_tokens and not ref_tokens:
        return 1.0
    if not pred_tokens or not ref_tokens:
        return 0.0
    common = sum(min(pred_tokens.count(t), ref_tokens.count(t)) for t in set(pred_tokens))
    precision = common / len(pred_tokens)
    recall = common / len(ref_tokens)
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0
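# Illustrative check: token_f1("the cat sat", "the cat") gives precision 2/3,
# recall 2/2, so F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.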
def rouge_l(pred, ref):
    """ROUGE-L F-measure via rouge_score; 0.0 if the package is missing."""
    if rouge_scorer:
        scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
        return scorer.score(ref, pred)["rougeL"].fmeasure
    return 0.0


def bleu(pred, ref):
    """Sentence-level BLEU via sacrebleu (0-100 scale, unlike the 0-1 metrics above)."""
    if sacrebleu:
        return sacrebleu.corpus_bleu([pred], [[ref]]).score
    return 0.0
def compute_metrics(task, prediction, reference):
    """Pick metrics by task: EM/F1 for short answers, ROUGE-L/BLEU for generation."""
    metrics = {}
    if task in ("qa", "classification"):
        metrics["exact_match"] = exact_match(prediction, reference)
        metrics["f1"] = token_f1(prediction, reference)
    elif task in ("summarization", "translation", "conversation"):
        metrics["rougeL_f"] = rouge_l(prediction, reference)
        metrics["bleu"] = bleu(prediction, reference)
    else:
        metrics["f1"] = token_f1(prediction, reference)
    return metrics
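# Illustrative check: compute_metrics("qa", "Paris", "paris")
# -> {"exact_match": 1.0, "f1": 1.0}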
# ---------------- Hugging Face Inference ---------------- #
def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2):
    from huggingface_hub import InferenceClient
    client = InferenceClient(model=model_name, token=HF_TOKEN)
    start = time.time()
    try:
        # Route seq2seq models (FLAN/T5 family) separately from causal LMs;
        # lowercase the name so capitalized repo names still match.
        name = model_name.lower()
        if "flan" in name or "t5" in name:
            output = client.text2text_generation(prompt, max_new_tokens=max_new_tokens)
        else:
            output = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
        latency = time.time() - start
        return str(output).strip(), latency
    except Exception as e:
        return f"ERROR: {e}", time.time() - start
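# Illustrative call (model name is just an example):
#   text, secs = hf_generate("google/flan-t5-base", "Q: What is 2+2?\nA:")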
# ---------------- Judge Function ---------------- #
def hf_judge(model_name, prompt, candidate, reference=None, rubric=None, max_new_tokens=256):
    from huggingface_hub import InferenceClient
    client = InferenceClient(model=model_name, token=HF_TOKEN)
    rubric = rubric or (
        "Evaluate the candidate answer. Score 1–5 for:\n"
        "- Relevance\n- Factuality\n- Clarity\nReturn JSON: {\"relevance\": int, \"factuality\": int, \"clarity\": int, \"overall\": float}"
    )
    judge_prompt = f"{rubric}\n\nPrompt:\n{prompt}\nCandidate:\n{candidate}\nReference:\n{reference or 'N/A'}"
    try:
        # Greedy decoding for deterministic judging; some backends reject temperature=0.0.
        text = client.text_generation(judge_prompt, max_new_tokens=max_new_tokens, do_sample=False)
        # Pull the first JSON object out of the response.
        m = re.search(r"\{.*\}", text, re.S)
        return json.loads(m.group(0)) if m else {"raw": text}
    except Exception as e:
        return {"error": str(e)}
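# On success the judge returns the parsed rubric JSON, e.g.
#   {"relevance": 4, "factuality": 5, "clarity": 4, "overall": 4.3}
# (values illustrative); on failure, {"error": "..."} or {"raw": <unparsed text>}.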
# ---------------- Benchmark Function ---------------- #
def benchmark(config_text, dataset_text, task, use_judge=False):
    cfg = yaml.safe_load(config_text)
    data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()]
    models = cfg.get("models", [])
    templates = cfg.get("prompt_templates", {})
    template = templates.get(task, "{{text}}")
    judge_cfg = cfg.get("judge", {})
    results = []
    for m in models:
        model_name = m["name"]
        max_tokens = m.get("params", {}).get("max_tokens", 256)
        temperature = m.get("params", {}).get("temperature", 0.2)
        for ex in tqdm(data, desc=model_name):
            # Fill {{placeholders}} in the template from the example's fields.
            variables = {k: ex.get(k, "") for k in ("question", "context", "text", "labels")}
            prompt = template
            for k, v in variables.items():
                prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
            prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature)
            metrics = compute_metrics(task, prediction, ex.get("reference", ""))
            row = {
                "model": model_name,
                "id": ex.get("id", ""),
                "task": task,
                "prompt": prompt,
                "prediction": prediction,
                "reference": ex.get("reference", ""),
                "latency_seconds": latency,
                **metrics,
            }
            if use_judge and judge_cfg.get("enabled"):
                scores = hf_judge(judge_cfg.get("model"), prompt, prediction, ex.get("reference", ""), judge_cfg.get("rubric"))
                for k, v in (scores.items() if isinstance(scores, dict) else []):
                    row[f"judge_{k}"] = v
            results.append(row)
    df = pd.DataFrame(results)
    if df.empty:
        return df, "No results: check that the config lists models and the dataset is non-empty."
    summary = []
    for model_name in sorted(set(df["model"])):
        sub = df[df["model"] == model_name]
        summary.append(f"## {model_name}")
        summary.append(f"Samples: {len(sub)}")
        for metric in ["exact_match", "f1", "rougeL_f", "bleu", "judge_overall"]:
            if metric in sub.columns:
                vals = [v for v in sub[metric] if isinstance(v, (int, float))]
                if vals:
                    summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}")
        summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n")
    return df, "\n".join(summary)
# ---------------- Gradio UI ---------------- #
with gr.Blocks() as demo:
    gr.Markdown("# LLM Benchmarking App (Hugging Face)")
    gr.Markdown("Upload config.yaml and dataset.jsonl, select a task, and run the benchmark.")
    with gr.Row():
        config_file = gr.File(label="Upload Config (YAML)", type="filepath")
        dataset_file = gr.File(label="Upload Dataset (JSONL)", type="filepath")
    task = gr.Dropdown(choices=["qa", "summarization", "classification", "conversation"], label="Select Task")
    use_judge = gr.Checkbox(label="Enable Judge Scoring?", value=False)
    run_btn = gr.Button("Run Benchmark")
    results_table = gr.Dataframe(headers=[
        "model", "id", "task", "prompt", "prediction", "reference", "latency_seconds",
        "exact_match", "f1", "rougeL_f", "bleu", "judge_overall",
    ], label="Results")
    summary_box = gr.Textbox(label="Summary", lines=10)
    download_csv = gr.File(label="Download CSV")
    def run_benchmark(config_path, dataset_path, task, use_judge):
        if not config_path or not dataset_path:
            return None, "Error: Please upload both files", None
        with open(config_path, "r", encoding="utf-8") as f:
            config_text = f.read()
        with open(dataset_path, "r", encoding="utf-8") as f:
            dataset_text = f.read()
        df, summary = benchmark(config_text, dataset_text, task, use_judge)
        csv_path = "results.csv"
        df.to_csv(csv_path, index=False)
        return df, summary, csv_path

    run_btn.click(run_benchmark, inputs=[config_file, dataset_file, task, use_judge], outputs=[results_table, summary_box, download_csv])

demo.launch()