celikn committed on
Commit
ceefa4a
·
verified ·
1 Parent(s): 394176d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -55
app.py CHANGED
@@ -1,4 +1,166 @@
1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  # ---------------- Gradio UI ---------------- #
3
  with gr.Blocks() as demo:
4
  gr.Markdown("# LLM Benchmarking App (Hugging Face)")
@@ -23,65 +185,12 @@ with gr.Blocks() as demo:
23
  def run_benchmark(config_path, dataset_path, task, use_judge):
24
  if not config_path or not dataset_path:
25
  return None, "Error: Please upload both files", None
26
-
27
  config_text = open(config_path, "r", encoding="utf-8").read()
28
  dataset_text = open(dataset_path, "r", encoding="utf-8").read()
29
- cfg = yaml.safe_load(config_text)
30
- data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()]
31
- template = cfg.get("prompt_templates", {}).get(task, "{{text}}")
32
- judge_cfg = cfg.get("judge", {})
33
-
34
- results = []
35
- for m in cfg.get("models", []):
36
- model_name = m["name"]
37
- max_tokens = m.get("params", {}).get("max_tokens", 256)
38
- temperature = m.get("params", {}).get("temperature", 0.2)
39
-
40
- for ex in data:
41
- variables = {k: ex.get(k, "") for k in ("question", "context", "text", "labels")}
42
- prompt = template
43
- for k, v in variables.items():
44
- prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
45
-
46
- prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature)
47
- metrics = compute_metrics(task, prediction, ex.get("reference", ""))
48
-
49
- row = {
50
- "model": model_name,
51
- "id": ex.get("id", ""),
52
- "task": task,
53
- "prompt": prompt,
54
- "prediction": prediction,
55
- "reference": ex.get("reference", ""),
56
- "latency_seconds": latency,
57
- **metrics
58
- }
59
-
60
- if use_judge and judge_cfg.get("enabled"):
61
- scores = hf_judge(judge_cfg.get("model"), prompt, prediction, ex.get("reference", ""), judge_cfg.get("rubric"))
62
- for k, v in (scores.items() if isinstance(scores, dict) else []):
63
- row[f"judge_{k}"] = v
64
-
65
- results.append(row)
66
-
67
- df = pd.DataFrame(results)
68
  csv_path = "results.csv"
69
  df.to_csv(csv_path, index=False)
70
-
71
- # Summary
72
- summary = []
73
- for model_name in set(df["model"]):
74
- sub = df[df["model"] == model_name]
75
- summary.append(f"## {model_name}")
76
- summary.append(f"Samples: {len(sub)}")
77
- for metric in ["exact_match", "f1", "rougeL_f", "bleu", "judge_overall"]:
78
- if metric in sub.columns:
79
- vals = [v for v in sub[metric] if isinstance(v, (int, float))]
80
- if vals:
81
- summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}")
82
- summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n")
83
-
84
- return df, "\n".join(summary), csv_path
85
 
86
  run_btn.click(run_benchmark, inputs=[config_file, dataset_file, task, use_judge], outputs=[results_table, summary_box, download_csv])
87
 
 
1
 
2
+ #!/usr/bin/env python3
3
+ # -*- coding: utf-8 -*-
4
+ """
5
+ Hugging Face Space: LLM Benchmarking App using Gradio
6
+ - Upload config.yaml and dataset.jsonl
7
+ - Select task
8
+ - Run benchmarking across multiple models
9
+ - Compute metrics: Exact Match, F1, ROUGE-L, BLEU
10
+ - Optional judge scoring
11
+ - Display results and allow CSV download
12
+ """
13
+
14
+ import os
15
+ import time
16
+ import json
17
+ import yaml
18
+ import gradio as gr
19
+ import pandas as pd
20
+ from tqdm import tqdm
21
+ from huggingface_hub import login
22
+
23
# ---------------- Authentication ---------------- #
# Log in once at import time when a token is configured; gated/private
# models on the Inference API require it.
HF_TOKEN = (os.environ.get("HUGGINGFACE_HUB_TOKEN", "") or "").strip()
if not HF_TOKEN:
    print("⚠️ WARNING: HF_TOKEN not found. Gated models may fail.")
else:
    login(token=HF_TOKEN)
29
+
30
+ # ---------------- Optional Metrics ---------------- #
31
+ try:
32
+ from rouge_score import rouge_scorer
33
+ except ImportError:
34
+ rouge_scorer = None
35
+
36
+ try:
37
+ import sacrebleu
38
+ except ImportError:
39
+ sacrebleu = None
40
+
41
+ # ---------------- Metrics ---------------- #
42
def exact_match(pred, ref):
    """Binary exact-match score: 1.0 when *pred* equals *ref* after
    stripping surrounding whitespace and lowercasing, else 0.0."""
    normalized_pred = pred.strip().lower()
    normalized_ref = ref.strip().lower()
    return 1.0 if normalized_pred == normalized_ref else 0.0
44
+
45
def token_f1(pred, ref):
    """Whitespace-token F1 between *pred* and *ref*, case-insensitive.

    Returns 1.0 when both strings tokenize to nothing, 0.0 when exactly
    one of them does, otherwise the harmonic mean of token precision and
    recall counted with multiplicity (multiset overlap).
    """
    hyp_tokens = pred.lower().split()
    gold_tokens = ref.lower().split()
    if not hyp_tokens:
        return 1.0 if not gold_tokens else 0.0
    if not gold_tokens:
        return 0.0
    # Multiset intersection: consume one reference occurrence per match.
    remaining = {}
    for tok in gold_tokens:
        remaining[tok] = remaining.get(tok, 0) + 1
    overlap = 0
    for tok in hyp_tokens:
        if remaining.get(tok, 0) > 0:
            remaining[tok] -= 1
            overlap += 1
    if overlap == 0:
        return 0.0
    precision = overlap / len(hyp_tokens)
    recall = overlap / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)
56
+
57
def rouge_l(pred, ref):
    """ROUGE-L F-measure of *pred* against *ref*.

    Falls back to 0.0 when the optional rouge_score package is not
    installed (module-level import failed).
    """
    if not rouge_scorer:
        return 0.0
    # RougeScorer.score expects (target, prediction) argument order.
    scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
    return scorer.score(ref, pred)["rougeL"].fmeasure
62
+
63
def bleu(pred, ref):
    """BLEU score of *pred* against the single reference *ref* via
    sacrebleu; 0.0 when sacrebleu is not installed."""
    if not sacrebleu:
        return 0.0
    return sacrebleu.corpus_bleu([pred], [[ref]]).score
67
+
68
def compute_metrics(task, prediction, reference):
    """Return the metric dict appropriate for *task*.

    qa/classification -> exact_match + f1;
    summarization/translation/conversation -> rougeL_f + bleu;
    any other task -> f1 only.
    """
    if task in ("qa", "classification"):
        return {
            "exact_match": exact_match(prediction, reference),
            "f1": token_f1(prediction, reference),
        }
    if task in ("summarization", "translation", "conversation"):
        return {
            "rougeL_f": rouge_l(prediction, reference),
            "bleu": bleu(prediction, reference),
        }
    return {"f1": token_f1(prediction, reference)}
79
+
80
+ # ---------------- Hugging Face Inference ---------------- #
81
def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2):
    """Generate a completion for *prompt* via the HF Inference API.

    Returns (text, latency_seconds). Never raises: on any failure the
    text is an "ERROR: ..." string and latency covers the failed call.
    """
    from huggingface_hub import InferenceClient

    client = InferenceClient(model=model_name, token=HF_TOKEN)
    t0 = time.time()
    try:
        # Name heuristic: flan/t5 models are seq2seq and use the
        # text2text endpoint; everything else is causal generation.
        # NOTE(review): substring match could misfire on unrelated
        # model ids containing "t5" — confirm against the model list.
        is_seq2seq = "flan" in model_name or "t5" in model_name
        if is_seq2seq:
            output = client.text2text_generation(prompt, max_new_tokens=max_new_tokens)
        else:
            output = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
        return output.strip(), time.time() - t0
    except Exception as e:
        return f"ERROR: {str(e)}", time.time() - t0
95
+
96
+ # ---------------- Judge Function ---------------- #
97
def hf_judge(model_name, prompt, candidate, reference=None, rubric=None, max_new_tokens=256):
    """Score *candidate* with a judge model on the HF Inference API.

    Returns the parsed JSON score dict when the judge emits one,
    {"raw": text} when no JSON object is found in the output, or
    {"error": ...} when the call or parsing fails.
    """
    import re
    from huggingface_hub import InferenceClient

    if not rubric:
        rubric = (
            "Evaluate the candidate answer. Score 1–5 for:\n"
            "- Relevance\n- Factuality\n- Clarity\nReturn JSON: {\"relevance\": int, \"factuality\": int, \"clarity\": int, \"overall\": float}"
        )
    judge_prompt = f"{rubric}\n\nPrompt:\n{prompt}\nCandidate:\n{candidate}\nReference:\n{reference or 'N/A'}"
    client = InferenceClient(model=model_name, token=HF_TOKEN)
    try:
        raw = client.text_generation(judge_prompt, max_new_tokens=max_new_tokens, temperature=0.0)
        # Greedy match grabs the outermost {...} span across newlines.
        found = re.search(r'\{.*\}', raw, re.S)
        if found is None:
            return {"raw": raw}
        return json.loads(found.group(0))
    except Exception as e:
        return {"error": str(e)}
112
+
113
+ # ---------------- Benchmark Function ---------------- #
114
def benchmark(config_text, dataset_text, task, use_judge=False):
    """Run every configured model over the JSONL dataset and score it.

    Parameters
    ----------
    config_text : str
        YAML with `models` (list of {name, params}), `prompt_templates`
        (task -> template with {{placeholders}}) and optional `judge`.
    dataset_text : str
        JSONL, one example per line; blank lines are skipped.
    task : str
        Task key selecting the prompt template and metric set.
    use_judge : bool
        When True and the config's judge is enabled, judge_* columns
        are added per row.

    Returns
    -------
    (pandas.DataFrame, str)
        Per-example results and a markdown summary per model.
    """
    cfg = yaml.safe_load(config_text)
    data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()]
    models = cfg.get("models", [])
    template = cfg.get("prompt_templates", {}).get(task, "{{text}}")
    judge_cfg = cfg.get("judge", {})

    results = []
    for m in models:
        model_name = m["name"]
        params = m.get("params", {})
        max_tokens = params.get("max_tokens", 256)
        temperature = params.get("temperature", 0.2)
        for ex in tqdm(data, desc=model_name):
            # Fill {{placeholder}} slots; missing example keys become "".
            prompt = template
            for k in ("question", "context", "text", "labels"):
                prompt = prompt.replace(f"{{{{{k}}}}}", str(ex.get(k, "")))
            prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature)
            row = {
                "model": model_name,
                "id": ex.get("id", ""),
                "task": task,
                "prompt": prompt,
                "prediction": prediction,
                "reference": ex.get("reference", ""),
                "latency_seconds": latency,
                **compute_metrics(task, prediction, ex.get("reference", "")),
            }
            if use_judge and judge_cfg.get("enabled"):
                scores = hf_judge(judge_cfg.get("model"), prompt, prediction, ex.get("reference", ""), judge_cfg.get("rubric"))
                # hf_judge may return a non-dict on odd failures; skip then.
                if isinstance(scores, dict):
                    for k, v in scores.items():
                        row[f"judge_{k}"] = v
            results.append(row)

    df = pd.DataFrame(results)
    # Fix: with no models or an empty dataset, df has no columns and
    # df["model"] below raised KeyError. Return an explicit message.
    if df.empty:
        return df, "No results: check that the config lists models and the dataset is non-empty."

    summary = []
    # Fix: set(df["model"]) made the summary order nondeterministic;
    # pd.unique preserves first-seen (config) order.
    for model_name in pd.unique(df["model"]):
        sub = df[df["model"] == model_name]
        summary.append(f"## {model_name}")
        summary.append(f"Samples: {len(sub)}")
        for metric in ["exact_match", "f1", "rougeL_f", "bleu", "judge_overall"]:
            if metric in sub.columns:
                vals = [v for v in sub[metric] if isinstance(v, (int, float))]
                if vals:
                    summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}")
        summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n")
    return df, "\n".join(summary)
163
+
164
  # ---------------- Gradio UI ---------------- #
165
  with gr.Blocks() as demo:
166
  gr.Markdown("# LLM Benchmarking App (Hugging Face)")
 
185
def run_benchmark(config_path, dataset_path, task, use_judge):
    """Gradio callback: read the two uploaded files, run the benchmark,
    and return (results_df, summary_markdown, csv_path) for the UI.

    Returns (None, error_message, None) when either upload is missing.
    """
    if not config_path or not dataset_path:
        return None, "Error: Please upload both files", None
    # Fix: the uploads were read via bare open(...).read(), leaking the
    # file handles until GC; use context managers to close them promptly.
    with open(config_path, "r", encoding="utf-8") as f:
        config_text = f.read()
    with open(dataset_path, "r", encoding="utf-8") as f:
        dataset_text = f.read()
    df, summary = benchmark(config_text, dataset_text, task, use_judge)
    csv_path = "results.csv"
    df.to_csv(csv_path, index=False)
    return df, summary, csv_path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
  run_btn.click(run_benchmark, inputs=[config_file, dataset_file, task, use_judge], outputs=[results_table, summary_box, download_csv])
196