Update app.py
app.py (CHANGED)
```diff
@@ -162,50 +162,57 @@ def hf_judge(model_name, prompt, candidate, reference=None, rubric=None, max_new
         return {"error": str(e)}
 
 
+# ---------------- Gradio UI ---------------- #
+
 # ---------------- Gradio UI ---------------- #
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Benchmarking App (Hugging Face)")
     gr.Markdown("Upload config.yaml and dataset.jsonl, select task, and run benchmark.")
-
 
     with gr.Row():
-        use_judge = gr.Checkbox(label="Use judge model", value=False)
-        judge_model = gr.Textbox(label="Judge model (HF repo id)", value="mistralai/Mistral-7B-Instruct")
         config_file = gr.File(label="Upload Config (YAML)", type="filepath")
         dataset_file = gr.File(label="Upload Dataset (JSONL)", type="filepath")
+
     task = gr.Textbox(label="Task (e.g., qa, summarization, classification)")
+    use_judge = gr.Checkbox(label="Use Judge Model?", value=False)
+    judge_model = gr.Textbox(label="Judge Model (HF repo id)", value="mistralai/Mistral-7B-Instruct")
+    rubric = gr.Textbox(label="Judge Rubric", value="Evaluate relevance, factuality, clarity. Return JSON.")
+
     run_btn = gr.Button("Run Benchmark")
 
-    results_table = gr.Dataframe(headers=[
+    results_table = gr.Dataframe(headers=[
+        "model","id","task","prediction","reference","latency_seconds",
+        "exact_match","f1","rougeL_f","bleu","judge_relevance","judge_factuality","judge_clarity","judge_overall"
+    ], label="Results")
+
     summary_box = gr.Textbox(label="Summary", lines=10)
     download_csv = gr.File(label="Download CSV")
-
-
 
+    def run_benchmark(config_path, dataset_path, task, use_judge=False, judge_model=None, rubric=None):
+        if not config_path or not dataset_path:
+            return None, "Error: Please upload both files", None
 
-
-    def run_benchmark(config_path, dataset_path, task, use_judge=False, judge_model=None):
         config_text = open(config_path, "r", encoding="utf-8").read()
         dataset_text = open(dataset_path, "r", encoding="utf-8").read()
         cfg = yaml.safe_load(config_text)
         data = [json.loads(line) for line in dataset_text.splitlines() if line.strip()]
         template = cfg.get("prompt_templates", {}).get(task, "{{text}}")
-
+
         results = []
         for m in cfg.get("models", []):
             model_name = m["name"]
             max_tokens = m.get("params", {}).get("max_tokens", 256)
             temperature = m.get("params", {}).get("temperature", 0.2)
-
+
             for ex in data:
                 variables = {k: ex.get(k, "") for k in ("question", "context", "text", "labels")}
                 prompt = template
                 for k, v in variables.items():
                     prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
-
+
                 prediction, latency = hf_generate(model_name, prompt, max_new_tokens=max_tokens, temperature=temperature)
                 metrics = compute_metrics(task, prediction, ex.get("reference", ""))
-
+
                 row = {
                     "model": model_name,
                     "id": ex.get("id", ""),
@@ -216,22 +223,38 @@ with gr.Blocks() as demo:
                     "latency_seconds": latency,
                     **metrics
                 }
-
+
                 if use_judge and judge_model:
-                    scores = hf_judge(judge_model, prompt, prediction, ex.get("reference", ""))
-                    # Flatten judge scores with prefix
+                    scores = hf_judge(judge_model, prompt, prediction, ex.get("reference", ""), rubric)
                     for k, v in (scores.items() if isinstance(scores, dict) else []):
                         row[f"judge_{k}"] = v
-
+
                 results.append(row)
-
-
-
-
-
-
-
-
-
+
+        df = pd.DataFrame(results)
+        csv_path = "results.csv"
+        df.to_csv(csv_path, index=False)
+
+        # Summary
+        summary = []
+        for model_name in set(df["model"]):
+            sub = df[df["model"] == model_name]
+            summary.append(f"## {model_name}")
+            summary.append(f"Samples: {len(sub)}")
+            for metric in ["exact_match", "f1", "rougeL_f", "bleu", "judge_overall"]:
+                if metric in sub.columns:
+                    vals = [v for v in sub[metric] if isinstance(v, (int, float))]
+                    if vals:
+                        summary.append(f"{metric}: mean={sum(vals)/len(vals):.4f}")
+            summary.append(f"Latency mean: {sum(sub['latency_seconds'])/len(sub):.3f}s\n")
+
+        return df, "\n".join(summary), csv_path
+
+    run_btn.click(
+        run_benchmark,
+        inputs=[config_file, dataset_file, task, use_judge, judge_model, rubric],
+        outputs=[results_table, summary_box, download_csv]
+    )
 
 demo.launch()
+
```
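For context on the inputs this change expects: `run_benchmark` reads `cfg["models"]` (each entry carrying a `name` and optional `params`) and `cfg["prompt_templates"][task]`, falling back to a bare `{{text}}` template for unknown tasks. A config.yaml shaped like the sketch below would satisfy those lookups; the model id and template wording are illustrative placeholders, not files from this Space.

```yaml
# Illustrative config.yaml; the key names mirror the lookups in run_benchmark,
# everything else (model id, template text) is a placeholder.
models:
  - name: mistralai/Mistral-7B-Instruct   # HF repo id handed to hf_generate
    params:
      max_tokens: 256      # defaults to 256 when omitted
      temperature: 0.2     # defaults to 0.2 when omitted
prompt_templates:
  qa: "Context: {{context}}\nQuestion: {{question}}\nAnswer:"
  summarization: "Summarize the following text:\n{{text}}"
  # any other task falls back to the bare "{{text}}" template
```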
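The dataset file is JSONL, one example per line. The inner loop substitutes `question`, `context`, `text`, and `labels` into the template and reads `id` and `reference` for scoring, with every field defaulting to an empty string when absent. Two illustrative rows:

```jsonl
{"id": "qa-001", "question": "What is the capital of France?", "context": "Paris is the capital and largest city of France.", "reference": "Paris"}
{"id": "sum-001", "text": "Gradio is a Python library for building browser demos of machine learning models.", "reference": "Gradio builds ML demos in Python."}
```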
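`hf_generate` is defined above this hunk, so only its contract is visible here: it takes a model name, a prompt, `max_new_tokens`, and `temperature`, and returns a `(prediction, latency)` pair that `run_benchmark` unpacks. A minimal sketch consistent with that call site, assuming `huggingface_hub.InferenceClient` as the backend (the Space's actual implementation may differ):

```python
import time

from huggingface_hub import InferenceClient


def hf_generate(model_name, prompt, max_new_tokens=256, temperature=0.2):
    """Sketch only: query a hosted HF model and time the round trip."""
    client = InferenceClient(model=model_name)
    start = time.time()
    prediction = client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )
    # run_benchmark unpacks this as (prediction, latency_seconds).
    return prediction, time.time() - start
```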
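`hf_judge` is likewise defined above the hunk. What the diff shows is its signature in the hunk header (`model_name, prompt, candidate, reference=None, rubric=None, max_new...`, with the final parameter cut off), the `return {"error": str(e)}` branch at the top, and the `judge_relevance`/`judge_factuality`/`judge_clarity`/`judge_overall` columns its output is expected to fill. A sketch under those assumptions; the truncated parameter is guessed to be `max_new_tokens`, and the prompt wording and JSON extraction are mine:

```python
import json

from huggingface_hub import InferenceClient


def hf_judge(model_name, prompt, candidate, reference=None, rubric=None, max_new_tokens=256):
    """Sketch only: ask a judge model for JSON scores over a candidate answer."""
    rubric = rubric or "Evaluate relevance, factuality, clarity. Return JSON."
    judge_prompt = (
        f"{rubric}\n\nTask prompt:\n{prompt}\n\nCandidate answer:\n{candidate}\n"
        + (f"\nReference answer:\n{reference}\n" if reference else "")
        + '\nReply with JSON only, e.g. {"relevance": 4, "factuality": 5, "clarity": 4, "overall": 4}'
    )
    try:
        client = InferenceClient(model=model_name)
        raw = client.text_generation(judge_prompt, max_new_tokens=max_new_tokens)
        # Pull the first {...} span out of the reply; its keys become judge_* columns upstream.
        payload = raw[raw.find("{"): raw.rfind("}") + 1]
        return json.loads(payload)
    except Exception as e:
        # Same failure shape as the `return {"error": str(e)}` line visible in the hunk.
        return {"error": str(e)}
```

If the reply cannot be parsed, the error dict still passes `run_benchmark`'s `isinstance(scores, dict)` guard and simply surfaces as a `judge_error` column.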