"""
Compute Best-of-N metrics with a TRAINED selector (binary YES/NO classifier).
For each question, run the selector on each of N candidates and pick the one
with the highest YES probability. Also compute greedy / pass@N for comparison.
Usage:
python scripts/compute_bestofn_with_selector.py <rollout.jsonl> <selector_ckpt> <label> [--selector_host URL]
If --selector_host given (a vLLM endpoint), use it. Otherwise load model in-process.
"""
import argparse
import json
import os
import re
import sys
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
# Bypass HTTP proxy for local vLLM endpoints
os.environ["NO_PROXY"] = "localhost,127.0.0.1"
os.environ["no_proxy"] = "localhost,127.0.0.1"
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(ROOT)
sys.path.insert(0, ROOT)
from validator_data.validator import _execute_sql
from data_processing.planner import is_execution_correct
import requests
PROMPT_TEMPLATE = (
    "You are a SQL correctness judge.\n"
    "Schema:\n{schema}\n\n"
    "Question: {question}\n"
    "External knowledge: {evidence}\n\n"
    "Candidate SQL:\n{sql}\n\n"
    "Execution result:\n{exec_result}\n\n"
    "Is this SQL correct for the question? Answer YES or NO."
)
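# The placeholders above are filled by build_prompt_chat() below; the selector is expected to
# answer with a single YES/NO token, which score_via_vllm() reads back via top-token logprobs.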
def qwen_chat(prompt: str) -> str:
    """Wrap a prompt in the Qwen (ChatML) chat template, leaving the assistant turn open."""
    return f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
def safe_truncate(s, n=400):
    """Stringify `s` and truncate long values to `n` characters plus an ellipsis."""
    if s is None:
        return "(empty)"
    s = str(s)
    return s if len(s) <= n else s[:n] + "..."
def safe_execute(db_path, sql):
    """Execute `sql` on the database at `db_path`; returns (result, has_error).
    Empty SQL or any exception is treated as an error.
    """
    if not sql or sql.strip() == "":
        return ("", True)
    try:
        return _execute_sql("./" + db_path, sql)
    except Exception:
        return ("", True)
def score_via_vllm(host, prompt_chat, model_name="selector"):
    """Score a candidate as logprob(YES) - logprob(NO) via a 1-token vLLM completion with logprobs."""
    payload = {
        "model": model_name,
        "prompt": prompt_chat,
        "max_tokens": 1,
        "n": 1,
        "temperature": 0.0,
        "logprobs": 20,
    }
    try:
        r = requests.post(f"{host}/v1/completions", json=payload, timeout=60)
        r.raise_for_status()
        choice = r.json()["choices"][0]
        # Look at logprobs of the top tokens; compare YES vs NO variants
        if "logprobs" in choice and choice["logprobs"]:
            top = choice["logprobs"]["top_logprobs"][0]
            yes_lp = max((top[k] for k in (" YES", "YES", " yes", "yes", " Yes", "Yes") if k in top), default=-100.0)
            no_lp = max((top[k] for k in (" NO", "NO", " no", "no", " No", "No") if k in top), default=-100.0)
            return yes_lp - no_lp
        # Fallback: match on the generated text when logprobs are unavailable
        text = choice.get("text", "").strip().upper()
        return 1.0 if text.startswith("YES") else (-1.0 if text.startswith("NO") else -100.0)
    except Exception as e:
        sys.stderr.write(f"score_via_vllm err: {type(e).__name__}: {e}\n")
        return -100.0
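# NOTE: score_via_vllm returns -100.0 both when the request fails and when neither a YES nor a
# NO variant appears in the top logprobs, so such candidates effectively sort to the bottom of
# the Best-of-N ranking below.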
def build_prompt_chat(sample, t, exec_result_str=None):
    """Build selector prompt.
    `exec_result_str` MUST be the actual SQL execution result preview (or error message).
    Do NOT pass the gold-graded label — that leaks the correctness label into the prompt
    and makes the selector trivially match the oracle.
    """
    schema = sample.get("schema", "")
    question = sample.get("question", "")
    evidence = sample.get("evidence", "") or "None"
    fixed_sql = t.get("fixed_sql") or t.get("planner_sql") or ""
    if exec_result_str is None:
        # Safe default at inference time: signal unknown (selector must judge from SQL alone)
        exec_result_str = "(execution result not available)"
    prompt = PROMPT_TEMPLATE.format(
        schema=safe_truncate(schema, 3000),
        question=question,
        evidence=evidence,
        sql=safe_truncate(fixed_sql, 800),
        exec_result=safe_truncate(exec_result_str, 300),
    )
    return qwen_chat(prompt)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("rollout_jsonl")
    parser.add_argument("label")
    parser.add_argument("--selector_host", default="http://localhost:8103")
    args = parser.parse_args()
    n_q = 0
    n_greedy = 0
    n_pass_at_N = 0
    n_majority = 0  # rule-based majority (for comparison)
    n_selector = 0  # trained selector pick
    K_used = None
    samples = []
    with open(args.rollout_jsonl) as f:
        for line in f:
            line = line.strip()
            if line:
                samples.append(json.loads(line))
    print(f"Loaded {len(samples)} samples")
    for sample in samples:
        traj = sample.get("trajectories", [])
        if not traj:
            continue
        n_q += 1
        if K_used is None:
            K_used = len(traj)
        # Greedy = first traj
        if traj[0].get("is_fixed_correct"):
            n_greedy += 1
        # pass@N = any correct
        if any(t.get("is_fixed_correct") for t in traj):
            n_pass_at_N += 1
        # Rule-based majority: pick most-common non-empty execution result
        db_path = sample["db_path"]
        gold_sql = sample["sql"]
        true_exec = safe_execute(db_path, gold_sql)
        if true_exec[1]:
            continue  # gold SQL fails to execute; skip majority/selector for this question
        # Execute all candidates' fixed SQLs once
        with ThreadPoolExecutor(max_workers=8) as exe:
            exec_results = list(exe.map(
                lambda t: safe_execute(db_path, t.get("fixed_sql") or t.get("planner_sql") or ""),
                traj
            ))
        # Rule-based majority
        majority_picks = []
        for i, (er, t) in enumerate(zip(exec_results, traj)):
            if er[1]:
                continue
            res_str = str(er[0]).strip()
            if not res_str or "(no rows)" in res_str or res_str == "[]":
                continue
            majority_picks.append((i, res_str))
        if majority_picks:
            counter = Counter(s for _, s in majority_picks)
            top_res, _ = counter.most_common(1)[0]
            # Credit the first candidate whose execution result matches the majority result.
            for i, s in majority_picks:
                if s == top_res:
                    if traj[i].get("is_fixed_correct"):
                        n_majority += 1
                    break
        else:
            # No usable execution result: fall back to the greedy (first) candidate.
            if traj[0].get("is_fixed_correct"):
                n_majority += 1
        # Trained selector: score each candidate using REAL execution result (no gold-label leak).
        # Re-use the exec_results computed above for rule-based majority.
        scores = []
        with ThreadPoolExecutor(max_workers=8) as exe:
            def _make_prompt(idx, t_):
                er = exec_results[idx]
                if er[1]:
                    exec_str = f"Error: {er[0]}"
                else:
                    rows_str = str(er[0])
                    if not rows_str.strip() or rows_str.strip() == "[]":
                        exec_str = "OK. Result rows (preview): (no rows)"
                    else:
                        exec_str = f"OK. Result rows (preview): {rows_str[:300]}"
                return build_prompt_chat(sample, t_, exec_result_str=exec_str)
            futs = [exe.submit(score_via_vllm, args.selector_host, _make_prompt(i, t)) for i, t in enumerate(traj)]
            for f in futs:
                scores.append(f.result())
        # max() keeps the earliest argmax, so ties go to the first candidate.
        best_idx = max(range(len(scores)), key=lambda i: scores[i])
        if traj[best_idx].get("is_fixed_correct"):
            n_selector += 1
    if n_q == 0:
        print(f"{args.label}: no questions evaluated")
        return
    print()
    print(f"=== {args.label} ===")
    print(f" questions evaluated: {n_q}")
    print(f" K used per question: {K_used}")
    print(f" greedy (1st traj): {n_greedy}/{n_q} = {100*n_greedy/n_q:.2f}%")
    print(f" rule-based majority: {n_majority}/{n_q} = {100*n_majority/n_q:.2f}%")
    print(f" trained selector: {n_selector}/{n_q} = {100*n_selector/n_q:.2f}%")
    print(f" pass@{K_used} (oracle): {n_pass_at_N}/{n_q} = {100*n_pass_at_N/n_q:.2f}%")
    print()
if __name__ == "__main__":
    main()