# mats-sql-bundle / scripts / run_pipeline_rollouts.py
# (uploaded by thanhdath via huggingface_hub, revision 82ae30c)
"""
Pipeline rollout driver for the 3-stage collaborative-ORPO experiment.
Three-stage pipeline:
q → PLANNER (Qwen-Coder-0.5B SFT'd) → plan + first-cut SQL
→ VALIDATOR (Qwen-Coder-0.5B SFT'd) → free-form critique (4 sections)
→ FIXER (Qwen-Coder-0.5B SFT'd) → final SQL
For each input question we sample K planner outputs with stochastic decoding,
then for each planner output we sample K_val validator outputs, and for each
(planner, validator) we sample K_fix fixer outputs. Each leaf trajectory is
graded by execution of the fixer's final SQL.
The output JSONL is consumed by build_rl_data_collaborative.py to construct
preference pairs (planner-indep / planner-collab / validator-collab / fixer).
Usage:
# Three vLLM endpoints, e.g.
# GPU 0:8100 = planner
# GPU 1:8101 = validator
# GPU 1:8102 = fixer (can co-locate validator+fixer on one GPU since both 0.5B)
python scripts/run_pipeline_rollouts.py \\
--input_file data/sft_bird_with_evidence_train_text2sql.json \\
--output_file data/rollouts/bird_train_3stage_K4.jsonl \\
--planner_host http://localhost:8100 \\
--validator_host http://localhost:8101 \\
--fixer_host http://localhost:8102 \\
--K 4 --K_val 2 --K_fix 1 \\
--temperature 0.7 --top_p 0.9 \\
--max_questions 1000
"""
import argparse
import json
import os
import re
import sys
import time
from concurrent.futures import ThreadPoolExecutor
# Bypass HTTP proxy for local vLLM endpoints
os.environ["NO_PROXY"] = "localhost,127.0.0.1"
os.environ["no_proxy"] = "localhost,127.0.0.1"
import requests
from tqdm import tqdm
# Resolve the repo root (parent of scripts/) and run from there so that
# relative db_path values in the input data resolve correctly, and so the
# project-local packages below are importable.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(ROOT)
sys.path.insert(0, ROOT)
from validator_data.validator import _execute_sql
from data_processing.planner import is_execution_correct
# Planner prompt — "Planning:" cues the plan-then-SQL output format the
# planner model was SFT'd on.
PLANNER_PROMPT_TEMPLATE = (
    "{schema}\n\n"
    "Question: {question}\n"
    "External knowledge: {evidence}\n\n"
    "Planning:"
)
# Validator prompt — must match what the validator was SFT'd to expect.
VALIDATOR_PROMPT_HEADER = (
    "You are a SQL critique agent. Output FOUR critique sections "
    "(<select>...</select>, <condition>...</condition>, <join>...</join>, <order>...</order>) "
    "analysing the SQL query below; do NOT output any SQL.\n\n"
)
# Specialized 2-validator headers (match SFT data built in build_validator_2agents_v3.py).
VALIDATOR_SEL_HEADER = (
    "You are a SQL SELECT-clause critique agent. Output ONE critique section "
    "<select>...</select> analysing the SELECT clause of the SQL query below; "
    "do NOT output any SQL. Use 'None' if the SELECT clause looks correct.\n\n"
)
VALIDATOR_COND_HEADER = (
    "You are a SQL CONDITION critique agent. Output ONE critique section "
    "<condition>...</condition> analysing the WHERE/HAVING/CASE-WHEN conditions "
    "of the SQL query below; do NOT output any SQL. Use 'None' if the conditions "
    "look correct.\n\n"
)
# Shared context body appended after each validator header above.
VALIDATOR_PROMPT_BODY = (
    "database schema:\n{schema}\n\n"
    "Question: {question}\n"
    "External knowledge: {evidence}\n\n"
    "Generated SQL query: {sql_query}\n\n"
    "Execution response:\n{execution_response}\n\n"
)
# Fixer prompt — must match what the fixer was SFT'd to expect.
FIXER_PROMPT_HEADER = (
    "You are a SQL fixer. Given the question, schema, original SQL query, "
    "execution response, and the validator's critique below, output ONLY the corrected "
    "final SQL inside ```sql ... ``` markers.\n\n"
)
def qwen_chat(prompt: str) -> str:
    """Wrap a raw prompt in the Qwen chat template, leaving the assistant turn open."""
    prefix = "<|im_start|>user\n"
    suffix = "<|im_end|>\n<|im_start|>assistant\n"
    return prefix + prompt + suffix
def vllm_complete(host, model, prompt, n, temperature, top_p, max_tokens, seed=100):
    """POST a completion request to a vLLM OpenAI-compatible endpoint.

    Retries up to 3 times with a 1s pause between attempts; returns the list
    of generated texts, or [] after repeated failures (logged to stderr).
    """
    body = {
        "model": model,
        "prompt": prompt,
        "max_tokens": max_tokens,
        "n": n,
        "temperature": temperature,
        "top_p": top_p,
        "stop": ["<|im_end|>", "<|endoftext|>"],
        "seed": seed,
    }
    attempts_left = 3
    while attempts_left:
        attempts_left -= 1
        try:
            resp = requests.post(f"{host}/v1/completions", json=body, timeout=120)
            resp.raise_for_status()
            return [choice["text"] for choice in resp.json()["choices"]]
        except Exception as e:
            if not attempts_left:
                print(f"vLLM call failed: {e}", file=sys.stderr)
                return []
            time.sleep(1)
    return []
def extract_sql_from_planner(text):
    """Pull the SQL string out of a planner completion.

    Prefers the fenced block after "Final SQL query:", then any fenced block,
    then falls back to the raw (stripped) text. A leading "sql" language tag
    is stripped from the fenced content.

    FIX: the language-tag strip was case-sensitive (`startswith("sql")`),
    unlike extract_sql_from_fixer's, so an "SQL"-labelled fence leaked the
    label into the returned query. Now case-insensitive for consistency.
    """
    if text is None:
        return ""
    m = re.search(r"Final SQL query:\s*```(.+?)```", text, re.DOTALL)
    if not m:
        m = re.search(r"```(.+?)```", text, re.DOTALL)
    if not m:
        # No fences at all: treat the whole completion as the query.
        return text.strip()
    s = m.group(1).strip()
    if s.lower().startswith("sql"):
        s = s[3:].strip()
    return s
def extract_sql_from_fixer(text):
    """Pull the final SQL out of a fixer completion.

    Handles ```sql / ```sqlite fences (any case), a bare ``` fence with an
    optional language tag, and finally falls back to the stripped raw text.

    FIX: "sqlite" is a common fence label; the old pattern (```sql\\s*\\n?)
    matched only the "sql" prefix and leaked "ite" into the returned SQL.
    The same leak existed in the bare-fence tag-strip fallback.
    """
    if text is None:
        return ""
    m = re.search(r"```sql(?:ite)?\s*\n?(.+?)```", text, re.DOTALL | re.IGNORECASE)
    if m:
        return m.group(1).strip()
    m = re.search(r"```(.+?)```", text, re.DOTALL)
    if m:
        s = m.group(1).strip()
        low = s.lower()
        if low.startswith("sqlite"):
            s = s[6:].strip()
        elif low.startswith("sql"):
            s = s[3:].strip()
        return s
    # No fences: strip stray backticks around the raw text.
    return text.strip().strip("`").strip()
def parse_validator_sections(text):
    """Extract the four tagged critique sections from a validator output.

    Returns a dict with keys select/condition/join/order; sections missing
    from the text come back as empty strings.
    """
    parsed = {}
    for tag in ("select", "condition", "join", "order"):
        match = re.search(fr"<{tag}>(.*?)</{tag}>", text, re.DOTALL | re.IGNORECASE)
        parsed[tag] = match.group(1).strip() if match else ""
    return parsed
def safe_execute(db_path, sql):
    """Execute sql against the database at db_path without ever raising.

    Returns the (response, has_error) pair from _execute_sql; empty/blank
    SQL and any raised exception are reported as errors.
    """
    if not sql or not sql.strip():
        return ("", True)
    try:
        return _execute_sql("./" + db_path, sql)
    except Exception as exc:
        return (str(exc), True)
def build_planner_prompt(sample):
    """Fill PLANNER_PROMPT_TEMPLATE from one dataset sample."""
    schema_text = sample.get("schema_sequence") or sample.get("schema") or ""
    evidence_text = sample.get("evidence", "") or "None"
    return PLANNER_PROMPT_TEMPLATE.format(
        schema=schema_text,
        question=sample.get("question", ""),
        evidence=evidence_text,
    )
def build_validator_prompt(sample, planner_sql, exec_response):
    """Compose the unified (legacy 4-section) validator prompt."""
    fields = {
        "schema": sample.get("schema_sequence") or sample.get("schema") or "",
        "question": sample.get("question", ""),
        "evidence": sample.get("evidence", "") or "None",
        "sql_query": planner_sql,
        "execution_response": exec_response,
    }
    return VALIDATOR_PROMPT_HEADER + VALIDATOR_PROMPT_BODY.format(**fields)
def build_validator_sel_prompt(sample, planner_sql, exec_response):
    """Compose the specialized SELECT-clause validator prompt."""
    fields = {
        "schema": sample.get("schema_sequence") or sample.get("schema") or "",
        "question": sample.get("question", ""),
        "evidence": sample.get("evidence", "") or "None",
        "sql_query": planner_sql,
        "execution_response": exec_response,
    }
    return VALIDATOR_SEL_HEADER + VALIDATOR_PROMPT_BODY.format(**fields)
def build_validator_cond_prompt(sample, planner_sql, exec_response):
    """Compose the specialized CONDITION (WHERE/HAVING) validator prompt."""
    fields = {
        "schema": sample.get("schema_sequence") or sample.get("schema") or "",
        "question": sample.get("question", ""),
        "evidence": sample.get("evidence", "") or "None",
        "sql_query": planner_sql,
        "execution_response": exec_response,
    }
    return VALIDATOR_COND_HEADER + VALIDATOR_PROMPT_BODY.format(**fields)
def build_fixer_prompt(sample, planner_sql, exec_response, critique):
    """Compose the fixer prompt: header + question/schema context + critique."""
    schema_text = sample.get('schema_sequence') or sample.get('schema') or ''
    evidence_text = sample.get('evidence', '') or 'None'
    pieces = [
        FIXER_PROMPT_HEADER,
        f"database schema:\n{schema_text}\n\n",
        f"Question: {sample.get('question', '')}\n",
        f"External knowledge: {evidence_text}\n\n",
        f"Generated SQL query: {planner_sql}\n\n",
        f"Execution response:\n{exec_response}\n\n",
        "\n\nValidator critique:\n",
        critique,
        "\n\nFinal SQL:",
    ]
    return "".join(pieces)
def process_sample(sample, args):
    """Roll out the full planner → validator → fixer pipeline for one question.

    Samples K planner outputs, K_val validator critiques per planner output,
    and K_fix fixer outputs per critique, then grades every leaf trajectory by
    executing its final SQL and comparing to the gold execution result.

    Returns None when the gold SQL itself fails to execute or no usable
    trajectory could be produced; otherwise a dict with question metadata and
    the graded trajectory list.
    """
    db_path = sample["db_path"]
    gold_sql = sample["sql"]
    # Gold execution result is the grading reference for every trajectory.
    true_exec = safe_execute(db_path, gold_sql)
    if true_exec[1]:
        return None  # gold has error; skip
    # Stage 1: planner — K samples (optionally split across temperatures via --mixed_temp)
    planner_prompt_raw = build_planner_prompt(sample)
    planner_chat = qwen_chat(planner_prompt_raw)
    if getattr(args, "mixed_temp", "").strip():
        temps = [float(x) for x in args.mixed_temp.split(",") if x.strip()]
        # distribute args.K samples across temperatures
        # NOTE(review): when K < len(temps), per_temp clamps to 1 so more than
        # K samples are emitted in total — confirm that is intended.
        per_temp = max(1, args.K // len(temps))
        remainder = args.K - per_temp * len(temps)
        planner_outputs = []
        for i, t in enumerate(temps):
            n_t = per_temp + (1 if i < remainder else 0)
            if n_t <= 0:
                continue
            outs = vllm_complete(
                args.planner_host, "planner", planner_chat,
                n=n_t, temperature=t, top_p=args.top_p,
                # Distinct seed per temperature so samples don't collide.
                max_tokens=args.max_planner_tokens, seed=args.seed + i * 31,
            )
            planner_outputs.extend(outs)
    else:
        planner_outputs = vllm_complete(
            args.planner_host, "planner", planner_chat,
            n=args.K, temperature=args.temperature, top_p=args.top_p,
            max_tokens=args.max_planner_tokens, seed=args.seed,
        )
    if not planner_outputs:
        return None
    trajectories = []
    for plan in planner_outputs:
        planner_sql = extract_sql_from_planner(plan)
        if not planner_sql:
            continue
        # Execute the planner's SQL so the validator/fixer prompts can show
        # real execution feedback (error text or a truncated row preview).
        planner_exec = safe_execute(db_path, planner_sql)
        exec_response = (
            f"Error: {planner_exec[0]}" if planner_exec[1]
            else f"OK. Result rows (preview): {str(planner_exec[0])[:300]}"
        )
        # Stage 2: validator — K_val samples per planner output (or skip if validator_host empty)
        # Three modes:
        #   (a) Two specialized validators: --validator_sel_host + --validator_cond_host (per-paper design)
        #   (b) Legacy unified validator: --validator_host (single 4-section model)
        #   (c) None: insert all-OK placeholder
        v_sel = getattr(args, "validator_sel_host", "") or ""
        v_cond = getattr(args, "validator_cond_host", "") or ""
        if v_sel and v_sel.lower() != "none" and v_cond and v_cond.lower() != "none":
            sel_prompt = build_validator_sel_prompt(sample, planner_sql, exec_response)
            cond_prompt = build_validator_cond_prompt(sample, planner_sql, exec_response)
            sel_outputs = vllm_complete(
                v_sel, "validator_sel", qwen_chat(sel_prompt),
                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
                max_tokens=args.max_validator_tokens, seed=args.seed,
            )
            cond_outputs = vllm_complete(
                v_cond, "validator_cond", qwen_chat(cond_prompt),
                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
                max_tokens=args.max_validator_tokens, seed=args.seed + 1,
            )
            # Pair selection+condition outputs index-wise, padding with "None" if one ran short.
            validator_outputs = []
            for i in range(args.K_val):
                s_out = sel_outputs[i] if i < len(sel_outputs) else "<select>\nSELECT.\nNone\n</select>"
                c_out = cond_outputs[i] if i < len(cond_outputs) else "<condition>\nCONDITION.\nNone\n</condition>"
                # join/order are not modeled by the 2-validator setup, so
                # they are filled with all-OK placeholders.
                combined = (
                    s_out.strip() + "\n\n" +
                    c_out.strip() + "\n\n" +
                    "<join>\nJOIN.\nNone\n</join>\n\n"
                    "<order>\nORDER BY.\nNone\n</order>"
                )
                validator_outputs.append(combined)
            validator_prompt_raw = sel_prompt + "\n\n[+]\n\n" + cond_prompt  # for logging
        elif args.validator_host and args.validator_host.lower() != "none":
            validator_prompt_raw = build_validator_prompt(sample, planner_sql, exec_response)
            validator_chat = qwen_chat(validator_prompt_raw)
            validator_outputs = vllm_complete(
                args.validator_host, "validator", validator_chat,
                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
                max_tokens=args.max_validator_tokens, seed=args.seed,
            )
        else:
            # No validator: synthesize an all-OK critique K_val times so the
            # fixer stage still runs with a uniform prompt shape.
            validator_prompt_raw = build_validator_prompt(sample, planner_sql, exec_response)
            validator_outputs = [
                "<select>\nSELECT.\nNone\n</select>\n\n"
                "<condition>\nCONDITION.\nNone\n</condition>\n\n"
                "<join>\nJOIN.\nNone\n</join>\n\n"
                "<order>\nORDER BY.\nNone\n</order>"
            ] * args.K_val
        for val_out in validator_outputs:
            sections = parse_validator_sections(val_out)
            critique_text = val_out.strip()  # full critique as the validator's "completion"
            # Stage 3: fixer — K_fix samples (skip when fixer_host=none → keep planner_sql)
            fixer_prompt_raw = build_fixer_prompt(sample, planner_sql, exec_response, critique_text)
            if args.fixer_host and args.fixer_host.lower() != "none":
                fixer_chat = qwen_chat(fixer_prompt_raw)
                fixer_outputs = vllm_complete(
                    args.fixer_host, "fixer", fixer_chat,
                    n=args.K_fix, temperature=args.temperature, top_p=args.top_p,
                    max_tokens=args.max_fixer_tokens, seed=args.seed,
                )
            else:
                fixer_outputs = [""] * args.K_fix  # empty → fixed_sql will fallback to planner_sql
            for fix_out in fixer_outputs:
                fixed_sql = extract_sql_from_fixer(fix_out) or planner_sql
                trajectories.append({
                    "planner_prompt": planner_prompt_raw,
                    "planner_output": plan,
                    "planner_sql": planner_sql,
                    "planner_exec_ok": not planner_exec[1],
                    "validator_prompt": validator_prompt_raw,
                    "validator_output": critique_text,
                    "fb_select": sections["select"],
                    "fb_condition": sections["condition"],
                    "fb_join": sections["join"],
                    "fb_order": sections["order"],
                    "fixer_prompt": fixer_prompt_raw,
                    "fixer_output": fix_out,
                    "fixed_sql": fixed_sql,
                })
    if not trajectories:
        return None
    # Grade each trajectory
    # NOTE(review): planner SQL is re-executed here although it already ran
    # above — duplicated work, but harmless for correctness.
    with ThreadPoolExecutor(max_workers=8) as exe:
        planner_execs = list(exe.map(
            lambda t: safe_execute(db_path, t["planner_sql"]), trajectories
        ))
        fixed_execs = list(exe.map(
            lambda t: safe_execute(db_path, t["fixed_sql"]), trajectories
        ))
    for i, t in enumerate(trajectories):
        pe, fe = planner_execs[i], fixed_execs[i]
        # Correct = executed without error AND result matches gold execution.
        t["is_planner_correct"] = (
            (not pe[1]) and is_execution_correct(true_exec[0], pe[0])
        )
        t["is_fixed_correct"] = (
            (not fe[1]) and is_execution_correct(true_exec[0], fe[0])
        )
    return {
        "question": sample["question"],
        "evidence": sample.get("evidence", ""),
        "db_path": db_path,
        "db_id": sample.get("db_id", ""),
        "schema": sample.get("schema_sequence") or sample.get("schema") or "",
        "sql": gold_sql,
        "trajectories": trajectories,
    }
def main():
    """CLI entry point.

    Loads the input question set, rolls out the 3-stage pipeline across a
    thread pool, and appends one graded JSONL record per question to
    --output_file. Re-running resumes by skipping (question, db_id) pairs
    already present in the output file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", required=True)
    parser.add_argument("--output_file", required=True)
    parser.add_argument("--planner_host", default="http://localhost:8100")
    parser.add_argument("--validator_host", default="http://localhost:8101",
                        help="Single unified validator host (legacy 4-section). "
                             "Ignored when --validator_sel_host AND --validator_cond_host are set.")
    parser.add_argument("--validator_sel_host", default="",
                        help="Specialized SELECT-clause validator host (paper v_s). "
                             "When both this and --validator_cond_host are set, the unified validator is bypassed.")
    parser.add_argument("--validator_cond_host", default="",
                        help="Specialized CONDITION validator host (paper v_c).")
    parser.add_argument("--fixer_host", default="http://localhost:8102")
    parser.add_argument("--K", type=int, default=4, help="planner samples per question")
    parser.add_argument("--K_val", type=int, default=2, help="validator samples per planner output")
    parser.add_argument("--K_fix", type=int, default=1, help="fixer samples per (planner, validator)")
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--seed", type=int, default=100)
    parser.add_argument("--max_planner_tokens", type=int, default=1024)
    parser.add_argument("--max_validator_tokens", type=int, default=512)
    parser.add_argument("--max_fixer_tokens", type=int, default=512)
    parser.add_argument("--max_questions", type=int, default=-1)
    parser.add_argument("--n_threads", type=int, default=8)
    parser.add_argument("--mixed_temp", type=str, default="",
                        help="Comma-separated temperatures to mix across K planner samples (e.g. '0.5,0.7,0.9,1.1'). "
                             "If set, args.temperature is ignored for the planner stage. Used to boost pass@K diversity.")
    args = parser.parse_args()

    print(f"Loading {args.input_file}...")
    with open(args.input_file) as f:
        data = json.load(f)
    if args.max_questions > 0:
        data = data[: args.max_questions]
    print(f" {len(data)} questions")

    # FIX: os.makedirs("") raises when --output_file is a bare filename with
    # no directory component; only create the directory when one is present.
    out_dir = os.path.dirname(args.output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Resume support: collect (question, db_id) keys already written.
    seen = set()
    if os.path.exists(args.output_file):
        with open(args.output_file) as f:
            for line in f:
                try:
                    d = json.loads(line)
                    seen.add((d["question"], d.get("db_id", "")))
                except Exception:
                    pass  # tolerate a truncated/corrupt trailing line
        print(f" resuming: skip {len(seen)} already-processed")
    todo = [s for s in data if (s["question"], s.get("db_id", "")) not in seen]
    print(f" to process: {len(todo)}")

    n_ok = 0
    n_winloss = 0
    # FIX: open the output file in a `with` block so it is closed (and the
    # last record flushed) even if the rollout loop raises.
    with open(args.output_file, "a") as fout, \
            ThreadPoolExecutor(max_workers=args.n_threads) as pool:
        futures = {pool.submit(process_sample, s, args): s for s in todo}
        pbar = tqdm(total=len(todo), desc="rollouts")
        for fut in futures:
            try:
                result = fut.result()
            except Exception as e:
                print(f"sample failed: {e}", file=sys.stderr)
                pbar.update(1)
                continue
            if result is None:
                pbar.update(1)
                continue
            n_ok += 1
            # A question is useful for preference pairs only when it has both
            # a winning and a losing trajectory.
            wins = sum(1 for t in result["trajectories"] if t["is_fixed_correct"])
            losses = sum(1 for t in result["trajectories"] if not t["is_fixed_correct"])
            if wins > 0 and losses > 0:
                n_winloss += 1
            fout.write(json.dumps(result) + "\n")
            fout.flush()  # crash-safe incremental output (enables resume)
            pbar.update(1)
            pbar.set_postfix(ok=n_ok, winloss=n_winloss)
        pbar.close()
    print(f"Done. processed={n_ok}, with_winloss={n_winloss} ({100*n_winloss/max(n_ok,1):.1f}%)")


if __name__ == "__main__":
    main()