thanhdath committed
Commit 82ae30c · verified · 1 Parent(s): c268baf

Upload folder using huggingface_hub

scripts/build_fixer_replanner_iter2.py ADDED
@@ -0,0 +1,102 @@
+"""Build fixer ORPO iter-2 're-planner' dataset.
+
+Insight: the current fixer is too conservative — it changes planner_sql only 1.4%
+of the time and rescues 0/533 hard questions on BIRD-dev. The fixer architecture
+needs to be re-framed: instead of 'apply small critique-driven edit', train it as
+a re-planner that produces a COMPLETE correct alternative when given a failed
+attempt.
+
+Data source: K=4 BIRD-train rollouts. For each question, find a (wrong-trajectory,
+correct-trajectory) pair within the K=4 samples. Use:
+- chosen   = correct trajectory's planner_sql (the alternative that works)
+- rejected = wrong trajectory's planner_sql or the fixer's mistaken output
+- prompt   = fixer's standard prompt with the wrong trajectory as the input
+
+Output: data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner
+"""
+import json
+import os
+import random
+import re
+
+from datasets import Dataset, DatasetDict
+
+OUT_DIR = "/home/datht/mats-sql-tist/data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner"
+
+SRC_PATHS = [
+    "/home/datht/mats-sql-tist/data/rollouts/bird_train_3stage_K4.jsonl",
+    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_3stage_K4.jsonl",
+    "/home/datht/mats-sql-tist/data/rollouts/iter2_bird_train_3stage_K8.jsonl",
+    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_2stage_K4.jsonl",
+]
+
+
+def normalize_sql(sql):
+    return re.sub(r"\s+", " ", sql or "").lower().strip()
+
+
+def main():
+    rng = random.Random(42)
+    pairs = []
+    seen_keys = set()  # (question_hash, wrong_sql_hash) → dedup
+
+    for p in SRC_PATHS:
+        if not os.path.exists(p):
+            continue
+        with open(p) as f:
+            for line in f:
+                s = json.loads(line)
+                traj = s.get("trajectories", [])
+                if len(traj) < 2:
+                    continue
+                correct_trajs = [t for t in traj if t.get("is_planner_correct")]
+                wrong_trajs = [t for t in traj if not t.get("is_planner_correct")]
+                if not correct_trajs or not wrong_trajs:
+                    continue
+                # Pick the shortest correct planner_sql as the "preferred"
+                # alternative (sorted once per question, not per wrong trajectory).
+                correct_trajs_sorted = sorted(correct_trajs, key=lambda t: len(t.get("planner_sql") or ""))
+                csql = (correct_trajs_sorted[0].get("planner_sql") or "").strip()
+                if not csql:
+                    continue
+                # Build (wrong → correct) pairs within the K samples
+                for wt in wrong_trajs:
+                    wsql = (wt.get("planner_sql") or "").strip()
+                    if not wsql or normalize_sql(csql) == normalize_sql(wsql):
+                        continue
+                    fixer_prompt = (wt.get("fixer_prompt") or "").strip()
+                    if not fixer_prompt:
+                        continue
+                    key = (hash(s.get("question", "")), hash(normalize_sql(wsql)))
+                    if key in seen_keys:
+                        continue
+                    seen_keys.add(key)
+                    chosen_text = f"```sql\n{csql}\n```"
+                    rejected_text = f"```sql\n{wsql}\n```"
+                    pairs.append({
+                        "prompt": fixer_prompt,
+                        "chosen": chosen_text,
+                        "rejected": rejected_text,
+                        "db_path": s.get("db_path", ""),
+                        "question": s.get("question", ""),
+                        "db_id": s.get("db_id", ""),
+                    })
+
+    rng.shuffle(pairs)
+
+    n_test = max(40, len(pairs) // 30)
+    test = pairs[:n_test]
+    train = pairs[n_test:]
+
+    dd = DatasetDict({
+        "train_dpo": Dataset.from_list(train),
+        "test_dpo": Dataset.from_list(test),
+    })
+    dd.save_to_disk(OUT_DIR)
+
+    print("=== Fixer ORPO iter-2 RE-PLANNER dataset ===")
+    print(f"  total pairs: {len(pairs)}")
+    print(f"  train: {len(train)}, test: {len(test)}")
+    print(f"  Saved to {OUT_DIR}")
+
+
+if __name__ == "__main__":
+    main()
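
For orientation, the rows this script saves are standard preference records. A minimal sketch of inspecting one, assuming the script has been run and the output path from the docstring exists relative to your checkout:

```python
# Minimal sketch: inspect one preference pair emitted by build_fixer_replanner_iter2.py.
# The path mirrors OUT_DIR from the script's docstring; adjust to your checkout.
from datasets import load_from_disk

dd = load_from_disk("data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner")
row = dd["train_dpo"][0]
print(row["prompt"][:200])   # fixer prompt built around the failed trajectory
print(row["chosen"])         # fenced sql block holding the working alternative
print(row["rejected"])       # fenced sql block holding the failed planner_sql
```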
scripts/build_validator_2agents_v3.py ADDED
@@ -0,0 +1,109 @@
+"""Split v3 unified validator data into 2 specialized SFT datasets:
+- Validator Selection (v_s): critique only the SELECT clause
+- Validator Condition (v_c): critique only the WHERE/HAVING/CASE conditions
+
+Per the paper (approach.tex §Combined Validator), the multi-agent design has
+2 specialized validators, not one unified validator. This script extracts the
+<select>...</select> and <condition>...</condition> sections from v3 unified
+completions and emits 2 SFT datasets with section-specific prompts.
+
+Outputs:
+- data/multi-agents/fixed/sft-validator-selection-v3
+- data/multi-agents/fixed/sft-validator-condition-v3
+"""
+import re
+
+from datasets import load_from_disk, Dataset, DatasetDict
+
+
+SEL_INSTR = "You are a SQL SELECT-clause critique agent. Output ONE critique section <select>...</select> analysing the SELECT clause of the SQL query below; do NOT output any SQL. Use 'None' if the SELECT clause looks correct."
+COND_INSTR = "You are a SQL CONDITION critique agent. Output ONE critique section <condition>...</condition> analysing the WHERE/HAVING/CASE-WHEN conditions of the SQL query below; do NOT output any SQL. Use 'None' if the conditions look correct."
+
+
+SEC_RE = {
+    "select": re.compile(r"<select>(.*?)</select>", re.DOTALL),
+    "condition": re.compile(r"<condition>(.*?)</condition>", re.DOTALL),
+}
+
+
+def replace_header_block(prompt_unified, new_header_line):
+    """Replace the leading 'You are a SQL critique agent...' line with a section-specific one."""
+    # The unified prompts begin with: "You are a SQL critique agent. Output FOUR
+    # critique sections (...). do NOT output any SQL.\n\n...".
+    # Strip everything before the first blank line; keep the rest
+    # (schema + question + sql) via a safe split on the first \n\n.
+    parts = prompt_unified.split("\n\n", 1)
+    rest = parts[1] if len(parts) > 1 else parts[0]
+    return new_header_line + "\n\n" + rest
+
+
+def main():
+    v3 = load_from_disk("/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v3")
+    sel_train, sel_test = [], []
+    cond_train, cond_test = [], []
+
+    for split_name, sel_out, cond_out in [("train", sel_train, cond_train), ("test", sel_test, cond_test)]:
+        ds = v3[split_name]
+        for ex in ds:
+            prompt = ex["prompt"]
+            completion = ex["completion"]
+            sel_match = SEC_RE["select"].search(completion)
+            cond_match = SEC_RE["condition"].search(completion)
+            if not sel_match or not cond_match:
+                continue
+            sel_body = sel_match.group(0).strip()  # full <select>...</select>
+            cond_body = cond_match.group(0).strip()
+
+            # Build section-specific prompts
+            sel_prompt = replace_header_block(prompt, SEL_INSTR)
+            cond_prompt = replace_header_block(prompt, COND_INSTR)
+
+            # NOTE: the SFT trainer in alignment-handbook reads the `messages` column via
+            # dict access (chat_template uses messages['prompt'] / messages['completion']),
+            # so store it as a dict.
+            sel_out.append({
+                "prompt": sel_prompt,
+                "completion": sel_body,
+                "messages": {"prompt": sel_prompt, "completion": sel_body},
+            })
+            cond_out.append({
+                "prompt": cond_prompt,
+                "completion": cond_body,
+                "messages": {"prompt": cond_prompt, "completion": cond_body},
+            })
+
+    sel_dd = DatasetDict({
+        "train": Dataset.from_list(sel_train),
+        "test": Dataset.from_list(sel_test),
+    })
+    cond_dd = DatasetDict({
+        "train": Dataset.from_list(cond_train),
+        "test": Dataset.from_list(cond_test),
+    })
+
+    sel_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-selection-v3"
+    cond_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-condition-v3"
+    sel_dd.save_to_disk(sel_dir)
+    cond_dd.save_to_disk(cond_dir)
+
+    # Distribution
+    def stats(rows):
+        n_none = sum(
+            1 for r in rows
+            if r["completion"].strip().lower().endswith("none")
+            or "None\n</" in r["completion"]
+            or "No issues" in r["completion"]
+        )
+        return f"{len(rows)} total, {n_none} all-OK ({100*n_none/max(len(rows),1):.1f}%)"
+
+    print("=== Validator Selection (v_s) ===")
+    print(f"  train: {stats(sel_train)}")
+    print(f"  test:  {stats(sel_test)}")
+    print(f"  Saved to {sel_dir}")
+    print("\n=== Validator Condition (v_c) ===")
+    print(f"  train: {stats(cond_train)}")
+    print(f"  test:  {stats(cond_test)}")
+    print(f"  Saved to {cond_dir}")
+
+
+if __name__ == "__main__":
+    main()
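
As a sanity check on the section extraction above, here is a toy run of the same regex pattern on a hypothetical unified completion (the text is made up):

```python
# Toy illustration of SEC_RE: the SFT target keeps the FULL tagged section
# (group(0) includes the <select>...</select> tags), not just the inner text.
import re

sec_re = re.compile(r"<select>(.*?)</select>", re.DOTALL)
completion = "<select>\nSELECT.\nNone\n</select>\n\n<condition>\nCONDITION.\nNone\n</condition>"
m = sec_re.search(completion)
print(m.group(0))  # "<select>\nSELECT.\nNone\n</select>" -> stored as `completion`
```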
scripts/build_validator_sft_v3_balanced.py ADDED
@@ -0,0 +1,168 @@
+"""Build v3 validator SFT data with balanced all-OK + critique rows.
+
+v2 had 8.1% all-OK rows → the validator hallucinates critiques at inference.
+v3 supplements v2 with ~5000 all-OK rows mined from real planner_correct
+trajectories on BIRD-TRAIN, so the validator learns to stay silent when
+the planner SQL is already correct.
+
+Output: data/multi-agents/fixed/sft-validator-diverse-v3
+"""
+import json
+import random
+
+from datasets import load_from_disk, Dataset, DatasetDict
+
+OK_TEMPLATES = [
+    """<select>
+SELECT.
+No issues with SELECT.
+</select>
+
+<condition>
+CONDITION.
+No issues with WHERE/HAVING.
+</condition>
+
+<join>
+JOIN.
+Tables and join keys look correct.
+</join>
+
+<order>
+ORDER BY.
+None
+</order>""",
+    """<select>
+SELECT.
+The SELECT clause is correct.
+</select>
+
+<condition>
+CONDITION.
+Filter conditions look correct.
+</condition>
+
+<join>
+JOIN.
+No issues with JOIN.
+</join>
+
+<order>
+ORDER BY.
+None
+</order>""",
+    """<select>
+SELECT.
+None
+</select>
+
+<condition>
+CONDITION.
+None
+</condition>
+
+<join>
+JOIN.
+None
+</join>
+
+<order>
+ORDER BY.
+None
+</order>""",
+    """<select>
+SELECT.
+The projection list matches the question.
+</select>
+
+<condition>
+CONDITION.
+WHERE/HAVING clauses are correct.
+</condition>
+
+<join>
+JOIN.
+Tables and join keys are correct.
+</join>
+
+<order>
+ORDER BY.
+The ordering is correct.
+</order>""",
+]
+
+
+def main():
+    rng = random.Random(42)
+
+    # Load existing v2 (force plain-dict copy; drop "messages" because v2 stores it
+    # as a non-list dict that breaks arrow)
+    v2 = load_from_disk("/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v2")
+    v2_train = [{"prompt": r["prompt"], "completion": r["completion"]} for r in v2["train"]]
+    v2_test = [{"prompt": r["prompt"], "completion": r["completion"]} for r in v2["test"]]
+
+    # Mine all-OK rows from K=4 train rollouts (planner_correct trajectories)
+    src = "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_2stage_K4.jsonl"
+    ok_rows = []
+    seen_prompts = set()
+    with open(src) as f:
+        for line in f:
+            s = json.loads(line)
+            for t in s.get("trajectories", []):
+                if not t.get("is_planner_correct"):
+                    continue
+                vp = (t.get("validator_prompt") or "").strip()
+                if not vp:
+                    # rebuild from planner_prompt
+                    pp = (t.get("planner_prompt") or "").strip()
+                    psql = (t.get("planner_sql") or "").strip()
+                    if not pp or not psql:
+                        continue
+                    vp = pp + "\n\nSQL query:\n" + psql
+                # dedup on full vp
+                if vp in seen_prompts:
+                    continue
+                seen_prompts.add(vp)
+                ok_rows.append(vp)
+
+    rng.shuffle(ok_rows)
+
+    # Aim: balance such that all-OK ≈ critique. v2 has ~5208 critique rows.
+    target_ok = 5200
+    # Hold out a small mined sample for the test split BEFORE truncating to
+    # target_ok; slicing past target_ok after truncation is always empty.
+    test_ok = ok_rows[target_ok:target_ok + 100]
+    ok_rows = ok_rows[:target_ok]
+
+    # New all-OK rows: pair each mined prompt with a random all-OK template
+    new_rows = []
+    for vp in ok_rows:
+        completion = rng.choice(OK_TEMPLATES)
+        new_rows.append({"prompt": vp, "completion": completion})
+
+    new_test_rows = []
+    for vp in test_ok:
+        completion = rng.choice(OK_TEMPLATES)
+        new_test_rows.append({"prompt": vp, "completion": completion})
+
+    # Combine: v2 critique rows + new all-OK rows
+    train_combined = v2_train + new_rows
+    test_combined = v2_test + new_test_rows
+    rng.shuffle(train_combined)
+
+    dd = DatasetDict({
+        "train": Dataset.from_list(train_combined),
+        "test": Dataset.from_list(test_combined),
+    })
+
+    out_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v3"
+    dd.save_to_disk(out_dir)
+
+    # Stats
+    n_train = len(train_combined)
+    n_train_ok = sum(1 for r in train_combined if "No issues" in r["completion"] or r["completion"].count("None") >= 3)
+    print("v3 built:")
+    print(f"  train: {n_train} ({n_train_ok} all-OK, {n_train - n_train_ok} critique)")
+    print(f"  test:  {len(test_combined)}")
+    print(f"  Saved to {out_dir}")
+
+
+if __name__ == "__main__":
+    main()
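
A quick way to re-check the balance after the script runs is to apply the same all-OK heuristic its stats block uses; a sketch, assuming the output path above relative to your checkout:

```python
# Sketch: recompute the all-OK share of the saved v3 dataset with the
# script's own heuristic ("No issues" substring or >= 3 "None" markers).
from datasets import load_from_disk

dd = load_from_disk("data/multi-agents/fixed/sft-validator-diverse-v3")
train = dd["train"]
n_ok = sum(1 for r in train if "No issues" in r["completion"] or r["completion"].count("None") >= 3)
print(f"all-OK share: {n_ok}/{len(train)} = {100 * n_ok / len(train):.1f}%")
```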
scripts/compute_bestofn_metrics.py ADDED
@@ -0,0 +1,158 @@
+"""
+Compute Best-of-N metrics from a 3-stage pipeline rollout JSONL:
+- greedy:   EX of the first trajectory (K=1 baseline)
+- pass@N:   EX if ANY of the N trajectories is correct (oracle upper bound)
+- majority: EX of the SQL whose execution result is the most common non-empty
+            result among executable trajectories (rule-based selector,
+            no extra trained selection agent needed).
+
+Usage:
+    python scripts/compute_bestofn_metrics.py <rollout.jsonl> <label>
+"""
+import json
+import os
+import sys
+from collections import Counter
+
+ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+os.chdir(ROOT)
+sys.path.insert(0, ROOT)
+from validator_data.validator import _execute_sql
+from data_processing.planner import is_execution_correct
+
+
+def safe_execute(db_path, sql):
+    if not sql or sql.strip() == "":
+        return ("", True)
+    try:
+        return _execute_sql("./" + db_path, sql)
+    except Exception as e:
+        return (str(e), True)
+
+
+def hash_result(result):
+    """Hash the execution result for majority voting (handles DataFrame, list, str, None)."""
+    if result is None:
+        return None
+    try:
+        # DataFrames need to be converted via .values to be hashable
+        import pandas as pd
+        if isinstance(result, pd.DataFrame):
+            return str(tuple(map(tuple, result.values.tolist())))
+    except Exception:
+        pass
+    return str(result)
+
+
+def is_empty_result(result):
+    """Check if an execution result is effectively empty."""
+    if result is None:
+        return True
+    try:
+        import pandas as pd
+        if isinstance(result, pd.DataFrame):
+            return result.empty
+    except Exception:
+        pass
+    s = str(result).strip()
+    return s == "" or "(no rows)" in s or s == "[]"
+
+
+def select_majority(traj_list, db_path):
+    """
+    Rule-based selector: among executable trajectories with a NON-empty result,
+    pick the SQL whose result hash is most common. Return (selected_sql, selected_idx).
+    Tie-breaking: first by frequency, then by trajectory order.
+    """
+    candidates = []  # (idx, sql, result_hash, is_empty)
+    for i, t in enumerate(traj_list):
+        sql = t.get("fixed_sql") or t.get("planner_sql")
+        if not sql or sql.strip() == "":
+            continue
+        exec_result, has_err = safe_execute(db_path, sql)
+        if has_err:
+            continue
+        empty = is_empty_result(exec_result)
+        candidates.append((i, sql, hash_result(exec_result), empty))
+
+    if not candidates:
+        # Nothing executable; fall back to the first trajectory
+        return traj_list[0].get("fixed_sql") or traj_list[0].get("planner_sql"), 0
+
+    # Prefer non-empty results
+    non_empty = [c for c in candidates if not c[3]]
+    pool = non_empty if non_empty else candidates
+
+    # Majority vote on result hash
+    counter = Counter(c[2] for c in pool)
+    best_hash, _ = counter.most_common(1)[0]
+    # Pick the first trajectory with this hash
+    for i, sql, h, _ in pool:
+        if h == best_hash:
+            return sql, i
+    return pool[0][1], pool[0][0]
+
+
+def main():
+    if len(sys.argv) != 3:
+        print("Usage: compute_bestofn_metrics.py <rollout.jsonl> <label>")
+        sys.exit(1)
+
+    rollout_path, label = sys.argv[1], sys.argv[2]
+
+    n_q = 0
+    n_greedy_correct = 0
+    n_pass_at_N = 0
+    n_majority_correct = 0
+    K_used = None
+
+    with open(rollout_path) as f:
+        for line in f:
+            line = line.strip()
+            if not line:
+                continue
+            sample = json.loads(line)
+            traj = sample.get("trajectories", [])
+            if not traj:
+                continue
+            n_q += 1
+            if K_used is None:
+                K_used = len(traj)
+
+            # Greedy = first trajectory's correctness
+            if traj[0].get("is_fixed_correct"):
+                n_greedy_correct += 1
+
+            # pass@N = any trajectory correct
+            if any(t.get("is_fixed_correct") for t in traj):
+                n_pass_at_N += 1
+
+            # Majority-vote selector
+            db_path = sample["db_path"]
+            gold_sql = sample["sql"]
+            gold_exec = safe_execute(db_path, gold_sql)
+            if gold_exec[1]:
+                # Gold not executable: still counted in n_q (greedy/pass@N above),
+                # but the majority pick cannot be graded, so skip it here.
+                continue
+
+            selected_sql, _idx = select_majority(traj, db_path)
+            sel_exec = safe_execute(db_path, selected_sql)
+            if not sel_exec[1] and is_execution_correct(gold_exec[0], sel_exec[0]):
+                n_majority_correct += 1
+
+    if n_q == 0:
+        print(f"{label}: no questions evaluated")
+        return
+
+    print()
+    print(f"=== {label} ===")
+    print(f"  questions evaluated: {n_q}")
+    print(f"  K used per question: {K_used}")
+    print(f"  greedy (1st traj):   {n_greedy_correct}/{n_q} = {100*n_greedy_correct/n_q:.2f}%")
+    print(f"  selector-majority:   {n_majority_correct}/{n_q} = {100*n_majority_correct/n_q:.2f}%")
+    print(f"  pass@{K_used} (oracle):    {n_pass_at_N}/{n_q} = {100*n_pass_at_N/n_q:.2f}%")
+    print()
+
+
+if __name__ == "__main__":
+    main()
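
The core of `select_majority` is a frequency vote over stringified execution results; a self-contained sketch with made-up result strings:

```python
# Majority vote over execution-result strings, as in select_majority:
# prefer non-empty results, pick the most frequent, break ties by trajectory order.
from collections import Counter

results = ["[(3,)]", "[(3,)]", "[]", "[(5,)]"]  # hypothetical stringified results
non_empty = [(i, r) for i, r in enumerate(results) if r not in ("", "[]")]
top_res, _ = Counter(r for _, r in non_empty).most_common(1)[0]
winner_idx = next(i for i, r in non_empty if r == top_res)
print(winner_idx, top_res)  # -> 0 [(3,)]
```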
scripts/compute_bestofn_with_selector.py ADDED
@@ -0,0 +1,228 @@
+"""
+Compute Best-of-N metrics with a TRAINED selector (binary YES/NO classifier).
+
+For each question, run the selector on each of the N candidates and pick the one
+with the highest YES probability. Also compute greedy / pass@N for comparison.
+
+Usage:
+    python scripts/compute_bestofn_with_selector.py <rollout.jsonl> <label> [--selector_host URL]
+
+The selector must be served as a vLLM completions endpoint
+(default http://localhost:8103).
+"""
+import argparse
+import json
+import os
+import sys
+from collections import Counter
+from concurrent.futures import ThreadPoolExecutor
+
+# Bypass HTTP proxy for local vLLM endpoints
+os.environ["NO_PROXY"] = "localhost,127.0.0.1"
+os.environ["no_proxy"] = "localhost,127.0.0.1"
+
+ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+os.chdir(ROOT)
+sys.path.insert(0, ROOT)
+
+from validator_data.validator import _execute_sql
+from data_processing.planner import is_execution_correct
+import requests
+
+
+PROMPT_TEMPLATE = (
+    "You are a SQL correctness judge.\n"
+    "Schema:\n{schema}\n\n"
+    "Question: {question}\n"
+    "External knowledge: {evidence}\n\n"
+    "Candidate SQL:\n{sql}\n\n"
+    "Execution result:\n{exec_result}\n\n"
+    "Is this SQL correct for the question? Answer YES or NO."
+)
+
+
+def qwen_chat(prompt: str) -> str:
+    return f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+
+def safe_truncate(s, n=400):
+    if s is None:
+        return "(empty)"
+    s = str(s)
+    return s if len(s) <= n else s[:n] + "..."
+
+
+def safe_execute(db_path, sql):
+    if not sql or sql.strip() == "":
+        return ("", True)
+    try:
+        return _execute_sql("./" + db_path, sql)
+    except Exception:
+        return ("", True)
+
+
+def score_via_vllm(host, prompt_chat, model_name="selector"):
+    """Get P(YES) − P(NO) via vLLM completions with logprobs."""
+    payload = {
+        "model": model_name,
+        "prompt": prompt_chat,
+        "max_tokens": 1,
+        "n": 1,
+        "temperature": 0.0,
+        "logprobs": 20,
+    }
+    try:
+        r = requests.post(f"{host}/v1/completions", json=payload, timeout=60)
+        r.raise_for_status()
+        choice = r.json()["choices"][0]
+        # Look at logprobs of top tokens; find YES vs NO
+        if "logprobs" in choice and choice["logprobs"]:
+            top = choice["logprobs"]["top_logprobs"][0]
+            yes_lp = max((top[k] for k in (" YES", "YES", " yes", "yes", " Yes", "Yes") if k in top), default=-100.0)
+            no_lp = max((top[k] for k in (" NO", "NO", " no", "no", " No", "No") if k in top), default=-100.0)
+            return yes_lp - no_lp
+        # Fallback: text match
+        text = choice.get("text", "").strip().upper()
+        return 1.0 if text.startswith("YES") else (-1.0 if text.startswith("NO") else -100.0)
+    except Exception as e:
+        sys.stderr.write(f"score_via_vllm err: {type(e).__name__}: {e}\n")
+        return -100.0
+
+
+def build_prompt_chat(sample, t, exec_result_str=None):
+    """Build the selector prompt.
+
+    `exec_result_str` MUST be the actual SQL execution result preview (or error message).
+    Do NOT pass the gold-graded label — that leaks the correctness label into the prompt
+    and makes the selector trivially match the oracle.
+    """
+    schema = sample.get("schema", "")
+    question = sample.get("question", "")
+    evidence = sample.get("evidence", "") or "None"
+    fixed_sql = t.get("fixed_sql") or t.get("planner_sql") or ""
+    if exec_result_str is None:
+        # Safe default at inference time: signal unknown (selector must judge from SQL alone)
+        exec_result_str = "(execution result not available)"
+    prompt = PROMPT_TEMPLATE.format(
+        schema=safe_truncate(schema, 3000),
+        question=question,
+        evidence=evidence,
+        sql=safe_truncate(fixed_sql, 800),
+        exec_result=safe_truncate(exec_result_str, 300),
+    )
+    return qwen_chat(prompt)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("rollout_jsonl")
+    parser.add_argument("label")
+    parser.add_argument("--selector_host", default="http://localhost:8103")
+    args = parser.parse_args()
+
+    n_q = 0
+    n_greedy = 0
+    n_pass_at_N = 0
+    n_majority = 0  # rule-based majority (for comparison)
+    n_selector = 0  # trained selector pick
+    K_used = None
+
+    samples = []
+    with open(args.rollout_jsonl) as f:
+        for line in f:
+            line = line.strip()
+            if line:
+                samples.append(json.loads(line))
+
+    print(f"Loaded {len(samples)} samples")
+
+    for sample in samples:
+        traj = sample.get("trajectories", [])
+        if not traj:
+            continue
+        n_q += 1
+        if K_used is None:
+            K_used = len(traj)
+
+        # Greedy = first traj
+        if traj[0].get("is_fixed_correct"):
+            n_greedy += 1
+
+        # pass@N = any correct
+        if any(t.get("is_fixed_correct") for t in traj):
+            n_pass_at_N += 1
+
+        # Skip if the gold SQL is not executable (majority/selector cannot be graded)
+        db_path = sample["db_path"]
+        gold_sql = sample["sql"]
+        true_exec = safe_execute(db_path, gold_sql)
+        if true_exec[1]:
+            continue
+
+        # Execute all candidates' fixed SQLs once
+        with ThreadPoolExecutor(max_workers=8) as exe:
+            exec_results = list(exe.map(
+                lambda t: safe_execute(db_path, t.get("fixed_sql") or t.get("planner_sql") or ""),
+                traj
+            ))
+
+        # Rule-based majority: pick the most common non-empty execution result
+        majority_picks = []
+        for i, (er, t) in enumerate(zip(exec_results, traj)):
+            if er[1]:
+                continue
+            res_str = str(er[0]).strip()
+            if not res_str or "(no rows)" in res_str or res_str == "[]":
+                continue
+            majority_picks.append((i, res_str))
+        if majority_picks:
+            counter = Counter(s for _, s in majority_picks)
+            top_res, _ = counter.most_common(1)[0]
+            for i, s in majority_picks:
+                if s == top_res:
+                    if traj[i].get("is_fixed_correct"):
+                        n_majority += 1
+                    break
+        else:
+            if traj[0].get("is_fixed_correct"):
+                n_majority += 1
+
+        # Trained selector: score each candidate using the REAL execution result
+        # (no gold-label leak). Re-use the exec_results computed above.
+        scores = []
+        with ThreadPoolExecutor(max_workers=8) as exe:
+            def _make_prompt(idx, t_):
+                er = exec_results[idx]
+                if er[1]:
+                    exec_str = f"Error: {er[0]}"
+                else:
+                    rows_str = str(er[0])
+                    if not rows_str.strip() or rows_str.strip() == "[]":
+                        exec_str = "OK. Result rows (preview): (no rows)"
+                    else:
+                        exec_str = f"OK. Result rows (preview): {rows_str[:300]}"
+                return build_prompt_chat(sample, t_, exec_result_str=exec_str)
+            futs = [exe.submit(score_via_vllm, args.selector_host, _make_prompt(i, t)) for i, t in enumerate(traj)]
+            for f in futs:
+                scores.append(f.result())
+        best_idx = max(range(len(scores)), key=lambda i: scores[i])
+        if traj[best_idx].get("is_fixed_correct"):
+            n_selector += 1
+
+    if n_q == 0:
+        print(f"{args.label}: no questions evaluated")
+        return
+
+    print()
+    print(f"=== {args.label} ===")
+    print(f"  questions evaluated: {n_q}")
+    print(f"  K used per question: {K_used}")
+    print(f"  greedy (1st traj):   {n_greedy}/{n_q} = {100*n_greedy/n_q:.2f}%")
+    print(f"  rule-based majority: {n_majority}/{n_q} = {100*n_majority/n_q:.2f}%")
+    print(f"  trained selector:    {n_selector}/{n_q} = {100*n_selector/n_q:.2f}%")
+    print(f"  pass@{K_used} (oracle):    {n_pass_at_N}/{n_q} = {100*n_pass_at_N/n_q:.2f}%")
+    print()
+
+
+if __name__ == "__main__":
+    main()
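
The YES/NO scoring rule in `score_via_vllm` reduces to a max over token variants in the returned top-logprobs dict; a sketch on a hypothetical logprobs payload:

```python
# Hypothetical top-logprobs dict for the single generated token.
top = {" YES": -0.3, " NO": -1.5, " Maybe": -4.0}
yes_lp = max((top[k] for k in (" YES", "YES", " yes", "yes", " Yes", "Yes") if k in top), default=-100.0)
no_lp = max((top[k] for k in (" NO", "NO", " no", "no", " No", "No") if k in top), default=-100.0)
print(yes_lp - no_lp)  # 1.2 -> positive score = selector leans YES (SQL judged correct)
```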
scripts/run_pipeline_rollouts.py ADDED
@@ -0,0 +1,477 @@
+"""
+Pipeline rollout driver for the 3-stage collaborative-ORPO experiment.
+
+Three-stage pipeline:
+    q → PLANNER   (Qwen-Coder-0.5B, SFT'd) → plan + first-cut SQL
+      → VALIDATOR (Qwen-Coder-0.5B, SFT'd) → free-form critique (4 sections)
+      → FIXER     (Qwen-Coder-0.5B, SFT'd) → final SQL
+
+For each input question we sample K planner outputs with stochastic decoding,
+then for each planner output we sample K_val validator outputs, and for each
+(planner, validator) pair we sample K_fix fixer outputs. Each leaf trajectory
+is graded by executing the fixer's final SQL.
+
+The output JSONL is consumed by build_rl_data_collaborative.py to construct
+preference pairs (planner-indep / planner-collab / validator-collab / fixer).
+
+Usage:
+    # Three vLLM endpoints, e.g.
+    #   GPU 0:8100 = planner
+    #   GPU 1:8101 = validator
+    #   GPU 1:8102 = fixer (validator+fixer can co-locate on one GPU since both are 0.5B)
+    python scripts/run_pipeline_rollouts.py \\
+        --input_file data/sft_bird_with_evidence_train_text2sql.json \\
+        --output_file data/rollouts/bird_train_3stage_K4.jsonl \\
+        --planner_host http://localhost:8100 \\
+        --validator_host http://localhost:8101 \\
+        --fixer_host http://localhost:8102 \\
+        --K 4 --K_val 2 --K_fix 1 \\
+        --temperature 0.7 --top_p 0.9 \\
+        --max_questions 1000
+"""
+
+import argparse
+import json
+import os
+import re
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor
+
+# Bypass HTTP proxy for local vLLM endpoints
+os.environ["NO_PROXY"] = "localhost,127.0.0.1"
+os.environ["no_proxy"] = "localhost,127.0.0.1"
+
+import requests
+from tqdm import tqdm
+
+ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+os.chdir(ROOT)
+sys.path.insert(0, ROOT)
+
+from validator_data.validator import _execute_sql
+from data_processing.planner import is_execution_correct
+
+
+PLANNER_PROMPT_TEMPLATE = (
+    "{schema}\n\n"
+    "Question: {question}\n"
+    "External knowledge: {evidence}\n\n"
+    "Planning:"
+)
+
+# Validator prompt — must match what the validator was SFT'd to expect.
+VALIDATOR_PROMPT_HEADER = (
+    "You are a SQL critique agent. Output FOUR critique sections "
+    "(<select>...</select>, <condition>...</condition>, <join>...</join>, <order>...</order>) "
+    "analysing the SQL query below; do NOT output any SQL.\n\n"
+)
+
+# Specialized 2-validator headers (match the SFT data built in build_validator_2agents_v3.py).
+VALIDATOR_SEL_HEADER = (
+    "You are a SQL SELECT-clause critique agent. Output ONE critique section "
+    "<select>...</select> analysing the SELECT clause of the SQL query below; "
+    "do NOT output any SQL. Use 'None' if the SELECT clause looks correct.\n\n"
+)
+VALIDATOR_COND_HEADER = (
+    "You are a SQL CONDITION critique agent. Output ONE critique section "
+    "<condition>...</condition> analysing the WHERE/HAVING/CASE-WHEN conditions "
+    "of the SQL query below; do NOT output any SQL. Use 'None' if the conditions "
+    "look correct.\n\n"
+)
+
+VALIDATOR_PROMPT_BODY = (
+    "database schema:\n{schema}\n\n"
+    "Question: {question}\n"
+    "External knowledge: {evidence}\n\n"
+    "Generated SQL query: {sql_query}\n\n"
+    "Execution response:\n{execution_response}\n\n"
+)
+
+# Fixer prompt — must match what the fixer was SFT'd to expect.
+FIXER_PROMPT_HEADER = (
+    "You are a SQL fixer. Given the question, schema, original SQL query, "
+    "execution response, and the validator's critique below, output ONLY the corrected "
+    "final SQL inside ```sql ... ``` markers.\n\n"
+)
+
+
+def qwen_chat(prompt: str) -> str:
+    return f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+
+def vllm_complete(host, model, prompt, n, temperature, top_p, max_tokens, seed=100):
+    payload = {
+        "model": model,
+        "prompt": prompt,
+        "max_tokens": max_tokens,
+        "n": n,
+        "temperature": temperature,
+        "top_p": top_p,
+        "stop": ["<|im_end|>", "<|endoftext|>"],
+        "seed": seed,
+    }
+    for attempt in range(3):
+        try:
+            r = requests.post(f"{host}/v1/completions", json=payload, timeout=120)
+            r.raise_for_status()
+            return [c["text"] for c in r.json()["choices"]]
+        except Exception as e:
+            if attempt == 2:
+                print(f"vLLM call failed: {e}", file=sys.stderr)
+                return []
+            time.sleep(1)
+    return []
+
+
+def extract_sql_from_planner(text):
+    if text is None:
+        return ""
+    m = re.search(r"Final SQL query:\s*```(.+?)```", text, re.DOTALL)
+    if m:
+        s = m.group(1).strip()
+    else:
+        m = re.search(r"```(.+?)```", text, re.DOTALL)
+        if m:
+            s = m.group(1).strip()
+        else:
+            return text.strip()
+    if s.startswith("sql"):
+        s = s[3:].strip()
+    return s
+
+
+def extract_sql_from_fixer(text):
+    if text is None:
+        return ""
+    m = re.search(r"```sql\s*\n?(.+?)```", text, re.DOTALL | re.IGNORECASE)
+    if m:
+        return m.group(1).strip()
+    m = re.search(r"```(.+?)```", text, re.DOTALL)
+    if m:
+        s = m.group(1).strip()
+        if s.lower().startswith("sql"):
+            s = s[3:].strip()
+        return s
+    return text.strip().strip("`").strip()
+
+
+def parse_validator_sections(text):
+    sections = {"select": "", "condition": "", "join": "", "order": ""}
+    for tag in sections:
+        m = re.search(fr"<{tag}>(.*?)</{tag}>", text, re.DOTALL | re.IGNORECASE)
+        if m:
+            sections[tag] = m.group(1).strip()
+    return sections
+
+
+def safe_execute(db_path, sql):
+    if not sql or sql.strip() == "":
+        return ("", True)
+    try:
+        return _execute_sql("./" + db_path, sql)
+    except Exception as e:
+        return (str(e), True)
+
+
+def build_planner_prompt(sample):
+    return PLANNER_PROMPT_TEMPLATE.format(
+        schema=sample.get("schema_sequence") or sample.get("schema") or "",
+        question=sample.get("question", ""),
+        evidence=sample.get("evidence", "") or "None",
+    )
+
+
+def _validator_body(sample, planner_sql, exec_response):
+    """Shared body for the unified and the two specialized validator prompts."""
+    return VALIDATOR_PROMPT_BODY.format(
+        schema=sample.get("schema_sequence") or sample.get("schema") or "",
+        question=sample.get("question", ""),
+        evidence=sample.get("evidence", "") or "None",
+        sql_query=planner_sql,
+        execution_response=exec_response,
+    )
+
+
+def build_validator_prompt(sample, planner_sql, exec_response):
+    return VALIDATOR_PROMPT_HEADER + _validator_body(sample, planner_sql, exec_response)
+
+
+def build_validator_sel_prompt(sample, planner_sql, exec_response):
+    return VALIDATOR_SEL_HEADER + _validator_body(sample, planner_sql, exec_response)
+
+
+def build_validator_cond_prompt(sample, planner_sql, exec_response):
+    return VALIDATOR_COND_HEADER + _validator_body(sample, planner_sql, exec_response)
+
+
+def build_fixer_prompt(sample, planner_sql, exec_response, critique):
+    body = (
+        f"database schema:\n{sample.get('schema_sequence') or sample.get('schema') or ''}\n\n"
+        f"Question: {sample.get('question', '')}\n"
+        f"External knowledge: {sample.get('evidence', '') or 'None'}\n\n"
+        f"Generated SQL query: {planner_sql}\n\n"
+        f"Execution response:\n{exec_response}\n\n"
+    )
+    return FIXER_PROMPT_HEADER + body + "\n\nValidator critique:\n" + critique + "\n\nFinal SQL:"
+
+
+def process_sample(sample, args):
+    db_path = sample["db_path"]
+    gold_sql = sample["sql"]
+    true_exec = safe_execute(db_path, gold_sql)
+    if true_exec[1]:
+        return None  # gold has error; skip
+
+    # Stage 1: planner — K samples (optionally split across temperatures via --mixed_temp)
+    planner_prompt_raw = build_planner_prompt(sample)
+    planner_chat = qwen_chat(planner_prompt_raw)
+    if getattr(args, "mixed_temp", "").strip():
+        temps = [float(x) for x in args.mixed_temp.split(",") if x.strip()]
+        # distribute args.K samples across temperatures
+        per_temp = max(1, args.K // len(temps))
+        remainder = args.K - per_temp * len(temps)
+        planner_outputs = []
+        for i, t in enumerate(temps):
+            n_t = per_temp + (1 if i < remainder else 0)
+            if n_t <= 0:
+                continue
+            outs = vllm_complete(
+                args.planner_host, "planner", planner_chat,
+                n=n_t, temperature=t, top_p=args.top_p,
+                max_tokens=args.max_planner_tokens, seed=args.seed + i * 31,
+            )
+            planner_outputs.extend(outs)
+    else:
+        planner_outputs = vllm_complete(
+            args.planner_host, "planner", planner_chat,
+            n=args.K, temperature=args.temperature, top_p=args.top_p,
+            max_tokens=args.max_planner_tokens, seed=args.seed,
+        )
+    if not planner_outputs:
+        return None
+
+    trajectories = []
+    for plan in planner_outputs:
+        planner_sql = extract_sql_from_planner(plan)
+        if not planner_sql:
+            continue
+        planner_exec = safe_execute(db_path, planner_sql)
+        exec_response = (
+            f"Error: {planner_exec[0]}" if planner_exec[1]
+            else f"OK. Result rows (preview): {str(planner_exec[0])[:300]}"
+        )
+
+        # Stage 2: validator — K_val samples per planner output. Three modes:
+        #   (a) two specialized validators: --validator_sel_host + --validator_cond_host (per-paper design)
+        #   (b) legacy unified validator:   --validator_host (single 4-section model)
+        #   (c) none: insert an all-OK placeholder critique
+        v_sel = getattr(args, "validator_sel_host", "") or ""
+        v_cond = getattr(args, "validator_cond_host", "") or ""
+        if v_sel and v_sel.lower() != "none" and v_cond and v_cond.lower() != "none":
+            sel_prompt = build_validator_sel_prompt(sample, planner_sql, exec_response)
+            cond_prompt = build_validator_cond_prompt(sample, planner_sql, exec_response)
+            sel_outputs = vllm_complete(
+                v_sel, "validator_sel", qwen_chat(sel_prompt),
+                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
+                max_tokens=args.max_validator_tokens, seed=args.seed,
+            )
+            cond_outputs = vllm_complete(
+                v_cond, "validator_cond", qwen_chat(cond_prompt),
+                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
+                max_tokens=args.max_validator_tokens, seed=args.seed + 1,
+            )
+            # Pair selection+condition outputs index-wise, padding with "None" if one ran short.
+            validator_outputs = []
+            for i in range(args.K_val):
+                s_out = sel_outputs[i] if i < len(sel_outputs) else "<select>\nSELECT.\nNone\n</select>"
+                c_out = cond_outputs[i] if i < len(cond_outputs) else "<condition>\nCONDITION.\nNone\n</condition>"
+                combined = (
+                    s_out.strip() + "\n\n" +
+                    c_out.strip() + "\n\n" +
+                    "<join>\nJOIN.\nNone\n</join>\n\n"
+                    "<order>\nORDER BY.\nNone\n</order>"
+                )
+                validator_outputs.append(combined)
+            validator_prompt_raw = sel_prompt + "\n\n[+]\n\n" + cond_prompt  # for logging
+        elif args.validator_host and args.validator_host.lower() != "none":
+            validator_prompt_raw = build_validator_prompt(sample, planner_sql, exec_response)
+            validator_chat = qwen_chat(validator_prompt_raw)
+            validator_outputs = vllm_complete(
+                args.validator_host, "validator", validator_chat,
+                n=args.K_val, temperature=args.temperature, top_p=args.top_p,
+                max_tokens=args.max_validator_tokens, seed=args.seed,
+            )
+        else:
+            validator_prompt_raw = build_validator_prompt(sample, planner_sql, exec_response)
+            validator_outputs = [
+                "<select>\nSELECT.\nNone\n</select>\n\n"
+                "<condition>\nCONDITION.\nNone\n</condition>\n\n"
+                "<join>\nJOIN.\nNone\n</join>\n\n"
+                "<order>\nORDER BY.\nNone\n</order>"
+            ] * args.K_val
+
+        for val_out in validator_outputs:
+            sections = parse_validator_sections(val_out)
+            critique_text = val_out.strip()  # full critique as the validator's "completion"
+
+            # Stage 3: fixer — K_fix samples (skip when fixer_host=none → keep planner_sql)
+            fixer_prompt_raw = build_fixer_prompt(sample, planner_sql, exec_response, critique_text)
+            if args.fixer_host and args.fixer_host.lower() != "none":
+                fixer_chat = qwen_chat(fixer_prompt_raw)
+                fixer_outputs = vllm_complete(
+                    args.fixer_host, "fixer", fixer_chat,
+                    n=args.K_fix, temperature=args.temperature, top_p=args.top_p,
+                    max_tokens=args.max_fixer_tokens, seed=args.seed,
+                )
+            else:
+                fixer_outputs = [""] * args.K_fix  # empty → fixed_sql falls back to planner_sql
+
+            for fix_out in fixer_outputs:
+                fixed_sql = extract_sql_from_fixer(fix_out) or planner_sql
+                trajectories.append({
+                    "planner_prompt": planner_prompt_raw,
+                    "planner_output": plan,
+                    "planner_sql": planner_sql,
+                    "planner_exec_ok": not planner_exec[1],
+                    "validator_prompt": validator_prompt_raw,
+                    "validator_output": critique_text,
+                    "fb_select": sections["select"],
+                    "fb_condition": sections["condition"],
+                    "fb_join": sections["join"],
+                    "fb_order": sections["order"],
+                    "fixer_prompt": fixer_prompt_raw,
+                    "fixer_output": fix_out,
+                    "fixed_sql": fixed_sql,
+                })
+
+    if not trajectories:
+        return None
+
+    # Grade each trajectory
+    with ThreadPoolExecutor(max_workers=8) as exe:
+        planner_execs = list(exe.map(
+            lambda t: safe_execute(db_path, t["planner_sql"]), trajectories
+        ))
+        fixed_execs = list(exe.map(
+            lambda t: safe_execute(db_path, t["fixed_sql"]), trajectories
+        ))
+
+    for i, t in enumerate(trajectories):
+        pe, fe = planner_execs[i], fixed_execs[i]
+        t["is_planner_correct"] = (
+            (not pe[1]) and is_execution_correct(true_exec[0], pe[0])
+        )
+        t["is_fixed_correct"] = (
+            (not fe[1]) and is_execution_correct(true_exec[0], fe[0])
+        )
+
+    return {
+        "question": sample["question"],
+        "evidence": sample.get("evidence", ""),
+        "db_path": db_path,
+        "db_id": sample.get("db_id", ""),
+        "schema": sample.get("schema_sequence") or sample.get("schema") or "",
+        "sql": gold_sql,
+        "trajectories": trajectories,
+    }
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input_file", required=True)
+    parser.add_argument("--output_file", required=True)
+    parser.add_argument("--planner_host", default="http://localhost:8100")
+    parser.add_argument("--validator_host", default="http://localhost:8101",
+                        help="Single unified validator host (legacy 4-section). "
+                             "Ignored when --validator_sel_host AND --validator_cond_host are set.")
+    parser.add_argument("--validator_sel_host", default="",
+                        help="Specialized SELECT-clause validator host (paper v_s). "
+                             "When both this and --validator_cond_host are set, the unified validator is bypassed.")
+    parser.add_argument("--validator_cond_host", default="",
+                        help="Specialized CONDITION validator host (paper v_c).")
+    parser.add_argument("--fixer_host", default="http://localhost:8102")
+    parser.add_argument("--K", type=int, default=4, help="planner samples per question")
+    parser.add_argument("--K_val", type=int, default=2, help="validator samples per planner output")
+    parser.add_argument("--K_fix", type=int, default=1, help="fixer samples per (planner, validator)")
+    parser.add_argument("--temperature", type=float, default=0.7)
+    parser.add_argument("--top_p", type=float, default=0.9)
+    parser.add_argument("--seed", type=int, default=100)
+    parser.add_argument("--max_planner_tokens", type=int, default=1024)
+    parser.add_argument("--max_validator_tokens", type=int, default=512)
+    parser.add_argument("--max_fixer_tokens", type=int, default=512)
+    parser.add_argument("--max_questions", type=int, default=-1)
+    parser.add_argument("--n_threads", type=int, default=8)
+    parser.add_argument("--mixed_temp", type=str, default="",
+                        help="Comma-separated temperatures to mix across K planner samples (e.g. '0.5,0.7,0.9,1.1'). "
+                             "If set, args.temperature is ignored for the planner stage. Used to boost pass@K diversity.")
+    args = parser.parse_args()
+
+    print(f"Loading {args.input_file}...")
+    with open(args.input_file) as f:
+        data = json.load(f)
+    if args.max_questions > 0:
+        data = data[: args.max_questions]
+    print(f"  {len(data)} questions")
+
+    os.makedirs(os.path.dirname(args.output_file), exist_ok=True)
+
+    seen = set()
+    if os.path.exists(args.output_file):
+        with open(args.output_file) as f:
+            for line in f:
+                try:
+                    d = json.loads(line)
+                    seen.add((d["question"], d.get("db_id", "")))
+                except Exception:
+                    pass
+        print(f"  resuming: skip {len(seen)} already-processed")
+
+    todo = [s for s in data if (s["question"], s.get("db_id", "")) not in seen]
+    print(f"  to process: {len(todo)}")
+
+    fout = open(args.output_file, "a")
+    n_ok = 0
+    n_winloss = 0
+
+    with ThreadPoolExecutor(max_workers=args.n_threads) as pool:
+        futures = {pool.submit(process_sample, s, args): s for s in todo}
+        pbar = tqdm(total=len(todo), desc="rollouts")
+        for fut in futures:
+            try:
+                result = fut.result()
+            except Exception as e:
+                print(f"sample failed: {e}", file=sys.stderr)
+                pbar.update(1)
+                continue
+            if result is None:
+                pbar.update(1)
+                continue
+            n_ok += 1
+            wins = sum(1 for t in result["trajectories"] if t["is_fixed_correct"])
+            losses = sum(1 for t in result["trajectories"] if not t["is_fixed_correct"])
+            if wins > 0 and losses > 0:
+                n_winloss += 1
+            fout.write(json.dumps(result) + "\n")
+            fout.flush()
+            pbar.update(1)
+            pbar.set_postfix(ok=n_ok, winloss=n_winloss)
+        pbar.close()
+
+    fout.close()
+    print(f"Done. processed={n_ok}, with_winloss={n_winloss} ({100*n_winloss/max(n_ok,1):.1f}%)")
+
+
+if __name__ == "__main__":
+    main()
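
Downstream consumers read the rollout JSONL written here; a sketch of counting the win/loss questions (the ones that can yield preference pairs), assuming an output file from the usage example:

```python
# Sketch: count questions whose trajectories contain BOTH a correct and an
# incorrect final SQL (the "win/loss" questions tracked by the driver's pbar).
import json

n_q = n_winloss = 0
with open("data/rollouts/bird_train_3stage_K4.jsonl") as f:
    for line in f:
        traj = json.loads(line)["trajectories"]
        n_q += 1
        wins = sum(1 for t in traj if t["is_fixed_correct"])
        if 0 < wins < len(traj):
            n_winloss += 1
print(f"{n_winloss}/{n_q} questions have both win and loss trajectories")
```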