| """Build v3 validator SFT data with balanced all-OK + critique rows. |
| |
| v2 had 8.1% all-OK rows → validator hallucinates critiques at inference. |
| v3 supplements v2 with ~5000 all-OK rows mined from real planner_correct |
| trajectories on BIRD-TRAIN, so the validator learns to stay silent when |
| the planner SQL is already correct. |
| |
| Output: data/multi-agents/fixed/sft-validator-diverse-v3 |
| """ |
| import json |
| import random |
| from datasets import load_from_disk, Dataset, DatasetDict |
|
|
# Canonical "all-OK" validator completions. Each follows the four-section
# critique format (<select>/<condition>/<join>/<order>) that the validator
# emits, but every section reports no issues. One of these is sampled
# uniformly per mined planner-correct prompt so the model sees varied
# phrasings of "the SQL is already correct".
# NOTE: these strings are training targets — do not edit their wording
# without regenerating the dataset.
OK_TEMPLATES = [
    """<select>
SELECT.
No issues with SELECT.
</select>

<condition>
CONDITION.
No issues with WHERE/HAVING.
</condition>

<join>
JOIN.
Tables and join keys look correct.
</join>

<order>
ORDER BY.
None
</order>""",
    """<select>
SELECT.
The SELECT clause is correct.
</select>

<condition>
CONDITION.
Filter conditions look correct.
</condition>

<join>
JOIN.
No issues with JOIN.
</join>

<order>
ORDER BY.
None
</order>""",
    """<select>
SELECT.
None
</select>

<condition>
CONDITION.
None
</condition>

<join>
JOIN.
None
</join>

<order>
ORDER BY.
None
</order>""",
    """<select>
SELECT.
The projection list matches the question.
</select>

<condition>
CONDITION.
WHERE/HAVING clauses are correct.
</condition>

<join>
JOIN.
Tables and join keys are correct.
</join>

<order>
ORDER BY.
The ordering is correct.
</order>""",
]
|
|
|
|
def main():
    """Assemble the v3 validator SFT dataset and save it to disk.

    Combines the existing v2 train/test splits with freshly mined all-OK
    rows (validator prompts from planner-correct trajectories), each paired
    with a randomly chosen OK_TEMPLATES completion, so the validator learns
    to stay silent when the planner SQL is already correct.

    Side effects: reads the v2 dataset and a rollout JSONL file, writes the
    combined DatasetDict to the v3 output directory, prints a summary.
    """
    rng = random.Random(42)  # fixed seed for reproducible shuffles/choices

    # --- Load the existing v2 splits (critique-heavy data) ---------------
    v2 = load_from_disk("/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v2")
    v2_train = [{"prompt": r["prompt"], "completion": r["completion"]} for r in v2["train"]]
    v2_test = [{"prompt": r["prompt"], "completion": r["completion"]} for r in v2["test"]]

    # --- Mine unique validator prompts from planner-correct rollouts -----
    src = "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_2stage_K4.jsonl"
    ok_rows = []
    seen_prompts = set()  # dedupe: identical prompts would leak train/test
    with open(src, encoding="utf-8") as f:
        for line in f:
            sample = json.loads(line)
            for traj in sample.get("trajectories", []):
                if not traj.get("is_planner_correct"):
                    continue
                vp = (traj.get("validator_prompt") or "").strip()
                if not vp:
                    # No stored validator prompt: reconstruct it from the
                    # planner prompt plus its SQL, mirroring the format the
                    # validator sees at inference time.
                    pp = (traj.get("planner_prompt") or "").strip()
                    psql = (traj.get("planner_sql") or "").strip()
                    if not pp or not psql:
                        continue
                    vp = pp + "\n\nSQL query:\n" + psql
                if vp in seen_prompts:
                    continue
                seen_prompts.add(vp)
                ok_rows.append(vp)

    rng.shuffle(ok_rows)

    # Split BEFORE truncating. The previous version first truncated
    # ok_rows to target_ok and only then sliced [target_ok:target_ok+100]
    # for the test set — a slice that was always empty, so no all-OK rows
    # ever reached the test split. Carve both slices from the full list.
    target_ok = 5200
    train_ok = ok_rows[:target_ok]
    test_ok = ok_rows[target_ok:target_ok + 100]

    # Pair each mined prompt with a randomly chosen all-OK completion.
    new_rows = [{"prompt": vp, "completion": rng.choice(OK_TEMPLATES)} for vp in train_ok]
    new_test_rows = [{"prompt": vp, "completion": rng.choice(OK_TEMPLATES)} for vp in test_ok]

    # --- Combine with v2 and shuffle the training split -------------------
    train_combined = v2_train + new_rows
    test_combined = v2_test + new_test_rows
    rng.shuffle(train_combined)

    dd = DatasetDict({
        "train": Dataset.from_list(train_combined),
        "test": Dataset.from_list(test_combined),
    })

    out_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v3"
    dd.save_to_disk(out_dir)

    # --- Summary ----------------------------------------------------------
    # Heuristic all-OK detector: templates either say "No issues" or repeat
    # "None" in at least three sections; critique rows do neither.
    n_train = len(train_combined)
    n_train_ok = sum(1 for r in train_combined if "No issues" in r["completion"] or r["completion"].count("None") >= 3)
    print(f"v3 built:")
    print(f"  train: {n_train} ({n_train_ok} all-OK, {n_train - n_train_ok} critique)")
    print(f"  test: {len(test_combined)}")
    print(f"  Saved to {out_dir}")
|
|
|
|
# Script entry point: build and save the v3 dataset when run directly.
if __name__ == "__main__":
    main()
|
|