| """Build fixer ORPO iter-2 're-planner' dataset. |
| |
| Insight: the current fixer is too conservative — it changes planner_sql only 1.4% |
| of the time and rescues 0/533 hard questions on BIRD-dev. The fixer architecture |
| needs to be re-framed: instead of 'apply small critique-driven edit', train it as |
| a re-planner that produces a COMPLETE correct alternative when given a failed |
| attempt. |
| |
Data source: K=4 / K=8 BIRD-train rollouts (see SRC_PATHS). For each question, find a
(wrong-trajectory, correct-trajectory) pair within the K sampled trajectories. Use:
| - chosen = correct trajectory's planner_sql (the alternative that works) |
| - rejected = wrong trajectory's planner_sql or the fixer's mistaken output |
| - prompt = fixer's standard prompt with the wrong trajectory as the input |
| |
| Output: data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner |
| """ |
| import json |
| import os |
| import random |
| import re |
| from datasets import Dataset, DatasetDict |
|
|
# Destination directory for the saved HF DatasetDict (train_dpo / test_dpo splits).
OUT_DIR = "/home/datht/mats-sql-tist/data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner"


# Rollout JSONL files to mine for (wrong-trajectory, correct-trajectory) pairs.
# Files that do not exist are skipped at load time, so this list may be a superset
# of what is actually present on disk.
SRC_PATHS = [
    "/home/datht/mats-sql-tist/data/rollouts/bird_train_3stage_K4.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_3stage_K4.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/iter2_bird_train_3stage_K8.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_2stage_K4.jsonl",
]
|
|
|
|
def normalize_sql(sql):
    """Canonicalize SQL for comparison: collapse whitespace runs to single
    spaces, trim both ends, and lowercase. `None` is treated as empty."""
    collapsed = " ".join((sql or "").split())
    return collapsed.lower()
|
|
|
|
def main():
    """Mine rollout files for (prompt, chosen, rejected) re-planner pairs and
    save them as a DatasetDict with `train_dpo` / `test_dpo` splits.

    For each rollout sample that has both correct and wrong trajectories, the
    shortest correct planner_sql becomes `chosen` and each distinct wrong
    planner_sql becomes a `rejected`, prompted with that wrong trajectory's
    fixer prompt.
    """
    rng = random.Random(42)  # fixed seed => reproducible shuffle / split
    pairs = []
    seen_keys = set()

    for p in SRC_PATHS:
        if not os.path.exists(p):
            continue  # source list may include files not present on this machine
        with open(p) as f:
            for line in f:
                s = json.loads(line)
                traj = s.get("trajectories", [])
                if len(traj) < 2:
                    continue  # need at least one of each outcome to form a pair
                correct_trajs = [t for t in traj if t.get("is_planner_correct")]
                wrong_trajs = [t for t in traj if not t.get("is_planner_correct")]
                if not correct_trajs or not wrong_trajs:
                    continue

                # Hoisted out of the wrong-trajectory loop: the chosen SQL (the
                # shortest correct planner_sql) is the same for every wrong
                # trajectory of this sample, so compute it once.
                best_correct = min(
                    correct_trajs, key=lambda t: len(t.get("planner_sql") or "")
                )
                csql = (best_correct.get("planner_sql") or "").strip()
                if not csql:
                    continue
                norm_csql = normalize_sql(csql)
                question = s.get("question", "")

                for wt in wrong_trajs:
                    wsql = (wt.get("planner_sql") or "").strip()
                    if not wsql:
                        continue
                    # Skip "wrong" SQL that is textually identical to the chosen
                    # one — no preference signal there.
                    if normalize_sql(wsql) == norm_csql:
                        continue
                    fixer_prompt = (wt.get("fixer_prompt") or "").strip()
                    if not fixer_prompt:
                        continue
                    # Dedup on the raw strings, not hash(): str hashes are salted
                    # per process (PYTHONHASHSEED) and can collide, which would
                    # silently drop distinct pairs.
                    key = (question, normalize_sql(wsql))
                    if key in seen_keys:
                        continue
                    seen_keys.add(key)
                    pairs.append({
                        "prompt": fixer_prompt,
                        "chosen": f"```sql\n{csql}\n```",
                        "rejected": f"```sql\n{wsql}\n```",
                        "db_path": s.get("db_path", ""),
                        "question": question,
                        "db_id": s.get("db_id", ""),
                    })

    rng.shuffle(pairs)

    # Hold out ~1/30 of the pairs for test, with a floor of 40 rows, but never
    # more rows than we actually have.
    n_test = min(len(pairs), max(40, len(pairs) // 30))
    test = pairs[:n_test]
    train = pairs[n_test:]

    dd = DatasetDict({
        "train_dpo": Dataset.from_list(train),
        "test_dpo": Dataset.from_list(test),
    })
    dd.save_to_disk(OUT_DIR)

    print("=== Fixer ORPO iter-2 RE-PLANNER dataset ===")
    print(f" total pairs: {len(pairs)}")
    print(f" train: {len(train)}, test: {len(test)}")
    print(f" Saved to {OUT_DIR}")
|
|
|
|
# Script entry point: build the pairs and save the dataset when run directly.
if __name__ == "__main__":
    main()
|
|