"""Build fixer ORPO iter-2 're-planner' dataset.
Insight: the current fixer is too conservative — it changes planner_sql only 1.4%
of the time and rescues 0/533 hard questions on BIRD-dev. The fixer architecture
needs to be re-framed: instead of 'apply small critique-driven edit', train it as
a re-planner that produces a COMPLETE correct alternative when given a failed
attempt.
Data source: K=4 BIRD-train rollouts. For each question, find a (wrong-trajectory,
correct-trajectory) pair within the K=4 samples. Use:
- chosen = correct trajectory's planner_sql (the alternative that works)
- rejected = wrong trajectory's planner_sql or the fixer's mistaken output
- prompt = fixer's standard prompt with the wrong trajectory as the input
Output: data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner
"""
import json
import os
import random
import re
from datasets import Dataset, DatasetDict
# Destination directory for the saved HuggingFace DatasetDict
# (contains "train_dpo" and "test_dpo" splits).
OUT_DIR = "/home/datht/mats-sql-tist/data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner"
# Rollout JSONL files mined for (wrong-trajectory, correct-trajectory) pairs.
# Paths that do not exist are silently skipped at runtime.
SRC_PATHS = [
    "/home/datht/mats-sql-tist/data/rollouts/bird_train_3stage_K4.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_3stage_K4.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/iter2_bird_train_3stage_K8.jsonl",
    "/home/datht/mats-sql-tist/data/rollouts/scaleup_bird_train_2stage_K4.jsonl",
]
def normalize_sql(sql):
    """Canonicalize SQL for fuzzy equality: collapse whitespace runs, trim, lowercase.

    ``None`` (or any falsy value) normalizes to the empty string.
    """
    # str.split() with no args splits on any whitespace run and drops
    # leading/trailing whitespace, so join+lower matches the regex version.
    return " ".join((sql or "").split()).lower()
def main():
    """Mine rollout files into (prompt, chosen, rejected) ORPO pairs and save them.

    For every question whose K samples contain both a correct and a wrong
    planner trajectory, pair each distinct wrong planner_sql (rejected) with
    the shortest correct planner_sql (chosen), using the wrong trajectory's
    fixer prompt. Pairs are deduplicated on (question, normalized wrong SQL),
    shuffled with a fixed seed, split ~1/30 (min 40) into test, and saved as
    a DatasetDict under OUT_DIR.
    """
    rng = random.Random(42)  # fixed seed -> reproducible shuffle/split
    pairs = []
    seen_keys = set()  # (question, normalized wrong SQL) -> dedup across files
    for path in SRC_PATHS:
        if not os.path.exists(path):
            continue
        with open(path) as f:
            for line in f:
                sample = json.loads(line)
                trajs = sample.get("trajectories", [])
                if len(trajs) < 2:
                    continue
                correct_trajs = [t for t in trajs if t.get("is_planner_correct")]
                wrong_trajs = [t for t in trajs if not t.get("is_planner_correct")]
                if not correct_trajs or not wrong_trajs:
                    continue
                # The preferred alternative is the shortest correct planner_sql.
                # It does not depend on the wrong trajectory, so compute it ONCE
                # per sample (the original re-sorted correct_trajs inside the
                # wrong_trajs loop); min() also avoids a full sort.
                best = min(correct_trajs, key=lambda t: len(t.get("planner_sql") or ""))
                csql = (best.get("planner_sql") or "").strip()
                if not csql:
                    continue
                norm_csql = normalize_sql(csql)
                question = sample.get("question", "")
                for wt in wrong_trajs:
                    wsql = (wt.get("planner_sql") or "").strip()
                    if not wsql:
                        continue
                    norm_wsql = normalize_sql(wsql)
                    # Skip pairs where "wrong" and "correct" are the same SQL
                    # modulo whitespace/case — no preference signal there.
                    if norm_wsql == norm_csql:
                        continue
                    fixer_prompt = (wt.get("fixer_prompt") or "").strip()
                    if not fixer_prompt:
                        continue
                    # Key on the strings themselves, not hash() values: hashes
                    # can collide (silently dropping distinct pairs) and str
                    # hashes are salted per interpreter run.
                    key = (question, norm_wsql)
                    if key in seen_keys:
                        continue
                    seen_keys.add(key)
                    pairs.append({
                        "prompt": fixer_prompt,
                        "chosen": f"```sql\n{csql}\n```",
                        "rejected": f"```sql\n{wsql}\n```",
                        "db_path": sample.get("db_path", ""),
                        "question": question,
                        "db_id": sample.get("db_id", ""),
                    })
    rng.shuffle(pairs)
    # Hold out ~1/30 of the pairs, but never fewer than 40, for evaluation.
    n_test = max(40, len(pairs) // 30)
    test = pairs[:n_test]
    train = pairs[n_test:]
    dd = DatasetDict({
        "train_dpo": Dataset.from_list(train),
        "test_dpo": Dataset.from_list(test),
    })
    dd.save_to_disk(OUT_DIR)
    print("=== Fixer ORPO iter-2 RE-PLANNER dataset ===")  # was an f-string with no placeholders
    print(f" total pairs: {len(pairs)}")
    print(f" train: {len(train)}, test: {len(test)}")
    print(f" Saved to {OUT_DIR}")
# Script entry point: build and save the dataset when run directly.
if __name__ == "__main__":
    main()