# mats-sql-bundle / scripts / build_validator_2agents_v3.py
# (uploaded via huggingface_hub; commit 82ae30c, verified)
"""Split v3 unified validator data into 2 specialized SFT datasets:
- Validator Selection (v_s): critique only the SELECT clause
- Validator Condition (v_c): critique only the WHERE/HAVING/CASE conditions
Per the paper (approach.tex §Combined Validator), the multi-agent design has
2 specialized validators, not one unified validator. This script extracts the
<select>...</select> and <condition>...</condition> sections from v3 unified
completions and emits 2 SFT datasets with section-specific prompts.
Outputs:
- data/multi-agents/fixed/sft-validator-selection-v3
- data/multi-agents/fixed/sft-validator-condition-v3
"""
import re
from datasets import load_from_disk, Dataset, DatasetDict
# Instruction header for the SELECT-clause validator (v_s).  It replaces the
# unified "FOUR critique sections" header when building the selection dataset.
SEL_INSTR = "You are a SQL SELECT-clause critique agent. Output ONE critique section <select>...</select> analysing the SELECT clause of the SQL query below; do NOT output any SQL. Use 'None' if the SELECT clause looks correct."
# Instruction header for the condition validator (v_c): WHERE/HAVING/CASE-WHEN.
COND_INSTR = "You are a SQL CONDITION critique agent. Output ONE critique section <condition>...</condition> analysing the WHERE/HAVING/CASE-WHEN conditions of the SQL query below; do NOT output any SQL. Use 'None' if the conditions look correct."
# Compiled extractors for the two tagged critique sections of a unified
# completion.  Non-greedy bodies; DOTALL lets '.' match newlines inside a tag.
SEC_RE = {
    "select": re.compile(r"<select>(.*?)</select>", re.DOTALL),
    "condition": re.compile(r"<condition>(.*?)</condition>", re.DOTALL),
}
def replace_header_block(prompt_unified, new_header_line):
    """Swap the unified critique-agent header for a section-specific one.

    Unified prompts start with a single instruction line ("You are a SQL
    critique agent. Output FOUR critique sections ..."), a blank line, and
    then the schema + question + SQL body.  Everything after the first blank
    line is kept and *new_header_line* is prepended.  If the prompt contains
    no blank line, the whole prompt is used as the body (best-effort).
    """
    head, sep, tail = prompt_unified.partition("\n\n")
    body = tail if sep else head
    return f"{new_header_line}\n\n{body}"
def main():
    """Build and save the two section-specific validator SFT datasets.

    Loads the unified v3 validator dataset, extracts the <select> and
    <condition> critique sections from each completion, rewrites each prompt
    with the matching section-specific instruction header, and saves one
    DatasetDict per specialized validator agent (v_s and v_c).

    Rows missing either tagged section are skipped so the two emitted
    datasets stay paired example-for-example.
    """
    v3 = load_from_disk("/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-diverse-v3")

    # One pass over each split, filling the selection and condition
    # accumulators in lockstep.  (An earlier draft had an abandoned first
    # loop with placeholder targets; it has been removed.)
    sel_train, sel_test = [], []
    cond_train, cond_test = [], []
    for split_name, sel_out, cond_out in [("train", sel_train, cond_train),
                                          ("test", sel_test, cond_test)]:
        for ex in v3[split_name]:
            prompt = ex["prompt"]
            completion = ex["completion"]
            sel_match = SEC_RE["select"].search(completion)
            cond_match = SEC_RE["condition"].search(completion)
            # Both sections are required to emit a paired example.
            if not sel_match or not cond_match:
                continue
            sel_body = sel_match.group(0).strip()    # full <select>...</select>
            cond_body = cond_match.group(0).strip()  # full <condition>...</condition>
            # Build section-specific prompts from the unified one.
            sel_prompt = replace_header_block(prompt, SEL_INSTR)
            cond_prompt = replace_header_block(prompt, COND_INSTR)
            # NOTE: SFT trainer in alignment-handbook reads the `messages`
            # column via dict access (chat_template uses messages['prompt'] /
            # messages['completion']), so store as dict.
            sel_out.append({
                "prompt": sel_prompt,
                "completion": sel_body,
                "messages": {"prompt": sel_prompt, "completion": sel_body},
            })
            cond_out.append({
                "prompt": cond_prompt,
                "completion": cond_body,
                "messages": {"prompt": cond_prompt, "completion": cond_body},
            })

    sel_dd = DatasetDict({
        "train": Dataset.from_list(sel_train),
        "test": Dataset.from_list(sel_test),
    })
    cond_dd = DatasetDict({
        "train": Dataset.from_list(cond_train),
        "test": Dataset.from_list(cond_test),
    })
    sel_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-selection-v3"
    cond_dir = "/home/datht/mats-sql-tist/data/multi-agents/fixed/sft-validator-condition-v3"
    sel_dd.save_to_disk(sel_dir)
    cond_dd.save_to_disk(cond_dir)

    # Distribution report: count completions that look like "no issues found".
    def stats(rows):
        """Return 'N total, K all-OK (P%)' using a heuristic all-OK match."""
        n_none = sum(
            1 for r in rows
            if r["completion"].strip().lower().endswith("none")
            or "None\n</" in r["completion"]
            or "No issues" in r["completion"]
        )
        return f"{len(rows)} total, {n_none} all-OK ({100*n_none/max(len(rows),1):.1f}%)"

    print("=== Validator Selection (v_s) ===")
    print(f" train: {stats(sel_train)}")
    print(f" test: {stats(sel_test)}")
    print(f" Saved to {sel_dir}")
    print("\n=== Validator Condition (v_c) ===")
    print(f" train: {stats(cond_train)}")
    print(f" test: {stats(cond_test)}")
    print(f" Saved to {cond_dir}")
# Script entry point: build and save both specialized validator SFT datasets.
if __name__ == "__main__":
    main()