# sayit-archive-tw / scripts / build_conversations.py
# Author: audreyt
# Initial dataset: 1,931 transcripts, 59K SFT pairs, 85K RAG chunks, 765 bilingual terms
# Commit: bf38a2e (verified)
#!/usr/bin/env python3
"""Build SFT conversation pairs from parsed transcript turns.
Reads dataset/data/turns.jsonl and produces:
- dataset/data/conversations.jsonl (ShareGPT multi-turn format)
- dataset/data/sft_pairs.jsonl (individual instruction/response pairs)
"""
import json
import sys
from collections import defaultdict
from pathlib import Path
# Data directory sits two levels above this script (presumably <repo>/data
# with this file under <repo>/scripts — confirm against repo layout).
DATA_DIR = Path(__file__).resolve().parent.parent / "data"
TURNS_FILE = DATA_DIR / "turns.jsonl"  # input: parsed transcript turns
CONV_FILE = DATA_DIR / "conversations.jsonl"  # output: ShareGPT multi-turn conversations
SFT_FILE = DATA_DIR / "sft_pairs.jsonl"  # output: individual instruction/response pairs
MIN_RESPONSE_LEN = 20 # Skip trivially short Audrey responses in SFT pairs
MAX_CONTEXT_TURNS = 3 # Max preceding turns to include as instruction context
def load_turns_by_file(path: Path) -> dict:
    """Read a JSONL file of turn records and group them by ``source_file``.

    Each group is sorted in place by ``turn_index`` so callers iterate the
    turns in transcript order.
    """
    grouped: dict = defaultdict(list)
    with open(path, encoding="utf-8") as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            grouped[record["source_file"]].append(record)
    # Restore transcript order within every file's group.
    for turn_list in grouped.values():
        turn_list.sort(key=lambda rec: rec["turn_index"])
    return grouped
def merge_messages(turns: list[dict]) -> list[dict]:
    """Merge consecutive same-role turns into ShareGPT messages.

    Audrey → role "assistant", everyone else → role "human".
    Consecutive human turns from *different* speakers get ``**Name:**``
    speaker attribution; consecutive turns from the same speaker (human
    or assistant) are simply concatenated with a blank line.

    Fix over the previous version: same-speaker consecutive human turns
    no longer receive redundant attribution, and the merged message is
    only flagged multi-speaker when a second speaker actually appears.
    """
    messages = []
    for turn in turns:
        role = "assistant" if turn["is_audrey"] else "human"
        speaker = turn["speaker"]
        text = turn["text"].strip()
        if not text:
            continue  # drop whitespace-only turns entirely
        if messages and messages[-1]["role"] == role:
            prev = messages[-1]
            if role == "human" and (prev["_multi_speaker"] or speaker != prev["speaker"]):
                # A different speaker joins (or we are already in
                # multi-speaker mode): attribute every contribution.
                if not prev["_multi_speaker"]:
                    # First merge of a *different* speaker: retrofit
                    # attribution onto the text accumulated so far.
                    prev["content"] = f"**{prev['speaker']}:** {prev['content']}"
                    prev["_multi_speaker"] = True
                prev["content"] += f"\n\n**{speaker}:** {text}"
                if speaker not in prev["_speakers"]:
                    prev["_speakers"].append(speaker)
            else:
                # Same speaker continuing (incl. consecutive Audrey
                # turns): plain concatenation, no attribution needed.
                prev["content"] += f"\n\n{text}"
        else:
            messages.append({
                "role": role,
                "speaker": speaker,
                "content": text,
                "_multi_speaker": False,
                "_speakers": [speaker],
            })
    # Strip internal bookkeeping fields before returning.
    for msg in messages:
        if msg["_multi_speaker"]:
            # Multi-speaker message: list everyone in first-appearance order.
            msg["speaker"] = ", ".join(msg["_speakers"])
        del msg["_multi_speaker"]
        del msg["_speakers"]
    return messages
def build_conversation(source_file: str, turns: list[dict]) -> dict:
    """Assemble a ShareGPT-format conversation record from one file's turns."""
    first_turn = turns[0]
    messages = merge_messages(turns)
    # All unique speakers (Audrey included), in order of first appearance.
    participants: list[str] = []
    seen_names: set = set()
    for t in turns:
        name = t["speaker"]
        if name not in seen_names:
            seen_names.add(name)
            participants.append(name)
    # English transcripts are measured in words; all other languages by
    # character count.
    assistant_texts = [m["content"] for m in messages if m["role"] == "assistant"]
    if first_turn["language"] == "en":
        audrey_count = sum(len(text.split()) for text in assistant_texts)
    else:
        audrey_count = sum(len(text) for text in assistant_texts)
    return {
        "id": source_file.replace(".md", ""),
        "date": first_turn["date"],
        "title": first_turn["title"],
        "language": first_turn["language"],
        "participants": participants,
        "messages": [
            {"role": m["role"], "speaker": m["speaker"], "content": m["content"]}
            for m in messages
        ],
        "source_file": source_file,
        "num_turns": len(messages),
        "audrey_word_count": audrey_count,
    }
def build_sft_pairs_from_conversation(conv: dict) -> list[dict]:
    """Extract instruction/response SFT pairs from a multi-turn conversation.

    Every sufficiently long assistant message becomes one pair; up to
    MAX_CONTEXT_TURNS immediately preceding messages are folded into the
    instruction, with prior assistant turns labelled as Audrey's.
    """
    messages = conv["messages"]
    pairs: list[dict] = []
    next_idx = 0
    for pos, message in enumerate(messages):
        if message["role"] != "assistant":
            continue
        answer = message["content"]
        if len(answer) < MIN_RESPONSE_LEN:
            continue  # too short to be a useful training target
        context = messages[max(0, pos - MAX_CONTEXT_TURNS):pos]
        if not context:
            continue  # assistant opens the transcript: nothing to prompt with
        # Human context is kept verbatim; earlier Audrey turns are labelled.
        instruction_parts = [
            cm["content"] if cm["role"] == "human"
            else f"[Audrey Tang:] {cm['content']}"
            for cm in context
        ]
        pairs.append({
            "id": f"{conv['id']}/pair_{next_idx:03d}",
            "date": conv["date"],
            "title": conv["title"],
            "language": conv["language"],
            "instruction": "\n\n".join(instruction_parts),
            "response": answer,
            "context_turns": len(context),
            "source_file": conv["source_file"],
        })
        next_idx += 1
    return pairs
def build_sft_pairs_from_monologue(conv: dict) -> list[dict]:
    """Build SFT pairs from a monologue (Audrey-only single-speaker file).

    The transcript title serves as the instruction (suffixed with
    "(continued)" after the first chunk). Short texts yield a single
    pair; longer texts are split into paragraph chunks of at least
    ~400 characters each.
    """
    messages = conv["messages"]
    if not messages:
        return []
    # Usually a single merged assistant message; join defensively anyway.
    full_text = "\n\n".join(m["content"] for m in messages)
    paragraphs = [p for p in (s.strip() for s in full_text.split("\n\n")) if p]
    if not paragraphs:
        return []

    def _pair(idx: int, instruction: str, response: str) -> dict:
        # Field order intentionally mirrors the conversation-pair records.
        return {
            "id": f"{conv['id']}/monologue_{idx:03d}",
            "date": conv["date"],
            "title": conv["title"],
            "language": conv["language"],
            "instruction": instruction,
            "response": response,
            "context_turns": 0,
            "source_file": conv["source_file"],
        }

    # Short enough overall: emit the whole text as one pair (if non-trivial).
    if len(paragraphs) <= 3 or len(full_text) < 500:
        if len(full_text) >= MIN_RESPONSE_LEN:
            return [_pair(0, conv["title"], full_text)]
        return []

    # Accumulate paragraphs until each chunk reaches at least 400 characters.
    chunks: list[str] = []
    pending: list[str] = []
    pending_len = 0
    for para in paragraphs:
        pending.append(para)
        pending_len += len(para)
        if pending_len >= 400:
            chunks.append("\n\n".join(pending))
            pending, pending_len = [], 0
    if pending:
        chunks.append("\n\n".join(pending))

    pairs: list[dict] = []
    for idx, chunk in enumerate(chunks):
        if len(chunk) < MIN_RESPONSE_LEN:
            continue
        instruction = conv["title"] if idx == 0 else f"{conv['title']} (continued)"
        pairs.append(_pair(idx, instruction, chunk))
    return pairs
def is_monologue(turns: list[dict]) -> bool:
    """Return True when the transcript has exactly one speaker and it is Audrey."""
    unique_speakers = {turn["speaker"] for turn in turns}
    if len(unique_speakers) != 1:
        return False
    return bool(turns[0]["is_audrey"])
def main():
    """Read parsed turns, write conversation and SFT-pair JSONL files, print stats."""
    print(f"Reading turns from {TURNS_FILE}...")
    groups = load_turns_by_file(TURNS_FILE)
    print(f"Found {len(groups)} source files, {sum(len(v) for v in groups.values())} total turns")
    conversations = []
    all_sft_pairs = []
    monologue_count = 0  # transcripts where Audrey is the only speaker
    no_audrey_count = 0  # transcripts where Audrey never speaks (yield no SFT pairs)
    lang_counts = defaultdict(int)      # conversations per language code
    lang_sft_counts = defaultdict(int)  # SFT pairs per language code
    # Sorted iteration keeps the output files deterministic across runs.
    for source_file, turns in sorted(groups.items()):
        conv = build_conversation(source_file, turns)
        conversations.append(conv)
        lang_counts[conv["language"]] += 1
        has_audrey = any(t["is_audrey"] for t in turns)
        # Pair-extraction strategy depends on who is speaking.
        if is_monologue(turns):
            monologue_count += 1
            pairs = build_sft_pairs_from_monologue(conv)
        elif has_audrey:
            pairs = build_sft_pairs_from_conversation(conv)
        else:
            # No Audrey in this conversation — no SFT pairs
            no_audrey_count += 1
            pairs = []
        for p in pairs:
            lang_sft_counts[p["language"]] += 1
        all_sft_pairs.extend(pairs)
    # Write outputs as JSON Lines; ensure_ascii=False keeps CJK text readable.
    print(f"\nWriting {len(conversations)} conversations to {CONV_FILE}...")
    with open(CONV_FILE, "w", encoding="utf-8") as f:
        for conv in conversations:
            f.write(json.dumps(conv, ensure_ascii=False) + "\n")
    print(f"Writing {len(all_sft_pairs)} SFT pairs to {SFT_FILE}...")
    with open(SFT_FILE, "w", encoding="utf-8") as f:
        for pair in all_sft_pairs:
            f.write(json.dumps(pair, ensure_ascii=False) + "\n")
    # Statistics (word count for "en", character count otherwise — see
    # build_conversation).
    total_turns = sum(c["num_turns"] for c in conversations)
    avg_turns = total_turns / len(conversations) if conversations else 0
    total_audrey_wc = sum(c["audrey_word_count"] for c in conversations)
    print("\n" + "=" * 60)
    print("OUTPUT STATISTICS")
    print("=" * 60)
    print(f"Total conversations: {len(conversations)}")
    print(f" Monologues (Audrey only): {monologue_count}")
    print(f" No Audrey present: {no_audrey_count}")
    print(f" Multi-speaker w/ Audrey: {len(conversations) - monologue_count - no_audrey_count}")
    print(f"Total SFT pairs: {len(all_sft_pairs)}")
    print(f"Avg turns per conversation: {avg_turns:.1f}")
    print(f"Total Audrey word/char ct: {total_audrey_wc}")
    print()
    print("Language split (conversations):")
    for lang, count in sorted(lang_counts.items()):
        print(f" {lang}: {count}")
    print("Language split (SFT pairs):")
    for lang, count in sorted(lang_sft_counts.items()):
        print(f" {lang}: {count}")
    print("=" * 60)
# Script entry point.
if __name__ == "__main__":
    main()