# sayit-archive-tw / scripts/build_chunks.py
# Initial dataset: 1,931 transcripts, 59K SFT pairs, 85K RAG chunks, 765 bilingual terms
# (repository-page header from commit bf38a2e, preserved as a comment)
#!/usr/bin/env python3
"""Build RAG chunks from parsed turns for the Audrey Tang transcript dataset.
Reads turns.jsonl and produces embedding-ready chunks in chunks.jsonl.
Chunking strategy:
- Primary unit: each Audrey turn + the question that prompted it
- Long turns (>600 token estimate): split at paragraph boundaries with overlap
- Short turns (<50 token estimate): kept as-is (no adjacent Audrey turns to merge with)
- Question field: last non-Audrey turn before the response
- Context_before: up to 2 preceding turns for retrieval context
"""
import json
import statistics
import sys
from collections import Counter, defaultdict
from pathlib import Path
# Data files live in the repo-root "data" directory (one level above scripts/).
INPUT = Path(__file__).resolve().parent.parent / "data" / "turns.jsonl"
OUTPUT = Path(__file__).resolve().parent.parent / "data" / "chunks.jsonl"
# Token estimation: 1 token per 4 chars for English, 1 token per 1.5 chars for Chinese
SPLIT_THRESHOLD = 600 # token estimate above which we split a turn into sub-chunks
TARGET_MIN = 200 # target tokens per sub-chunk; trailing pieces under TARGET_MIN // 2 are merged back
TARGET_MAX = 500 # target tokens per sub-chunk (upper bound on the running estimate)
def token_estimate(text: str, language: str) -> int:
    """Cheap token-count heuristic for *text*.

    Uses ~1.5 characters per token for Chinese ("zh") and ~4 characters
    per token for everything else. Always returns at least 1.
    """
    chars_per_token = 1.5 if language == "zh" else 4
    return max(1, round(len(text) / chars_per_token))
def split_into_paragraphs(text: str) -> list[str]:
    """Return the non-empty, whitespace-trimmed paragraphs of *text*.

    Paragraphs are delimited by blank lines (double newlines).
    """
    trimmed = (piece.strip() for piece in text.split("\n\n"))
    return [para for para in trimmed if para]
def last_sentence(text: str) -> str:
    """Extract the last sentence from *text*, for use as overlap context.

    Handles both Chinese ("。") and English (". ", "!", "?") sentence
    endings. If no boundary is found, falls back to the last 200 chars.

    NOTE: the original separator lists also contained "!\\n", "! ", "?\\n"
    and "? ", but those entries were unreachable — any text containing
    them also contains the bare "!" / "?" tried earlier in the list, so
    the bare form always matched first. They are dropped here; matching
    behavior is unchanged.
    """
    for sep in ["。", ".\n", ". ", "!", "?"]:
        parts = text.rsplit(sep, 1)
        if len(parts) != 2:
            continue
        tail = parts[1].strip()
        if tail:
            return tail
        # Separator sat at the very end of the text: look inside the head
        # for the previous sentence boundary and reattach the punctuation.
        inner = parts[0]
        for sep2 in ["。", ". ", "!", "?"]:
            inner_parts = inner.rsplit(sep2, 1)
            if len(inner_parts) == 2:
                return inner_parts[1].strip() + sep.strip()
        return inner[-200:] if len(inner) > 200 else inner
    # No sentence boundary found; return last 200 chars.
    return text[-200:] if len(text) > 200 else text
def build_sub_chunks(text: str, language: str) -> list[str]:
    """Split an over-long text into embedding-sized pieces at paragraph
    boundaries, carrying a one-sentence overlap between pieces.

    Paragraphs accumulate until adding another would push the running
    token estimate past TARGET_MAX. A single over-long paragraph is kept
    whole (splitting inside it would break mid-thought). A tiny trailing
    piece (under TARGET_MIN // 2 tokens) is folded into the previous one.
    """
    paragraphs = split_into_paragraphs(text)
    if not paragraphs:
        return [text]

    emitted: list[str] = []
    pending: list[str] = []
    pending_tokens = 0

    def flush() -> None:
        """Emit the pending paragraphs; seed the next piece with overlap."""
        nonlocal pending, pending_tokens
        emitted.append("\n\n".join(pending))
        carry = last_sentence(pending[-1])
        carry_tokens = token_estimate(carry, language)
        # Only keep the overlap sentence if it is reasonably short.
        if carry_tokens < 100:
            pending, pending_tokens = [carry], carry_tokens
        else:
            pending, pending_tokens = [], 0

    for para in paragraphs:
        cost = token_estimate(para, language)
        # Emit before adding this paragraph would exceed the target.
        if pending and pending_tokens + cost > TARGET_MAX:
            flush()
        pending.append(para)
        pending_tokens += cost

    # Flush whatever remains; merge a tiny leftover into the previous piece.
    if pending:
        tail = "\n\n".join(pending)
        if emitted and token_estimate(tail, language) < TARGET_MIN // 2:
            emitted[-1] = emitted[-1] + "\n\n" + tail
        else:
            emitted.append(tail)

    return emitted if emitted else [text]
def _chunk_record(stem: str, counter: int, turn: dict, text: str,
                  question, context_before) -> dict:
    """Assemble one output chunk dict in the chunks.jsonl schema."""
    return {
        "id": f"{stem}/chunk_{counter:03d}",
        "date": turn["date"],
        "title": turn["title"],
        "language": turn["language"],
        "speaker": turn["speaker"],
        "question": question,
        "text": text,
        "context_before": context_before,
        "token_estimate": token_estimate(text, turn["language"]),
        "source_file": turn["source_file"],
    }


def _print_stats(chunks: list) -> None:
    """Print summary statistics for a non-empty list of chunks."""
    token_estimates = [c["token_estimate"] for c in chunks]
    languages = Counter(c["language"] for c in chunks)
    years = Counter(c["date"][:4] for c in chunks)
    under_100 = sum(1 for t in token_estimates if t < 100)
    over_600 = sum(1 for t in token_estimates if t > 600)
    total = len(chunks)
    print("=== RAG Chunk Statistics ===")
    print(f"Total chunks: {total:,}")
    print(f"Unique source files: {len(set(c['source_file'] for c in chunks)):,}")
    print()
    print("Token estimates:")
    print(f" Mean: {statistics.mean(token_estimates):.1f}")
    print(f" Median: {statistics.median(token_estimates):.1f}")
    print(f" Min: {min(token_estimates)}")
    print(f" Max: {max(token_estimates)}")
    # stdev is undefined for a single sample; guard against StatisticsError.
    if len(token_estimates) > 1:
        print(f" Stdev: {statistics.stdev(token_estimates):.1f}")
    print()
    print("Out-of-range chunks:")
    print(f" Below 100 tokens: {under_100:,} ({under_100/total*100:.1f}%)")
    print(f" Above 600 tokens: {over_600:,} ({over_600/total*100:.1f}%)")
    print()
    print("Language split:")
    for lang, count in sorted(languages.items()):
        print(f" {lang}: {count:,} ({count/total*100:.1f}%)")
    print()
    print("Chunks by year:")
    for year, count in sorted(years.items()):
        print(f" {year}: {count:,}")
    print()


def main():
    """Build chunks.jsonl from turns.jsonl and print summary statistics."""
    # Read all turns, grouped by source file. Explicit UTF-8: the data is
    # bilingual (zh/en) and is written with ensure_ascii=False, so relying
    # on the locale default encoding breaks on non-UTF-8 platforms.
    turns_by_source = defaultdict(list)
    with open(INPUT, encoding="utf-8") as f:
        for line in f:
            turn = json.loads(line)
            turns_by_source[turn["source_file"]].append(turn)
    # Restore document order within each source file.
    for source in turns_by_source:
        turns_by_source[source].sort(key=lambda t: t["turn_index"])
    chunks = []
    for source_file, turns in sorted(turns_by_source.items()):
        chunk_counter = 0
        # Chunk ids are "<stem>/chunk_NNN"; stem is source_file minus extension.
        stem = source_file.rsplit(".", 1)[0] if "." in source_file else source_file
        for i, turn in enumerate(turns):
            if not turn["is_audrey"]:
                continue
            # Question: last non-Audrey turn before this response (None if none).
            question = None
            for j in range(i - 1, -1, -1):
                if not turns[j]["is_audrey"]:
                    question = turns[j]["text"]
                    break
            # context_before: up to 2 immediately-preceding turns, any speaker.
            context_parts = [
                f"{turns[j]['speaker']}: {turns[j]['text']}"
                for j in range(max(0, i - 2), i)
            ]
            context_before = "\n\n".join(context_parts) if context_parts else None
            te = token_estimate(turn["text"], turn["language"])
            if te > SPLIT_THRESHOLD:
                # Long turn: split at paragraph boundaries. Question and
                # context are attached to the first sub-chunk only, so they
                # are not duplicated across retrieval results.
                sub_texts = build_sub_chunks(turn["text"], turn["language"])
                for k, sub_text in enumerate(sub_texts):
                    first = k == 0
                    chunks.append(_chunk_record(
                        stem, chunk_counter, turn, sub_text,
                        question if first else None,
                        context_before if first else None))
                    chunk_counter += 1
            else:
                # Short enough: emit the whole turn as a single chunk.
                chunks.append(_chunk_record(
                    stem, chunk_counter, turn, turn["text"],
                    question, context_before))
                chunk_counter += 1
    # Write output (UTF-8 so ensure_ascii=False round-trips CJK text).
    with open(OUTPUT, "w", encoding="utf-8") as f:
        for chunk in chunks:
            f.write(json.dumps(chunk, ensure_ascii=False) + "\n")
    # Statistics; an empty run would otherwise crash on mean()/division.
    if chunks:
        _print_stats(chunks)
    else:
        print("No chunks produced (input contained no Audrey turns).")
    print(f"Output written to: {OUTPUT}")
    print(f"Output size: {OUTPUT.stat().st_size / 1024 / 1024:.1f} MB")
# Run only when executed as a script; importing the module has no side effects.
if __name__ == "__main__":
    main()