File size: 8,570 Bytes
bf38a2e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
#!/usr/bin/env python3
"""Build RAG chunks from parsed turns for the Audrey Tang transcript dataset.

Reads turns.jsonl and produces embedding-ready chunks in chunks.jsonl.

Chunking strategy:
- Primary unit: each Audrey turn + the question that prompted it
- Long turns (>600 token estimate): split at paragraph boundaries with overlap
- Short turns (<50 token estimate): kept as-is (no adjacent Audrey turns to merge with)
- Question field: last non-Audrey turn before the response
- Context_before: up to 2 preceding turns for retrieval context
"""

import json
import statistics
import sys
from collections import Counter, defaultdict
from pathlib import Path

# Paths resolved relative to this script: <repo>/data/turns.jsonl in,
# <repo>/data/chunks.jsonl out.
INPUT = Path(__file__).resolve().parent.parent / "data" / "turns.jsonl"
OUTPUT = Path(__file__).resolve().parent.parent / "data" / "chunks.jsonl"

# Token estimation: 1 token per 4 chars for English, 1 token per 1.5 chars for Chinese
SPLIT_THRESHOLD = 600  # token estimate above which a turn is split into sub-chunks
TARGET_MIN = 200       # trailing sub-chunks under TARGET_MIN // 2 tokens get merged back
TARGET_MAX = 500       # soft upper bound on tokens per sub-chunk when splitting


def token_estimate(text: str, language: str) -> int:
    """Roughly estimate the token count of *text*.

    Uses ~1.5 characters per token for Chinese ("zh") and ~4 characters
    per token for everything else; always returns at least 1.
    """
    chars_per_token = 1.5 if language == "zh" else 4
    return max(1, round(len(text) / chars_per_token))


def split_into_paragraphs(text: str) -> list[str]:
    """Return the blank-line-separated paragraphs of *text*, stripped, empties dropped."""
    return [stripped for part in text.split("\n\n") if (stripped := part.strip())]


def last_sentence(text: str) -> str:
    """Extract the last sentence from text for overlap context.

    Tries each terminator in order and returns the text after its final
    occurrence; if the terminator ends the text, falls back to the last
    sentence inside the remainder (re-appending the stripped terminator).
    If no terminator is found at all, returns the last 200 characters.
    """
    # Handle both Chinese and English sentence endings
    # NOTE(review): "!\n", "! ", "?\n", "? " can never match — the bare "!"
    # and "?" entries earlier in the list always split first. Confirm
    # whether the longer separators were meant to be tried before them.
    for sep in ["。", ".\n", ". ", "!", "?", "!\n", "! ", "?\n", "? "]:
        parts = text.rsplit(sep, 1)
        # rsplit yields 2 parts only when sep occurs; non-empty tail means
        # there is trailing text after the final terminator — that's the
        # (possibly unterminated) last sentence.
        if len(parts) == 2 and parts[1].strip():
            return parts[1].strip()
        elif len(parts) == 2:
            # Separator was at the end; try to get the last real sentence
            inner = parts[0]
            for sep2 in ["。", ". ", "!", "?", "! ", "? "]:
                inner_parts = inner.rsplit(sep2, 1)
                if len(inner_parts) == 2:
                    # Re-attach the outer terminator that this sentence ended with.
                    return inner_parts[1].strip() + sep.strip()
            return inner[-200:] if len(inner) > 200 else inner
    # No sentence boundary found; return last 200 chars
    return text[-200:] if len(text) > 200 else text


def build_sub_chunks(text: str, language: str) -> list[str]:
    """Split a long text into sub-chunks at paragraph boundaries.

    Paragraphs accumulate until adding the next one would push the running
    token estimate past TARGET_MAX; each emitted sub-chunk seeds the next
    with its final sentence as overlap for continuity. A single over-long
    paragraph is never split internally (that would break mid-thought),
    and a tiny trailing remainder (< TARGET_MIN // 2 tokens) is merged
    into the previous sub-chunk rather than standing alone.
    """
    paragraphs = split_into_paragraphs(text)
    if not paragraphs:
        return [text]

    emitted: list[str] = []
    pending: list[str] = []
    pending_tokens = 0

    for paragraph in paragraphs:
        cost = token_estimate(paragraph, language)

        # Flush the accumulator when this paragraph would overflow it.
        if pending and pending_tokens + cost > TARGET_MAX:
            emitted.append("\n\n".join(pending))

            # Carry a one-sentence overlap forward, unless that sentence
            # alone is already sizeable.
            carry = last_sentence(pending[-1])
            carry_tokens = token_estimate(carry, language)
            if carry_tokens < 100:
                pending, pending_tokens = [carry], carry_tokens
            else:
                pending, pending_tokens = [], 0

        pending.append(paragraph)
        pending_tokens += cost

    # Flush whatever is left, merging a too-small tail into its predecessor.
    if pending:
        tail = "\n\n".join(pending)
        if emitted and token_estimate(tail, language) < TARGET_MIN // 2:
            emitted[-1] = emitted[-1] + "\n\n" + tail
        else:
            emitted.append(tail)

    return emitted or [text]


def _load_turns() -> dict[str, list[dict]]:
    """Read turns.jsonl and return turns grouped by source file, sorted by turn_index."""
    turns_by_source: defaultdict[str, list[dict]] = defaultdict(list)
    # Explicit UTF-8: the transcripts are bilingual and the output is written
    # with ensure_ascii=False, so relying on the locale encoding is unsafe.
    with open(INPUT, encoding="utf-8") as f:
        for line in f:
            turn = json.loads(line)
            turns_by_source[turn["source_file"]].append(turn)
    for turns in turns_by_source.values():
        turns.sort(key=lambda t: t["turn_index"])
    return turns_by_source


def _chunks_for_source(source_file: str, turns: list[dict]) -> list[dict]:
    """Build the chunk records for one source file's turn_index-ordered turns."""
    # Derive stem from source_file (remove extension, e.g. ".md")
    stem = source_file.rsplit(".", 1)[0] if "." in source_file else source_file
    chunks: list[dict] = []
    chunk_counter = 0

    for i, turn in enumerate(turns):
        if not turn["is_audrey"]:
            continue

        # Question: last non-Audrey turn before this response (None if absent).
        question = next(
            (turns[j]["text"] for j in range(i - 1, -1, -1) if not turns[j]["is_audrey"]),
            None,
        )

        # Context_before: up to 2 immediately preceding turns, any speaker.
        context_parts = [f"{ctx['speaker']}: {ctx['text']}" for ctx in turns[max(0, i - 2):i]]
        context_before = "\n\n".join(context_parts) if context_parts else None

        te = token_estimate(turn["text"], turn["language"])
        if te > SPLIT_THRESHOLD:
            sub_texts = build_sub_chunks(turn["text"], turn["language"])
        else:
            sub_texts = [turn["text"]]

        for k, sub_text in enumerate(sub_texts):
            chunks.append({
                "id": f"{stem}/chunk_{chunk_counter:03d}",
                "date": turn["date"],
                "title": turn["title"],
                "language": turn["language"],
                "speaker": turn["speaker"],
                # Question/context attach only to the first sub-chunk so they
                # are not duplicated across splits of the same long turn.
                "question": question if k == 0 else None,
                "text": sub_text,
                "context_before": context_before if k == 0 else None,
                "token_estimate": token_estimate(sub_text, turn["language"]),
                "source_file": turn["source_file"],
            })
            chunk_counter += 1

    return chunks


def _print_stats(chunks: list[dict]) -> None:
    """Print summary statistics for a non-empty list of written chunks."""
    token_estimates = [c["token_estimate"] for c in chunks]
    languages = Counter(c["language"] for c in chunks)
    years = Counter(c["date"][:4] for c in chunks)
    under_100 = sum(1 for t in token_estimates if t < 100)
    over_600 = sum(1 for t in token_estimates if t > 600)

    print("=== RAG Chunk Statistics ===")
    print(f"Total chunks: {len(chunks):,}")
    print(f"Unique source files: {len(set(c['source_file'] for c in chunks)):,}")
    print()
    print("Token estimates:")
    print(f"  Mean:   {statistics.mean(token_estimates):.1f}")
    print(f"  Median: {statistics.median(token_estimates):.1f}")
    print(f"  Min:    {min(token_estimates)}")
    print(f"  Max:    {max(token_estimates)}")
    # statistics.stdev raises StatisticsError on fewer than two data points.
    if len(token_estimates) > 1:
        print(f"  Stdev:  {statistics.stdev(token_estimates):.1f}")
    print()
    print("Out-of-range chunks:")
    print(f"  Below 100 tokens: {under_100:,} ({under_100/len(chunks)*100:.1f}%)")
    print(f"  Above 600 tokens: {over_600:,} ({over_600/len(chunks)*100:.1f}%)")
    print()
    print("Language split:")
    for lang, count in sorted(languages.items()):
        print(f"  {lang}: {count:,} ({count/len(chunks)*100:.1f}%)")
    print()
    print("Chunks by year:")
    for year, count in sorted(years.items()):
        print(f"  {year}: {count:,}")
    print()
    print(f"Output written to: {OUTPUT}")
    print(f"Output size: {OUTPUT.stat().st_size / 1024 / 1024:.1f} MB")


def main() -> None:
    """Build chunks.jsonl from turns.jsonl and print summary statistics."""
    turns_by_source = _load_turns()

    chunks: list[dict] = []
    for source_file, turns in sorted(turns_by_source.items()):
        chunks.extend(_chunks_for_source(source_file, turns))

    # Write output (UTF-8 to match ensure_ascii=False below).
    with open(OUTPUT, "w", encoding="utf-8") as f:
        f.writelines(json.dumps(chunk, ensure_ascii=False) + "\n" for chunk in chunks)

    if not chunks:
        # Previously this crashed in statistics.mean; report and exit cleanly.
        print(f"No chunks produced; check input at {INPUT}")
        return
    _print_stats(chunks)


# Script entry point: build chunks.jsonl and print statistics.
if __name__ == "__main__":
    main()