| | |
| | """Parse ~2,000 Audrey Tang transcript markdown files into structured JSONL. |
| | |
| | Reads from: /Users/au/w/transcript/*.md |
| | Writes to: dataset/data/turns.jsonl (one JSON object per speaker turn) |
| | dataset/data/metadata.json (statistics) |
| | """ |
| |
|
| | from __future__ import annotations |
| |
|
| | import json |
| | import glob |
| | import html |
| | import os |
| | import re |
| | import sys |
| | from collections import Counter |
| | from pathlib import Path |
| | from typing import Optional |
| |
|
# Location of the raw transcript corpus (one markdown file per event).
TRANSCRIPT_DIR = "/Users/au/w/transcript"
# Output directory: <repo>/dataset/data, resolved relative to this script.
OUTPUT_DIR = Path(__file__).resolve().parent.parent / "data"

# Files in TRANSCRIPT_DIR that are not transcripts and must be ignored.
SKIP_FILES = {"lexicon.md", "z.md"}

# Lower-cased speaker names that identify Audrey Tang.
AUDREY_NAMES = {"audrey tang", "唐鳳"}

# "# 2020-01-02 Some Title" — dated top-level heading.
HEADER_RE = re.compile(r"^#\s+(\d{4}-\d{2}-\d{2})\s+(.+)$")
# "# 2020年..." — year-only Chinese heading (no exact date in the header).
HEADER_YEAR_ONLY_RE = re.compile(r"^#\s+(\d{4})年(.+)$")
# "### Speaker Name:" — speaker heading; colon variants accepted per the class.
SPEAKER_RE = re.compile(r"^###\s+(.+?)\s*[::]\s*$")
# "> stage direction" — blockquote lines carry stage directions.
STAGE_DIR_RE = re.compile(r"^>\s*(.+)$")
# Any HTML tag (applied after iframes are removed wholesale).
HTML_TAG_RE = re.compile(r"<[^>]+>")
# Entire <iframe>...</iframe> embeds, including their inner content.
IFRAME_BLOCK_RE = re.compile(r"<iframe[\s\S]*?</iframe>", re.IGNORECASE)
| |
|
| |
|
def is_cjk(char: str) -> bool:
    """Return True if *char* lies in the CJK Unified Ideographs block (U+4E00–U+9FFF)."""
    return 0x4E00 <= ord(char) <= 0x9FFF
| |
|
| |
|
def detect_language(text: str) -> str:
    """Classify *text* as 'zh' or 'en' by CJK character density.

    Whitespace is ignored; a share of CJK Unified Ideographs above 30%
    means 'zh'.  Empty (or all-whitespace) input defaults to 'en'.
    """
    total = cjk = 0
    for ch in text:
        if ch.isspace():
            continue
        total += 1
        # Inlined CJK range test (U+4E00–U+9FFF), same check as is_cjk().
        if 0x4E00 <= ord(ch) <= 0x9FFF:
            cjk += 1
    if not total:
        return "en"
    return "zh" if cjk / total > 0.30 else "en"
| |
|
| |
|
def normalize_speaker(name: str) -> str:
    """Collapse interior whitespace runs to single spaces and trim the ends."""
    parts = name.split()
    return " ".join(parts)
| |
|
| |
|
def is_audrey(speaker: str) -> bool:
    """Return True when *speaker* (trimmed, case-insensitive) names Audrey Tang."""
    normalized = speaker.strip().lower()
    return normalized in AUDREY_NAMES
| |
|
| |
|
def extract_stage_directions(text: str) -> list[str]:
    """Extract parenthetical stage directions like (laughter), (笑) from text.

    Each match is returned re-wrapped in ASCII parentheses, regardless of
    which paren variant in the character class matched the input.
    """
    directions = []
    # NOTE(review): the character classes appear to accept more than one paren
    # variant (ASCII/fullwidth) — confirm the exact characters survive编码.
    for m in re.finditer(r"[((]([^))]+)[))]", text):
        directions.append(f"({m.group(1)})")
    return directions
| |
|
| |
|
def clean_text(text: str) -> str:
    """Normalize a raw turn body for output.

    Steps: decode HTML entities, drop <iframe> embeds wholesale, strip the
    remaining HTML tags, unwrap markdown links to their anchor text, trim
    every line, and collapse runs of 3+ newlines down to exactly two.
    """
    text = html.unescape(text)
    # Remove whole iframes first, then any leftover tags.
    text = HTML_TAG_RE.sub("", IFRAME_BLOCK_RE.sub("", text))

    # Markdown links: keep the visible label, discard the URL.
    text = re.sub(r"\[([^\]]*)\]\([^)]+\)", r"\1", text)

    # Strip every line; blank lines stay blank so paragraph breaks survive.
    text = "\n".join(line.strip() for line in text.split("\n"))

    # No more than one blank line between paragraphs.
    text = re.sub(r"\n{3,}", "\n\n", text)
    return text.strip()
| |
|
| |
|
def parse_file(filepath: str) -> dict | None:
    """Parse a single transcript .md file into structured speaker turns.

    Returns a dict with keys ``date``, ``title``, ``source_file``, ``turns``,
    ``language`` and ``skip_reason`` (None on success).  Inputs that cannot
    be parsed return a dict whose ``skip_reason`` explains why, so the caller
    can record skip statistics.
    """
    filename = os.path.basename(filepath)

    if filename in SKIP_FILES:
        # Fixed: the reason string previously read "excluded file: (unknown)"
        # because the filename placeholder was missing from the f-string.
        return {"skip_reason": f"excluded file: {filename}", "source_file": filename}

    with open(filepath, "r", encoding="utf-8") as f:
        content = f.read()

    if not content.strip():
        return {"skip_reason": "empty file", "source_file": filename}

    lines = content.split("\n")

    # --- Header: only the first non-blank line can carry the "# ..." title.
    date = None
    title = None
    header_line_idx = None

    for i, line in enumerate(lines):
        stripped = line.strip()
        if not stripped:
            continue
        m = HEADER_RE.match(stripped)
        if m:
            date = m.group(1)
            title = m.group(2).strip()
            header_line_idx = i
        else:
            m2 = HEADER_YEAR_ONLY_RE.match(stripped)
            if m2:
                # Year-only Chinese header: yields a title but no exact date;
                # the date may still be recoverable from the filename below.
                title = stripped.lstrip("# ").strip()
                header_line_idx = i
        # Whatever the first non-blank line was, the header scan is over.
        # (The original had two redundant conditional breaks followed by an
        # unconditional one; behavior is identical.)
        break

    # --- Fallbacks: derive date/title from a "YYYY-MM-DD-title.md" filename.
    if date is None:
        fm = re.match(r"(\d{4}-\d{2}-\d{2})", filename)
        if fm:
            date = fm.group(1)
    if title is None:
        # Strip the 11-char "YYYY-MM-DD-" prefix and the .md suffix.
        title_part = filename[11:]
        title_part = title_part.rsplit(".md", 1)[0]
        title = title_part.replace("-", " ")

    if date is None:
        return {"skip_reason": "no date found in header or filename", "source_file": filename}

    # --- Body: everything after the header line (or the whole file).
    body_start = (header_line_idx + 1) if header_line_idx is not None else 0
    body_lines = lines[body_start:]

    turns = []
    current_speaker = None
    current_paragraphs = []
    current_stage_dirs = []
    has_speaker_blocks = False

    def flush_turn():
        """Emit the accumulated paragraphs as one turn, then reset buffers."""
        nonlocal current_paragraphs, current_stage_dirs
        if current_speaker and current_paragraphs:
            text = clean_text("\n\n".join(current_paragraphs))
            if text:
                turns.append({
                    "speaker": current_speaker,
                    "text": text,
                    "stage_directions": current_stage_dirs[:],
                })
        current_paragraphs = []
        current_stage_dirs = []

    for line in body_lines:
        stripped = line.strip()

        # "### Name:" starts a new speaker turn.
        sm = SPEAKER_RE.match(stripped)
        if sm:
            has_speaker_blocks = True
            new_speaker = normalize_speaker(sm.group(1))
            if new_speaker == current_speaker:
                # Same speaker heading repeated: merge into the current turn,
                # keeping a paragraph break between the two sections.
                if current_paragraphs:
                    current_paragraphs.append("")
            else:
                flush_turn()
                current_speaker = new_speaker
            continue

        # "> ..." blockquotes carry stage directions, not spoken text.
        sd = STAGE_DIR_RE.match(stripped)
        if sd:
            direction_text = sd.group(1).strip()
            dirs = extract_stage_directions(direction_text)
            if dirs:
                current_stage_dirs.extend(dirs)
            continue

        # Ordinary content lines accumulate under the current speaker.
        if current_speaker:
            if stripped:
                current_paragraphs.append(stripped)
            elif current_paragraphs and current_paragraphs[-1] != "":
                # Blank line = paragraph break; never emit two in a row.
                current_paragraphs.append("")

    flush_turn()

    # --- Monologue fallback: no "###" speaker headings at all.
    if not has_speaker_blocks:
        text_lines = []
        for line in body_lines:
            stripped = line.strip()
            if not stripped:
                continue
            if STAGE_DIR_RE.match(stripped):
                continue
            # Bare URLs and raw HTML lines carry no transcript content.
            if stripped.startswith("http") or stripped.startswith("<"):
                continue
            text_lines.append(stripped)

        if not text_lines:
            return {"skip_reason": "no meaningful content", "source_file": filename}

        full_text = clean_text("\n\n".join(text_lines))
        if not full_text:
            return {"skip_reason": "no meaningful content after cleaning", "source_file": filename}

        # Attribute the whole monologue to Audrey, named per its language.
        lang = detect_language(full_text)
        speaker = "唐鳳" if lang == "zh" else "Audrey Tang"
        turns = [{
            "speaker": speaker,
            "text": full_text,
            "stage_directions": [],
        }]

    if not turns:
        return {"skip_reason": "no turns extracted", "source_file": filename}

    # File-level language is judged over the concatenation of all turns.
    all_text = " ".join(t["text"] for t in turns)
    language = detect_language(all_text)

    return {
        "date": date,
        "title": title,
        "source_file": filename,
        "turns": turns,
        "language": language,
        "skip_reason": None,
    }
| |
|
| |
|
def make_turn_id(source_file: str, turn_index: int) -> str:
    """Build a stable turn ID: "<filename without .md>/<zero-padded index>"."""
    stem = source_file.rsplit(".md", 1)[0]
    index = format(turn_index, "03d")
    return f"{stem}/{index}"
| |
|
| |
|
def main():
    """Glob all transcript files, parse them, and write the dataset.

    Outputs:
        OUTPUT_DIR/turns.jsonl   -- one JSON object per speaker turn
        OUTPUT_DIR/metadata.json -- corpus-level statistics
    Prints a run summary to stdout.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    md_files = sorted(glob.glob(os.path.join(TRANSCRIPT_DIR, "*.md")))
    print(f"Found {len(md_files)} .md files")

    all_turns = []        # flat list of per-turn records, in file order
    skipped = []          # {"file", "reason"} for every skipped input
    parsed_count = 0
    speaker_counter = Counter()
    lang_counter = Counter()
    dates = []

    for filepath in md_files:
        result = parse_file(filepath)
        if result is None:
            continue

        if result.get("skip_reason"):
            skipped.append({
                "file": result["source_file"],
                "reason": result["skip_reason"],
            })
            continue

        parsed_count += 1
        turns = result["turns"]
        total_turns = len(turns)
        lang_counter[result["language"]] += 1
        dates.append(result["date"])

        for i, turn in enumerate(turns):
            speaker = turn["speaker"]
            speaker_counter[speaker] += 1
            turn_obj = {
                "id": make_turn_id(result["source_file"], i),
                "date": result["date"],
                "title": result["title"],
                "source_file": result["source_file"],
                "speaker": speaker,
                "text": turn["text"],
                "turn_index": i,
                "is_audrey": is_audrey(speaker),
                "language": result["language"],
                "stage_directions": turn["stage_directions"],
                "total_turns": total_turns,
            }
            all_turns.append(turn_obj)

    # JSONL: one object per line; ensure_ascii=False keeps CJK text readable.
    turns_path = OUTPUT_DIR / "turns.jsonl"
    with open(turns_path, "w", encoding="utf-8") as f:
        for turn in all_turns:
            f.write(json.dumps(turn, ensure_ascii=False) + "\n")

    # Corpus-level statistics.
    audrey_turns = sum(1 for t in all_turns if t["is_audrey"])
    sorted_dates = sorted(dates) if dates else []
    top_speakers = speaker_counter.most_common(20)

    metadata = {
        "total_files_found": len(md_files),
        "total_files_parsed": parsed_count,
        "total_files_skipped": len(skipped),
        "total_turns": len(all_turns),
        "total_audrey_turns": audrey_turns,
        "language_distribution": dict(lang_counter),
        "date_range": {
            "earliest": sorted_dates[0] if sorted_dates else None,
            "latest": sorted_dates[-1] if sorted_dates else None,
        },
        "top_speakers": [{"speaker": s, "count": c} for s, c in top_speakers],
        "skipped_files": skipped,
    }

    meta_path = OUTPUT_DIR / "metadata.json"
    with open(meta_path, "w", encoding="utf-8") as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)

    # Summary for the operator.
    print(f"\nParsed {parsed_count} files, skipped {len(skipped)}")
    print(f"Total turns: {len(all_turns)}")
    print(f"Audrey turns: {audrey_turns}")
    print(f"Language distribution: {dict(lang_counter)}")
    if sorted_dates:
        print(f"Date range: {sorted_dates[0]} to {sorted_dates[-1]}")
    print("Top 10 speakers:")  # fixed: was an f-string with no placeholders
    for speaker, count in top_speakers[:10]:
        print(f"  {speaker}: {count}")
    print(f"\nOutput: {turns_path}")
    print(f"Metadata: {meta_path}")
| |
|
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|