"""Run the end-to-end pipeline (document/persona/worldview layers).""" from __future__ import annotations import argparse from pathlib import Path import re from typing import Dict, List from fic_agent.config import RuntimeConfig from fic_agent.ingest.pipeline import ( build_document_layer, chunks_to_dicts, dialogues_to_dicts, extract_dialogue, load_text_file, save_jsonl, ) from fic_agent.persona.profile import ( build_persona_profile, render_persona_prompt, save_persona_profile, save_persona_prompt, ) from fic_agent.worldview.worldview import ( build_worldview_notes, build_worldview_notes_llm, save_worldview_notes, ) from fic_agent.retrieval.retriever import build_index_for_texts def _norm_text(value: str) -> str: return re.sub(r"\s+", " ", (value or "").strip()).lower() def _select_worldview_for_character( character: str, worldview_notes: List[Dict], character_chunk_ids: set[str], top_n: int = 6, ) -> List[str]: char_tokens = set(_norm_text(character).split()) ranked: List[tuple[int, str]] = [] for note in worldview_notes: text = (note.get("text") or "").strip() if not text: continue entity = _norm_text(str(note.get("entity") or "")) text_l = _norm_text(text) source_chunk = note.get("source_chunk") score = 0 if source_chunk in character_chunk_ids: score += 2 if entity and char_tokens and (set(entity.split()) & char_tokens): score += 4 if char_tokens and any(re.search(rf"\b{re.escape(tok)}\b", text_l) for tok in char_tokens): score += 3 if note.get("type") in {"rule", "event"} and source_chunk in character_chunk_ids: score += 1 if score > 0: ranked.append((score, text)) ranked.sort(key=lambda x: (x[0], len(x[1])), reverse=True) selected: List[str] = [] seen = set() for _, text in ranked: k = _norm_text(text) if k in seen: continue seen.add(k) selected.append(text) if len(selected) >= top_n: break if selected: return selected # Fallback: provide a small set of global rule/setting notes. 
    for note in worldview_notes:
        t = (note.get("type") or "").lower()
        if t not in {"rule", "setting", "event", "location", "organization"}:
            continue
        text = (note.get("text") or "").strip()
        if not text:
            continue
        k = _norm_text(text)
        if k in seen:
            continue
        seen.add(k)
        selected.append(text)
        if len(selected) >= min(3, top_n):
            break
    return selected


def main() -> None:
    parser = argparse.ArgumentParser(description="fic-agent pipeline")
    parser.add_argument("--input", required=True, help="Path to raw novel text")
    parser.add_argument("--book-id", default="book", help="Book id")
    parser.add_argument("--character", default=None, help="Character name for persona")
    parser.add_argument(
        "--characters",
        default=None,
        help="Comma-separated character list for speaker detection",
    )
    parser.add_argument(
        "--all-characters",
        action="store_true",
        help=(
            "Generate a persona profile and prompt for every character in "
            "--characters (or for all detected speakers if --characters is absent)."
        ),
    )
    parser.add_argument("--max-chars", type=int, default=2000)
    parser.add_argument("--overlap", type=int, default=200)
    parser.add_argument("--build-index", action="store_true", help="Build vector indexes")
    parser.add_argument(
        "--worldview-llm",
        action="store_true",
        help="Use LLM to extract worldview notes",
    )
    args = parser.parse_args()

    cfg = RuntimeConfig()
    raw_text = load_text_file(args.input)
    chunks = build_document_layer(
        raw_text, book_id=args.book_id, max_chars=args.max_chars, overlap=args.overlap
    )
    chunk_dicts = chunks_to_dicts(chunks)

    processed_dir = Path(cfg.data_processed_dir)
    processed_dir.mkdir(parents=True, exist_ok=True)
    chunks_path = processed_dir / "chunks.jsonl"
    save_jsonl(chunk_dicts, str(chunks_path))

    character_candidates = None
    if args.characters:
        character_candidates = [c.strip() for c in args.characters.split(",") if c.strip()]

    dialogues = extract_dialogue(chunks, cfg=cfg, character_candidates=character_candidates)
    dialogues_path = processed_dir / "dialogues.jsonl"
    save_jsonl(dialogues_to_dicts(dialogues), str(dialogues_path))

    # Worldview notes
    if args.worldview_llm:
        worldview_notes = build_worldview_notes_llm(chunk_dicts, cfg)
    else:
        worldview_notes = build_worldview_notes(chunk_dicts)
    worldview_path = processed_dir / "worldview_notes.jsonl"
    save_worldview_notes(worldview_notes, str(worldview_path))

    # Persona profile(s)
    persona_outputs: List[tuple[str, Path, Path]] = []
    persona_targets: List[str] = []
    if args.all_characters:
        if character_candidates:
            persona_targets = character_candidates
        else:
            # Derive from extracted speakers when candidates are not provided.
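            # Deduplicate while preserving first-appearance order, so personas
            # are generated in the order speakers first occur in the text.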
            seen = set()
            for d in dialogues:
                if d.speaker and d.speaker not in seen:
                    seen.add(d.speaker)
                    persona_targets.append(d.speaker)
    elif args.character:
        persona_targets = [args.character]

    for character in persona_targets:
        utterances: List[str] = [d.utterance for d in dialogues if d.speaker == character]
        background_utterances: List[str] = [
            d.utterance for d in dialogues if d.speaker != character
        ]
        character_chunk_ids = {d.chunk_id for d in dialogues if d.speaker == character}
        all_speakers = sorted({d.speaker for d in dialogues if d.speaker})
        excluded_terms = sorted(set((character_candidates or []) + all_speakers))
        character_worldview_notes = _select_worldview_for_character(
            character=character,
            worldview_notes=worldview_notes,
            character_chunk_ids=character_chunk_ids,
            top_n=6,
        )
        profile = build_persona_profile(
            character,
            utterances,
            background_utterances=background_utterances,
            excluded_terms=excluded_terms,
            worldview_notes=character_worldview_notes,
        )
        safe_character = character.replace("/", "_")
        persona_path = processed_dir / f"persona_{safe_character}.json"
        save_persona_profile(profile, str(persona_path))
        persona_prompt_path = processed_dir / f"persona_{safe_character}_prompt.txt"
        save_persona_prompt(render_persona_prompt(profile), str(persona_prompt_path))
        persona_outputs.append((character, persona_path, persona_prompt_path))

    print(f"Saved chunks to {chunks_path}")
    print(f"Saved dialogues to {dialogues_path}")
    for character, persona_path, persona_prompt_path in persona_outputs:
        print(f"Saved persona for {character} to {persona_path}")
        print(f"Saved persona prompt for {character} to {persona_prompt_path}")
    print(f"Saved worldview notes to {worldview_path}")

    if args.build_index:
        # Facts
        build_index_for_texts(
            [c["text"] for c in chunk_dicts],
            [
                {"id": c["chunk_id"], "text": c["text"], "chapter_id": c["chapter_id"]}
                for c in chunk_dicts
            ],
            cfg,
            "facts",
        )
        # Persona
        if dialogues:
            build_index_for_texts(
                [d.utterance for d in dialogues],
                [
                    {
                        "id": f"dlg-{i}",
                        "text": d.utterance,
                        "speaker": d.speaker,
                        "chunk_id": d.chunk_id,
                    }
                    for i, d in enumerate(dialogues)
                ],
                cfg,
                "persona",
            )
        else:
            print("No dialogue extracted; skipping persona index.")
        # Worldview
        if worldview_notes:
            worldview_texts = []
            worldview_meta = []
            seen_worldview_text = set()
            for i, w in enumerate(worldview_notes):
                text = str(w.get("text", "")).strip()
                if not text:
                    continue
                norm = " ".join(text.split()).lower()
                if norm in seen_worldview_text:
                    continue
                seen_worldview_text.add(norm)
                worldview_texts.append(text)
                worldview_meta.append(
                    {
                        "id": f"wv-{i}",
                        "text": text,
                        "type": w.get("type"),
                        "entity": w.get("entity"),
                        "source_chunk": w.get("source_chunk"),
                    }
                )
            build_index_for_texts(
                worldview_texts,
                worldview_meta,
                cfg,
                "worldview",
            )
        else:
            print("No worldview notes extracted; skipping worldview index.")
        print("Built faiss indexes in", cfg.data_index_dir)


if __name__ == "__main__":
    main()
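
# Example invocation (script and file names are illustrative; the flags match
# the parser defined above):
#   python pipeline.py --input data/raw/novel.txt --book-id novel \
#       --characters "Alice,Bob" --all-characters --build-index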