text
stringlengths 1
93.6k
|
|---|
x_ray.add_entity(text, ent.label_, ent_start, ent.sent.text.strip(), ent_len)
|
return intervals
|
def load_spacy(model: str, book_path: str | None, lemma_lang: str) -> Any:
    """Load and configure a spaCy pipeline for lemma lookup and X-Ray.

    :param model: spaCy model name; an empty string returns a blank
        (tokenizer-only) pipeline for ``lemma_lang``.
    :param book_path: path of the book being processed; ``None`` means no
        X-Ray is needed, so NER is excluded from the pipeline.
    :param lemma_lang: language code used for the blank-pipeline fallback.
    :return: the configured spaCy ``Language`` object.
    """
    import spacy

    if model == "":
        return spacy.blank(lemma_lang)

    # The dependency parser is never needed: sentence segmentation is done
    # by the lighter "senter" component instead (enabled below).
    excluded_components = ["parser"]
    if book_path is None:
        excluded_components.append("ner")
    nlp = spacy.load(model, exclude=excluded_components)
    # NOTE(review): the original tested `book_path is not None` twice in a
    # row; the two branches are merged here — behavior is unchanged.
    if book_path is not None:
        # simpler and faster https://spacy.io/usage/linguistic-features#sbd
        nlp.enable_pipe("senter")
        custom_x_path = get_custom_x_path(book_path)
        if custom_x_path.exists():
            # Case-insensitive matching of user-defined X-Ray entities.
            ruler = nlp.add_pipe(
                "entity_ruler", before="ner", config={"phrase_matcher_attr": "LOWER"}
            )
            patterns = []
            with custom_x_path.open(encoding="utf-8") as f:
                # Each record: [name, label, comma-separated aliases, ...];
                # extra fields are ignored. The entity id is always the
                # canonical name so aliases resolve to the same entity.
                for name, label, aliases, *_ in json.load(f):
                    patterns.append({"label": label, "pattern": name, "id": name})
                    for alias in [x.strip() for x in aliases.split(",")]:
                        patterns.append({"label": label, "pattern": alias, "id": name})
            ruler.add_patterns(patterns)
    return nlp
|
def create_spacy_matcher(
    nlp, model, lemma_lang, is_kindle, lemmas_conn, plugin_path, prefs
):
    """Create a ``PhraseMatcher`` pre-loaded with lemma phrase docs.

    Lemma docs are cached on disk as a ``DocBin``; if the cache file for this
    model/language/format combination does not exist yet it is created first
    via ``save_spacy_docs``.

    :param nlp: loaded spaCy pipeline whose vocab backs the matcher.
    :param model: spaCy model name (used to locate the cached docs).
    :param lemma_lang: language code of the lemmas.
    :param is_kindle: whether the docs are for the Kindle format.
    :param lemmas_conn: database connection passed through to
        ``save_spacy_docs`` when the cache must be (re)built.
    :param plugin_path: plugin install path, root of the data files.
    :param prefs: plugin preferences, forwarded to path/doc helpers.
    :return: a case-insensitive (``attr="LOWER"``) ``PhraseMatcher``.
    """
    from spacy.matcher import PhraseMatcher
    from spacy.tokens import DocBin

    # These pipes are not needed for phrase matching; disabling the ones
    # actually present speeds up deserializing the DocBin docs below.
    disabled_pipes = list({"ner", "parser", "senter"} & set(nlp.pipe_names))
    pkg_versions = load_plugin_json(plugin_path, "data/deps.json")
    model_version = get_spacy_model_version(model, pkg_versions)
    lemma_matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
    lemmas_doc_path = spacy_doc_path(
        model, model_version, lemma_lang, is_kindle, plugin_path, prefs
    )
    if not lemmas_doc_path.exists():
        # Cache miss: build and persist the lemma docs before loading.
        save_spacy_docs(
            nlp,
            model,
            model_version,
            lemma_lang,
            is_kindle,
            lemmas_conn,
            plugin_path,
            prefs,
        )
    lemmas_doc_bin = DocBin().from_disk(lemmas_doc_path)
    with nlp.select_pipes(disable=disabled_pipes):
        lemma_matcher.add("lemmas", lemmas_doc_bin.get_docs(nlp.vocab))
    return lemma_matcher
|
# <FILESEP>
|
import os
|
import threading
|
import json
|
from datetime import datetime
|
import litellm
|
import random
|
import asyncio
|
from loguru import logger
|
from text import SentenceStream
|
voice_tone_description = """<character voice tone> is used by the voice generator to choose the appropriate voice and intonation for <character response text>.
|
<character voice tone> is strictly one of the following:
|
- "neutral": conversation is normal, neutral, like a business conversation or a conversation with a new acquaintance or a stranger
|
- "warm": conversation is warm, like a conversation with a friend or a conversation with a partner
|
- "erotic": conversation is about sex, love, or romance
|
- "excited": conversation is excited, like a happy announcement or surprising news
|
- "sad": conversation is sad, like a sad story or a sad conversation
|
"""
|
narrator_comment_format_description = """<character response text> contains comments made by the narrator.
|
The comments are always in the third person and enclosed in asterisks.
|
Examples:
|
- Are you serious?! *her eyes widened* How are you going to do that?
|
- *he looks down* I'm not sure I can do that.
|
- I'm glad you're here. *she rushed to hug him*
|
"""
|
character_agent_message_format_voice_tone = (
|
"Respond with the following JSON object:"
|
'{"text": "<character response text>", "voice_tone": "<character voice tone>"}'
|
f"\n{voice_tone_description}"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.