text
stringlengths 1
93.6k
|
|---|
return None
|
def kindle_find_lemma(
    doc,
    lemma_matcher,
    start,
    mobi_codec,
    escaped_text,
    lemmas_conn,
    ll_conn,
    lemma_lang,
    prefs,
):
    """Find lemma matches in *doc* and record them for a Kindle book.

    Runs ``lemma_matcher`` over the spaCy ``doc``, keeps only
    non-overlapping spans, resolves each span to lemma data via
    ``get_kindle_lemma_data``, and writes every resolved occurrence with
    ``kindle_add_lemma``. ``lemma_starts`` tracks offsets already used so
    the writer can avoid duplicates.
    """
    from spacy.util import filter_spans

    lemma_starts: set[int] = set()
    candidate_spans = filter_spans(lemma_matcher(doc, as_spans=True))
    for span in candidate_spans:
        # POS comes from the span's first token; missing attributes
        # (e.g. models without a tagger) degrade to "".
        first_token = span.doc[span.start]
        lemma_data = get_kindle_lemma_data(
            getattr(span, "lemma_", ""),
            span.text,
            getattr(first_token, "pos_", ""),
            lemmas_conn,
            lemma_lang,
            prefs,
        )
        if lemma_data is None:
            continue  # no database entry for this lemma/POS combination
        kindle_add_lemma(
            span.start_char,
            span.end_char,
            start,
            doc.text,
            ll_conn,
            mobi_codec,
            escaped_text,
            lemma_starts,
            lemma_data,
        )
|
def epub_find_lemma(
    doc,
    lemma_matcher,
    paragraph_start,
    paragraph_end,
    interval_tree,
    epub,
    xhtml_path,
):
    """Find lemma matches in *doc* and record them for an EPUB book.

    Runs ``lemma_matcher`` over the spaCy ``doc``, skips spans that
    overlap intervals already present in ``interval_tree`` (presumably
    ranges claimed by another annotator — confirm against caller), and
    registers each remaining span with ``epub.add_lemma``.
    """
    from spacy.util import filter_spans

    for span in filter_spans(lemma_matcher(doc, as_spans=True)):
        if interval_tree is not None and interval_tree.is_overlap(
            Interval(span.start_char, span.end_char - 1)
        ):
            # Skip only this overlapping span. The previous `return` here
            # aborted the whole paragraph, silently dropping every lemma
            # that followed the first overlap.
            continue
        pos = getattr(span.doc[span.start], "pos_", "")
        epub.add_lemma(
            getattr(span, "lemma_", ""),
            span.text,
            spacy_to_wiktionary_pos(pos) if pos != "" else "",
            paragraph_start,
            paragraph_end,
            span.start_char,
            span.end_char,
            xhtml_path,
        )
|
def spacy_to_kindle_pos(pos: str) -> str:
    """Translate a spaCy coarse POS tag to Kindle's POS label.

    spaCy uses the Universal Dependencies tag set:
    https://universaldependencies.org/u/pos
    Any tag without a Kindle equivalent maps to ``"other"``.
    """
    translation = {
        "NOUN": "noun",
        "VERB": "verb",
        "ADJ": "adjective",
        "ADV": "adverb",
        "CCONJ": "conjunction",
        "SCONJ": "conjunction",
        "ADP": "preposition",
        "PRON": "pronoun",
    }
    return translation.get(pos, "other")
|
def get_kindle_lemma_data(
|
lemma: str,
|
word: str,
|
pos: str,
|
conn: sqlite3.Connection,
|
lemma_lang: str,
|
prefs: Prefs,
|
) -> tuple[int, int] | None:
|
if pos != "":
|
return get_kindle_lemma_with_pos(lemma, word, pos, conn, lemma_lang, prefs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.