epub,
xhtml_path,
)
supported_languages = load_languages_data(data.plugin_path)
gloss_lang = prefs["gloss_lang"]
gloss_source = supported_languages[gloss_lang]["gloss_source"]
epub.modify_epub(prefs, data.book_lang, gloss_lang, gloss_source)
return
# Kindle
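    # final_start is used below as the denominator when reporting progress and
    # is passed to x_ray.finish() after the parsing loop.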
final_start = calculate_final_start(data)
if data.create_ww:
ll_conn, ll_path = create_lang_layer(
data.asin,
data.book_path,
data.acr,
data.revision,
)
if data.create_x:
x_ray_conn, x_ray_path = create_x_ray_db(
data.asin,
data.book_path,
data.book_lang,
data.plugin_path,
prefs,
mediawiki.sitename,
)
x_ray = X_Ray(x_ray_conn, mediawiki, wikidata, custom_x_ray)
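    # Stream the book text through spaCy. parse_book() pairs every text chunk
    # with a context value: the chunk position for KFX books, or a tuple of
    # (byte offset, escaped text) for MOBI books.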
for doc, context in nlp.pipe(parse_book(data), as_tuples=True):
if data.kfx_json is not None:
start = context
escaped_text = None
else:
start, escaped_text = context
if data.create_x:
find_named_entity(
start,
x_ray,
doc,
data.mobi_codec,
data.book_lang,
escaped_text,
custom_x_ray,
)
if data.create_ww:
kindle_find_lemma(
doc,
lemma_matcher,
start,
data.mobi_codec,
escaped_text,
lemmas_conn,
ll_conn,
data.book_lang,
prefs,
)
if notif:
notif.put((start / final_start, "Creating files"))
if data.create_x:
x_ray.finish(
x_ray_path,
final_start,
data.kfx_json,
data.mobi_html,
data.mobi_codec,
prefs,
)
if data.create_ww:
save_db(ll_conn, ll_path)
lemmas_conn.close() # type: ignore


def parse_book(data: ParseJobData) -> Iterator[tuple[str, tuple[int, str] | int]]:
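    """Yield a (text, context) tuple for every text chunk in the book.

    For KFX books the context is the chunk's position; for MOBI books it is
    a tuple of the chunk's byte offset in the HTML and its still-escaped text.
    """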
if data.kfx_json is not None:
for entry in filter(lambda x: x["type"] == 1, data.kfx_json):
            # Replace byte order marks and word joiners with spaces
yield re.sub(r"\ufeff|\u2060", " ", entry["content"]), entry["position"]
elif data.mobi_html is not None:
        # Extract text nodes between the HTML tags inside each <body> element
for match_body in re.finditer(b"<body.{3,}?</body>", data.mobi_html, re.DOTALL):
for m in re.finditer(b">[^<]{2,}<", match_body.group(0)):
text = m.group(0)[1:-1].decode(data.mobi_codec)
text = re.sub(r"\ufeff|\u2060", " ", text)
yield unescape(text), (match_body.start() + m.start() + 1, text)


def index_in_escaped_text(
token: str, escaped_text: str, start_offset: int
) -> tuple[int, int] | None:
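    """Locate token in escaped_text and return its start and end offsets.

    If the token is not found as-is, retry after converting its non-ASCII
    characters to numeric character references, since escaped_text comes from
    HTML and is still escaped. Return None if the token cannot be found.
    """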
if token not in escaped_text[start_offset:]:
        # Replace non-ASCII characters with numeric character references
token = escape(token, False).encode("ascii", "xmlcharrefreplace").decode()
if token in escaped_text[start_offset:]:
token_start = escaped_text.index(token, start_offset)
return token_start, token_start + len(token)
else: