|
|
import json |
|
|
import csv |
|
|
import re |
|
|
from langdetect import detect |
|
|
import pycountry |
|
|
from LanguageTool import Lang |
|
|
from sklearn.feature_extraction.text import TfidfVectorizer |
|
|
from guidance import models, gen, select, instruction, system, user, assistant |
|
|
import openai |
|
|
from romanize import uroman |
|
|
from ScriptureReference import ScriptureReference as SR |
|
|
import stanza |
|
|
import difflib |
|
|
import requests |
|
|
|
|
|
|
|
|
|
|
|
class TranslationNoteFinder:
    """Locate unfoldingWord translation notes for a verse and map each note's
    source term onto the matching n-gram in a target-language Bible text.

    The target Bible is expected to be plain text (local path or URL) with one
    verse per line, in the line order defined by ScriptureReference.
    """

    # Line numbers at which new documents (books/sections) begin in the
    # one-verse-per-line Bible text, supplied by ScriptureReference.
    verses = SR.verse_ones

    # 0-based line index used to sample a New Testament line for language
    # detection (presumably the first NT verse in a standard whole-Bible
    # layout — TODO confirm against the corpus format).
    FIRST_NT_LINE_INDEX = 23213

    def __init__(self, bible_text_path, api_key, lang_code=None):
        """Load the target Bible text and determine its language.

        Args:
            bible_text_path: Local path or http(s) URL of the Bible text.
            api_key: OpenAI API key used by best_ngram_for_note().
            lang_code: Optional ISO 639-1 code; when omitted, the language is
                auto-detected from a sampled New Testament line of the text.
        """
        self.verses = TranslationNoteFinder.verses
        # Load exactly once.  (The original implementation called
        # load_bible() a second time after language detection, re-reading
        # the file or re-fetching the URL for no benefit.)
        self.target_bible_text = self.load_bible(bible_text_path)

        if lang_code:
            self.language = lang_code
            self.lang_name = self._language_name(self.language)
            print(f'Language of target Bible text: {self.lang_name}')
        else:
            first_line_nt = self.target_bible_text.splitlines()[
                TranslationNoteFinder.FIRST_NT_LINE_INDEX]
            self.language = detect(first_line_nt)
            self.lang_name = self._language_name(self.language)
            print(f'Detected language of target Bible text: {self.lang_name}')

        self.api_key = api_key

    @staticmethod
    def _language_name(code):
        """Return the name for an ISO 639-1 code, or the code itself when
        pycountry has no entry (the original called `.name` directly and
        would raise AttributeError on the None returned for unknown codes)."""
        language = pycountry.languages.get(alpha_2=code)
        return language.name if language else code

    def parse_tsv_to_json(self, file_content, book_abbrev):
        """Parse a tn_<BOOK>.tsv translation-notes file into note dicts.

        Columns used: 0 = verse reference, 4 = source term, 6 = note text.

        Args:
            file_content: Raw TSV text (may be empty).
            book_abbrev: Book code prefixed onto each verse reference.

        Returns:
            List of {'source_term', 'translation_note', 'verse'} dicts.
        """
        result = []
        tsv_reader = csv.reader(file_content.splitlines(), delimiter='\t')
        for row in tsv_reader:
            # Require every indexed column to exist.  The original guard was
            # `len(row) > 3`, yet the body indexed row[4] and row[6], so rows
            # of length 4-6 raised IndexError.
            if row and len(row) > 6 and row[4].strip():
                result.append({
                    "source_term": row[4].strip(),
                    "translation_note": row[6].strip(),
                    "verse": book_abbrev + row[0].strip()
                })
        return result

    def load_translation_notes(self, book_abbrev):
        """Fetch and parse the unfoldingWord English translation notes for
        one book.

        Returns:
            List of note dicts; empty when the download fails (best-effort).
        """
        translation_notes_path = f'https://git.door43.org/unfoldingWord/en_tn/raw/branch/master/tn_{book_abbrev}.tsv'
        response = requests.get(translation_notes_path)
        # Non-200 responses yield no notes rather than raising.
        translation_notes_raw = response.text if response.status_code == 200 else ''
        return self.parse_tsv_to_json(translation_notes_raw, book_abbrev)

    def load_bible(self, bible_path):
        """Return the Bible text from a local path or http(s) URL.

        A failed HTTP request yields '' (best-effort, mirroring
        load_translation_notes); local-file errors propagate as OSError.
        """
        if bible_path.startswith('http'):
            response = requests.get(bible_path)
            return response.text if response.status_code == 200 else ''
        with open(bible_path, 'r', encoding='utf-8') as file:
            return file.read()

    def segment_corpus(self, bible_text):
        """Split a one-verse-per-line Bible text into documents.

        A new document starts at every 1-based line number present in
        self.verses; lines within a document are stripped and joined with
        single spaces.

        Returns:
            List of document strings.
        """
        documents = []
        current_document = []
        for line_number, line in enumerate(bible_text.splitlines(), start=1):
            # Flush the accumulated document at each document boundary.
            if line_number in self.verses and current_document:
                documents.append(" ".join(current_document))
                current_document = []
            current_document.append(line.strip())
        if current_document:
            documents.append(" ".join(current_document))
        return documents

    def best_ngram_for_note(self, note, target_verse_text, language):
        """Ask GPT-4 for the n-gram in `target_verse_text` that translates
        the note's source term.

        Returns:
            The model's answer when it appears verbatim (case-sensitive) in
            the verse; otherwise ''.
        """
        openai_lm = models.OpenAI("gpt-4", api_key=self.api_key)

        source_term = note['source_term'].strip()
        # Classify the quoted term as English, Hebrew, or Greek.
        source_lang = Lang(source_term, options=['en', 'he', 'el']).lang_name
        print(f'Source term: {source_term}, \nSource language: {source_lang}')

        with system():
            openai_lm += f'You are an expert at translating between {source_lang} and {language}.'
            openai_lm += f'When asked to translate, provide only the {language} translation of the {source_lang} term found in the {language} verse.'
            openai_lm += 'Nothing else. Do not provide any additional information or context. Be extrememly succinct in your translations.'
            openai_lm += f'You must choose only an N-gram which already exists in the {language} verse.'

        with user():
            openai_lm += f'What is a good translation of {source_term} from {source_lang} into {language} and is also found within this verse: {target_verse_text}?'

        with assistant():
            openai_lm += gen('openai_translation', stop='.')
        print(f'OpenAI translation: {openai_lm["openai_translation"]}')

        llm_output = openai_lm["openai_translation"].strip()
        print(f'LLM output: {llm_output}')
        if llm_output in target_verse_text:
            print(f'LLM output found in verse: {llm_output}')
            return llm_output
        print(f'LLM output not found in verse: {llm_output}')
        return ''

    def verse_notes(self, verse_ref):
        """Collect the translation notes for one verse and locate each note's
        n-gram within the target-language verse text.

        Args:
            verse_ref: Verse reference string understood by ScriptureReference.

        Returns:
            Dict with 'target_verse_text', 'verse_ref' (structured ref),
            'line_number', and 'ngrams' (list of dicts carrying the n-gram
            text, its character span, the source term, and the note).
        """
        v_ref = SR(verse_ref)
        translation_notes = self.load_translation_notes(v_ref.structured_ref['bookCode'])

        # Keep only the notes whose reference resolves to this verse's line.
        translation_notes_in_verse = [
            note for note in translation_notes
            if SR(note['verse']).line_number == v_ref.line_number
        ]
        print(f'Source terms for all translation notes in verse: {[note["source_term"] for note in translation_notes_in_verse]}')

        target_verse_text = self.target_bible_text.splitlines()[v_ref.line_number - 1]

        ngrams = []
        for note in translation_notes_in_verse:
            ngram = self.best_ngram_for_note(note, target_verse_text, self.lang_name)
            # NOTE(review): when best_ngram_for_note returns '', find('')
            # yields 0, so unmatched notes get a (0, 0) span — callers should
            # treat a falsy 'ngram' as "no match".
            start_pos = target_verse_text.lower().find(ngram.lower())
            ngrams.append({
                'ngram': ngram,
                'start_pos': start_pos,
                'end_pos': start_pos + len(ngram),
                'source_term': note['source_term'],
                'trans_note': note['translation_note']
            })

        print('Verse notes to be returned:')
        print(json.dumps(ngrams, indent=4))
        return {
            'target_verse_text': target_verse_text,
            'verse_ref': v_ref.structured_ref,
            'line_number': v_ref.line_number,
            'ngrams': ngrams
        }
|
|
|
|
|
|
|
|
|