text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _retrieve_tag(self, text):
    """Tag text with the chosen tagger and clean tags.

    Tag format: [('word', 'tag')]
    Tag data format: Perseus Style
    (see https://github.com/cltk/latin_treebank_perseus)

    :param text: string
    :return: list of (word, pos tag) tuples; None for an unknown tagger
    :rtype : list
    """
    # All supported taggers share the same call signature and return
    # shape, so dispatch by attribute name instead of duplicating the
    # three identical branch bodies.
    if self.tagger in ('tag_ngram_123_backoff', 'tag_tnt', 'tag_crf'):
        tag_method = getattr(POSTag('latin'), self.tagger)
        tags = tag_method(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _retrieve_morpheus_entry(self, word):
"""Return Morpheus entry for word Entry format: [(head word, tag, macronized form)] :param word: unmacronized, lowercased word :ptype word: string :return: Morpheus entry in tuples :rtype : list """ |
entry = self.macron_data.get(word)
if entry is None:
logger.info('No Morpheus entry found for {}.'.format(word))
return None
elif len(entry) == 0:
logger.info('No Morpheus entry found for {}.'.format(word))
return entry |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _macronize_word(self, word):
    """Return the macronized form of a single tagged word.

    :param word: (word, tag) pair produced by the POS tagger
    :ptype word: tuple
    :return: (word, tag, macronized_form)
    :rtype : tuple
    """
    head_word = word[0]
    tag = word[1]
    if tag is None:
        # Tagger failed on this token; fall back to the unmacronized form.
        logger.info('Tagger {} could not tag {}.'.format(self.tagger, head_word))
        return head_word, tag, head_word
    elif tag == 'U--------':
        # 'U' tag marks unknown tokens; leave the word unchanged.
        return (head_word, tag.lower(), head_word)
    else:
        entries = self._retrieve_morpheus_entry(head_word)
        if entries is None:
            return head_word, tag.lower(), head_word
        # Keep only Morpheus entries whose tag matches the POS tag.
        matched_entry = [entry for entry in entries if entry[0] == tag.lower()]
        if len(matched_entry) == 0:
            logger.info('No matching Morpheus entry found for {}.'.format(head_word))
            # No tag match: default to the first entry's macronization.
            return head_word, tag.lower(), entries[0][2]
        elif len(matched_entry) == 1:
            return head_word, tag.lower(), matched_entry[0][2].lower()
        else:
            logger.info('Multiple matching entries found for {}.'.format(head_word))
            # NOTE(review): on multiple matches this picks the *second*
            # entry ([1]), not the first — confirm this is intentional.
            return head_word, tag.lower(), matched_entry[1][2].lower()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def macronize_tags(self, text):
    """Tag *text* and macronize each tagged word.

    E.g. "Gallia est omnis divisa in partes tres," ->
    [('gallia', 'n-s---fb-', 'galliā'), ('est', 'v3spia---', 'est'), ...]

    :param text: raw text
    :return: tuples of head word, tag, macronized form
    :rtype : list
    """
    tagged_words = self._retrieve_tag(text)
    return [self._macronize_word(tagged) for tagged in tagged_words]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def macronize_text(self, text):
    """Return the macronized form of *text* as a single string.

    E.g. "Gallia est omnis divisa in partes tres," ->
    "galliā est omnis dīvīsa in partēs trēs ,"

    :param text: raw text
    :return: macronized text
    :rtype : str
    """
    return " ".join(entry[2] for entry in self.macronize_tags(text))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string_tokenizer(self, untokenized_string: str, include_blanks=False):
    """Split a string of tablet text into a list of line strings.

    Based off CLTK's line tokenizer; use this for strings rather than
    .txt files.

    input: '20. u2-sza-bi-la-kum\n1. a-na ia-as2-ma-ah-{d}iszkur#\n2. qi2-bi2-ma\n'
    output: ['20. u2-sza-bi-la-kum', '1. a-na ia-as2-ma-ah-{d}iszkur#', '2. qi2-bi2-ma']

    :param untokenized_string: string to split
    :param include_blanks: keep empty lines in the output
    :return: lines as strings in list
    """
    assert isinstance(untokenized_string, str), \
        'Incoming argument must be a string.'
    if include_blanks:
        tokenized_lines = untokenized_string.splitlines()
    else:
        # Bug fix: the old code compared each line against the literal
        # r'\\n', which splitlines() can never produce, so blank lines
        # were always kept regardless of include_blanks.
        tokenized_lines = [line for line in untokenized_string.splitlines()
                           if line]
    line_output = []
    for line in tokenized_lines:
        # Strip out damage characters
        if not self.damage:  # Add 'xn' -- missing sign or number?
            line = ''.join(c for c in line if c not in "#[]?!*")
        line_output.append(line.rstrip())
    return line_output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def line_tokenizer(self, text):
    """Read a .txt file and return its lines as a list of strings.

    input: 21. u2-wa-a-ru at-ta e2-kal2-la-ka _e2_-ka wu-e-er
    output: ['21. u2-wa-a-ru at-ta e2-kal2-la-ka _e2_-ka wu-e-er', ...]

    :param text: path to a .txt file containing the untokenized string
    :return: lines as strings in list
    """
    # Validate before touching the filesystem.
    assert isinstance(text, str), 'Incoming argument must be a string.'
    # Read-only access suffices; the old 'r+' mode needlessly required
    # write permission on the file.
    with open(text, mode='r', encoding='utf8') as file:
        lines = file.readlines()
    line_output = []
    for line in lines:
        # Strip out damage characters
        if not self.damage:  # Add 'xn' -- missing sign or number?
            line = ''.join(c for c in line if c not in "#[]?!*")
        line_output.append(line.rstrip())
    return line_output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lang_data(self):
    """Load phonetic CSV data defining the phonetic vectors.

    Reads the all-script and Tamil-script phonetic data CSVs from the
    CLTK Sanskrit model directory under the user's home directory.

    :return: (all_phonetic_data, tamil_phonetic_data,
              all_phonetic_vectors, tamil_phonetic_vectors,
              phonetic_vector_length)
    """
    root = os.path.expanduser('~')
    csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')
    all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')
    tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')
    # Make helper function for this
    with open(all_phonetic_csv, 'r') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # Skip headers
        all_phonetic_data = [row for row in reader]
    with open(tamil_csv, 'r') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # Skip headers
        # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader]
        tamil_phonetic_data = [row for row in reader]
    # Handle better?
    # Convert '0'/'1' cells to ints; leave all other cells as strings.
    all_phonetic_data = [[int(cell) if cell == '0' or cell == '1' else cell for cell in row] for row in all_phonetic_data]
    tamil_phonetic_data = [[int(cell) if cell == '0' or cell == '1' else cell for cell in row] for row in tamil_phonetic_data]
    # Feature vectors start at column PHONETIC_VECTOR_START_OFFSET.
    all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data])
    tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data])
    phonetic_vector_length = all_phonetic_vectors.shape[1]
    return all_phonetic_data, tamil_phonetic_data, all_phonetic_vectors, tamil_phonetic_vectors, phonetic_vector_length
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def orthographic_syllabify(self, word):
    """Main syllabic function: split *word* into orthographic syllables.

    Walks the word's phonetic feature vectors and inserts a syllable
    break after vowels (with anusvaara handling), between consonant
    clusters, and around invalid/miscellaneous characters.

    :param word: word to syllabify
    :return: list of syllable strings
    """
    p_vectors = [self.get_phonetic_feature_vector(c, self.lang) for c in word]
    syllables = []
    for i in range(len(word)):
        v = p_vectors[i]
        syllables.append(word[i])
        if i + 1 < len(word) and (not self.is_valid(p_vectors[i + 1]) or self.is_misc(p_vectors[i + 1])):
            # Next character is invalid/miscellaneous: break here.
            syllables.append(u' ')
        elif not self.is_valid(v) or self.is_misc(v):
            syllables.append(u' ')
        elif self.is_vowel(v):
            # Keep a following anusvaara with this vowel when it is not
            # followed by a plosive, or when it ends the word.
            anu_nonplos = (i + 2 < len(word) and
                           self.is_anusvaar(p_vectors[i + 1]) and
                           not self.is_plosive(p_vectors[i + 2])
                           )
            anu_eow = (i + 2 == len(word) and
                       self.is_anusvaar(p_vectors[i + 1]))
            if not (anu_nonplos or anu_eow):
                syllables.append(u' ')
        elif i + 1 < len(word) and (self.is_consonant(v) or self.is_nukta(v)):
            if self.is_consonant(p_vectors[i + 1]):
                # Consonant cluster: break between the consonants.
                syllables.append(u' ')
            elif self.is_vowel(p_vectors[i + 1]) and not self.is_dependent_vowel(p_vectors[i + 1]):
                # An independent vowel starts a new syllable.
                syllables.append(u' ')
            elif self.is_anusvaar(p_vectors[i + 1]):
                anu_nonplos = (i + 2 < len(word) and not self.is_plosive(p_vectors[i + 2]))
                anu_eow = i + 2 == len(word)
                if not (anu_nonplos or anu_eow):
                    syllables.append(u' ')
    return u''.join(syllables).strip().split(u' ')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_file(filepath: str) -> str:
    """Read the file at *filepath* (with ``~`` expanded) and return its
    contents as a string."""
    # ? Check this is ok if absolute paths passed in
    expanded_path = os.path.expanduser(filepath)
    with open(expanded_path) as opened_file:  # type: IO
        return opened_file.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def return_concordance_all(self, tokens: List[str]) -> List[List[str]]:
    """Build concordances for every token in *tokens*.

    Tokens are sorted with Unicode collation (pyuca), each is run
    through ``return_concordance_word``, and the non-empty results are
    collected into a list of lists.

    :param tokens: words to look up
    :return: list of per-word concordance lists
    """
    coll = pyuca.Collator()  # type: pyuca.Collator
    tokens = sorted(tokens, key=coll.sort_key)  #! is the list order preserved?
    concordance_list = []  # type: List[List[str]]
    for token in tokens:
        concordance_list_for_word = self.return_concordance_word(token)  # List[str]
        # Skip words with no concordance hits.
        if concordance_list_for_word:
            concordance_list.append(concordance_list_for_word)
    return concordance_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hexameter(self, line: str) -> str:
    """Format a string of hexameter metrical stress patterns into foot
    divisions.

    Scans backwards from the end of the line, matching two-character
    endings/spondees and three-character dactyls, inserting separators
    between feet.

    :param line: the scansion pattern
    :return: the scansion string formatted with foot breaks
    -UU|-UU|-UU|--|-UU|--
    """
    mylist = list(line)
    items = len(mylist)
    # Start the window on the line's final two characters.
    idx_start = items - 2
    idx_end = items
    while idx_start > 0:
        potential_foot = "".join(mylist[idx_start: idx_end])
        if potential_foot == self.constants.HEXAMETER_ENDING or \
                potential_foot == self.constants.SPONDEE:
            # Two-mark foot: insert a break and step back two marks.
            mylist.insert(idx_start, self.constants.FOOT_SEPARATOR)
            idx_start -= 1
            idx_end -= 2
        if potential_foot == self.constants.DACTYL:
            # Three-mark foot: step back three marks.
            # NOTE(review): inserts a literal "|" here rather than
            # self.constants.FOOT_SEPARATOR — confirm they are the same.
            mylist.insert(idx_start, "|")
            idx_start -= 1
            idx_end -= 3
        idx_start -= 1
    return "".join(mylist)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_line_scansion(self, line: str, scansion: str) -> str:
    """Merge a line of verse with its scansion string.

    Stressed positions get their accented (macronized) character; the
    second element of a diphthong is not accented, except that a vowel
    after 'qu' is always accented (the 'u' is consonantal there).

    :param line: the original Latin verse line
    :param scansion: the scansion pattern
    :return: the original line with the scansion pattern applied via macrons

    Ārma virūmque canō, Troiae quī prīmus ab ōrīs
    lītora, mūltum īlle ēt tērrīs iāctātus et ālto
    aut facere, haec ā tē dīctaque fāctaque sūnt
    """
    letters = list(line)
    marks = list(scansion)
    if len(scansion) < len(line):
        # NOTE(review): splitting a string of spaces yields [], so this
        # "padding" is a no-op; harmless, because the loop below only
        # iterates over len(marks).
        marks += ((len(line) - len(scansion)) * " ").split()
    for idx in range(0, len(marks)):
        if marks[idx] == self.constants.STRESSED:
            vowel = letters[idx]
            if vowel not in self.stress_accent_dict:
                LOG.error("problem! vowel: {} not in dict for line {}".format(vowel, line))
                pass
            else:
                if idx > 1:
                    if (letters[idx - 2] + letters[idx - 1]).lower() == "qu":
                        # After 'qu' the u is consonantal, so accent this vowel.
                        new_vowel = self.stress_accent_dict[vowel]
                        letters[idx] = new_vowel
                        continue
                if idx > 0:
                    if letters[idx - 1] + vowel in self.constants.DIPTHONGS:
                        # Do not accent the second element of a diphthong.
                        continue
                    new_vowel = self.stress_accent_dict[vowel]
                    letters[idx] = new_vowel
                else:
                    new_vowel = self.stress_accent_dict[vowel]
                    letters[idx] = new_vowel
    return "".join(letters).rstrip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def arabicrange():
    u"""Return a list of Arabic characters.

    Returns the characters from \u0600 up to and including \u0652
    (the old docstring claimed \u060c as the start, which did not match
    the code).

    @return: list of arabic characteres.
    @rtype: unicode
    """
    # Resolve the character builder once instead of catching NameError
    # on every iteration: unichr on Python 2, chr on Python 3. These
    # code points are all in the BMP, so no ValueError can occur.
    try:
        to_char = unichr
    except NameError:
        to_char = chr
    return [to_char(code) for code in range(0x0600, 0x0653)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vocalized(word):
    """Checks if the arabic word is vocalized.

    The word musn't have any spaces and punctuation.

    @param word: arabic unicode char
    @type word: unicode
    @return: if the word is vocalized
    @rtype: Boolean
    """
    # Tashkeel marks are combining characters (not alphabetic), so a
    # purely alphabetic word cannot carry any.
    if word.isalpha():
        return False
    # Bug fix: the old loop returned after inspecting only the first
    # character; scan the whole word for any tashkeel mark. Also makes
    # the empty string return False instead of an implicit None.
    return any(is_tashkeel(char) for char in word)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_arabicstring(text):
    """Checks for an Arabic standard Unicode block characters.

    An arabic string can contain spaces, digits and punctuation, but
    only arabic standard characters, not extended arabic.

    @param text: input text
    @type text: unicode
    @return: True if all charaters are in Arabic block
    @rtype: Boolean
    """
    # Fail on any character outside the Arabic block (\u0600-\u0652),
    # the lam-alef ligatures, whitespace and digits.
    if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
                 % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_arabicword(word):
    """Checks for a valid Arabic word.

    An Arabic word does not contain spaces, digits or punctuation. To
    avoid some spelling errors, TEH_MARBUTA must be near the end and
    the word must not start with a haraka or a waw/yeh hamza.

    @param word: input word
    @type word: unicode
    @return: True if all charaters are in Arabic block
    @rtype: Boolean
    """
    if len(word) == 0:
        return False
    elif re.search(u"([^\u0600-\u0652%s%s%s])" \
                   % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), word):
        # Any character outside the Arabic block or lam-alef ligatures.
        return False
    elif is_haraka(word[0]) or word[0] in (WAW_HAMZA, YEH_HAMZA):
        # A word cannot start with a haraka or a waw/yeh hamza.
        return False
    # if Teh Marbuta or Alef_Maksura not in the end
    elif re.match(u"^(.)*[%s](.)+$" % ALEF_MAKSURA, word):
        # Alef maksura must be word-final.
        return False
    elif re.match(u"^(.)*[%s]([^%s%s%s])(.)+$" % \
                  (TEH_MARBUTA, DAMMA, KASRA, FATHA), word):
        # Teh marbuta may only be followed by a haraka at word end.
        return False
    else:
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_hamza(word):
    """Standardize the Hamzat into one form of hamza.

    Replaces Madda by hamza and alef. Replaces the LamAlefs by
    simplified letters.

    @param word: arabic text.
    @type word: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    if word.startswith(ALEF_MADDA):
        # Word-initial alef-madda: expand depending on the following
        # marks (haraka/shadda context determines hamza+alef vs hamza+hamza).
        if len(word) >= 3 and (word[1] not in HARAKAT) and \
                (word[2] == SHADDA or len(word) == 3):
            word = HAMZA + ALEF + word[1:]
        else:
            word = HAMZA + HAMZA + word[1:]
    # convert all Hamza from into one form
    word = word.replace(ALEF_MADDA, HAMZA + HAMZA)
    word = HAMZAT_PATTERN.sub(HAMZA, word)
    return word
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def joint(letters, marks):
    """Join the letters with the marks into a vocalized word.

    The length of letters and marks must be equal.

    @param letters: the word letters
    @type letters: unicode
    @param marks: the word marks
    @type marks: unicode
    @return: the joined word ('' on length mismatch, False on failure)
    @rtype: unicode
    """
    # The length of letters and marks must be equal
    if len(letters) != len(marks):
        return ""
    # Reverse the stacks so pop() yields characters in word order.
    stack_letter = stack.Stack(letters)
    stack_letter.items.reverse()
    stack_mark = stack.Stack(marks)
    stack_mark.items.reverse()
    word_stack = stack.Stack()
    last_letter = stack_letter.pop()
    last_mark = stack_mark.pop()
    vowels = HARAKAT
    while last_letter != None and last_mark != None:
        if last_letter == SHADDA:
            # A shadda attaches directly to the preceding consonant:
            # drop an intervening haraka before pushing the shadda.
            top = word_stack.pop()
            if top not in vowels:
                word_stack.push(top)
            word_stack.push(last_letter)
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        else:
            word_stack.push(last_letter)
            # Only push defined harakat.
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        last_letter = stack_letter.pop()
        last_mark = stack_mark.pop()
    # Both stacks must be fully consumed for a successful join.
    if not (stack_letter.is_empty() and stack_mark.is_empty()):
        return False
    else:
        return ''.join(word_stack.items)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shaddalike(partial, fully):
    """Check shadda placement agreement between two spellings.

    If the two words have the same letters and the same harakats, this
    function returns True. The first word is partially vocalized, the
    second is fully vocalized; if the partial word contains a shadda,
    it must be at the same place in the fully vocalized one.

    @param partial: the partially vocalized word
    @type partial: unicode
    @param fully: the fully vocalized word
    @type fully: unicode
    @return: if contains shadda
    @rtype: Boolean
    """
    # The input has no shadda: nothing to check.
    if not has_shadda(partial):
        return True
    # The input has a shadda but the candidate does not: mismatch.
    elif not has_shadda(fully) and has_shadda(partial):
        return False
    # Both carry a shadda: verify the positions match.
    partial = strip_harakat(partial)
    fully = strip_harakat(fully)
    pstack = stack.Stack(partial)
    vstack = stack.Stack(fully)
    plast = pstack.pop()
    vlast = vstack.pop()
    # if debug: print "+0", Pstack, Vstack
    while plast != None and vlast != None:
        if plast == vlast:
            plast = pstack.pop()
            vlast = vstack.pop()
        elif plast == SHADDA and vlast != SHADDA:
            # Shadda in partial with no counterpart: abort as mismatch.
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
        elif plast != SHADDA and vlast == SHADDA:
            # Extra shadda in the fully vocalized word is acceptable: skip it.
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            vlast = vstack.pop()
        else:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
    # Agreement requires both words to be fully consumed.
    if not (pstack.is_empty() and vstack.is_empty()):
        return False
    else:
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reduce_tashkeel(text):
    """Reduce the Tashkeel, by deleting evident cases.

    Strips harakat whose value can be inferred from context, leaving a
    partially vocalized text.

    @param text: the input text fully vocalized.
    @type text: unicode.
    @return: partially vocalized text.
    @rtype: unicode.
    """
    # Each pattern matches one "evident" haraka to delete.
    patterns = [
        # delete all fathat, except on waw and yeh
        u"(?<!(%s|%s))(%s|%s)" % (WAW, YEH, SUKUN, FATHA),
        # delete damma if followed by waw.
        u"%s(?=%s)" % (DAMMA, WAW),
        # delete kasra if followed by yeh.
        u"%s(?=%s)" % (KASRA, YEH),
        # delete fatha if followed by alef to reduce yeh maftouha
        # and waw maftouha before alef.
        u"%s(?=%s)" % (FATHA, ALEF),
        # delete fatha from yeh and waw if they are in the word begining.
        u"(?<=\s(%s|%s))%s" % (WAW, YEH, FATHA),
        # delete kasra if preceded by Hamza below alef.
        u"(?<=%s)%s" % (ALEF_HAMZA_BELOW, KASRA),
    ]
    reduced = text
    for pat in patterns:
        reduced = re.sub(pat, '', reduced)
    return reduced
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vocalized_similarity(word1, word2):
    """Compare two words, tolerating missing harakat.

    If the two words have the same letters and the same harakats, this
    function returns True. The two words can be fully vocalized or
    partially vocalized.

    @param word1: first word
    @type word1: unicode
    @param word2: second word
    @type word2: unicode
    @return: True if words are similar, else negative number of errors
    @rtype: Boolean / int
    """
    stack1 = stack.Stack(word1)
    stack2 = stack.Stack(word2)
    last1 = stack1.pop()
    last2 = stack2.pop()
    err_count = 0
    vowels = HARAKAT
    while last1 != None and last2 != None:
        if last1 == last2:
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 in vowels and last2 not in vowels:
            # Haraka present only in word1: skip it.
            last1 = stack1.pop()
        elif last1 not in vowels and last2 in vowels:
            # Haraka present only in word2: skip it.
            last2 = stack2.pop()
        else:
            # break
            if last1 == SHADDA:
                last1 = stack1.pop()
            elif last2 == SHADDA:
                last2 = stack2.pop()
            else:
                # Genuine letter mismatch: advance both and count an error.
                last1 = stack1.pop()
                last2 = stack2.pop()
                err_count += 1
    if err_count > 0:
        return -err_count
    else:
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize(text=""):
    """Tokenize text into words.

    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    if text == '':
        return []
    # Split on the token boundary pattern; don't remove newline \n.
    pieces = TOKEN_PATTERN.split(text)
    stripped = [TOKEN_REPLACE.sub('', piece) for piece in pieces if piece]
    # Drop any tokens reduced to the empty string.
    return [piece for piece in stripped if piece]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_docs(corpus, lemmatize, rm_stops):
    """Open and process files from a corpus.

    Yields one sentence at a time per author file; each sentence is a
    list of tokenized (optionally lemmatized) words.

    :param corpus: 'phi5' (Latin) or 'tlg' (Greek)
    :param lemmatize: lemmatize each token
    :param rm_stops: remove stopwords
    """
    assert corpus in ['phi5', 'tlg']
    if corpus == 'phi5':
        language = 'latin'
        filepaths = assemble_phi5_author_filepaths()
        jv_replacer = JVReplacer()
        text_cleaner = phi5_plaintext_cleanup
        word_tokenizer = nltk_tokenize_words
        if rm_stops:
            stops = latin_stops
        else:
            stops = None
    elif corpus == 'tlg':
        language = 'greek'
        filepaths = assemble_tlg_author_filepaths()
        text_cleaner = tlg_plaintext_cleanup
        word_tokenizer = nltk_tokenize_words
        if rm_stops:
            # NOTE(review): uses the *Latin* stoplist for the Greek
            # corpus — looks like a copy/paste slip; confirm whether a
            # Greek stoplist should be used here.
            stops = latin_stops
        else:
            stops = None
    if lemmatize:
        lemmatizer = LemmaReplacer(language)
    sent_tokenizer = TokenizeSentence(language)
    for filepath in filepaths:
        with open(filepath) as f:
            text = f.read()
        # light first-pass cleanup, before sentence tokenization (which relies on punctuation)
        text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
        sent_tokens = sent_tokenizer.tokenize_sentences(text)
        # doc_sentences = []
        for sentence in sent_tokens:
            # a second cleanup at sentence-level, to rm all punctuation
            sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
            sentence = word_tokenizer(sentence)
            sentence = [s.lower() for s in sentence]
            sentence = [w for w in sentence if w]
            if language == 'latin':
                # Drop leading hyphens left by the tokenizer.
                sentence = [w[1:] if w.startswith('-') else w for w in sentence]
            if stops:
                sentence = [w for w in sentence if w not in stops]
            sentence = [w for w in sentence if len(w) > 1]  # rm short words
            if sentence:
                sentence = sentence  # NOTE(review): no-op assignment, kept as-is
            if lemmatize:
                sentence = lemmatizer.lemmatize(sentence)
            if sentence and language == 'latin':
                sentence = [jv_replacer.replace(word) for word in sentence]
            if sentence:
                yield sentence
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_model(corpus, lemmatize=False, rm_stops=False, size=100, window=10, min_count=5, workers=4, sg=1, save_path=None):
    """Train a Word2Vec model over *corpus*.

    :param corpus: corpus name ('phi5' or 'tlg') passed to gen_docs
    :param lemmatize: lemmatize tokens before training
    :param rm_stops: remove stopwords before training
    :param size: embedding dimensionality
    :param window: context window size
    :param min_count: minimum token frequency
    :param workers: number of training threads
    :param sg: 1 for skip-gram, 0 for CBOW
    :param save_path: optional path (``~`` expanded) to save the model
    """
    # Simple training, with one large list
    t0 = time.time()
    sentences_stream = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops)
    # sentences_list = []
    # for sent in sentences_stream:
    #     sentences_list.append(sent)
    model = Word2Vec(sentences=list(sentences_stream), size=size, window=window, min_count=min_count, workers=workers,
                     sg=sg)
    # "Trim" the model of unnecessary data. Model cannot be updated anymore.
    model.init_sims(replace=True)
    if save_path:
        save_path = os.path.expanduser(save_path)
        model.save(save_path)
    print('Total training time for {0}: {1} minutes'.format(save_path, (time.time() - t0) / 60))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sims(word, language, lemmatized=False, threshold=0.70):
    """Get similar Word2Vec terms from vocabulary or trained model.

    Loads the pre-trained CLTK model for *language*, looks up terms
    similar to *word*, and returns those above *threshold*.

    TODO: Add option to install corpus if not available.

    :param word: query word
    :param language: 'greek' or 'latin'
    :param lemmatized: use the lemmatized model variant
    :param threshold: minimum similarity score to keep a match
    :return: list of similar terms, or None when the word is absent
    """
    # Normalize incoming word string
    jv_replacer = JVReplacer()
    if language == 'latin':
        # Note that casefold() seemingly does not work with diacritic
        # Greek, likely because of it expects single code points, not
        # diacritics. Look into global string normalization to code points
        # for all languages, especially Greek.
        word = jv_replacer.replace(word).casefold()
    model_dirs = {'greek': '~/cltk_data/greek/model/greek_word2vec_cltk',
                  'latin': '~/cltk_data/latin/model/latin_word2vec_cltk'}
    assert language in model_dirs.keys(), 'Langauges available with Word2Vec model: {}'.format(model_dirs.keys())
    if lemmatized:
        lemma_str = '_lemmed'
    else:
        lemma_str = ''
    model_name = '{0}_s100_w30_min5_sg{1}.model'.format(language, lemma_str)
    model_dir_abs = os.path.expanduser(model_dirs[language])
    model_path = os.path.join(model_dir_abs, model_name)
    try:
        model = Word2Vec.load(model_path)
    except FileNotFoundError as fnf_error:
        print(fnf_error)
        print("CLTK's Word2Vec models cannot be found. Please import '{}_word2vec_cltk'.".format(language))
        raise
    try:
        similars = model.most_similar(word)
    except KeyError as key_err:
        # Word not in vocabulary: suggest terms sharing its first 3 letters.
        print(key_err)
        possible_matches = []
        for term in model.vocab:
            if term.startswith(word[:3]):
                possible_matches.append(term)
        print("The following terms in the Word2Vec model you may be looking for: '{}'.".format(possible_matches))
        return None
    returned_sims = []
    for similar in similars:
        if similar[1] > threshold:
            returned_sims.append(similar[0])
    if not returned_sims:
        print("Matches found, but below the threshold of 'threshold={}'. Lower it to see these results.".format(threshold))
    return returned_sims
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invalid_foot_to_spondee(self, feet: list, foot: str, idx: int) -> str:
    """Coerce an invalid foot into a spondee.

    In hexameters, a single foot with an unstressed_stressed syllable
    pattern is often just a double spondee, so every unstressed mark in
    *foot* is promoted to stressed.

    :param feet: list of string representations of metrical feet
    :param foot: the bad foot to correct
    :param idx: the index of the foot to correct
    :return: corrected scansion
    -UU----U----UU
    """
    feet[idx] = foot.replace(self.constants.UNSTRESSED, self.constants.STRESSED)
    return "".join(feet)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correct_dactyl_chain(self, scansion: str) -> str:
    """Correct a broken dactyl chain in a scansion pattern.

    Three or more unstressed accents in a row is a broken dactyl chain,
    best detected and processed backwards. Since this method takes a
    Procrustean approach to modifying the scansion pattern, it is not
    used by default in the scan method; however, it is available as an
    optional keyword parameter, and users looking to further automate
    the generation of scansion candidates should consider using this
    as a fall back.

    :param scansion: scansion with broken dactyl chain; inverted
        amphibrachs not allowed
    :return: corrected line of scansion

    - - - - - U U - - - U U - x - - - U U - - - - - U U - U
    """
    # Character offsets of the non-space marks in the original string.
    mark_list = string_utils.mark_list(scansion)
    vals = list(scansion.replace(" ", ""))
    # ignore last two positions, save them
    feet = [vals.pop(), vals.pop()]
    length = len(vals)
    idx = length - 1
    # Walk backwards, consuming one foot (2 or 3 marks) per iteration.
    while idx > 0:
        one = vals[idx]
        two = vals[idx - 1]
        if idx > 1:
            three = vals[idx - 2]
        else:
            three = ""
        # Dactyl foot is okay, no corrections
        if one == self.constants.UNSTRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.STRESSED:
            feet += [one]
            feet += [two]
            feet += [three]
            idx -= 3
            continue
        # Spondee foot is okay, no corrections
        if one == self.constants.STRESSED and \
                two == self.constants.STRESSED:
            feet += [one]
            feet += [two]
            idx -= 2
            continue
        # handle "U U U" foot as "- U U"
        if one == self.constants.UNSTRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.UNSTRESSED:
            feet += [one]
            feet += [two]
            feet += [self.constants.STRESSED]
            idx -= 3
            continue
        # handle "U U -" foot as "- -"
        if one == self.constants.STRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.UNSTRESSED:
            feet += [self.constants.STRESSED]
            feet += [self.constants.STRESSED]
            idx -= 2
            continue
        # handle "- U" foot as "- -"
        if one == self.constants.UNSTRESSED and \
                two == self.constants.STRESSED:
            feet += [self.constants.STRESSED]
            feet += [two]
            idx -= 2
            continue
        # NOTE(review): if none of the patterns above match, idx is not
        # decremented and this loop never terminates — confirm inputs
        # are constrained to the handled patterns.
    # Feet were collected in reverse order; restore the original order.
    corrected = "".join(feet[::-1])
    new_line = list(" " * len(scansion))
    # Re-insert corrected marks at their original character offsets.
    for idx, car in enumerate(corrected):
        new_line[mark_list[idx]] = car
    return "".join(new_line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_raw_r_assimilation(last_syllable: str) -> str:
    """Apply Old Norse r-assimilation when adding the nominative ending.

    An -r preceded by an -s-, -l- or -n- becomes respectively an -s,
    -l or -n (the final consonant is doubled); otherwise a plain -r is
    appended.
    'armr' 'áss' 'stóll' 'steinn' 'vinn'

    :param last_syllable: last syllable of an Old Norse word
    :return: syllable with the ending applied
    """
    if last_syllable and last_syllable[-1] in ("l", "s", "n"):
        # Assimilation: double the final consonant instead of adding -r.
        return last_syllable + last_syllable[-1]
    return last_syllable + "r"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_r_ending_to_syllable(last_syllable: str, is_first=True) -> str:
    """Add the -r ending to the last syllable of an Old Norse word.

    In some cases, it really adds an -r. In other cases, it only doubles
    the last character or leaves the syllable unchanged.

    'armr' 'áss' 'stóll' 'jökull' 'steinn' 'mikill' 'sæll' 'litill'
    'vænn' 'lauss' 'vinr' 'selr' 'fagr' 'vitr' 'vetr' 'akr' 'Björn'
    'þurs' 'karl' 'hrafn'

    :param last_syllable: last syllable of the word
    :param is_first: is it the first syllable of the word?
    :return: inflected syllable
    """
    if len(last_syllable) >= 2:
        if last_syllable[-1] in ['l', 'n', 's', 'r']:
            if last_syllable[-2] in CONSONANTS:
                # Apocope of r
                return last_syllable
            else:
                # Assimilation of r
                if len(last_syllable) >= 3 and last_syllable[-3:-1] in DIPHTHONGS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in SHORT_VOWELS and is_first:
                    # No assimilation when r is supposed to be added to a
                    # stressed syllable whose last letter is l, n or s
                    # and the penultimate letter is a short vowel
                    return last_syllable + "r"
                elif last_syllable[-2] in SHORT_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in LONG_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                # NOTE(review): the two elif branches above and this
                # fallback all assimilate; they could be collapsed.
                return apply_raw_r_assimilation(last_syllable)
        else:
            return last_syllable + "r"
    else:
        return last_syllable + "r"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_r_ending(stem: str) -> str:
    """Add the nominative -r ending to an Old Norse noun stem.

    'armr' 'áss' 'stóll' 'jökull' 'steinn' 'mikill' 'sæll' 'litill'
    'vænn' 'lauss' 'vinr' 'selr' 'fagr' 'vitr' 'vetr' 'akr' 'björn'
    'þurs' 'karl' 'hrafn'

    :param stem: noun stem to inflect
    :return: inflected form
    """
    s_stem = s.syllabify_ssp(stem.lower())
    n_stem = len(s_stem)
    last_syllable = Syllable(s_stem[-1], VOWELS, CONSONANTS)
    # A monosyllable counts as the stressed first syllable.
    return "".join(s_stem[:-1]) + add_r_ending_to_syllable(last_syllable.text, n_stem == 1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_i_umlaut(stem: str):
    """Change the vowel of the stem's last syllable by i-umlaut.

    'mæl' 'legð' 'vek' 'hef' 'byð' 'bær' 'réð' 'fœr'

    :param stem: Old Norse stem
    :return: stem with umlauted last syllable
    """
    assert len(stem) > 0
    s_stem = s.syllabify_ssp(stem.lower())
    last_syllable = OldNorseSyllable(s_stem[-1], VOWELS, CONSONANTS)
    # Mutates the syllable's vowel in place.
    last_syllable.apply_i_umlaut()
    return "".join(s_stem[:-1]) + str(last_syllable)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correct_invalid_start(self, scansion: str) -> str: """ The third syllable of a hendecasyllabic line is long, so we will convert it. :param scansion: scansion string :return: scansion string with corrected start - U - U U - U - U - U """ |
mark_list = string_utils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
corrected = vals[:2] + [self.constants.STRESSED] + vals[3:]
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag_one(self: object, tokens: List[str], index: int, history: List[str]):
""" Determine an appropriate tag for the specified token, and return that tag. If this tagger is unable to determine a tag for the specified token, then its backoff tagger is consulted. :rtype: tuple :type tokens: list :param tokens: The list of words that are being tagged. :type index: int :param index: The index of the word whose tag should be returned. :type history: list(str) :param history: A list of the tags for all words before index. """ |
lemma = None
for tagger in self._taggers:
lemma = tagger.choose_tag(tokens, index, history)
if lemma is not None:
break
return lemma, tagger |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize_akkadian_words(line):
""" Operates on a single line of text, returns all words in the line as a tuple in a list. input: "1. isz-pur-ram a-na" output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")] :param: line: text string :return: list of tuples: (word, language) """ |
# Regexes classifying underscore placement; underscores delimit Sumerian
# logograms embedded in an Akkadian line.
beginning_underscore = "_[^_]+(?!_)$"
# only match a string if it has a beginning underscore anywhere
ending_underscore = "^(?<!_)[^_]+_"
# only match a string if it has an ending underscore anywhere
two_underscores = "_[^_]+_"
# only match a string if it has two underscores
words = line.split()
# NOTE(review): the original comment says the first split (the line number,
# e.g. "1.") is ignored, but nothing here drops it -- it will be emitted as
# an "akkadian" token. Confirm against callers / the docstring example.
language = "akkadian"
output_words = []
for word in words:
    if re.search(two_underscores, word):
        # If the string has two underscores in it then the word is
        # in Sumerian while the neighboring words are in Akkadian.
        output_words.append((word, "sumerian"))
    elif re.search(beginning_underscore, word):
        # If the word has an initial underscore somewhere
        # but no other underscores then we're starting a block
        # of Sumerian.
        language = "sumerian"
        output_words.append((word, language))
    elif re.search(ending_underscore, word):
        # If the word has an ending underscore somewhere
        # but no other underscores then we're ending a block
        # of Sumerian; revert to Akkadian for following words.
        output_words.append((word, language))
        language = "akkadian"
    else:
        # If there are no underscores then we are continuing in
        # whatever language we're currently in.
        output_words.append((word, language))
return output_words
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize_arabic_words(text):
""" Tokenize text into words @param text: the input text. @type text: unicode. @return: list of words. @rtype: list. """ |
specific_tokens = []
if not text:
return specific_tokens
else:
specific_tokens = araby.tokenize(text)
return specific_tokens |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize_middle_high_german_words(text):
"""Tokenizes MHG text""" |
assert isinstance(text, str)
# As far as I know, hyphens were never used for compounds, so the tokenizer treats all hyphens as line-breaks
text = re.sub(r'-\n',r'-', text)
text = re.sub(r'\n', r' ', text)
text = re.sub(r'(?<=.)(?=[\.\";\,\:\[\]\(\)!&?])',r' ', text)
text = re.sub(r'(?<=[\.\";\,\:\[\]\(\)!&?])(?=.)',r' ', text)
text = re.sub(r'\s+',r' ', text)
text = str.split(text)
return text |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize(self, string):
"""Tokenize incoming string.""" |
if self.language == 'akkadian':
tokens = tokenize_akkadian_words(string)
elif self.language == 'arabic':
tokens = tokenize_arabic_words(string)
elif self.language == 'french':
tokens = tokenize_french_words(string)
elif self.language == 'greek':
tokens = tokenize_greek_words(string)
elif self.language == 'latin':
tokens = tokenize_latin_words(string)
elif self.language == 'old_norse':
tokens = tokenize_old_norse_words(string)
elif self.language == 'middle_english':
tokens = tokenize_middle_english_words(string)
elif self.language == 'middle_high_german':
tokens = tokenize_middle_high_german_words(string)
else:
tokens = nltk_tokenize_words(string)
return tokens |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokenize_sign(self, word):
"""This is for tokenizing cuneiform signs.""" |
if self.language == 'akkadian':
sign_tokens = tokenize_akkadian_signs(word)
else:
sign_tokens = 'Language must be written using cuneiform.'
return sign_tokens |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_import_source():
"""Check if tlgu imported, if not import it.""" |
# The presence of tlgu.h marks that the TLGU sources were already fetched.
header_path = os.path.expanduser(
    '~/cltk_data/greek/software/greek_software_tlgu/tlgu.h')
if not os.path.isfile(header_path):
    try:
        CorpusImporter('greek').import_corpus('greek_software_tlgu')
    except Exception as exc:
        logger.error('Failed to import TLGU: %s', exc)
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_install(self):
"""Check if tlgu installed, if not install it.""" |
# Probe for an existing `tlgu` binary on the PATH; if absent, compile and
# install the bundled TLGU sources via `make install` (optionally with sudo).
try:
    subprocess.check_output(['which', 'tlgu'])
except Exception as exc:
    logger.info('TLGU not installed: %s', exc)
    logger.info('Installing TLGU.')
    # A C compiler is required to build TLGU from source.
    if not subprocess.check_output(['which', 'gcc']):
        logger.error('GCC seems not to be installed.')
    else:
        tlgu_path_rel = '~/cltk_data/greek/software/greek_software_tlgu'
        tlgu_path = os.path.expanduser(tlgu_path_rel)
        # Interactive runs ask for confirmation; test builds skip the prompt.
        if not self.testing:
            print('Do you want to install TLGU?')
            print('To continue, press Return. To exit, Control-C.')
            input()
        else:
            print('Automated or test build, skipping keyboard input confirmation for installation of TLGU.')
        try:
            command = 'cd {0} && make install'.format(tlgu_path)
            print('Going to run command:', command)
            p_out = subprocess.call(command, shell=True)
            if p_out == 0:
                logger.info('TLGU installed.')
            else:
                logger.error('TLGU install without sudo failed.')
        except Exception as exc:
            logger.error('TLGU install failed: %s', exc)
        else: # for Linux needing root access to '/usr/local/bin'
            # NOTE(review): this `else` runs whenever the try above raised no
            # exception -- including when the non-sudo install already
            # succeeded (p_out == 0) -- so the sudo install is attempted
            # unconditionally on the no-exception path. Confirm intent.
            if not self.testing:
                print('Could not install without root access. Do you want to install TLGU with sudo?')
                command = 'cd {0} && sudo make install'.format(tlgu_path)
                print('Going to run command:', command)
                print('To continue, press Return. To exit, Control-C.')
                input()
                p_out = subprocess.call(command, shell=True)
            else:
                command = 'cd {0} && sudo make install'.format(tlgu_path)
                p_out = subprocess.call(command, shell=True)
            if p_out == 0:
                logger.info('TLGU installed.')
            else:
                logger.error('TLGU install with sudo failed.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def syllabify(self, word):
"""Splits input Latin word into a list of syllables, based on the language syllables loaded for the Syllabifier instance""" |
# Sort prefixes longest-first so the longest matching prefix wins.
prefixes = self.language['single_syllable_prefixes']
prefixes.sort(key=len, reverse=True)
# Check if word is in exception dictionary
if word in self.language['exceptions']:
    syllables = self.language['exceptions'][word]
# Else, breakdown syllables for word
else:
    syllables = []
    # Remove prefixes
    for prefix in prefixes:
        if word.startswith(prefix):
            syllables.append(prefix)
            word = re.sub('^%s' % prefix, '', word)
            break
    # Initialize syllable to build by iterating over characters
    syllable = ''
    # Get word length for determining character position in word
    word_len = len(word)
    # Iterate over characters to build syllables
    for i, char in enumerate(word):
        # Build syllable
        syllable = syllable + char
        syllable_complete = False
        # Checks to process syllable logic
        char_is_vowel = self._is_vowel(char)
        has_next_char = i < word_len - 1
        has_prev_char = i > 0
        # If it's the end of the word, the syllable is complete
        if not has_next_char:
            syllable_complete = True
        else:
            next_char = word[i + 1]
            if has_prev_char:
                prev_char = word[i - 1]
            # 'i' is a special case for a vowel. when i is at the
            # beginning of the word (Iesu) or i is between
            # vowels (alleluia), then the i is treated as a
            # consonant (y) Note: what about compounds like 'adiungere'
            if char == 'i' and has_next_char and self._is_vowel(next_char):
                if i == 0:
                    char_is_vowel = False
                elif self._is_vowel(prev_char):
                    char_is_vowel = False
            # Determine if the syllable is complete
            if char_is_vowel:
                if (
                        ( # If the next character's a vowel
                            self._is_vowel(
                                next_char) # And it doesn't compose a diphthong with the current character
                            and not self._is_diphthong(char,
                                                       next_char) # And the current character isn't preceded by a q, unless followed by a u
                            and not (
                                has_prev_char
                                and prev_char == "q"
                                and char == "u"
                                and next_char != "u"
                            )
                        )
                        or (
                            # If the next character's a consonant but not a double consonant, unless it's a mute consonant followed by a liquid consonant
                            i < word_len - 2
                            and (
                                (
                                    (
                                        has_prev_char
                                        and prev_char != "q"
                                        and char == "u"
                                        and self._is_vowel(word[i + 2])
                                    )
                                    or (
                                        not has_prev_char
                                        and char == "u"
                                        and self._is_vowel(word[i + 2])
                                    )
                                )
                                or (
                                    char != "u"
                                    and self._is_vowel(word[i + 2])
                                    and not self._is_diphthong(char, next_char)
                                )
                                or (
                                    self._is_mute_consonant_or_f(next_char)
                                    and self._is_liquid_consonant(word[i + 2])
                                )
                            )
                        )
                ):
                    syllable_complete = True
            # Otherwise, it's a consonant
            else:
                if ( # If the next character's also a consonant (but it's not the last in the word)
                        (
                            not self._is_vowel(next_char)
                            and i < word_len - 2
                        ) # If the char's not a mute consonant followed by a liquid consonant
                        and not (
                            self._is_mute_consonant_or_f(char)
                            and self._is_liquid_consonant(next_char)
                        ) # If the char's not a c, p, or t followed by an h
                        and not (
                            (
                                has_prev_char
                                and not self._is_vowel(prev_char)
                                and char in ['c', 'p', 't'] and next_char == 'h'
                            )
                            or (
                                not has_prev_char
                                and char in ['c', 'p', 't'] and next_char == 'h'
                            )
                        ) # And it's not the only letter in the syllable
                        and not len(syllable) == 1
                ):
                    syllable_complete = True
        # If it's a complete syllable, append it to syllables list and reset syllable
        if syllable_complete:
            syllables.append(syllable)
            syllable = ''
return syllables
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clean_text(self, text):
"""Clean the text of extraneous punction. By default, ':', ';', and '.' are defined as stops. :param text: raw text :return: clean text :rtype : string """ |
clean = []
for char in text:
if char in self.punc_stops:
clean += '.'
elif char not in self.punc:
clean += char
else:
pass
return (''.join(clean)).lower() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _tokenize(self, text):
"""Tokenize the text into a list of sentences with a list of words. :param text: raw text :return: tokenized text :rtype : list """ |
sentences = []
tokens = []
for word in self._clean_accents(text).split(' '):
tokens.append(word)
if '.' in word:
sentences.append(tokens)
tokens = []
return sentences |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _long_by_nature(self, syllable):
"""Check if syllable is long by nature. Long by nature includes: 1) Syllable contains a diphthong 2) Syllable contains a long vowel :param syllable: current syllable :return: True if long by nature :rtype : bool """ |
# Find diphthongs
vowel_group = []
for char in syllable:
print
if char in self.long_vowels:
return True
elif char not in self.sing_cons and char not in self.doub_cons:
vowel_group += char
if ''.join(vowel_group) in self.diphthongs:
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _long_by_position(self, syllable, sentence):
"""Check if syllable is long by position. Long by position includes: 1) Next syllable begins with two consonants, unless those consonants are a stop + liquid combination 2) Next syllable begins with a double consonant 3) Syllable ends with a consonant and the next syllable begins with a consonant :param syllable: Current syllable :param sentence: Current sentence :return: True if syllable is long by position :rtype : bool """ |
# Look ahead to the next syllable in the sentence and apply the three
# "long by position" rules from the docstring.
# NOTE(review): sentence.index(syllable) returns the *first* occurrence of
# this syllable string, so repeated syllables may inspect the wrong
# neighbor; a syllable missing from `sentence` would raise ValueError,
# which is not caught here. Confirm against callers.
try:
    next_syll = sentence[sentence.index(syllable) + 1]
    # Long by position by case 1
    if (next_syll[0] in self.sing_cons and next_syll[1] in
            self.sing_cons) and (next_syll[0] not in self.stops and
                                 next_syll[1] not in self.liquids):
        return True
    # Long by position by case 2
    elif syllable[-1] in self.vowels and next_syll[0] in self.doub_cons:
        return True
    # Long by position by case 3
    elif syllable[-1] in self.sing_cons and (next_syll[0] in self.sing_cons):
        return True
    else:
        # No rule fired: fall through and return None (falsy).
        pass
except IndexError:
    # Final syllable of the sentence has no successor; treat as not long.
    logger.info("IndexError while checking if syllable '%s' is long. Continuing.", syllable)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_text(self, input_string):
"""The primary method for the class. :param input_string: A string of macronized text. :return: meter of text :rtype : list """ |
syllables = self._make_syllables(input_string)
sentence_syllables = self._syllable_condenser(syllables)
meter = self._scansion(sentence_syllables)
return meter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stem(self, text):
"""Stem each word of the Latin text.""" |
stemmed_text = ''
for word in text.split(' '):
if word not in self.stops:
# remove '-que' suffix
word, in_que_pass_list = self._checkremove_que(word)
if not in_que_pass_list:
# remove the simple endings from the target word
word, was_stemmed = self._matchremove_simple_endings(word)
# if word didn't match the simple endings, try verb endings
if not was_stemmed:
word = self._matchremove_verb_endings(word)
# add the stemmed word to the text
stemmed_text += word + ' '
return stemmed_text |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _checkremove_que(self, word):
"""If word ends in -que and if word is not in pass list, strip -que""" |
in_que_pass_list = False
que_pass_list = ['atque',
'quoque',
'neque',
'itaque',
'absque',
'apsque',
'abusque',
'adaeque',
'adusque',
'denique',
'deque',
'susque',
'oblique',
'peraeque',
'plenisque',
'quandoque',
'quisque',
'quaeque',
'cuiusque',
'cuique',
'quemque',
'quamque',
'quaque',
'quique',
'quorumque',
'quarumque',
'quibusque',
'quosque',
'quasque',
'quotusquisque',
'quousque',
'ubique',
'undique',
'usque',
'uterque',
'utique',
'utroque',
'utribique',
'torque',
'coque',
'concoque',
'contorque',
'detorque',
'decoque',
'excoque',
'extorque',
'obtorque',
'optorque',
'retorque',
'recoque',
'attorque',
'incoque',
'intorque',
'praetorque']
if word not in que_pass_list:
word = re.sub(r'que$', '', word)
else:
in_que_pass_list = True
return word, in_que_pass_list |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _matchremove_simple_endings(self, word):
"""Remove the noun, adjective, adverb word endings""" |
was_stemmed = False
# noun, adjective, and adverb word endings sorted by charlen, then alph
simple_endings = ['ibus',
'ius',
'ae',
'am',
'as',
'em',
'es',
'ia',
'is',
'nt',
'os',
'ud',
'um',
'us',
'a',
'e',
'i',
'o',
'u']
for ending in simple_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), '', word)
was_stemmed = True
break
return word, was_stemmed |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup(self, word) -> List[str]: """ Prepares a word for syllable processing. If the word starts with a prefix, process it separately. :param word: :return: """ |
# A single character is already a complete unit; return it as-is.
if len(word) == 1:
    return [word]
# If the word begins with a known prefix, syllabify the prefix and the
# remainder separately -- but only when the remainder still has a vowel.
for prefix in self.constants.PREFIXES:
    if word.startswith(prefix):
        (first, rest) = string_utils.split_on(word, prefix)
        if self._contains_vowels(rest):
            return string_utils.remove_blank_spaces(
                self._process(first) + self._process(rest))
        # a word like pror can happen from ellision
        return string_utils.remove_blank_spaces(self._process(word))
# Special-cased 'ui' words get their hand-written syllabification.
if word in self.constants.UI_EXCEPTIONS.keys():
    return self.constants.UI_EXCEPTIONS[word]
return string_utils.remove_blank_spaces(self._process(word))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_consonantal_i(self, word) -> str: """Convert i to j when at the start of a word.""" |
match = list(self.consonantal_i_matcher.finditer(word))
if match:
if word[0].isupper():
return "J" + word[1:]
return "j" + word[1:]
return word |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process(self, word: str) -> List[str]: """ Process a word into a list of strings representing the syllables of the word. This method describes rules for consonant grouping behaviors and then iteratively applies those rules the list of letters that comprise the word, until all the letters are grouped into appropriate syllable groups. :param word: :return: """ |
# if a blank arrives from splitting, just return an empty list
if len(word.strip()) == 0:
    return []
word = self.convert_consonantal_i(word)
# Pad with sentinel spaces so edge letters can be grouped uniformly.
my_word = " " + word + " "
letters = list(my_word)
positions = []
# Mark the start of every diphthong and 'qu'-style digraph so each pair is
# merged into a single letter group before consonant shuffling begins.
for dipth in self.diphthongs:
    if dipth in my_word:
        dipth_matcher = re.compile("{}".format(dipth))
        matches = dipth_matcher.finditer(my_word)
        for match in matches:
            (start, end) = match.span()
            positions.append(start)
matches = self.kw_matcher.finditer(my_word)
for match in matches:
    (start, end) = match.span()
    positions.append(start)
letters = string_utils.merge_next(letters, positions)
letters = string_utils.remove_blanks(letters)
positions.clear()
if not self._contains_vowels("".join(letters)):
    return ["".join(letters).strip()] # occurs when only 'qu' appears by ellision
# Iteratively attach leading consonants rightwards, trailing consonants
# leftwards, then resolve solo consonants and clusters until every group
# is anchored to a vowel. Each helper returns the next positions to fix.
positions = self._starting_consonants_only(letters)
while len(positions) > 0:
    letters = string_utils.move_consonant_right(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions = self._starting_consonants_only(letters)
positions = self._ending_consonants_only(letters)
while len(positions) > 0:
    letters = string_utils.move_consonant_left(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions = self._ending_consonants_only(letters)
positions = self._find_solo_consonant(letters)
while len(positions) > 0:
    letters = self._move_consonant(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions = self._find_solo_consonant(letters)
positions = self._find_consonant_cluster(letters)
while len(positions) > 0:
    letters = self._move_consonant(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions = self._find_consonant_cluster(letters)
return letters
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ends_with_vowel(self, letter_group: str) -> bool: """Check if a string ends with a vowel.""" |
if len(letter_group) == 0:
return False
return self._contains_vowels(letter_group[-1]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _starts_with_vowel(self, letter_group: str) -> bool: """Check if a string starts with a vowel.""" |
if len(letter_group) == 0:
return False
return self._contains_vowels(letter_group[0]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _starting_consonants_only(self, letters: list) -> list: """Return a list of starting consonant positions.""" |
for idx, letter in enumerate(letters):
if not self._contains_vowels(letter) and self._contains_consonants(letter):
return [idx]
if self._contains_vowels(letter):
return []
if self._contains_vowels(letter) and self._contains_consonants(letter):
return []
return [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ending_consonants_only(self, letters: List[str]) -> List[int]: """Return a list of positions for ending consonants.""" |
reversed_letters = list(reversed(letters))
length = len(letters)
for idx, letter in enumerate(reversed_letters):
if not self._contains_vowels(letter) and self._contains_consonants(letter):
return [(length - idx) - 1]
if self._contains_vowels(letter):
return []
if self._contains_vowels(letter) and self._contains_consonants(letter):
return []
return [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_solo_consonant(self, letters: List[str]) -> List[int]: """Find the positions of any solo consonants that are not yet paired with a vowel.""" |
solos = []
for idx, letter in enumerate(letters):
if len(letter) == 1 and self._contains_consonants(letter):
solos.append(idx)
return solos |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _move_consonant(self, letters: list, positions: List[int]) -> List[str]: """ Given a list of consonant positions, move the consonants according to certain consonant syllable behavioral rules for gathering and grouping. :param letters: :param positions: :return: """ |
# Each rule below resolves the stray consonant at `pos` by attaching it to
# the preceding or following letter group. Every branch returns
# immediately, so only the first position in `positions` is handled per
# call; callers loop until no positions remain.
for pos in positions:
    previous_letter = letters[pos - 1]
    consonant = letters[pos]
    next_letter = letters[pos + 1]
    # A following vowel claims the consonant as its onset.
    if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
        return string_utils.move_consonant_right(letters, [pos])
    # A bare preceding vowel takes the consonant as its coda.
    if self._contains_vowels(previous_letter) and self._ends_with_vowel(
            previous_letter) and len(previous_letter) == 1:
        return string_utils.move_consonant_left(letters, [pos])
    # Keep aspirate digraphs together on whichever side they complete.
    if previous_letter + consonant in self.constants.ASPIRATES:
        return string_utils.move_consonant_left(letters, [pos])
    if consonant + next_letter in self.constants.ASPIRATES:
        return string_utils.move_consonant_right(letters, [pos])
    # Doubled consonants split across the syllable boundary.
    if next_letter[0] == consonant:
        return string_utils.move_consonant_left(letters, [pos])
    # A mute + liquid cluster stays with the following syllable.
    if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
        return string_utils.move_consonant_right(letters, [pos])
    if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
        return string_utils.move_consonant_right(letters, [pos])
    if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
            previous_letter[-1]):
        return string_utils.move_consonant_left(letters, [pos])
    # fall through case
    if self._contains_consonants(next_letter[0]):
        return string_utils.move_consonant_right(letters, [pos])
return letters
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_syllable_count(self, syllables: List[str]) -> int: """ Counts the number of syllable groups that would occur after elision. Often we will want to preserve the position and separation of syllables so that they can be used to reconstitute a line, and apply stresses to the original word positions. However, we also want to be able to count the number of syllables accurately. :param syllables: :return: 11 """ |
tmp_syllables = copy.deepcopy(syllables)
return len(string_utils.remove_blank_spaces(
string_utils.move_consonant_right(tmp_syllables,
self._find_solo_consonant(tmp_syllables)))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _unrecognised(achr):
""" Handle unrecognised characters. """ |
if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
return achr
elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
return options['substituteChar']
else:
raise KeyError(achr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _transliterate (self, text, outFormat):
""" Transliterate the text to the target transliteration scheme.""" |
result = []
for c in text:
if c.isspace(): result.append(c)
try:
result.append(self[c].equivalents[outFormat.name])
except KeyError:
result.append(_unrecognised(c))
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):
""" Build the search tree for multi-character encodings. """ |
# Recursively partition rows [rowFrom, rowTo] by the character at colIndex,
# building a trie (nested dicts) of the multi-character encodings.
if colIndex == self._longestEntry:
    return
prevchar = None
rowIndex = rowFrom
while rowIndex <= rowTo:
    # Rows whose entry is shorter than colIndex are skipped; their
    # encoding already ended at a shallower level.
    if colIndex < len(self._parsedata[rowIndex]):
        c = self._parsedata[rowIndex][colIndex]
        if c != prevchar:
            # New branch character: recurse into the run of rows that
            # shared the previous character at this column, then start a
            # fresh run at the current row.
            tree[c] = {}
            if prevchar is not None:
                self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar])
                rowFrom = rowIndex
            prevchar = c
        if rowIndex == rowTo:
            # Last row in range: flush the final run into its subtree.
            self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar])
    rowIndex = rowIndex + 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _transliterate (self, text, outFormat):
""" Transliterate the text to Unicode.""" |
result = []
text = self._preprocess(text)
i = 0
while i < len(text):
if text[i].isspace():
result.append(text[i])
i = i+1
else:
chr = self._getNextChar(text, i)
try:
result.append(self[chr].unichr)
except KeyError:
result.append(_unrecognised(chr))
i = i + len(chr)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Devanagari character to Latin. Add implicit As unless overridden by VIRAMA. """ |
# Emit the Latin equivalent for one Devanagari character. VIRAMA itself
# produces no output: its only effect is suppressing the implicit A below.
# NOTE(review): the `next` parameter shadows the builtin, and `prev` is
# unused in this body -- confirm before renaming/removing at the signature.
result = []
if char.unichr != DevanagariCharacter._VIRAMA:
    result.append(char.equivalents[self.name])
    """ Append implicit A to consonants if the next character isn't a vowel. """
    if implicitA and char.isConsonant \
        and ((next is not None \
            and next.unichr != DevanagariCharacter._VIRAMA \
            and not next.isVowel) \
            or next is None):
        result.append(characterBlocks['DEVANAGARI']\
            [DevanagariCharacter._LETTER_A].equivalents[self.name])
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_corpora(self):
"""Show corpora available for the CLTK to download.""" |
try:
# corpora = LANGUAGE_CORPORA[self.language]
corpora = self.all_corpora
corpus_names = [corpus['name'] for corpus in corpora]
return corpus_names
except (NameError, KeyError) as error:
msg = 'Corpus not available for language "{}": {}'.format(self.language, error)
logger.error(msg)
raise CorpusImportError(msg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def onekgreek_tei_xml_to_text():
"""Find TEI XML dir of TEI XML for the First 1k Years of Greek corpus.""" |
# Convert every non-__cts__ TEI XML file of the First 1K Greek corpus to a
# plaintext file of the same name under a sibling *_plaintext directory.
if not bs4_installed:
    logger.error('Install `bs4` and `lxml` to parse these TEI files.')
    raise ImportError
xml_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek/data/*/*/*.xml')
xml_paths = glob.glob(xml_dir)
if not len(xml_paths):
    logger.error('1K Greek corpus not installed. Use CorpusInstaller to get `First1KGreek`.')
    raise FileNotFoundError
xml_paths = [path for path in xml_paths if '__cts__' not in path]
# Destination directory for the plaintext output.
new_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek_plaintext/')
if not os.path.isdir(new_dir):
    os.makedirs(new_dir)
for xml_path in xml_paths:
    _, xml_name = os.path.split(xml_path)
    # BUGFIX: the original used xml_name.rstrip('.xml'), which strips any
    # trailing '.', 'x', 'm', 'l' characters (mangling e.g. 'foo_html.xml');
    # splitext removes exactly the final extension.
    xml_name = os.path.splitext(xml_name)[0] + '.txt'
    with open(xml_path) as file_open:
        soup = BeautifulSoup(file_open, 'lxml')
        body = soup.body
        text = body.get_text()
    new_plaintext_path = os.path.join(new_dir, xml_name)
    with open(new_plaintext_path, 'w') as file_open:
        file_open.write(text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def onekgreek_tei_xml_to_text_capitains():
"""Use MyCapitains program to convert TEI to plaintext.""" |
# Convert every non-__cts__ TEI XML file to plaintext using MyCapytain,
# exporting each citation node and excluding editorial tei:note elements.
# CLEANUP: removed an unused `file` local that pointed at one sample path.
xml_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek/data/*/*/*.xml')
xml_paths = glob.glob(xml_dir)
if not len(xml_paths):
    logger.error('1K Greek corpus not installed. Use CorpusInstaller to get `First1KGreek`.')
    raise FileNotFoundError
xml_paths = [path for path in xml_paths if '__cts__' not in path]
# Destination directory for the plaintext output.
new_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek_plaintext/')
if not os.path.isdir(new_dir):
    os.makedirs(new_dir)
for xml_path in xml_paths:
    _, xml_name = os.path.split(xml_path)
    # BUGFIX: the original used xml_name.rstrip('.xml'), which strips any
    # trailing '.', 'x', 'm', 'l' characters (mangling e.g. 'foo_html.xml');
    # splitext removes exactly the final extension.
    xml_name = os.path.splitext(xml_name)[0] + '.txt'
    plain_text = ''
    with open(xml_path) as file_open:
        text = CapitainsCtsText(resource=file_open)
        for ref in text.getReffs(level=len(text.citation)):
            psg = text.getTextualNode(subreference=ref, simple=True)
            plain_text += psg.export(Mimetypes.PLAINTEXT, exclude=["tei:note"])
    new_plaintext_path = os.path.join(new_dir, xml_name)
    with open(new_plaintext_path, 'w') as file_open:
        file_open.write(plain_text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_replacement_patterns(self):
"""Check for availability of the specified dictionary.""" |
filename = self.dictionary + '.py'
models = self.language + '_models_cltk'
rel_path = os.path.join('~/cltk_data',
self.language,
'model',
models,
'semantics',
filename)
path = os.path.expanduser(rel_path)
logger.info('Loading lemmata or synonyms. This may take a minute.')
loader = importlib.machinery.SourceFileLoader(filename, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module.DICTIONARY |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, tokens):
    """Return a list of possible lemmata and their probabilities for each token.

    ``tokens`` may be a single token string or a list of token strings; any
    other type yields an empty list. Lookup against ``self.lemmata`` is
    case-insensitive. Each result is a tuple ``(token, lemmalist)`` where
    ``lemmalist`` is ``[(lemma, probability), ...]``; probability mass is
    split evenly among the candidate lemmata. Unknown tokens fall back to
    the token itself with probability 1.
    """
    def _lemmatize(token):
        # One shared helper replaces the previously duplicated list/str branches.
        key = token.lower()
        if key in self.lemmata:
            lemmas = self.lemmata[key]
            return (token, [(lemma, 1 / len(lemmas)) for lemma in lemmas])
        # token not found in lemma-headword list: return the token itself
        return (token, [(token, 1)])

    if isinstance(tokens, list):
        return [_lemmatize(token) for token in tokens]
    if isinstance(tokens, str):
        return [_lemmatize(tokens)]
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isolate(obj):
    """Feed a standard semantic object in and receive a simple list of lemmata.

    ``obj`` is a list of ``(token, [(lemma, probability), ...])`` tuples;
    the lemmata are collected in order, probabilities discarded.
    """
    return [pair[0] for entry in obj for pair in entry[1]]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_hierarchy(self, hierarchy):
    """Set an alternative sonority hierarchy.

    Note that you will also need to specify the vowel set with
    ``set_vowels`` for the module to correctly identify each nucleus.
    ``hierarchy`` is a list of phoneme groups ordered by decreasing
    consonantality; each phoneme is mapped to the index of its group.
    """
    self.hierarchy = {phoneme: rank
                      for rank, tier in enumerate(hierarchy)
                      for phoneme in tier}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def syllabify_ssp(self, word):
    """
    Syllabify *word* according to the Sonority Sequencing Principle (SSP).

    Requires the sonority hierarchy (``set_hierarchy``) and vowel set
    (``set_vowels``) to be defined beforehand; any character absent from
    the hierarchy raises ``InputError``. When ``self.break_geminants`` is
    True, a syllable break is forced between geminate (doubled) phonemes.

    :param word: word to be syllabified
    :return: list of syllable strings, e.g. ['fe', 'mi', 'na', 'rum']
    """
    # List indicating the syllable indices
    syllables = []
    find_nucleus = True
    i = 0
    try:
        # Replace each letter occurence with its corresponding number
        # indicating its position in the sonority hierarchy
        encoded = list(map(lambda x: self.hierarchy[x], word))
    except KeyError:
        LOG.error(
            "The given string contains invalid characters. "
            "Make sure to define the mater of articulation for each phoneme.")
        raise InputError
    while i < len(word) - 1:
        # Search for nucleus
        while word[i] not in self.vowels and i < len(word) - 1 and find_nucleus:
            i += 1
        if find_nucleus is True:
            # step past the nucleus before inspecting the following cluster
            i += 1
        if i >= len(word) - 1:
            break
        else:
            # If the break_geminants parameter is set to True, prioritize geminants
            if self.break_geminants and word[i-1] == word[i]:
                syllables.append(i-1)
                find_nucleus = True
            # If a cluster of three phonemes with the same values exist, break syllable
            elif encoded[i - 1] == encoded[i] == encoded[i + 1]:
                syllables.append(i)
                find_nucleus = True
            # local sonority peak: boundary after it
            elif encoded[i] > encoded[i - 1] and encoded[i] > encoded[i + 1]:
                syllables.append(i)
                find_nucleus = True
            # local sonority trough: boundary at it
            elif encoded[i] < encoded[i - 1] and encoded[i] < encoded[i + 1]:
                syllables.append(i)
                find_nucleus = True
            else:
                find_nucleus = False
            i += 1
    # insert '.' separators at the recorded indices (shifted by the
    # number of separators already inserted), then split on them
    for n, k in enumerate(syllables):
        word = word[:k + n + 1] + "." + word[k + n + 1:]
    word = word.split('.')
    # Check if last syllable has a nucleus
    if sum([x in self.vowels for x in word[-1]]) == 0:
        # no vowel in the final chunk: merge it into the previous syllable
        word[-2] += word[-1]
        word = word[:-1]
    return self.onset_maximization(word)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_spondaic(self, scansion: str) -> str:
    """If a pentameter line has 12 syllables, then it must start with double spondees.

    :param scansion: a string of scansion patterns
    :return: a scansion pattern string starting with two spondees
    """
    positions = string_utils.mark_list(scansion)
    flat = list(scansion.replace(" ", ""))
    # keep the caller's final mark; replace everything else with the spondaic template
    corrected = "".join(self.SPONDAIC_PENTAMETER[:-1] + flat[-1])
    padded = list(" " * len(scansion))
    # write each corrected mark back at its original (spacing-preserving) position
    for idx, mark in enumerate(corrected):
        padded[positions[idx]] = mark
    return "".join(padded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correct_penultimate_dactyl_chain(self, scansion: str) -> str:
    """For pentameter the last two feet of the verse are predictable dactyls,
    and do not regularly allow substitutions.

    :param scansion: scansion line thus far
    :return: corrected line of scansion
    """
    positions = string_utils.mark_list(scansion)
    flat = list(scansion.replace(" ", ""))
    # overwrite the tail with two dactyls while preserving the final mark
    corrected = "".join(flat[:-7] + [self.constants.DACTYL + self.constants.DACTYL] + [flat[-1]])
    padded = list(" " * len(scansion))
    # write each corrected mark back at its original (spacing-preserving) position
    for idx, mark in enumerate(corrected):
        padded[positions[idx]] = mark
    return "".join(padded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_str_to_list(input_str: str) -> List[str]:
    """Parse *input_str* as a Python literal and return it if it is a list.

    :raises ValueError: if the literal is not a list.
    """
    parsed = ast.literal_eval(input_str)  # type: List[str]
    if not isinstance(parsed, list):
        raise ValueError
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_authors(filepath: str) -> List[str]:
    """Open file and check for author info.

    Scans the file for a line of the form ``__author__ = [...]`` and
    returns the parsed author list, or an empty list when absent.
    """
    pattern = re.compile(r'(^__author__ = )(\[.*?\])', re.MULTILINE)  # type: Pattern[str]
    with open(filepath) as reader:
        contents = reader.read()  # type: str
    matches = pattern.findall(contents)
    if not matches:
        return []
    # second capture group holds the bracketed author list
    return eval_str_to_list(matches[0][1])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scantree(path: str) -> Generator:
    """Recursively yield DirEntry objects for every ``.py`` file under *path*."""
    for item in os.scandir(path):
        if item.is_dir(follow_symlinks=False):
            # descend into subdirectories
            yield from scantree(item.path)
        elif item.name.endswith('.py'):
            yield item
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_contribs(def_dict_list: Dict[str, List[str]]) -> None:
    """Write to file, in current dir, 'contributors.md'.

    One ``##`` section per contributor, one bullet per module they authored.
    """
    pieces = ['# Contributors\nCLTK Core authors, ordered alphabetically by first name\n\n']  # type: List[str]
    for contrib, modules in def_dict_list.items():
        pieces.append('## ' + contrib + '\n')
        for module in modules:
            pieces.append('* ' + module + '\n')
        pieces.append('\n')
    out_name = 'contributors.md'  # type: str
    with open(out_name, 'w') as writer:
        writer.write(''.join(pieces))
    logger.info('Wrote contribs file at "%s".', out_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_write_contribs() -> None:
    """Look for files, find authors, sort, write file."""
    # map each source file to its declared authors
    file_to_authors = {}  # type: Dict[str, List[str]]
    for entry in scantree('cltk'):
        found = get_authors(entry.path)  # type: List[str]
        if found:
            file_to_authors[entry.path] = found
    # invert: map each author to the files they contributed to
    author_to_files = defaultdict(list)  # type: Dict[str, List[str]]
    for path, authors in file_to_authors.items():
        for author in authors:
            author_to_files[author].append(path)
    # sort each author's file list, then order authors alphabetically
    author_to_files = sort_def_dict(author_to_files)
    ordered = OrderedDict(sorted(author_to_files.items()))
    write_contribs(ordered)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def syllabify(self, hierarchy):
    """Syllabify every short line of the poem.

    Syllables may play a role in verse classification. Stores the result
    as a nested list in ``self.syllabified_text``.
    """
    if not self.long_lines:
        logger.error("No text was imported")
        self.syllabified_text = []
        return
    syllabifier = Syllabifier(language="old_norse", break_geminants=True)
    syllabifier.set_hierarchy(hierarchy)
    result = []
    for long_line in self.long_lines:
        line_syllables = []
        for short_line in long_line:
            assert isinstance(short_line, (ShortLine, LongLine))
            short_line.syllabify(syllabifier)
            line_syllables.append(short_line.syllabified)
        result.append(line_syllables)
    self.syllabified_text = result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_phonetics(self):
    """Phonetically transcribe every short line of the poem.

    Transcribing words in verse helps find alliteration. Stores results in
    ``self.transcribed_text`` and ``self.phonological_features_text``.
    """
    if not self.long_lines:
        logger.error("No text was imported")
        # mirrors syllabify(): resets syllabified_text on empty input
        self.syllabified_text = []
        return
    transcriber = Transcriber(DIPHTHONGS_IPA, DIPHTHONGS_IPA_class, IPA_class, old_norse_rules)
    transcribed = []
    features = []
    for long_line in self.long_lines:
        transcribed_line = []
        features_line = []
        for short_line in long_line:
            assert isinstance(short_line, (ShortLine, LongLine))
            short_line.to_phonetics(transcriber)
            transcribed_line.append(short_line.transcribed)
            features_line.append(short_line.phonological_features_text)
        transcribed.append(transcribed_line)
        features.append(features_line)
    self.transcribed_text = transcribed
    self.phonological_features_text = features
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_word_with(self, poetry_tools: PoetryTools):
    """Analyse this word with the given PoetryTools instance.

    Computes the IPA transcription, the syllables, each syllable's length
    and stress, storing them on ``self.ipa_transcription``, ``self.syl``,
    ``self.length`` and ``self.stress``.

    :param poetry_tools: instance of PoetryTools
    """
    phonemes = poetry_tools.tr.text_to_phonemes(self.text)
    self.syl = poetry_tools.syllabifier.syllabify_phonemes(phonemes)
    for index, syllable in enumerate(self.syl):
        self.ipa_transcription.append([])
        self.length.append(measure_old_norse_syllable(syllable).value)
        # stress is placed on the first syllable only
        self.stress.append(1 if index == 0 else 0)
        for phoneme in syllable:
            self.ipa_transcription[index].append(phoneme.ipar)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_path(dicts, keys, v):
    """Append *v* at the nested path *keys* inside dictionary *dicts*.

    Intermediate levels are created as dicts; the leaf is a list, so
    repeated calls on the same path accumulate values:
    ``{'a': {'b': {'c': ['d', 'e']}}}``.

    :param dicts: dict: the dictionary to modify in place
    :param keys: list of str: path to the leaf
    :param v: value to append at the leaf
    """
    node = dicts
    for step in keys[:-1]:
        node = node.setdefault(step, {})
    node.setdefault(keys[-1], []).append(v)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_paths(src):
    """Generate root-to-leaf paths from a treebank in bracketed string form.

    This is a lazy generator: a path is emitted each time a closing
    parenthesis is reached, so not all paths exist simultaneously. An
    emitted path is a list of labels such as
    ``['IP-MAT-SPE', 'IP-MAT-PRN', 'VBD', 'seyde']``.

    :param src: str: treebank in Penn-style bracketed notation
    """
    st = list()   # running stack of labels from the root to the current node
    tmp = ''      # token currently being accumulated character by character
    for let in src:
        if let == '(':
            # new subtree begins: the pending token is its parent's label
            if tmp != '':
                st.append(tmp)
                tmp = ''
        elif let == ')':
            # subtree ends: flush the pending leaf token and emit the path
            if tmp != '':
                st.append(tmp)
            yield st
            # pop the node being closed, plus the leaf if one was flushed
            # (bool coerces to 0/1, so this slices off one or two entries);
            # slicing rebinds st, so the yielded list is never mutated later
            st = st[:-1 - (tmp != '')]
            tmp = ''
        elif let == ' ':
            # whitespace terminates a token within a subtree
            if tmp != '':
                st.append(tmp)
                tmp = ''
        else:
            tmp += let
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transliterate(self, text, mode='Latin'):
    """Transliterate Anglo-Saxon runes into Latin and vice versa.

    Sources:
        http://www.arild-hauge.com/eanglor.htm
        https://en.wikipedia.org/wiki/Anglo-Saxon_runes

    :param text: str: the text to be transcribed
    :param mode: 'Latin' (default) renders runes in the Latin alphabet
        using the Dickins system; 'Anglo-Saxon' or 'Anglo-Frisian'
        renders Latin text as runes.
    :raises InputError: if *mode* is not one of the supported values
    """
    if mode == 'Latin':
        table = L_Transliteration
    elif mode in ('Anglo-Saxon', 'Anglo-Frisian'):
        table = R_Transliteration
    else:
        LOG.error("The specified mode is currently not supported")
        raise InputError("The specified mode is currently not supported")
    return Transliterate.__transliterate_helper(text, table)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_objects(self, obj):
    """
    Clears objects with name @obj out of the task space. This is useful
    for supporting task modes with single types of objects, as in
    @self.single_object_mode without changing the model definition.
    """
    # Iterate over names only: the previous .items() loop never used the
    # MJCF values, and the `else:` after `continue` was redundant.
    for obj_name in self.mujoco_objects:
        if obj_name == obj:
            continue
        # teleport the object out of the workspace by setting its
        # free-joint x position to 10
        sim_state = self.sim.get_state()
        sim_state.qpos[self.sim.model.get_joint_qpos_addr(obj_name)[0]] = 10
        self.sim.set_state(sim_state)
        self.sim.forward()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_contact(self):
    """Returns True if gripper is in contact with an object."""
    for contact in self.sim.data.contact[: self.sim.data.ncon]:
        name1 = self.sim.model.geom_id2name(contact.geom1)
        name2 = self.sim.model.geom_id2name(contact.geom2)
        # any contact that involves a fingertip geom counts
        if name1 in self.finger_names or name2 in self.finger_names:
            return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_success(self):
    """Returns True if task has been completed.

    Also updates ``self.objects_on_pegs`` as a side effect: an object
    counts only when it sits on its peg and the gripper is far enough
    away (i.e. has released it).
    """
    gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
    for idx in range(len(self.ob_inits)):
        obj_str = str(self.item_names[idx]) + "0"
        obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
        # same shaping as the reach reward: close gripper -> value near 1
        reach_val = 1 - np.tanh(10.0 * np.linalg.norm(gripper_site_pos - obj_pos))
        self.objects_on_pegs[idx] = int(self.on_peg(obj_pos, idx) and reach_val < 0.6)
    if self.single_object_mode > 0:
        # need one object on peg
        return np.sum(self.objects_on_pegs) > 0
    # returns True if all objects are on correct pegs
    return np.sum(self.objects_on_pegs) == len(self.ob_inits)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def staged_rewards(self):
    """
    Helper function to return staged rewards based on current physical states.

    Returns:
        r_reach (float): reward for reaching and grasping
        r_lift (float): reward for lifting and aligning
        r_stack (float): reward for stacking
    """
    # reaching is successful when the gripper site is close to
    # the center of the cube
    cubeA_pos = self.sim.data.body_xpos[self.cubeA_body_id]
    cubeB_pos = self.sim.data.body_xpos[self.cubeB_body_id]
    gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
    dist = np.linalg.norm(gripper_site_pos - cubeA_pos)
    r_reach = (1 - np.tanh(10.0 * dist)) * 0.25
    # collision checking: geom1/geom2 ordering in a mujoco contact is
    # arbitrary, so every pairing is tested both ways
    touch_left_finger = False
    touch_right_finger = False
    touch_cubeA_cubeB = False
    for i in range(self.sim.data.ncon):
        c = self.sim.data.contact[i]
        if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cubeA_geom_id:
            touch_left_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 in self.l_finger_geom_ids:
            touch_left_finger = True
        if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cubeA_geom_id:
            touch_right_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 in self.r_finger_geom_ids:
            touch_right_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 == self.cubeB_geom_id:
            touch_cubeA_cubeB = True
        if c.geom1 == self.cubeB_geom_id and c.geom2 == self.cubeA_geom_id:
            touch_cubeA_cubeB = True
    # additional grasping reward: both fingers touching cube A
    if touch_left_finger and touch_right_finger:
        r_reach += 0.25
    # lifting is successful when the cube is above the table top
    # by a margin
    cubeA_height = cubeA_pos[2]
    table_height = self.table_full_size[2]
    cubeA_lifted = cubeA_height > table_height + 0.04
    r_lift = 1.0 if cubeA_lifted else 0.0
    # Aligning is successful when cubeA is right above cubeB
    # (horizontal distance only)
    if cubeA_lifted:
        horiz_dist = np.linalg.norm(
            np.array(cubeA_pos[:2]) - np.array(cubeB_pos[:2])
        )
        r_lift += 0.5 * (1 - np.tanh(horiz_dist))
    # stacking is successful when the block is lifted and
    # the gripper is not holding the object
    r_stack = 0
    not_touching = not touch_left_finger and not touch_right_finger
    if not_touching and r_lift > 0 and touch_cubeA_cubeB:
        r_stack = 2.0
    return (r_reach, r_lift, r_stack)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_objects(self, mujoco_objects):
    """Adds physical objects to the MJCF model.

    For each object: merges its assets, attaches a free joint to its
    collision body, and appends it to the worldbody. Tracks the largest
    horizontal radius seen in ``self.max_horizontal_radius``.
    """
    self.mujoco_objects = mujoco_objects
    self.objects = {}  # xml manifestation
    self.max_horizontal_radius = 0
    for name, mjcf_obj in mujoco_objects.items():
        self.merge_asset(mjcf_obj)
        # Load object
        collision_body = mjcf_obj.get_collision(name=name, site=True)
        collision_body.append(new_joint(name=name, type="free", damping="0.0005"))
        self.objects[name] = collision_body
        self.worldbody.append(collision_body)
        radius = mjcf_obj.get_horizontal_radius()
        if radius > self.max_horizontal_radius:
            self.max_horizontal_radius = radius
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hide_visualization(self):
    """Hide all visualization geoms and sites.

    Sets their rgba to fully transparent; should be called before
    rendering observations for agents.
    """
    invisible = "0 0 0 0"
    for name in self.visualization_sites:
        self.worldbody.find(".//site[@name='{}']".format(name)).set("rgba", invisible)
    for name in self.visualization_geoms:
        self.worldbody.find(".//geom[@name='{}']".format(name)).set("rgba", invisible)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_model(self):
    """
    Loads the arena and pot object.
    """
    super()._load_model()
    # place the robot base at the world origin
    self.mujoco_robot.set_base_xpos([0, 0, 0])
    # load model for table top workspace
    self.mujoco_arena = TableArena(
        table_full_size=self.table_full_size, table_friction=self.table_friction
    )
    if self.use_indicator_object:
        self.mujoco_arena.add_pos_indicator()
    # The sawyer robot has a pedestal, we want to align it with the table
    self.mujoco_arena.set_origin([0.45 + self.table_full_size[0] / 2, 0, 0])
    # task includes arena, robot, and objects of interest
    self.model = TableTopTask(
        self.mujoco_arena,
        self.mujoco_robot,
        self.mujoco_objects,
        self.object_initializer,
    )
    # sample initial placements for all task objects
    self.model.place_objects()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pot_quat(self):
    """Returns the orientation of the pot as an (x, y, z, w) quaternion."""
    # NOTE(review): reads cube_body_id although the method is named for the
    # pot — confirm against the environment's body naming.
    raw_quat = self.sim.data.body_xquat[self.cube_body_id]
    return T.convert_quat(raw_quat, to="xyzw")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_success(self):
    """Returns True if task is successfully completed."""
    # success: cube raised more than 10 cm above the table top
    cube_height = self.sim.data.body_xpos[self.cube_body_id][2]
    return cube_height > self.table_full_size[2] + 0.10
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_new_episode(self):
    """Bookkeeping to do at the start of each new episode."""
    # persist any data collected during the previous episode
    if self.has_interaction:
        self._flush()
    self.t = 0  # timestep counter for the new episode
    self.has_interaction = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _flush(self):
    """Method to flush internal state to disk.

    Writes the collected simulator states and action infos to a uniquely
    named .npz file in the episode directory, then clears the buffers.
    """
    # timestamp (split at the decimal point) makes the filename unique
    secs, frac = str(time.time()).split(".")
    state_path = os.path.join(self.ep_directory, "state_{}_{}.npz".format(secs, frac))
    # unwrap the env when possible to record the underlying class name
    target = self.env.unwrapped if hasattr(self.env, "unwrapped") else self.env
    np.savez(
        state_path,
        states=np.array(self.states),
        action_infos=self.action_infos,
        env=target.__class__.__name__,
    )
    self.states = []
    self.action_infos = []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_asset_dependency(self):
    """Converts every file dependency into absolute path so when we merge we don't break things."""
    base = os.path.abspath(self.folder)
    for node in self.asset.findall("./*[@file]"):
        node.set("file", os.path.join(base, node.get("file")))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_default_element(self, name):
    """Creates a <@name/> tag under root if there is none, returning it."""
    existing = self.root.find(name)
    if existing is None:
        existing = ET.Element(name)
        self.root.append(existing)
    return existing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other, merge_body=True):
    """Default merge method.

    Args:
        other: another MujocoXML instance; raises XMLError otherwise.
        merge_body: True if merging child bodies of @other. Defaults to True.

    Merges <worldbody/> (optionally), <asset/>, <actuator/>, <equality/>,
    <contact/> and <default/> of @other into @self.
    """
    if not isinstance(other, MujocoXML):
        raise XMLError("{} is not a MujocoXML instance.".format(type(other)))
    if merge_body:
        for child_body in other.worldbody:
            self.worldbody.append(child_body)
    self.merge_asset(other)
    # these sections are merged unconditionally, in a fixed order
    sections = (
        (other.actuator, self.actuator),
        (other.equality, self.equality),
        (other.contact, self.contact),
        (other.default, self.default),
    )
    for src, dst in sections:
        for element in src:
            dst.append(element)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.