text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_table(self, table_name): """Load a table. This will fail if the tables does not already exist in the database. If the table exists, its columns will be reflected and are available on the :py:class:`Table <dataset.Table>` object. Returns a :py:class:`Table <dataset.Table>` instance. :: table = db.load_table('population') """
def load_table(self, table_name):
    """Load a table that must already exist in the database.

    Reflects the table's columns and returns a :py:class:`Table` instance;
    instances are cached under the lock so repeated loads share one object.
    """
    table_name = normalize_table_name(table_name)
    with self.lock:
        cached = self._tables.get(table_name)
        if cached is None:
            cached = Table(self, table_name)
            self._tables[table_name] = cached
        return cached
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_table(self, table_name, primary_id=None, primary_type=None): """Load or create a table. This is now the same as ``create_table``. :: table = db.get_table('population') # you can also use the short-hand syntax: table = db['population'] """
def get_table(self, table_name, primary_id=None, primary_type=None):
    """Load or create a table; a synonym for ``create_table``.

    Supports the short-hand ``db['population']`` access pattern.
    """
    # Pure delegation: both entry points share one implementation.
    return self.create_table(table_name, primary_id, primary_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(self, query, *args, **kwargs): """Run a statement on the database directly. Allows for the execution of arbitrary read/write queries. A query can either be a plain text string, or a `SQLAlchemy expression <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_. If a plain string is passed in, it will be converted to an expression automatically. Further positional and keyword arguments will be used for parameter binding. To include a positional argument in your query, use question marks in the query (i.e. ``SELECT * FROM tbl WHERE a = ?```). For keyword arguments, use a bind parameter (i.e. ``SELECT * FROM tbl WHERE a = :foo``). :: statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user' for row in db.query(statement): print(row['user'], row['c']) The returned iterator will yield each result sequentially. """
def query(self, query, *args, **kwargs):
    """Run a statement on the database directly.

    Accepts either a plain SQL string (wrapped into a SQLAlchemy text
    clause) or an expression object; positional/keyword args bind
    parameters.  Returns a lazy iterator over the result rows.
    """
    if isinstance(query, six.string_types):
        query = text(query)
    step = kwargs.pop('_step', QUERY_STEP)
    result_proxy = self.executable.execute(query, *args, **kwargs)
    return ResultIter(result_proxy, row_type=self.row_type, step=step)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printcolour(text, sameline=False, colour=get_colour("ENDC")): """ Print color text using escape codes """
def printcolour(text, sameline=False, colour=get_colour("ENDC")):
    """Print coloured text using ANSI escape codes.

    :param sameline: suppress the trailing newline when True
    """
    terminator = '' if sameline else '\n'
    sys.stdout.write(get_colour(colour) + text + bcolours["ENDC"] + terminator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def abbreviate(labels, rfill=' '): """ Abbreviate labels without introducing ambiguities. """
def abbreviate(labels, rfill=' '):
    """Abbreviate labels without introducing ambiguities.

    Each label is truncated to the shortest prefix length at which all
    abbreviations are distinct, right-padded with ``rfill``.

    :param labels: iterable of label strings
    :param rfill: padding character used to right-fill shorter labels
    :return: list of unambiguous, equal-width abbreviations
    """
    max_len = max(len(label) for label in labels)
    # BUGFIX: the range must include max_len itself.  Previously
    # range(1, max_len) left `abbrev` undefined for one-character labels
    # (NameError) and returned ambiguous abbreviations for labels that
    # only differ at their final character.
    for i in range(1, max_len + 1):
        abbrev = [label[:i].ljust(i, rfill) for label in labels]
        if len(abbrev) == len(set(abbrev)):
            break
    return abbrev
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def box_text(text, width, offset=0): """ Return text inside an ascii textbox """
def box_text(text, width, offset=0):
    """Return *text* centred inside an ASCII text box."""
    pad = " " * offset
    rule = pad + "-" * (width + 2)
    middle = pad + "|" + text.center(width) + "|"
    # top rule, centred text row, bottom rule (no trailing newline)
    return "\n".join([rule, middle, rule])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_bins(n, min_val, max_val, h=None, binwidth=None): """ Calculate number of bins for the histogram """
def calc_bins(n, min_val, max_val, h=None, binwidth=None):
    """Yield histogram bin edges for *n* samples spanning [min_val, max_val].

    Bin count defaults to max(10, log2(n + 1)); a zero binwidth is
    coerced to 0.1 to avoid an empty range.
    """
    if not h:
        h = max(10, math.log(n + 1, 2))
    if binwidth == 0:
        binwidth = 0.1
    if binwidth is None:
        binwidth = (max_val - min_val) / h
    for edge in drange(min_val, max_val, step=binwidth, include_stop=True):
        # emit whole-valued edges as ints for cleaner axis labels
        yield int(edge) if edge.is_integer() else edge
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_numbers(numbers): """ Read the input data in the most optimal way """
def read_numbers(numbers):
    """Yield floats from an in-memory iterable or from a file path."""
    if isiterable(numbers):
        for item in numbers:
            yield float(str(item).strip())
    else:
        # `numbers` is a path: stream the file line by line
        with open(numbers) as handle:
            for row in handle:
                yield float(row.strip())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_demo(): """ Run a demonstration """
# Locate the example data file two directories above this module.
module_dir = dirname(dirname(os.path.realpath(__file__)))
demo_file = os.path.join(module_dir, 'examples/data/exp.txt')
if not os.path.isfile(demo_file):
    # Bail out with an actionable hint when the sample data is missing.
    sys.stderr.write("demo input file not found!\n")
    sys.stderr.write("run the downloaddata.sh script in the example first\n")
    sys.exit(1)
# plotting a histogram
print("plotting a basic histogram")
print("plot_hist('%s')" % demo_file)
print("hist -f %s" % demo_file)
print("cat %s | hist" % demo_file)
plot_hist(demo_file)
print("*" * 80)
# with colours
print("histogram with colours")
print("plot_hist('%s', colour='blue')" % demo_file)
print("hist -f %s -c blue" % demo_file)
plot_hist(demo_file, colour='blue')
print("*" * 80)
# changing the shape of the point
print("changing the shape of the bars")
print("plot_hist('%s', pch='.')" % demo_file)
print("hist -f %s -p ." % demo_file)
plot_hist(demo_file, pch='.')
print("*" * 80)
# changing the size of the plot
print("changing the size of the plot")
print("plot_hist('%s', height=35.0, bincount=40)" % demo_file)
print("hist -f %s -s 35.0 -b 40" % demo_file)
plot_hist(demo_file, height=35.0, bincount=40)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_scatter(f, xs, ys, size, pch, colour, title): """ Plot a scatter plot from x,y coordinate data. Arguments: f -- comma delimited file w/ x,y coordinates xs -- if f not specified this is a file w/ x coordinates ys -- if f not specified this is a file w/ y coordinates size -- size of the plot pch -- shape of the points (any character) colour -- colour of the points title -- title of the plot """
# cs holds optional per-point colour labels (third CSV column), if any.
cs = None
if f:
    # Combined x,y[,colour] CSV input: either a path or an open iterable.
    if isinstance(f, str):
        with open(f) as fh:
            data = [tuple(line.strip().split(',')) for line in fh]
    else:
        data = [tuple(line.strip().split(',')) for line in f]
    xs = [float(i[0]) for i in data]
    ys = [float(i[1]) for i in data]
    if len(data[0]) > 2:
        cs = [i[2].strip() for i in data]
elif isinstance(xs, list) and isinstance(ys, list):
    # xs/ys already supplied as in-memory lists: use them as-is.
    pass
else:
    # xs/ys are file paths: one coordinate per line in each file.
    with open(xs) as fh:
        xs = [float(str(row).strip()) for row in fh]
    with open(ys) as fh:
        ys = [float(str(row).strip()) for row in fh]
_plot_scatter(xs, ys, size, pch, colour, title, cs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def syllabify(self): """ Syllabifier module for Middle High German The algorithm works by applying the MOP(Maximal Onset Principle) on open syllables. For closed syllables, the legal partitions are checked and applied. The word is always returned in lowercase. Examples: ['ent', 'slâ', 'fen'] ['fröu', 'de'] ['füe', 'rest'] """
# Array holding the index of each given syllable ind = [] i = 0 # Iterate through letters of word searching for the nuclei while i < len(self.word) - 1: if self.word[i] in SHORT_VOWELS + LONG_VOWELS: nucleus = '' # Find cluster of vowels while self.word[i] in SHORT_VOWELS + LONG_VOWELS and i < len(self.word) - 1: nucleus += self.word[i] i += 1 try: # Check whether it is suceeded by a geminant if self.word[i] == self.word[i + 1]: ind.append(i) i += 2 continue except IndexError: pass if nucleus in SHORT_VOWELS: ind.append(i + 2 if self.word[i:i+3] in TRIPHTHONGS else i + 1 if self.word[i:i + 2] in DIPHTHONGS else i) continue else: ind.append(i - 1) continue i += 1 self.syllabified = self.word for n, k in enumerate(ind): self.syllabified = self.syllabified[:k + n + 1] + "." + self.syllabified[k + n + 1:] # Check whether the last syllable lacks a vowel nucleus self.syllabified = self.syllabified.split(".") if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) == 0: self.syllabified[-2] += self.syllabified[-1] self.syllabified = self.syllabified[:-1] return self.syllabified
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ASCII_encoding(self): """Returns the ASCII encoding of a string"""
def ASCII_encoding(self):
    """Return the closest ASCII transliteration of ``self.word``.

    NFKD decomposition separates base letters from combining marks,
    which are then dropped by the 'ignore' ASCII encoding.
    """
    decomposed = unicodedata.normalize('NFKD', self.word)
    return decomposed.encode('ASCII', 'ignore').decode('utf-8')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _convert_consonant(sign): """ Uses dictionary to replace ATF convention for unicode characters. input = ['as,', 'S,ATU', 'tet,', 'T,et', 'sza', 'ASZ'] output = ['aṣ', 'ṢATU', 'teṭ', 'Ṭet', 'ša', 'AŠ'] :param sign: string :return: string """
def _convert_consonant(sign):
    """Replace ATF consonant conventions with their unicode characters.

    e.g. 'sza' -> 'ša', 'as,' -> 'aṣ' (mapping defined in TITTLES).

    :param sign: transliterated sign string
    :return: sign with ATF sequences replaced
    """
    for atf_seq, unicode_char in TITTLES.items():
        sign = sign.replace(atf_seq, unicode_char)
    return sign
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _convert_number_to_subscript(num): """ Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript """
subscript = '' for character in str(num): subscript += chr(0x2080 + int(character)) return subscript
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _convert_num(self, sign): """ Converts number registered in get_number_from_sign. input = ["a2", "☉", "be3"] output = ["a₂", "☉", "be₃"] :param sign: string :return sign: string """
# Check if there's a number at the end new_sign, num = self._get_number_from_sign(sign) if num < 2: # "ab" -> "ab" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if num > 3: # "buru14" -> "buru₁₄" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if self.two_three: # pylint: disable=no-else-return return new_sign.replace(str(num), self._convert_number_to_subscript(num)) else: # "bad3" -> "bàd" for i, character in enumerate(new_sign): new_vowel = '' if character in VOWELS: if num == 2: # noinspection PyUnusedLocal new_vowel = character + chr(0x0301) elif num == 3: new_vowel = character + chr(0x0300) break return new_sign[:i] + normalize('NFC', new_vowel) + \ new_sign[i+1:].replace(str(num), '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self, text_string): """ Expects a list of tokens, will return the list converted from ATF format to print-format. input = ["a", "a2", "a3", "geme2", "bad3", "buru14"] output = ["a", "á", "à", "géme", "bàd", "buru₁₄"] :param text_string: string :return: text_string """
def process(self, text_string):
    """Convert a list of tokens from ATF format to print format.

    e.g. ["a2", "bad3"] -> ["á", "bàd"]

    :param text_string: list of tokens
    :return: list of converted tokens
    """
    # consonant substitution first, then index-number conversion
    return [self._convert_num(self._convert_consonant(token))
            for token in text_string]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Levenshtein_Distance(w1, w2): """ Computes Levenshtein Distance between two words Args: :param w1: str :param w2: str :return: int Examples: 2 4 2 """
def Levenshtein_Distance(w1, w2):
    """Compute the Levenshtein (edit) distance between two words.

    Classic two-row dynamic-programming formulation.

    :param w1: str
    :param w2: str
    :return: minimal number of insertions, deletions and substitutions
    """
    rows, cols = len(w1), len(w2)
    previous = list(range(cols + 1))
    current = [0] * (cols + 1)
    for i in range(rows):
        current[0] = i + 1
        for j in range(cols):
            deletion = previous[j + 1] + 1
            insertion = current[j] + 1
            substitution = previous[j] + (0 if w1[i] == w2[j] else 1)
            current[j + 1] = min(deletion, insertion, substitution)
        previous, current = current, previous
    # after the final swap `previous` holds the last computed row
    return previous[-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Damerau_Levenshtein_Distance(w1, w2): """ Computes Damerau-Levenshtein Distance between two words Args: :param w1: str :param w2: str :return int: Examples: For the most part, Damerau-Levenshtein behaves identically to Levenshtein: 2 4 The strength of DL lies in detecting transposition of characters: 1 """
# Define alphabet alph = sorted(list(set(w1 + w2))) # Calculate alphabet size alph_s = len(alph) dam_ar = [0 for _ in range(alph_s)] mat = [[0 for _ in range(len(w2) + 2)] for _ in range(len(w1) + 2)] max_dist = len(w1) + len(w2) mat[0][0] = max_dist # Initialize matrix margin to the maximum possible distance (essentially inf) for ease of calculations (avoiding try blocks) for i in range(1, len(w1) + 2): mat[i][0] = max_dist mat[i][1] = i - 1 for i in range(1, len(w2) + 2): mat[0][i] = max_dist mat[1][i] = i - 1 for i in range(2, len(w1) + 2): tem = 0 for j in range(2, len(w2) + 2): k = dam_ar[alph.index(w2[j - 2])] l = tem if w1[i - 2] == w2[j - 2]: cost = 0 tem = j else: cost = 1 # The reccurence relation of DL is identical to that of Levenshtein with the addition of transposition mat[i][j] = min(mat[i - 1][j - 1] + cost, mat[i][j - 1] + 1, mat[i - 1][j] + 1, mat[k - 1][l - 1] + i + j - k - l - 1) dam_ar[alph.index(w1[i - 2])] = i return mat[-1][-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def counter_from_str(self, string): """Build word frequency list from incoming string."""
def counter_from_str(self, string):
    """Build a word-frequency Counter from *string*.

    Characters in ``self.punctuation`` are stripped before tokenizing
    with the instance's Punkt tokenizer.
    """
    cleaned = ''.join(ch for ch in string if ch not in self.punctuation)
    return Counter(self.punkt.word_tokenize(cleaned))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assemble_corpus_string(self, corpus): """Takes a list of filepaths, returns a string containing contents of all files."""
def _assemble_corpus_string(self, corpus):
    """Yield the cleaned, lower-cased text of every file in *corpus*.

    :param corpus: either 'phi5' or 'tlg'
    :raises ValueError: if *corpus* names an unknown corpus
    """
    if corpus == 'phi5':
        filepaths = assemble_phi5_author_filepaths()
        file_cleaner = phi5_plaintext_cleanup
    elif corpus == 'tlg':
        filepaths = assemble_tlg_author_filepaths()
        file_cleaner = tlg_plaintext_cleanup
    else:
        # BUGFIX: an unknown corpus previously fell through and surfaced
        # as a confusing NameError on the first iteration.
        raise ValueError(
            "Corpus '{}' not supported; expected 'phi5' or 'tlg'.".format(corpus))
    for filepath in filepaths:
        with open(filepath) as file_open:
            file_read = file_open.read().lower()
        yield file_cleaner(file_read)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_punctuation_dict() -> Dict[int, None]: """ Provide a dictionary for removing punctuation, swallowing spaces. :return dict with punctuation from the unicode table Im ok Oh Fine """
def remove_punctuation_dict() -> Dict[int, None]:
    """Build a ``str.translate`` table deleting all unicode punctuation.

    Maps every codepoint whose unicode category starts with 'P'
    (punctuation) to None, so translation swallows it entirely.

    :return: dict with punctuation from the unicode table
    """
    return {
        codepoint: None
        for codepoint in range(sys.maxunicode)
        if unicodedata.category(chr(codepoint)).startswith('P')
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def punctuation_for_spaces_dict() -> Dict[int, str]: """ Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion to keep stress patterns in alignment with original vowel positions in the verse. :return dict with punctuation from the unicode table I m ok Oh Fine """
def punctuation_for_spaces_dict() -> Dict[int, str]:
    """Build a ``str.translate`` table replacing punctuation with spaces.

    Keeping a space per removed character preserves original character
    positions, which scansion needs for stress alignment.

    :return: dict with punctuation from the unicode table
    """
    return {
        codepoint: " "
        for codepoint in range(sys.maxunicode)
        if unicodedata.category(chr(codepoint)).startswith('P')
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def differences(scansion: str, candidate: str) -> List[int]: """ Given two strings, return a list of index positions where the contents differ. :param scansion: :param candidate: :return: [2] """
def differences(scansion: str, candidate: str) -> List[int]:
    """Return the index positions (spaces ignored) where two strings differ.

    :param scansion: reference scansion string
    :param candidate: candidate scansion string
    :return: list of differing index positions, e.g. [2]
    """
    before = scansion.replace(" ", "")
    after = candidate.replace(" ", "")
    # BUGFIX: zip bounds the comparison to the shorter string instead of
    # raising IndexError when the space-stripped lengths differ.
    return [idx for idx, (b, a) in enumerate(zip(before, after)) if b != a]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def space_list(line: str) -> List[int]: """ Given a string, return a list of index positions where a blank space occurs. :param line: :return: [0, 1, 2, 3, 7] """
def space_list(line: str) -> List[int]:
    """Return the index of every blank space in *line*.

    :param line: input string
    :return: list of space positions, e.g. [0, 1, 2, 3, 7]
    """
    return [idx for idx, char in enumerate(line) if char == " "]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]: """ Given a line of syllables and spaces, and a list of syllables, produce a list of the syllables with trailing spaces attached as approriate. :param line: :param syllables: :return: [' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no '] """
def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]:
    """Reattach trailing spaces from *line* onto the matching syllables.

    :param line: original line of syllables and spaces
    :param syllables: bare syllables in order of appearance
    :return: syllables with following spaces attached, e.g.
        [' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no ']
    """
    result = []
    cursor = 0
    line_length = len(line)
    for position, syllable in enumerate(syllables):
        found_at = line.index(syllable, cursor)
        cursor = found_at + len(syllable)
        if position == 0 and found_at > 0:
            # line starts with punctuation that was substituted w/ spaces
            syllable = (" " * found_at) + syllable
        if cursor + 1 > line_length:
            result.append(syllable)
            return result
        upcoming = line[cursor]
        if upcoming != " ":
            result.append(syllable)
            continue
        # absorb the run of spaces that follows this syllable
        probe = cursor
        while probe < line_length and upcoming == " ":
            syllable += " "
            probe += 1
            if probe == line_length:
                result.append(syllable)
                return result
            upcoming = line[probe]
        cursor = probe - 1
        result.append(syllable)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str: """ Given a list of syllables, and a list of integers indicating the position of spaces, return a string that has a space inserted at the designated points. :param syllables: :param spaces: :return: 'won to tree dun' """
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str:
    """Rebuild a line by inserting spaces at the designated positions.

    :param syllables: syllables to join
    :param spaces: index positions at which to insert a space
    :return: reconstructed string, e.g. 'won to tree dun'
    """
    chars = list("".join(syllables))
    for position in spaces:
        chars.insert(position, " ")
    return "".join(flatten(chars))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stress_positions(stress: str, scansion: str) -> List[int]: """ Given a stress value and a scansion line, return the index positions of the stresses. :param stress: :param scansion: :return: [0, 3, 6] """
def stress_positions(stress: str, scansion: str) -> List[int]:
    """Return the indices (spaces removed) at which *stress* occurs.

    :param stress: the stress marker character
    :param scansion: a scansion line
    :return: e.g. [0, 3, 6]
    """
    compact = scansion.replace(" ", "")
    return [idx for idx, mark in enumerate(compact) if mark == stress]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_elisions(elided: List[str]) -> str: """ Given a list of strings with different space swapping elisions applied, merge the elisions, taking the most without compounding the omissions. :param elided: :return: 'ignav agua mult hiatus' """
def merge_elisions(elided: List[str]) -> str:
    """Merge equal-length elision variants into one string.

    A position blanked in ANY variant is blanked in the result, taking
    the most elisions without compounding the omissions.

    :param elided: list of equally long strings
    :return: merged string, e.g. 'ignav agua mult hiatus'
    """
    merged = list(elided[0])
    for variant in elided:
        for idx, char in enumerate(variant):
            if char == " ":
                merged[idx] = " "
    return "".join(merged)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the right, merging strings as necessary. :param letters: :param positions: :return: ['a', 'b', '', '', 'bra'] """
def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]:
    """Merge the letter at each position into its right neighbour.

    The vacated cell becomes an empty string; *letters* is mutated and
    returned, e.g. ['a', 'b', '', '', 'bra'].
    """
    for pos in positions:
        neighbour = pos + 1
        letters[neighbour] = letters[pos] + letters[neighbour]
        letters[pos] = ""
    return letters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the left, merging strings as necessary. :param letters: :param positions: :return: ['ab', '', '', '', 'bra'] """
def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]:
    """Merge the letter at each position into its left neighbour.

    The vacated cell becomes an empty string; *letters* is mutated and
    returned, e.g. ['ab', '', '', '', 'bra'].
    """
    for pos in positions:
        neighbour = pos - 1
        letters[neighbour] = letters[neighbour] + letters[pos]
        letters[pos] = ""
    return letters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_next(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letter positions, merge each letter with its next neighbor. :param letters: :param positions: :return: ['ab', '', 'ov', '', 'o'] ['ab', '', 'ov', 'o', ''] """
def merge_next(letters: List[str], positions: List[int]) -> List[str]:
    """Merge each letter at the given positions with its next neighbour.

    The neighbour's cell becomes empty; *letters* is mutated and returned.
    """
    for pos in positions:
        following = pos + 1
        letters[pos] += letters[following]
        letters[following] = ""
    return letters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_blanks(letters: List[str]): """ Given a list of letters, remove any empty strings. :param letters: :return: ['a', 'b', 'c'] """
def remove_blanks(letters: List[str]):
    """Return *letters* with empty strings removed.

    :param letters: list of strings
    :return: filtered list, e.g. ['a', 'b', 'c']
    """
    return [letter for letter in letters if letter != ""]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_on(word: str, section: str) -> Tuple[str, str]: """ Given a string, split on a section, and return the two sections as a tuple. :param word: :param section: :return: ('ham', 'rye') """
def split_on(word: str, section: str) -> Tuple[str, str]:
    """Split *word* at the first occurrence of *section*.

    The section stays attached to the left part, e.g. ('ham', 'rye').

    :param word: string to split
    :param section: substring to split on (must be present)
    :return: (left-including-section, remainder)
    """
    cut = word.index(section) + len(section)
    return word[:cut], word[cut:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_blank_spaces(syllables: List[str]) -> List[str]: """ Given a list of letters, remove any blank spaces or empty strings. :param syllables: :return: ['a', 'b', 'c'] """
def remove_blank_spaces(syllables: List[str]) -> List[str]:
    """Return *syllables* with single spaces and empty strings dropped.

    :param syllables: list of strings
    :return: filtered list, e.g. ['a', 'b', 'c']
    """
    return [syl for syl in syllables if syl not in ("", " ")]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]: """ Given a list of characters and spaces, a matching regular expression, and a quality or character, replace the matching character with a space, overwriting with an offset and a multiplier if provided. :param char_list: :param regexp: :param quality: :param offset: :return: ['m', 'u', 'l', 't', ' ', ' ', 'i', 'g', 'n', 'e'] """
def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]:
    """Overwrite one cell of *char_list* per regex match, in place.

    The list is joined, *regexp* is matched against the joined string,
    and for every match the cell at match-start + *offset* is replaced
    with *quality*.

    :param char_list: list of characters and spaces
    :param regexp: matching regular expression
    :param quality: replacement character
    :param offset: distance from each match start to the cell replaced
    :return: the mutated char_list
    """
    text = "".join(char_list)
    pattern = re.compile(regexp)
    for match in pattern.finditer(text):
        char_list[match.start() + offset] = quality
    return char_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_unstresses(stresses: List[int], count: int) -> List[int]: """ Given a list of stressed positions, and count of possible positions, return a list of the unstressed positions. :param stresses: a list of stressed positions :param count: the number of possible positions :return: a list of unstressed positions [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16] """
def get_unstresses(stresses: List[int], count: int) -> List[int]:
    """Return the unstressed positions among ``range(count)``.

    :param stresses: a list of stressed positions
    :param count: the number of possible positions
    :return: sorted list of unstressed positions,
        e.g. [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16]
    """
    # BUGFIX: sort the set difference explicitly; relying on set iteration
    # order made the result ordering an implementation detail.
    return sorted(set(range(count)) - set(stresses))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decline_weak_masculine_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak masculine nouns. goði goða goða goða goðar goða goðum goða hluti hluta hluta hluta hlutar hluta hlutum hluta arfi arfa arfa arfa arfar arfa örfum arfa bryti bryta bryta bryta brytjar brytja brytjum brytja vöðvi vöðva vöðva vöðva vöðvar vöðva vöðum vöðva The main pattern is: :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """
def decline_weak_masculine_noun(ns: str, gs: str, np: str):
    """Print the full declension of a weak masculine noun.

    :param ns: nominative singular
    :param gs: genitive singular
    :param np: nominative plural
    """
    # Singular: nominative keeps its own form; accusative, dative and
    # genitive all share the genitive-singular form.
    print(ns)
    for _ in range(3):
        print(gs)
    # Plural
    print(np)        # nominative
    print(np[:-1])   # accusative
    # Dative plural: u-umlaut on the stem; stems ending in -v- drop it.
    if len(np) > 3 and np[-3] == "v":
        print(apply_u_umlaut(np[:-3]) + "um")
    else:
        print(apply_u_umlaut(np[:-2]) + "um")
    print(np[:-1])   # genitive
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decline_weak_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak neuter nouns. auga auga auga auga augu augu augum augna hjarta hjarta hjarta hjarta hjörtu hjörtu hjörtum hjartna lunga lunga lunga lunga lungu lungu lungum lungna eyra eyra eyra eyra eyru eyru eyrum eyrna The main pattern is: -a -a -a -a -u -u -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """
def decline_weak_neuter_noun(ns: str, gs: str, np: str):
    """Print the full declension of a weak neuter noun.

    The pattern is: -a -a -a -a / -u -u -um -na

    :param ns: nominative singular
    :param gs: genitive singular
    :param np: nominative plural
    """
    # Singular: nominative, accusative and dative are identical.
    for _ in range(3):
        print(ns)
    print(gs)              # genitive singular
    # Plural: nominative and accusative are identical.
    print(np)
    print(np)
    print(np + "m")        # dative plural
    print(ns[:-1] + "na")  # genitive plural
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_id_by_name(query): """Do a case-insensitive regex match on author name, returns TLG id."""
def select_id_by_name(query):
    """Case-insensitively regex-match *query* against author names.

    :param query: regex fragment to search for
    :return: list of (tlg_id, author_name) tuples that matched
    """
    pattern = regex.compile(r'{}'.format(query.casefold()), flags=regex.VERSION1)
    return [(_id, author)
            for _id, author in get_id_author().items()
            if pattern.findall(author.casefold())]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_date_of_author(_id): """Pass author id and return the name of its associated date."""
def get_date_of_author(_id):
    """Return the date label associated with author *_id*.

    :param _id: author identifier
    :return: the date string, or None if the id is unknown
    """
    for date, author_ids in get_date_author().items():
        if _id in author_ids:
            return date
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_epoch(_str): """Take incoming string, return its epoch."""
def _get_epoch(_str):
    """Classify a date string as 'ad', 'bc', itself ('Incertum'/'Varia'),
    or None when the epoch cannot be decided."""
    if _str.startswith('A.D. '):
        return 'ad'
    if _str.startswith('a. A.D. '):
        return None  # ante A.D.: ambiguous
    if _str.startswith('p. A.D. '):
        return 'ad'
    if regex.match(r'^[0-9]+ B\.C\. *', _str):
        return 'bc'
    if regex.match(r'^a\. *[0-9]+ B\.C\. *', _str):
        return 'bc'
    if regex.match(r'^p\. *[0-9]+ B\.C\. *', _str):
        return None  # post B.C.: ambiguous
    if _str == 'Incertum' or _str == 'Varia':
        return _str
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decline_noun(self, noun, gender, mimation=True): """Return a list of all possible declined forms given any form of a noun and its gender."""
# Derive the bare stem for the given gender; endings are then attached
# per case and number.  NOTE(review): the `mimation` parameter is unused
# in this body — confirm whether it should alter the endings.
stem = self.stemmer.get_stem(noun, gender)
declension = []
for case in self.endings[gender]['singular']:
    if gender == 'm':
        form = stem + self.endings[gender]['singular'][case]
    else:
        # feminine endings carry a leading theme character that the stem
        # already supplies, hence the [1:] slice
        form = stem + self.endings[gender]['singular'][case][1:]
    declension.append((form, {'case': case, 'number': 'singular'}))
for case in self.endings[gender]['dual']:
    if gender == 'm':
        form = stem + self.endings[gender]['dual'][case]
    else:
        form = stem + self.endings[gender]['dual'][case][1:]
    declension.append((form, {'case': case, 'number': 'dual'}))
for case in self.endings[gender]['plural']:
    if gender == 'm':
        form = stem + self.endings[gender]['plural'][case]
    else:
        # pick the plural ending whose first character matches the
        # stem's theme vowel (defaulting to 'ā')
        if stem[-3] in self.akkadian['macron_vowels']:
            theme_vowel = stem[-3]
        else:
            theme_vowel = 'ā'
        ending = [x for x in self.endings[gender]['plural'][case]
                  if x[0] == theme_vowel]
        if stem[-2] in self.akkadian['short_vowels']:
            form = stem[:-2] + ending[0]
        elif stem[-1] in self.akkadian['consonants'] and \
                stem[-2] in self.akkadian['macron_vowels']:
            form = stem + ending[0]
        else:
            form = stem[:-1] + ending[0]
    declension.append((form, {'case': case, 'number': 'plural'}))
return declension
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stem(text): """make string lower-case"""
def stem(text):
    """Stem each word of a French text.

    :param text: input text
    :return: space-joined stemmed words (with a trailing space, matching
        the historical output format)
    """
    tokenizer = WordTokenizer('french')
    tokens = tokenizer.tokenize(text.lower())
    stemmed = []
    for token in tokens:
        # try the simple noun endings first
        token, was_stemmed = matchremove_noun_endings(token)
        if not was_stemmed:
            # fall back to verb endings
            token = matchremove_verb_endings(token)
        stemmed.append(token)
    return ''.join(token + ' ' for token in stemmed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_i_to_j_optional(self, line: str) -> str: """ Sometimes for the demands of meter a more permissive i to j transformation is warranted. :param line: :return: Italjam Lāvīnjaque omnjum """
words = line.split(" ")
space_list = string_utils.space_list(line)
corrected_words = []
for word in words:
    # Known prefixes are split off and converted separately so that a
    # consonantal i at the prefix boundary is handled correctly.
    found = False
    for prefix in self.constants.PREFIXES:
        if word.startswith(prefix) and word != prefix:
            corrected_words.append(self.syllabifier.convert_consonantal_i(prefix))
            corrected_words.append(
                self.syllabifier.convert_consonantal_i(word[len(prefix):]))
            found = True
            break
    if not found:
        corrected_words.append(self.syllabifier.convert_consonantal_i(word))
new_line = string_utils.join_syllables_spaces(corrected_words, space_list)
# the following two may be tunable and subject to improvement
# consonant + i + vowel (other than i): overwrite the i with j
char_list = string_utils.overwrite(
    list(new_line),
    "[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format(
        self.constants.VOWELS_WO_I),
    "j", 1)
# liquid + i + vowel (other than i): likewise
char_list = string_utils.overwrite(
    char_list,
    "[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I),
    "j", 1)
return "".join(char_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def accent_by_position(self, verse_line: str) -> str: """ Accent vowels according to the rules of scansion. :param verse_line: a line of unaccented verse :return: the same line with vowels accented by position Ārma virūmque canō Trojae qui primus ab oris """
# Strip punctuation (substituted with spaces) and normalize i/j first.
line = verse_line.translate(self.punctuation_substitutions)
line = self.transform_i_to_j(line)
marks = list(line)
# locate and save dipthong positions since we don't want them being accented
dipthong_positions = []
for dipth in self.constants.DIPTHONGS:
    if dipth in line:
        dipthong_positions.append(line.find(dipth))
# Vowels followed by 2 consonants
# The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants.
# see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm
marks = string_utils.overwrite(marks,
                               "[{}][{}][{}]".format(
                                   self.constants.VOWELS,
                                   self.constants.CONSONANTS,
                                   self.constants.CONSONANTS_WO_H),
                               self.constants.STRESSED)
# one space (or more for 'dropped' punctuation may intervene)
marks = string_utils.overwrite(marks,
                               r"[{}][{}]\s*[{}]".format(
                                   self.constants.VOWELS,
                                   self.constants.CONSONANTS,
                                   self.constants.CONSONANTS_WO_H),
                               self.constants.STRESSED)
# ... if both consonants are in the next word, the vowel may be long
# .... but it could be short if the vowel is not on the thesis/emphatic part of the foot
# ... see Gildersleeve and Lodge p.446
marks = string_utils.overwrite(marks,
                               r"[{}]\s*[{}][{}]".format(
                                   self.constants.VOWELS,
                                   self.constants.CONSONANTS,
                                   self.constants.CONSONANTS_WO_H),
                               self.constants.STRESSED)
# x is considered as two letters
marks = string_utils.overwrite(marks, "[{}][xX]".format(self.constants.VOWELS),
                               self.constants.STRESSED)
# z is considered as two letters
marks = string_utils.overwrite(marks, r"[{}][zZ]".format(self.constants.VOWELS),
                               self.constants.STRESSED)
# Apply the accumulated stress marks back onto the original characters.
original_verse = list(line)
for idx, word in enumerate(original_verse):
    if marks[idx] == self.constants.STRESSED:
        original_verse[idx] = self.constants.VOWELS_TO_ACCENTS[original_verse[idx]]
# make sure dipthongs aren't accented
for idx in dipthong_positions:
    if original_verse[idx + 1] in self.constants.ACCENTS_TO_VOWELS:
        original_verse[idx + 1] = self.constants.ACCENTS_TO_VOWELS[original_verse[idx + 1]]
return "".join(original_verse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]: """ Calculate a dictionary of accent positions from a list of syllables with spaces. :param syllables_spaces: :return: """
# Flatten the syllable list back into the whole line so absolute
# character offsets can be computed against it.
line = string_utils.flatten(syllables_spaces)
mydict = {}  # type: Dict[int, int]  # syllable index -> absolute vowel offset
for idx, syl in enumerate(syllables_spaces):
    target_syllable = syllables_spaces[idx]
    # 'qu' scans as a consonant cluster: skip the first vowel match in it
    skip_qu = string_utils.starts_with_qu(target_syllable)
    matches = list(self.syllable_matcher.finditer(target_syllable))
    for position, possible in enumerate(matches):
        if skip_qu:
            skip_qu = False
            continue
        (start, end) = possible.span()
        if target_syllable[start:end] in \
                self.constants.VOWELS + self.constants.ACCENTED_VOWELS:
            # absolute offset = combined length of preceding syllables
            # plus the local start of the vowel inside this syllable
            part = line[:len("".join(syllables_spaces[:idx]))]
            offset = len(part) + start
            # sanity check: the computed offset should land on a vowel
            if line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS:
                LOG.error("Problem at line {} offset {}".format(line, offset))
            mydict[idx] = offset
return mydict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def produce_scansion(self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int]) -> str: """ Create a scansion string that has stressed and unstressed syllable positions in locations that correspond with the original texts syllable vowels. :param stresses list of syllable positions :param syllables_wspaces list of syllables with spaces escaped for punctuation or elision :param offset_map dictionary of syllable positions, and an offset amount which is the number of spaces to skip in the original line before inserting the accent. """
scansion = list(" " * len(string_utils.flatten(syllables_wspaces))) unstresses = string_utils.get_unstresses(stresses, len(syllables_wspaces)) try: for idx in unstresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.UNSTRESSED for idx in stresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.STRESSED except Exception as e: LOG.error("problem with syllables; check syllabification {}, {}".format( syllables_wspaces, e)) return "".join(scansion)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flag_dipthongs(self, syllables: List[str]) -> List[int]: """ Return a list of syllables that contain a dipthong :param syllables: :return: """
long_positions = [] for idx, syl in enumerate(syllables): for dipthong in self.constants.DIPTHONGS: if dipthong in syllables[idx]: if not string_utils.starts_with_qu(syllables[idx]): long_positions.append(idx) return long_positions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def elide(self, line: str, regexp: str, quantity: int = 1, offset: int = 0) -> str: """ Erase a section of a line, matching on a regex, pushing in a quantity of blank spaces, and jumping forward with an offset if necessary. If the elided vowel was strong, the vowel merged with takes on the stress. :param line: :param regexp: :param quantity: :param offset: :return: uv āvaritia mar avaritia """
matcher = re.compile(regexp) positions = matcher.finditer(line) new_line = line for match in positions: (start, end) = match.span() # pylint: disable=unused-variable if (start > 0) and new_line[start - 1: start + 1] in self.constants.DIPTHONGS: vowel_to_coerce = new_line[end - 1] new_line = new_line[:(start - 1) + offset] + (" " * (quantity + 2)) + \ self.constants.stress_accent_dict[vowel_to_coerce] + new_line[end:] else: new_line = new_line[:start + offset] + \ (" " * quantity) + new_line[start + quantity + offset:] return new_line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assign_candidate(self, verse: Verse, candidate: str) -> Verse: """ Helper method; make sure that the verse object is properly packaged. :param verse: :param candidate: :return: """
verse.scansion = candidate verse.valid = True verse.accented = self.formatter.merge_line_scansion( verse.original, verse.scansion) return verse
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __getRoots(self, lemma, model=None): """ Retrieve the known roots of a lemma :param lemma: Canonical form of the word (lemma) :type lemma: str :param model_roots: Model data from the loaded self.__data__. Can be passed by decline() :type model_roots: dict :return: Dictionary of roots with their root identifier as key :rtype: dict """
if lemma not in self.__lemmas__:
    raise UnknownLemma("%s is unknown" % lemma)

# Mapping from Collatinus root identifiers to lemma-entry field names:
# "K" is the canonical form, "1" the genitive/infinitive root, "2" the
# perfect root.
ROOT_IDS = {
    "K": "lemma",
    "1": "geninf",
    "2": "perf"
}

lemma_entry = self.__lemmas__[lemma]
# Roots explicitly recorded on the lemma entry itself (comma-separated).
original_roots = {
    root_id: lemma_entry[root_name].split(",")
    for root_id, root_name in ROOT_IDS.items()
    if root_id != "K" and lemma_entry[root_name]
}
returned_roots = {}

if not model:
    model = self.__models__[lemma_entry["model"]]

# For each registered root in the model,
for model_root_id, model_root_data in model["R"].items():

    # If we have K, it's equivalent to the canonical form
    if model_root_data[0] == "K":
        returned_roots[model_root_id] = [lemma_entry["lemma"]]
    # Otherwise we have a deletion count and an addition string
    else:
        deletion, addition = int(model_root_data[0]), model_root_data[1] or ""

        # If the root was already computed, reuse it;
        # otherwise start from the canonical form(s).
        if model_root_id != "1" and model_root_id in returned_roots:
            lemma_roots = returned_roots[model_root_id]
        else:
            lemma_roots = lemma_entry["lemma"].split(",")

        # Build the root: strip `deletion` chars, append `addition`.
        returned_roots[model_root_id] = [
            lemma_root[:-deletion] + addition
            for lemma_root in lemma_roots
        ]

# Explicit lemma-entry roots take precedence only where the model
# produced nothing; model-derived roots overwrite the originals here.
original_roots.update(returned_roots)
return original_roots
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decline(self, lemma, flatten=False, collatinus_dict=False): """ Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participle and adjective. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If sets to True, Dictionary of grammatically valid forms, including variants, with keys\ corresponding to morpho informations. :type collatinus_dict: bool :return: List of tuple where first value is the form and second the pos, ie [("sum", "v1ppip---")] :rtype: list or dict """
if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) # Get data information lemma_entry = self.__lemmas__[lemma] model = self.__models__[lemma_entry["model"]] # Get the roots roots = self.__getRoots(lemma, model=model) # Get the known forms in order keys = sorted([int(key) for key in model["des"].keys()]) forms_data = [(key, model["des"][str(key)]) for key in keys] # Generate the return dict forms = {key: [] for key in keys} for key, form_list in forms_data: for form in form_list: root_id, endings = tuple(form) for root in roots[root_id]: for ending in endings: forms[key].append(root + ending) # sufd means we have the original forms of the parent but we add a suffix if len(model["sufd"]): # For each constant form1 for key, iter_forms in forms.items(): new_forms = [] # We add the constant suffix for sufd in model["sufd"]: new_forms += [form+sufd for form in iter_forms] forms[key] = new_forms # If we need a secure version of the forms. For example, if we have variants if len(model["suf"]): cached_forms = {k: v+[] for k, v in forms.items()} # Making cache without using copy # For each suffix # The format is [suffix characters, [modified forms]] for suffixes in model["suf"]: suffix, modified_forms = suffixes[0], suffixes[1] for modified_form in modified_forms: forms[modified_form] += [f+suffix for f in cached_forms[modified_form]] # We update with the new roots # If some form do not exist, we delete them prehentively if len(model["abs"]): for abs_form in model["abs"]: if abs_form in forms: del forms[abs_form] if flatten: return list([form for case_forms in forms.values() for form in case_forms]) elif collatinus_dict: return forms else: return list( [(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sentence_context(match, language='latin', case_insensitive=True): """Take one incoming regex match object and return the sentence in which the match occurs. :rtype : str :param match: regex.match :param language: str """
# Sentence-final punctuation per language (regex alternations).
language_punct = {'greek': r'\.|;', 'latin': r'\.|\?|!'}

assert language in language_punct.keys(), \
    'Available punctuation schemes: {}'.format(language_punct.keys())

start = match.start()
end = match.end()
window = 1000
# NOTE(review): when start < window, `start - window` goes negative and the
# slice wraps from the end of the string -- confirm matches near the start
# of the text behave as intended.
snippet_left = match.string[start - window:start + 1]
snippet_right = match.string[end:end + window]
re_match = match.string[match.start():match.end()]

comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1)

# Left: find the last sentence boundary before the match.
left_punct = []
for punct in comp_sent_boundary.finditer(snippet_left):
    end = punct.end()
    left_punct.append(end)
try:
    last_period = left_punct.pop() + 1
except IndexError:
    # no boundary found: sentence starts at the window edge
    last_period = 0

# Right: find the first sentence boundary after the match.
right_punct = []
for punct in comp_sent_boundary.finditer(snippet_right):
    end = punct.end()
    right_punct.append(end)
try:
    first_period = right_punct.pop(0)
except IndexError:
    first_period = 0

# The match itself is highlighted between asterisks.
# NOTE(review): the `case_insensitive` parameter is unused in this body.
sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period]

return sentence
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_regex(input_str, pattern, language, context, case_insensitive=True): """Take input string and a regex pattern, then yield generator of matches in desired format. TODO: Rename this `match_pattern` and incorporate the keyword expansion code currently in search_corpus. :param input_str: :param pattern: :param language: :param context: Integer or 'sentence' 'paragraph' :rtype : str """
# Validate string contexts; numeric strings/ints become a window size.
if type(context) is str:
    contexts = ['sentence', 'paragraph']
    # NOTE(review): the `type(context) is int` disjunct is unreachable
    # inside this branch (context is known to be a str here).
    assert context in contexts or type(context) is int, 'Available contexts: {}'.format(contexts)
else:
    context = int(context)
# Yield each match formatted according to the requested context.
for match in _regex_span(pattern, input_str, case_insensitive=case_insensitive):
    if context == 'sentence':
        yield _sentence_context(match, language)
    elif context == 'paragraph':
        yield _paragraph_context(match)
    else:
        yield _window_match(match, context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_worlist_trie(wordlist): """ Creates a nested dictionary representing the trie created by the given word list. :param wordlist: str list: :return: nested dictionary {'e': {'i': {'n': {'a': {'n': {'d': {'e': {'r': {'__end__': '__end__'}}}}}, 'e': {'n': {'__end__': '__end__'}}}}}, 'n': {'e': {'b': {'e': {'n': {'__end__': '__end__'}}}}}} """
dicts = dict() for w in wordlist: curr = dicts for l in w: curr = curr.setdefault(l, {}) curr['__end__'] = '__end__' return dicts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid_hendecasyllables(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool True """
line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 11: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_HENDECASYLLABLES.__contains__(line)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid_pentameter(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Pentameter metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool: whether or not the scansion is a valid pentameter True """
line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 10: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_PENTAMETERS.__contains__(line)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hexameter_feet(self, scansion: str) -> List[str]: """ Produces a list of hexameter feet, stressed and unstressed syllables with spaces intact. If the scansion line is not entirely correct, it will attempt to corral one or more improper patterns into one or more feet. :param: scansion: the scanned line :return list of strings, representing the feet of the hexameter, or if the scansion is wildly incorrect, the function will return an empty list. - U U |- - |- - |- - |- U U |- U - U U |- - |U - |- - |- U U |- U """
# Scan the line right-to-left, popping characters and pairing them into
# two-symbol feet; spaces between symbols are carried along unchanged.
backwards_scan = list(scansion.rstrip())
feet = []
# Valid two-symbol foot endings (read right-to-left as chunk1 then chunk2).
candidates = [self.constants.STRESSED + self.constants.OPTIONAL_ENDING,
              self.constants.STRESSED + self.constants.STRESSED,
              self.constants.STRESSED + self.constants.UNSTRESSED,
              self.constants.UNSTRESSED + self.constants.STRESSED]
# Two unstressed marks: the tail of a dactyl missing its leading stress.
incomplete_foot = self.constants.UNSTRESSED + self.constants.UNSTRESSED
try:
    while len(backwards_scan) > 0:
        spaces = []
        chunk1 = backwards_scan.pop()
        # Absorb trailing spaces into chunk1 until a real symbol appears.
        while len("".join(chunk1).replace(" ", "")) == 0:
            if len(backwards_scan) == 0:
                feet.append(chunk1)
                return feet[::-1]
            chunk1 = backwards_scan.pop() + "".join(chunk1)
        chunk2 = backwards_scan.pop()
        while chunk2 == " ":
            spaces.append(chunk2)
            if len(backwards_scan) == 0:
                feet.append(chunk2)
                return feet[::-1]
            chunk2 = backwards_scan.pop()
        new_candidate = "".join(chunk2) + "".join(spaces) + "".join(chunk1)
        if new_candidate.replace(" ", "") in candidates:
            feet.append(new_candidate)
        else:
            if new_candidate.replace(" ", "") == incomplete_foot:
                # Two shorts: pull one more symbol to try completing a dactyl.
                spaces2 = []
                previous_mark = backwards_scan.pop()
                while previous_mark == " ":
                    spaces2.append(previous_mark)
                    previous_mark = backwards_scan.pop()
                if previous_mark == self.constants.STRESSED:
                    new_candidate = "".join(previous_mark) + "".join(
                        spaces2) + new_candidate
                    feet.append(new_candidate)
                else:
                    feet.append(new_candidate)  # invalid foot
                    spaces3 = []
                    next_mark = backwards_scan.pop()
                    while next_mark == " ":
                        # NOTE(review): appending previous_mark (a non-space
                        # symbol) while collecting spaces looks wrong --
                        # probably next_mark was intended here; confirm.
                        spaces3.append(previous_mark)
                        next_mark = backwards_scan.pop()
                    feet.append("".join(next_mark) + "".join(spaces3) + previous_mark)
except Exception as ex:
    # Any pop() from an exhausted scan or other slip means the scansion is
    # too malformed to segment; signal that with an empty list.
    LOG.error("err at: {}, {}".format(scansion, ex))
    return list()
# Feet were collected right-to-left; reverse into reading order.
return feet[::-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def closest_hexameter_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid hexameter patterns. :return: list of the closest valid hexameter patterns; only candidates with a matching length/number of syllables are considered. ['-UU-UU-----UU--'] """
return self._closest_patterns(self.VALID_HEXAMETERS, scansion)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def closest_pentameter_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid pentameter patterns. :return: list of the closest valid pentameter patterns; only candidates with a matching length/number of syllables are considered. ['---UU--UU-UUX'] """
return self._closest_patterns(self.VALID_PENTAMETERS, scansion)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid hendecasyllable patterns. :return: list of the closest valid hendecasyllable patterns; only candidates with a matching length/number of syllables are considered. ['-U-UU-U-U-X', 'U--UU-U-U-X'] """
return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _closest_patterns(self, patterns: List[str], scansion: str) -> List[str]: """ Find the closest group of matching valid patterns. :patterns: a list of patterns :scansion: the scansion pattern thus far :return: list of the closest valid patterns; only candidates with a matching length/number of syllables are considered. """
pattern = scansion.replace(" ", "") pattern = pattern.replace(self.constants.FOOT_SEPARATOR, "") ending = pattern[-1] candidate = pattern[:len(pattern) - 1] + self.constants.OPTIONAL_ENDING cans = [(distance(candidate, x), x) for x in patterns if len(x) == len(candidate)] if cans: cans = sorted(cans, key=lambda tup: tup[0]) top = cans[0][0] return [can[1][:-1] + ending for can in cans if can[0] == top] return []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_pentameter_templates(self) -> List[str]: """Create pentameter templates."""
return [ # '-UU|-UU|-|-UU|-UU|X' self.constants.DACTYL + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '-UU|--|-|-UU|-UU|X' self.constants.DACTYL + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|-UU|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|--|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_replacement_patterns(self): """Check for availability of lemmatizer for a language."""
# Loads the lemma lookup table for the configured language from the
# user's cltk_data model directory by importing it as a Python module.
if self.language == 'latin':
    warnings.warn(
        "LemmaReplacer is deprecated and will soon be removed from CLTK. Please use the BackoffLatinLemmatizer at cltk.lemmatize.latin.backoff.",
        DeprecationWarning,
        stacklevel=2)
    rel_path = os.path.join('~','cltk_data',
                            self.language,
                            'model','latin_models_cltk',
                            'lemmata','latin_lemmata_cltk.py')
    path = os.path.expanduser(rel_path)
    #logger.info('Loading lemmata. This may take a minute.')
    loader = importlib.machinery.SourceFileLoader('latin_lemmata_cltk', path)
elif self.language == 'greek':
    rel_path = os.path.join('~','cltk_data',
                            self.language,
                            'model','greek_models_cltk',
                            'lemmata','greek_lemmata_cltk.py')
    path = os.path.expanduser(rel_path)
    #logger.info('Loading lemmata. This may take a minute.')
    loader = importlib.machinery.SourceFileLoader('greek_lemmata_cltk', path)
# NOTE(review): if self.language is neither 'latin' nor 'greek', `loader`
# is never bound and the next line raises NameError -- confirm callers
# guarantee a supported language, or add an explicit error.
module = loader.load_module()
lemmata = module.LEMMATA
return lemmata
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Needleman_Wunsch(w1, w2, d=-1, alphabet = "abcdefghijklmnopqrstuvwxyz", S = Default_Matrix(26, 1, -1) ): """ Computes allignment using Needleman-Wunsch algorithm. The alphabet parameter is used for specifying the alphabetical order of the similarity matrix. Similarity matrix is initialized to an unweighted matrix that returns 1 for match and -1 for substitution. Args: :param w1: str :param w2: str :param d: int/float :param alphabet: str :param S: list :return: str tuple Examples: NW calculates the optimal string alignment based on a weighted matrix M. By default, an unweighted similarity matrix is used to represent substitution cost (1 if match, -1 otherwise). ('piscis', 'pesc-e') You can also define your own alphabet and matrix ('pesc-are', 'piscia--') Clearly, a weighted matrix should be used over the default one if linguistic accuracy is desired. The Matrix can be defined manually through matching of manners of articulation or stochastically by detecting the most common substitutions. A simple example follows: First define the similarity matrix We know want to increase the score for matching a to i. ('pescare', 'pisci-a') """
#S must be a square matrix matching the length of your alphabet if len(S) != len(alphabet) or len(S[0])!= len(alphabet): raise AssertionError("Unexpected dimensions of Similarity matrix, S." " S must be a n by n square matrix, where n is the" " length of your predefined alphabet") m,n = len(w1), len(w2) F = [[0 for i in range(n+1)] for j in range(m+1)] for i in range(m+1): F[i][0] = d*i for i in range(n+1): F[0][i] = d*i #F[i][j] is given by the reccurence relation F[i][j] = max(F[i-1][j-1] + S(A[i],B[i]), F[i][j-1] + d, F[i-1][j] + d) #Where S the similarity matrix and d the gap penalty for i in range(1,m+1): for j in range(1,n+1): F[i][j] = max(F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])], F[i-1][j] + d,F[i][j-1] + d) A1, A2 = "", "" i, j = m, n #Since F[n][m] gives the maximum score, we can now reconstruct the alignment by determining whether the optimal move #is a match, insertion or deletion while i>0 or j>0: if i>0 and j>0 and F[i][j] == F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])]: A1 = w1[i-1] + A1 A2 = w2[j-1] + A2 i -= 1 j -= 1 elif i>0 and F[i][j] == F[i-1][j] + d: A1 = w1[i-1] + A1 A2 = "-" + A2 i -= 1 else: A1 = "-" + A1 A2 = w2[j-1] + A2 j -= 1 return (A1, A2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def toc(self): """ Returns a rich list of texts in the catalog. """
output = [] for key in sorted(self.catalog.keys()): edition = self.catalog[key]['edition'] length = len(self.catalog[key]['transliteration']) output.append( "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format( key=key, edition=edition, length=length)) return output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def englishToPun_number(number):
        """Convert a number written with standard English (Western Arabic) digits to the equivalent Punjabi number using Punjabi digits. The input is an integer and the output is a string. """
output = '' number = list(str(number)) for digit in number: output += DIGITS[int(digit)] return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_indiclang_char(c,lang): """ Applicable to Brahmi derived Indic scripts """
o=get_offset(c,lang) return (o>=0 and o<=0x7f) or ord(c)==DANDA or ord(c)==DOUBLE_DANDA
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_velar(c,lang): """ Is the character a velar """
o=get_offset(c,lang) return (o>=VELAR_RANGE[0] and o<=VELAR_RANGE[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_palatal(c,lang): """ Is the character a palatal """
o=get_offset(c,lang) return (o>=PALATAL_RANGE[0] and o<=PALATAL_RANGE[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_retroflex(c,lang): """ Is the character a retroflex """
o=get_offset(c,lang) return (o>=RETROFLEX_RANGE[0] and o<=RETROFLEX_RANGE[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_dental(c,lang): """ Is the character a dental """
o=get_offset(c,lang) return (o>=DENTAL_RANGE[0] and o<=DENTAL_RANGE[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_labial(c,lang): """ Is the character a labial """
o=get_offset(c,lang) return (o>=LABIAL_RANGE[0] and o<=LABIAL_RANGE[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_phonetics(self): """Transcribe phonetics."""
# Run every line of the loaded text through a single Transcriber instance
# and cache the results on the object.
transcriber = Transcriber()
self.transcribed_phonetics = [transcriber.transcribe(line) for line in self.text]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def PositionedPhoneme(phoneme,
                      word_initial = False, word_final = False,
                      syllable_initial = False, syllable_final = False,
                      env_start = False, env_end = False):
    '''
    A decorator for phonemes, used when applying rules over words.

    Returns a deep copy of the input phoneme carrying extra boolean
    attributes that record whether the phoneme sits at a word or syllable
    boundary, or at the start/end of an environment.
    '''
    annotated = deepcopy(phoneme)
    flags = dict(word_initial=word_initial,
                 word_final=word_final,
                 syllable_initial=syllable_initial,
                 syllable_final=syllable_final,
                 env_start=env_start,
                 env_end=env_end)
    for name, value in flags.items():
        setattr(annotated, name, value)
    return annotated
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def matches(self, other):
    '''
    A disjunctive list matches a phoneme if any of its members matches it.
    When other is itself a disjunctive list, any pairwise match between
    the two lists returns True.
    '''
    if other is None:
        return False
    if isinstance(other, PhonemeDisjunction):
        # recurse: any member of this list matching the other list suffices
        return any(member.matches(other) for member in self)
    if isinstance(other, (list, PhonologicalFeature)):
        # promote raw feature (lists) to a phoneme before comparing
        other = phoneme(other)
    return any(member <= other for member in self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def transcribe(self, text, as_phonemes = False):
    '''
    Transcribes a text: the text is first tokenized into words, then each
    word is transcribed.

    If as_phonemes is True, returns a list of lists of phoneme objects;
    otherwise returns a space-joined string of the IPA symbols of the
    phonemes.
    '''
    phoneme_words = [self.transcribe_word(word) for word in self._tokenize(text)]
    if as_phonemes:
        return phoneme_words
    rendered = [''.join(phoneme.ipa for phoneme in word) for word in phoneme_words]
    return ' '.join(rendered)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def transcribe_to_modern(self, text) :
    '''
    A very first attempt at transcribing from IPA to some modern
    orthography, intended to give the student clues to the pronunciation
    of old orthographies.
    '''
    # letter-by-letter transcription via the IPA -> modern mapping
    phoneme_words = self.transcribe(text, as_phonemes = True)
    rendered = []
    for word in phoneme_words:
        rendered.append(''.join(self.to_modern[0][phoneme.ipa] for phoneme in word))
    modern_text = ' '.join(rendered)
    # then apply the phonotactic fix-up substitutions
    for regexp, replacement in self.to_modern[1]:
        modern_text = re.sub(regexp, replacement, modern_text)
    return modern_text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def voice(self, consonant) :
    '''
    Voices a consonant: searches the sound inventory for a consonant with
    the same features as the argument, but +voice.
    '''
    target = deepcopy(consonant)
    target[Voiced] = Voiced.pos
    return self._find_sound(target)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def aspirate(self, consonant):
    '''
    Aspirate a consonant: search the sound inventory for a consonant carrying
    the same features as the argument, except marked +aspirated.
    '''
    # Work on a copy so the caller's consonant is left untouched.
    candidate = deepcopy(consonant)
    candidate[Aspirated] = Aspirated.pos
    return self._find_sound(candidate)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def train_sentence_tokenizer(self: object, text: str): """ Train sentence tokenizer. """
language_punkt_vars = PunktLanguageVars # Set punctuation if self.punctuation: if self.strict: language_punkt_vars.sent_end_chars = self.punctuation + self.strict_punctuation else: language_punkt_vars.sent_end_chars = self.punctuation # Set abbreviations trainer = PunktTrainer(text, language_punkt_vars) trainer.INCLUDE_ALL_COLLOCS = True trainer.INCLUDE_ABBREV_COLLOCS = True tokenizer = PunktSentenceTokenizer(trainer.get_params()) if self.abbreviations: for abbreviation in self.abbreviations: tokenizer._params.abbrev_types.add(abbreviation) return tokenizer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def docs(self, fileids=None) -> Generator[str, str, None]: """ Returns the complete text of a Text document, closing the document after we are done reading it and yielding it in a memory-safe fashion. """
if not fileids: fileids = self.fileids() # Create a generator, loading one document into memory at a time. for path, encoding in self.abspaths(fileids, include_encoding=True): with codecs.open(path, 'r', encoding=encoding) as reader: if self.skip_keywords: tmp_data = [] for line in reader: skip = False for keyword in self.skip_keywords: if keyword in line: skip = True if not skip: tmp_data.append(line) yield ''.join(tmp_data) else: yield reader.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sizes(self, fileids=None) -> Generator[int, int, None]: """ Yields the size on disk of each file. This function is used to detect oddly large files in the corpus. """
if not fileids: fileids = self.fileids() # Create a generator, getting every path and computing filesize for path in self.abspaths(fileids): yield os.path.getsize(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def docs(self: object, fileids:str): """ Returns the complete text of a .tess file, closing the document after we are done reading it and yielding it in a memory-safe fashion. """
for path, encoding in self.abspaths(fileids, include_encoding=True): with codecs.open(path, 'r', encoding=encoding) as f: yield f.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lines(self: object, fileids: str, plaintext: bool = True): """ Tokenizes documents in the corpus by line """
for text in self.texts(fileids, plaintext): text = re.sub(r'\n\s*\n', '\n', text, re.MULTILINE) # Remove blank lines for line in text.split('\n'): yield line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sents(self: object, fileids: str): """ Tokenizes documents in the corpus by sentence """
for para in self.paras(fileids): for sent in sent_tokenize(para): yield sent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def words(self: object, fileids: str): """ Tokenizes documents in the corpus by word """
for sent in self.sents(fileids): for token in word_tokenize(sent): yield token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pos_tokenize(self: object, fileids: str): """ Segments, tokenizes, and POS tag a document in the corpus. """
for para in self.paras(fileids): yield [ self.pos_tagger(word_tokenize(sent)) for sent in sent_tokenize(para) ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe(self: object, fileids: str = None): """ Performs a single pass of the corpus and returns a dictionary with a variety of metrics concerning the state of the corpus. based on (Bengfort et al, 2018: 46) """
started = time.time() # Structures to perform counting counts = FreqDist() tokens = FreqDist() # Perform a single pass over paragraphs, tokenize, and counts for para in self.paras(fileids): counts['paras'] += 1 for sent in para: counts['sents'] += 1 # Include POS at some point for word in sent: counts['words'] += 1 tokens[word] += 1 # Compute the number of files in the corpus n_fileids = len(self.fileids()) # Return data structure with information return { 'files': n_fileids, 'paras': counts['paras'], 'sents': counts['sents'], 'words': counts['words'], 'vocab': len(tokens), 'lexdiv': round((counts['words'] / len(tokens)), 3), 'ppdoc': round((counts['paras'] / n_fileids), 3), 'sppar':round((counts['sents'] / counts['paras']), 3), 'secs': round((time.time()-started), 3), }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_entries(self): """Check for availability of lemmatizer for French."""
rel_path = os.path.join('~','cltk_data', 'french', 'text','french_data_cltk' ,'entries.py') path = os.path.expanduser(rel_path) #logger.info('Loading entries. This may take a minute.') loader = importlib.machinery.SourceFileLoader('entries', path) module = loader.load_module() entries = module.entries return entries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lemmatize(self, tokens): """define list of lemmas"""
entries = self.entries forms_and_lemmas = self.forms_and_lemmas lemma_list = [x[0] for x in entries] """Provide a lemma for each token""" lemmatized = [] for token in tokens: """check for a match between token and list of lemmas""" if token in lemma_list: lemmed = (token, token) lemmatized.append(lemmed) else: """if no match check for a match between token and list of lemma forms""" lemma = [k for k, v in forms_and_lemmas.items() if token in v] if lemma != []: lemmed = (token, lemma) lemmatized.append(lemmed) elif lemma == []: """if no match apply regular expressions and check for a match against the list of lemmas again""" regexed = regex(token) if regexed in lemma_list: lemmed = (token, regexed) lemmatized.append(lemmed) else: lemmed = (token, "None") lemmatized.append(lemmed) return lemmatized
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(self, text: str, model: object = None): """ Method for tokenizing sentences with pretrained punkt models; can be overridden by language-specific tokenizers. :rtype: list :param text: text to be tokenized into sentences :type text: str :param model: tokenizer object to used # Should be in init? :type model: object """
if not self.model: model = self.model tokenizer = self.model if self.lang_vars: tokenizer._lang_vars = self.lang_vars return tokenizer.tokenize(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(self, text: str, model: object = None): """ Method for tokenizing sentences with regular expressions. :rtype: list :param text: text to be tokenized into sentences :type text: str """
sentences = re.split(self.pattern, text) return sentences
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_forms_and_lemmas(self): """Load the dictionary of lemmas and forms from the OE models repository."""
# Location of the Old English lemma/form table in the CLTK models repo.
path = os.path.expanduser(os.path.join(
    CLTK_DATA_DIR, 'old_english', 'model', 'old_english_models_cltk',
    'data', 'oe.lemmas'))

self.lemma_dict = {}
with open(path, 'r') as infile:
    for line in infile.read().splitlines():
        # Each line is tab-separated: the lemma first, then comma-separated
        # groups of attested forms (the lemma column also maps to itself).
        columns = line.split('\t')
        lemma = columns[0]
        for column in columns:
            for form in column.split(','):
                self.lemma_dict.setdefault(form.lower(), []).append(lemma)

# De-duplicate the candidate-lemma lists.
for form in self.lemma_dict:
    self.lemma_dict[form] = list(set(self.lemma_dict[form]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_type_counts(self): """Load the table of frequency counts of word forms."""
# Location of the word-form frequency table in the CLTK models repo.
path = os.path.expanduser(os.path.join(
    CLTK_DATA_DIR, 'old_english', 'model', 'old_english_models_cltk',
    'data', 'oe.counts'))

# Each line holds "<count> <word>"; build the word -> count map.
self.type_counts = {}
with open(path, 'r') as infile:
    for line in infile.read().splitlines():
        count, word = line.split()
        self.type_counts[word] = int(count)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _relative_frequency(self, word): """Computes the log relative frequency for a word form"""
count = self.type_counts.get(word, 0) return math.log(count/len(self.type_counts)) if count > 0 else 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _lemmatize_token(self, token, best_guess=True, return_frequencies=False): """Lemmatize a single token. If best_guess is true, then take the most frequent lemma when a form has multiple possible lemmatizations. If the form is not found, just return it. If best_guess is false, then always return the full set of possible lemmas, or None if none found. """
# Look up all candidate lemmas for the (case-folded) token; None if unknown.
lemmas = self.lemma_dict.get(token.lower(), None)
if best_guess == True:
    if lemmas == None:
        # Unknown form: fall back to the token itself.
        lemma = token
    elif len(lemmas) > 1:
        # Ambiguous form: pick the lemma with the highest corpus frequency.
        # NOTE(review): raises KeyError if a candidate lemma is missing from
        # type_counts — confirm every lemma is guaranteed a count entry.
        counts = [self.type_counts[word] for word in lemmas]
        lemma = lemmas[argmax(counts)]
    else:
        lemma = lemmas[0]
    if return_frequencies == True:
        # Pair the chosen lemma with its log relative frequency.
        lemma = (lemma, self._relative_frequency(lemma))
else:
    # Return the full candidate set ([] when the form is unknown).
    lemma = [] if lemmas == None else lemmas
    if return_frequencies == True:
        lemma = [(word, self._relative_frequency(word)) for word in lemma]
return(token, lemma)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lemmatize(self, text, best_guess=True, return_frequencies=False): """Lemmatize all tokens in a string or a list. A string is first tokenized using punkt. Throw a type error if the input is neither a string nor a list. """
if isinstance(text, str): tokens = wordpunct_tokenize(text) elif isinstance(text, list): tokens= text else: raise TypeError("lemmatize only works with strings or lists of string tokens.") return [self._lemmatize_token(token, best_guess, return_frequencies) for token in tokens]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate(self, filename): """Runs the lemmatize function over the contents of the file, counting the proportion of unfound lemmas."""
with open(filename, 'r') as infile: lines = infile.read().splitlines() lemma_count = 0 token_count = 0 for line in lines: line = re.sub(r'[.,!?:;0-9]', ' ', line) lemmas = [lemma for (_, lemma) in self.lemmatize(line, best_guess=False)] token_count += len(lemmas) lemma_count += len(lemmas) - lemmas.count([]) return lemma_count/token_count
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stem(self, noun, gender, mimation=True): """Return the stem of a noun, given its gender"""
# Stem is returned empty when no ending table matches (after printing a
# diagnostic below).
stem = ''
# NOTE(review): the mimation branch is currently a no-op — the line that
# would strip the final 'm' is commented out. Confirm whether mimation
# stripping should be re-enabled.
if mimation and noun[-1:] == 'm':
    # noun = noun[:-1]
    pass

# Take off ending
if gender == 'm':
    # Masculine: singular/dual endings are two characters; plural endings one.
    if noun[-2:] in list(self.endings['m']['singular'].values()) + \
        list(self.endings['m']['dual'].values()):
        stem = noun[:-2]
    elif noun[-1] in list(self.endings['m']['plural'].values()):
        stem = noun[:-1]
    else:
        print("Unknown masculine noun: {}".format(noun))
elif gender == 'f':
    # Feminine: try 4-char plural endings first (stem regains its 't'), then
    # 3-char singular/dual endings (also regaining 't'), then fall back to
    # the 2-char masculine ending set.
    # NOTE(review): the final fall-back reuses self.endings['m'] — confirm
    # this is intentional and not a copy-paste slip.
    if noun[-4:] in self.endings['f']['plural']['nominative'] + \
        self.endings['f']['plural']['oblique']:
        stem = noun[:-4] + 't'
    elif noun[-3:] in list(self.endings['f']['singular'].values()) + \
        list(self.endings['f']['dual'].values()):
        stem = noun[:-3] + 't'
    elif noun[-2:] in list(self.endings['m']['singular'].values()) + \
        list(self.endings['m']['dual'].values()):
        stem = noun[:-2]
    else:
        print("Unknown feminine noun: {}".format(noun))
else:
    print("Unknown noun: {}".format(noun))
return stem