id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
230,300
python-cmd2/cmd2
examples/scripts/save_help_text.py
get_sub_commands
def get_sub_commands(parser: argparse.ArgumentParser) -> List[str]:
    """Get a list of sub-commands for an ArgumentParser"""
    sub_cmds = []

    # Only parsers that actually define sub-parsers can have sub-commands
    if parser is not None and parser._subparsers is not None:
        # Locate the _SubParsersAction that holds this parser's sub-commands
        for action in parser._subparsers._actions:
            if isinstance(action, argparse._SubParsersAction):
                for sub_cmd, sub_cmd_parser in action.choices.items():
                    sub_cmds.append(sub_cmd)

                    # Recurse to pick up any nested sub-commands
                    for nested_sub_cmd in get_sub_commands(sub_cmd_parser):
                        sub_cmds.append('{} {}'.format(sub_cmd, nested_sub_cmd))
                break

    sub_cmds.sort()
    return sub_cmds
python
def get_sub_commands(parser: argparse.ArgumentParser) -> List[str]: sub_cmds = [] # Check if this is parser has sub-commands if parser is not None and parser._subparsers is not None: # Find the _SubParsersAction for the sub-commands of this parser for action in parser._subparsers._actions: if isinstance(action, argparse._SubParsersAction): for sub_cmd, sub_cmd_parser in action.choices.items(): sub_cmds.append(sub_cmd) # Look for nested sub-commands for nested_sub_cmd in get_sub_commands(sub_cmd_parser): sub_cmds.append('{} {}'.format(sub_cmd, nested_sub_cmd)) break sub_cmds.sort() return sub_cmds
[ "def", "get_sub_commands", "(", "parser", ":", "argparse", ".", "ArgumentParser", ")", "->", "List", "[", "str", "]", ":", "sub_cmds", "=", "[", "]", "# Check if this is parser has sub-commands", "if", "parser", "is", "not", "None", "and", "parser", ".", "_sub...
Get a list of sub-commands for an ArgumentParser
[ "Get", "a", "list", "of", "sub", "-", "commands", "for", "an", "ArgumentParser" ]
b22c0bd891ed08c8b09df56df9d91f48166a5e2a
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/scripts/save_help_text.py#L16-L36
230,301
python-cmd2/cmd2
examples/scripts/save_help_text.py
main
def main() -> None:
    """Main function of this script"""
    # Make sure we have access to self
    if 'self' not in globals():
        print("Run 'set locals_in_py true' and then rerun this script")
        return

    # Make sure the user passed in an output file
    if len(sys.argv) != 2:
        print("Usage: {} <output_file>".format(os.path.basename(sys.argv[0])))
        return

    # Open the output file
    outfile_path = os.path.expanduser(sys.argv[1])
    try:
        outfile = open(outfile_path, 'w')
    except OSError as e:
        print("Error opening {} because: {}".format(outfile_path, e))
        return

    # with-block guarantees the file is closed even if a write below raises
    # (the original leaked the handle on any error between open and close)
    with outfile:
        # Write the help summary
        header = '{0}\nSUMMARY\n{0}\n'.format(ASTERISKS)
        outfile.write(header)
        result = app('help -v')
        outfile.write(result.stdout)

        # Get a list of all commands and help topics and then filter out duplicates
        all_commands = set(self.get_all_commands())
        all_topics = set(self.get_help_topics())
        to_save = sorted(all_commands | all_topics)

        for item in to_save:
            is_command = item in all_commands
            add_help_to_file(item, outfile, is_command)

            if is_command:
                # Add any sub-commands
                for subcmd in get_sub_commands(getattr(self.cmd_func(item), 'argparser', None)):
                    full_cmd = '{} {}'.format(item, subcmd)
                    add_help_to_file(full_cmd, outfile, is_command)

    print("Output written to {}".format(outfile_path))
python
def main() -> None: # Make sure we have access to self if 'self' not in globals(): print("Run 'set locals_in_py true' and then rerun this script") return # Make sure the user passed in an output file if len(sys.argv) != 2: print("Usage: {} <output_file>".format(os.path.basename(sys.argv[0]))) return # Open the output file outfile_path = os.path.expanduser(sys.argv[1]) try: outfile = open(outfile_path, 'w') except OSError as e: print("Error opening {} because: {}".format(outfile_path, e)) return # Write the help summary header = '{0}\nSUMMARY\n{0}\n'.format(ASTERISKS) outfile.write(header) result = app('help -v') outfile.write(result.stdout) # Get a list of all commands and help topics and then filter out duplicates all_commands = set(self.get_all_commands()) all_topics = set(self.get_help_topics()) to_save = list(all_commands | all_topics) to_save.sort() for item in to_save: is_command = item in all_commands add_help_to_file(item, outfile, is_command) if is_command: # Add any sub-commands for subcmd in get_sub_commands(getattr(self.cmd_func(item), 'argparser', None)): full_cmd = '{} {}'.format(item, subcmd) add_help_to_file(full_cmd, outfile, is_command) outfile.close() print("Output written to {}".format(outfile_path))
[ "def", "main", "(", ")", "->", "None", ":", "# Make sure we have access to self", "if", "'self'", "not", "in", "globals", "(", ")", ":", "print", "(", "\"Run 'set locals_in_py true' and then rerun this script\"", ")", "return", "# Make sure the user passed in an output file...
Main function of this script
[ "Main", "function", "of", "this", "script" ]
b22c0bd891ed08c8b09df56df9d91f48166a5e2a
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/scripts/save_help_text.py#L58-L103
230,302
tkem/cachetools
cachetools/__init__.py
cached
def cached(cache, key=keys.hashkey, lock=None):
    """Decorator to wrap a function with a memoizing callable that saves
    results in a cache.
    """
    def decorator(func):
        if cache is None:
            # Caching disabled: pass every call straight through.
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
        elif lock is None:
            def wrapper(*args, **kwargs):
                cache_key = key(*args, **kwargs)
                try:
                    return cache[cache_key]
                except KeyError:
                    pass  # key not found
                value = func(*args, **kwargs)
                try:
                    cache[cache_key] = value
                except ValueError:
                    pass  # value too large
                return value
        else:
            # Hold the lock only around cache access, never around the
            # (potentially slow) wrapped call itself.
            def wrapper(*args, **kwargs):
                cache_key = key(*args, **kwargs)
                try:
                    with lock:
                        return cache[cache_key]
                except KeyError:
                    pass  # key not found
                value = func(*args, **kwargs)
                try:
                    with lock:
                        cache[cache_key] = value
                except ValueError:
                    pass  # value too large
                return value
        return _update_wrapper(wrapper, func)
    return decorator
python
def cached(cache, key=keys.hashkey, lock=None): def decorator(func): if cache is None: def wrapper(*args, **kwargs): return func(*args, **kwargs) elif lock is None: def wrapper(*args, **kwargs): k = key(*args, **kwargs) try: return cache[k] except KeyError: pass # key not found v = func(*args, **kwargs) try: cache[k] = v except ValueError: pass # value too large return v else: def wrapper(*args, **kwargs): k = key(*args, **kwargs) try: with lock: return cache[k] except KeyError: pass # key not found v = func(*args, **kwargs) try: with lock: cache[k] = v except ValueError: pass # value too large return v return _update_wrapper(wrapper, func) return decorator
[ "def", "cached", "(", "cache", ",", "key", "=", "keys", ".", "hashkey", ",", "lock", "=", "None", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "cache", "is", "None", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ...
Decorator to wrap a function with a memoizing callable that saves results in a cache.
[ "Decorator", "to", "wrap", "a", "function", "with", "a", "memoizing", "callable", "that", "saves", "results", "in", "a", "cache", "." ]
1b67cddadccb89993e9d2567bac22e57e2b2b373
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/__init__.py#L30-L68
230,303
tkem/cachetools
cachetools/__init__.py
cachedmethod
def cachedmethod(cache, key=keys.hashkey, lock=None):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a cache.
    """
    def decorator(method):
        if lock is None:
            def wrapper(self, *args, **kwargs):
                # ``cache`` is a callable resolving the per-instance cache.
                instance_cache = cache(self)
                if instance_cache is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                try:
                    return instance_cache[cache_key]
                except KeyError:
                    pass  # key not found
                value = method(self, *args, **kwargs)
                try:
                    instance_cache[cache_key] = value
                except ValueError:
                    pass  # value too large
                return value
        else:
            def wrapper(self, *args, **kwargs):
                instance_cache = cache(self)
                if instance_cache is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                # Hold the per-instance lock only around cache access.
                try:
                    with lock(self):
                        return instance_cache[cache_key]
                except KeyError:
                    pass  # key not found
                value = method(self, *args, **kwargs)
                try:
                    with lock(self):
                        instance_cache[cache_key] = value
                except ValueError:
                    pass  # value too large
                return value
        return _update_wrapper(wrapper, method)
    return decorator
python
def cachedmethod(cache, key=keys.hashkey, lock=None): def decorator(method): if lock is None: def wrapper(self, *args, **kwargs): c = cache(self) if c is None: return method(self, *args, **kwargs) k = key(*args, **kwargs) try: return c[k] except KeyError: pass # key not found v = method(self, *args, **kwargs) try: c[k] = v except ValueError: pass # value too large return v else: def wrapper(self, *args, **kwargs): c = cache(self) if c is None: return method(self, *args, **kwargs) k = key(*args, **kwargs) try: with lock(self): return c[k] except KeyError: pass # key not found v = method(self, *args, **kwargs) try: with lock(self): c[k] = v except ValueError: pass # value too large return v return _update_wrapper(wrapper, method) return decorator
[ "def", "cachedmethod", "(", "cache", ",", "key", "=", "keys", ".", "hashkey", ",", "lock", "=", "None", ")", ":", "def", "decorator", "(", "method", ")", ":", "if", "lock", "is", "None", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", ...
Decorator to wrap a class or instance method with a memoizing callable that saves results in a cache.
[ "Decorator", "to", "wrap", "a", "class", "or", "instance", "method", "with", "a", "memoizing", "callable", "that", "saves", "results", "in", "a", "cache", "." ]
1b67cddadccb89993e9d2567bac22e57e2b2b373
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/__init__.py#L71-L112
230,304
pudo/dataset
dataset/util.py
iter_result_proxy
def iter_result_proxy(rp, step=None):
    """Iterate over the ResultProxy."""
    while True:
        # Fetch everything at once, or one batch of ``step`` rows at a time.
        rows = rp.fetchall() if step is None else rp.fetchmany(step)
        if not rows:
            return
        for row in rows:
            yield row
python
def iter_result_proxy(rp, step=None): while True: if step is None: chunk = rp.fetchall() else: chunk = rp.fetchmany(step) if not chunk: break for row in chunk: yield row
[ "def", "iter_result_proxy", "(", "rp", ",", "step", "=", "None", ")", ":", "while", "True", ":", "if", "step", "is", "None", ":", "chunk", "=", "rp", ".", "fetchall", "(", ")", "else", ":", "chunk", "=", "rp", ".", "fetchmany", "(", "step", ")", ...
Iterate over the ResultProxy.
[ "Iterate", "over", "the", "ResultProxy", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L24-L34
230,305
pudo/dataset
dataset/util.py
normalize_column_name
def normalize_column_name(name):
    """Check if a string is a reasonable thing to use as a column name."""
    if not isinstance(name, six.string_types):
        raise ValueError('%r is not a valid column name.' % name)

    # PostgreSQL limits identifiers to 63 *bytes*; start with 63 characters.
    name = name.strip()[:63]
    if isinstance(name, six.text_type):
        # Trim one character at a time until the UTF-8 encoding fits.
        while len(name.encode('utf-8')) >= 64:
            name = name[:-1]

    if not len(name) or '.' in name or '-' in name:
        raise ValueError('%r is not a valid column name.' % name)
    return name
python
def normalize_column_name(name): if not isinstance(name, six.string_types): raise ValueError('%r is not a valid column name.' % name) # limit to 63 characters name = name.strip()[:63] # column names can be 63 *bytes* max in postgresql if isinstance(name, six.text_type): while len(name.encode('utf-8')) >= 64: name = name[:len(name) - 1] if not len(name) or '.' in name or '-' in name: raise ValueError('%r is not a valid column name.' % name) return name
[ "def", "normalize_column_name", "(", "name", ")", ":", "if", "not", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "'%r is not a valid column name.'", "%", "name", ")", "# limit to 63 characters", "name", "=", "...
Check if a string is a reasonable thing to use as a column name.
[ "Check", "if", "a", "string", "is", "a", "reasonable", "thing", "to", "use", "as", "a", "column", "name", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L59-L73
230,306
pudo/dataset
dataset/util.py
safe_url
def safe_url(url):
    """Remove password from printed connection URLs."""
    password = urlparse(url).password
    if password is not None:
        url = url.replace(':%s@' % password, ':*****@')
    return url
python
def safe_url(url): parsed = urlparse(url) if parsed.password is not None: pwd = ':%s@' % parsed.password url = url.replace(pwd, ':*****@') return url
[ "def", "safe_url", "(", "url", ")", ":", "parsed", "=", "urlparse", "(", "url", ")", "if", "parsed", ".", "password", "is", "not", "None", ":", "pwd", "=", "':%s@'", "%", "parsed", ".", "password", "url", "=", "url", ".", "replace", "(", "pwd", ","...
Remove password from printed connection URLs.
[ "Remove", "password", "from", "printed", "connection", "URLs", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L86-L92
230,307
pudo/dataset
dataset/util.py
index_name
def index_name(table, columns):
    """Generate an artificial index name."""
    # Hash the column list so the name is stable yet short.
    digest = sha1('||'.join(columns).encode('utf-8')).hexdigest()
    return 'ix_%s_%s' % (table, digest[:16])
python
def index_name(table, columns): sig = '||'.join(columns) key = sha1(sig.encode('utf-8')).hexdigest()[:16] return 'ix_%s_%s' % (table, key)
[ "def", "index_name", "(", "table", ",", "columns", ")", ":", "sig", "=", "'||'", ".", "join", "(", "columns", ")", "key", "=", "sha1", "(", "sig", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "16", "]", "return",...
Generate an artificial index name.
[ "Generate", "an", "artificial", "index", "name", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L95-L99
230,308
pudo/dataset
dataset/util.py
ensure_tuple
def ensure_tuple(obj):
    """Try and make the given argument into a tuple."""
    if obj is None:
        return ()
    # Strings are iterable but must be treated as scalars here.
    if isinstance(obj, Iterable) and not isinstance(obj, six.string_types):
        return tuple(obj)
    return (obj,)
python
def ensure_tuple(obj): if obj is None: return tuple() if isinstance(obj, Iterable) and not isinstance(obj, six.string_types): return tuple(obj) return obj,
[ "def", "ensure_tuple", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "tuple", "(", ")", "if", "isinstance", "(", "obj", ",", "Iterable", ")", "and", "not", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "return...
Try and make the given argument into a tuple.
[ "Try", "and", "make", "the", "given", "argument", "into", "a", "tuple", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L102-L108
230,309
pudo/dataset
dataset/util.py
pad_chunk_columns
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    # Union of every column name seen across the chunk.
    all_columns = set().union(*(record.keys() for record in chunk))
    for record in chunk:
        for column_name in all_columns:
            record.setdefault(column_name, None)
    return chunk
python
def pad_chunk_columns(chunk): columns = set() for record in chunk: columns.update(record.keys()) for record in chunk: for column in columns: record.setdefault(column, None) return chunk
[ "def", "pad_chunk_columns", "(", "chunk", ")", ":", "columns", "=", "set", "(", ")", "for", "record", "in", "chunk", ":", "columns", ".", "update", "(", "record", ".", "keys", "(", ")", ")", "for", "record", "in", "chunk", ":", "for", "column", "in",...
Given a set of items to be inserted, make sure they all have the same columns by padding columns with None if they are missing.
[ "Given", "a", "set", "of", "items", "to", "be", "inserted", "make", "sure", "they", "all", "have", "the", "same", "columns", "by", "padding", "columns", "with", "None", "if", "they", "are", "missing", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L111-L120
230,310
pudo/dataset
dataset/types.py
Types.guess
def guess(cls, sample):
    """Given a single sample, guess the column type for the field.

    If the sample is an instance of an SQLAlchemy type, the type will be
    used instead.
    """
    if isinstance(sample, TypeEngine):
        return sample
    # Check order matters: bool is a subclass of int, datetime of date.
    checks = (
        (bool, cls.boolean),
        (int, cls.integer),
        (float, cls.float),
        (datetime, cls.datetime),
        (date, cls.date),
    )
    for py_type, column_type in checks:
        if isinstance(sample, py_type):
            return column_type
    return cls.text
python
def guess(cls, sample): if isinstance(sample, TypeEngine): return sample if isinstance(sample, bool): return cls.boolean elif isinstance(sample, int): return cls.integer elif isinstance(sample, float): return cls.float elif isinstance(sample, datetime): return cls.datetime elif isinstance(sample, date): return cls.date return cls.text
[ "def", "guess", "(", "cls", ",", "sample", ")", ":", "if", "isinstance", "(", "sample", ",", "TypeEngine", ")", ":", "return", "sample", "if", "isinstance", "(", "sample", ",", "bool", ")", ":", "return", "cls", ".", "boolean", "elif", "isinstance", "(...
Given a single sample, guess the column type for the field. If the sample is an instance of an SQLAlchemy type, the type will be used instead.
[ "Given", "a", "single", "sample", "guess", "the", "column", "type", "for", "the", "field", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/types.py#L19-L37
230,311
pudo/dataset
dataset/table.py
Table.insert
def insert(self, row, ensure=None, types=None):
    """Add a ``row`` dict by inserting it into the table.

    If ``ensure`` is set, any of the keys of the row are not
    table columns, they will be created automatically.

    During column creation, ``types`` will be checked for a key
    matching the name of a column to be created, and the given
    SQLAlchemy column type will be used. Otherwise, the type is
    guessed from the row value, defaulting to a simple unicode
    field.
    ::

        data = dict(title='I am a banana!')
        table.insert(data)

    Returns the inserted row's primary key.
    """
    row = self._sync_columns(row, ensure, types=types)
    result = self.db.executable.execute(self.table.insert(row))
    pk = result.inserted_primary_key
    # Tables without a primary key simply report success.
    return pk[0] if len(pk) > 0 else True
python
def insert(self, row, ensure=None, types=None): row = self._sync_columns(row, ensure, types=types) res = self.db.executable.execute(self.table.insert(row)) if len(res.inserted_primary_key) > 0: return res.inserted_primary_key[0] return True
[ "def", "insert", "(", "self", ",", "row", ",", "ensure", "=", "None", ",", "types", "=", "None", ")", ":", "row", "=", "self", ".", "_sync_columns", "(", "row", ",", "ensure", ",", "types", "=", "types", ")", "res", "=", "self", ".", "db", ".", ...
Add a ``row`` dict by inserting it into the table. If ``ensure`` is set, any of the keys of the row are not table columns, they will be created automatically. During column creation, ``types`` will be checked for a key matching the name of a column to be created, and the given SQLAlchemy column type will be used. Otherwise, the type is guessed from the row value, defaulting to a simple unicode field. :: data = dict(title='I am a banana!') table.insert(data) Returns the inserted row's primary key.
[ "Add", "a", "row", "dict", "by", "inserting", "it", "into", "the", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L61-L83
230,312
pudo/dataset
dataset/table.py
Table.insert_ignore
def insert_ignore(self, row, keys, ensure=None, types=None):
    """Add a ``row`` dict into the table if the row does not exist.

    If rows with matching ``keys`` exist they will be added to the table.

    Setting ``ensure`` results in automatically creating missing columns,
    i.e., keys of the row are not table columns.

    During column creation, ``types`` will be checked for a key
    matching the name of a column to be created, and the given
    SQLAlchemy column type will be used. Otherwise, the type is
    guessed from the row value, defaulting to a simple unicode
    field.
    ::

        data = dict(id=10, title='I am a banana!')
        table.insert_ignore(data, ['id'])
    """
    row = self._sync_columns(row, ensure, types=types)
    if self._check_ensure(ensure):
        self.create_index(keys)
    filters, _ = self._keys_to_args(row, keys)
    # Skip the insert entirely when a matching row already exists.
    if self.count(**filters) != 0:
        return False
    return self.insert(row, ensure=False)
python
def insert_ignore(self, row, keys, ensure=None, types=None): row = self._sync_columns(row, ensure, types=types) if self._check_ensure(ensure): self.create_index(keys) args, _ = self._keys_to_args(row, keys) if self.count(**args) == 0: return self.insert(row, ensure=False) return False
[ "def", "insert_ignore", "(", "self", ",", "row", ",", "keys", ",", "ensure", "=", "None", ",", "types", "=", "None", ")", ":", "row", "=", "self", ".", "_sync_columns", "(", "row", ",", "ensure", ",", "types", "=", "types", ")", "if", "self", ".", ...
Add a ``row`` dict into the table if the row does not exist. If rows with matching ``keys`` exist they will be added to the table. Setting ``ensure`` results in automatically creating missing columns, i.e., keys of the row are not table columns. During column creation, ``types`` will be checked for a key matching the name of a column to be created, and the given SQLAlchemy column type will be used. Otherwise, the type is guessed from the row value, defaulting to a simple unicode field. :: data = dict(id=10, title='I am a banana!') table.insert_ignore(data, ['id'])
[ "Add", "a", "row", "dict", "into", "the", "table", "if", "the", "row", "does", "not", "exist", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L85-L109
230,313
pudo/dataset
dataset/table.py
Table.insert_many
def insert_many(self, rows, chunk_size=1000, ensure=None, types=None):
    """Add many rows at a time.

    This is significantly faster than adding them one by one. Per default
    the rows are processed in chunks of 1000 per commit, unless you specify
    a different ``chunk_size``.

    See :py:meth:`insert() <dataset.Table.insert>` for details on
    the other parameters.
    ::

        rows = [dict(name='Dolly')] * 10000
        table.insert_many(rows)
    """
    def _flush(pending):
        # Pad the batch to a uniform column set, then write it in one go.
        self.table.insert().execute(pad_chunk_columns(pending))

    pending = []
    for row in rows:
        pending.append(self._sync_columns(row, ensure, types=types))
        if len(pending) == chunk_size:
            _flush(pending)
            pending = []
    if len(pending):
        _flush(pending)
python
def insert_many(self, rows, chunk_size=1000, ensure=None, types=None): chunk = [] for row in rows: row = self._sync_columns(row, ensure, types=types) chunk.append(row) if len(chunk) == chunk_size: chunk = pad_chunk_columns(chunk) self.table.insert().execute(chunk) chunk = [] if len(chunk): chunk = pad_chunk_columns(chunk) self.table.insert().execute(chunk)
[ "def", "insert_many", "(", "self", ",", "rows", ",", "chunk_size", "=", "1000", ",", "ensure", "=", "None", ",", "types", "=", "None", ")", ":", "chunk", "=", "[", "]", "for", "row", "in", "rows", ":", "row", "=", "self", ".", "_sync_columns", "(",...
Add many rows at a time. This is significantly faster than adding them one by one. Per default the rows are processed in chunks of 1000 per commit, unless you specify a different ``chunk_size``. See :py:meth:`insert() <dataset.Table.insert>` for details on the other parameters. :: rows = [dict(name='Dolly')] * 10000 table.insert_many(rows)
[ "Add", "many", "rows", "at", "a", "time", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L111-L136
230,314
pudo/dataset
dataset/table.py
Table.update
def update(self, row, keys, ensure=None, types=None, return_count=False):
    """Update a row in the table.

    The update is managed via the set of column names stated in ``keys``:
    they will be used as filters for the data to be updated, using the
    values in ``row``.
    ::

        # update all entries with id matching 10, setting their title
        # columns
        data = dict(id=10, title='I am a banana!')
        table.update(data, ['id'])

    If keys in ``row`` update columns not present in the table, they will
    be created based on the settings of ``ensure`` and ``types``, matching
    the behavior of :py:meth:`insert() <dataset.Table.insert>`.
    """
    row = self._sync_columns(row, ensure, types=types)
    args, row = self._keys_to_args(row, keys)
    clause = self._args_to_clause(args)
    if not len(row):
        # Every value was consumed by the filter; nothing left to set.
        return self.count(clause)
    result = self.db.executable.execute(
        self.table.update(whereclause=clause, values=row))
    if result.supports_sane_rowcount():
        return result.rowcount
    # Fall back to an explicit count only when the driver can't report
    # affected rows and the caller asked for one.
    if return_count:
        return self.count(clause)
python
def update(self, row, keys, ensure=None, types=None, return_count=False): row = self._sync_columns(row, ensure, types=types) args, row = self._keys_to_args(row, keys) clause = self._args_to_clause(args) if not len(row): return self.count(clause) stmt = self.table.update(whereclause=clause, values=row) rp = self.db.executable.execute(stmt) if rp.supports_sane_rowcount(): return rp.rowcount if return_count: return self.count(clause)
[ "def", "update", "(", "self", ",", "row", ",", "keys", ",", "ensure", "=", "None", ",", "types", "=", "None", ",", "return_count", "=", "False", ")", ":", "row", "=", "self", ".", "_sync_columns", "(", "row", ",", "ensure", ",", "types", "=", "type...
Update a row in the table. The update is managed via the set of column names stated in ``keys``: they will be used as filters for the data to be updated, using the values in ``row``. :: # update all entries with id matching 10, setting their title columns data = dict(id=10, title='I am a banana!') table.update(data, ['id']) If keys in ``row`` update columns not present in the table, they will be created based on the settings of ``ensure`` and ``types``, matching the behavior of :py:meth:`insert() <dataset.Table.insert>`.
[ "Update", "a", "row", "in", "the", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L138-L164
230,315
pudo/dataset
dataset/table.py
Table.upsert
def upsert(self, row, keys, ensure=None, types=None):
    """An UPSERT is a smart combination of insert and update.

    If rows with matching ``keys`` exist they will be updated, otherwise a
    new row is inserted in the table.
    ::

        data = dict(id=10, title='I am a banana!')
        table.upsert(data, ['id'])
    """
    row = self._sync_columns(row, ensure, types=types)
    if self._check_ensure(ensure):
        self.create_index(keys)
    updated = self.update(row, keys, ensure=False, return_count=True)
    # No row matched the keys, so this becomes an insert.
    return self.insert(row, ensure=False) if updated == 0 else True
python
def upsert(self, row, keys, ensure=None, types=None): row = self._sync_columns(row, ensure, types=types) if self._check_ensure(ensure): self.create_index(keys) row_count = self.update(row, keys, ensure=False, return_count=True) if row_count == 0: return self.insert(row, ensure=False) return True
[ "def", "upsert", "(", "self", ",", "row", ",", "keys", ",", "ensure", "=", "None", ",", "types", "=", "None", ")", ":", "row", "=", "self", ".", "_sync_columns", "(", "row", ",", "ensure", ",", "types", "=", "types", ")", "if", "self", ".", "_che...
An UPSERT is a smart combination of insert and update. If rows with matching ``keys`` exist they will be updated, otherwise a new row is inserted in the table. :: data = dict(id=10, title='I am a banana!') table.upsert(data, ['id'])
[ "An", "UPSERT", "is", "a", "smart", "combination", "of", "insert", "and", "update", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L166-L182
230,316
pudo/dataset
dataset/table.py
Table.delete
def delete(self, *clauses, **filters):
    """Delete rows from the table.

    Keyword arguments can be used to add column-based filters. The filter
    criterion will always be equality:
    ::

        table.delete(place='Berlin')

    If no arguments are given, all records are deleted.
    """
    if not self.exists:
        # A table that was never created has nothing to delete.
        return False
    clause = self._args_to_clause(filters, clauses=clauses)
    result = self.db.executable.execute(self.table.delete(whereclause=clause))
    return result.rowcount > 0
python
def delete(self, *clauses, **filters): if not self.exists: return False clause = self._args_to_clause(filters, clauses=clauses) stmt = self.table.delete(whereclause=clause) rp = self.db.executable.execute(stmt) return rp.rowcount > 0
[ "def", "delete", "(", "self", ",", "*", "clauses", ",", "*", "*", "filters", ")", ":", "if", "not", "self", ".", "exists", ":", "return", "False", "clause", "=", "self", ".", "_args_to_clause", "(", "filters", ",", "clauses", "=", "clauses", ")", "st...
Delete rows from the table. Keyword arguments can be used to add column-based filters. The filter criterion will always be equality: :: table.delete(place='Berlin') If no arguments are given, all records are deleted.
[ "Delete", "rows", "from", "the", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L184-L200
230,317
pudo/dataset
dataset/table.py
Table._reflect_table
def _reflect_table(self):
    """Load the tables definition from the database."""
    with self.db.lock:
        try:
            self._table = SQLATable(self.name,
                                    self.db.metadata,
                                    schema=self.db.schema,
                                    autoload=True)
        except NoSuchTableError:
            # Table does not exist yet; leave self._table unchanged.
            pass
python
def _reflect_table(self): with self.db.lock: try: self._table = SQLATable(self.name, self.db.metadata, schema=self.db.schema, autoload=True) except NoSuchTableError: pass
[ "def", "_reflect_table", "(", "self", ")", ":", "with", "self", ".", "db", ".", "lock", ":", "try", ":", "self", ".", "_table", "=", "SQLATable", "(", "self", ".", "name", ",", "self", ".", "db", ".", "metadata", ",", "schema", "=", "self", ".", ...
Load the tables definition from the database.
[ "Load", "the", "tables", "definition", "from", "the", "database", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L202-L211
230,318
pudo/dataset
dataset/table.py
Table._sync_table
def _sync_table(self, columns):
    """Lazy load, create or adapt the table structure in the database."""
    if self._table is None:
        # Try to reflect an existing table before deciding to create one.
        self._reflect_table()
    if self._table is None:
        if not self._auto_create:
            raise DatasetException("Table does not exist: %s" % self.name)
        # Keep the lock scope small because this is run very often.
        with self.db.lock:
            self._threading_warn()
            self._table = SQLATable(self.name, self.db.metadata,
                                    schema=self.db.schema)
            if self._primary_id is not False:
                # Some DBMS (MySQL, SQLite) cannot create tables with no
                # columns, so the primary key is added up front.
                pk_name = self._primary_id or self.PRIMARY_DEFAULT
                pk_type = self._primary_type or Types.integer
                auto_increment = pk_type in [Types.integer, Types.bigint]
                self._table.append_column(
                    Column(pk_name, pk_type, primary_key=True,
                           autoincrement=auto_increment))
            for column in columns:
                if not column.name == self._primary_id:
                    self._table.append_column(column)
            self._table.create(self.db.executable, checkfirst=True)
    elif len(columns):
        # Table exists: add any columns that are still missing.
        with self.db.lock:
            self._reflect_table()
            self._threading_warn()
            for column in columns:
                if not self.has_column(column.name):
                    self.db.op.add_column(self.name, column, self.db.schema)
            self._reflect_table()
python
def _sync_table(self, columns): if self._table is None: # Load an existing table from the database. self._reflect_table() if self._table is None: # Create the table with an initial set of columns. if not self._auto_create: raise DatasetException("Table does not exist: %s" % self.name) # Keep the lock scope small because this is run very often. with self.db.lock: self._threading_warn() self._table = SQLATable(self.name, self.db.metadata, schema=self.db.schema) if self._primary_id is not False: # This can go wrong on DBMS like MySQL and SQLite where # tables cannot have no columns. primary_id = self._primary_id or self.PRIMARY_DEFAULT primary_type = self._primary_type or Types.integer increment = primary_type in [Types.integer, Types.bigint] column = Column(primary_id, primary_type, primary_key=True, autoincrement=increment) self._table.append_column(column) for column in columns: if not column.name == self._primary_id: self._table.append_column(column) self._table.create(self.db.executable, checkfirst=True) elif len(columns): with self.db.lock: self._reflect_table() self._threading_warn() for column in columns: if not self.has_column(column.name): self.db.op.add_column(self.name, column, self.db.schema) self._reflect_table()
[ "def", "_sync_table", "(", "self", ",", "columns", ")", ":", "if", "self", ".", "_table", "is", "None", ":", "# Load an existing table from the database.", "self", ".", "_reflect_table", "(", ")", "if", "self", ".", "_table", "is", "None", ":", "# Create the t...
Lazy load, create or adapt the table structure in the database.
[ "Lazy", "load", "create", "or", "adapt", "the", "table", "structure", "in", "the", "database", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L220-L256
230,319
pudo/dataset
dataset/table.py
Table.drop
def drop(self): """Drop the table from the database. Deletes both the schema and all the contents within it. """ with self.db.lock: if self.exists: self._threading_warn() self.table.drop(self.db.executable, checkfirst=True) self._table = None
python
def drop(self): with self.db.lock: if self.exists: self._threading_warn() self.table.drop(self.db.executable, checkfirst=True) self._table = None
[ "def", "drop", "(", "self", ")", ":", "with", "self", ".", "db", ".", "lock", ":", "if", "self", ".", "exists", ":", "self", ".", "_threading_warn", "(", ")", "self", ".", "table", ".", "drop", "(", "self", ".", "db", ".", "executable", ",", "che...
Drop the table from the database. Deletes both the schema and all the contents within it.
[ "Drop", "the", "table", "from", "the", "database", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L390-L399
230,320
pudo/dataset
dataset/table.py
Table.has_index
def has_index(self, columns): """Check if an index exists to cover the given ``columns``.""" if not self.exists: return False columns = set([normalize_column_name(c) for c in columns]) if columns in self._indexes: return True for column in columns: if not self.has_column(column): return False indexes = self.db.inspect.get_indexes(self.name, schema=self.db.schema) for index in indexes: if columns == set(index.get('column_names', [])): self._indexes.append(columns) return True return False
python
def has_index(self, columns): if not self.exists: return False columns = set([normalize_column_name(c) for c in columns]) if columns in self._indexes: return True for column in columns: if not self.has_column(column): return False indexes = self.db.inspect.get_indexes(self.name, schema=self.db.schema) for index in indexes: if columns == set(index.get('column_names', [])): self._indexes.append(columns) return True return False
[ "def", "has_index", "(", "self", ",", "columns", ")", ":", "if", "not", "self", ".", "exists", ":", "return", "False", "columns", "=", "set", "(", "[", "normalize_column_name", "(", "c", ")", "for", "c", "in", "columns", "]", ")", "if", "columns", "i...
Check if an index exists to cover the given ``columns``.
[ "Check", "if", "an", "index", "exists", "to", "cover", "the", "given", "columns", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L401-L416
230,321
pudo/dataset
dataset/table.py
Table.create_index
def create_index(self, columns, name=None, **kw): """Create an index to speed up queries on a table. If no ``name`` is given a random name is created. :: table.create_index(['name', 'country']) """ columns = [normalize_column_name(c) for c in ensure_tuple(columns)] with self.db.lock: if not self.exists: raise DatasetException("Table has not been created yet.") for column in columns: if not self.has_column(column): return if not self.has_index(columns): self._threading_warn() name = name or index_name(self.name, columns) columns = [self.table.c[c] for c in columns] idx = Index(name, *columns, **kw) idx.create(self.db.executable)
python
def create_index(self, columns, name=None, **kw): columns = [normalize_column_name(c) for c in ensure_tuple(columns)] with self.db.lock: if not self.exists: raise DatasetException("Table has not been created yet.") for column in columns: if not self.has_column(column): return if not self.has_index(columns): self._threading_warn() name = name or index_name(self.name, columns) columns = [self.table.c[c] for c in columns] idx = Index(name, *columns, **kw) idx.create(self.db.executable)
[ "def", "create_index", "(", "self", ",", "columns", ",", "name", "=", "None", ",", "*", "*", "kw", ")", ":", "columns", "=", "[", "normalize_column_name", "(", "c", ")", "for", "c", "in", "ensure_tuple", "(", "columns", ")", "]", "with", "self", ".",...
Create an index to speed up queries on a table. If no ``name`` is given a random name is created. :: table.create_index(['name', 'country'])
[ "Create", "an", "index", "to", "speed", "up", "queries", "on", "a", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L418-L440
230,322
pudo/dataset
dataset/table.py
Table.find
def find(self, *_clauses, **kwargs): """Perform a simple search on the table. Simply pass keyword arguments as ``filter``. :: results = table.find(country='France') results = table.find(country='France', year=1980) Using ``_limit``:: # just return the first 10 rows results = table.find(country='France', _limit=10) You can sort the results by single or multiple columns. Append a minus sign to the column name for descending order:: # sort results by a column 'year' results = table.find(country='France', order_by='year') # return all rows sorted by multiple columns (descending by year) results = table.find(order_by=['country', '-year']) To perform complex queries with advanced filters or to perform aggregation, use :py:meth:`db.query() <dataset.Database.query>` instead. """ if not self.exists: return iter([]) _limit = kwargs.pop('_limit', None) _offset = kwargs.pop('_offset', 0) order_by = kwargs.pop('order_by', None) _streamed = kwargs.pop('_streamed', False) _step = kwargs.pop('_step', QUERY_STEP) if _step is False or _step == 0: _step = None order_by = self._args_to_order_by(order_by) args = self._args_to_clause(kwargs, clauses=_clauses) query = self.table.select(whereclause=args, limit=_limit, offset=_offset) if len(order_by): query = query.order_by(*order_by) conn = self.db.executable if _streamed: conn = self.db.engine.connect() conn = conn.execution_options(stream_results=True) return ResultIter(conn.execute(query), row_type=self.db.row_type, step=_step)
python
def find(self, *_clauses, **kwargs): if not self.exists: return iter([]) _limit = kwargs.pop('_limit', None) _offset = kwargs.pop('_offset', 0) order_by = kwargs.pop('order_by', None) _streamed = kwargs.pop('_streamed', False) _step = kwargs.pop('_step', QUERY_STEP) if _step is False or _step == 0: _step = None order_by = self._args_to_order_by(order_by) args = self._args_to_clause(kwargs, clauses=_clauses) query = self.table.select(whereclause=args, limit=_limit, offset=_offset) if len(order_by): query = query.order_by(*order_by) conn = self.db.executable if _streamed: conn = self.db.engine.connect() conn = conn.execution_options(stream_results=True) return ResultIter(conn.execute(query), row_type=self.db.row_type, step=_step)
[ "def", "find", "(", "self", ",", "*", "_clauses", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "exists", ":", "return", "iter", "(", "[", "]", ")", "_limit", "=", "kwargs", ".", "pop", "(", "'_limit'", ",", "None", ")", "_offset",...
Perform a simple search on the table. Simply pass keyword arguments as ``filter``. :: results = table.find(country='France') results = table.find(country='France', year=1980) Using ``_limit``:: # just return the first 10 rows results = table.find(country='France', _limit=10) You can sort the results by single or multiple columns. Append a minus sign to the column name for descending order:: # sort results by a column 'year' results = table.find(country='France', order_by='year') # return all rows sorted by multiple columns (descending by year) results = table.find(order_by=['country', '-year']) To perform complex queries with advanced filters or to perform aggregation, use :py:meth:`db.query() <dataset.Database.query>` instead.
[ "Perform", "a", "simple", "search", "on", "the", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L442-L494
230,323
pudo/dataset
dataset/table.py
Table.find_one
def find_one(self, *args, **kwargs): """Get a single result from the table. Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or ``None``. :: row = table.find_one(country='United States') """ if not self.exists: return None kwargs['_limit'] = 1 kwargs['_step'] = None resiter = self.find(*args, **kwargs) try: for row in resiter: return row finally: resiter.close()
python
def find_one(self, *args, **kwargs): if not self.exists: return None kwargs['_limit'] = 1 kwargs['_step'] = None resiter = self.find(*args, **kwargs) try: for row in resiter: return row finally: resiter.close()
[ "def", "find_one", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "exists", ":", "return", "None", "kwargs", "[", "'_limit'", "]", "=", "1", "kwargs", "[", "'_step'", "]", "=", "None", "resiter", "=", ...
Get a single result from the table. Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or ``None``. :: row = table.find_one(country='United States')
[ "Get", "a", "single", "result", "from", "the", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L496-L515
230,324
pudo/dataset
dataset/table.py
Table.count
def count(self, *_clauses, **kwargs): """Return the count of results for the given filter set.""" # NOTE: this does not have support for limit and offset since I can't # see how this is useful. Still, there might be compatibility issues # with people using these flags. Let's see how it goes. if not self.exists: return 0 args = self._args_to_clause(kwargs, clauses=_clauses) query = select([func.count()], whereclause=args) query = query.select_from(self.table) rp = self.db.executable.execute(query) return rp.fetchone()[0]
python
def count(self, *_clauses, **kwargs): # NOTE: this does not have support for limit and offset since I can't # see how this is useful. Still, there might be compatibility issues # with people using these flags. Let's see how it goes. if not self.exists: return 0 args = self._args_to_clause(kwargs, clauses=_clauses) query = select([func.count()], whereclause=args) query = query.select_from(self.table) rp = self.db.executable.execute(query) return rp.fetchone()[0]
[ "def", "count", "(", "self", ",", "*", "_clauses", ",", "*", "*", "kwargs", ")", ":", "# NOTE: this does not have support for limit and offset since I can't", "# see how this is useful. Still, there might be compatibility issues", "# with people using these flags. Let's see how it goes...
Return the count of results for the given filter set.
[ "Return", "the", "count", "of", "results", "for", "the", "given", "filter", "set", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L517-L529
230,325
pudo/dataset
dataset/__init__.py
connect
def connect(url=None, schema=None, reflect_metadata=True, engine_kwargs=None, reflect_views=True, ensure_schema=True, row_type=row_type): """ Opens a new connection to a database. *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not defined it will try to use *DATABASE_URL* from environment variable. Returns an instance of :py:class:`Database <dataset.Database>`. Set *reflect_metadata* to False if you don't want the entire database schema to be pre-loaded. This significantly speeds up connecting to large databases with lots of tables. *reflect_views* can be set to False if you don't want views to be loaded. Additionally, *engine_kwargs* will be directly passed to SQLAlchemy, e.g. set *engine_kwargs={'pool_recycle': 3600}* will avoid `DB connection timeout`_. Set *row_type* to an alternate dict-like class to change the type of container rows are stored in.:: db = dataset.connect('sqlite:///factbook.db') .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle """ if url is None: url = os.environ.get('DATABASE_URL', 'sqlite://') return Database(url, schema=schema, reflect_metadata=reflect_metadata, engine_kwargs=engine_kwargs, reflect_views=reflect_views, ensure_schema=ensure_schema, row_type=row_type)
python
def connect(url=None, schema=None, reflect_metadata=True, engine_kwargs=None, reflect_views=True, ensure_schema=True, row_type=row_type): if url is None: url = os.environ.get('DATABASE_URL', 'sqlite://') return Database(url, schema=schema, reflect_metadata=reflect_metadata, engine_kwargs=engine_kwargs, reflect_views=reflect_views, ensure_schema=ensure_schema, row_type=row_type)
[ "def", "connect", "(", "url", "=", "None", ",", "schema", "=", "None", ",", "reflect_metadata", "=", "True", ",", "engine_kwargs", "=", "None", ",", "reflect_views", "=", "True", ",", "ensure_schema", "=", "True", ",", "row_type", "=", "row_type", ")", "...
Opens a new connection to a database. *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not defined it will try to use *DATABASE_URL* from environment variable. Returns an instance of :py:class:`Database <dataset.Database>`. Set *reflect_metadata* to False if you don't want the entire database schema to be pre-loaded. This significantly speeds up connecting to large databases with lots of tables. *reflect_views* can be set to False if you don't want views to be loaded. Additionally, *engine_kwargs* will be directly passed to SQLAlchemy, e.g. set *engine_kwargs={'pool_recycle': 3600}* will avoid `DB connection timeout`_. Set *row_type* to an alternate dict-like class to change the type of container rows are stored in.:: db = dataset.connect('sqlite:///factbook.db') .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle
[ "Opens", "a", "new", "connection", "to", "a", "database", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/__init__.py#L17-L42
230,326
pudo/dataset
dataset/database.py
Database.executable
def executable(self): """Connection against which statements will be executed.""" if not hasattr(self.local, 'conn'): self.local.conn = self.engine.connect() return self.local.conn
python
def executable(self): if not hasattr(self.local, 'conn'): self.local.conn = self.engine.connect() return self.local.conn
[ "def", "executable", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "local", ",", "'conn'", ")", ":", "self", ".", "local", ".", "conn", "=", "self", ".", "engine", ".", "connect", "(", ")", "return", "self", ".", "local", ".", ...
Connection against which statements will be executed.
[ "Connection", "against", "which", "statements", "will", "be", "executed", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L60-L64
230,327
pudo/dataset
dataset/database.py
Database.in_transaction
def in_transaction(self): """Check if this database is in a transactional context.""" if not hasattr(self.local, 'tx'): return False return len(self.local.tx) > 0
python
def in_transaction(self): if not hasattr(self.local, 'tx'): return False return len(self.local.tx) > 0
[ "def", "in_transaction", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "local", ",", "'tx'", ")", ":", "return", "False", "return", "len", "(", "self", ".", "local", ".", "tx", ")", ">", "0" ]
Check if this database is in a transactional context.
[ "Check", "if", "this", "database", "is", "in", "a", "transactional", "context", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L83-L87
230,328
pudo/dataset
dataset/database.py
Database.begin
def begin(self): """Enter a transaction explicitly. No data will be written until the transaction has been committed. """ if not hasattr(self.local, 'tx'): self.local.tx = [] self.local.tx.append(self.executable.begin())
python
def begin(self): if not hasattr(self.local, 'tx'): self.local.tx = [] self.local.tx.append(self.executable.begin())
[ "def", "begin", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "local", ",", "'tx'", ")", ":", "self", ".", "local", ".", "tx", "=", "[", "]", "self", ".", "local", ".", "tx", ".", "append", "(", "self", ".", "executable", "."...
Enter a transaction explicitly. No data will be written until the transaction has been committed.
[ "Enter", "a", "transaction", "explicitly", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L94-L101
230,329
pudo/dataset
dataset/database.py
Database.rollback
def rollback(self): """Roll back the current transaction. Discard all statements executed since the transaction was begun. """ if hasattr(self.local, 'tx') and self.local.tx: tx = self.local.tx.pop() tx.rollback() self._flush_tables()
python
def rollback(self): if hasattr(self.local, 'tx') and self.local.tx: tx = self.local.tx.pop() tx.rollback() self._flush_tables()
[ "def", "rollback", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "local", ",", "'tx'", ")", "and", "self", ".", "local", ".", "tx", ":", "tx", "=", "self", ".", "local", ".", "tx", ".", "pop", "(", ")", "tx", ".", "rollback", "(", ...
Roll back the current transaction. Discard all statements executed since the transaction was begun.
[ "Roll", "back", "the", "current", "transaction", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L113-L121
230,330
pudo/dataset
dataset/database.py
Database.load_table
def load_table(self, table_name): """Load a table. This will fail if the tables does not already exist in the database. If the table exists, its columns will be reflected and are available on the :py:class:`Table <dataset.Table>` object. Returns a :py:class:`Table <dataset.Table>` instance. :: table = db.load_table('population') """ table_name = normalize_table_name(table_name) with self.lock: if table_name not in self._tables: self._tables[table_name] = Table(self, table_name) return self._tables.get(table_name)
python
def load_table(self, table_name): table_name = normalize_table_name(table_name) with self.lock: if table_name not in self._tables: self._tables[table_name] = Table(self, table_name) return self._tables.get(table_name)
[ "def", "load_table", "(", "self", ",", "table_name", ")", ":", "table_name", "=", "normalize_table_name", "(", "table_name", ")", "with", "self", ".", "lock", ":", "if", "table_name", "not", "in", "self", ".", "_tables", ":", "self", ".", "_tables", "[", ...
Load a table. This will fail if the tables does not already exist in the database. If the table exists, its columns will be reflected and are available on the :py:class:`Table <dataset.Table>` object. Returns a :py:class:`Table <dataset.Table>` instance. :: table = db.load_table('population')
[ "Load", "a", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L190-L206
230,331
pudo/dataset
dataset/database.py
Database.get_table
def get_table(self, table_name, primary_id=None, primary_type=None): """Load or create a table. This is now the same as ``create_table``. :: table = db.get_table('population') # you can also use the short-hand syntax: table = db['population'] """ return self.create_table(table_name, primary_id, primary_type)
python
def get_table(self, table_name, primary_id=None, primary_type=None): return self.create_table(table_name, primary_id, primary_type)
[ "def", "get_table", "(", "self", ",", "table_name", ",", "primary_id", "=", "None", ",", "primary_type", "=", "None", ")", ":", "return", "self", ".", "create_table", "(", "table_name", ",", "primary_id", ",", "primary_type", ")" ]
Load or create a table. This is now the same as ``create_table``. :: table = db.get_table('population') # you can also use the short-hand syntax: table = db['population']
[ "Load", "or", "create", "a", "table", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L208-L218
230,332
pudo/dataset
dataset/database.py
Database.query
def query(self, query, *args, **kwargs): """Run a statement on the database directly. Allows for the execution of arbitrary read/write queries. A query can either be a plain text string, or a `SQLAlchemy expression <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_. If a plain string is passed in, it will be converted to an expression automatically. Further positional and keyword arguments will be used for parameter binding. To include a positional argument in your query, use question marks in the query (i.e. ``SELECT * FROM tbl WHERE a = ?```). For keyword arguments, use a bind parameter (i.e. ``SELECT * FROM tbl WHERE a = :foo``). :: statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user' for row in db.query(statement): print(row['user'], row['c']) The returned iterator will yield each result sequentially. """ if isinstance(query, six.string_types): query = text(query) _step = kwargs.pop('_step', QUERY_STEP) rp = self.executable.execute(query, *args, **kwargs) return ResultIter(rp, row_type=self.row_type, step=_step)
python
def query(self, query, *args, **kwargs): if isinstance(query, six.string_types): query = text(query) _step = kwargs.pop('_step', QUERY_STEP) rp = self.executable.execute(query, *args, **kwargs) return ResultIter(rp, row_type=self.row_type, step=_step)
[ "def", "query", "(", "self", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "query", ",", "six", ".", "string_types", ")", ":", "query", "=", "text", "(", "query", ")", "_step", "=", "kwargs", ".", "po...
Run a statement on the database directly. Allows for the execution of arbitrary read/write queries. A query can either be a plain text string, or a `SQLAlchemy expression <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_. If a plain string is passed in, it will be converted to an expression automatically. Further positional and keyword arguments will be used for parameter binding. To include a positional argument in your query, use question marks in the query (i.e. ``SELECT * FROM tbl WHERE a = ?```). For keyword arguments, use a bind parameter (i.e. ``SELECT * FROM tbl WHERE a = :foo``). :: statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user' for row in db.query(statement): print(row['user'], row['c']) The returned iterator will yield each result sequentially.
[ "Run", "a", "statement", "on", "the", "database", "directly", "." ]
a008d120c7f3c48ccba98a282c0c67d6e719c0e5
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L228-L254
230,333
glamp/bashplotlib
bashplotlib/utils/helpers.py
printcolour
def printcolour(text, sameline=False, colour=get_colour("ENDC")): """ Print color text using escape codes """ if sameline: sep = '' else: sep = '\n' sys.stdout.write(get_colour(colour) + text + bcolours["ENDC"] + sep)
python
def printcolour(text, sameline=False, colour=get_colour("ENDC")): if sameline: sep = '' else: sep = '\n' sys.stdout.write(get_colour(colour) + text + bcolours["ENDC"] + sep)
[ "def", "printcolour", "(", "text", ",", "sameline", "=", "False", ",", "colour", "=", "get_colour", "(", "\"ENDC\"", ")", ")", ":", "if", "sameline", ":", "sep", "=", "''", "else", ":", "sep", "=", "'\\n'", "sys", ".", "stdout", ".", "write", "(", ...
Print color text using escape codes
[ "Print", "color", "text", "using", "escape", "codes" ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/utils/helpers.py#L36-L44
230,334
glamp/bashplotlib
bashplotlib/utils/helpers.py
abbreviate
def abbreviate(labels, rfill=' '): """ Abbreviate labels without introducing ambiguities. """ max_len = max(len(l) for l in labels) for i in range(1, max_len): abbrev = [l[:i].ljust(i, rfill) for l in labels] if len(abbrev) == len(set(abbrev)): break return abbrev
python
def abbreviate(labels, rfill=' '): max_len = max(len(l) for l in labels) for i in range(1, max_len): abbrev = [l[:i].ljust(i, rfill) for l in labels] if len(abbrev) == len(set(abbrev)): break return abbrev
[ "def", "abbreviate", "(", "labels", ",", "rfill", "=", "' '", ")", ":", "max_len", "=", "max", "(", "len", "(", "l", ")", "for", "l", "in", "labels", ")", "for", "i", "in", "range", "(", "1", ",", "max_len", ")", ":", "abbrev", "=", "[", "l", ...
Abbreviate labels without introducing ambiguities.
[ "Abbreviate", "labels", "without", "introducing", "ambiguities", "." ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/utils/helpers.py#L67-L76
230,335
glamp/bashplotlib
bashplotlib/utils/helpers.py
box_text
def box_text(text, width, offset=0): """ Return text inside an ascii textbox """ box = " " * offset + "-" * (width+2) + "\n" box += " " * offset + "|" + text.center(width) + "|" + "\n" box += " " * offset + "-" * (width+2) return box
python
def box_text(text, width, offset=0): box = " " * offset + "-" * (width+2) + "\n" box += " " * offset + "|" + text.center(width) + "|" + "\n" box += " " * offset + "-" * (width+2) return box
[ "def", "box_text", "(", "text", ",", "width", ",", "offset", "=", "0", ")", ":", "box", "=", "\" \"", "*", "offset", "+", "\"-\"", "*", "(", "width", "+", "2", ")", "+", "\"\\n\"", "box", "+=", "\" \"", "*", "offset", "+", "\"|\"", "+", "text", ...
Return text inside an ascii textbox
[ "Return", "text", "inside", "an", "ascii", "textbox" ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/utils/helpers.py#L79-L86
230,336
glamp/bashplotlib
bashplotlib/histogram.py
calc_bins
def calc_bins(n, min_val, max_val, h=None, binwidth=None): """ Calculate number of bins for the histogram """ if not h: h = max(10, math.log(n + 1, 2)) if binwidth == 0: binwidth = 0.1 if binwidth is None: binwidth = (max_val - min_val) / h for b in drange(min_val, max_val, step=binwidth, include_stop=True): if b.is_integer(): yield int(b) else: yield b
python
def calc_bins(n, min_val, max_val, h=None, binwidth=None): if not h: h = max(10, math.log(n + 1, 2)) if binwidth == 0: binwidth = 0.1 if binwidth is None: binwidth = (max_val - min_val) / h for b in drange(min_val, max_val, step=binwidth, include_stop=True): if b.is_integer(): yield int(b) else: yield b
[ "def", "calc_bins", "(", "n", ",", "min_val", ",", "max_val", ",", "h", "=", "None", ",", "binwidth", "=", "None", ")", ":", "if", "not", "h", ":", "h", "=", "max", "(", "10", ",", "math", ".", "log", "(", "n", "+", "1", ",", "2", ")", ")",...
Calculate number of bins for the histogram
[ "Calculate", "number", "of", "bins", "for", "the", "histogram" ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/histogram.py#L20-L34
230,337
glamp/bashplotlib
bashplotlib/histogram.py
read_numbers
def read_numbers(numbers): """ Read the input data in the most optimal way """ if isiterable(numbers): for number in numbers: yield float(str(number).strip()) else: with open(numbers) as fh: for number in fh: yield float(number.strip())
python
def read_numbers(numbers): if isiterable(numbers): for number in numbers: yield float(str(number).strip()) else: with open(numbers) as fh: for number in fh: yield float(number.strip())
[ "def", "read_numbers", "(", "numbers", ")", ":", "if", "isiterable", "(", "numbers", ")", ":", "for", "number", "in", "numbers", ":", "yield", "float", "(", "str", "(", "number", ")", ".", "strip", "(", ")", ")", "else", ":", "with", "open", "(", "...
Read the input data in the most optimal way
[ "Read", "the", "input", "data", "in", "the", "most", "optimal", "way" ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/histogram.py#L37-L47
230,338
glamp/bashplotlib
bashplotlib/histogram.py
run_demo
def run_demo(): """ Run a demonstration """ module_dir = dirname(dirname(os.path.realpath(__file__))) demo_file = os.path.join(module_dir, 'examples/data/exp.txt') if not os.path.isfile(demo_file): sys.stderr.write("demo input file not found!\n") sys.stderr.write("run the downloaddata.sh script in the example first\n") sys.exit(1) # plotting a histogram print("plotting a basic histogram") print("plot_hist('%s')" % demo_file) print("hist -f %s" % demo_file) print("cat %s | hist" % demo_file) plot_hist(demo_file) print("*" * 80) # with colours print("histogram with colours") print("plot_hist('%s', colour='blue')" % demo_file) print("hist -f %s -c blue" % demo_file) plot_hist(demo_file, colour='blue') print("*" * 80) # changing the shape of the point print("changing the shape of the bars") print("plot_hist('%s', pch='.')" % demo_file) print("hist -f %s -p ." % demo_file) plot_hist(demo_file, pch='.') print("*" * 80) # changing the size of the plot print("changing the size of the plot") print("plot_hist('%s', height=35.0, bincount=40)" % demo_file) print("hist -f %s -s 35.0 -b 40" % demo_file) plot_hist(demo_file, height=35.0, bincount=40)
python
def run_demo(): module_dir = dirname(dirname(os.path.realpath(__file__))) demo_file = os.path.join(module_dir, 'examples/data/exp.txt') if not os.path.isfile(demo_file): sys.stderr.write("demo input file not found!\n") sys.stderr.write("run the downloaddata.sh script in the example first\n") sys.exit(1) # plotting a histogram print("plotting a basic histogram") print("plot_hist('%s')" % demo_file) print("hist -f %s" % demo_file) print("cat %s | hist" % demo_file) plot_hist(demo_file) print("*" * 80) # with colours print("histogram with colours") print("plot_hist('%s', colour='blue')" % demo_file) print("hist -f %s -c blue" % demo_file) plot_hist(demo_file, colour='blue') print("*" * 80) # changing the shape of the point print("changing the shape of the bars") print("plot_hist('%s', pch='.')" % demo_file) print("hist -f %s -p ." % demo_file) plot_hist(demo_file, pch='.') print("*" * 80) # changing the size of the plot print("changing the size of the plot") print("plot_hist('%s', height=35.0, bincount=40)" % demo_file) print("hist -f %s -s 35.0 -b 40" % demo_file) plot_hist(demo_file, height=35.0, bincount=40)
[ "def", "run_demo", "(", ")", ":", "module_dir", "=", "dirname", "(", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ")", "demo_file", "=", "os", ".", "path", ".", "join", "(", "module_dir", ",", "'examples/data/exp.txt'", ...
Run a demonstration
[ "Run", "a", "demonstration" ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/histogram.py#L50-L88
230,339
glamp/bashplotlib
bashplotlib/scatterplot.py
plot_scatter
def plot_scatter(f, xs, ys, size, pch, colour, title): """ Form a complex number. Arguments: f -- comma delimited file w/ x,y coordinates xs -- if f not specified this is a file w/ x coordinates ys -- if f not specified this is a filew / y coordinates size -- size of the plot pch -- shape of the points (any character) colour -- colour of the points title -- title of the plot """ cs = None if f: if isinstance(f, str): with open(f) as fh: data = [tuple(line.strip().split(',')) for line in fh] else: data = [tuple(line.strip().split(',')) for line in f] xs = [float(i[0]) for i in data] ys = [float(i[1]) for i in data] if len(data[0]) > 2: cs = [i[2].strip() for i in data] elif isinstance(xs, list) and isinstance(ys, list): pass else: with open(xs) as fh: xs = [float(str(row).strip()) for row in fh] with open(ys) as fh: ys = [float(str(row).strip()) for row in fh] _plot_scatter(xs, ys, size, pch, colour, title, cs)
python
def plot_scatter(f, xs, ys, size, pch, colour, title): cs = None if f: if isinstance(f, str): with open(f) as fh: data = [tuple(line.strip().split(',')) for line in fh] else: data = [tuple(line.strip().split(',')) for line in f] xs = [float(i[0]) for i in data] ys = [float(i[1]) for i in data] if len(data[0]) > 2: cs = [i[2].strip() for i in data] elif isinstance(xs, list) and isinstance(ys, list): pass else: with open(xs) as fh: xs = [float(str(row).strip()) for row in fh] with open(ys) as fh: ys = [float(str(row).strip()) for row in fh] _plot_scatter(xs, ys, size, pch, colour, title, cs)
[ "def", "plot_scatter", "(", "f", ",", "xs", ",", "ys", ",", "size", ",", "pch", ",", "colour", ",", "title", ")", ":", "cs", "=", "None", "if", "f", ":", "if", "isinstance", "(", "f", ",", "str", ")", ":", "with", "open", "(", "f", ")", "as",...
Form a complex number. Arguments: f -- comma delimited file w/ x,y coordinates xs -- if f not specified this is a file w/ x coordinates ys -- if f not specified this is a filew / y coordinates size -- size of the plot pch -- shape of the points (any character) colour -- colour of the points title -- title of the plot
[ "Form", "a", "complex", "number", "." ]
f7533172c4dc912b5accae42edd5c0f655d7468f
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/scatterplot.py#L52-L84
230,340
cltk/cltk
cltk/phonology/middle_high_german/transcription.py
Word.syllabify
def syllabify(self): """ Syllabifier module for Middle High German The algorithm works by applying the MOP(Maximal Onset Principle) on open syllables. For closed syllables, the legal partitions are checked and applied. The word is always returned in lowercase. Examples: >>> Word('entslâfen').syllabify() ['ent', 'slâ', 'fen'] >>> Word('fröude').syllabify() ['fröu', 'de'] >>> Word('füerest').syllabify() ['füe', 'rest'] """ # Array holding the index of each given syllable ind = [] i = 0 # Iterate through letters of word searching for the nuclei while i < len(self.word) - 1: if self.word[i] in SHORT_VOWELS + LONG_VOWELS: nucleus = '' # Find cluster of vowels while self.word[i] in SHORT_VOWELS + LONG_VOWELS and i < len(self.word) - 1: nucleus += self.word[i] i += 1 try: # Check whether it is suceeded by a geminant if self.word[i] == self.word[i + 1]: ind.append(i) i += 2 continue except IndexError: pass if nucleus in SHORT_VOWELS: ind.append(i + 2 if self.word[i:i+3] in TRIPHTHONGS else i + 1 if self.word[i:i + 2] in DIPHTHONGS else i) continue else: ind.append(i - 1) continue i += 1 self.syllabified = self.word for n, k in enumerate(ind): self.syllabified = self.syllabified[:k + n + 1] + "." + self.syllabified[k + n + 1:] # Check whether the last syllable lacks a vowel nucleus self.syllabified = self.syllabified.split(".") if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) == 0: self.syllabified[-2] += self.syllabified[-1] self.syllabified = self.syllabified[:-1] return self.syllabified
python
def syllabify(self): # Array holding the index of each given syllable ind = [] i = 0 # Iterate through letters of word searching for the nuclei while i < len(self.word) - 1: if self.word[i] in SHORT_VOWELS + LONG_VOWELS: nucleus = '' # Find cluster of vowels while self.word[i] in SHORT_VOWELS + LONG_VOWELS and i < len(self.word) - 1: nucleus += self.word[i] i += 1 try: # Check whether it is suceeded by a geminant if self.word[i] == self.word[i + 1]: ind.append(i) i += 2 continue except IndexError: pass if nucleus in SHORT_VOWELS: ind.append(i + 2 if self.word[i:i+3] in TRIPHTHONGS else i + 1 if self.word[i:i + 2] in DIPHTHONGS else i) continue else: ind.append(i - 1) continue i += 1 self.syllabified = self.word for n, k in enumerate(ind): self.syllabified = self.syllabified[:k + n + 1] + "." + self.syllabified[k + n + 1:] # Check whether the last syllable lacks a vowel nucleus self.syllabified = self.syllabified.split(".") if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) == 0: self.syllabified[-2] += self.syllabified[-1] self.syllabified = self.syllabified[:-1] return self.syllabified
[ "def", "syllabify", "(", "self", ")", ":", "# Array holding the index of each given syllable", "ind", "=", "[", "]", "i", "=", "0", "# Iterate through letters of word searching for the nuclei", "while", "i", "<", "len", "(", "self", ".", "word", ")", "-", "1", ":"...
Syllabifier module for Middle High German The algorithm works by applying the MOP(Maximal Onset Principle) on open syllables. For closed syllables, the legal partitions are checked and applied. The word is always returned in lowercase. Examples: >>> Word('entslâfen').syllabify() ['ent', 'slâ', 'fen'] >>> Word('fröude').syllabify() ['fröu', 'de'] >>> Word('füerest').syllabify() ['füe', 'rest']
[ "Syllabifier", "module", "for", "Middle", "High", "German" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/middle_high_german/transcription.py#L151-L221
230,341
cltk/cltk
cltk/phonology/middle_high_german/transcription.py
Word.ASCII_encoding
def ASCII_encoding(self): """Returns the ASCII encoding of a string""" w = unicodedata.normalize('NFKD', self.word).encode('ASCII', 'ignore') # Encode into ASCII, returns a bytestring w = w.decode('utf-8') # Convert back to string return w
python
def ASCII_encoding(self): w = unicodedata.normalize('NFKD', self.word).encode('ASCII', 'ignore') # Encode into ASCII, returns a bytestring w = w.decode('utf-8') # Convert back to string return w
[ "def", "ASCII_encoding", "(", "self", ")", ":", "w", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "self", ".", "word", ")", ".", "encode", "(", "'ASCII'", ",", "'ignore'", ")", "# Encode into ASCII, returns a bytestring", "w", "=", "w", ".", "...
Returns the ASCII encoding of a string
[ "Returns", "the", "ASCII", "encoding", "of", "a", "string" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/middle_high_german/transcription.py#L272-L279
230,342
cltk/cltk
cltk/stem/akkadian/atf_converter.py
ATFConverter._convert_consonant
def _convert_consonant(sign): """ Uses dictionary to replace ATF convention for unicode characters. input = ['as,', 'S,ATU', 'tet,', 'T,et', 'sza', 'ASZ'] output = ['aṣ', 'ṢATU', 'teṭ', 'Ṭet', 'ša', 'AŠ'] :param sign: string :return: string """ for key in TITTLES: sign = sign.replace(key, TITTLES[key]) return sign
python
def _convert_consonant(sign): for key in TITTLES: sign = sign.replace(key, TITTLES[key]) return sign
[ "def", "_convert_consonant", "(", "sign", ")", ":", "for", "key", "in", "TITTLES", ":", "sign", "=", "sign", ".", "replace", "(", "key", ",", "TITTLES", "[", "key", "]", ")", "return", "sign" ]
Uses dictionary to replace ATF convention for unicode characters. input = ['as,', 'S,ATU', 'tet,', 'T,et', 'sza', 'ASZ'] output = ['aṣ', 'ṢATU', 'teṭ', 'Ṭet', 'ša', 'AŠ'] :param sign: string :return: string
[ "Uses", "dictionary", "to", "replace", "ATF", "convention", "for", "unicode", "characters", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/atf_converter.py#L51-L63
230,343
cltk/cltk
cltk/stem/akkadian/atf_converter.py
ATFConverter._convert_number_to_subscript
def _convert_number_to_subscript(num): """ Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript """ subscript = '' for character in str(num): subscript += chr(0x2080 + int(character)) return subscript
python
def _convert_number_to_subscript(num): subscript = '' for character in str(num): subscript += chr(0x2080 + int(character)) return subscript
[ "def", "_convert_number_to_subscript", "(", "num", ")", ":", "subscript", "=", "''", "for", "character", "in", "str", "(", "num", ")", ":", "subscript", "+=", "chr", "(", "0x2080", "+", "int", "(", "character", ")", ")", "return", "subscript" ]
Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript
[ "Converts", "number", "into", "subscript" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/atf_converter.py#L66-L79
230,344
cltk/cltk
cltk/stem/akkadian/atf_converter.py
ATFConverter._convert_num
def _convert_num(self, sign): """ Converts number registered in get_number_from_sign. input = ["a2", "☉", "be3"] output = ["a₂", "☉", "be₃"] :param sign: string :return sign: string """ # Check if there's a number at the end new_sign, num = self._get_number_from_sign(sign) if num < 2: # "ab" -> "ab" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if num > 3: # "buru14" -> "buru₁₄" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if self.two_three: # pylint: disable=no-else-return return new_sign.replace(str(num), self._convert_number_to_subscript(num)) else: # "bad3" -> "bàd" for i, character in enumerate(new_sign): new_vowel = '' if character in VOWELS: if num == 2: # noinspection PyUnusedLocal new_vowel = character + chr(0x0301) elif num == 3: new_vowel = character + chr(0x0300) break return new_sign[:i] + normalize('NFC', new_vowel) + \ new_sign[i+1:].replace(str(num), '')
python
def _convert_num(self, sign): # Check if there's a number at the end new_sign, num = self._get_number_from_sign(sign) if num < 2: # "ab" -> "ab" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if num > 3: # "buru14" -> "buru₁₄" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if self.two_three: # pylint: disable=no-else-return return new_sign.replace(str(num), self._convert_number_to_subscript(num)) else: # "bad3" -> "bàd" for i, character in enumerate(new_sign): new_vowel = '' if character in VOWELS: if num == 2: # noinspection PyUnusedLocal new_vowel = character + chr(0x0301) elif num == 3: new_vowel = character + chr(0x0300) break return new_sign[:i] + normalize('NFC', new_vowel) + \ new_sign[i+1:].replace(str(num), '')
[ "def", "_convert_num", "(", "self", ",", "sign", ")", ":", "# Check if there's a number at the end", "new_sign", ",", "num", "=", "self", ".", "_get_number_from_sign", "(", "sign", ")", "if", "num", "<", "2", ":", "# \"ab\" -> \"ab\"", "return", "new_sign", ".",...
Converts number registered in get_number_from_sign. input = ["a2", "☉", "be3"] output = ["a₂", "☉", "be₃"] :param sign: string :return sign: string
[ "Converts", "number", "registered", "in", "get_number_from_sign", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/atf_converter.py#L100-L133
230,345
cltk/cltk
cltk/stem/akkadian/atf_converter.py
ATFConverter.process
def process(self, text_string): """ Expects a list of tokens, will return the list converted from ATF format to print-format. input = ["a", "a2", "a3", "geme2", "bad3", "buru14"] output = ["a", "á", "à", "géme", "bàd", "buru₁₄"] :param text_string: string :return: text_string """ output = [self._convert_num(self._convert_consonant(token)) for token in text_string] return output
python
def process(self, text_string): output = [self._convert_num(self._convert_consonant(token)) for token in text_string] return output
[ "def", "process", "(", "self", ",", "text_string", ")", ":", "output", "=", "[", "self", ".", "_convert_num", "(", "self", ".", "_convert_consonant", "(", "token", ")", ")", "for", "token", "in", "text_string", "]", "return", "output" ]
Expects a list of tokens, will return the list converted from ATF format to print-format. input = ["a", "a2", "a3", "geme2", "bad3", "buru14"] output = ["a", "á", "à", "géme", "bàd", "buru₁₄"] :param text_string: string :return: text_string
[ "Expects", "a", "list", "of", "tokens", "will", "return", "the", "list", "converted", "from", "ATF", "format", "to", "print", "-", "format", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/atf_converter.py#L135-L148
230,346
cltk/cltk
cltk/text_reuse/levenshtein.py
Levenshtein.Levenshtein_Distance
def Levenshtein_Distance(w1, w2): """ Computes Levenshtein Distance between two words Args: :param w1: str :param w2: str :return: int Examples: >>> Levenshtein.Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 >>> Levenshtein.Levenshtein_Distance('orbis', 'robis') 2 """ m, n = len(w1), len(w2) v1 = [i for i in range(n + 1)] v2 = [0 for i in range(n + 1)] for i in range(m): v2[0] = i + 1 for j in range(n): delCost = v1[j + 1] + 1 insCost = v2[j] + 1 subCost = v1[j] if w1[i] != w2[j]: subCost += 1 v2[j + 1] = min(delCost, insCost, subCost) v1, v2 = v2, v1 return v1[-1]
python
def Levenshtein_Distance(w1, w2): m, n = len(w1), len(w2) v1 = [i for i in range(n + 1)] v2 = [0 for i in range(n + 1)] for i in range(m): v2[0] = i + 1 for j in range(n): delCost = v1[j + 1] + 1 insCost = v2[j] + 1 subCost = v1[j] if w1[i] != w2[j]: subCost += 1 v2[j + 1] = min(delCost, insCost, subCost) v1, v2 = v2, v1 return v1[-1]
[ "def", "Levenshtein_Distance", "(", "w1", ",", "w2", ")", ":", "m", ",", "n", "=", "len", "(", "w1", ")", ",", "len", "(", "w2", ")", "v1", "=", "[", "i", "for", "i", "in", "range", "(", "n", "+", "1", ")", "]", "v2", "=", "[", "0", "for"...
Computes Levenshtein Distance between two words Args: :param w1: str :param w2: str :return: int Examples: >>> Levenshtein.Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 >>> Levenshtein.Levenshtein_Distance('orbis', 'robis') 2
[ "Computes", "Levenshtein", "Distance", "between", "two", "words" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/levenshtein.py#L16-L53
230,347
cltk/cltk
cltk/text_reuse/levenshtein.py
Levenshtein.Damerau_Levenshtein_Distance
def Damerau_Levenshtein_Distance(w1, w2): """ Computes Damerau-Levenshtein Distance between two words Args: :param w1: str :param w2: str :return int: Examples: For the most part, Damerau-Levenshtein behaves identically to Levenshtein: >>> Levenshtein.Damerau_Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 The strength of DL lies in detecting transposition of characters: >>> Levenshtein.Damerau_Levenshtein_Distance('orbis', 'robis') 1 """ # Define alphabet alph = sorted(list(set(w1 + w2))) # Calculate alphabet size alph_s = len(alph) dam_ar = [0 for _ in range(alph_s)] mat = [[0 for _ in range(len(w2) + 2)] for _ in range(len(w1) + 2)] max_dist = len(w1) + len(w2) mat[0][0] = max_dist # Initialize matrix margin to the maximum possible distance (essentially inf) for ease of calculations (avoiding try blocks) for i in range(1, len(w1) + 2): mat[i][0] = max_dist mat[i][1] = i - 1 for i in range(1, len(w2) + 2): mat[0][i] = max_dist mat[1][i] = i - 1 for i in range(2, len(w1) + 2): tem = 0 for j in range(2, len(w2) + 2): k = dam_ar[alph.index(w2[j - 2])] l = tem if w1[i - 2] == w2[j - 2]: cost = 0 tem = j else: cost = 1 # The reccurence relation of DL is identical to that of Levenshtein with the addition of transposition mat[i][j] = min(mat[i - 1][j - 1] + cost, mat[i][j - 1] + 1, mat[i - 1][j] + 1, mat[k - 1][l - 1] + i + j - k - l - 1) dam_ar[alph.index(w1[i - 2])] = i return mat[-1][-1]
python
def Damerau_Levenshtein_Distance(w1, w2): # Define alphabet alph = sorted(list(set(w1 + w2))) # Calculate alphabet size alph_s = len(alph) dam_ar = [0 for _ in range(alph_s)] mat = [[0 for _ in range(len(w2) + 2)] for _ in range(len(w1) + 2)] max_dist = len(w1) + len(w2) mat[0][0] = max_dist # Initialize matrix margin to the maximum possible distance (essentially inf) for ease of calculations (avoiding try blocks) for i in range(1, len(w1) + 2): mat[i][0] = max_dist mat[i][1] = i - 1 for i in range(1, len(w2) + 2): mat[0][i] = max_dist mat[1][i] = i - 1 for i in range(2, len(w1) + 2): tem = 0 for j in range(2, len(w2) + 2): k = dam_ar[alph.index(w2[j - 2])] l = tem if w1[i - 2] == w2[j - 2]: cost = 0 tem = j else: cost = 1 # The reccurence relation of DL is identical to that of Levenshtein with the addition of transposition mat[i][j] = min(mat[i - 1][j - 1] + cost, mat[i][j - 1] + 1, mat[i - 1][j] + 1, mat[k - 1][l - 1] + i + j - k - l - 1) dam_ar[alph.index(w1[i - 2])] = i return mat[-1][-1]
[ "def", "Damerau_Levenshtein_Distance", "(", "w1", ",", "w2", ")", ":", "# Define alphabet", "alph", "=", "sorted", "(", "list", "(", "set", "(", "w1", "+", "w2", ")", ")", ")", "# Calculate alphabet size", "alph_s", "=", "len", "(", "alph", ")", "dam_ar", ...
Computes Damerau-Levenshtein Distance between two words Args: :param w1: str :param w2: str :return int: Examples: For the most part, Damerau-Levenshtein behaves identically to Levenshtein: >>> Levenshtein.Damerau_Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 The strength of DL lies in detecting transposition of characters: >>> Levenshtein.Damerau_Levenshtein_Distance('orbis', 'robis') 1
[ "Computes", "Damerau", "-", "Levenshtein", "Distance", "between", "two", "words" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/levenshtein.py#L56-L121
230,348
cltk/cltk
cltk/utils/frequency.py
Frequency.counter_from_str
def counter_from_str(self, string): """Build word frequency list from incoming string.""" string_list = [chars for chars in string if chars not in self.punctuation] string_joined = ''.join(string_list) tokens = self.punkt.word_tokenize(string_joined) return Counter(tokens)
python
def counter_from_str(self, string): string_list = [chars for chars in string if chars not in self.punctuation] string_joined = ''.join(string_list) tokens = self.punkt.word_tokenize(string_joined) return Counter(tokens)
[ "def", "counter_from_str", "(", "self", ",", "string", ")", ":", "string_list", "=", "[", "chars", "for", "chars", "in", "string", "if", "chars", "not", "in", "self", ".", "punctuation", "]", "string_joined", "=", "''", ".", "join", "(", "string_list", "...
Build word frequency list from incoming string.
[ "Build", "word", "frequency", "list", "from", "incoming", "string", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/utils/frequency.py#L26-L31
230,349
cltk/cltk
cltk/utils/frequency.py
Frequency._assemble_corpus_string
def _assemble_corpus_string(self, corpus): """Takes a list of filepaths, returns a string containing contents of all files.""" if corpus == 'phi5': filepaths = assemble_phi5_author_filepaths() file_cleaner = phi5_plaintext_cleanup elif corpus == 'tlg': filepaths = assemble_tlg_author_filepaths() file_cleaner = tlg_plaintext_cleanup for filepath in filepaths: with open(filepath) as file_open: file_read = file_open.read().lower() file_clean = file_cleaner(file_read) yield file_clean
python
def _assemble_corpus_string(self, corpus): if corpus == 'phi5': filepaths = assemble_phi5_author_filepaths() file_cleaner = phi5_plaintext_cleanup elif corpus == 'tlg': filepaths = assemble_tlg_author_filepaths() file_cleaner = tlg_plaintext_cleanup for filepath in filepaths: with open(filepath) as file_open: file_read = file_open.read().lower() file_clean = file_cleaner(file_read) yield file_clean
[ "def", "_assemble_corpus_string", "(", "self", ",", "corpus", ")", ":", "if", "corpus", "==", "'phi5'", ":", "filepaths", "=", "assemble_phi5_author_filepaths", "(", ")", "file_cleaner", "=", "phi5_plaintext_cleanup", "elif", "corpus", "==", "'tlg'", ":", "filepat...
Takes a list of filepaths, returns a string containing contents of all files.
[ "Takes", "a", "list", "of", "filepaths", "returns", "a", "string", "containing", "contents", "of", "all", "files", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/utils/frequency.py#L43-L58
230,350
cltk/cltk
cltk/prosody/latin/string_utils.py
remove_punctuation_dict
def remove_punctuation_dict() -> Dict[int, None]: """ Provide a dictionary for removing punctuation, swallowing spaces. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... remove_punctuation_dict()).lstrip()) Im ok Oh Fine """ tmp = dict((i, None) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')) return tmp
python
def remove_punctuation_dict() -> Dict[int, None]: tmp = dict((i, None) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')) return tmp
[ "def", "remove_punctuation_dict", "(", ")", "->", "Dict", "[", "int", ",", "None", "]", ":", "tmp", "=", "dict", "(", "(", "i", ",", "None", ")", "for", "i", "in", "range", "(", "sys", ".", "maxunicode", ")", "if", "unicodedata", ".", "category", "...
Provide a dictionary for removing punctuation, swallowing spaces. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... remove_punctuation_dict()).lstrip()) Im ok Oh Fine
[ "Provide", "a", "dictionary", "for", "removing", "punctuation", "swallowing", "spaces", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L15-L27
230,351
cltk/cltk
cltk/prosody/latin/string_utils.py
punctuation_for_spaces_dict
def punctuation_for_spaces_dict() -> Dict[int, str]: """ Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion to keep stress patterns in alignment with original vowel positions in the verse. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... punctuation_for_spaces_dict()).strip()) I m ok Oh Fine """ return dict((i, " ") for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))
python
def punctuation_for_spaces_dict() -> Dict[int, str]: return dict((i, " ") for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))
[ "def", "punctuation_for_spaces_dict", "(", ")", "->", "Dict", "[", "int", ",", "str", "]", ":", "return", "dict", "(", "(", "i", ",", "\" \"", ")", "for", "i", "in", "range", "(", "sys", ".", "maxunicode", ")", "if", "unicodedata", ".", "category", "...
Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion to keep stress patterns in alignment with original vowel positions in the verse. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... punctuation_for_spaces_dict()).strip()) I m ok Oh Fine
[ "Provide", "a", "dictionary", "for", "removing", "punctuation", "keeping", "spaces", ".", "Essential", "for", "scansion", "to", "keep", "stress", "patterns", "in", "alignment", "with", "original", "vowel", "positions", "in", "the", "verse", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L30-L42
230,352
cltk/cltk
cltk/prosody/latin/string_utils.py
differences
def differences(scansion: str, candidate: str) -> List[int]: """ Given two strings, return a list of index positions where the contents differ. :param scansion: :param candidate: :return: >>> differences("abc", "abz") [2] """ before = scansion.replace(" ", "") after = candidate.replace(" ", "") diffs = [] for idx, tmp in enumerate(before): if before[idx] != after[idx]: diffs.append(idx) return diffs
python
def differences(scansion: str, candidate: str) -> List[int]: before = scansion.replace(" ", "") after = candidate.replace(" ", "") diffs = [] for idx, tmp in enumerate(before): if before[idx] != after[idx]: diffs.append(idx) return diffs
[ "def", "differences", "(", "scansion", ":", "str", ",", "candidate", ":", "str", ")", "->", "List", "[", "int", "]", ":", "before", "=", "scansion", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "after", "=", "candidate", ".", "replace", "(", "\" \...
Given two strings, return a list of index positions where the contents differ. :param scansion: :param candidate: :return: >>> differences("abc", "abz") [2]
[ "Given", "two", "strings", "return", "a", "list", "of", "index", "positions", "where", "the", "contents", "differ", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L45-L62
230,353
cltk/cltk
cltk/prosody/latin/string_utils.py
space_list
def space_list(line: str) -> List[int]: """ Given a string, return a list of index positions where a blank space occurs. :param line: :return: >>> space_list(" abc ") [0, 1, 2, 3, 7] """ spaces = [] for idx, car in enumerate(list(line)): if car == " ": spaces.append(idx) return spaces
python
def space_list(line: str) -> List[int]: spaces = [] for idx, car in enumerate(list(line)): if car == " ": spaces.append(idx) return spaces
[ "def", "space_list", "(", "line", ":", "str", ")", "->", "List", "[", "int", "]", ":", "spaces", "=", "[", "]", "for", "idx", ",", "car", "in", "enumerate", "(", "list", "(", "line", ")", ")", ":", "if", "car", "==", "\" \"", ":", "spaces", "."...
Given a string, return a list of index positions where a blank space occurs. :param line: :return: >>> space_list(" abc ") [0, 1, 2, 3, 7]
[ "Given", "a", "string", "return", "a", "list", "of", "index", "positions", "where", "a", "blank", "space", "occurs", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L82-L96
230,354
cltk/cltk
cltk/prosody/latin/string_utils.py
to_syllables_with_trailing_spaces
def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]: """ Given a line of syllables and spaces, and a list of syllables, produce a list of the syllables with trailing spaces attached as approriate. :param line: :param syllables: :return: >>> to_syllables_with_trailing_spaces(' arma virumque cano ', ... ['ar', 'ma', 'vi', 'rum', 'que', 'ca', 'no' ]) [' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no '] """ syllabs_spaces = [] idx = 0 linelen = len(line) for position, syl in enumerate(syllables): start = line.index(syl, idx) idx = start + len(syl) if position == 0 and start > 0: # line starts with punctuation, substituted w/ spaces syl = (start * " ") + syl if idx + 1 > len(line): syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[idx] if nextchar != " ": syllabs_spaces.append(syl) continue else: tmpidx = idx while tmpidx < linelen and nextchar == " ": syl += " " tmpidx += 1 if tmpidx == linelen: syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[tmpidx] idx = tmpidx - 1 syllabs_spaces.append(syl) return syllabs_spaces
python
def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]: syllabs_spaces = [] idx = 0 linelen = len(line) for position, syl in enumerate(syllables): start = line.index(syl, idx) idx = start + len(syl) if position == 0 and start > 0: # line starts with punctuation, substituted w/ spaces syl = (start * " ") + syl if idx + 1 > len(line): syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[idx] if nextchar != " ": syllabs_spaces.append(syl) continue else: tmpidx = idx while tmpidx < linelen and nextchar == " ": syl += " " tmpidx += 1 if tmpidx == linelen: syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[tmpidx] idx = tmpidx - 1 syllabs_spaces.append(syl) return syllabs_spaces
[ "def", "to_syllables_with_trailing_spaces", "(", "line", ":", "str", ",", "syllables", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "syllabs_spaces", "=", "[", "]", "idx", "=", "0", "linelen", "=", "len", "(", "line", ")", "...
Given a line of syllables and spaces, and a list of syllables, produce a list of the syllables with trailing spaces attached as approriate. :param line: :param syllables: :return: >>> to_syllables_with_trailing_spaces(' arma virumque cano ', ... ['ar', 'ma', 'vi', 'rum', 'que', 'ca', 'no' ]) [' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no ']
[ "Given", "a", "line", "of", "syllables", "and", "spaces", "and", "a", "list", "of", "syllables", "produce", "a", "list", "of", "the", "syllables", "with", "trailing", "spaces", "attached", "as", "approriate", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L112-L151
230,355
cltk/cltk
cltk/prosody/latin/string_utils.py
join_syllables_spaces
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str: """ Given a list of syllables, and a list of integers indicating the position of spaces, return a string that has a space inserted at the designated points. :param syllables: :param spaces: :return: >>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11]) 'won to tree dun' """ syllable_line = list("".join(syllables)) for space in spaces: syllable_line.insert(space, " ") return "".join(flatten(syllable_line))
python
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str: syllable_line = list("".join(syllables)) for space in spaces: syllable_line.insert(space, " ") return "".join(flatten(syllable_line))
[ "def", "join_syllables_spaces", "(", "syllables", ":", "List", "[", "str", "]", ",", "spaces", ":", "List", "[", "int", "]", ")", "->", "str", ":", "syllable_line", "=", "list", "(", "\"\"", ".", "join", "(", "syllables", ")", ")", "for", "space", "i...
Given a list of syllables, and a list of integers indicating the position of spaces, return a string that has a space inserted at the designated points. :param syllables: :param spaces: :return: >>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11]) 'won to tree dun'
[ "Given", "a", "list", "of", "syllables", "and", "a", "list", "of", "integers", "indicating", "the", "position", "of", "spaces", "return", "a", "string", "that", "has", "a", "space", "inserted", "at", "the", "designated", "points", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L154-L169
230,356
cltk/cltk
cltk/prosody/latin/string_utils.py
stress_positions
def stress_positions(stress: str, scansion: str) -> List[int]: """ Given a stress value and a scansion line, return the index positions of the stresses. :param stress: :param scansion: :return: >>> stress_positions("-", " - U U - UU - U U") [0, 3, 6] """ line = scansion.replace(" ", "") stresses = [] for idx, char in enumerate(line): if char == stress: stresses.append(idx) return stresses
python
def stress_positions(stress: str, scansion: str) -> List[int]: line = scansion.replace(" ", "") stresses = [] for idx, char in enumerate(line): if char == stress: stresses.append(idx) return stresses
[ "def", "stress_positions", "(", "stress", ":", "str", ",", "scansion", ":", "str", ")", "->", "List", "[", "int", "]", ":", "line", "=", "scansion", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "stresses", "=", "[", "]", "for", "idx", ",", "char...
Given a stress value and a scansion line, return the index positions of the stresses. :param stress: :param scansion: :return: >>> stress_positions("-", " - U U - UU - U U") [0, 3, 6]
[ "Given", "a", "stress", "value", "and", "a", "scansion", "line", "return", "the", "index", "positions", "of", "the", "stresses", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L187-L203
230,357
cltk/cltk
cltk/prosody/latin/string_utils.py
merge_elisions
def merge_elisions(elided: List[str]) -> str: """ Given a list of strings with different space swapping elisions applied, merge the elisions, taking the most without compounding the omissions. :param elided: :return: >>> merge_elisions([ ... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"]) 'ignav agua mult hiatus' """ results = list(elided[0]) for line in elided: for idx, car in enumerate(line): if car == " ": results[idx] = " " return "".join(results)
python
def merge_elisions(elided: List[str]) -> str: results = list(elided[0]) for line in elided: for idx, car in enumerate(line): if car == " ": results[idx] = " " return "".join(results)
[ "def", "merge_elisions", "(", "elided", ":", "List", "[", "str", "]", ")", "->", "str", ":", "results", "=", "list", "(", "elided", "[", "0", "]", ")", "for", "line", "in", "elided", ":", "for", "idx", ",", "car", "in", "enumerate", "(", "line", ...
Given a list of strings with different space swapping elisions applied, merge the elisions, taking the most without compounding the omissions. :param elided: :return: >>> merge_elisions([ ... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"]) 'ignav agua mult hiatus'
[ "Given", "a", "list", "of", "strings", "with", "different", "space", "swapping", "elisions", "applied", "merge", "the", "elisions", "taking", "the", "most", "without", "compounding", "the", "omissions", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L206-L223
230,358
cltk/cltk
cltk/prosody/latin/string_utils.py
move_consonant_right
def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the right, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_right(list("abbra"), [ 2, 3]) ['a', 'b', '', '', 'bra'] """ for pos in positions: letters[pos + 1] = letters[pos] + letters[pos + 1] letters[pos] = "" return letters
python
def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]: for pos in positions: letters[pos + 1] = letters[pos] + letters[pos + 1] letters[pos] = "" return letters
[ "def", "move_consonant_right", "(", "letters", ":", "List", "[", "str", "]", ",", "positions", ":", "List", "[", "int", "]", ")", "->", "List", "[", "str", "]", ":", "for", "pos", "in", "positions", ":", "letters", "[", "pos", "+", "1", "]", "=", ...
Given a list of letters, and a list of consonant positions, move the consonant positions to the right, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_right(list("abbra"), [ 2, 3]) ['a', 'b', '', '', 'bra']
[ "Given", "a", "list", "of", "letters", "and", "a", "list", "of", "consonant", "positions", "move", "the", "consonant", "positions", "to", "the", "right", "merging", "strings", "as", "necessary", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L226-L241
230,359
cltk/cltk
cltk/prosody/latin/string_utils.py
move_consonant_left
def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the left, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_left(['a', 'b', '', '', 'bra'], [1]) ['ab', '', '', '', 'bra'] """ for pos in positions: letters[pos - 1] = letters[pos - 1] + letters[pos] letters[pos] = "" return letters
python
def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]: for pos in positions: letters[pos - 1] = letters[pos - 1] + letters[pos] letters[pos] = "" return letters
[ "def", "move_consonant_left", "(", "letters", ":", "List", "[", "str", "]", ",", "positions", ":", "List", "[", "int", "]", ")", "->", "List", "[", "str", "]", ":", "for", "pos", "in", "positions", ":", "letters", "[", "pos", "-", "1", "]", "=", ...
Given a list of letters, and a list of consonant positions, move the consonant positions to the left, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_left(['a', 'b', '', '', 'bra'], [1]) ['ab', '', '', '', 'bra']
[ "Given", "a", "list", "of", "letters", "and", "a", "list", "of", "consonant", "positions", "move", "the", "consonant", "positions", "to", "the", "left", "merging", "strings", "as", "necessary", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L244-L259
230,360
cltk/cltk
cltk/prosody/latin/string_utils.py
merge_next
def merge_next(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letter positions, merge each letter with its next neighbor. :param letters: :param positions: :return: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2]) ['ab', '', 'ov', '', 'o'] >>> # Note: because it operates on the original list passed in, the effect is not cummulative: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2, 3]) ['ab', '', 'ov', 'o', ''] """ for pos in positions: letters[pos] = letters[pos] + letters[pos + 1] letters[pos + 1] = "" return letters
python
def merge_next(letters: List[str], positions: List[int]) -> List[str]: for pos in positions: letters[pos] = letters[pos] + letters[pos + 1] letters[pos + 1] = "" return letters
[ "def", "merge_next", "(", "letters", ":", "List", "[", "str", "]", ",", "positions", ":", "List", "[", "int", "]", ")", "->", "List", "[", "str", "]", ":", "for", "pos", "in", "positions", ":", "letters", "[", "pos", "]", "=", "letters", "[", "po...
Given a list of letter positions, merge each letter with its next neighbor. :param letters: :param positions: :return: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2]) ['ab', '', 'ov', '', 'o'] >>> # Note: because it operates on the original list passed in, the effect is not cummulative: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2, 3]) ['ab', '', 'ov', 'o', '']
[ "Given", "a", "list", "of", "letter", "positions", "merge", "each", "letter", "with", "its", "next", "neighbor", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L262-L279
230,361
cltk/cltk
cltk/prosody/latin/string_utils.py
remove_blanks
def remove_blanks(letters: List[str]): """ Given a list of letters, remove any empty strings. :param letters: :return: >>> remove_blanks(['a', '', 'b', '', 'c']) ['a', 'b', 'c'] """ cleaned = [] for letter in letters: if letter != "": cleaned.append(letter) return cleaned
python
def remove_blanks(letters: List[str]): cleaned = [] for letter in letters: if letter != "": cleaned.append(letter) return cleaned
[ "def", "remove_blanks", "(", "letters", ":", "List", "[", "str", "]", ")", ":", "cleaned", "=", "[", "]", "for", "letter", "in", "letters", ":", "if", "letter", "!=", "\"\"", ":", "cleaned", ".", "append", "(", "letter", ")", "return", "cleaned" ]
Given a list of letters, remove any empty strings. :param letters: :return: >>> remove_blanks(['a', '', 'b', '', 'c']) ['a', 'b', 'c']
[ "Given", "a", "list", "of", "letters", "remove", "any", "empty", "strings", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L282-L296
230,362
cltk/cltk
cltk/prosody/latin/string_utils.py
split_on
def split_on(word: str, section: str) -> Tuple[str, str]: """ Given a string, split on a section, and return the two sections as a tuple. :param word: :param section: :return: >>> split_on('hamrye', 'ham') ('ham', 'rye') """ return word[:word.index(section)] + section, word[word.index(section) + len(section):]
python
def split_on(word: str, section: str) -> Tuple[str, str]: return word[:word.index(section)] + section, word[word.index(section) + len(section):]
[ "def", "split_on", "(", "word", ":", "str", ",", "section", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "return", "word", "[", ":", "word", ".", "index", "(", "section", ")", "]", "+", "section", ",", "word", "[", "word", ...
Given a string, split on a section, and return the two sections as a tuple. :param word: :param section: :return: >>> split_on('hamrye', 'ham') ('ham', 'rye')
[ "Given", "a", "string", "split", "on", "a", "section", "and", "return", "the", "two", "sections", "as", "a", "tuple", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L299-L310
230,363
cltk/cltk
cltk/prosody/latin/string_utils.py
remove_blank_spaces
def remove_blank_spaces(syllables: List[str]) -> List[str]: """ Given a list of letters, remove any blank spaces or empty strings. :param syllables: :return: >>> remove_blank_spaces(['', 'a', ' ', 'b', ' ', 'c', '']) ['a', 'b', 'c'] """ cleaned = [] for syl in syllables: if syl == " " or syl == '': pass else: cleaned.append(syl) return cleaned
python
def remove_blank_spaces(syllables: List[str]) -> List[str]: cleaned = [] for syl in syllables: if syl == " " or syl == '': pass else: cleaned.append(syl) return cleaned
[ "def", "remove_blank_spaces", "(", "syllables", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "cleaned", "=", "[", "]", "for", "syl", "in", "syllables", ":", "if", "syl", "==", "\" \"", "or", "syl", "==", "''", ":", "pass",...
Given a list of letters, remove any blank spaces or empty strings. :param syllables: :return: >>> remove_blank_spaces(['', 'a', ' ', 'b', ' ', 'c', '']) ['a', 'b', 'c']
[ "Given", "a", "list", "of", "letters", "remove", "any", "blank", "spaces", "or", "empty", "strings", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L313-L329
230,364
cltk/cltk
cltk/prosody/latin/string_utils.py
overwrite
def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]: """ Given a list of characters and spaces, a matching regular expression, and a quality or character, replace the matching character with a space, overwriting with an offset and a multiplier if provided. :param char_list: :param regexp: :param quality: :param offset: :return: >>> overwrite(list('multe igne'), r'e\s[aeiou]', ' ') ['m', 'u', 'l', 't', ' ', ' ', 'i', 'g', 'n', 'e'] """ long_matcher = re.compile(regexp) line = "".join(char_list) long_positions = long_matcher.finditer(line) for match in long_positions: (start, end) = match.span() # pylint: disable=unused-variable char_list[start + offset] = quality return char_list
python
def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]: long_matcher = re.compile(regexp) line = "".join(char_list) long_positions = long_matcher.finditer(line) for match in long_positions: (start, end) = match.span() # pylint: disable=unused-variable char_list[start + offset] = quality return char_list
[ "def", "overwrite", "(", "char_list", ":", "List", "[", "str", "]", ",", "regexp", ":", "str", ",", "quality", ":", "str", ",", "offset", ":", "int", "=", "0", ")", "->", "List", "[", "str", "]", ":", "long_matcher", "=", "re", ".", "compile", "(...
Given a list of characters and spaces, a matching regular expression, and a quality or character, replace the matching character with a space, overwriting with an offset and a multiplier if provided. :param char_list: :param regexp: :param quality: :param offset: :return: >>> overwrite(list('multe igne'), r'e\s[aeiou]', ' ') ['m', 'u', 'l', 't', ' ', ' ', 'i', 'g', 'n', 'e']
[ "Given", "a", "list", "of", "characters", "and", "spaces", "a", "matching", "regular", "expression", "and", "a", "quality", "or", "character", "replace", "the", "matching", "character", "with", "a", "space", "overwriting", "with", "an", "offset", "and", "a", ...
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L332-L353
230,365
cltk/cltk
cltk/prosody/latin/string_utils.py
get_unstresses
def get_unstresses(stresses: List[int], count: int) -> List[int]: """ Given a list of stressed positions, and count of possible positions, return a list of the unstressed positions. :param stresses: a list of stressed positions :param count: the number of possible positions :return: a list of unstressed positions >>> get_unstresses([0, 3, 6, 9, 12, 15], 17) [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16] """ return list(set(range(count)) - set(stresses))
python
def get_unstresses(stresses: List[int], count: int) -> List[int]: return list(set(range(count)) - set(stresses))
[ "def", "get_unstresses", "(", "stresses", ":", "List", "[", "int", "]", ",", "count", ":", "int", ")", "->", "List", "[", "int", "]", ":", "return", "list", "(", "set", "(", "range", "(", "count", ")", ")", "-", "set", "(", "stresses", ")", ")" ]
Given a list of stressed positions, and count of possible positions, return a list of the unstressed positions. :param stresses: a list of stressed positions :param count: the number of possible positions :return: a list of unstressed positions >>> get_unstresses([0, 3, 6, 9, 12, 15], 17) [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16]
[ "Given", "a", "list", "of", "stressed", "positions", "and", "count", "of", "possible", "positions", "return", "a", "list", "of", "the", "unstressed", "positions", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/string_utils.py#L380-L392
230,366
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_strong_masculine_noun
def decline_strong_masculine_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong masculine nouns. >>> decline_strong_masculine_noun("armr", "arms", "armar") armr arm armi arms armar arma örmum arma # >>> decline_strong_masculine_noun("ketill", "ketils", "katlar") # ketill # ketil # katli # ketils # katlar # katla # kötlum # katla >>> decline_strong_masculine_noun("mór", "mós", "móar") mór mó mói mós móar móa móum móa >>> decline_strong_masculine_noun("hirðir", "hirðis", "hirðar") hirðir hirð hirði hirðis hirðar hirða hirðum hirða >>> decline_strong_masculine_noun("söngr", "söngs", "söngvar") söngr söng söngvi söngs söngvar söngva söngvum söngva >>> decline_strong_masculine_noun("gestr", "gests", "gestir") gestr gest gesti gests gestir gesti gestum gesta >>> decline_strong_masculine_noun("staðr", "staðar", "staðir") staðr stað staði staðar staðir staði stöðum staða # >>> decline_strong_masculine_noun("skjöldr", "skjaldar", "skildir") # skjöldr # skjöld # skildi # skjaldar # skildir # skjöldu # skjöldum # skjalda # # >>> decline_strong_masculine_noun("völlr", "vallar", "vellir") # völlr # völl # velli # vallar # vellir # völlu # völlum # valla # # >>> decline_strong_masculine_noun("fögnuðr", "fagnaðar", "fagnaðir") # fögnuðr # fögnuð # fagnaði # fagnaðar # fagnaðir # fögnuðu # fögnuðum # fagnaða a-stem armr, arm, armi, arms; armar, arma, örmum, arma ketill, ketil, katli, ketils; katlar, katla, kötlum, katla mór, mó, mó, mós; móar, móa, móm, móa hirðir, hirði, hirði, hirðis; hirðar, hirða, hirðum, hirða söngr, söng, söngvi, söngs; söngvar, söngva, söngvum, söngva i-stem gestr, gest, gest, gests; gestir, gesti, gestum, gesta staðr, stað stað, staðar; staðir, staði, stöðum, staða # u-stem # skjödr, skjöld, skildi, skjaldar; skildir, skjöldu, skjöldum, skjalda # völlr, völl, velli, vallar; vellir, völlu, völlum, valla # fögnuðr, fögnuð, fągnaði, fagnaðar; fagnaðir, fögnuðu, fögnuðum, fagnaða :param ns: nominative singular :param gs: genitive singular 
:param np: nominative plural :return: """ np_syl = s.syllabify_ssp(np) last_np_syl = np_syl[-1] if last_np_syl.endswith("ar"): # a-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular print(common_stem) # dative singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural if last_np_syl.endswith("ar"): print(np[:-1]) elif last_np_syl.endswith("ir"): print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a") elif last_np_syl.endswith("ir"): # if has_u_umlaut(ns): # # u-stem # common_stem = ns[:-1] # # # nominative singular # print(ns) # # # accusative singular # print(common_stem) # # # dative singular # if np[len(common_stem):][0] == "v": # print(common_stem + "vi") # else: # print(common_stem + "i") # # # genitive singular # print(gs) # # common_stem_p = np[:-2] # # nominative plural # print(np) # # # accusative plural # print(apply_u_umlaut(common_stem_p)+"u") # # # dative plural # if np[len(common_stem):][0] == "v": # print(apply_u_umlaut(common_stem_p) + "vum") # # elif np[len(common_stem):][0] == "j": # print(apply_u_umlaut(common_stem_p) + "jum") # else: # print(apply_u_umlaut(common_stem_p) + "um") # # # genitive plural # if np[len(common_stem):][0] == "v": # print(common_stem_p + "va") # elif np[len(common_stem):][0] == "j": # print(common_stem_p + "ja") # else: # print(common_stem_p + "a") # else: # i-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular print(common_stem) # dative 
singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a")
python
def decline_strong_masculine_noun(ns: str, gs: str, np: str): np_syl = s.syllabify_ssp(np) last_np_syl = np_syl[-1] if last_np_syl.endswith("ar"): # a-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular print(common_stem) # dative singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural if last_np_syl.endswith("ar"): print(np[:-1]) elif last_np_syl.endswith("ir"): print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a") elif last_np_syl.endswith("ir"): # if has_u_umlaut(ns): # # u-stem # common_stem = ns[:-1] # # # nominative singular # print(ns) # # # accusative singular # print(common_stem) # # # dative singular # if np[len(common_stem):][0] == "v": # print(common_stem + "vi") # else: # print(common_stem + "i") # # # genitive singular # print(gs) # # common_stem_p = np[:-2] # # nominative plural # print(np) # # # accusative plural # print(apply_u_umlaut(common_stem_p)+"u") # # # dative plural # if np[len(common_stem):][0] == "v": # print(apply_u_umlaut(common_stem_p) + "vum") # # elif np[len(common_stem):][0] == "j": # print(apply_u_umlaut(common_stem_p) + "jum") # else: # print(apply_u_umlaut(common_stem_p) + "um") # # # genitive plural # if np[len(common_stem):][0] == "v": # print(common_stem_p + "va") # elif np[len(common_stem):][0] == "j": # print(common_stem_p + "ja") # else: # print(common_stem_p + "a") # else: # i-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular 
print(common_stem) # dative singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a")
[ "def", "decline_strong_masculine_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "np_syl", "=", "s", ".", "syllabify_ssp", "(", "np", ")", "last_np_syl", "=", "np_syl", "[", "-", "1", "]", "if", "last_np_syl", "....
Gives the full declension of strong masculine nouns. >>> decline_strong_masculine_noun("armr", "arms", "armar") armr arm armi arms armar arma örmum arma # >>> decline_strong_masculine_noun("ketill", "ketils", "katlar") # ketill # ketil # katli # ketils # katlar # katla # kötlum # katla >>> decline_strong_masculine_noun("mór", "mós", "móar") mór mó mói mós móar móa móum móa >>> decline_strong_masculine_noun("hirðir", "hirðis", "hirðar") hirðir hirð hirði hirðis hirðar hirða hirðum hirða >>> decline_strong_masculine_noun("söngr", "söngs", "söngvar") söngr söng söngvi söngs söngvar söngva söngvum söngva >>> decline_strong_masculine_noun("gestr", "gests", "gestir") gestr gest gesti gests gestir gesti gestum gesta >>> decline_strong_masculine_noun("staðr", "staðar", "staðir") staðr stað staði staðar staðir staði stöðum staða # >>> decline_strong_masculine_noun("skjöldr", "skjaldar", "skildir") # skjöldr # skjöld # skildi # skjaldar # skildir # skjöldu # skjöldum # skjalda # # >>> decline_strong_masculine_noun("völlr", "vallar", "vellir") # völlr # völl # velli # vallar # vellir # völlu # völlum # valla # # >>> decline_strong_masculine_noun("fögnuðr", "fagnaðar", "fagnaðir") # fögnuðr # fögnuð # fagnaði # fagnaðar # fagnaðir # fögnuðu # fögnuðum # fagnaða a-stem armr, arm, armi, arms; armar, arma, örmum, arma ketill, ketil, katli, ketils; katlar, katla, kötlum, katla mór, mó, mó, mós; móar, móa, móm, móa hirðir, hirði, hirði, hirðis; hirðar, hirða, hirðum, hirða söngr, söng, söngvi, söngs; söngvar, söngva, söngvum, söngva i-stem gestr, gest, gest, gests; gestir, gesti, gestum, gesta staðr, stað stað, staðar; staðir, staði, stöðum, staða # u-stem # skjödr, skjöld, skildi, skjaldar; skildir, skjöldu, skjöldum, skjalda # völlr, völl, velli, vallar; vellir, völlu, völlum, valla # fögnuðr, fögnuð, fągnaði, fagnaðar; fagnaðir, fögnuðu, fögnuðum, fagnaða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "strong", "masculine", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L69-L328
230,367
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_strong_feminine_noun
def decline_strong_feminine_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong feminine nouns. o macron-stem Most of strong feminine nouns follows the declension of rún and för. >>> decline_strong_feminine_noun("rún", "rúnar", "rúnar") rún rún rún rúnar rúnar rúnar rúnum rúna >>> decline_strong_feminine_noun("för", "farar", "farar") för för för farar farar farar förum fara >>> decline_strong_feminine_noun("kerling", "kerlingar", "kerlingar") kerling kerling kerlingu kerlingar kerlingar kerlingar kerlingum kerlinga >>> decline_strong_feminine_noun("skel", "skeljar", "skeljar") skel skel skel skeljar skeljar skeljar skeljum skelja >>> decline_strong_feminine_noun("ör", "örvar", "örvar") ör ör ör örvar örvar örvar örum örva >>> decline_strong_feminine_noun("heiðr", "heiðar", "heiðar") heiðr heiði heiði heiðar heiðar heiðar heiðum heiða i-stem >>> decline_strong_feminine_noun("öxl", "axlar", "axlir") öxl öxl öxl axlar axlir axlir öxlum axla >>> decline_strong_feminine_noun("höfn", "hafnar", "hafnir") höfn höfn höfn hafnar hafnir hafnir höfnum hafna >>> decline_strong_feminine_noun("norn", "nornar", "nornir") norn norn norn nornar nornir nornir nornum norna >>> decline_strong_feminine_noun("jörð", "jarðar", "jarðir") jörð jörð jörð jarðar jarðir jarðir jörðum jarða >>> decline_strong_feminine_noun("borg", "borgar", "borgir") borg borg borgu borgar borgir borgir borgum borga :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") else: print(ns) # dative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") elif ns.endswith("ing") or ns.endswith("rg"): print(ns + "u") else: print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural # print("dative plural "+np[len(np[:-3]):][0]) if 
np[len(np[:-3]):][0] == "v": print(apply_u_umlaut(np[:-2])[:-1]+"um") elif np[len(np[:-3]):][0] == "j": print(apply_u_umlaut(np[:-2])+"um") else: print(apply_u_umlaut(np[:-2])+"um") # genitive plural print(np[:-2]+"a")
python
def decline_strong_feminine_noun(ns: str, gs: str, np: str): # nominative singular print(ns) # accusative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") else: print(ns) # dative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") elif ns.endswith("ing") or ns.endswith("rg"): print(ns + "u") else: print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural # print("dative plural "+np[len(np[:-3]):][0]) if np[len(np[:-3]):][0] == "v": print(apply_u_umlaut(np[:-2])[:-1]+"um") elif np[len(np[:-3]):][0] == "j": print(apply_u_umlaut(np[:-2])+"um") else: print(apply_u_umlaut(np[:-2])+"um") # genitive plural print(np[:-2]+"a")
[ "def", "decline_strong_feminine_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "# nominative singular", "print", "(", "ns", ")", "# accusative singular", "if", "len", "(", "ns", ")", ">", "2", "and", "ns", "[", "-...
Gives the full declension of strong feminine nouns. o macron-stem Most of strong feminine nouns follows the declension of rún and för. >>> decline_strong_feminine_noun("rún", "rúnar", "rúnar") rún rún rún rúnar rúnar rúnar rúnum rúna >>> decline_strong_feminine_noun("för", "farar", "farar") för för för farar farar farar förum fara >>> decline_strong_feminine_noun("kerling", "kerlingar", "kerlingar") kerling kerling kerlingu kerlingar kerlingar kerlingar kerlingum kerlinga >>> decline_strong_feminine_noun("skel", "skeljar", "skeljar") skel skel skel skeljar skeljar skeljar skeljum skelja >>> decline_strong_feminine_noun("ör", "örvar", "örvar") ör ör ör örvar örvar örvar örum örva >>> decline_strong_feminine_noun("heiðr", "heiðar", "heiðar") heiðr heiði heiði heiðar heiðar heiðar heiðum heiða i-stem >>> decline_strong_feminine_noun("öxl", "axlar", "axlir") öxl öxl öxl axlar axlir axlir öxlum axla >>> decline_strong_feminine_noun("höfn", "hafnar", "hafnir") höfn höfn höfn hafnar hafnir hafnir höfnum hafna >>> decline_strong_feminine_noun("norn", "nornar", "nornir") norn norn norn nornar nornir nornir nornum norna >>> decline_strong_feminine_noun("jörð", "jarðar", "jarðir") jörð jörð jörð jarðar jarðir jarðir jörðum jarða >>> decline_strong_feminine_noun("borg", "borgar", "borgir") borg borg borgu borgar borgir borgir borgum borga :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "strong", "feminine", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L331-L491
230,368
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_strong_neuter_noun
def decline_strong_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong neuter nouns. a-stem Most of strong neuter nouns follow the declensions of skip, land and herað. >>> decline_strong_neuter_noun("skip", "skips", "skip") skip skip skipi skips skip skip skipum skipa >>> decline_strong_neuter_noun("land", "lands", "lönd") land land landi lands lönd lönd löndum landa >>> decline_strong_neuter_noun("herað", "heraðs", "heruð") herað herað heraði heraðs heruð heruð heruðum heraða # >>> decline_strong_neuter_noun("kyn", "kyns", "kyn") # kyn # kyn # kyni # kyns # kyn # kyn # kynjum # kynja # # >>> decline_strong_neuter_noun("högg", "höggs", "högg") # högg # högg # höggvi # höggs # högg # högg # höggum # höggva >>> decline_strong_neuter_noun("kvæði", "kvæðis", "kvæði") kvæði kvæði kvæði kvæðis kvæði kvæði kvæðum kvæða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(ns) # dative singular if ns[-1] == "i": print(ns) # TODO +"vi" else: print(ns+"i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural if ns[-1] in CONSONANTS: print(apply_u_umlaut(np)+"um") else: print(apply_u_umlaut(np[:-1]) + "um") # TODO +"vum" # genitive plural if ns[-1] in CONSONANTS: print(ns+"a") # TODO + "va" else: print(ns[:-1]+"a")
python
def decline_strong_neuter_noun(ns: str, gs: str, np: str): # nominative singular print(ns) # accusative singular print(ns) # dative singular if ns[-1] == "i": print(ns) # TODO +"vi" else: print(ns+"i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural if ns[-1] in CONSONANTS: print(apply_u_umlaut(np)+"um") else: print(apply_u_umlaut(np[:-1]) + "um") # TODO +"vum" # genitive plural if ns[-1] in CONSONANTS: print(ns+"a") # TODO + "va" else: print(ns[:-1]+"a")
[ "def", "decline_strong_neuter_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "# nominative singular", "print", "(", "ns", ")", "# accusative singular", "print", "(", "ns", ")", "# dative singular", "if", "ns", "[", "-...
Gives the full declension of strong neuter nouns. a-stem Most of strong neuter nouns follow the declensions of skip, land and herað. >>> decline_strong_neuter_noun("skip", "skips", "skip") skip skip skipi skips skip skip skipum skipa >>> decline_strong_neuter_noun("land", "lands", "lönd") land land landi lands lönd lönd löndum landa >>> decline_strong_neuter_noun("herað", "heraðs", "heruð") herað herað heraði heraðs heruð heruð heruðum heraða # >>> decline_strong_neuter_noun("kyn", "kyns", "kyn") # kyn # kyn # kyni # kyns # kyn # kyn # kynjum # kynja # # >>> decline_strong_neuter_noun("högg", "höggs", "högg") # högg # högg # höggvi # höggs # högg # högg # höggum # höggva >>> decline_strong_neuter_noun("kvæði", "kvæðis", "kvæði") kvæði kvæði kvæði kvæðis kvæði kvæði kvæðum kvæða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "strong", "neuter", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L494-L601
230,369
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_weak_masculine_noun
def decline_weak_masculine_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak masculine nouns. >>> decline_weak_masculine_noun("goði", "goða", "goðar") goði goða goða goða goðar goða goðum goða >>> decline_weak_masculine_noun("hluti", "hluta", "hlutar") hluti hluta hluta hluta hlutar hluta hlutum hluta >>> decline_weak_masculine_noun("arfi", "arfa", "arfar") arfi arfa arfa arfa arfar arfa örfum arfa >>> decline_weak_masculine_noun("bryti", "bryta", "brytjar") bryti bryta bryta bryta brytjar brytja brytjum brytja >>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar") vöðvi vöðva vöðva vöðva vöðvar vöðva vöðum vöðva The main pattern is: :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if len(np) > 3 and np[-3] == "v": print(apply_u_umlaut(np[:-3]) + "um") else: print(apply_u_umlaut(np[:-2]) + "um") # genitive plural print(np[:-1])
python
def decline_weak_masculine_noun(ns: str, gs: str, np: str): # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if len(np) > 3 and np[-3] == "v": print(apply_u_umlaut(np[:-3]) + "um") else: print(apply_u_umlaut(np[:-2]) + "um") # genitive plural print(np[:-1])
[ "def", "decline_weak_masculine_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "# nominative singular", "print", "(", "ns", ")", "# accusative singular", "print", "(", "gs", ")", "# dative singular", "print", "(", "gs", ...
Gives the full declension of weak masculine nouns. >>> decline_weak_masculine_noun("goði", "goða", "goðar") goði goða goða goða goðar goða goðum goða >>> decline_weak_masculine_noun("hluti", "hluta", "hlutar") hluti hluta hluta hluta hlutar hluta hlutum hluta >>> decline_weak_masculine_noun("arfi", "arfa", "arfar") arfi arfa arfa arfa arfar arfa örfum arfa >>> decline_weak_masculine_noun("bryti", "bryta", "brytjar") bryti bryta bryta bryta brytjar brytja brytjum brytja >>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar") vöðvi vöðva vöðva vöðva vöðvar vöðva vöðum vöðva The main pattern is: :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "weak", "masculine", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L604-L690
230,370
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_weak_feminine_noun
def decline_weak_feminine_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak feminine nouns. >>> decline_weak_feminine_noun("saga", "sögu", "sögur") saga sögu sögu sögu sögur sögur sögum sagna >>> decline_weak_feminine_noun("kona", "konu", "konur") kona konu konu konu konur konur konum kvenna >>> decline_weak_feminine_noun("kirkja", "kirkju", "kirkjur") kirkja kirkju kirkju kirkju kirkjur kirkjur kirkjum kirkna >>> decline_weak_feminine_noun("völva", "völu", "völur") völva völu völu völu völur völur völum völna >>> decline_weak_feminine_noun("speki", "speki", "") speki speki speki speki >>> decline_weak_feminine_noun("reiði", "reiði", "") reiði reiði reiði reiði >>> decline_weak_feminine_noun("elli", "elli", "") elli elli elli elli >>> decline_weak_feminine_noun("frœði", "frœði", "") frœði frœði frœði frœði It is to note that the genitive plural of völva is not attested so the given form is analogously reconstructed. The main pattern is: -a -u -u -u -ur -ur -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ if ns[-1] == "i" and gs[-1] == "i" and not np: print(ns) print(ns) print(ns) print(ns) else: # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np[:-1]+"m") # genitive plural if ns == "kona": print("kvenna") elif ns[-2] == "v" or ns[-2] == "j": print(ns[:-2]+"na") else: print(ns[:-1]+"na")
python
def decline_weak_feminine_noun(ns: str, gs: str, np: str): if ns[-1] == "i" and gs[-1] == "i" and not np: print(ns) print(ns) print(ns) print(ns) else: # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np[:-1]+"m") # genitive plural if ns == "kona": print("kvenna") elif ns[-2] == "v" or ns[-2] == "j": print(ns[:-2]+"na") else: print(ns[:-1]+"na")
[ "def", "decline_weak_feminine_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "if", "ns", "[", "-", "1", "]", "==", "\"i\"", "and", "gs", "[", "-", "1", "]", "==", "\"i\"", "and", "not", "np", ":", "print",...
Gives the full declension of weak feminine nouns. >>> decline_weak_feminine_noun("saga", "sögu", "sögur") saga sögu sögu sögu sögur sögur sögum sagna >>> decline_weak_feminine_noun("kona", "konu", "konur") kona konu konu konu konur konur konum kvenna >>> decline_weak_feminine_noun("kirkja", "kirkju", "kirkjur") kirkja kirkju kirkju kirkju kirkjur kirkjur kirkjum kirkna >>> decline_weak_feminine_noun("völva", "völu", "völur") völva völu völu völu völur völur völum völna >>> decline_weak_feminine_noun("speki", "speki", "") speki speki speki speki >>> decline_weak_feminine_noun("reiði", "reiði", "") reiði reiði reiði reiði >>> decline_weak_feminine_noun("elli", "elli", "") elli elli elli elli >>> decline_weak_feminine_noun("frœði", "frœði", "") frœði frœði frœði frœði It is to note that the genitive plural of völva is not attested so the given form is analogously reconstructed. The main pattern is: -a -u -u -u -ur -ur -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "weak", "feminine", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L693-L814
230,371
cltk/cltk
cltk/inflection/old_norse/nouns.py
decline_weak_neuter_noun
def decline_weak_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak neuter nouns. >>> decline_weak_neuter_noun("auga", "auga", "augu") auga auga auga auga augu augu augum augna >>> decline_weak_neuter_noun("hjarta", "hjarta", "hjörtu") hjarta hjarta hjarta hjarta hjörtu hjörtu hjörtum hjartna >>> decline_weak_neuter_noun("lunga", "lunga", "lungu") lunga lunga lunga lunga lungu lungu lungum lungna >>> decline_weak_neuter_noun("eyra", "eyra", "eyru") eyra eyra eyra eyra eyru eyru eyrum eyrna The main pattern is: -a -a -a -a -u -u -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(ns) # dative singular print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np+"m") # genitive plural print(ns[:-1]+"na")
python
def decline_weak_neuter_noun(ns: str, gs: str, np: str): # nominative singular print(ns) # accusative singular print(ns) # dative singular print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np+"m") # genitive plural print(ns[:-1]+"na")
[ "def", "decline_weak_neuter_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "# nominative singular", "print", "(", "ns", ")", "# accusative singular", "print", "(", "ns", ")", "# dative singular", "print", "(", "ns", "...
Gives the full declension of weak neuter nouns. >>> decline_weak_neuter_noun("auga", "auga", "augu") auga auga auga auga augu augu augum augna >>> decline_weak_neuter_noun("hjarta", "hjarta", "hjörtu") hjarta hjarta hjarta hjarta hjörtu hjörtu hjörtum hjartna >>> decline_weak_neuter_noun("lunga", "lunga", "lungu") lunga lunga lunga lunga lungu lungu lungum lungna >>> decline_weak_neuter_noun("eyra", "eyra", "eyru") eyra eyra eyra eyra eyru eyru eyrum eyrna The main pattern is: -a -a -a -a -u -u -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "weak", "neuter", "nouns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L817-L898
230,372
cltk/cltk
cltk/corpus/greek/tlg/parse_tlg_indices.py
select_id_by_name
def select_id_by_name(query): """Do a case-insensitive regex match on author name, returns TLG id.""" id_author = get_id_author() comp = regex.compile(r'{}'.format(query.casefold()), flags=regex.VERSION1) matches = [] for _id, author in id_author.items(): match = comp.findall(author.casefold()) if match: matches.append((_id, author)) return matches
python
def select_id_by_name(query): id_author = get_id_author() comp = regex.compile(r'{}'.format(query.casefold()), flags=regex.VERSION1) matches = [] for _id, author in id_author.items(): match = comp.findall(author.casefold()) if match: matches.append((_id, author)) return matches
[ "def", "select_id_by_name", "(", "query", ")", ":", "id_author", "=", "get_id_author", "(", ")", "comp", "=", "regex", ".", "compile", "(", "r'{}'", ".", "format", "(", "query", ".", "casefold", "(", ")", ")", ",", "flags", "=", "regex", ".", "VERSION1...
Do a case-insensitive regex match on author name, returns TLG id.
[ "Do", "a", "case", "-", "insensitive", "regex", "match", "on", "author", "name", "returns", "TLG", "id", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/tlg/parse_tlg_indices.py#L105-L114
230,373
cltk/cltk
cltk/corpus/greek/tlg/parse_tlg_indices.py
get_date_of_author
def get_date_of_author(_id): """Pass author id and return the name of its associated date.""" _dict = get_date_author() for date, ids in _dict.items(): if _id in ids: return date return None
python
def get_date_of_author(_id): _dict = get_date_author() for date, ids in _dict.items(): if _id in ids: return date return None
[ "def", "get_date_of_author", "(", "_id", ")", ":", "_dict", "=", "get_date_author", "(", ")", "for", "date", ",", "ids", "in", "_dict", ".", "items", "(", ")", ":", "if", "_id", "in", "ids", ":", "return", "date", "return", "None" ]
Pass author id and return the name of its associated date.
[ "Pass", "author", "id", "and", "return", "the", "name", "of", "its", "associated", "date", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/tlg/parse_tlg_indices.py#L148-L154
230,374
cltk/cltk
cltk/corpus/greek/tlg/parse_tlg_indices.py
_get_epoch
def _get_epoch(_str): """Take incoming string, return its epoch.""" _return = None if _str.startswith('A.D. '): _return = 'ad' elif _str.startswith('a. A.D. '): _return = None #? elif _str.startswith('p. A.D. '): _return = 'ad' elif regex.match(r'^[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^a\. *[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^p\. *[0-9]+ B\.C\. *', _str): _return = None #? elif _str == 'Incertum' or _str == 'Varia': _return = _str return _return
python
def _get_epoch(_str): _return = None if _str.startswith('A.D. '): _return = 'ad' elif _str.startswith('a. A.D. '): _return = None #? elif _str.startswith('p. A.D. '): _return = 'ad' elif regex.match(r'^[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^a\. *[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^p\. *[0-9]+ B\.C\. *', _str): _return = None #? elif _str == 'Incertum' or _str == 'Varia': _return = _str return _return
[ "def", "_get_epoch", "(", "_str", ")", ":", "_return", "=", "None", "if", "_str", ".", "startswith", "(", "'A.D. '", ")", ":", "_return", "=", "'ad'", "elif", "_str", ".", "startswith", "(", "'a. A.D. '", ")", ":", "_return", "=", "None", "#?", "elif",...
Take incoming string, return its epoch.
[ "Take", "incoming", "string", "return", "its", "epoch", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/tlg/parse_tlg_indices.py#L157-L174
230,375
cltk/cltk
cltk/stem/akkadian/declension.py
NaiveDecliner.decline_noun
def decline_noun(self, noun, gender, mimation=True): """Return a list of all possible declined forms given any form of a noun and its gender.""" stem = self.stemmer.get_stem(noun, gender) declension = [] for case in self.endings[gender]['singular']: if gender == 'm': form = stem + self.endings[gender]['singular'][case] else: form = stem + self.endings[gender]['singular'][case][1:] declension.append((form, {'case': case, 'number': 'singular'})) for case in self.endings[gender]['dual']: if gender == 'm': form = stem + self.endings[gender]['dual'][case] else: form = stem + self.endings[gender]['dual'][case][1:] declension.append((form, {'case': case, 'number': 'dual'})) for case in self.endings[gender]['plural']: if gender == 'm': form = stem + self.endings[gender]['plural'][case] else: if stem[-3] in self.akkadian['macron_vowels']: theme_vowel = stem[-3] else: theme_vowel = 'ā' ending = [x for x in self.endings[gender]['plural'][case] if x[0] == theme_vowel] if stem[-2] in self.akkadian['short_vowels']: form = stem[:-2] + ending[0] elif stem[-1] in self.akkadian['consonants'] and stem[-2] in self.akkadian['macron_vowels']: form = stem + ending[0] else: form = stem[:-1] + ending[0] declension.append((form, {'case': case, 'number': 'plural'})) return declension
python
def decline_noun(self, noun, gender, mimation=True): stem = self.stemmer.get_stem(noun, gender) declension = [] for case in self.endings[gender]['singular']: if gender == 'm': form = stem + self.endings[gender]['singular'][case] else: form = stem + self.endings[gender]['singular'][case][1:] declension.append((form, {'case': case, 'number': 'singular'})) for case in self.endings[gender]['dual']: if gender == 'm': form = stem + self.endings[gender]['dual'][case] else: form = stem + self.endings[gender]['dual'][case][1:] declension.append((form, {'case': case, 'number': 'dual'})) for case in self.endings[gender]['plural']: if gender == 'm': form = stem + self.endings[gender]['plural'][case] else: if stem[-3] in self.akkadian['macron_vowels']: theme_vowel = stem[-3] else: theme_vowel = 'ā' ending = [x for x in self.endings[gender]['plural'][case] if x[0] == theme_vowel] if stem[-2] in self.akkadian['short_vowels']: form = stem[:-2] + ending[0] elif stem[-1] in self.akkadian['consonants'] and stem[-2] in self.akkadian['macron_vowels']: form = stem + ending[0] else: form = stem[:-1] + ending[0] declension.append((form, {'case': case, 'number': 'plural'})) return declension
[ "def", "decline_noun", "(", "self", ",", "noun", ",", "gender", ",", "mimation", "=", "True", ")", ":", "stem", "=", "self", ".", "stemmer", ".", "get_stem", "(", "noun", ",", "gender", ")", "declension", "=", "[", "]", "for", "case", "in", "self", ...
Return a list of all possible declined forms given any form of a noun and its gender.
[ "Return", "a", "list", "of", "all", "possible", "declined", "forms", "given", "any", "form", "of", "a", "noun", "and", "its", "gender", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/declension.py#L21-L54
230,376
cltk/cltk
cltk/stem/french/stem.py
stem
def stem(text): """make string lower-case""" text = text.lower() """Stem each word of the French text.""" stemmed_text = '' word_tokenizer = WordTokenizer('french') tokenized_text = word_tokenizer.tokenize(text) for word in tokenized_text: """remove the simple endings from the target word""" word, was_stemmed = matchremove_noun_endings(word) """if word didn't match the simple endings, try verb endings""" if not was_stemmed: word = matchremove_verb_endings(word) """add the stemmed word to the text""" stemmed_text += word + ' ' return stemmed_text
python
def stem(text): text = text.lower() """Stem each word of the French text.""" stemmed_text = '' word_tokenizer = WordTokenizer('french') tokenized_text = word_tokenizer.tokenize(text) for word in tokenized_text: """remove the simple endings from the target word""" word, was_stemmed = matchremove_noun_endings(word) """if word didn't match the simple endings, try verb endings""" if not was_stemmed: word = matchremove_verb_endings(word) """add the stemmed word to the text""" stemmed_text += word + ' ' return stemmed_text
[ "def", "stem", "(", "text", ")", ":", "text", "=", "text", ".", "lower", "(", ")", "\"\"\"Stem each word of the French text.\"\"\"", "stemmed_text", "=", "''", "word_tokenizer", "=", "WordTokenizer", "(", "'french'", ")", "tokenized_text", "=", "word_tokenizer", "...
make string lower-case
[ "make", "string", "lower", "-", "case" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/french/stem.py#L8-L25
230,377
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.transform_i_to_j_optional
def transform_i_to_j_optional(self, line: str) -> str: """ Sometimes for the demands of meter a more permissive i to j transformation is warranted. :param line: :return: >>> print(VerseScanner().transform_i_to_j_optional("Italiam")) Italjam >>> print(VerseScanner().transform_i_to_j_optional("Lāvīniaque")) Lāvīnjaque >>> print(VerseScanner().transform_i_to_j_optional("omnium")) omnjum """ words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append(self.syllabifier.convert_consonantal_i(prefix)) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix):])) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) # the following two may be tunable and subject to improvement char_list = string_utils.overwrite(list(new_line), "[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format( self.constants.VOWELS_WO_I), "j", 1) char_list = string_utils.overwrite(char_list, "[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I), "j", 1) return "".join(char_list)
python
def transform_i_to_j_optional(self, line: str) -> str: words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append(self.syllabifier.convert_consonantal_i(prefix)) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix):])) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) # the following two may be tunable and subject to improvement char_list = string_utils.overwrite(list(new_line), "[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format( self.constants.VOWELS_WO_I), "j", 1) char_list = string_utils.overwrite(char_list, "[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I), "j", 1) return "".join(char_list)
[ "def", "transform_i_to_j_optional", "(", "self", ",", "line", ":", "str", ")", "->", "str", ":", "words", "=", "line", ".", "split", "(", "\" \"", ")", "space_list", "=", "string_utils", ".", "space_list", "(", "line", ")", "corrected_words", "=", "[", "...
Sometimes for the demands of meter a more permissive i to j transformation is warranted. :param line: :return: >>> print(VerseScanner().transform_i_to_j_optional("Italiam")) Italjam >>> print(VerseScanner().transform_i_to_j_optional("Lāvīniaque")) Lāvīnjaque >>> print(VerseScanner().transform_i_to_j_optional("omnium")) omnjum
[ "Sometimes", "for", "the", "demands", "of", "meter", "a", "more", "permissive", "i", "to", "j", "transformation", "is", "warranted", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L90-L128
230,378
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.accent_by_position
def accent_by_position(self, verse_line: str) -> str: """ Accent vowels according to the rules of scansion. :param verse_line: a line of unaccented verse :return: the same line with vowels accented by position >>> print(VerseScanner().accent_by_position( ... "Arma virumque cano, Troiae qui primus ab oris").lstrip()) Ārma virūmque canō Trojae qui primus ab oris """ line = verse_line.translate(self.punctuation_substitutions) line = self.transform_i_to_j(line) marks = list(line) # locate and save dipthong positions since we don't want them being accented dipthong_positions = [] for dipth in self.constants.DIPTHONGS: if dipth in line: dipthong_positions.append(line.find(dipth)) # Vowels followed by 2 consonants # The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants. # see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm marks = string_utils.overwrite(marks, "[{}][{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # one space (or more for 'dropped' punctuation may intervene) marks = string_utils.overwrite(marks, r"[{}][{}]\s*[{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # ... if both consonants are in the next word, the vowel may be long # .... but it could be short if the vowel is not on the thesis/emphatic part of the foot # ... 
see Gildersleeve and Lodge p.446 marks = string_utils.overwrite(marks, r"[{}]\s*[{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # x is considered as two letters marks = string_utils.overwrite(marks, "[{}][xX]".format(self.constants.VOWELS), self.constants.STRESSED) # z is considered as two letters marks = string_utils.overwrite(marks, r"[{}][zZ]".format(self.constants.VOWELS), self.constants.STRESSED) original_verse = list(line) for idx, word in enumerate(original_verse): if marks[idx] == self.constants.STRESSED: original_verse[idx] = self.constants.VOWELS_TO_ACCENTS[original_verse[idx]] # make sure dipthongs aren't accented for idx in dipthong_positions: if original_verse[idx + 1] in self.constants.ACCENTS_TO_VOWELS: original_verse[idx + 1] = self.constants.ACCENTS_TO_VOWELS[original_verse[idx + 1]] return "".join(original_verse)
python
def accent_by_position(self, verse_line: str) -> str: line = verse_line.translate(self.punctuation_substitutions) line = self.transform_i_to_j(line) marks = list(line) # locate and save dipthong positions since we don't want them being accented dipthong_positions = [] for dipth in self.constants.DIPTHONGS: if dipth in line: dipthong_positions.append(line.find(dipth)) # Vowels followed by 2 consonants # The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants. # see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm marks = string_utils.overwrite(marks, "[{}][{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # one space (or more for 'dropped' punctuation may intervene) marks = string_utils.overwrite(marks, r"[{}][{}]\s*[{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # ... if both consonants are in the next word, the vowel may be long # .... but it could be short if the vowel is not on the thesis/emphatic part of the foot # ... 
see Gildersleeve and Lodge p.446 marks = string_utils.overwrite(marks, r"[{}]\s*[{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # x is considered as two letters marks = string_utils.overwrite(marks, "[{}][xX]".format(self.constants.VOWELS), self.constants.STRESSED) # z is considered as two letters marks = string_utils.overwrite(marks, r"[{}][zZ]".format(self.constants.VOWELS), self.constants.STRESSED) original_verse = list(line) for idx, word in enumerate(original_verse): if marks[idx] == self.constants.STRESSED: original_verse[idx] = self.constants.VOWELS_TO_ACCENTS[original_verse[idx]] # make sure dipthongs aren't accented for idx in dipthong_positions: if original_verse[idx + 1] in self.constants.ACCENTS_TO_VOWELS: original_verse[idx + 1] = self.constants.ACCENTS_TO_VOWELS[original_verse[idx + 1]] return "".join(original_verse)
[ "def", "accent_by_position", "(", "self", ",", "verse_line", ":", "str", ")", "->", "str", ":", "line", "=", "verse_line", ".", "translate", "(", "self", ".", "punctuation_substitutions", ")", "line", "=", "self", ".", "transform_i_to_j", "(", "line", ")", ...
Accent vowels according to the rules of scansion. :param verse_line: a line of unaccented verse :return: the same line with vowels accented by position >>> print(VerseScanner().accent_by_position( ... "Arma virumque cano, Troiae qui primus ab oris").lstrip()) Ārma virūmque canō Trojae qui primus ab oris
[ "Accent", "vowels", "according", "to", "the", "rules", "of", "scansion", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L130-L192
230,379
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.calc_offset
def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]: """ Calculate a dictionary of accent positions from a list of syllables with spaces. :param syllables_spaces: :return: """ line = string_utils.flatten(syllables_spaces) mydict = {} # type: Dict[int, int] # #defaultdict(int) #type: Dict[int, int] for idx, syl in enumerate(syllables_spaces): target_syllable = syllables_spaces[idx] skip_qu = string_utils.starts_with_qu(target_syllable) matches = list(self.syllable_matcher.finditer(target_syllable)) for position, possible in enumerate(matches): if skip_qu: skip_qu = False continue (start, end) = possible.span() if target_syllable[start:end] in \ self.constants.VOWELS + self.constants.ACCENTED_VOWELS: part = line[:len("".join(syllables_spaces[:idx]))] offset = len(part) + start if line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS: LOG.error("Problem at line {} offset {}".format(line, offset)) mydict[idx] = offset return mydict
python
def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]: line = string_utils.flatten(syllables_spaces) mydict = {} # type: Dict[int, int] # #defaultdict(int) #type: Dict[int, int] for idx, syl in enumerate(syllables_spaces): target_syllable = syllables_spaces[idx] skip_qu = string_utils.starts_with_qu(target_syllable) matches = list(self.syllable_matcher.finditer(target_syllable)) for position, possible in enumerate(matches): if skip_qu: skip_qu = False continue (start, end) = possible.span() if target_syllable[start:end] in \ self.constants.VOWELS + self.constants.ACCENTED_VOWELS: part = line[:len("".join(syllables_spaces[:idx]))] offset = len(part) + start if line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS: LOG.error("Problem at line {} offset {}".format(line, offset)) mydict[idx] = offset return mydict
[ "def", "calc_offset", "(", "self", ",", "syllables_spaces", ":", "List", "[", "str", "]", ")", "->", "Dict", "[", "int", ",", "int", "]", ":", "line", "=", "string_utils", ".", "flatten", "(", "syllables_spaces", ")", "mydict", "=", "{", "}", "# type: ...
Calculate a dictionary of accent positions from a list of syllables with spaces. :param syllables_spaces: :return:
[ "Calculate", "a", "dictionary", "of", "accent", "positions", "from", "a", "list", "of", "syllables", "with", "spaces", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L218-L244
230,380
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.produce_scansion
def produce_scansion(self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int]) -> str: """ Create a scansion string that has stressed and unstressed syllable positions in locations that correspond with the original texts syllable vowels. :param stresses list of syllable positions :param syllables_wspaces list of syllables with spaces escaped for punctuation or elision :param offset_map dictionary of syllable positions, and an offset amount which is the number of spaces to skip in the original line before inserting the accent. """ scansion = list(" " * len(string_utils.flatten(syllables_wspaces))) unstresses = string_utils.get_unstresses(stresses, len(syllables_wspaces)) try: for idx in unstresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.UNSTRESSED for idx in stresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.STRESSED except Exception as e: LOG.error("problem with syllables; check syllabification {}, {}".format( syllables_wspaces, e)) return "".join(scansion)
python
def produce_scansion(self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int]) -> str: scansion = list(" " * len(string_utils.flatten(syllables_wspaces))) unstresses = string_utils.get_unstresses(stresses, len(syllables_wspaces)) try: for idx in unstresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.UNSTRESSED for idx in stresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.STRESSED except Exception as e: LOG.error("problem with syllables; check syllabification {}, {}".format( syllables_wspaces, e)) return "".join(scansion)
[ "def", "produce_scansion", "(", "self", ",", "stresses", ":", "list", ",", "syllables_wspaces", ":", "List", "[", "str", "]", ",", "offset_map", ":", "Dict", "[", "int", ",", "int", "]", ")", "->", "str", ":", "scansion", "=", "list", "(", "\" \"", "...
Create a scansion string that has stressed and unstressed syllable positions in locations that correspond with the original texts syllable vowels. :param stresses list of syllable positions :param syllables_wspaces list of syllables with spaces escaped for punctuation or elision :param offset_map dictionary of syllable positions, and an offset amount which is the number of spaces to skip in the original line before inserting the accent.
[ "Create", "a", "scansion", "string", "that", "has", "stressed", "and", "unstressed", "syllable", "positions", "in", "locations", "that", "correspond", "with", "the", "original", "texts", "syllable", "vowels", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L246-L271
230,381
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.flag_dipthongs
def flag_dipthongs(self, syllables: List[str]) -> List[int]: """ Return a list of syllables that contain a dipthong :param syllables: :return: """ long_positions = [] for idx, syl in enumerate(syllables): for dipthong in self.constants.DIPTHONGS: if dipthong in syllables[idx]: if not string_utils.starts_with_qu(syllables[idx]): long_positions.append(idx) return long_positions
python
def flag_dipthongs(self, syllables: List[str]) -> List[int]: long_positions = [] for idx, syl in enumerate(syllables): for dipthong in self.constants.DIPTHONGS: if dipthong in syllables[idx]: if not string_utils.starts_with_qu(syllables[idx]): long_positions.append(idx) return long_positions
[ "def", "flag_dipthongs", "(", "self", ",", "syllables", ":", "List", "[", "str", "]", ")", "->", "List", "[", "int", "]", ":", "long_positions", "=", "[", "]", "for", "idx", ",", "syl", "in", "enumerate", "(", "syllables", ")", ":", "for", "dipthong"...
Return a list of syllables that contain a dipthong :param syllables: :return:
[ "Return", "a", "list", "of", "syllables", "that", "contain", "a", "dipthong" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L273-L286
230,382
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.elide
def elide(self, line: str, regexp: str, quantity: int = 1, offset: int = 0) -> str: """ Erase a section of a line, matching on a regex, pushing in a quantity of blank spaces, and jumping forward with an offset if necessary. If the elided vowel was strong, the vowel merged with takes on the stress. :param line: :param regexp: :param quantity: :param offset: :return: >>> print(VerseScanner().elide("uvae avaritia", r"[e]\s*[a]")) uv āvaritia >>> print(VerseScanner().elide("mare avaritia", r"[e]\s*[a]")) mar avaritia """ matcher = re.compile(regexp) positions = matcher.finditer(line) new_line = line for match in positions: (start, end) = match.span() # pylint: disable=unused-variable if (start > 0) and new_line[start - 1: start + 1] in self.constants.DIPTHONGS: vowel_to_coerce = new_line[end - 1] new_line = new_line[:(start - 1) + offset] + (" " * (quantity + 2)) + \ self.constants.stress_accent_dict[vowel_to_coerce] + new_line[end:] else: new_line = new_line[:start + offset] + \ (" " * quantity) + new_line[start + quantity + offset:] return new_line
python
def elide(self, line: str, regexp: str, quantity: int = 1, offset: int = 0) -> str: matcher = re.compile(regexp) positions = matcher.finditer(line) new_line = line for match in positions: (start, end) = match.span() # pylint: disable=unused-variable if (start > 0) and new_line[start - 1: start + 1] in self.constants.DIPTHONGS: vowel_to_coerce = new_line[end - 1] new_line = new_line[:(start - 1) + offset] + (" " * (quantity + 2)) + \ self.constants.stress_accent_dict[vowel_to_coerce] + new_line[end:] else: new_line = new_line[:start + offset] + \ (" " * quantity) + new_line[start + quantity + offset:] return new_line
[ "def", "elide", "(", "self", ",", "line", ":", "str", ",", "regexp", ":", "str", ",", "quantity", ":", "int", "=", "1", ",", "offset", ":", "int", "=", "0", ")", "->", "str", ":", "matcher", "=", "re", ".", "compile", "(", "regexp", ")", "posit...
Erase a section of a line, matching on a regex, pushing in a quantity of blank spaces, and jumping forward with an offset if necessary. If the elided vowel was strong, the vowel merged with takes on the stress. :param line: :param regexp: :param quantity: :param offset: :return: >>> print(VerseScanner().elide("uvae avaritia", r"[e]\s*[a]")) uv āvaritia >>> print(VerseScanner().elide("mare avaritia", r"[e]\s*[a]")) mar avaritia
[ "Erase", "a", "section", "of", "a", "line", "matching", "on", "a", "regex", "pushing", "in", "a", "quantity", "of", "blank", "spaces", "and", "jumping", "forward", "with", "an", "offset", "if", "necessary", ".", "If", "the", "elided", "vowel", "was", "st...
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L288-L317
230,383
cltk/cltk
cltk/prosody/latin/verse_scanner.py
VerseScanner.assign_candidate
def assign_candidate(self, verse: Verse, candidate: str) -> Verse: """ Helper method; make sure that the verse object is properly packaged. :param verse: :param candidate: :return: """ verse.scansion = candidate verse.valid = True verse.accented = self.formatter.merge_line_scansion( verse.original, verse.scansion) return verse
python
def assign_candidate(self, verse: Verse, candidate: str) -> Verse: verse.scansion = candidate verse.valid = True verse.accented = self.formatter.merge_line_scansion( verse.original, verse.scansion) return verse
[ "def", "assign_candidate", "(", "self", ",", "verse", ":", "Verse", ",", "candidate", ":", "str", ")", "->", "Verse", ":", "verse", ".", "scansion", "=", "candidate", "verse", ".", "valid", "=", "True", "verse", ".", "accented", "=", "self", ".", "form...
Helper method; make sure that the verse object is properly packaged. :param verse: :param candidate: :return:
[ "Helper", "method", ";", "make", "sure", "that", "the", "verse", "object", "is", "properly", "packaged", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L372-L384
230,384
cltk/cltk
cltk/stem/latin/declension.py
CollatinusDecliner.__getRoots
def __getRoots(self, lemma, model=None): """ Retrieve the known roots of a lemma :param lemma: Canonical form of the word (lemma) :type lemma: str :param model_roots: Model data from the loaded self.__data__. Can be passed by decline() :type model_roots: dict :return: Dictionary of roots with their root identifier as key :rtype: dict """ if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) ROOT_IDS = { "K": "lemma", "1": "geninf", "2": "perf" } lemma_entry = self.__lemmas__[lemma] original_roots = { root_id: lemma_entry[root_name].split(",") for root_id, root_name in ROOT_IDS.items() if root_id != "K" and lemma_entry[root_name] } returned_roots = {} if not model: model = self.__models__[lemma_entry["model"]] # For each registered root in the model, for model_root_id, model_root_data in model["R"].items(): # If we have K, it's equivalent to canonical form if model_root_data[0] == "K": returned_roots[model_root_id] = [lemma_entry["lemma"]] # Otherwise we have deletion number and addition char else: deletion, addition = int(model_root_data[0]), model_root_data[1] or "" # If a the root is declared already, # we retrieve the information if model_root_id != "1" and model_root_id in returned_roots: lemma_roots = returned_roots[model_root_id] else: lemma_roots = lemma_entry["lemma"].split(",") # We construct the roots returned_roots[model_root_id] = [ lemma_root[:-deletion] + addition for lemma_root in lemma_roots ] original_roots.update(returned_roots) return original_roots
python
def __getRoots(self, lemma, model=None): if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) ROOT_IDS = { "K": "lemma", "1": "geninf", "2": "perf" } lemma_entry = self.__lemmas__[lemma] original_roots = { root_id: lemma_entry[root_name].split(",") for root_id, root_name in ROOT_IDS.items() if root_id != "K" and lemma_entry[root_name] } returned_roots = {} if not model: model = self.__models__[lemma_entry["model"]] # For each registered root in the model, for model_root_id, model_root_data in model["R"].items(): # If we have K, it's equivalent to canonical form if model_root_data[0] == "K": returned_roots[model_root_id] = [lemma_entry["lemma"]] # Otherwise we have deletion number and addition char else: deletion, addition = int(model_root_data[0]), model_root_data[1] or "" # If a the root is declared already, # we retrieve the information if model_root_id != "1" and model_root_id in returned_roots: lemma_roots = returned_roots[model_root_id] else: lemma_roots = lemma_entry["lemma"].split(",") # We construct the roots returned_roots[model_root_id] = [ lemma_root[:-deletion] + addition for lemma_root in lemma_roots ] original_roots.update(returned_roots) return original_roots
[ "def", "__getRoots", "(", "self", ",", "lemma", ",", "model", "=", "None", ")", ":", "if", "lemma", "not", "in", "self", ".", "__lemmas__", ":", "raise", "UnknownLemma", "(", "\"%s is unknown\"", "%", "lemma", ")", "ROOT_IDS", "=", "{", "\"K\"", ":", "...
Retrieve the known roots of a lemma :param lemma: Canonical form of the word (lemma) :type lemma: str :param model_roots: Model data from the loaded self.__data__. Can be passed by decline() :type model_roots: dict :return: Dictionary of roots with their root identifier as key :rtype: dict
[ "Retrieve", "the", "known", "roots", "of", "a", "lemma" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/latin/declension.py#L57-L111
230,385
cltk/cltk
cltk/stem/latin/declension.py
CollatinusDecliner.decline
def decline(self, lemma, flatten=False, collatinus_dict=False): """ Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participle and adjective. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If sets to True, Dictionary of grammatically valid forms, including variants, with keys\ corresponding to morpho informations. :type collatinus_dict: bool :return: List of tuple where first value is the form and second the pos, ie [("sum", "v1ppip---")] :rtype: list or dict """ if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) # Get data information lemma_entry = self.__lemmas__[lemma] model = self.__models__[lemma_entry["model"]] # Get the roots roots = self.__getRoots(lemma, model=model) # Get the known forms in order keys = sorted([int(key) for key in model["des"].keys()]) forms_data = [(key, model["des"][str(key)]) for key in keys] # Generate the return dict forms = {key: [] for key in keys} for key, form_list in forms_data: for form in form_list: root_id, endings = tuple(form) for root in roots[root_id]: for ending in endings: forms[key].append(root + ending) # sufd means we have the original forms of the parent but we add a suffix if len(model["sufd"]): # For each constant form1 for key, iter_forms in forms.items(): new_forms = [] # We add the constant suffix for sufd in model["sufd"]: new_forms += [form+sufd for form in iter_forms] forms[key] = new_forms # If we need a secure version of the forms. 
For example, if we have variants if len(model["suf"]): cached_forms = {k: v+[] for k, v in forms.items()} # Making cache without using copy # For each suffix # The format is [suffix characters, [modified forms]] for suffixes in model["suf"]: suffix, modified_forms = suffixes[0], suffixes[1] for modified_form in modified_forms: forms[modified_form] += [f+suffix for f in cached_forms[modified_form]] # We update with the new roots # If some form do not exist, we delete them prehentively if len(model["abs"]): for abs_form in model["abs"]: if abs_form in forms: del forms[abs_form] if flatten: return list([form for case_forms in forms.values() for form in case_forms]) elif collatinus_dict: return forms else: return list( [(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms] )
python
def decline(self, lemma, flatten=False, collatinus_dict=False): if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) # Get data information lemma_entry = self.__lemmas__[lemma] model = self.__models__[lemma_entry["model"]] # Get the roots roots = self.__getRoots(lemma, model=model) # Get the known forms in order keys = sorted([int(key) for key in model["des"].keys()]) forms_data = [(key, model["des"][str(key)]) for key in keys] # Generate the return dict forms = {key: [] for key in keys} for key, form_list in forms_data: for form in form_list: root_id, endings = tuple(form) for root in roots[root_id]: for ending in endings: forms[key].append(root + ending) # sufd means we have the original forms of the parent but we add a suffix if len(model["sufd"]): # For each constant form1 for key, iter_forms in forms.items(): new_forms = [] # We add the constant suffix for sufd in model["sufd"]: new_forms += [form+sufd for form in iter_forms] forms[key] = new_forms # If we need a secure version of the forms. For example, if we have variants if len(model["suf"]): cached_forms = {k: v+[] for k, v in forms.items()} # Making cache without using copy # For each suffix # The format is [suffix characters, [modified forms]] for suffixes in model["suf"]: suffix, modified_forms = suffixes[0], suffixes[1] for modified_form in modified_forms: forms[modified_form] += [f+suffix for f in cached_forms[modified_form]] # We update with the new roots # If some form do not exist, we delete them prehentively if len(model["abs"]): for abs_form in model["abs"]: if abs_form in forms: del forms[abs_form] if flatten: return list([form for case_forms in forms.values() for form in case_forms]) elif collatinus_dict: return forms else: return list( [(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms] )
[ "def", "decline", "(", "self", ",", "lemma", ",", "flatten", "=", "False", ",", "collatinus_dict", "=", "False", ")", ":", "if", "lemma", "not", "in", "self", ".", "__lemmas__", ":", "raise", "UnknownLemma", "(", "\"%s is unknown\"", "%", "lemma", ")", "...
Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participle and adjective. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If sets to True, Dictionary of grammatically valid forms, including variants, with keys\ corresponding to morpho informations. :type collatinus_dict: bool :return: List of tuple where first value is the form and second the pos, ie [("sum", "v1ppip---")] :rtype: list or dict
[ "Decline", "a", "lemma" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/latin/declension.py#L113-L189
230,386
cltk/cltk
cltk/ir/query.py
_sentence_context
def _sentence_context(match, language='latin', case_insensitive=True): """Take one incoming regex match object and return the sentence in which the match occurs. :rtype : str :param match: regex.match :param language: str """ language_punct = {'greek': r'\.|;', 'latin': r'\.|\?|!'} assert language in language_punct.keys(), \ 'Available punctuation schemes: {}'.format(language_punct.keys()) start = match.start() end = match.end() window = 1000 snippet_left = match.string[start - window:start + 1] snippet_right = match.string[end:end + window] re_match = match.string[match.start():match.end()] comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1) # Left left_punct = [] for punct in comp_sent_boundary.finditer(snippet_left): end = punct.end() left_punct.append(end) try: last_period = left_punct.pop() + 1 except IndexError: last_period = 0 # Right right_punct = [] for punct in comp_sent_boundary.finditer(snippet_right): end = punct.end() right_punct.append(end) try: first_period = right_punct.pop(0) except IndexError: first_period = 0 sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period] return sentence
python
def _sentence_context(match, language='latin', case_insensitive=True): language_punct = {'greek': r'\.|;', 'latin': r'\.|\?|!'} assert language in language_punct.keys(), \ 'Available punctuation schemes: {}'.format(language_punct.keys()) start = match.start() end = match.end() window = 1000 snippet_left = match.string[start - window:start + 1] snippet_right = match.string[end:end + window] re_match = match.string[match.start():match.end()] comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1) # Left left_punct = [] for punct in comp_sent_boundary.finditer(snippet_left): end = punct.end() left_punct.append(end) try: last_period = left_punct.pop() + 1 except IndexError: last_period = 0 # Right right_punct = [] for punct in comp_sent_boundary.finditer(snippet_right): end = punct.end() right_punct.append(end) try: first_period = right_punct.pop(0) except IndexError: first_period = 0 sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period] return sentence
[ "def", "_sentence_context", "(", "match", ",", "language", "=", "'latin'", ",", "case_insensitive", "=", "True", ")", ":", "language_punct", "=", "{", "'greek'", ":", "r'\\.|;'", ",", "'latin'", ":", "r'\\.|\\?|!'", "}", "assert", "language", "in", "language_p...
Take one incoming regex match object and return the sentence in which the match occurs. :rtype : str :param match: regex.match :param language: str
[ "Take", "one", "incoming", "regex", "match", "object", "and", "return", "the", "sentence", "in", "which", "the", "match", "occurs", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/ir/query.py#L40-L85
230,387
cltk/cltk
cltk/ir/query.py
match_regex
def match_regex(input_str, pattern, language, context, case_insensitive=True): """Take input string and a regex pattern, then yield generator of matches in desired format. TODO: Rename this `match_pattern` and incorporate the keyword expansion code currently in search_corpus. :param input_str: :param pattern: :param language: :param context: Integer or 'sentence' 'paragraph' :rtype : str """ if type(context) is str: contexts = ['sentence', 'paragraph'] assert context in contexts or type(context) is int, 'Available contexts: {}'.format(contexts) else: context = int(context) for match in _regex_span(pattern, input_str, case_insensitive=case_insensitive): if context == 'sentence': yield _sentence_context(match, language) elif context == 'paragraph': yield _paragraph_context(match) else: yield _window_match(match, context)
python
def match_regex(input_str, pattern, language, context, case_insensitive=True): if type(context) is str: contexts = ['sentence', 'paragraph'] assert context in contexts or type(context) is int, 'Available contexts: {}'.format(contexts) else: context = int(context) for match in _regex_span(pattern, input_str, case_insensitive=case_insensitive): if context == 'sentence': yield _sentence_context(match, language) elif context == 'paragraph': yield _paragraph_context(match) else: yield _window_match(match, context)
[ "def", "match_regex", "(", "input_str", ",", "pattern", ",", "language", ",", "context", ",", "case_insensitive", "=", "True", ")", ":", "if", "type", "(", "context", ")", "is", "str", ":", "contexts", "=", "[", "'sentence'", ",", "'paragraph'", "]", "as...
Take input string and a regex pattern, then yield generator of matches in desired format. TODO: Rename this `match_pattern` and incorporate the keyword expansion code currently in search_corpus. :param input_str: :param pattern: :param language: :param context: Integer or 'sentence' 'paragraph' :rtype : str
[ "Take", "input", "string", "and", "a", "regex", "pattern", "then", "yield", "generator", "of", "matches", "in", "desired", "format", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/ir/query.py#L153-L177
230,388
cltk/cltk
cltk/text_reuse/automata.py
make_worlist_trie
def make_worlist_trie(wordlist): """ Creates a nested dictionary representing the trie created by the given word list. :param wordlist: str list: :return: nested dictionary >>> make_worlist_trie(['einander', 'einen', 'neben']) {'e': {'i': {'n': {'a': {'n': {'d': {'e': {'r': {'__end__': '__end__'}}}}}, 'e': {'n': {'__end__': '__end__'}}}}}, 'n': {'e': {'b': {'e': {'n': {'__end__': '__end__'}}}}}} """ dicts = dict() for w in wordlist: curr = dicts for l in w: curr = curr.setdefault(l, {}) curr['__end__'] = '__end__' return dicts
python
def make_worlist_trie(wordlist): dicts = dict() for w in wordlist: curr = dicts for l in w: curr = curr.setdefault(l, {}) curr['__end__'] = '__end__' return dicts
[ "def", "make_worlist_trie", "(", "wordlist", ")", ":", "dicts", "=", "dict", "(", ")", "for", "w", "in", "wordlist", ":", "curr", "=", "dicts", "for", "l", "in", "w", ":", "curr", "=", "curr", ".", "setdefault", "(", "l", ",", "{", "}", ")", "cur...
Creates a nested dictionary representing the trie created by the given word list. :param wordlist: str list: :return: nested dictionary >>> make_worlist_trie(['einander', 'einen', 'neben']) {'e': {'i': {'n': {'a': {'n': {'d': {'e': {'r': {'__end__': '__end__'}}}}}, 'e': {'n': {'__end__': '__end__'}}}}}, 'n': {'e': {'b': {'e': {'n': {'__end__': '__end__'}}}}}}
[ "Creates", "a", "nested", "dictionary", "representing", "the", "trie", "created", "by", "the", "given", "word", "list", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/automata.py#L592-L612
230,389
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.is_valid_hendecasyllables
def is_valid_hendecasyllables(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U")) True """ line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 11: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_HENDECASYLLABLES.__contains__(line)
python
def is_valid_hendecasyllables(self, scanned_line: str) -> bool: line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 11: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_HENDECASYLLABLES.__contains__(line)
[ "def", "is_valid_hendecasyllables", "(", "self", ",", "scanned_line", ":", "str", ")", "->", "bool", ":", "line", "=", "scanned_line", ".", "replace", "(", "self", ".", "constants", ".", "FOOT_SEPARATOR", ",", "\"\"", ")", "line", "=", "line", ".", "replac...
Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U")) True
[ "Determine", "if", "a", "scansion", "pattern", "is", "one", "of", "the", "valid", "Hendecasyllables", "metrical", "patterns" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L36-L50
230,390
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.is_valid_pentameter
def is_valid_pentameter(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Pentameter metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool: whether or not the scansion is a valid pentameter >>> print(MetricalValidator().is_valid_pentameter('-UU-UU--UU-UUX')) True """ line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 10: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_PENTAMETERS.__contains__(line)
python
def is_valid_pentameter(self, scanned_line: str) -> bool: line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 10: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return self.VALID_PENTAMETERS.__contains__(line)
[ "def", "is_valid_pentameter", "(", "self", ",", "scanned_line", ":", "str", ")", "->", "bool", ":", "line", "=", "scanned_line", ".", "replace", "(", "self", ".", "constants", ".", "FOOT_SEPARATOR", ",", "\"\"", ")", "line", "=", "line", ".", "replace", ...
Determine if a scansion pattern is one of the valid Pentameter metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool: whether or not the scansion is a valid pentameter >>> print(MetricalValidator().is_valid_pentameter('-UU-UU--UU-UUX')) True
[ "Determine", "if", "a", "scansion", "pattern", "is", "one", "of", "the", "valid", "Pentameter", "metrical", "patterns" ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L52-L66
230,391
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.hexameter_feet
def hexameter_feet(self, scansion: str) -> List[str]: """ Produces a list of hexameter feet, stressed and unstressed syllables with spaces intact. If the scansion line is not entirely correct, it will attempt to corral one or more improper patterns into one or more feet. :param: scansion: the scanned line :return list of strings, representing the feet of the hexameter, or if the scansion is wildly incorrect, the function will return an empty list. >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - - - - - - U U - U")).strip() ) - U U |- - |- - |- - |- U U |- U >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - U - - - - U U - U")).strip()) - U U |- - |U - |- - |- U U |- U """ backwards_scan = list(scansion.rstrip()) feet = [] candidates = [self.constants.STRESSED + self.constants.OPTIONAL_ENDING, self.constants.STRESSED + self.constants.STRESSED, self.constants.STRESSED + self.constants.UNSTRESSED, self.constants.UNSTRESSED + self.constants.STRESSED] incomplete_foot = self.constants.UNSTRESSED + self.constants.UNSTRESSED try: while len(backwards_scan) > 0: spaces = [] chunk1 = backwards_scan.pop() while len("".join(chunk1).replace(" ", "")) == 0: if len(backwards_scan) == 0: feet.append(chunk1) return feet[::-1] chunk1 = backwards_scan.pop() + "".join(chunk1) chunk2 = backwards_scan.pop() while chunk2 == " ": spaces.append(chunk2) if len(backwards_scan) == 0: feet.append(chunk2) return feet[::-1] chunk2 = backwards_scan.pop() new_candidate = "".join(chunk2) + "".join(spaces) + "".join(chunk1) if new_candidate.replace(" ", "") in candidates: feet.append(new_candidate) else: if new_candidate.replace(" ", "") == incomplete_foot: spaces2 = [] previous_mark = backwards_scan.pop() while previous_mark == " ": spaces2.append(previous_mark) previous_mark = backwards_scan.pop() if previous_mark == self.constants.STRESSED: new_candidate = "".join(previous_mark) + "".join( spaces2) + new_candidate feet.append(new_candidate) else: 
feet.append(new_candidate) # invalid foot spaces3 = [] next_mark = backwards_scan.pop() while next_mark == " ": spaces3.append(previous_mark) next_mark = backwards_scan.pop() feet.append("".join(next_mark) + "".join(spaces3) + previous_mark) except Exception as ex: LOG.error("err at: {}, {}".format(scansion, ex)) return list() return feet[::-1]
python
def hexameter_feet(self, scansion: str) -> List[str]: backwards_scan = list(scansion.rstrip()) feet = [] candidates = [self.constants.STRESSED + self.constants.OPTIONAL_ENDING, self.constants.STRESSED + self.constants.STRESSED, self.constants.STRESSED + self.constants.UNSTRESSED, self.constants.UNSTRESSED + self.constants.STRESSED] incomplete_foot = self.constants.UNSTRESSED + self.constants.UNSTRESSED try: while len(backwards_scan) > 0: spaces = [] chunk1 = backwards_scan.pop() while len("".join(chunk1).replace(" ", "")) == 0: if len(backwards_scan) == 0: feet.append(chunk1) return feet[::-1] chunk1 = backwards_scan.pop() + "".join(chunk1) chunk2 = backwards_scan.pop() while chunk2 == " ": spaces.append(chunk2) if len(backwards_scan) == 0: feet.append(chunk2) return feet[::-1] chunk2 = backwards_scan.pop() new_candidate = "".join(chunk2) + "".join(spaces) + "".join(chunk1) if new_candidate.replace(" ", "") in candidates: feet.append(new_candidate) else: if new_candidate.replace(" ", "") == incomplete_foot: spaces2 = [] previous_mark = backwards_scan.pop() while previous_mark == " ": spaces2.append(previous_mark) previous_mark = backwards_scan.pop() if previous_mark == self.constants.STRESSED: new_candidate = "".join(previous_mark) + "".join( spaces2) + new_candidate feet.append(new_candidate) else: feet.append(new_candidate) # invalid foot spaces3 = [] next_mark = backwards_scan.pop() while next_mark == " ": spaces3.append(previous_mark) next_mark = backwards_scan.pop() feet.append("".join(next_mark) + "".join(spaces3) + previous_mark) except Exception as ex: LOG.error("err at: {}, {}".format(scansion, ex)) return list() return feet[::-1]
[ "def", "hexameter_feet", "(", "self", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "backwards_scan", "=", "list", "(", "scansion", ".", "rstrip", "(", ")", ")", "feet", "=", "[", "]", "candidates", "=", "[", "self", ".", "c...
Produces a list of hexameter feet, stressed and unstressed syllables with spaces intact. If the scansion line is not entirely correct, it will attempt to corral one or more improper patterns into one or more feet. :param: scansion: the scanned line :return list of strings, representing the feet of the hexameter, or if the scansion is wildly incorrect, the function will return an empty list. >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - - - - - - U U - U")).strip() ) - U U |- - |- - |- - |- U U |- U >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - U - - - - U U - U")).strip()) - U U |- - |U - |- - |- U U |- U
[ "Produces", "a", "list", "of", "hexameter", "feet", "stressed", "and", "unstressed", "syllables", "with", "spaces", "intact", ".", "If", "the", "scansion", "line", "is", "not", "entirely", "correct", "it", "will", "attempt", "to", "corral", "one", "or", "mor...
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L74-L139
230,392
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.closest_hexameter_patterns
def closest_hexameter_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid hexameter patterns. :return: list of the closest valid hexameter patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--')) ['-UU-UU-----UU--'] """ return self._closest_patterns(self.VALID_HEXAMETERS, scansion)
python
def closest_hexameter_patterns(self, scansion: str) -> List[str]: return self._closest_patterns(self.VALID_HEXAMETERS, scansion)
[ "def", "closest_hexameter_patterns", "(", "self", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "_closest_patterns", "(", "self", ".", "VALID_HEXAMETERS", ",", "scansion", ")" ]
Find the closest group of matching valid hexameter patterns. :return: list of the closest valid hexameter patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--')) ['-UU-UU-----UU--']
[ "Find", "the", "closest", "group", "of", "matching", "valid", "hexameter", "patterns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L157-L167
230,393
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.closest_pentameter_patterns
def closest_pentameter_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid pentameter patterns. :return: list of the closest valid pentameter patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_pentameter_patterns('--UUU--UU-UUX')) ['---UU--UU-UUX'] """ return self._closest_patterns(self.VALID_PENTAMETERS, scansion)
python
def closest_pentameter_patterns(self, scansion: str) -> List[str]: return self._closest_patterns(self.VALID_PENTAMETERS, scansion)
[ "def", "closest_pentameter_patterns", "(", "self", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "_closest_patterns", "(", "self", ".", "VALID_PENTAMETERS", ",", "scansion", ")" ]
Find the closest group of matching valid pentameter patterns. :return: list of the closest valid pentameter patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_pentameter_patterns('--UUU--UU-UUX')) ['---UU--UU-UUX']
[ "Find", "the", "closest", "group", "of", "matching", "valid", "pentameter", "patterns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L178-L188
230,394
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator.closest_hendecasyllable_patterns
def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]: """ Find the closest group of matching valid hendecasyllable patterns. :return: list of the closest valid hendecasyllable patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_hendecasyllable_patterns('UU-UU-U-U-X')) ['-U-UU-U-U-X', 'U--UU-U-U-X'] """ return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion)
python
def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]: return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion)
[ "def", "closest_hendecasyllable_patterns", "(", "self", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "_closest_patterns", "(", "self", ".", "VALID_HENDECASYLLABLES", ",", "scansion", ")" ]
Find the closest group of matching valid hendecasyllable patterns. :return: list of the closest valid hendecasyllable patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_hendecasyllable_patterns('UU-UU-U-U-X')) ['-U-UU-U-U-X', 'U--UU-U-U-X']
[ "Find", "the", "closest", "group", "of", "matching", "valid", "hendecasyllable", "patterns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L190-L200
230,395
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator._closest_patterns
def _closest_patterns(self, patterns: List[str], scansion: str) -> List[str]: """ Find the closest group of matching valid patterns. :patterns: a list of patterns :scansion: the scansion pattern thus far :return: list of the closest valid patterns; only candidates with a matching length/number of syllables are considered. """ pattern = scansion.replace(" ", "") pattern = pattern.replace(self.constants.FOOT_SEPARATOR, "") ending = pattern[-1] candidate = pattern[:len(pattern) - 1] + self.constants.OPTIONAL_ENDING cans = [(distance(candidate, x), x) for x in patterns if len(x) == len(candidate)] if cans: cans = sorted(cans, key=lambda tup: tup[0]) top = cans[0][0] return [can[1][:-1] + ending for can in cans if can[0] == top] return []
python
def _closest_patterns(self, patterns: List[str], scansion: str) -> List[str]: pattern = scansion.replace(" ", "") pattern = pattern.replace(self.constants.FOOT_SEPARATOR, "") ending = pattern[-1] candidate = pattern[:len(pattern) - 1] + self.constants.OPTIONAL_ENDING cans = [(distance(candidate, x), x) for x in patterns if len(x) == len(candidate)] if cans: cans = sorted(cans, key=lambda tup: tup[0]) top = cans[0][0] return [can[1][:-1] + ending for can in cans if can[0] == top] return []
[ "def", "_closest_patterns", "(", "self", ",", "patterns", ":", "List", "[", "str", "]", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "pattern", "=", "scansion", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "pattern", "=", ...
Find the closest group of matching valid patterns. :patterns: a list of patterns :scansion: the scansion pattern thus far :return: list of the closest valid patterns; only candidates with a matching length/number of syllables are considered.
[ "Find", "the", "closest", "group", "of", "matching", "valid", "patterns", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L202-L221
230,396
cltk/cltk
cltk/prosody/latin/metrical_validator.py
MetricalValidator._build_pentameter_templates
def _build_pentameter_templates(self) -> List[str]: """Create pentameter templates.""" return [ # '-UU|-UU|-|-UU|-UU|X' self.constants.DACTYL + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '-UU|--|-|-UU|-UU|X' self.constants.DACTYL + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|-UU|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|--|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING]
python
def _build_pentameter_templates(self) -> List[str]: return [ # '-UU|-UU|-|-UU|-UU|X' self.constants.DACTYL + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '-UU|--|-|-UU|-UU|X' self.constants.DACTYL + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|-UU|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.DACTYL + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING, # '--|--|-|-UU|-UU|X' self.constants.SPONDEE + self.constants.SPONDEE + self.constants.STRESSED + self.constants.DACTYL + self.constants.DACTYL + self.constants.OPTIONAL_ENDING]
[ "def", "_build_pentameter_templates", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "[", "# '-UU|-UU|-|-UU|-UU|X'", "self", ".", "constants", ".", "DACTYL", "+", "self", ".", "constants", ".", "DACTYL", "+", "self", ".", "constants", ".", ...
Create pentameter templates.
[ "Create", "pentameter", "templates", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L260-L277
230,397
cltk/cltk
cltk/stem/lemma.py
LemmaReplacer._load_replacement_patterns
def _load_replacement_patterns(self): """Check for availability of lemmatizer for a language.""" if self.language == 'latin': warnings.warn( "LemmaReplacer is deprecated and will soon be removed from CLTK. Please use the BackoffLatinLemmatizer at cltk.lemmatize.latin.backoff.", DeprecationWarning, stacklevel=2) rel_path = os.path.join('~','cltk_data', self.language, 'model','latin_models_cltk', 'lemmata','latin_lemmata_cltk.py') path = os.path.expanduser(rel_path) #logger.info('Loading lemmata. This may take a minute.') loader = importlib.machinery.SourceFileLoader('latin_lemmata_cltk', path) elif self.language == 'greek': rel_path = os.path.join('~','cltk_data', self.language, 'model','greek_models_cltk', 'lemmata','greek_lemmata_cltk.py') path = os.path.expanduser(rel_path) #logger.info('Loading lemmata. This may take a minute.') loader = importlib.machinery.SourceFileLoader('greek_lemmata_cltk', path) module = loader.load_module() lemmata = module.LEMMATA return lemmata
python
def _load_replacement_patterns(self): if self.language == 'latin': warnings.warn( "LemmaReplacer is deprecated and will soon be removed from CLTK. Please use the BackoffLatinLemmatizer at cltk.lemmatize.latin.backoff.", DeprecationWarning, stacklevel=2) rel_path = os.path.join('~','cltk_data', self.language, 'model','latin_models_cltk', 'lemmata','latin_lemmata_cltk.py') path = os.path.expanduser(rel_path) #logger.info('Loading lemmata. This may take a minute.') loader = importlib.machinery.SourceFileLoader('latin_lemmata_cltk', path) elif self.language == 'greek': rel_path = os.path.join('~','cltk_data', self.language, 'model','greek_models_cltk', 'lemmata','greek_lemmata_cltk.py') path = os.path.expanduser(rel_path) #logger.info('Loading lemmata. This may take a minute.') loader = importlib.machinery.SourceFileLoader('greek_lemmata_cltk', path) module = loader.load_module() lemmata = module.LEMMATA return lemmata
[ "def", "_load_replacement_patterns", "(", "self", ")", ":", "if", "self", ".", "language", "==", "'latin'", ":", "warnings", ".", "warn", "(", "\"LemmaReplacer is deprecated and will soon be removed from CLTK. Please use the BackoffLatinLemmatizer at cltk.lemmatize.latin.backoff.\"...
Check for availability of lemmatizer for a language.
[ "Check", "for", "availability", "of", "lemmatizer", "for", "a", "language", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/lemma.py#L27-L52
230,398
cltk/cltk
cltk/text_reuse/comparison.py
Needleman_Wunsch
def Needleman_Wunsch(w1, w2, d=-1, alphabet = "abcdefghijklmnopqrstuvwxyz", S = Default_Matrix(26, 1, -1) ): """ Computes allignment using Needleman-Wunsch algorithm. The alphabet parameter is used for specifying the alphabetical order of the similarity matrix. Similarity matrix is initialized to an unweighted matrix that returns 1 for match and -1 for substitution. Args: :param w1: str :param w2: str :param d: int/float :param alphabet: str :param S: list :return: str tuple Examples: NW calculates the optimal string alignment based on a weighted matrix M. By default, an unweighted similarity matrix is used to represent substitution cost (1 if match, -1 otherwise). >>> Needleman_Wunsch('piscis', 'pesce') ('piscis', 'pesc-e') You can also define your own alphabet and matrix >>> Needleman_Wunsch('pescare', 'piscia', alphabet = "aceiprs", S = Default_Matrix(7, 1, -1)) ('pesc-are', 'piscia--') Clearly, a weighted matrix should be used over the default one if linguistic accuracy is desired. The Matrix can be defined manually through matching of manners of articulation or stochastically by detecting the most common substitutions. A simple example follows: First define the similarity matrix >>> M = Default_Matrix(7, 1, -1) We know want to increase the score for matching a to i. >>> M[0][3] = 0.8 >>> M[3][0] = 0.8 >>> Needleman_Wunsch('pescare', 'piscia', alphabet = "aceiprs", S = M) ('pescare', 'pisci-a') """ #S must be a square matrix matching the length of your alphabet if len(S) != len(alphabet) or len(S[0])!= len(alphabet): raise AssertionError("Unexpected dimensions of Similarity matrix, S." 
" S must be a n by n square matrix, where n is the" " length of your predefined alphabet") m,n = len(w1), len(w2) F = [[0 for i in range(n+1)] for j in range(m+1)] for i in range(m+1): F[i][0] = d*i for i in range(n+1): F[0][i] = d*i #F[i][j] is given by the reccurence relation F[i][j] = max(F[i-1][j-1] + S(A[i],B[i]), F[i][j-1] + d, F[i-1][j] + d) #Where S the similarity matrix and d the gap penalty for i in range(1,m+1): for j in range(1,n+1): F[i][j] = max(F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])], F[i-1][j] + d,F[i][j-1] + d) A1, A2 = "", "" i, j = m, n #Since F[n][m] gives the maximum score, we can now reconstruct the alignment by determining whether the optimal move #is a match, insertion or deletion while i>0 or j>0: if i>0 and j>0 and F[i][j] == F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])]: A1 = w1[i-1] + A1 A2 = w2[j-1] + A2 i -= 1 j -= 1 elif i>0 and F[i][j] == F[i-1][j] + d: A1 = w1[i-1] + A1 A2 = "-" + A2 i -= 1 else: A1 = "-" + A1 A2 = w2[j-1] + A2 j -= 1 return (A1, A2)
python
def Needleman_Wunsch(w1, w2, d=-1, alphabet = "abcdefghijklmnopqrstuvwxyz", S = Default_Matrix(26, 1, -1) ): #S must be a square matrix matching the length of your alphabet if len(S) != len(alphabet) or len(S[0])!= len(alphabet): raise AssertionError("Unexpected dimensions of Similarity matrix, S." " S must be a n by n square matrix, where n is the" " length of your predefined alphabet") m,n = len(w1), len(w2) F = [[0 for i in range(n+1)] for j in range(m+1)] for i in range(m+1): F[i][0] = d*i for i in range(n+1): F[0][i] = d*i #F[i][j] is given by the reccurence relation F[i][j] = max(F[i-1][j-1] + S(A[i],B[i]), F[i][j-1] + d, F[i-1][j] + d) #Where S the similarity matrix and d the gap penalty for i in range(1,m+1): for j in range(1,n+1): F[i][j] = max(F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])], F[i-1][j] + d,F[i][j-1] + d) A1, A2 = "", "" i, j = m, n #Since F[n][m] gives the maximum score, we can now reconstruct the alignment by determining whether the optimal move #is a match, insertion or deletion while i>0 or j>0: if i>0 and j>0 and F[i][j] == F[i-1][j-1] + S[alphabet.index(w1[i-1])][alphabet.index(w2[j-1])]: A1 = w1[i-1] + A1 A2 = w2[j-1] + A2 i -= 1 j -= 1 elif i>0 and F[i][j] == F[i-1][j] + d: A1 = w1[i-1] + A1 A2 = "-" + A2 i -= 1 else: A1 = "-" + A1 A2 = w2[j-1] + A2 j -= 1 return (A1, A2)
[ "def", "Needleman_Wunsch", "(", "w1", ",", "w2", ",", "d", "=", "-", "1", ",", "alphabet", "=", "\"abcdefghijklmnopqrstuvwxyz\"", ",", "S", "=", "Default_Matrix", "(", "26", ",", "1", ",", "-", "1", ")", ")", ":", "#S must be a square matrix matching the len...
Computes allignment using Needleman-Wunsch algorithm. The alphabet parameter is used for specifying the alphabetical order of the similarity matrix. Similarity matrix is initialized to an unweighted matrix that returns 1 for match and -1 for substitution. Args: :param w1: str :param w2: str :param d: int/float :param alphabet: str :param S: list :return: str tuple Examples: NW calculates the optimal string alignment based on a weighted matrix M. By default, an unweighted similarity matrix is used to represent substitution cost (1 if match, -1 otherwise). >>> Needleman_Wunsch('piscis', 'pesce') ('piscis', 'pesc-e') You can also define your own alphabet and matrix >>> Needleman_Wunsch('pescare', 'piscia', alphabet = "aceiprs", S = Default_Matrix(7, 1, -1)) ('pesc-are', 'piscia--') Clearly, a weighted matrix should be used over the default one if linguistic accuracy is desired. The Matrix can be defined manually through matching of manners of articulation or stochastically by detecting the most common substitutions. A simple example follows: First define the similarity matrix >>> M = Default_Matrix(7, 1, -1) We know want to increase the score for matching a to i. >>> M[0][3] = 0.8 >>> M[3][0] = 0.8 >>> Needleman_Wunsch('pescare', 'piscia', alphabet = "aceiprs", S = M) ('pescare', 'pisci-a')
[ "Computes", "allignment", "using", "Needleman", "-", "Wunsch", "algorithm", ".", "The", "alphabet", "parameter", "is", "used", "for", "specifying", "the", "alphabetical", "order", "of", "the", "similarity", "matrix", ".", "Similarity", "matrix", "is", "initialized...
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/comparison.py#L147-L241
230,399
cltk/cltk
cltk/corpus/akkadian/cdli_corpus.py
CDLICorpus.toc
def toc(self): """ Returns a rich list of texts in the catalog. """ output = [] for key in sorted(self.catalog.keys()): edition = self.catalog[key]['edition'] length = len(self.catalog[key]['transliteration']) output.append( "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format( key=key, edition=edition, length=length)) return output
python
def toc(self): output = [] for key in sorted(self.catalog.keys()): edition = self.catalog[key]['edition'] length = len(self.catalog[key]['transliteration']) output.append( "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format( key=key, edition=edition, length=length)) return output
[ "def", "toc", "(", "self", ")", ":", "output", "=", "[", "]", "for", "key", "in", "sorted", "(", "self", ".", "catalog", ".", "keys", "(", ")", ")", ":", "edition", "=", "self", ".", "catalog", "[", "key", "]", "[", "'edition'", "]", "length", ...
Returns a rich list of texts in the catalog.
[ "Returns", "a", "rich", "list", "of", "texts", "in", "the", "catalog", "." ]
ed9c025b7ec43c949481173251b70e05e4dffd27
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/akkadian/cdli_corpus.py#L108-L119