repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
nim65s/ndh
ndh/utils.py
get_env
def get_env(env_file='.env'):
    """Set default environment variables from an env file.

    Reads ``KEY=VALUE`` lines from *env_file* and applies each via
    ``os.environ.setdefault``, so variables already present in the
    environment are never overwritten.  Lines without ``=`` are ignored.

    :param env_file: path to the env file (default ``'.env'``)
    :return: None
    """
    try:
        with open(env_file) as f:
            # Iterate the file lazily instead of readlines() to avoid
            # loading the whole file into memory.
            for line in f:
                key, sep, val = line.partition('=')
                if not sep:
                    # No '=' on this line: skip it (same as the old
                    # per-line ValueError handling).
                    continue
                os.environ.setdefault(key.strip(), val.strip())
    except FileNotFoundError:
        # A missing env file is not an error; keep the current environment.
        pass
python
def get_env(env_file='.env'): """ Set default environment variables from .env file """ try: with open(env_file) as f: for line in f.readlines(): try: key, val = line.split('=', maxsplit=1) os.environ.setdefault(key.strip(), val.strip()) except ValueError: pass except FileNotFoundError: pass
[ "def", "get_env", "(", "env_file", "=", "'.env'", ")", ":", "try", ":", "with", "open", "(", "env_file", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "try", ":", "key", ",", "val", "=", "line", ".", "split", "...
Set default environment variables from .env file
[ "Set", "default", "environment", "variables", "from", ".", "env", "file" ]
train
https://github.com/nim65s/ndh/blob/3e14644e3f701044acbb7aafbf69b51ad6f86d99/ndh/utils.py#L27-L40
merry-bits/DBQuery
src/dbquery/query.py
to_dict_formatter
def to_dict_formatter(row, cursor):
    """Turn a database row into a dictionary keyed by column name.

    Column names are taken from ``cursor.description`` exactly as the
    driver reports them; no case conversion is applied.  (The previous
    docstring claimed names were lower-cased — the code never did that.)

    :param row: one database row, sequence of column values
    :type row: (value, ...)
    :param cursor: the cursor which was used to make the query
    :type cursor: DB-API cursor object
    :return: the row itself if it is empty/falsy, otherwise a dict
    :raises RuntimeError: if no cursor or cursor description is available
    """
    # Empty row? Return it unchanged (preserves None/() distinctions).
    if not row:
        return row
    # No cursor? Raise runtime error.
    if cursor is None or cursor.description is None:
        raise RuntimeError("No DB-API cursor or description available.")
    # Each description entry is a sequence whose first item is the name.
    column_names = (d[0] for d in cursor.description)
    return {name: value for value, name in zip(row, column_names)}
python
def to_dict_formatter(row, cursor): """ Take a row and use the column names from cursor to turn the row into a dictionary. Note: converts column names to lower-case! :param row: one database row, sequence of column values :type row: (value, ...) :param cursor: the cursor which was used to make the query :type cursor: DB-API cursor object """ # Empty row? Return. if not row: return row # No cursor? Raise runtime error. if cursor is None or cursor.description is None: raise RuntimeError("No DB-API cursor or description available.") # Give each value the appropriate column name within in the resulting # dictionary. column_names = (d[0] for d in cursor.description) # 0 is the name return {name: value for value, name in zip(row, column_names)}
[ "def", "to_dict_formatter", "(", "row", ",", "cursor", ")", ":", "# Empty row? Return.", "if", "not", "row", ":", "return", "row", "# No cursor? Raise runtime error.", "if", "cursor", "is", "None", "or", "cursor", ".", "description", "is", "None", ":", "raise", ...
Take a row and use the column names from cursor to turn the row into a dictionary. Note: converts column names to lower-case! :param row: one database row, sequence of column values :type row: (value, ...) :param cursor: the cursor which was used to make the query :type cursor: DB-API cursor object
[ "Take", "a", "row", "and", "use", "the", "column", "names", "from", "cursor", "to", "turn", "the", "row", "into", "a", "dictionary", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L14-L35
merry-bits/DBQuery
src/dbquery/query.py
Query.show
def show(self, *args, **kwds):
    """Render the SQL as the database would execute it.

    Not every connection type supports this; for example PostgreSQL
    does, while SQLite does not.

    :rtype: str
    """
    # As in __call__: positional arguments take precedence over keywords.
    params = args or kwds
    return self._db.show(self._sql, params)
python
def show(self, *args, **kwds): """ Show how the SQL looks like when executed by the DB. This might not be supported by all connection types. For example: PostgreSQL does support it, SQLite does not. :rtype: str """ # Same as in __call__, arguments win over keywords arg = args if not arg: arg = kwds # pylint: disable=redefined-variable-type return self._db.show(self._sql, arg)
[ "def", "show", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "# Same as in __call__, arguments win over keywords", "arg", "=", "args", "if", "not", "arg", ":", "arg", "=", "kwds", "# pylint: disable=redefined-variable-type", "return", "self", "...
Show how the SQL looks like when executed by the DB. This might not be supported by all connection types. For example: PostgreSQL does support it, SQLite does not. :rtype: str
[ "Show", "how", "the", "SQL", "looks", "like", "when", "executed", "by", "the", "DB", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L113-L125
merry-bits/DBQuery
src/dbquery/query.py
Select._produce_return
def _produce_return(self, cursor): """ Get the rows from the cursor and apply the row formatter. :return: sequence of rows, or a generator if a row formatter has to be applied """ results = cursor.fetchall() # Format rows within a generator? if self._row_formatter is not None: return (self._row_formatter(r, cursor) for r in results) return results
python
def _produce_return(self, cursor): """ Get the rows from the cursor and apply the row formatter. :return: sequence of rows, or a generator if a row formatter has to be applied """ results = cursor.fetchall() # Format rows within a generator? if self._row_formatter is not None: return (self._row_formatter(r, cursor) for r in results) return results
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "results", "=", "cursor", ".", "fetchall", "(", ")", "# Format rows within a generator?", "if", "self", ".", "_row_formatter", "is", "not", "None", ":", "return", "(", "self", ".", "_row_formatter...
Get the rows from the cursor and apply the row formatter. :return: sequence of rows, or a generator if a row formatter has to be applied
[ "Get", "the", "rows", "from", "the", "cursor", "and", "apply", "the", "row", "formatter", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L173-L185
merry-bits/DBQuery
src/dbquery/query.py
SelectOne._produce_return
def _produce_return(self, cursor): """ Return the one result. """ results = cursor.fetchmany(2) if len(results) != 1: return None # Return the one row, or the one column. row = results[0] if self._row_formatter is not None: row = self._row_formatter(row, cursor) elif len(row) == 1: row = row[0] return row
python
def _produce_return(self, cursor): """ Return the one result. """ results = cursor.fetchmany(2) if len(results) != 1: return None # Return the one row, or the one column. row = results[0] if self._row_formatter is not None: row = self._row_formatter(row, cursor) elif len(row) == 1: row = row[0] return row
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "results", "=", "cursor", ".", "fetchmany", "(", "2", ")", "if", "len", "(", "results", ")", "!=", "1", ":", "return", "None", "# Return the one row, or the one column.", "row", "=", "results", ...
Return the one result.
[ "Return", "the", "one", "result", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L195-L209
merry-bits/DBQuery
src/dbquery/query.py
SelectIterator._row_generator
def _row_generator(self, cursor): """ Yields individual rows until no more rows exist in query result. Applies row formatter if such exists. """ rowset = cursor.fetchmany(self._arraysize) while rowset: if self._row_formatter is not None: rowset = (self._row_formatter(r, cursor) for r in rowset) for row in rowset: yield row rowset = cursor.fetchmany(self._arraysize)
python
def _row_generator(self, cursor): """ Yields individual rows until no more rows exist in query result. Applies row formatter if such exists. """ rowset = cursor.fetchmany(self._arraysize) while rowset: if self._row_formatter is not None: rowset = (self._row_formatter(r, cursor) for r in rowset) for row in rowset: yield row rowset = cursor.fetchmany(self._arraysize)
[ "def", "_row_generator", "(", "self", ",", "cursor", ")", ":", "rowset", "=", "cursor", ".", "fetchmany", "(", "self", ".", "_arraysize", ")", "while", "rowset", ":", "if", "self", ".", "_row_formatter", "is", "not", "None", ":", "rowset", "=", "(", "s...
Yields individual rows until no more rows exist in query result. Applies row formatter if such exists.
[ "Yields", "individual", "rows", "until", "no", "more", "rows", "exist", "in", "query", "result", ".", "Applies", "row", "formatter", "if", "such", "exists", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L250-L260
merry-bits/DBQuery
src/dbquery/query.py
SelectIterator._produce_return
def _produce_return(self, cursor): """ Calls callback once with generator. :rtype: None """ self.callback(self._row_generator(cursor), *self.cb_args) return None
python
def _produce_return(self, cursor): """ Calls callback once with generator. :rtype: None """ self.callback(self._row_generator(cursor), *self.cb_args) return None
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "self", ".", "callback", "(", "self", ".", "_row_generator", "(", "cursor", ")", ",", "*", "self", ".", "cb_args", ")", "return", "None" ]
Calls callback once with generator. :rtype: None
[ "Calls", "callback", "once", "with", "generator", ".", ":", "rtype", ":", "None" ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L262-L267
merry-bits/DBQuery
src/dbquery/query.py
Manipulation._produce_return
def _produce_return(self, cursor): """ Return the rowcount property from the used cursor. Checks the count first, if a count was given. :raise ManipulationCheckError: if a row count was set but does not match """ rowcount = cursor.rowcount # Check the row count? if self._rowcount is not None and self._rowcount != rowcount: raise ManipulationCheckError( "Count was {}, expected {}.".format(rowcount, self._rowcount)) return rowcount
python
def _produce_return(self, cursor): """ Return the rowcount property from the used cursor. Checks the count first, if a count was given. :raise ManipulationCheckError: if a row count was set but does not match """ rowcount = cursor.rowcount # Check the row count? if self._rowcount is not None and self._rowcount != rowcount: raise ManipulationCheckError( "Count was {}, expected {}.".format(rowcount, self._rowcount)) return rowcount
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "rowcount", "=", "cursor", ".", "rowcount", "# Check the row count?", "if", "self", ".", "_rowcount", "is", "not", "None", "and", "self", ".", "_rowcount", "!=", "rowcount", ":", "raise", "Manip...
Return the rowcount property from the used cursor. Checks the count first, if a count was given. :raise ManipulationCheckError: if a row count was set but does not match
[ "Return", "the", "rowcount", "property", "from", "the", "used", "cursor", "." ]
train
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L293-L308
gotlium/django-pinba
pinba/timers.py
get_monitor
def get_monitor(host=PINBA_SERVER, port=PINBA_PORT, *args, **kwargs):
    """
    Build a ScriptMonitor bound to the given pinba host/port.

    todo: memory + cpu
    """
    # NOTE(review): servername is always PINBA_SERVER_NAME regardless of
    # the `host` argument passed in — confirm this is intended.
    kwargs['servername'] = PINBA_SERVER_NAME
    kwargs['scriptname'] = __file__
    # Walk the call stack for the first record not originating in this
    # file.  `__file__ not in item` compares __file__ against each element
    # of the stack record (frame, filename, lineno, function, ...), i.e.
    # it matches on the filename element.  item[3] is that frame's
    # function name — presumably the caller's name; TODO confirm.
    for item in inspect.stack():
        if item and __file__ not in item:
            kwargs['scriptname'] = item[3]
            break
    return ScriptMonitor((host, port), *args, **kwargs)
python
def get_monitor(host=PINBA_SERVER, port=PINBA_PORT, *args, **kwargs): """ todo: memory + cpu """ kwargs['servername'] = PINBA_SERVER_NAME kwargs['scriptname'] = __file__ for item in inspect.stack(): if item and __file__ not in item: kwargs['scriptname'] = item[3] break return ScriptMonitor((host, port), *args, **kwargs)
[ "def", "get_monitor", "(", "host", "=", "PINBA_SERVER", ",", "port", "=", "PINBA_PORT", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'servername'", "]", "=", "PINBA_SERVER_NAME", "kwargs", "[", "'scriptname'", "]", "=", "__file__", ...
todo: memory + cpu
[ "todo", ":", "memory", "+", "cpu" ]
train
https://github.com/gotlium/django-pinba/blob/6e7b156e5ffdf491bfb58fd1c94f384ce9d59eb5/pinba/timers.py#L9-L21
cemsbr/yala
yala/main.py
LinterRunner.get_results
def get_results(self):
    """Run the linter and return a ``(results, errors)`` pair.

    If a linter explicitly chosen by the user is not installed, the
    error list contains a message instead of raising.  All paths now
    return a tuple of two lists (the error paths previously returned a
    list of lists, inconsistent with the success path).

    :return: tuple of (parsed stdout results, stderr/error messages)
    """
    try:
        stdout, stderr = self._lint()
        # Can't return a generator from a subprocess
        return list(stdout), stderr or []
    except FileNotFoundError as exception:
        # Only report the missing executable if the user asked for it.
        if self._linter.name in self.config.user_linters:
            error_msg = ('Could not find {}. Did you install it? '
                         'Got exception: {}'.format(self._linter.name,
                                                    exception))
            return [], [error_msg]
        # Linter not chosen by the user: silently skip it.
        return [], []
python
def get_results(self): """Run the linter, parse, and return result list. If a linter specified by the user is not found, return an error message as result. """ try: stdout, stderr = self._lint() # Can't return a generator from a subprocess return list(stdout), stderr or [] except FileNotFoundError as exception: # Error if the linter was not found but was chosen by the user if self._linter.name in self.config.user_linters: error_msg = 'Could not find {}. Did you install it? ' \ 'Got exception: {}'.format(self._linter.name, exception) return [[], [error_msg]] # If the linter was not chosen by the user, do nothing return [[], []]
[ "def", "get_results", "(", "self", ")", ":", "try", ":", "stdout", ",", "stderr", "=", "self", ".", "_lint", "(", ")", "# Can't return a generator from a subprocess", "return", "list", "(", "stdout", ")", ",", "stderr", "or", "[", "]", "except", "FileNotFoun...
Run the linter, parse, and return result list. If a linter specified by the user is not found, return an error message as result.
[ "Run", "the", "linter", "parse", "and", "return", "result", "list", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L48-L65
cemsbr/yala
yala/main.py
LinterRunner._get_command
def _get_command(self): """Return command with options and targets, ready for execution.""" targets = ' '.join(self.targets) cmd_str = self._linter.command_with_options + ' ' + targets cmd_shlex = shlex.split(cmd_str) return list(cmd_shlex)
python
def _get_command(self): """Return command with options and targets, ready for execution.""" targets = ' '.join(self.targets) cmd_str = self._linter.command_with_options + ' ' + targets cmd_shlex = shlex.split(cmd_str) return list(cmd_shlex)
[ "def", "_get_command", "(", "self", ")", ":", "targets", "=", "' '", ".", "join", "(", "self", ".", "targets", ")", "cmd_str", "=", "self", ".", "_linter", ".", "command_with_options", "+", "' '", "+", "targets", "cmd_shlex", "=", "shlex", ".", "split", ...
Return command with options and targets, ready for execution.
[ "Return", "command", "with", "options", "and", "targets", "ready", "for", "execution", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L67-L72
cemsbr/yala
yala/main.py
LinterRunner._lint
def _lint(self):
    """Execute the linter in a subprocess and parse its output.

    :return: tuple of (parsed stdout, parsed stderr)
    """
    command = self._get_command()
    # Capture both streams; the exit code is irrelevant here.
    process = subprocess.run(command, stdout=subprocess.PIPE,  # nosec
                             stderr=subprocess.PIPE)
    LOG.info('Finished %s', ' '.join(command))
    stdout, stderr = self._get_output_lines(process)
    parsed_out = self._linter.parse(stdout)
    parsed_err = self._parse_stderr(stderr)
    return parsed_out, parsed_err
python
def _lint(self): """Run linter in a subprocess.""" command = self._get_command() process = subprocess.run(command, stdout=subprocess.PIPE, # nosec stderr=subprocess.PIPE) LOG.info('Finished %s', ' '.join(command)) stdout, stderr = self._get_output_lines(process) return self._linter.parse(stdout), self._parse_stderr(stderr)
[ "def", "_lint", "(", "self", ")", ":", "command", "=", "self", ".", "_get_command", "(", ")", "process", "=", "subprocess", ".", "run", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "# nosec", "stderr", "=", "subprocess", ".", "PI...
Run linter in a subprocess.
[ "Run", "linter", "in", "a", "subprocess", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L74-L81
cemsbr/yala
yala/main.py
Main.lint
def lint(self, targets):
    """Run all configured linters in parallel and merge their output.

    Args:
        targets (list): List of files and folders to lint.

    Returns:
        tuple: sorted stdout issues and chained stderr lines.
    """
    LinterRunner.targets = targets
    linters = self._config.get_linter_classes()
    with Pool() as pool:
        results = pool.map(LinterRunner.run, linters)
    # Drop linters that produced nothing, then split into out/err pairs.
    pairs = [pair for pair in results if pair is not None]
    stdout, stderr = zip(*pairs)
    sorted_out = sorted(chain.from_iterable(stdout))
    return sorted_out, chain.from_iterable(stderr)
python
def lint(self, targets): """Run linters in parallel and sort all results. Args: targets (list): List of files and folders to lint. """ LinterRunner.targets = targets linters = self._config.get_linter_classes() with Pool() as pool: out_err_none = pool.map(LinterRunner.run, linters) out_err = [item for item in out_err_none if item is not None] stdout, stderr = zip(*out_err) return sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr)
[ "def", "lint", "(", "self", ",", "targets", ")", ":", "LinterRunner", ".", "targets", "=", "targets", "linters", "=", "self", ".", "_config", ".", "get_linter_classes", "(", ")", "with", "Pool", "(", ")", "as", "pool", ":", "out_err_none", "=", "pool", ...
Run linters in parallel and sort all results. Args: targets (list): List of files and folders to lint.
[ "Run", "linters", "in", "parallel", "and", "sort", "all", "results", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L109-L121
cemsbr/yala
yala/main.py
Main.run_from_cli
def run_from_cli(self, args):
    """Dispatch CLI arguments: dump the config, or lint and print results.

    Args:
        args (dict): Arguments parsed by docopt.
    """
    if not args['--dump-config']:
        stdout, stderr = self.lint(args['<path>'])
        self.print_results(stdout, stderr)
    else:
        self._config.print_config()
python
def run_from_cli(self, args): """Read arguments, run and print results. Args: args (dict): Arguments parsed by docopt. """ if args['--dump-config']: self._config.print_config() else: stdout, stderr = self.lint(args['<path>']) self.print_results(stdout, stderr)
[ "def", "run_from_cli", "(", "self", ",", "args", ")", ":", "if", "args", "[", "'--dump-config'", "]", ":", "self", ".", "_config", ".", "print_config", "(", ")", "else", ":", "stdout", ",", "stderr", "=", "self", ".", "lint", "(", "args", "[", "'<pat...
Read arguments, run and print results. Args: args (dict): Arguments parsed by docopt.
[ "Read", "arguments", "run", "and", "print", "results", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L123-L133
cemsbr/yala
yala/main.py
Main.print_results
def print_results(cls, stdout, stderr):
    """Print linter output: errors first on stderr, then any issues found."""
    for err_line in stderr:
        print(err_line, file=sys.stderr)
    if not stdout:
        print(':) No issues found.')
        return
    if stderr:
        # blank line to separate stdout from stderr
        print(file=sys.stderr)
    cls._print_stdout(stdout)
python
def print_results(cls, stdout, stderr): """Print linter results and exits with an error if there's any.""" for line in stderr: print(line, file=sys.stderr) if stdout: if stderr: # blank line to separate stdout from stderr print(file=sys.stderr) cls._print_stdout(stdout) else: print(':) No issues found.')
[ "def", "print_results", "(", "cls", ",", "stdout", ",", "stderr", ")", ":", "for", "line", "in", "stderr", ":", "print", "(", "line", ",", "file", "=", "sys", ".", "stderr", ")", "if", "stdout", ":", "if", "stderr", ":", "# blank line to separate stdout ...
Print linter results and exits with an error if there's any.
[ "Print", "linter", "results", "and", "exits", "with", "an", "error", "if", "there", "s", "any", "." ]
train
https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L136-L145
Fylipp/ostd
ostd/__init__.py
Downloader.download
def download(self, *ids):
    """
    Downloads the subtitles with the given ids.

    :param ids: The subtitles to download
    :return: Result instances
    :raises NotOKException
    """
    # The API only accepts 20 files per request, so work in bundles.
    for bundle in sublists_of(ids, 20):
        response = self._rpc.DownloadSubtitles(self._token, bundle)
        assert_status(response)
        for entry in response.get('data'):
            yield Result(entry['idsubtitlefile'],
                         decompress(entry['data']))
python
def download(self, *ids): """ Downloads the subtitles with the given ids. :param ids: The subtitles to download :return: Result instances :raises NotOKException """ bundles = sublists_of(ids, 20) # 20 files at once is an API restriction for bundle in bundles: download_response = self._rpc.DownloadSubtitles(self._token, bundle) assert_status(download_response) download_data = download_response.get('data') for item in download_data: subtitle_id = item['idsubtitlefile'] subtitle_data = item['data'] decompressed = decompress(subtitle_data) yield Result(subtitle_id, decompressed)
[ "def", "download", "(", "self", ",", "*", "ids", ")", ":", "bundles", "=", "sublists_of", "(", "ids", ",", "20", ")", "# 20 files at once is an API restriction", "for", "bundle", "in", "bundles", ":", "download_response", "=", "self", ".", "_rpc", ".", "Down...
Downloads the subtitles with the given ids. :param ids: The subtitles to download :return: Result instances :raises NotOKException
[ "Downloads", "the", "subtitles", "with", "the", "given", "ids", ".", ":", "param", "ids", ":", "The", "subtitles", "to", "download", ":", "return", ":", "Result", "instances", ":", "raises", "NotOKException" ]
train
https://github.com/Fylipp/ostd/blob/fb89bf8427aa15730f4a963a5183206b19e41aee/ostd/__init__.py#L25-L47
clinicedc/edc-notification
edc_notification/signals.py
notification_on_post_create_historical_record
def notification_on_post_create_historical_record(
    instance, history_date, history_user, history_change_reason, **kwargs
):
    """Checks and processes any notifications for this model.

    Acts only when site_notifications is loaded and this model's
    `label_lower` is registered in site_notifications.models.

    Note, this is the post_create of the historical model.
    """
    if not site_notifications.loaded:
        return
    if instance._meta.label_lower not in site_notifications.models:
        return
    site_notifications.notify(
        instance=instance,
        user=instance.user_modified or instance.user_created,
        history_date=history_date,
        history_user=history_user,
        history_change_reason=history_change_reason,
        fail_silently=True,
        **kwargs
    )
python
def notification_on_post_create_historical_record( instance, history_date, history_user, history_change_reason, **kwargs ): """Checks and processes any notifications for this model. Processes if `label_lower` is in site_notifications.models. Note, this is the post_create of the historical model. """ if ( site_notifications.loaded and instance._meta.label_lower in site_notifications.models ): opts = dict( instance=instance, user=instance.user_modified or instance.user_created, history_date=history_date, history_user=history_user, history_change_reason=history_change_reason, fail_silently=True, **kwargs ) site_notifications.notify(**opts)
[ "def", "notification_on_post_create_historical_record", "(", "instance", ",", "history_date", ",", "history_user", ",", "history_change_reason", ",", "*", "*", "kwargs", ")", ":", "if", "(", "site_notifications", ".", "loaded", "and", "instance", ".", "_meta", ".", ...
Checks and processes any notifications for this model. Processes if `label_lower` is in site_notifications.models. Note, this is the post_create of the historical model.
[ "Checks", "and", "processes", "any", "notifications", "for", "this", "model", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/signals.py#L14-L36
clinicedc/edc-notification
edc_notification/signals.py
manage_mailists_on_userprofile_m2m_changed
def manage_mailists_on_userprofile_m2m_changed(
    action, instance, pk_set, sender, **kwargs
):
    """Updates the mail server mailing lists based on the selections
    in the UserProfile model.
    """
    # Only act on instances that actually carry email notifications
    # (hasattr suppresses exactly the AttributeError checked before).
    if not hasattr(instance, "email_notifications"):
        return
    if action == "post_add":
        update_mailing_lists_in_m2m(
            sender=sender,
            userprofile=instance,
            subscribe=True,
            pk_set=pk_set,
            verbose=True,
        )
    elif action == "post_remove":
        update_mailing_lists_in_m2m(
            sender=sender,
            userprofile=instance,
            unsubscribe=True,
            pk_set=pk_set,
            verbose=True,
        )
python
def manage_mailists_on_userprofile_m2m_changed( action, instance, pk_set, sender, **kwargs ): """Updates the mail server mailing lists based on the selections in the UserProfile model. """ try: instance.email_notifications except AttributeError: pass else: if action == "post_remove": update_mailing_lists_in_m2m( sender=sender, userprofile=instance, unsubscribe=True, pk_set=pk_set, verbose=True, ) elif action == "post_add": update_mailing_lists_in_m2m( sender=sender, userprofile=instance, subscribe=True, pk_set=pk_set, verbose=True, )
[ "def", "manage_mailists_on_userprofile_m2m_changed", "(", "action", ",", "instance", ",", "pk_set", ",", "sender", ",", "*", "*", "kwargs", ")", ":", "try", ":", "instance", ".", "email_notifications", "except", "AttributeError", ":", "pass", "else", ":", "if", ...
Updates the mail server mailing lists based on the selections in the UserProfile model.
[ "Updates", "the", "mail", "server", "mailing", "lists", "based", "on", "the", "selections", "in", "the", "UserProfile", "model", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/signals.py#L42-L68
Synerty/peek-plugin-base
peek_plugin_base/client/PeekPlatformMobileHttpHookABC.py
PeekPlatformMobileHttpHookABC.addMobileResource
def addMobileResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
    """ Add Site Resource

    Add a custom implementation of a served http resource.

    :param pluginSubPath: The resource path where you want to serve this resource.
    :param resource: The resource to serve.
    :return: None
    """
    # Normalise the path: no leading/trailing slashes in the child name.
    strippedPath = pluginSubPath.strip(b'/')
    self.__rootMobileResource.putChild(strippedPath, resource)
python
def addMobileResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: """ Add Site Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None """ pluginSubPath = pluginSubPath.strip(b'/') self.__rootMobileResource.putChild(pluginSubPath, resource)
[ "def", "addMobileResource", "(", "self", ",", "pluginSubPath", ":", "bytes", ",", "resource", ":", "BasicResource", ")", "->", "None", ":", "pluginSubPath", "=", "pluginSubPath", ".", "strip", "(", "b'/'", ")", "self", ".", "__rootMobileResource", ".", "putChi...
Add Site Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None
[ "Add", "Site", "Resource" ]
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/client/PeekPlatformMobileHttpHookABC.py#L31-L42
sqrvrtx/plaid
plaid/jinja_check.py
parse
def parse(file_contents, file_name):
    '''
    Tries to parse the given contents as a jinja2 template.

    Args:
        file_contents (str): File contents of a jinja file
        file_name (str): Name of the file, used in the error message

    Returns:
        str: an empty string on success, otherwise a description of the
        parse error.
    '''
    env = Environment()
    try:
        env.parse(file_contents)
    except Exception as exc:
        # Catch any template error and report it instead of crashing;
        # `as exc` replaces the old sys.exc_info() dance.
        return "ERROR: Jinja2 Template File: {0}{1!r}\n".format(file_name, exc)
    return ""
python
def parse(file_contents, file_name): ''' Takes a list of files which are assumed to be jinja2 templates and tries to parse the contents of the files Args: file_contents (str): File contents of a jinja file Raises: Exception: An exception is raised if the contents of the file cannot be parsed. ''' env = Environment() result = "" try: env.parse(file_contents) except Exception: _, exc_value, _ = sys.exc_info() result += "ERROR: Jinja2 Template File: {0}".format(file_name) result += repr(exc_value) + '\n' return result
[ "def", "parse", "(", "file_contents", ",", "file_name", ")", ":", "env", "=", "Environment", "(", ")", "result", "=", "\"\"", "try", ":", "env", ".", "parse", "(", "file_contents", ")", "except", "Exception", ":", "_", ",", "exc_value", ",", "_", "=", ...
Takes a list of files which are assumed to be jinja2 templates and tries to parse the contents of the files Args: file_contents (str): File contents of a jinja file Raises: Exception: An exception is raised if the contents of the file cannot be parsed.
[ "Takes", "a", "list", "of", "files", "which", "are", "assumed", "to", "be", "jinja2", "templates", "and", "tries", "to", "parse", "the", "contents", "of", "the", "files" ]
train
https://github.com/sqrvrtx/plaid/blob/2b6162f896e40e7c490e767839de143e042c2a18/plaid/jinja_check.py#L11-L33
futursolo/magichttp
magichttp/readers.py
RequestInitialMalformedError.write_response
def write_response(
    self, status_code: Union[
        int, constants.HttpStatusCode
    ]=constants.HttpStatusCode.BAD_REQUEST, *,
    headers: Optional[_HeaderType]=None
) -> "writers.HttpResponseWriter":
    """
    When this exception is raised on the server side, this method is
    used to send an error response instead of
    :method:`BaseHttpStreamReader.write_response()`.

    :param status_code: the HTTP status to send; defaults to 400 Bad Request.
    :param headers: optional headers to include in the response.
    :return: the response writer obtained from the underlying delegate.
    """
    # Coerce to the enum so plain ints are validated as known codes.
    return self._delegate.write_response(
        constants.HttpStatusCode(status_code), headers=headers)
python
def write_response( self, status_code: Union[ int, constants.HttpStatusCode ]=constants.HttpStatusCode.BAD_REQUEST, *, headers: Optional[_HeaderType]=None ) -> "writers.HttpResponseWriter": """ When this exception is raised on the server side, this method is used to send a error response instead of :method:`BaseHttpStreamReader.write_response()`. """ return self._delegate.write_response( constants.HttpStatusCode(status_code), headers=headers)
[ "def", "write_response", "(", "self", ",", "status_code", ":", "Union", "[", "int", ",", "constants", ".", "HttpStatusCode", "]", "=", "constants", ".", "HttpStatusCode", ".", "BAD_REQUEST", ",", "*", ",", "headers", ":", "Optional", "[", "_HeaderType", "]",...
When this exception is raised on the server side, this method is used to send a error response instead of :method:`BaseHttpStreamReader.write_response()`.
[ "When", "this", "exception", "is", "raised", "on", "the", "server", "side", "this", "method", "is", "used", "to", "send", "a", "error", "response", "instead", "of", ":", "method", ":", "BaseHttpStreamReader", ".", "write_response", "()", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/readers.py#L145-L158
futursolo/magichttp
magichttp/readers.py
BaseHttpStreamReader.read
async def read(self, n: int=-1, exactly: bool=False) -> bytes: """ Read at most n bytes data or if exactly is `True`, read exactly n bytes data. If the end has been reached before the buffer has the length of data asked, it will raise a :class:`ReadUnsatisfiableError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`. """ async with self._read_lock: self._raise_exc_if_finished() if n == 0: return b"" if exactly: if n < 0: # pragma: no cover raise ValueError( "You MUST sepcify the length of the data " "if exactly is True.") if n > self.max_buf_len: # pragma: no cover raise ValueError( "The length provided cannot be larger " "than the max buffer length.") while len(self) < n: try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception as e: raise ReadUnsatisfiableError from e elif n < 0: while True: if len(self) > self.max_buf_len: raise MaxBufferLengthReachedError try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception: data = bytes(self._buf) self._buf.clear() return data elif len(self) == 0: await self._wait_for_data() data = bytes(self._buf[0:n]) del self._buf[0:n] return data
python
async def read(self, n: int=-1, exactly: bool=False) -> bytes: """ Read at most n bytes data or if exactly is `True`, read exactly n bytes data. If the end has been reached before the buffer has the length of data asked, it will raise a :class:`ReadUnsatisfiableError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`. """ async with self._read_lock: self._raise_exc_if_finished() if n == 0: return b"" if exactly: if n < 0: # pragma: no cover raise ValueError( "You MUST sepcify the length of the data " "if exactly is True.") if n > self.max_buf_len: # pragma: no cover raise ValueError( "The length provided cannot be larger " "than the max buffer length.") while len(self) < n: try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception as e: raise ReadUnsatisfiableError from e elif n < 0: while True: if len(self) > self.max_buf_len: raise MaxBufferLengthReachedError try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception: data = bytes(self._buf) self._buf.clear() return data elif len(self) == 0: await self._wait_for_data() data = bytes(self._buf[0:n]) del self._buf[0:n] return data
[ "async", "def", "read", "(", "self", ",", "n", ":", "int", "=", "-", "1", ",", "exactly", ":", "bool", "=", "False", ")", "->", "bytes", ":", "async", "with", "self", ".", "_read_lock", ":", "self", ".", "_raise_exc_if_finished", "(", ")", "if", "n...
Read at most n bytes data or if exactly is `True`, read exactly n bytes data. If the end has been reached before the buffer has the length of data asked, it will raise a :class:`ReadUnsatisfiableError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`.
[ "Read", "at", "most", "n", "bytes", "data", "or", "if", "exactly", "is", "True", "read", "exactly", "n", "bytes", "data", ".", "If", "the", "end", "has", "been", "reached", "before", "the", "buffer", "has", "the", "length", "of", "data", "asked", "it",...
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/readers.py#L280-L340
futursolo/magichttp
magichttp/readers.py
BaseHttpStreamReader.read_until
async def read_until( self, separator: bytes=b"\n", *, keep_separator: bool=True) -> bytes: """ Read until the separator has been found. When the max size of the buffer has been reached, and the separator is not found, this method will raise a :class:`MaxBufferLengthReachedError`. Similarly, if the end has been reached before found the separator it will raise a :class:`SeparatorNotFoundError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`. """ async with self._read_lock: self._raise_exc_if_finished() start_pos = 0 while True: separator_pos = self._buf.find(separator, start_pos) if separator_pos != -1: break if len(self) > self.max_buf_len: raise MaxBufferLengthReachedError try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception as e: if len(self) > 0: raise SeparatorNotFoundError from e else: raise new_start_pos = len(self) - len(separator) if new_start_pos > 0: start_pos = new_start_pos full_pos = separator_pos + len(separator) if keep_separator: data_pos = full_pos else: data_pos = separator_pos data = bytes(self._buf[0:data_pos]) del self._buf[0:full_pos] return data
python
async def read_until( self, separator: bytes=b"\n", *, keep_separator: bool=True) -> bytes: """ Read until the separator has been found. When the max size of the buffer has been reached, and the separator is not found, this method will raise a :class:`MaxBufferLengthReachedError`. Similarly, if the end has been reached before found the separator it will raise a :class:`SeparatorNotFoundError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`. """ async with self._read_lock: self._raise_exc_if_finished() start_pos = 0 while True: separator_pos = self._buf.find(separator, start_pos) if separator_pos != -1: break if len(self) > self.max_buf_len: raise MaxBufferLengthReachedError try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception as e: if len(self) > 0: raise SeparatorNotFoundError from e else: raise new_start_pos = len(self) - len(separator) if new_start_pos > 0: start_pos = new_start_pos full_pos = separator_pos + len(separator) if keep_separator: data_pos = full_pos else: data_pos = separator_pos data = bytes(self._buf[0:data_pos]) del self._buf[0:full_pos] return data
[ "async", "def", "read_until", "(", "self", ",", "separator", ":", "bytes", "=", "b\"\\n\"", ",", "*", ",", "keep_separator", ":", "bool", "=", "True", ")", "->", "bytes", ":", "async", "with", "self", ".", "_read_lock", ":", "self", ".", "_raise_exc_if_f...
Read until the separator has been found. When the max size of the buffer has been reached, and the separator is not found, this method will raise a :class:`MaxBufferLengthReachedError`. Similarly, if the end has been reached before found the separator it will raise a :class:`SeparatorNotFoundError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`.
[ "Read", "until", "the", "separator", "has", "been", "found", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/readers.py#L342-L400
futursolo/magichttp
magichttp/readers.py
HttpRequestReader.write_response
def write_response( self, status_code: Union[int, constants.HttpStatusCode], *, headers: Optional[_HeaderType]=None ) -> "writers.HttpResponseWriter": """ Write a response to the client. """ self._writer = self.__delegate.write_response( constants.HttpStatusCode(status_code), headers=headers) return self._writer
python
def write_response( self, status_code: Union[int, constants.HttpStatusCode], *, headers: Optional[_HeaderType]=None ) -> "writers.HttpResponseWriter": """ Write a response to the client. """ self._writer = self.__delegate.write_response( constants.HttpStatusCode(status_code), headers=headers) return self._writer
[ "def", "write_response", "(", "self", ",", "status_code", ":", "Union", "[", "int", ",", "constants", ".", "HttpStatusCode", "]", ",", "*", ",", "headers", ":", "Optional", "[", "_HeaderType", "]", "=", "None", ")", "->", "\"writers.HttpResponseWriter\"", ":...
Write a response to the client.
[ "Write", "a", "response", "to", "the", "client", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/readers.py#L476-L487
hugosenari/dbus2any
dbus2any/dbus2xml.py
createDbusProxyObject
def createDbusProxyObject(bus_name, object_path, bus=None): ''' Create dbus proxy object ''' bus = bus or dbus.SessionBus.get_session() return bus.get_object(bus_name, object_path)
python
def createDbusProxyObject(bus_name, object_path, bus=None): ''' Create dbus proxy object ''' bus = bus or dbus.SessionBus.get_session() return bus.get_object(bus_name, object_path)
[ "def", "createDbusProxyObject", "(", "bus_name", ",", "object_path", ",", "bus", "=", "None", ")", ":", "bus", "=", "bus", "or", "dbus", ".", "SessionBus", ".", "get_session", "(", ")", "return", "bus", ".", "get_object", "(", "bus_name", ",", "object_path...
Create dbus proxy object
[ "Create", "dbus", "proxy", "object" ]
train
https://github.com/hugosenari/dbus2any/blob/5aa48cec679a9647dcff9c10a23b225ca94b8d65/dbus2any/dbus2xml.py#L6-L11
jaraco/jaraco.translate
jaraco/translate/google.py
translate
def translate(text, target_lang='en', source_lang=None): """ Use the Google v2 API to translate the text. You had better have set the API key on this function before calling it. """ url_base = 'https://www.googleapis.com/language/translate/v2' params = dict( key=translate.API_key, q=text, target=target_lang, ) if source_lang: params['source'] = source_lang resp = requests.get(url_base, params=params) resp.raise_for_status() return resp.json()['data']['translations'][0]['translatedText']
python
def translate(text, target_lang='en', source_lang=None): """ Use the Google v2 API to translate the text. You had better have set the API key on this function before calling it. """ url_base = 'https://www.googleapis.com/language/translate/v2' params = dict( key=translate.API_key, q=text, target=target_lang, ) if source_lang: params['source'] = source_lang resp = requests.get(url_base, params=params) resp.raise_for_status() return resp.json()['data']['translations'][0]['translatedText']
[ "def", "translate", "(", "text", ",", "target_lang", "=", "'en'", ",", "source_lang", "=", "None", ")", ":", "url_base", "=", "'https://www.googleapis.com/language/translate/v2'", "params", "=", "dict", "(", "key", "=", "translate", ".", "API_key", ",", "q", "...
Use the Google v2 API to translate the text. You had better have set the API key on this function before calling it.
[ "Use", "the", "Google", "v2", "API", "to", "translate", "the", "text", ".", "You", "had", "better", "have", "set", "the", "API", "key", "on", "this", "function", "before", "calling", "it", "." ]
train
https://github.com/jaraco/jaraco.translate/blob/21a83d6378bec84c308d0c950f073fced01d36e7/jaraco/translate/google.py#L4-L19
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_credentials
def from_credentials(cls: Type[SigningKeyType], salt: Union[str, bytes], password: Union[str, bytes], scrypt_params: Optional[ScryptParams] = None) -> SigningKeyType: """ Create a SigningKey object from credentials :param salt: Secret salt passphrase credential :param password: Secret password credential :param scrypt_params: ScryptParams instance """ if scrypt_params is None: scrypt_params = ScryptParams() salt = ensure_bytes(salt) password = ensure_bytes(password) seed = scrypt(password, salt, scrypt_params.N, scrypt_params.r, scrypt_params.p, scrypt_params.seed_length) return cls(seed)
python
def from_credentials(cls: Type[SigningKeyType], salt: Union[str, bytes], password: Union[str, bytes], scrypt_params: Optional[ScryptParams] = None) -> SigningKeyType: """ Create a SigningKey object from credentials :param salt: Secret salt passphrase credential :param password: Secret password credential :param scrypt_params: ScryptParams instance """ if scrypt_params is None: scrypt_params = ScryptParams() salt = ensure_bytes(salt) password = ensure_bytes(password) seed = scrypt(password, salt, scrypt_params.N, scrypt_params.r, scrypt_params.p, scrypt_params.seed_length) return cls(seed)
[ "def", "from_credentials", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "salt", ":", "Union", "[", "str", ",", "bytes", "]", ",", "password", ":", "Union", "[", "str", ",", "bytes", "]", ",", "scrypt_params", ":", "Optional", "[", "ScryptP...
Create a SigningKey object from credentials :param salt: Secret salt passphrase credential :param password: Secret password credential :param scrypt_params: ScryptParams instance
[ "Create", "a", "SigningKey", "object", "from", "credentials" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L34-L50
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.save_seedhex_file
def save_seedhex_file(self, path: str) -> None: """ Save hexadecimal seed file from seed :param path: Authentication file path """ seedhex = convert_seed_to_seedhex(self.seed) with open(path, 'w') as fh: fh.write(seedhex)
python
def save_seedhex_file(self, path: str) -> None: """ Save hexadecimal seed file from seed :param path: Authentication file path """ seedhex = convert_seed_to_seedhex(self.seed) with open(path, 'w') as fh: fh.write(seedhex)
[ "def", "save_seedhex_file", "(", "self", ",", "path", ":", "str", ")", "->", "None", ":", "seedhex", "=", "convert_seed_to_seedhex", "(", "self", ".", "seed", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "("...
Save hexadecimal seed file from seed :param path: Authentication file path
[ "Save", "hexadecimal", "seed", "file", "from", "seed" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L52-L60
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_seedhex_file
def from_seedhex_file(path: str) -> SigningKeyType: """ Return SigningKey instance from Seedhex file :param str path: Hexadecimal seed file path """ with open(path, 'r') as fh: seedhex = fh.read() return SigningKey.from_seedhex(seedhex)
python
def from_seedhex_file(path: str) -> SigningKeyType: """ Return SigningKey instance from Seedhex file :param str path: Hexadecimal seed file path """ with open(path, 'r') as fh: seedhex = fh.read() return SigningKey.from_seedhex(seedhex)
[ "def", "from_seedhex_file", "(", "path", ":", "str", ")", "->", "SigningKeyType", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "fh", ":", "seedhex", "=", "fh", ".", "read", "(", ")", "return", "SigningKey", ".", "from_seedhex", "(", "seedhe...
Return SigningKey instance from Seedhex file :param str path: Hexadecimal seed file path
[ "Return", "SigningKey", "instance", "from", "Seedhex", "file" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L63-L71
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_seedhex
def from_seedhex(cls: Type[SigningKeyType], seedhex: str) -> SigningKeyType: """ Return SigningKey instance from Seedhex :param str seedhex: Hexadecimal seed string """ regex_seedhex = compile("([0-9a-fA-F]{64})") match = search(regex_seedhex, seedhex) if not match: raise Exception('Error: Bad seed hexadecimal format') seedhex = match.groups()[0] seed = convert_seedhex_to_seed(seedhex) return cls(seed)
python
def from_seedhex(cls: Type[SigningKeyType], seedhex: str) -> SigningKeyType: """ Return SigningKey instance from Seedhex :param str seedhex: Hexadecimal seed string """ regex_seedhex = compile("([0-9a-fA-F]{64})") match = search(regex_seedhex, seedhex) if not match: raise Exception('Error: Bad seed hexadecimal format') seedhex = match.groups()[0] seed = convert_seedhex_to_seed(seedhex) return cls(seed)
[ "def", "from_seedhex", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "seedhex", ":", "str", ")", "->", "SigningKeyType", ":", "regex_seedhex", "=", "compile", "(", "\"([0-9a-fA-F]{64})\"", ")", "match", "=", "search", "(", "regex_seedhex", ",", "...
Return SigningKey instance from Seedhex :param str seedhex: Hexadecimal seed string
[ "Return", "SigningKey", "instance", "from", "Seedhex" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L74-L86
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_private_key
def from_private_key(path: str) -> SigningKeyType: """ Read authentication file Add public key attribute :param path: Authentication file path """ key = load_key(path) key.pubkey = Base58Encoder.encode(key.vk) return key
python
def from_private_key(path: str) -> SigningKeyType: """ Read authentication file Add public key attribute :param path: Authentication file path """ key = load_key(path) key.pubkey = Base58Encoder.encode(key.vk) return key
[ "def", "from_private_key", "(", "path", ":", "str", ")", "->", "SigningKeyType", ":", "key", "=", "load_key", "(", "path", ")", "key", ".", "pubkey", "=", "Base58Encoder", ".", "encode", "(", "key", ".", "vk", ")", "return", "key" ]
Read authentication file Add public key attribute :param path: Authentication file path
[ "Read", "authentication", "file", "Add", "public", "key", "attribute" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L97-L106
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.decrypt_seal
def decrypt_seal(self, data: bytes) -> bytes: """ Decrypt bytes data with a curve25519 version of the ed25519 key pair :param data: Encrypted data :return: """ curve25519_public_key = libnacl.crypto_sign_ed25519_pk_to_curve25519(self.vk) curve25519_secret_key = libnacl.crypto_sign_ed25519_sk_to_curve25519(self.sk) return libnacl.crypto_box_seal_open(data, curve25519_public_key, curve25519_secret_key)
python
def decrypt_seal(self, data: bytes) -> bytes: """ Decrypt bytes data with a curve25519 version of the ed25519 key pair :param data: Encrypted data :return: """ curve25519_public_key = libnacl.crypto_sign_ed25519_pk_to_curve25519(self.vk) curve25519_secret_key = libnacl.crypto_sign_ed25519_sk_to_curve25519(self.sk) return libnacl.crypto_box_seal_open(data, curve25519_public_key, curve25519_secret_key)
[ "def", "decrypt_seal", "(", "self", ",", "data", ":", "bytes", ")", "->", "bytes", ":", "curve25519_public_key", "=", "libnacl", ".", "crypto_sign_ed25519_pk_to_curve25519", "(", "self", ".", "vk", ")", "curve25519_secret_key", "=", "libnacl", ".", "crypto_sign_ed...
Decrypt bytes data with a curve25519 version of the ed25519 key pair :param data: Encrypted data :return:
[ "Decrypt", "bytes", "data", "with", "a", "curve25519", "version", "of", "the", "ed25519", "key", "pair" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L108-L118
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_pubsec_file
def from_pubsec_file(cls: Type[SigningKeyType], path: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF file :param path: Path to WIF file """ with open(path, 'r') as fh: pubsec_content = fh.read() # line patterns regex_pubkey = compile("pub: ([1-9A-HJ-NP-Za-km-z]{43,44})", MULTILINE) regex_signkey = compile("sec: ([1-9A-HJ-NP-Za-km-z]{88,90})", MULTILINE) # check public key field match = search(regex_pubkey, pubsec_content) if not match: raise Exception('Error: Bad format PubSec v1 file, missing public key') # check signkey field match = search(regex_signkey, pubsec_content) if not match: raise Exception('Error: Bad format PubSec v1 file, missing sec key') # capture signkey signkey_hex = match.groups()[0] # extract seed from signkey seed = bytes(Base58Encoder.decode(signkey_hex)[0:32]) return cls(seed)
python
def from_pubsec_file(cls: Type[SigningKeyType], path: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF file :param path: Path to WIF file """ with open(path, 'r') as fh: pubsec_content = fh.read() # line patterns regex_pubkey = compile("pub: ([1-9A-HJ-NP-Za-km-z]{43,44})", MULTILINE) regex_signkey = compile("sec: ([1-9A-HJ-NP-Za-km-z]{88,90})", MULTILINE) # check public key field match = search(regex_pubkey, pubsec_content) if not match: raise Exception('Error: Bad format PubSec v1 file, missing public key') # check signkey field match = search(regex_signkey, pubsec_content) if not match: raise Exception('Error: Bad format PubSec v1 file, missing sec key') # capture signkey signkey_hex = match.groups()[0] # extract seed from signkey seed = bytes(Base58Encoder.decode(signkey_hex)[0:32]) return cls(seed)
[ "def", "from_pubsec_file", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "path", ":", "str", ")", "->", "SigningKeyType", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "fh", ":", "pubsec_content", "=", "fh", ".", "read", "(", "...
Return SigningKey instance from Duniter WIF file :param path: Path to WIF file
[ "Return", "SigningKey", "instance", "from", "Duniter", "WIF", "file" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L121-L150
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.save_pubsec_file
def save_pubsec_file(self, path: str) -> None: """ Save a Duniter PubSec file (PubSec) v1 :param path: Path to file """ # version version = 1 # base58 encode keys base58_signing_key = Base58Encoder.encode(self.sk) base58_public_key = self.pubkey # save file with open(path, 'w') as fh: fh.write( """Type: PubSec Version: {version} pub: {pubkey} sec: {signkey}""".format(version=version, pubkey=base58_public_key, signkey=base58_signing_key) )
python
def save_pubsec_file(self, path: str) -> None: """ Save a Duniter PubSec file (PubSec) v1 :param path: Path to file """ # version version = 1 # base58 encode keys base58_signing_key = Base58Encoder.encode(self.sk) base58_public_key = self.pubkey # save file with open(path, 'w') as fh: fh.write( """Type: PubSec Version: {version} pub: {pubkey} sec: {signkey}""".format(version=version, pubkey=base58_public_key, signkey=base58_signing_key) )
[ "def", "save_pubsec_file", "(", "self", ",", "path", ":", "str", ")", "->", "None", ":", "# version", "version", "=", "1", "# base58 encode keys", "base58_signing_key", "=", "Base58Encoder", ".", "encode", "(", "self", ".", "sk", ")", "base58_public_key", "=",...
Save a Duniter PubSec file (PubSec) v1 :param path: Path to file
[ "Save", "a", "Duniter", "PubSec", "file", "(", "PubSec", ")", "v1" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L152-L172
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_wif_or_ewif_file
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF or EWIF file :param path: Path to WIF of EWIF file :param password: Password needed for EWIF file """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format WIF or EWIF v1 file') # capture hexa wif key wif_hex = match.groups()[0] return SigningKey.from_wif_or_ewif_hex(wif_hex, password)
python
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF or EWIF file :param path: Path to WIF of EWIF file :param password: Password needed for EWIF file """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format WIF or EWIF v1 file') # capture hexa wif key wif_hex = match.groups()[0] return SigningKey.from_wif_or_ewif_hex(wif_hex, password)
[ "def", "from_wif_or_ewif_file", "(", "path", ":", "str", ",", "password", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "SigningKeyType", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "fh", ":", "wif_content", "=", "fh", ".", "...
Return SigningKey instance from Duniter WIF or EWIF file :param path: Path to WIF of EWIF file :param password: Password needed for EWIF file
[ "Return", "SigningKey", "instance", "from", "Duniter", "WIF", "or", "EWIF", "file" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L175-L193
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_wif_or_ewif_hex
def from_wif_or_ewif_hex(wif_hex: str, password: Optional[str] = None) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format :param wif_hex: WIF or EWIF string in hexadecimal format :param password: Password of EWIF encrypted seed """ wif_bytes = Base58Encoder.decode(wif_hex) fi = wif_bytes[0:1] if fi == b"\x01": return SigningKey.from_wif_hex(wif_hex) elif fi == b"\x02" and password is not None: return SigningKey.from_ewif_hex(wif_hex, password) else: raise Exception("Error: Bad format: not WIF nor EWIF")
python
def from_wif_or_ewif_hex(wif_hex: str, password: Optional[str] = None) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format :param wif_hex: WIF or EWIF string in hexadecimal format :param password: Password of EWIF encrypted seed """ wif_bytes = Base58Encoder.decode(wif_hex) fi = wif_bytes[0:1] if fi == b"\x01": return SigningKey.from_wif_hex(wif_hex) elif fi == b"\x02" and password is not None: return SigningKey.from_ewif_hex(wif_hex, password) else: raise Exception("Error: Bad format: not WIF nor EWIF")
[ "def", "from_wif_or_ewif_hex", "(", "wif_hex", ":", "str", ",", "password", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "SigningKeyType", ":", "wif_bytes", "=", "Base58Encoder", ".", "decode", "(", "wif_hex", ")", "fi", "=", "wif_bytes", "[",...
Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format :param wif_hex: WIF or EWIF string in hexadecimal format :param password: Password of EWIF encrypted seed
[ "Return", "SigningKey", "instance", "from", "Duniter", "WIF", "or", "EWIF", "in", "hexadecimal", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L196-L212
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_wif_file
def from_wif_file(path: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF file :param path: Path to WIF file """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format WIF v1 file') # capture hexa wif key wif_hex = match.groups()[0] return SigningKey.from_wif_hex(wif_hex)
python
def from_wif_file(path: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF file :param path: Path to WIF file """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format WIF v1 file') # capture hexa wif key wif_hex = match.groups()[0] return SigningKey.from_wif_hex(wif_hex)
[ "def", "from_wif_file", "(", "path", ":", "str", ")", "->", "SigningKeyType", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "fh", ":", "wif_content", "=", "fh", ".", "read", "(", ")", "# check data field", "regex", "=", "compile", "(", "'Dat...
Return SigningKey instance from Duniter WIF file :param path: Path to WIF file
[ "Return", "SigningKey", "instance", "from", "Duniter", "WIF", "file" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L215-L232
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_wif_hex
def from_wif_hex(cls: Type[SigningKeyType], wif_hex: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF in hexadecimal format :param wif_hex: WIF string in hexadecimal format """ wif_bytes = Base58Encoder.decode(wif_hex) if len(wif_bytes) != 35: raise Exception("Error: the size of WIF is invalid") # extract data checksum_from_wif = wif_bytes[-2:] fi = wif_bytes[0:1] seed = wif_bytes[1:-2] seed_fi = wif_bytes[0:-2] # check WIF format flag if fi != b"\x01": raise Exception("Error: bad format version, not WIF") # checksum control checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(seed_fi))[0:2] if checksum_from_wif != checksum: raise Exception("Error: bad checksum of the WIF") return cls(seed)
python
def from_wif_hex(cls: Type[SigningKeyType], wif_hex: str) -> SigningKeyType: """ Return SigningKey instance from Duniter WIF in hexadecimal format :param wif_hex: WIF string in hexadecimal format """ wif_bytes = Base58Encoder.decode(wif_hex) if len(wif_bytes) != 35: raise Exception("Error: the size of WIF is invalid") # extract data checksum_from_wif = wif_bytes[-2:] fi = wif_bytes[0:1] seed = wif_bytes[1:-2] seed_fi = wif_bytes[0:-2] # check WIF format flag if fi != b"\x01": raise Exception("Error: bad format version, not WIF") # checksum control checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(seed_fi))[0:2] if checksum_from_wif != checksum: raise Exception("Error: bad checksum of the WIF") return cls(seed)
[ "def", "from_wif_hex", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "wif_hex", ":", "str", ")", "->", "SigningKeyType", ":", "wif_bytes", "=", "Base58Encoder", ".", "decode", "(", "wif_hex", ")", "if", "len", "(", "wif_bytes", ")", "!=", "35...
Return SigningKey instance from Duniter WIF in hexadecimal format :param wif_hex: WIF string in hexadecimal format
[ "Return", "SigningKey", "instance", "from", "Duniter", "WIF", "in", "hexadecimal", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L235-L260
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.save_wif_file
def save_wif_file(self, path: str) -> None: """ Save a Wallet Import Format file (WIF) v1 :param path: Path to file """ # version version = 1 # add format to seed (1=WIF,2=EWIF) seed_fi = b"\x01" + self.seed # calculate checksum sha256_v1 = libnacl.crypto_hash_sha256(seed_fi) sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1) checksum = sha256_v2[0:2] # base58 encode key and checksum wif_key = Base58Encoder.encode(seed_fi + checksum) with open(path, 'w') as fh: fh.write( """Type: WIF Version: {version} Data: {data}""".format(version=version, data=wif_key) )
python
def save_wif_file(self, path: str) -> None: """ Save a Wallet Import Format file (WIF) v1 :param path: Path to file """ # version version = 1 # add format to seed (1=WIF,2=EWIF) seed_fi = b"\x01" + self.seed # calculate checksum sha256_v1 = libnacl.crypto_hash_sha256(seed_fi) sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1) checksum = sha256_v2[0:2] # base58 encode key and checksum wif_key = Base58Encoder.encode(seed_fi + checksum) with open(path, 'w') as fh: fh.write( """Type: WIF Version: {version} Data: {data}""".format(version=version, data=wif_key) )
[ "def", "save_wif_file", "(", "self", ",", "path", ":", "str", ")", "->", "None", ":", "# version", "version", "=", "1", "# add format to seed (1=WIF,2=EWIF)", "seed_fi", "=", "b\"\\x01\"", "+", "self", ".", "seed", "# calculate checksum", "sha256_v1", "=", "libn...
Save a Wallet Import Format file (WIF) v1 :param path: Path to file
[ "Save", "a", "Wallet", "Import", "Format", "file", "(", "WIF", ")", "v1" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L262-L287
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_ewif_file
def from_ewif_file(path: str, password: str) -> SigningKeyType: """ Return SigningKey instance from Duniter EWIF file :param path: Path to EWIF file :param password: Password of the encrypted seed """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format EWIF v1 file') # capture ewif key ewif_hex = match.groups()[0] return SigningKey.from_ewif_hex(ewif_hex, password)
python
def from_ewif_file(path: str, password: str) -> SigningKeyType: """ Return SigningKey instance from Duniter EWIF file :param path: Path to EWIF file :param password: Password of the encrypted seed """ with open(path, 'r') as fh: wif_content = fh.read() # check data field regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE) match = search(regex, wif_content) if not match: raise Exception('Error: Bad format EWIF v1 file') # capture ewif key ewif_hex = match.groups()[0] return SigningKey.from_ewif_hex(ewif_hex, password)
[ "def", "from_ewif_file", "(", "path", ":", "str", ",", "password", ":", "str", ")", "->", "SigningKeyType", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "fh", ":", "wif_content", "=", "fh", ".", "read", "(", ")", "# check data field", "rege...
Return SigningKey instance from Duniter EWIF file :param path: Path to EWIF file :param password: Password of the encrypted seed
[ "Return", "SigningKey", "instance", "from", "Duniter", "EWIF", "file" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L290-L308
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.from_ewif_hex
def from_ewif_hex(cls: Type[SigningKeyType], ewif_hex: str, password: str) -> SigningKeyType: """ Return SigningKey instance from Duniter EWIF in hexadecimal format :param ewif_hex: EWIF string in hexadecimal format :param password: Password of the encrypted seed """ ewif_bytes = Base58Encoder.decode(ewif_hex) if len(ewif_bytes) != 39: raise Exception("Error: the size of EWIF is invalid") # extract data fi = ewif_bytes[0:1] checksum_from_ewif = ewif_bytes[-2:] ewif_no_checksum = ewif_bytes[0:-2] salt = ewif_bytes[1:5] encryptedhalf1 = ewif_bytes[5:21] encryptedhalf2 = ewif_bytes[21:37] # check format flag if fi != b"\x02": raise Exception("Error: bad format version, not EWIF") # checksum control checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(ewif_no_checksum))[0:2] if checksum_from_ewif != checksum: raise Exception("Error: bad checksum of the EWIF") # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) decryptedhalf1 = aes.decrypt(encryptedhalf1) decryptedhalf2 = aes.decrypt(encryptedhalf2) # XOR seed1 = xor_bytes(decryptedhalf1, derivedhalf1[0:16]) seed2 = xor_bytes(decryptedhalf2, derivedhalf1[16:32]) seed = bytes(seed1 + seed2) # Password Control signer = SigningKey(seed) salt_from_seed = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(signer.pubkey)))[0:4] if salt_from_seed != salt: raise Exception("Error: bad Password of EWIF address") return cls(seed)
python
def from_ewif_hex(cls: Type[SigningKeyType], ewif_hex: str, password: str) -> SigningKeyType: """ Return SigningKey instance from Duniter EWIF in hexadecimal format :param ewif_hex: EWIF string in hexadecimal format :param password: Password of the encrypted seed """ ewif_bytes = Base58Encoder.decode(ewif_hex) if len(ewif_bytes) != 39: raise Exception("Error: the size of EWIF is invalid") # extract data fi = ewif_bytes[0:1] checksum_from_ewif = ewif_bytes[-2:] ewif_no_checksum = ewif_bytes[0:-2] salt = ewif_bytes[1:5] encryptedhalf1 = ewif_bytes[5:21] encryptedhalf2 = ewif_bytes[21:37] # check format flag if fi != b"\x02": raise Exception("Error: bad format version, not EWIF") # checksum control checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(ewif_no_checksum))[0:2] if checksum_from_ewif != checksum: raise Exception("Error: bad checksum of the EWIF") # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) decryptedhalf1 = aes.decrypt(encryptedhalf1) decryptedhalf2 = aes.decrypt(encryptedhalf2) # XOR seed1 = xor_bytes(decryptedhalf1, derivedhalf1[0:16]) seed2 = xor_bytes(decryptedhalf2, derivedhalf1[16:32]) seed = bytes(seed1 + seed2) # Password Control signer = SigningKey(seed) salt_from_seed = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(signer.pubkey)))[0:4] if salt_from_seed != salt: raise Exception("Error: bad Password of EWIF address") return cls(seed)
[ "def", "from_ewif_hex", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "ewif_hex", ":", "str", ",", "password", ":", "str", ")", "->", "SigningKeyType", ":", "ewif_bytes", "=", "Base58Encoder", ".", "decode", "(", "ewif_hex", ")", "if", "len", ...
Return SigningKey instance from Duniter EWIF in hexadecimal format :param ewif_hex: EWIF string in hexadecimal format :param password: Password of the encrypted seed
[ "Return", "SigningKey", "instance", "from", "Duniter", "EWIF", "in", "hexadecimal", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L311-L363
duniter/duniter-python-api
duniterpy/key/signing_key.py
SigningKey.save_ewif_file
def save_ewif_file(self, path: str, password: str) -> None: """ Save an Encrypted Wallet Import Format file (WIF v2) :param path: Path to file :param password: """ # version version = 1 # add version to seed salt = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(self.pubkey)))[0:4] # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # XOR seed1_xor_derivedhalf1_1 = bytes(xor_bytes(self.seed[0:16], derivedhalf1[0:16])) seed2_xor_derivedhalf1_2 = bytes(xor_bytes(self.seed[16:32], derivedhalf1[16:32])) # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) encryptedhalf1 = aes.encrypt(seed1_xor_derivedhalf1_1) encryptedhalf2 = aes.encrypt(seed2_xor_derivedhalf1_2) # add format to final seed (1=WIF,2=EWIF) seed_bytes = b'\x02' + salt + encryptedhalf1 + encryptedhalf2 # calculate checksum sha256_v1 = libnacl.crypto_hash_sha256(seed_bytes) sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1) checksum = sha256_v2[0:2] # B58 encode final key string ewif_key = Base58Encoder.encode(seed_bytes + checksum) # save file with open(path, 'w') as fh: fh.write( """Type: EWIF Version: {version} Data: {data}""".format(version=version, data=ewif_key) )
python
def save_ewif_file(self, path: str, password: str) -> None: """ Save an Encrypted Wallet Import Format file (WIF v2) :param path: Path to file :param password: """ # version version = 1 # add version to seed salt = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(self.pubkey)))[0:4] # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # XOR seed1_xor_derivedhalf1_1 = bytes(xor_bytes(self.seed[0:16], derivedhalf1[0:16])) seed2_xor_derivedhalf1_2 = bytes(xor_bytes(self.seed[16:32], derivedhalf1[16:32])) # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) encryptedhalf1 = aes.encrypt(seed1_xor_derivedhalf1_1) encryptedhalf2 = aes.encrypt(seed2_xor_derivedhalf1_2) # add format to final seed (1=WIF,2=EWIF) seed_bytes = b'\x02' + salt + encryptedhalf1 + encryptedhalf2 # calculate checksum sha256_v1 = libnacl.crypto_hash_sha256(seed_bytes) sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1) checksum = sha256_v2[0:2] # B58 encode final key string ewif_key = Base58Encoder.encode(seed_bytes + checksum) # save file with open(path, 'w') as fh: fh.write( """Type: EWIF Version: {version} Data: {data}""".format(version=version, data=ewif_key) )
[ "def", "save_ewif_file", "(", "self", ",", "path", ":", "str", ",", "password", ":", "str", ")", "->", "None", ":", "# version", "version", "=", "1", "# add version to seed", "salt", "=", "libnacl", ".", "crypto_hash_sha256", "(", "libnacl", ".", "crypto_has...
Save an Encrypted Wallet Import Format file (WIF v2) :param path: Path to file :param password:
[ "Save", "an", "Encrypted", "Wallet", "Import", "Format", "file", "(", "WIF", "v2", ")" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L365-L412
Laufire/ec
ec/utils.py
get
def get(type=None, **ArgConfig): r"""Helps to interactively get user input. Args: desc (str): The description for input. type (type / CustomType): The type of the input (defaults to None). """ ArgConfig.update(type=type) return gatherInput(**reconfigArg(ArgConfig))
python
def get(type=None, **ArgConfig): r"""Helps to interactively get user input. Args: desc (str): The description for input. type (type / CustomType): The type of the input (defaults to None). """ ArgConfig.update(type=type) return gatherInput(**reconfigArg(ArgConfig))
[ "def", "get", "(", "type", "=", "None", ",", "*", "*", "ArgConfig", ")", ":", "ArgConfig", ".", "update", "(", "type", "=", "type", ")", "return", "gatherInput", "(", "*", "*", "reconfigArg", "(", "ArgConfig", ")", ")" ]
r"""Helps to interactively get user input. Args: desc (str): The description for input. type (type / CustomType): The type of the input (defaults to None).
[ "r", "Helps", "to", "interactively", "get", "user", "input", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/utils.py#L14-L22
etcher-be/epab
epab/utils/_exe_version.py
get_product_version
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo: """ Get version info from executable Args: path: path to the executable Returns: VersionInfo """ path = Path(path).absolute() pe_info = pefile.PE(str(path)) try: for file_info in pe_info.FileInfo: # pragma: no branch if isinstance(file_info, list): result = _parse_file_info(file_info) if result: return result else: result = _parse_file_info(pe_info.FileInfo) if result: return result raise RuntimeError(f'unable to obtain version from {path}') except (KeyError, AttributeError) as exc: traceback.print_exc() raise RuntimeError(f'unable to obtain version from {path}') from exc
python
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo: """ Get version info from executable Args: path: path to the executable Returns: VersionInfo """ path = Path(path).absolute() pe_info = pefile.PE(str(path)) try: for file_info in pe_info.FileInfo: # pragma: no branch if isinstance(file_info, list): result = _parse_file_info(file_info) if result: return result else: result = _parse_file_info(pe_info.FileInfo) if result: return result raise RuntimeError(f'unable to obtain version from {path}') except (KeyError, AttributeError) as exc: traceback.print_exc() raise RuntimeError(f'unable to obtain version from {path}') from exc
[ "def", "get_product_version", "(", "path", ":", "typing", ".", "Union", "[", "str", ",", "Path", "]", ")", "->", "VersionInfo", ":", "path", "=", "Path", "(", "path", ")", ".", "absolute", "(", ")", "pe_info", "=", "pefile", ".", "PE", "(", "str", ...
Get version info from executable Args: path: path to the executable Returns: VersionInfo
[ "Get", "version", "info", "from", "executable" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_exe_version.py#L54-L80
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager.start
def start(self): """this function will start the queing thread that executes the iterator and feeds jobs into the queue. It also starts the worker threads that just sit and wait for items to appear on the queue. This is a non blocking call, so the executing thread is free to do other things while the other threads work.""" self.logger.debug('start') # start each of the task threads. for x in range(self.number_of_threads): # each thread is given the config object as well as a reference to # this manager class. The manager class is where the queue lives # and the task threads will refer to it to get their next jobs. new_thread = TaskThread(self.config, self.task_queue) self.thread_list.append(new_thread) new_thread.start() self.queuing_thread = threading.Thread( name="QueuingThread", target=self._queuing_thread_func ) self.queuing_thread.start()
python
def start(self): """this function will start the queing thread that executes the iterator and feeds jobs into the queue. It also starts the worker threads that just sit and wait for items to appear on the queue. This is a non blocking call, so the executing thread is free to do other things while the other threads work.""" self.logger.debug('start') # start each of the task threads. for x in range(self.number_of_threads): # each thread is given the config object as well as a reference to # this manager class. The manager class is where the queue lives # and the task threads will refer to it to get their next jobs. new_thread = TaskThread(self.config, self.task_queue) self.thread_list.append(new_thread) new_thread.start() self.queuing_thread = threading.Thread( name="QueuingThread", target=self._queuing_thread_func ) self.queuing_thread.start()
[ "def", "start", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "'start'", ")", "# start each of the task threads.", "for", "x", "in", "range", "(", "self", ".", "number_of_threads", ")", ":", "# each thread is given the config object as well as a r...
this function will start the queing thread that executes the iterator and feeds jobs into the queue. It also starts the worker threads that just sit and wait for items to appear on the queue. This is a non blocking call, so the executing thread is free to do other things while the other threads work.
[ "this", "function", "will", "start", "the", "queing", "thread", "that", "executes", "the", "iterator", "and", "feeds", "jobs", "into", "the", "queue", ".", "It", "also", "starts", "the", "worker", "threads", "that", "just", "sit", "and", "wait", "for", "it...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L84-L103
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager.wait_for_completion
def wait_for_completion(self, waiting_func=None): """This is a blocking function call that will wait for the queuing thread to complete. parameters: waiting_func - this function will be called every one second while waiting for the queuing thread to quit. This allows for logging timers, status indicators, etc.""" self.logger.debug("waiting to join queuingThread") self._responsive_join(self.queuing_thread, waiting_func)
python
def wait_for_completion(self, waiting_func=None): """This is a blocking function call that will wait for the queuing thread to complete. parameters: waiting_func - this function will be called every one second while waiting for the queuing thread to quit. This allows for logging timers, status indicators, etc.""" self.logger.debug("waiting to join queuingThread") self._responsive_join(self.queuing_thread, waiting_func)
[ "def", "wait_for_completion", "(", "self", ",", "waiting_func", "=", "None", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"waiting to join queuingThread\"", ")", "self", ".", "_responsive_join", "(", "self", ".", "queuing_thread", ",", "waiting_func", "...
This is a blocking function call that will wait for the queuing thread to complete. parameters: waiting_func - this function will be called every one second while waiting for the queuing thread to quit. This allows for logging timers, status indicators, etc.
[ "This", "is", "a", "blocking", "function", "call", "that", "will", "wait", "for", "the", "queuing", "thread", "to", "complete", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L106-L115
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager.blocking_start
def blocking_start(self, waiting_func=None): """this function is just a wrapper around the start and wait_for_completion methods. It starts the queuing thread and then waits for it to complete. If run by the main thread, it will detect the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP have been translated to) and will order the threads to die.""" try: self.start() self.wait_for_completion(waiting_func) # it only ends if someone hits ^C or sends SIGHUP or SIGTERM - # any of which will get translated into a KeyboardInterrupt except KeyboardInterrupt: while True: try: self.stop() break except KeyboardInterrupt: self.logger.warning('We heard you the first time. There ' 'is no need for further keyboard or signal ' 'interrupts. We are waiting for the ' 'worker threads to stop. If this app ' 'does not halt soon, you may have to send ' 'SIGKILL (kill -9)')
python
def blocking_start(self, waiting_func=None): """this function is just a wrapper around the start and wait_for_completion methods. It starts the queuing thread and then waits for it to complete. If run by the main thread, it will detect the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP have been translated to) and will order the threads to die.""" try: self.start() self.wait_for_completion(waiting_func) # it only ends if someone hits ^C or sends SIGHUP or SIGTERM - # any of which will get translated into a KeyboardInterrupt except KeyboardInterrupt: while True: try: self.stop() break except KeyboardInterrupt: self.logger.warning('We heard you the first time. There ' 'is no need for further keyboard or signal ' 'interrupts. We are waiting for the ' 'worker threads to stop. If this app ' 'does not halt soon, you may have to send ' 'SIGKILL (kill -9)')
[ "def", "blocking_start", "(", "self", ",", "waiting_func", "=", "None", ")", ":", "try", ":", "self", ".", "start", "(", ")", "self", ".", "wait_for_completion", "(", "waiting_func", ")", "# it only ends if someone hits ^C or sends SIGHUP or SIGTERM -", "# any of whi...
this function is just a wrapper around the start and wait_for_completion methods. It starts the queuing thread and then waits for it to complete. If run by the main thread, it will detect the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP have been translated to) and will order the threads to die.
[ "this", "function", "is", "just", "a", "wrapper", "around", "the", "start", "and", "wait_for_completion", "methods", ".", "It", "starts", "the", "queuing", "thread", "and", "then", "waits", "for", "it", "to", "complete", ".", "If", "run", "by", "the", "mai...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L127-L149
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager.wait_for_empty_queue
def wait_for_empty_queue(self, wait_log_interval=0, wait_reason=''): """Sit around and wait for the queue to become empty parameters: wait_log_interval - while sleeping, it is helpful if the thread periodically announces itself so that we know that it is still alive. This number is the time in seconds between log entries. wait_reason - the is for the explaination of why the thread is sleeping. This is likely to be a message like: 'there is no work to do'.""" seconds = 0 while True: if self.task_queue.empty(): break self.quit_check() if wait_log_interval and not seconds % wait_log_interval: self.logger.info('%s: %dsec so far', wait_reason, seconds) self.quit_check() seconds += 1 time.sleep(1.0)
python
def wait_for_empty_queue(self, wait_log_interval=0, wait_reason=''): """Sit around and wait for the queue to become empty parameters: wait_log_interval - while sleeping, it is helpful if the thread periodically announces itself so that we know that it is still alive. This number is the time in seconds between log entries. wait_reason - the is for the explaination of why the thread is sleeping. This is likely to be a message like: 'there is no work to do'.""" seconds = 0 while True: if self.task_queue.empty(): break self.quit_check() if wait_log_interval and not seconds % wait_log_interval: self.logger.info('%s: %dsec so far', wait_reason, seconds) self.quit_check() seconds += 1 time.sleep(1.0)
[ "def", "wait_for_empty_queue", "(", "self", ",", "wait_log_interval", "=", "0", ",", "wait_reason", "=", "''", ")", ":", "seconds", "=", "0", "while", "True", ":", "if", "self", ".", "task_queue", ".", "empty", "(", ")", ":", "break", "self", ".", "qui...
Sit around and wait for the queue to become empty parameters: wait_log_interval - while sleeping, it is helpful if the thread periodically announces itself so that we know that it is still alive. This number is the time in seconds between log entries. wait_reason - the is for the explaination of why the thread is sleeping. This is likely to be a message like: 'there is no work to do'.
[ "Sit", "around", "and", "wait", "for", "the", "queue", "to", "become", "empty" ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L152-L174
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager._responsive_join
def _responsive_join(self, thread, waiting_func=None): """similar to the responsive sleep, a join function blocks a thread until some other thread dies. If that takes a long time, we'd like to have some indicaition as to what the waiting thread is doing. This method will wait for another thread while calling the waiting_func once every second. parameters: thread - an instance of the TaskThread class representing the thread to wait for waiting_func - a function to call every second while waiting for the thread to die""" while True: try: thread.join(1.0) if not thread.isAlive(): break if waiting_func: waiting_func() except KeyboardInterrupt: self.logger.debug('quit detected by _responsive_join') self.quit = True
python
def _responsive_join(self, thread, waiting_func=None): """similar to the responsive sleep, a join function blocks a thread until some other thread dies. If that takes a long time, we'd like to have some indicaition as to what the waiting thread is doing. This method will wait for another thread while calling the waiting_func once every second. parameters: thread - an instance of the TaskThread class representing the thread to wait for waiting_func - a function to call every second while waiting for the thread to die""" while True: try: thread.join(1.0) if not thread.isAlive(): break if waiting_func: waiting_func() except KeyboardInterrupt: self.logger.debug('quit detected by _responsive_join') self.quit = True
[ "def", "_responsive_join", "(", "self", ",", "thread", ",", "waiting_func", "=", "None", ")", ":", "while", "True", ":", "try", ":", "thread", ".", "join", "(", "1.0", ")", "if", "not", "thread", ".", "isAlive", "(", ")", ":", "break", "if", "waiting...
similar to the responsive sleep, a join function blocks a thread until some other thread dies. If that takes a long time, we'd like to have some indicaition as to what the waiting thread is doing. This method will wait for another thread while calling the waiting_func once every second. parameters: thread - an instance of the TaskThread class representing the thread to wait for waiting_func - a function to call every second while waiting for the thread to die
[ "similar", "to", "the", "responsive", "sleep", "a", "join", "function", "blocks", "a", "thread", "until", "some", "other", "thread", "dies", ".", "If", "that", "takes", "a", "long", "time", "we", "d", "like", "to", "have", "some", "indicaition", "as", "t...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L177-L198
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager._kill_worker_threads
def _kill_worker_threads(self): """This function coerces the consumer/worker threads to kill themselves. When called by the queuing thread, one death token will be placed on the queue for each thread. Each worker thread is always looking for the death token. When it encounters it, it immediately runs to completion without drawing anything more off the queue. This is a blocking call. The thread using this function will wait for all the worker threads to die.""" for x in range(self.number_of_threads): self.task_queue.put((None, None)) self.logger.debug("waiting for standard worker threads to stop") for t in self.thread_list: t.join()
python
def _kill_worker_threads(self): """This function coerces the consumer/worker threads to kill themselves. When called by the queuing thread, one death token will be placed on the queue for each thread. Each worker thread is always looking for the death token. When it encounters it, it immediately runs to completion without drawing anything more off the queue. This is a blocking call. The thread using this function will wait for all the worker threads to die.""" for x in range(self.number_of_threads): self.task_queue.put((None, None)) self.logger.debug("waiting for standard worker threads to stop") for t in self.thread_list: t.join()
[ "def", "_kill_worker_threads", "(", "self", ")", ":", "for", "x", "in", "range", "(", "self", ".", "number_of_threads", ")", ":", "self", ".", "task_queue", ".", "put", "(", "(", "None", ",", "None", ")", ")", "self", ".", "logger", ".", "debug", "("...
This function coerces the consumer/worker threads to kill themselves. When called by the queuing thread, one death token will be placed on the queue for each thread. Each worker thread is always looking for the death token. When it encounters it, it immediately runs to completion without drawing anything more off the queue. This is a blocking call. The thread using this function will wait for all the worker threads to die.
[ "This", "function", "coerces", "the", "consumer", "/", "worker", "threads", "to", "kill", "themselves", ".", "When", "called", "by", "the", "queuing", "thread", "one", "death", "token", "will", "be", "placed", "on", "the", "queue", "for", "each", "thread", ...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L201-L214
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
ThreadedTaskManager._queuing_thread_func
def _queuing_thread_func(self): """This is the function responsible for reading the iterator and putting contents into the queue. It loops as long as there are items in the iterator. Should something go wrong with this thread, or it detects the quit flag, it will calmly kill its workers and then quit itself.""" self.logger.debug('_queuing_thread_func start') try: for job_params in self._get_iterator(): # may never raise # StopIteration self.config.logger.debug('received %r', job_params) if job_params is None: if self.config.quit_on_empty_queue: self.wait_for_empty_queue( wait_log_interval=10, wait_reason='waiting for queue to drain' ) raise KeyboardInterrupt self.logger.info("there is nothing to do. Sleeping " "for %d seconds" % self.config.idle_delay) self._responsive_sleep(self.config.idle_delay) continue self.quit_check() #self.logger.debug("queuing job %s", job_params) self.task_queue.put((self.task_func, job_params)) except Exception: self.logger.error('queuing jobs has failed', exc_info=True) except KeyboardInterrupt: self.logger.debug('queuingThread gets quit request') finally: self.logger.debug("we're quitting queuingThread") self._kill_worker_threads() self.logger.debug("all worker threads stopped") # now that we've killed all the workers, we can set the quit flag # to True. This will cause any other threads to die and shut down # the application. Originally, the setting of this flag was at the # start of this "finally" block. However, that meant that the # workers would abort their currently running jobs. In the case of # of the natural ending of an application where an iterater ran to # exhaustion, the workers would die before completing their tasks. # Moving the setting of the flag to this location allows the # workers to finish and then the app shuts down. self.quit = True
python
def _queuing_thread_func(self): """This is the function responsible for reading the iterator and putting contents into the queue. It loops as long as there are items in the iterator. Should something go wrong with this thread, or it detects the quit flag, it will calmly kill its workers and then quit itself.""" self.logger.debug('_queuing_thread_func start') try: for job_params in self._get_iterator(): # may never raise # StopIteration self.config.logger.debug('received %r', job_params) if job_params is None: if self.config.quit_on_empty_queue: self.wait_for_empty_queue( wait_log_interval=10, wait_reason='waiting for queue to drain' ) raise KeyboardInterrupt self.logger.info("there is nothing to do. Sleeping " "for %d seconds" % self.config.idle_delay) self._responsive_sleep(self.config.idle_delay) continue self.quit_check() #self.logger.debug("queuing job %s", job_params) self.task_queue.put((self.task_func, job_params)) except Exception: self.logger.error('queuing jobs has failed', exc_info=True) except KeyboardInterrupt: self.logger.debug('queuingThread gets quit request') finally: self.logger.debug("we're quitting queuingThread") self._kill_worker_threads() self.logger.debug("all worker threads stopped") # now that we've killed all the workers, we can set the quit flag # to True. This will cause any other threads to die and shut down # the application. Originally, the setting of this flag was at the # start of this "finally" block. However, that meant that the # workers would abort their currently running jobs. In the case of # of the natural ending of an application where an iterater ran to # exhaustion, the workers would die before completing their tasks. # Moving the setting of the flag to this location allows the # workers to finish and then the app shuts down. self.quit = True
[ "def", "_queuing_thread_func", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "'_queuing_thread_func start'", ")", "try", ":", "for", "job_params", "in", "self", ".", "_get_iterator", "(", ")", ":", "# may never raise", "# StopIteration", "self...
This is the function responsible for reading the iterator and putting contents into the queue. It loops as long as there are items in the iterator. Should something go wrong with this thread, or it detects the quit flag, it will calmly kill its workers and then quit itself.
[ "This", "is", "the", "function", "responsible", "for", "reading", "the", "iterator", "and", "putting", "contents", "into", "the", "queue", ".", "It", "loops", "as", "long", "as", "there", "are", "items", "in", "the", "iterator", ".", "Should", "something", ...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L217-L260
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
TaskThread.run
def run(self): """The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones. """ try: quit_request_detected = False while True: function, arguments = self.task_queue.get() if function is None: # this allows us to watch the threads die and identify # threads that may be hanging or deadlocked self.config.logger.info('quits') break if quit_request_detected: continue try: try: args, kwargs = arguments except ValueError: args = arguments kwargs = {} function(*args, **kwargs) # execute the task except Exception: self.config.logger.error("Error in processing a job", exc_info=True) except KeyboardInterrupt: # TODO: can probably go away self.config.logger.info('quit request detected') quit_request_detected = True #thread.interrupt_main() # only needed if signal handler # not registered except Exception: self.config.logger.critical("Failure in task_queue", exc_info=True)
python
def run(self): """The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones. """ try: quit_request_detected = False while True: function, arguments = self.task_queue.get() if function is None: # this allows us to watch the threads die and identify # threads that may be hanging or deadlocked self.config.logger.info('quits') break if quit_request_detected: continue try: try: args, kwargs = arguments except ValueError: args = arguments kwargs = {} function(*args, **kwargs) # execute the task except Exception: self.config.logger.error("Error in processing a job", exc_info=True) except KeyboardInterrupt: # TODO: can probably go away self.config.logger.info('quit request detected') quit_request_detected = True #thread.interrupt_main() # only needed if signal handler # not registered except Exception: self.config.logger.critical("Failure in task_queue", exc_info=True)
[ "def", "run", "(", "self", ")", ":", "try", ":", "quit_request_detected", "=", "False", "while", "True", ":", "function", ",", "arguments", "=", "self", ".", "task_queue", ".", "get", "(", ")", "if", "function", "is", "None", ":", "# this allows us to watc...
The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones.
[ "The", "main", "routine", "for", "a", "thread", "s", "work", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L329-L362
klahnakoski/mo-times
mo_times/vendor/dateutil/easter.py
easter
def easter(year, method=EASTER_WESTERN): """ This method was ported from the work done by GM Arts, on top of the algorithm by Claus Tondering, which was based in part on the algorithm of Ouding (1940), as quoted in "Explanatory Supplement to the Astronomical Almanac", P. Kenneth Seidelmann, editor. This algorithm implements three different easter calculation methods: 1 - Original calculation in Julian calendar, valid in dates after 326 AD 2 - Original method, with date converted to Gregorian calendar, valid in years 1583 to 4099 3 - Revised method, in Gregorian calendar, valid in years 1583 to 4099 as well These methods are represented by the constants: EASTER_JULIAN = 1 EASTER_ORTHODOX = 2 EASTER_WESTERN = 3 The default method is method 3. More about the algorithm may be found at: http://users.chariot.net.au/~gmarts/eastalg.htm and http://www.tondering.dk/claus/calendar.html """ if not (1 <= method <= 3): raise ValueError("invalid method") # g - Golden year - 1 # c - Century # h - (23 - Epact) mod 30 # i - Number of days from March 21 to Paschal Full Moon # j - Weekday for PFM (0=Sunday, etc) # p - Number of days from March 21 to Sunday on or before PFM # (-6 to 28 methods 1 & 3, to 56 for method 2) # e - Extra days to add for method 2 (converting Julian # date to Gregorian date) y = year g = y % 19 e = 0 if method < 3: # Old method i = (19*g+15)%30 j = (y+y//4+i)%7 if method == 2: # Extra dates to convert Julian to Gregorian date e = 10 if y > 1600: e = e+y//100-16-(y//100-16)//4 else: # New method c = y//100 h = (c-c//4-(8*c+13)//25+19*g+15)%30 i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) j = (y+y//4+i+2-c+c//4)%7 # p can be from -6 to 56 corresponding to dates 22 March to 23 May # (later dates apply to method 2, although 23 May never actually occurs) p = i-j+e d = 1+(p+27+(p+6)//40)%31 m = 3+(p+26)//30 return datetime.date(int(y), int(m), int(d))
python
def easter(year, method=EASTER_WESTERN): """ This method was ported from the work done by GM Arts, on top of the algorithm by Claus Tondering, which was based in part on the algorithm of Ouding (1940), as quoted in "Explanatory Supplement to the Astronomical Almanac", P. Kenneth Seidelmann, editor. This algorithm implements three different easter calculation methods: 1 - Original calculation in Julian calendar, valid in dates after 326 AD 2 - Original method, with date converted to Gregorian calendar, valid in years 1583 to 4099 3 - Revised method, in Gregorian calendar, valid in years 1583 to 4099 as well These methods are represented by the constants: EASTER_JULIAN = 1 EASTER_ORTHODOX = 2 EASTER_WESTERN = 3 The default method is method 3. More about the algorithm may be found at: http://users.chariot.net.au/~gmarts/eastalg.htm and http://www.tondering.dk/claus/calendar.html """ if not (1 <= method <= 3): raise ValueError("invalid method") # g - Golden year - 1 # c - Century # h - (23 - Epact) mod 30 # i - Number of days from March 21 to Paschal Full Moon # j - Weekday for PFM (0=Sunday, etc) # p - Number of days from March 21 to Sunday on or before PFM # (-6 to 28 methods 1 & 3, to 56 for method 2) # e - Extra days to add for method 2 (converting Julian # date to Gregorian date) y = year g = y % 19 e = 0 if method < 3: # Old method i = (19*g+15)%30 j = (y+y//4+i)%7 if method == 2: # Extra dates to convert Julian to Gregorian date e = 10 if y > 1600: e = e+y//100-16-(y//100-16)//4 else: # New method c = y//100 h = (c-c//4-(8*c+13)//25+19*g+15)%30 i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) j = (y+y//4+i+2-c+c//4)%7 # p can be from -6 to 56 corresponding to dates 22 March to 23 May # (later dates apply to method 2, although 23 May never actually occurs) p = i-j+e d = 1+(p+27+(p+6)//40)%31 m = 3+(p+26)//30 return datetime.date(int(y), int(m), int(d))
[ "def", "easter", "(", "year", ",", "method", "=", "EASTER_WESTERN", ")", ":", "if", "not", "(", "1", "<=", "method", "<=", "3", ")", ":", "raise", "ValueError", "(", "\"invalid method\"", ")", "# g - Golden year - 1", "# c - Century", "# h - (23 - Epact) mod 30"...
This method was ported from the work done by GM Arts, on top of the algorithm by Claus Tondering, which was based in part on the algorithm of Ouding (1940), as quoted in "Explanatory Supplement to the Astronomical Almanac", P. Kenneth Seidelmann, editor. This algorithm implements three different easter calculation methods: 1 - Original calculation in Julian calendar, valid in dates after 326 AD 2 - Original method, with date converted to Gregorian calendar, valid in years 1583 to 4099 3 - Revised method, in Gregorian calendar, valid in years 1583 to 4099 as well These methods are represented by the constants: EASTER_JULIAN = 1 EASTER_ORTHODOX = 2 EASTER_WESTERN = 3 The default method is method 3. More about the algorithm may be found at: http://users.chariot.net.au/~gmarts/eastalg.htm and http://www.tondering.dk/claus/calendar.html
[ "This", "method", "was", "ported", "from", "the", "work", "done", "by", "GM", "Arts", "on", "top", "of", "the", "algorithm", "by", "Claus", "Tondering", "which", "was", "based", "in", "part", "on", "the", "algorithm", "of", "Ouding", "(", "1940", ")", ...
train
https://github.com/klahnakoski/mo-times/blob/e64a720b9796e076adeb0d5773ec6915ca045b9d/mo_times/vendor/dateutil/easter.py#L17-L90
themattrix/python-simian
simian/patch.py
patch
def patch(module, external=(), internal=()): """ Temporarily monkey-patch dependencies which can be external to, or internal to the supplied module. :param module: Module object :param external: External dependencies to patch (full paths as strings) :param internal: Internal dependencies to patch (short names as strings) :return: """ external = tuple(external) internal = tuple(internal) def decorator(fn): @wraps(fn) def wrapper(*args, **kwargs): # The master mock is used to contain all of the sub-mocks. It is a # useful container and can also be used to determine the order of # calls to all sub-mocks. master_mock = mock.MagicMock() def get_mock(name): return getattr(master_mock, __patch_name(name)) def patch_external(name): return mock.patch(name, get_mock(name)) def patch_internal(name): return mock.patch(module.__name__ + '.' + name, get_mock(name)) try: with __nested(patch_external(n) for n in external): if external: # Reload the module to ensure that patched external # dependencies are accounted for. reload_module(module) # Patch objects in the module itself. with __nested(patch_internal(n) for n in internal): return fn(master_mock, *args, **kwargs) finally: if external: # When all patches have been discarded, reload the module # to bring it back to its original state (except for all of # the references which have been reassigned). reload_module(module) return wrapper return decorator
python
def patch(module, external=(), internal=()): """ Temporarily monkey-patch dependencies which can be external to, or internal to the supplied module. :param module: Module object :param external: External dependencies to patch (full paths as strings) :param internal: Internal dependencies to patch (short names as strings) :return: """ external = tuple(external) internal = tuple(internal) def decorator(fn): @wraps(fn) def wrapper(*args, **kwargs): # The master mock is used to contain all of the sub-mocks. It is a # useful container and can also be used to determine the order of # calls to all sub-mocks. master_mock = mock.MagicMock() def get_mock(name): return getattr(master_mock, __patch_name(name)) def patch_external(name): return mock.patch(name, get_mock(name)) def patch_internal(name): return mock.patch(module.__name__ + '.' + name, get_mock(name)) try: with __nested(patch_external(n) for n in external): if external: # Reload the module to ensure that patched external # dependencies are accounted for. reload_module(module) # Patch objects in the module itself. with __nested(patch_internal(n) for n in internal): return fn(master_mock, *args, **kwargs) finally: if external: # When all patches have been discarded, reload the module # to bring it back to its original state (except for all of # the references which have been reassigned). reload_module(module) return wrapper return decorator
[ "def", "patch", "(", "module", ",", "external", "=", "(", ")", ",", "internal", "=", "(", ")", ")", ":", "external", "=", "tuple", "(", "external", ")", "internal", "=", "tuple", "(", "internal", ")", "def", "decorator", "(", "fn", ")", ":", "@", ...
Temporarily monkey-patch dependencies which can be external to, or internal to the supplied module. :param module: Module object :param external: External dependencies to patch (full paths as strings) :param internal: Internal dependencies to patch (short names as strings) :return:
[ "Temporarily", "monkey", "-", "patch", "dependencies", "which", "can", "be", "external", "to", "or", "internal", "to", "the", "supplied", "module", "." ]
train
https://github.com/themattrix/python-simian/blob/c5870e4c5a81554bd37c835981cc9d22e720e9bd/simian/patch.py#L7-L54
COLORFULBOARD/revision
revision/data.py
Revision.create
def create(cls, description="", message=""): """ :param description: :type description: str :param message: :type message: str """ instance = cls() instance.revision_id = make_hash_id() instance.release_date = datetime.datetime.now() if len(description) > 0: instance.description = description if len(message) > 0: instance.message = message return instance
python
def create(cls, description="", message=""): """ :param description: :type description: str :param message: :type message: str """ instance = cls() instance.revision_id = make_hash_id() instance.release_date = datetime.datetime.now() if len(description) > 0: instance.description = description if len(message) > 0: instance.message = message return instance
[ "def", "create", "(", "cls", ",", "description", "=", "\"\"", ",", "message", "=", "\"\"", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "revision_id", "=", "make_hash_id", "(", ")", "instance", ".", "release_date", "=", "datetime", ".", ...
:param description: :type description: str :param message: :type message: str
[ ":", "param", "description", ":", ":", "type", "description", ":", "str", ":", "param", "message", ":", ":", "type", "message", ":", "str" ]
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/data.py#L69-L86
COLORFULBOARD/revision
revision/data.py
Revision.parse
def parse(self, rev_string): """ :param rev_string: :type rev_string: str """ elements = rev_string.split(MESSAGE_LINE_SEPARATOR) heading = elements[0] heading_elements = heading.split(" ") self.revision_id = heading_elements[2] datetime_str = "{} {}".format( heading_elements[0], heading_elements[1] ) self.release_date = datetime.datetime.strptime( datetime_str, DATETIME_FORMAT ) self.description = elements[1] self.message = elements[2]
python
def parse(self, rev_string): """ :param rev_string: :type rev_string: str """ elements = rev_string.split(MESSAGE_LINE_SEPARATOR) heading = elements[0] heading_elements = heading.split(" ") self.revision_id = heading_elements[2] datetime_str = "{} {}".format( heading_elements[0], heading_elements[1] ) self.release_date = datetime.datetime.strptime( datetime_str, DATETIME_FORMAT ) self.description = elements[1] self.message = elements[2]
[ "def", "parse", "(", "self", ",", "rev_string", ")", ":", "elements", "=", "rev_string", ".", "split", "(", "MESSAGE_LINE_SEPARATOR", ")", "heading", "=", "elements", "[", "0", "]", "heading_elements", "=", "heading", ".", "split", "(", "\" \"", ")", "self...
:param rev_string: :type rev_string: str
[ ":", "param", "rev_string", ":", ":", "type", "rev_string", ":", "str" ]
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/data.py#L88-L110
COLORFULBOARD/revision
revision/data.py
Revision.to_markdown
def to_markdown(self): """ :return: :rtype: str """ return "## {} {}\n\n{}\n\n{}\n\n".format( self.release_date.strftime(DATETIME_FORMAT), self.revision_id, self.description, self.message )
python
def to_markdown(self): """ :return: :rtype: str """ return "## {} {}\n\n{}\n\n{}\n\n".format( self.release_date.strftime(DATETIME_FORMAT), self.revision_id, self.description, self.message )
[ "def", "to_markdown", "(", "self", ")", ":", "return", "\"## {} {}\\n\\n{}\\n\\n{}\\n\\n\"", ".", "format", "(", "self", ".", "release_date", ".", "strftime", "(", "DATETIME_FORMAT", ")", ",", "self", ".", "revision_id", ",", "self", ".", "description", ",", "...
:return: :rtype: str
[ ":", "return", ":", ":", "rtype", ":", "str" ]
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/data.py#L120-L130
ajyoon/blur
blur/rand.py
_linear_interp
def _linear_interp(curve, test_x, round_result=False): """ Take a series of points and interpolate between them at ``test_x``. Args: curve (list[tuple]): A list of ``(x, y)`` points sorted in nondecreasing ``x`` value. If multiple points have the same ``x`` value, all but the last will be ignored. test_x (float): The ``x`` value to find the ``y`` value of Returns: float: The ``y`` value of the curve at ``test_x`` if ``round_result is False`` int: if ``round_result is True`` or the result is a whole number, the ``y`` value of the curve at ``test_x`` rounded to the nearest whole number. Raises: ProbabilityUndefinedError: if ``test_x`` is out of the domain of ``curve`` Example: >>> curve = [(0, 0), (2, 1)] >>> _linear_interp(curve, 0.5) 0.25 >>> _linear_interp(curve, 0.5, round_result=True) 0 """ index = 0 for index in range(len(curve) - 1): # Ignore points which share an x value with the following point if curve[index][0] == curve[index + 1][0]: continue if curve[index][0] <= test_x <= curve[index + 1][0]: slope = ((curve[index + 1][1] - curve[index][1]) / (curve[index + 1][0] - curve[index][0])) y_intercept = curve[index][1] - (slope * curve[index][0]) result = (slope * test_x) + y_intercept if round_result: return int(round(result)) else: if result.is_integer(): return int(result) else: return result else: raise ProbabilityUndefinedError
python
def _linear_interp(curve, test_x, round_result=False): """ Take a series of points and interpolate between them at ``test_x``. Args: curve (list[tuple]): A list of ``(x, y)`` points sorted in nondecreasing ``x`` value. If multiple points have the same ``x`` value, all but the last will be ignored. test_x (float): The ``x`` value to find the ``y`` value of Returns: float: The ``y`` value of the curve at ``test_x`` if ``round_result is False`` int: if ``round_result is True`` or the result is a whole number, the ``y`` value of the curve at ``test_x`` rounded to the nearest whole number. Raises: ProbabilityUndefinedError: if ``test_x`` is out of the domain of ``curve`` Example: >>> curve = [(0, 0), (2, 1)] >>> _linear_interp(curve, 0.5) 0.25 >>> _linear_interp(curve, 0.5, round_result=True) 0 """ index = 0 for index in range(len(curve) - 1): # Ignore points which share an x value with the following point if curve[index][0] == curve[index + 1][0]: continue if curve[index][0] <= test_x <= curve[index + 1][0]: slope = ((curve[index + 1][1] - curve[index][1]) / (curve[index + 1][0] - curve[index][0])) y_intercept = curve[index][1] - (slope * curve[index][0]) result = (slope * test_x) + y_intercept if round_result: return int(round(result)) else: if result.is_integer(): return int(result) else: return result else: raise ProbabilityUndefinedError
[ "def", "_linear_interp", "(", "curve", ",", "test_x", ",", "round_result", "=", "False", ")", ":", "index", "=", "0", "for", "index", "in", "range", "(", "len", "(", "curve", ")", "-", "1", ")", ":", "# Ignore points which share an x value with the following p...
Take a series of points and interpolate between them at ``test_x``. Args: curve (list[tuple]): A list of ``(x, y)`` points sorted in nondecreasing ``x`` value. If multiple points have the same ``x`` value, all but the last will be ignored. test_x (float): The ``x`` value to find the ``y`` value of Returns: float: The ``y`` value of the curve at ``test_x`` if ``round_result is False`` int: if ``round_result is True`` or the result is a whole number, the ``y`` value of the curve at ``test_x`` rounded to the nearest whole number. Raises: ProbabilityUndefinedError: if ``test_x`` is out of the domain of ``curve`` Example: >>> curve = [(0, 0), (2, 1)] >>> _linear_interp(curve, 0.5) 0.25 >>> _linear_interp(curve, 0.5, round_result=True) 0
[ "Take", "a", "series", "of", "points", "and", "interpolate", "between", "them", "at", "test_x", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L27-L74
ajyoon/blur
blur/rand.py
_clamp_value
def _clamp_value(value, minimum, maximum): """ Clamp a value to fit between a minimum and a maximum. * If ``value`` is between ``minimum`` and ``maximum``, return ``value`` * If ``value`` is below ``minimum``, return ``minimum`` * If ``value is above ``maximum``, return ``maximum`` Args: value (float or int): The number to clamp minimum (float or int): The lowest allowed return value maximum (float or int): The highest allowed return value Returns: float or int: the clamped value Raises: ValueError: if maximum < minimum Example: >>> _clamp_value(3, 5, 10) 5 >>> _clamp_value(11, 5, 10) 10 >>> _clamp_value(8, 5, 10) 8 """ if maximum < minimum: raise ValueError if value < minimum: return minimum elif value > maximum: return maximum else: return value
python
def _clamp_value(value, minimum, maximum): """ Clamp a value to fit between a minimum and a maximum. * If ``value`` is between ``minimum`` and ``maximum``, return ``value`` * If ``value`` is below ``minimum``, return ``minimum`` * If ``value is above ``maximum``, return ``maximum`` Args: value (float or int): The number to clamp minimum (float or int): The lowest allowed return value maximum (float or int): The highest allowed return value Returns: float or int: the clamped value Raises: ValueError: if maximum < minimum Example: >>> _clamp_value(3, 5, 10) 5 >>> _clamp_value(11, 5, 10) 10 >>> _clamp_value(8, 5, 10) 8 """ if maximum < minimum: raise ValueError if value < minimum: return minimum elif value > maximum: return maximum else: return value
[ "def", "_clamp_value", "(", "value", ",", "minimum", ",", "maximum", ")", ":", "if", "maximum", "<", "minimum", ":", "raise", "ValueError", "if", "value", "<", "minimum", ":", "return", "minimum", "elif", "value", ">", "maximum", ":", "return", "maximum", ...
Clamp a value to fit between a minimum and a maximum. * If ``value`` is between ``minimum`` and ``maximum``, return ``value`` * If ``value`` is below ``minimum``, return ``minimum`` * If ``value is above ``maximum``, return ``maximum`` Args: value (float or int): The number to clamp minimum (float or int): The lowest allowed return value maximum (float or int): The highest allowed return value Returns: float or int: the clamped value Raises: ValueError: if maximum < minimum Example: >>> _clamp_value(3, 5, 10) 5 >>> _clamp_value(11, 5, 10) 10 >>> _clamp_value(8, 5, 10) 8
[ "Clamp", "a", "value", "to", "fit", "between", "a", "minimum", "and", "a", "maximum", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L106-L140
ajyoon/blur
blur/rand.py
_normal_function
def _normal_function(x, mean, variance): """ Find a value in the cumulative distribution function of a normal curve. See https://en.wikipedia.org/wiki/Normal_distribution Args: x (float): Value to feed into the normal function mean (float): Mean of the normal function variance (float): Variance of the normal function Returns: float Example: >>> round(_normal_function(0, 0, 5), 4) 0.1784 """ e_power = -1 * (((x - mean) ** 2) / (2 * variance)) return (1 / math.sqrt(2 * variance * math.pi)) * (math.e ** e_power)
python
def _normal_function(x, mean, variance): """ Find a value in the cumulative distribution function of a normal curve. See https://en.wikipedia.org/wiki/Normal_distribution Args: x (float): Value to feed into the normal function mean (float): Mean of the normal function variance (float): Variance of the normal function Returns: float Example: >>> round(_normal_function(0, 0, 5), 4) 0.1784 """ e_power = -1 * (((x - mean) ** 2) / (2 * variance)) return (1 / math.sqrt(2 * variance * math.pi)) * (math.e ** e_power)
[ "def", "_normal_function", "(", "x", ",", "mean", ",", "variance", ")", ":", "e_power", "=", "-", "1", "*", "(", "(", "(", "x", "-", "mean", ")", "**", "2", ")", "/", "(", "2", "*", "variance", ")", ")", "return", "(", "1", "/", "math", ".", ...
Find a value in the cumulative distribution function of a normal curve. See https://en.wikipedia.org/wiki/Normal_distribution Args: x (float): Value to feed into the normal function mean (float): Mean of the normal function variance (float): Variance of the normal function Returns: float Example: >>> round(_normal_function(0, 0, 5), 4) 0.1784
[ "Find", "a", "value", "in", "the", "cumulative", "distribution", "function", "of", "a", "normal", "curve", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L143-L161
ajyoon/blur
blur/rand.py
_is_valid_options_weights_list
def _is_valid_options_weights_list(value): '''Check whether ``values`` is a valid argument for ``weighted_choice``.''' return ((isinstance(value, list)) and len(value) > 1 and (all(isinstance(opt, tuple) and len(opt) == 2 and isinstance(opt[1], (int, float)) for opt in value)))
python
def _is_valid_options_weights_list(value): '''Check whether ``values`` is a valid argument for ``weighted_choice``.''' return ((isinstance(value, list)) and len(value) > 1 and (all(isinstance(opt, tuple) and len(opt) == 2 and isinstance(opt[1], (int, float)) for opt in value)))
[ "def", "_is_valid_options_weights_list", "(", "value", ")", ":", "return", "(", "(", "isinstance", "(", "value", ",", "list", ")", ")", "and", "len", "(", "value", ")", ">", "1", "and", "(", "all", "(", "isinstance", "(", "opt", ",", "tuple", ")", "a...
Check whether ``values`` is a valid argument for ``weighted_choice``.
[ "Check", "whether", "values", "is", "a", "valid", "argument", "for", "weighted_choice", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L164-L171
ajyoon/blur
blur/rand.py
bound_weights
def bound_weights(weights, minimum=None, maximum=None): """ Bound a weight list so that all outcomes fit within specified bounds. The probability distribution within the ``minimum`` and ``maximum`` values remains the same. Weights in the list with outcomes outside of ``minimum`` and ``maximum`` are removed. If weights are removed from either end, attach weights at the modified edges at the same weight (y-axis) position they had interpolated in the original list. If neither ``minimum`` nor ``maximum`` are set, ``weights`` will be returned unmodified. If both are set, ``minimum`` must be less than ``maximum``. Args: weights (list): the list of weights where each weight is a ``tuple`` of form ``(float, float)`` corresponding to ``(outcome, weight)``. Must be sorted in increasing order of outcomes minimum (float): Lowest allowed outcome for the weight list maximum (float): Highest allowed outcome for the weight list Returns: list: A list of 2-tuples of form ``(float, float)``, the bounded weight list. 
Raises: ValueError: if ``maximum < minimum`` Example: >>> weights = [(0, 0), (2, 2), (4, 0)] >>> bound_weights(weights, 1, 3) [(1, 1), (2, 2), (3, 1)] """ # Copy weights to avoid side-effects bounded_weights = weights[:] # Remove weights outside of minimum and maximum if minimum is not None and maximum is not None: if maximum < minimum: raise ValueError bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0] <= maximum] elif minimum is not None: bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0]] elif maximum is not None: bounded_weights = [bw for bw in bounded_weights if bw[0] <= maximum] else: # Both minimum and maximum are None - the bound list is the same # as the original return bounded_weights # If weights were removed, attach new endpoints where they would have # appeared in the original curve if (bounded_weights[0][0] > weights[0][0] and bounded_weights[0][0] != minimum): bounded_weights.insert(0, (minimum, _linear_interp(weights, minimum))) if (bounded_weights[-1][0] < weights[-1][0] and bounded_weights[-1][0] != maximum): bounded_weights.append((maximum, _linear_interp(weights, maximum))) return bounded_weights
python
def bound_weights(weights, minimum=None, maximum=None): """ Bound a weight list so that all outcomes fit within specified bounds. The probability distribution within the ``minimum`` and ``maximum`` values remains the same. Weights in the list with outcomes outside of ``minimum`` and ``maximum`` are removed. If weights are removed from either end, attach weights at the modified edges at the same weight (y-axis) position they had interpolated in the original list. If neither ``minimum`` nor ``maximum`` are set, ``weights`` will be returned unmodified. If both are set, ``minimum`` must be less than ``maximum``. Args: weights (list): the list of weights where each weight is a ``tuple`` of form ``(float, float)`` corresponding to ``(outcome, weight)``. Must be sorted in increasing order of outcomes minimum (float): Lowest allowed outcome for the weight list maximum (float): Highest allowed outcome for the weight list Returns: list: A list of 2-tuples of form ``(float, float)``, the bounded weight list. 
Raises: ValueError: if ``maximum < minimum`` Example: >>> weights = [(0, 0), (2, 2), (4, 0)] >>> bound_weights(weights, 1, 3) [(1, 1), (2, 2), (3, 1)] """ # Copy weights to avoid side-effects bounded_weights = weights[:] # Remove weights outside of minimum and maximum if minimum is not None and maximum is not None: if maximum < minimum: raise ValueError bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0] <= maximum] elif minimum is not None: bounded_weights = [bw for bw in bounded_weights if minimum <= bw[0]] elif maximum is not None: bounded_weights = [bw for bw in bounded_weights if bw[0] <= maximum] else: # Both minimum and maximum are None - the bound list is the same # as the original return bounded_weights # If weights were removed, attach new endpoints where they would have # appeared in the original curve if (bounded_weights[0][0] > weights[0][0] and bounded_weights[0][0] != minimum): bounded_weights.insert(0, (minimum, _linear_interp(weights, minimum))) if (bounded_weights[-1][0] < weights[-1][0] and bounded_weights[-1][0] != maximum): bounded_weights.append((maximum, _linear_interp(weights, maximum))) return bounded_weights
[ "def", "bound_weights", "(", "weights", ",", "minimum", "=", "None", ",", "maximum", "=", "None", ")", ":", "# Copy weights to avoid side-effects", "bounded_weights", "=", "weights", "[", ":", "]", "# Remove weights outside of minimum and maximum", "if", "minimum", "i...
Bound a weight list so that all outcomes fit within specified bounds. The probability distribution within the ``minimum`` and ``maximum`` values remains the same. Weights in the list with outcomes outside of ``minimum`` and ``maximum`` are removed. If weights are removed from either end, attach weights at the modified edges at the same weight (y-axis) position they had interpolated in the original list. If neither ``minimum`` nor ``maximum`` are set, ``weights`` will be returned unmodified. If both are set, ``minimum`` must be less than ``maximum``. Args: weights (list): the list of weights where each weight is a ``tuple`` of form ``(float, float)`` corresponding to ``(outcome, weight)``. Must be sorted in increasing order of outcomes minimum (float): Lowest allowed outcome for the weight list maximum (float): Highest allowed outcome for the weight list Returns: list: A list of 2-tuples of form ``(float, float)``, the bounded weight list. Raises: ValueError: if ``maximum < minimum`` Example: >>> weights = [(0, 0), (2, 2), (4, 0)] >>> bound_weights(weights, 1, 3) [(1, 1), (2, 2), (3, 1)]
[ "Bound", "a", "weight", "list", "so", "that", "all", "outcomes", "fit", "within", "specified", "bounds", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L188-L249
ajyoon/blur
blur/rand.py
normal_distribution
def normal_distribution(mean, variance, minimum=None, maximum=None, weight_count=23): """ Return a list of weights approximating a normal distribution. Args: mean (float): The mean of the distribution variance (float): The variance of the distribution minimum (float): The minimum outcome possible to bound the output distribution to maximum (float): The maximum outcome possible to bound the output distribution to weight_count (int): The number of weights that will be used to approximate the distribution Returns: list: a list of ``(float, float)`` weight tuples approximating a normal distribution. Raises: ValueError: ``if maximum < minimum`` TypeError: if both ``minimum`` and ``maximum`` are ``None`` Example: >>> weights = normal_distribution(10, 3, ... minimum=0, maximum=20, ... weight_count=5) >>> rounded_weights = [(round(value, 2), round(strength, 2)) ... for value, strength in weights] >>> rounded_weights [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)] """ # Pin 0 to +- 5 sigma as bounds, or minimum and maximum # if they cross +/- sigma standard_deviation = math.sqrt(variance) min_x = (standard_deviation * -5) + mean max_x = (standard_deviation * 5) + mean step = (max_x - min_x) / weight_count current_x = min_x weights = [] while current_x < max_x: weights.append( (current_x, _normal_function(current_x, mean, variance)) ) current_x += step if minimum is not None or maximum is not None: return bound_weights(weights, minimum, maximum) else: return weights
python
def normal_distribution(mean, variance, minimum=None, maximum=None, weight_count=23): """ Return a list of weights approximating a normal distribution. Args: mean (float): The mean of the distribution variance (float): The variance of the distribution minimum (float): The minimum outcome possible to bound the output distribution to maximum (float): The maximum outcome possible to bound the output distribution to weight_count (int): The number of weights that will be used to approximate the distribution Returns: list: a list of ``(float, float)`` weight tuples approximating a normal distribution. Raises: ValueError: ``if maximum < minimum`` TypeError: if both ``minimum`` and ``maximum`` are ``None`` Example: >>> weights = normal_distribution(10, 3, ... minimum=0, maximum=20, ... weight_count=5) >>> rounded_weights = [(round(value, 2), round(strength, 2)) ... for value, strength in weights] >>> rounded_weights [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)] """ # Pin 0 to +- 5 sigma as bounds, or minimum and maximum # if they cross +/- sigma standard_deviation = math.sqrt(variance) min_x = (standard_deviation * -5) + mean max_x = (standard_deviation * 5) + mean step = (max_x - min_x) / weight_count current_x = min_x weights = [] while current_x < max_x: weights.append( (current_x, _normal_function(current_x, mean, variance)) ) current_x += step if minimum is not None or maximum is not None: return bound_weights(weights, minimum, maximum) else: return weights
[ "def", "normal_distribution", "(", "mean", ",", "variance", ",", "minimum", "=", "None", ",", "maximum", "=", "None", ",", "weight_count", "=", "23", ")", ":", "# Pin 0 to +- 5 sigma as bounds, or minimum and maximum", "# if they cross +/- sigma", "standard_deviation", ...
Return a list of weights approximating a normal distribution. Args: mean (float): The mean of the distribution variance (float): The variance of the distribution minimum (float): The minimum outcome possible to bound the output distribution to maximum (float): The maximum outcome possible to bound the output distribution to weight_count (int): The number of weights that will be used to approximate the distribution Returns: list: a list of ``(float, float)`` weight tuples approximating a normal distribution. Raises: ValueError: ``if maximum < minimum`` TypeError: if both ``minimum`` and ``maximum`` are ``None`` Example: >>> weights = normal_distribution(10, 3, ... minimum=0, maximum=20, ... weight_count=5) >>> rounded_weights = [(round(value, 2), round(strength, 2)) ... for value, strength in weights] >>> rounded_weights [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)]
[ "Return", "a", "list", "of", "weights", "approximating", "a", "normal", "distribution", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L252-L300
ajyoon/blur
blur/rand.py
weighted_rand
def weighted_rand(weights, round_result=False): """ Generate a non-uniform random value based on a list of weight tuples. Treats weights as coordinates for a probability distribution curve and rolls accordingly. Constructs a piece-wise linear curve according to coordinates given in ``weights`` and rolls random values in the curve's bounding box until a value is found under the curve Weight tuples should be of the form: (outcome, strength). Args: weights: (list): the list of weights where each weight is a tuple of form ``(float, float)`` corresponding to ``(outcome, strength)``. Weights with strength ``0`` or less will have no chance to be rolled. The list must be sorted in increasing order of outcomes. round_result (bool): Whether or not to round the resulting value to the nearest integer. Returns: float: A weighted random number int: A weighted random number rounded to the nearest ``int`` Example: >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -0.650612268193731 >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -2 """ # If just one weight is passed, simply return the weight's name if len(weights) == 1: return weights[0][0] # Is there a way to do this more efficiently? Maybe even require that # ``weights`` already be sorted? weights = sorted(weights, key=lambda w: w[0]) x_min = weights[0][0] x_max = weights[-1][0] y_min = 0 y_max = max([point[1] for point in weights]) # Roll random numbers until a valid one is found attempt_count = 0 while attempt_count < 500000: # Get sample point sample = (random.uniform(x_min, x_max), random.uniform(y_min, y_max)) if _point_under_curve(weights, sample): # The sample point is under the curve if round_result: return int(round(sample[0])) else: return sample[0] attempt_count += 1 else: warnings.warn( 'Point not being found in weighted_rand() after 500000 ' 'attempts, defaulting to a random weight point. ' 'If this happens often, it is probably a bug.') return random.choice(weights)[0]
python
def weighted_rand(weights, round_result=False): """ Generate a non-uniform random value based on a list of weight tuples. Treats weights as coordinates for a probability distribution curve and rolls accordingly. Constructs a piece-wise linear curve according to coordinates given in ``weights`` and rolls random values in the curve's bounding box until a value is found under the curve Weight tuples should be of the form: (outcome, strength). Args: weights: (list): the list of weights where each weight is a tuple of form ``(float, float)`` corresponding to ``(outcome, strength)``. Weights with strength ``0`` or less will have no chance to be rolled. The list must be sorted in increasing order of outcomes. round_result (bool): Whether or not to round the resulting value to the nearest integer. Returns: float: A weighted random number int: A weighted random number rounded to the nearest ``int`` Example: >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -0.650612268193731 >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -2 """ # If just one weight is passed, simply return the weight's name if len(weights) == 1: return weights[0][0] # Is there a way to do this more efficiently? Maybe even require that # ``weights`` already be sorted? weights = sorted(weights, key=lambda w: w[0]) x_min = weights[0][0] x_max = weights[-1][0] y_min = 0 y_max = max([point[1] for point in weights]) # Roll random numbers until a valid one is found attempt_count = 0 while attempt_count < 500000: # Get sample point sample = (random.uniform(x_min, x_max), random.uniform(y_min, y_max)) if _point_under_curve(weights, sample): # The sample point is under the curve if round_result: return int(round(sample[0])) else: return sample[0] attempt_count += 1 else: warnings.warn( 'Point not being found in weighted_rand() after 500000 ' 'attempts, defaulting to a random weight point. ' 'If this happens often, it is probably a bug.') return random.choice(weights)[0]
[ "def", "weighted_rand", "(", "weights", ",", "round_result", "=", "False", ")", ":", "# If just one weight is passed, simply return the weight's name", "if", "len", "(", "weights", ")", "==", "1", ":", "return", "weights", "[", "0", "]", "[", "0", "]", "# Is the...
Generate a non-uniform random value based on a list of weight tuples. Treats weights as coordinates for a probability distribution curve and rolls accordingly. Constructs a piece-wise linear curve according to coordinates given in ``weights`` and rolls random values in the curve's bounding box until a value is found under the curve Weight tuples should be of the form: (outcome, strength). Args: weights: (list): the list of weights where each weight is a tuple of form ``(float, float)`` corresponding to ``(outcome, strength)``. Weights with strength ``0`` or less will have no chance to be rolled. The list must be sorted in increasing order of outcomes. round_result (bool): Whether or not to round the resulting value to the nearest integer. Returns: float: A weighted random number int: A weighted random number rounded to the nearest ``int`` Example: >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -0.650612268193731 >>> weighted_rand([(-3, 4), (0, 10), (5, 1)]) # doctest: +SKIP -2
[ "Generate", "a", "non", "-", "uniform", "random", "value", "based", "on", "a", "list", "of", "weight", "tuples", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L401-L462
ajyoon/blur
blur/rand.py
weighted_choice
def weighted_choice(weights, as_index_and_value_tuple=False): """ Generate a non-uniform random choice based on a list of option tuples. Treats each outcome as a discreet unit with a chance to occur. Args: weights (list): a list of options where each option is a tuple of form ``(Any, float)`` corresponding to ``(outcome, strength)``. Outcome values may be of any type. Options with strength ``0`` or less will have no chance to be chosen. as_index_and_value_tuple (bool): Option to return an ``(index, value)`` tuple instead of just a single ``value``. This is useful when multiple outcomes in ``weights`` are the same and you need to know exactly which one was picked. Returns: Any: If ``as_index_and_value_tuple is False``, any one of the items in the outcomes of ``weights`` tuple (int, Any): If ``as_index_and_value_tuple is True``, a 2-tuple of form ``(int, Any)`` corresponding to ``(index, value)``. the index as well as value of the item that was picked. Example: >>> choices = [('choice one', 10), ('choice two', 3)] >>> weighted_choice(choices) # doctest: +SKIP # Often will be... 'choice one' >>> weighted_choice(choices, ... as_index_and_value_tuple=True) # doctest: +SKIP # Often will be... (0, 'choice one') """ if not len(weights): raise ValueError('List passed to weighted_choice() cannot be empty.') # Construct a line segment where each weight outcome is # allotted a length equal to the outcome's weight, # pick a uniformally random point along the line, and take # the outcome that point corresponds to prob_sum = sum(w[1] for w in weights) if prob_sum <= 0: raise ProbabilityUndefinedError( 'No item weights in weighted_choice() are greater than 0. 
' 'Probability distribution is undefined.') sample = random.uniform(0, prob_sum) current_pos = 0 i = 0 while i < len(weights): if current_pos <= sample <= (current_pos + weights[i][1]): if as_index_and_value_tuple: return (i, weights[i][0]) else: return weights[i][0] current_pos += weights[i][1] i += 1 else: raise AssertionError('Something went wrong in weighted_choice(). ' 'Please submit a bug report!')
python
def weighted_choice(weights, as_index_and_value_tuple=False): """ Generate a non-uniform random choice based on a list of option tuples. Treats each outcome as a discreet unit with a chance to occur. Args: weights (list): a list of options where each option is a tuple of form ``(Any, float)`` corresponding to ``(outcome, strength)``. Outcome values may be of any type. Options with strength ``0`` or less will have no chance to be chosen. as_index_and_value_tuple (bool): Option to return an ``(index, value)`` tuple instead of just a single ``value``. This is useful when multiple outcomes in ``weights`` are the same and you need to know exactly which one was picked. Returns: Any: If ``as_index_and_value_tuple is False``, any one of the items in the outcomes of ``weights`` tuple (int, Any): If ``as_index_and_value_tuple is True``, a 2-tuple of form ``(int, Any)`` corresponding to ``(index, value)``. the index as well as value of the item that was picked. Example: >>> choices = [('choice one', 10), ('choice two', 3)] >>> weighted_choice(choices) # doctest: +SKIP # Often will be... 'choice one' >>> weighted_choice(choices, ... as_index_and_value_tuple=True) # doctest: +SKIP # Often will be... (0, 'choice one') """ if not len(weights): raise ValueError('List passed to weighted_choice() cannot be empty.') # Construct a line segment where each weight outcome is # allotted a length equal to the outcome's weight, # pick a uniformally random point along the line, and take # the outcome that point corresponds to prob_sum = sum(w[1] for w in weights) if prob_sum <= 0: raise ProbabilityUndefinedError( 'No item weights in weighted_choice() are greater than 0. 
' 'Probability distribution is undefined.') sample = random.uniform(0, prob_sum) current_pos = 0 i = 0 while i < len(weights): if current_pos <= sample <= (current_pos + weights[i][1]): if as_index_and_value_tuple: return (i, weights[i][0]) else: return weights[i][0] current_pos += weights[i][1] i += 1 else: raise AssertionError('Something went wrong in weighted_choice(). ' 'Please submit a bug report!')
[ "def", "weighted_choice", "(", "weights", ",", "as_index_and_value_tuple", "=", "False", ")", ":", "if", "not", "len", "(", "weights", ")", ":", "raise", "ValueError", "(", "'List passed to weighted_choice() cannot be empty.'", ")", "# Construct a line segment where each ...
Generate a non-uniform random choice based on a list of option tuples. Treats each outcome as a discreet unit with a chance to occur. Args: weights (list): a list of options where each option is a tuple of form ``(Any, float)`` corresponding to ``(outcome, strength)``. Outcome values may be of any type. Options with strength ``0`` or less will have no chance to be chosen. as_index_and_value_tuple (bool): Option to return an ``(index, value)`` tuple instead of just a single ``value``. This is useful when multiple outcomes in ``weights`` are the same and you need to know exactly which one was picked. Returns: Any: If ``as_index_and_value_tuple is False``, any one of the items in the outcomes of ``weights`` tuple (int, Any): If ``as_index_and_value_tuple is True``, a 2-tuple of form ``(int, Any)`` corresponding to ``(index, value)``. the index as well as value of the item that was picked. Example: >>> choices = [('choice one', 10), ('choice two', 3)] >>> weighted_choice(choices) # doctest: +SKIP # Often will be... 'choice one' >>> weighted_choice(choices, ... as_index_and_value_tuple=True) # doctest: +SKIP # Often will be... (0, 'choice one')
[ "Generate", "a", "non", "-", "uniform", "random", "choice", "based", "on", "a", "list", "of", "option", "tuples", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L465-L524
ajyoon/blur
blur/rand.py
weighted_order
def weighted_order(weights): """ Non-uniformally order a list according to weighted priorities. ``weights`` is a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, adding items to the end of the list as they are picked. Higher strength weights will have a higher chance of appearing near the beginning of the output list. A list weights with uniform strengths is equivalent to calling ``random.shuffle()`` on the list of items. If any weight strengths are ``<= 0``, a ``ProbabilityUndefinedError`` is be raised. Passing an empty list will return an empty list. Args: weights (list): a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, appending items to the output list as they are picked. Returns: list: the newly ordered list Raises: ProbabilityUndefinedError: if any weight's strength is below 0. Example: >>> weights = [('Probably Earlier', 100), ... ('Probably Middle', 20), ... ('Probably Last', 1)] >>> weighted_order(weights) # doctest: +SKIP ['Probably Earlier', 'Probably Middle', 'Probably Last'] """ if not len(weights): return [] if any(w[1] <= 0 for w in weights): raise ProbabilityUndefinedError( 'All weight values must be greater than 0.') working_list = weights[:] output_list = [] while working_list: picked_item = weighted_choice(working_list, as_index_and_value_tuple=True) output_list.append(picked_item[1]) del working_list[picked_item[0]] return output_list
python
def weighted_order(weights): """ Non-uniformally order a list according to weighted priorities. ``weights`` is a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, adding items to the end of the list as they are picked. Higher strength weights will have a higher chance of appearing near the beginning of the output list. A list weights with uniform strengths is equivalent to calling ``random.shuffle()`` on the list of items. If any weight strengths are ``<= 0``, a ``ProbabilityUndefinedError`` is be raised. Passing an empty list will return an empty list. Args: weights (list): a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, appending items to the output list as they are picked. Returns: list: the newly ordered list Raises: ProbabilityUndefinedError: if any weight's strength is below 0. Example: >>> weights = [('Probably Earlier', 100), ... ('Probably Middle', 20), ... ('Probably Last', 1)] >>> weighted_order(weights) # doctest: +SKIP ['Probably Earlier', 'Probably Middle', 'Probably Last'] """ if not len(weights): return [] if any(w[1] <= 0 for w in weights): raise ProbabilityUndefinedError( 'All weight values must be greater than 0.') working_list = weights[:] output_list = [] while working_list: picked_item = weighted_choice(working_list, as_index_and_value_tuple=True) output_list.append(picked_item[1]) del working_list[picked_item[0]] return output_list
[ "def", "weighted_order", "(", "weights", ")", ":", "if", "not", "len", "(", "weights", ")", ":", "return", "[", "]", "if", "any", "(", "w", "[", "1", "]", "<=", "0", "for", "w", "in", "weights", ")", ":", "raise", "ProbabilityUndefinedError", "(", ...
Non-uniformally order a list according to weighted priorities. ``weights`` is a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, adding items to the end of the list as they are picked. Higher strength weights will have a higher chance of appearing near the beginning of the output list. A list weights with uniform strengths is equivalent to calling ``random.shuffle()`` on the list of items. If any weight strengths are ``<= 0``, a ``ProbabilityUndefinedError`` is be raised. Passing an empty list will return an empty list. Args: weights (list): a list of tuples of form ``(Any, float or int)`` corresponding to ``(item, strength)``. The output list is constructed by repeatedly calling ``weighted_choice()`` on the weights, appending items to the output list as they are picked. Returns: list: the newly ordered list Raises: ProbabilityUndefinedError: if any weight's strength is below 0. Example: >>> weights = [('Probably Earlier', 100), ... ('Probably Middle', 20), ... ('Probably Last', 1)] >>> weighted_order(weights) # doctest: +SKIP ['Probably Earlier', 'Probably Middle', 'Probably Last']
[ "Non", "-", "uniformally", "order", "a", "list", "according", "to", "weighted", "priorities", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L527-L579
gr33ndata/dysl
dysl/social.py
SocialLM.tokenize
def tokenize(cls, text, mode='c'): """ Converts text into tokens :param text: string to be tokenized :param mode: split into chars (c) or words (w) """ if mode == 'c': return [ch for ch in text] else: return [w for w in text.split()]
python
def tokenize(cls, text, mode='c'): """ Converts text into tokens :param text: string to be tokenized :param mode: split into chars (c) or words (w) """ if mode == 'c': return [ch for ch in text] else: return [w for w in text.split()]
[ "def", "tokenize", "(", "cls", ",", "text", ",", "mode", "=", "'c'", ")", ":", "if", "mode", "==", "'c'", ":", "return", "[", "ch", "for", "ch", "in", "text", "]", "else", ":", "return", "[", "w", "for", "w", "in", "text", ".", "split", "(", ...
Converts text into tokens :param text: string to be tokenized :param mode: split into chars (c) or words (w)
[ "Converts", "text", "into", "tokens" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L15-L24
gr33ndata/dysl
dysl/social.py
SocialLM.karbasa
def karbasa(self, result): """ Finding if class probabilities are close to eachother Ratio of the distance between 1st and 2nd class, to the distance between 1st and last class. :param result: The dict returned by LM.calculate() """ probs = result['all_probs'] probs.sort() return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
python
def karbasa(self, result): """ Finding if class probabilities are close to eachother Ratio of the distance between 1st and 2nd class, to the distance between 1st and last class. :param result: The dict returned by LM.calculate() """ probs = result['all_probs'] probs.sort() return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
[ "def", "karbasa", "(", "self", ",", "result", ")", ":", "probs", "=", "result", "[", "'all_probs'", "]", "probs", ".", "sort", "(", ")", "return", "float", "(", "probs", "[", "1", "]", "-", "probs", "[", "0", "]", ")", "/", "float", "(", "probs",...
Finding if class probabilities are close to eachother Ratio of the distance between 1st and 2nd class, to the distance between 1st and last class. :param result: The dict returned by LM.calculate()
[ "Finding", "if", "class", "probabilities", "are", "close", "to", "eachother", "Ratio", "of", "the", "distance", "between", "1st", "and", "2nd", "class", "to", "the", "distance", "between", "1st", "and", "last", "class", "." ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L26-L35
gr33ndata/dysl
dysl/social.py
SocialLM.classify
def classify(self, text=u''): """ Predicts the Language of a given text. :param text: Unicode text to be classified. """ result = self.calculate(doc_terms=self.tokenize(text)) #return (result['calc_id'], result) return (result['calc_id'], self.karbasa(result))
python
def classify(self, text=u''): """ Predicts the Language of a given text. :param text: Unicode text to be classified. """ result = self.calculate(doc_terms=self.tokenize(text)) #return (result['calc_id'], result) return (result['calc_id'], self.karbasa(result))
[ "def", "classify", "(", "self", ",", "text", "=", "u''", ")", ":", "result", "=", "self", ".", "calculate", "(", "doc_terms", "=", "self", ".", "tokenize", "(", "text", ")", ")", "#return (result['calc_id'], result)", "return", "(", "result", "[", "'calc_i...
Predicts the Language of a given text. :param text: Unicode text to be classified.
[ "Predicts", "the", "Language", "of", "a", "given", "text", "." ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L37-L44
gr33ndata/dysl
dysl/social.py
SocialLM.is_mention_line
def is_mention_line(cls, word): """ Detects links and mentions :param word: Token to be evaluated """ if word.startswith('@'): return True elif word.startswith('http://'): return True elif word.startswith('https://'): return True else: return False
python
def is_mention_line(cls, word): """ Detects links and mentions :param word: Token to be evaluated """ if word.startswith('@'): return True elif word.startswith('http://'): return True elif word.startswith('https://'): return True else: return False
[ "def", "is_mention_line", "(", "cls", ",", "word", ")", ":", "if", "word", ".", "startswith", "(", "'@'", ")", ":", "return", "True", "elif", "word", ".", "startswith", "(", "'http://'", ")", ":", "return", "True", "elif", "word", ".", "startswith", "(...
Detects links and mentions :param word: Token to be evaluated
[ "Detects", "links", "and", "mentions" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L47-L59
gr33ndata/dysl
dysl/social.py
SocialLM.strip_mentions_links
def strip_mentions_links(self, text): """ Strips Mentions and Links :param text: Text to be stripped from. """ #print 'Before:', text new_text = [word for word in text.split() if not self.is_mention_line(word)] #print 'After:', u' '.join(new_text) return u' '.join(new_text)
python
def strip_mentions_links(self, text): """ Strips Mentions and Links :param text: Text to be stripped from. """ #print 'Before:', text new_text = [word for word in text.split() if not self.is_mention_line(word)] #print 'After:', u' '.join(new_text) return u' '.join(new_text)
[ "def", "strip_mentions_links", "(", "self", ",", "text", ")", ":", "#print 'Before:', text", "new_text", "=", "[", "word", "for", "word", "in", "text", ".", "split", "(", ")", "if", "not", "self", ".", "is_mention_line", "(", "word", ")", "]", "#print 'Aft...
Strips Mentions and Links :param text: Text to be stripped from.
[ "Strips", "Mentions", "and", "Links" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L61-L69
gr33ndata/dysl
dysl/social.py
SocialLM.normalize
def normalize(self, text): """ Normalizes text. Converts to lowercase, Unicode NFC normalization and removes mentions and links :param text: Text to be normalized. """ #print 'Normalize...\n' text = text.lower() text = unicodedata.normalize('NFC', text) text = self.strip_mentions_links(text) return text
python
def normalize(self, text): """ Normalizes text. Converts to lowercase, Unicode NFC normalization and removes mentions and links :param text: Text to be normalized. """ #print 'Normalize...\n' text = text.lower() text = unicodedata.normalize('NFC', text) text = self.strip_mentions_links(text) return text
[ "def", "normalize", "(", "self", ",", "text", ")", ":", "#print 'Normalize...\\n'", "text", "=", "text", ".", "lower", "(", ")", "text", "=", "unicodedata", ".", "normalize", "(", "'NFC'", ",", "text", ")", "text", "=", "self", ".", "strip_mentions_links",...
Normalizes text. Converts to lowercase, Unicode NFC normalization and removes mentions and links :param text: Text to be normalized.
[ "Normalizes", "text", ".", "Converts", "to", "lowercase", "Unicode", "NFC", "normalization", "and", "removes", "mentions", "and", "links" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L71-L83
appdotnet/ADNpy
adnpy/stream.py
StreamListener.on_data
def on_data(self, raw_data): """Called when raw data is received from connection. Override this method if you wish to manually handle the stream data. Return False to stop stream and close connection. """ data = json.loads(raw_data) message_type = data['meta'].get('type') prepare_method = 'prepare_%s' % (message_type) args = getattr(self, prepare_method, self.prepare_fallback)(data.get('data')) method_name = 'on_%s' % (message_type,) func = getattr(self, method_name, self.on_fallback) func(*args, meta=StreamingMeta.from_response_data(data.get('meta'), self.api))
python
def on_data(self, raw_data): """Called when raw data is received from connection. Override this method if you wish to manually handle the stream data. Return False to stop stream and close connection. """ data = json.loads(raw_data) message_type = data['meta'].get('type') prepare_method = 'prepare_%s' % (message_type) args = getattr(self, prepare_method, self.prepare_fallback)(data.get('data')) method_name = 'on_%s' % (message_type,) func = getattr(self, method_name, self.on_fallback) func(*args, meta=StreamingMeta.from_response_data(data.get('meta'), self.api))
[ "def", "on_data", "(", "self", ",", "raw_data", ")", ":", "data", "=", "json", ".", "loads", "(", "raw_data", ")", "message_type", "=", "data", "[", "'meta'", "]", ".", "get", "(", "'type'", ")", "prepare_method", "=", "'prepare_%s'", "%", "(", "messag...
Called when raw data is received from connection. Override this method if you wish to manually handle the stream data. Return False to stop stream and close connection.
[ "Called", "when", "raw", "data", "is", "received", "from", "connection", "." ]
train
https://github.com/appdotnet/ADNpy/blob/aedb181cd0d616257fac7b3676ac7d7211336118/adnpy/stream.py#L110-L125
benley/butcher
butcher/targets/virtual.py
VirtualTarget.output_files
def output_files(self): """Returns all output files from all of the current module's rules.""" for dep in self.subgraph.successors(self.address): dep_rule = self.subgraph.node[dep]['target_obj'] for out_file in dep_rule.output_files: yield out_file
python
def output_files(self): """Returns all output files from all of the current module's rules.""" for dep in self.subgraph.successors(self.address): dep_rule = self.subgraph.node[dep]['target_obj'] for out_file in dep_rule.output_files: yield out_file
[ "def", "output_files", "(", "self", ")", ":", "for", "dep", "in", "self", ".", "subgraph", ".", "successors", "(", "self", ".", "address", ")", ":", "dep_rule", "=", "self", ".", "subgraph", ".", "node", "[", "dep", "]", "[", "'target_obj'", "]", "fo...
Returns all output files from all of the current module's rules.
[ "Returns", "all", "output", "files", "from", "all", "of", "the", "current", "module", "s", "rules", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/virtual.py#L36-L41
futursolo/magichttp
magichttp/protocols.py
HttpClientProtocol.write_request
async def write_request( self, method: constants.HttpRequestMethod, *, uri: str="/", authority: Optional[str]=None, scheme: Optional[str]=None, headers: Optional[_HeaderType]=None) -> \ "writers.HttpRequestWriter": """ Send next request to the server. """ return await self._delegate.write_request( method, uri=uri, authority=authority, scheme=scheme, headers=headers)
python
async def write_request( self, method: constants.HttpRequestMethod, *, uri: str="/", authority: Optional[str]=None, scheme: Optional[str]=None, headers: Optional[_HeaderType]=None) -> \ "writers.HttpRequestWriter": """ Send next request to the server. """ return await self._delegate.write_request( method, uri=uri, authority=authority, scheme=scheme, headers=headers)
[ "async", "def", "write_request", "(", "self", ",", "method", ":", "constants", ".", "HttpRequestMethod", ",", "*", ",", "uri", ":", "str", "=", "\"/\"", ",", "authority", ":", "Optional", "[", "str", "]", "=", "None", ",", "scheme", ":", "Optional", "[...
Send next request to the server.
[ "Send", "next", "request", "to", "the", "server", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/protocols.py#L259-L270
scivision/gridaurora
gridaurora/solarangle.py
solarzenithangle
def solarzenithangle(time: datetime, glat: float, glon: float, alt_m: float) -> tuple: """ Input: t: scalar or array of datetime """ time = totime(time) obs = EarthLocation(lat=glat*u.deg, lon=glon*u.deg, height=alt_m*u.m) times = Time(time, scale='ut1') sun = get_sun(times) sunobs = sun.transform_to(AltAz(obstime=times, location=obs)) return 90 - sunobs.alt.degree, sun, sunobs
python
def solarzenithangle(time: datetime, glat: float, glon: float, alt_m: float) -> tuple: """ Input: t: scalar or array of datetime """ time = totime(time) obs = EarthLocation(lat=glat*u.deg, lon=glon*u.deg, height=alt_m*u.m) times = Time(time, scale='ut1') sun = get_sun(times) sunobs = sun.transform_to(AltAz(obstime=times, location=obs)) return 90 - sunobs.alt.degree, sun, sunobs
[ "def", "solarzenithangle", "(", "time", ":", "datetime", ",", "glat", ":", "float", ",", "glon", ":", "float", ",", "alt_m", ":", "float", ")", "->", "tuple", ":", "time", "=", "totime", "(", "time", ")", "obs", "=", "EarthLocation", "(", "lat", "=",...
Input: t: scalar or array of datetime
[ "Input", ":" ]
train
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/solarangle.py#L8-L21
henzk/ape
ape/container_mode/validators/feature_order_validator.py
FeatureOrderValidator.check_order
def check_order(self): """ Performs the check and store the violations in self.violations. :return: boolean indicating the error state """ for feature, info in self.constraints.items(): self._check_feature(feature, info, 'before') self._check_feature(feature, info, 'after') self._check_position(feature, info) return not self.has_errors()
python
def check_order(self): """ Performs the check and store the violations in self.violations. :return: boolean indicating the error state """ for feature, info in self.constraints.items(): self._check_feature(feature, info, 'before') self._check_feature(feature, info, 'after') self._check_position(feature, info) return not self.has_errors()
[ "def", "check_order", "(", "self", ")", ":", "for", "feature", ",", "info", "in", "self", ".", "constraints", ".", "items", "(", ")", ":", "self", ".", "_check_feature", "(", "feature", ",", "info", ",", "'before'", ")", "self", ".", "_check_feature", ...
Performs the check and store the violations in self.violations. :return: boolean indicating the error state
[ "Performs", "the", "check", "and", "store", "the", "violations", "in", "self", ".", "violations", ".", ":", "return", ":", "boolean", "indicating", "the", "error", "state" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/feature_order_validator.py#L25-L36
henzk/ape
ape/container_mode/validators/feature_order_validator.py
FeatureOrderValidator._check_feature
def _check_feature(self, feature, info, mode): """ Private helper method performing the order check. :param feature: the feature to check. :param info: the info dict containing the before and after constraints :param mode: after | before string :return: None """ op = dict( before=operator.gt, after=operator.lt )[mode] feature_pos = self.get_feature_position(feature) if feature_pos is not None: # only proceed if the the feature exists in the current feature list for other in info.get(mode, []): other_pos = self.get_feature_position(other) if other_pos is not None: # only proceed if the the other feature exists in the current feature list if op(feature_pos, other_pos): message = '{feature} (pos {feature_pos}) must be {mode} feature {other} (pos {other_pos}) but isn\'t.'.format( feature=feature, feature_pos=feature_pos, other=other, other_pos=other_pos, mode=mode.upper() ) self.violations.append((feature, message))
python
def _check_feature(self, feature, info, mode): """ Private helper method performing the order check. :param feature: the feature to check. :param info: the info dict containing the before and after constraints :param mode: after | before string :return: None """ op = dict( before=operator.gt, after=operator.lt )[mode] feature_pos = self.get_feature_position(feature) if feature_pos is not None: # only proceed if the the feature exists in the current feature list for other in info.get(mode, []): other_pos = self.get_feature_position(other) if other_pos is not None: # only proceed if the the other feature exists in the current feature list if op(feature_pos, other_pos): message = '{feature} (pos {feature_pos}) must be {mode} feature {other} (pos {other_pos}) but isn\'t.'.format( feature=feature, feature_pos=feature_pos, other=other, other_pos=other_pos, mode=mode.upper() ) self.violations.append((feature, message))
[ "def", "_check_feature", "(", "self", ",", "feature", ",", "info", ",", "mode", ")", ":", "op", "=", "dict", "(", "before", "=", "operator", ".", "gt", ",", "after", "=", "operator", ".", "lt", ")", "[", "mode", "]", "feature_pos", "=", "self", "."...
Private helper method performing the order check. :param feature: the feature to check. :param info: the info dict containing the before and after constraints :param mode: after | before string :return: None
[ "Private", "helper", "method", "performing", "the", "order", "check", ".", ":", "param", "feature", ":", "the", "feature", "to", "check", ".", ":", "param", "info", ":", "the", "info", "dict", "containing", "the", "before", "and", "after", "constraints", "...
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/feature_order_validator.py#L38-L70
henzk/ape
ape/container_mode/validators/feature_order_validator.py
FeatureOrderValidator._check_position
def _check_position(self, feature, info): """ Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return: """ pos = info.get('position') if pos is not None: feature_pos = self.get_feature_position(feature) if feature_pos is not None: if feature_pos != pos: message = '{feature} has a forced position on ({pos}) but is on position {feature_pos}.'.format( feature=feature, pos=pos, feature_pos=feature_pos ) self.violations.append((feature, message))
python
def _check_position(self, feature, info): """ Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return: """ pos = info.get('position') if pos is not None: feature_pos = self.get_feature_position(feature) if feature_pos is not None: if feature_pos != pos: message = '{feature} has a forced position on ({pos}) but is on position {feature_pos}.'.format( feature=feature, pos=pos, feature_pos=feature_pos ) self.violations.append((feature, message))
[ "def", "_check_position", "(", "self", ",", "feature", ",", "info", ")", ":", "pos", "=", "info", ".", "get", "(", "'position'", ")", "if", "pos", "is", "not", "None", ":", "feature_pos", "=", "self", ".", "get_feature_position", "(", "feature", ")", "...
Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return:
[ "Takes", "the", "featur", "and", "the", "info", "dict", "and", "checks", "for", "the", "forced", "position", ":", "param", "feature", ":", ":", "param", "info", ":", ":", "return", ":" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/feature_order_validator.py#L72-L89
thejunglejane/datums
datums/pipeline/__init__.py
ReportPipeline._report
def _report(self, action, key_mapper=mappers._report_key_mapper): '''Return the dictionary of **kwargs with the correct datums attribute names and data types for the top level of the report, and return the nested levels separately. ''' _top_level = [ k for k, v in self.report.items() if not isinstance(v, dict)] _nested_level = [ k for k, v in self.report.items() if isinstance(v, dict)] top_level_dict = {} nested_levels_dict = {} for key in _top_level: try: if key == 'date' or key == 'timestamp': item = mappers._key_type_mapper[key]( str(self.report[key]), **{'ignoretz': True}) else: item = mappers._key_type_mapper[key](str( self.report[key]) if key != 'draft' else self.report[key]) except KeyError: item = self.report[key] finally: try: top_level_dict[key_mapper[key]] = item except KeyError: warnings.warn(''' {0} is not currently supported by datums and will be ignored. Would you consider submitting an issue to add support? https://www.github.com/thejunglejane/datums/issues '''.format(key)) for key in _nested_level: nested_levels_dict[key] = self.report[key] # Add the parent report ID nested_levels_dict[key][ 'reportUniqueIdentifier'] = mappers._key_type_mapper[ 'uniqueIdentifier'](str(self.report['uniqueIdentifier'])) if key == 'placemark': # Add the parent location report UUID nested_levels_dict[key][ 'locationUniqueIdentifier'] = nested_levels_dict[key].pop( 'reportUniqueIdentifier') # Create UUID for altitude report if there is not one and the action # is get_or_create, else delete the altitude report from the nested # levels and warn that it will not be updated if 'uniqueIdentifier' not in nested_levels_dict[key]: if action.__func__.func_name == 'get_or_create': nested_levels_dict[key]['uniqueIdentifier'] = uuid.uuid4() else: del nested_levels_dict[key] warnings.warn(''' No uniqueIdentifier found for AltitudeReport in {0}. Existing altitude report will not be updated. '''.format(self.report['uniqueIdentifier'])) return top_level_dict, nested_levels_dict
python
def _report(self, action, key_mapper=mappers._report_key_mapper): '''Return the dictionary of **kwargs with the correct datums attribute names and data types for the top level of the report, and return the nested levels separately. ''' _top_level = [ k for k, v in self.report.items() if not isinstance(v, dict)] _nested_level = [ k for k, v in self.report.items() if isinstance(v, dict)] top_level_dict = {} nested_levels_dict = {} for key in _top_level: try: if key == 'date' or key == 'timestamp': item = mappers._key_type_mapper[key]( str(self.report[key]), **{'ignoretz': True}) else: item = mappers._key_type_mapper[key](str( self.report[key]) if key != 'draft' else self.report[key]) except KeyError: item = self.report[key] finally: try: top_level_dict[key_mapper[key]] = item except KeyError: warnings.warn(''' {0} is not currently supported by datums and will be ignored. Would you consider submitting an issue to add support? https://www.github.com/thejunglejane/datums/issues '''.format(key)) for key in _nested_level: nested_levels_dict[key] = self.report[key] # Add the parent report ID nested_levels_dict[key][ 'reportUniqueIdentifier'] = mappers._key_type_mapper[ 'uniqueIdentifier'](str(self.report['uniqueIdentifier'])) if key == 'placemark': # Add the parent location report UUID nested_levels_dict[key][ 'locationUniqueIdentifier'] = nested_levels_dict[key].pop( 'reportUniqueIdentifier') # Create UUID for altitude report if there is not one and the action # is get_or_create, else delete the altitude report from the nested # levels and warn that it will not be updated if 'uniqueIdentifier' not in nested_levels_dict[key]: if action.__func__.func_name == 'get_or_create': nested_levels_dict[key]['uniqueIdentifier'] = uuid.uuid4() else: del nested_levels_dict[key] warnings.warn(''' No uniqueIdentifier found for AltitudeReport in {0}. Existing altitude report will not be updated. '''.format(self.report['uniqueIdentifier'])) return top_level_dict, nested_levels_dict
[ "def", "_report", "(", "self", ",", "action", ",", "key_mapper", "=", "mappers", ".", "_report_key_mapper", ")", ":", "_top_level", "=", "[", "k", "for", "k", ",", "v", "in", "self", ".", "report", ".", "items", "(", ")", "if", "not", "isinstance", "...
Return the dictionary of **kwargs with the correct datums attribute names and data types for the top level of the report, and return the nested levels separately.
[ "Return", "the", "dictionary", "of", "**", "kwargs", "with", "the", "correct", "datums", "attribute", "names", "and", "data", "types", "for", "the", "top", "level", "of", "the", "report", "and", "return", "the", "nested", "levels", "separately", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/pipeline/__init__.py#L56-L109
dArignac/pelican-minification
minification/__init__.py
Minification.write_to_file
def write_to_file(path_file, callback): """ Reads the content of the given file, puts the content into the callback and writes the result back to the file. :param path_file: the path to the file :type path_file: str :param callback: the callback function :type callback: function """ try: with open(path_file, 'r+', encoding='utf-8') as f: content = callback(f.read()) f.seek(0) f.write(content) f.truncate() except Exception as e: raise Exception( 'unable to minify file %(file)s, exception was %(exception)r' % { 'file': path_file, 'exception': e, } )
python
def write_to_file(path_file, callback): """ Reads the content of the given file, puts the content into the callback and writes the result back to the file. :param path_file: the path to the file :type path_file: str :param callback: the callback function :type callback: function """ try: with open(path_file, 'r+', encoding='utf-8') as f: content = callback(f.read()) f.seek(0) f.write(content) f.truncate() except Exception as e: raise Exception( 'unable to minify file %(file)s, exception was %(exception)r' % { 'file': path_file, 'exception': e, } )
[ "def", "write_to_file", "(", "path_file", ",", "callback", ")", ":", "try", ":", "with", "open", "(", "path_file", ",", "'r+'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "content", "=", "callback", "(", "f", ".", "read", "(", ")", ")", ...
Reads the content of the given file, puts the content into the callback and writes the result back to the file. :param path_file: the path to the file :type path_file: str :param callback: the callback function :type callback: function
[ "Reads", "the", "content", "of", "the", "given", "file", "puts", "the", "content", "into", "the", "callback", "and", "writes", "the", "result", "back", "to", "the", "file", ".", ":", "param", "path_file", ":", "the", "path", "to", "the", "file", ":", "...
train
https://github.com/dArignac/pelican-minification/blob/8d9b3322fb3961f6496b2519c2303ffa6625f775/minification/__init__.py#L46-L66
jreinhardt/constraining-order
src/constrainingorder/sets.py
Interval.is_disjoint
def is_disjoint(self,other): """ Check whether two Intervals are disjoint. :param Interval other: The Interval to check disjointedness with. """ if self.is_empty() or other.is_empty(): return True if self.bounds[0] < other.bounds[0]: i1,i2 = self,other elif self.bounds[0] > other.bounds[0]: i2,i1 = self,other else: #coincident lower bounds if self.is_discrete() and not other.included[0]: return True elif other.is_discrete() and not self.included[0]: return True else: return False return not i2.bounds[0] in i1
python
def is_disjoint(self,other): """ Check whether two Intervals are disjoint. :param Interval other: The Interval to check disjointedness with. """ if self.is_empty() or other.is_empty(): return True if self.bounds[0] < other.bounds[0]: i1,i2 = self,other elif self.bounds[0] > other.bounds[0]: i2,i1 = self,other else: #coincident lower bounds if self.is_discrete() and not other.included[0]: return True elif other.is_discrete() and not self.included[0]: return True else: return False return not i2.bounds[0] in i1
[ "def", "is_disjoint", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_empty", "(", ")", "or", "other", ".", "is_empty", "(", ")", ":", "return", "True", "if", "self", ".", "bounds", "[", "0", "]", "<", "other", ".", "bounds", "[", "0"...
Check whether two Intervals are disjoint. :param Interval other: The Interval to check disjointedness with.
[ "Check", "whether", "two", "Intervals", "are", "disjoint", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L111-L133
jreinhardt/constraining-order
src/constrainingorder/sets.py
Interval.intersection
def intersection(self,other): """ Return a new Interval with the intersection of the two intervals, i.e. all elements that are in both self and other. :param Interval other: Interval to intersect with :rtype: Interval """ if self.bounds[0] < other.bounds[0]: i1,i2 = self,other else: i2,i1 = self,other if self.is_disjoint(other): return Interval((1,0),(True,True)) bounds = [None,None] included = [None,None] #sets are not disjoint, so i2.bounds[0] in i1: bounds[0] = i2.bounds[0] included[0] = i2.included[0] if i2.bounds[1] in i1: bounds[1] = i2.bounds[1] included[1] = i2.included[1] else: bounds[1] = i1.bounds[1] included[1] = i1.included[1] return Interval(bounds,included)
python
def intersection(self,other): """ Return a new Interval with the intersection of the two intervals, i.e. all elements that are in both self and other. :param Interval other: Interval to intersect with :rtype: Interval """ if self.bounds[0] < other.bounds[0]: i1,i2 = self,other else: i2,i1 = self,other if self.is_disjoint(other): return Interval((1,0),(True,True)) bounds = [None,None] included = [None,None] #sets are not disjoint, so i2.bounds[0] in i1: bounds[0] = i2.bounds[0] included[0] = i2.included[0] if i2.bounds[1] in i1: bounds[1] = i2.bounds[1] included[1] = i2.included[1] else: bounds[1] = i1.bounds[1] included[1] = i1.included[1] return Interval(bounds,included)
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "if", "self", ".", "bounds", "[", "0", "]", "<", "other", ".", "bounds", "[", "0", "]", ":", "i1", ",", "i2", "=", "self", ",", "other", "else", ":", "i2", ",", "i1", "=", "self", ",...
Return a new Interval with the intersection of the two intervals, i.e. all elements that are in both self and other. :param Interval other: Interval to intersect with :rtype: Interval
[ "Return", "a", "new", "Interval", "with", "the", "intersection", "of", "the", "two", "intervals", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "both", "self", "and", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L209-L238
jreinhardt/constraining-order
src/constrainingorder/sets.py
Interval.is_empty
def is_empty(self): """ Check whether this interval is empty. :rtype: bool """ if self.bounds[1] < self.bounds[0]: return True if self.bounds[1] == self.bounds[0]: return not (self.included[0] and self.included[1])
python
def is_empty(self): """ Check whether this interval is empty. :rtype: bool """ if self.bounds[1] < self.bounds[0]: return True if self.bounds[1] == self.bounds[0]: return not (self.included[0] and self.included[1])
[ "def", "is_empty", "(", "self", ")", ":", "if", "self", ".", "bounds", "[", "1", "]", "<", "self", ".", "bounds", "[", "0", "]", ":", "return", "True", "if", "self", ".", "bounds", "[", "1", "]", "==", "self", ".", "bounds", "[", "0", "]", ":...
Check whether this interval is empty. :rtype: bool
[ "Check", "whether", "this", "interval", "is", "empty", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L240-L249
jreinhardt/constraining-order
src/constrainingorder/sets.py
Interval.is_discrete
def is_discrete(self): """ Check whether this interval contains exactly one number :rtype: bool """ return self.bounds[1] == self.bounds[0] and\ self.included == (True,True)
python
def is_discrete(self): """ Check whether this interval contains exactly one number :rtype: bool """ return self.bounds[1] == self.bounds[0] and\ self.included == (True,True)
[ "def", "is_discrete", "(", "self", ")", ":", "return", "self", ".", "bounds", "[", "1", "]", "==", "self", ".", "bounds", "[", "0", "]", "and", "self", ".", "included", "==", "(", "True", ",", "True", ")" ]
Check whether this interval contains exactly one number :rtype: bool
[ "Check", "whether", "this", "interval", "contains", "exactly", "one", "number" ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L251-L258
jreinhardt/constraining-order
src/constrainingorder/sets.py
IntervalSet.iter_members
def iter_members(self): """ Iterate over all elements of the set. :raises ValueError: if self is a set of everything """ if not self.is_discrete(): raise ValueError("non-discrete IntervalSet can not be iterated") for i in self.ints: yield i.get_point()
python
def iter_members(self): """ Iterate over all elements of the set. :raises ValueError: if self is a set of everything """ if not self.is_discrete(): raise ValueError("non-discrete IntervalSet can not be iterated") for i in self.ints: yield i.get_point()
[ "def", "iter_members", "(", "self", ")", ":", "if", "not", "self", ".", "is_discrete", "(", ")", ":", "raise", "ValueError", "(", "\"non-discrete IntervalSet can not be iterated\"", ")", "for", "i", "in", "self", ".", "ints", ":", "yield", "i", ".", "get_poi...
Iterate over all elements of the set. :raises ValueError: if self is a set of everything
[ "Iterate", "over", "all", "elements", "of", "the", "set", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L370-L379
jreinhardt/constraining-order
src/constrainingorder/sets.py
IntervalSet.intersection
def intersection(self,other): """ Return a new IntervalSet with the intersection of the two sets, i.e. all elements that are both in self and other. :param IntervalSet other: Set to intersect with :rtype: IntervalSet """ res = [] for i1 in self.ints: for i2 in other.ints: res.append(i1.intersection(i2)) return IntervalSet(res)
python
def intersection(self,other): """ Return a new IntervalSet with the intersection of the two sets, i.e. all elements that are both in self and other. :param IntervalSet other: Set to intersect with :rtype: IntervalSet """ res = [] for i1 in self.ints: for i2 in other.ints: res.append(i1.intersection(i2)) return IntervalSet(res)
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "res", "=", "[", "]", "for", "i1", "in", "self", ".", "ints", ":", "for", "i2", "in", "other", ".", "ints", ":", "res", ".", "append", "(", "i1", ".", "intersection", "(", "i2", ")", "...
Return a new IntervalSet with the intersection of the two sets, i.e. all elements that are both in self and other. :param IntervalSet other: Set to intersect with :rtype: IntervalSet
[ "Return", "a", "new", "IntervalSet", "with", "the", "intersection", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "both", "in", "self", "and", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L381-L394
jreinhardt/constraining-order
src/constrainingorder/sets.py
IntervalSet.difference
def difference(self,other): """ Return a new IntervalSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param IntervalSet other: Set to subtract :rtype: IntervalSet """ res = IntervalSet.everything() for j in other.ints: tmp = [] for i in self.ints: tmp.extend(i._difference(j)) res = res.intersection(IntervalSet(tmp)) return res
python
def difference(self,other): """ Return a new IntervalSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param IntervalSet other: Set to subtract :rtype: IntervalSet """ res = IntervalSet.everything() for j in other.ints: tmp = [] for i in self.ints: tmp.extend(i._difference(j)) res = res.intersection(IntervalSet(tmp)) return res
[ "def", "difference", "(", "self", ",", "other", ")", ":", "res", "=", "IntervalSet", ".", "everything", "(", ")", "for", "j", "in", "other", ".", "ints", ":", "tmp", "=", "[", "]", "for", "i", "in", "self", ".", "ints", ":", "tmp", ".", "extend",...
Return a new IntervalSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param IntervalSet other: Set to subtract :rtype: IntervalSet
[ "Return", "a", "new", "IntervalSet", "with", "the", "difference", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "self", "but", "not", "in", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L406-L420
jreinhardt/constraining-order
src/constrainingorder/sets.py
DiscreteSet.intersection
def intersection(self,other): """ Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet """ if self.everything: if other.everything: return DiscreteSet() else: return DiscreteSet(other.elements) else: if other.everything: return DiscreteSet(self.elements) else: return DiscreteSet(self.elements.intersection(other.elements))
python
def intersection(self,other): """ Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet """ if self.everything: if other.everything: return DiscreteSet() else: return DiscreteSet(other.elements) else: if other.everything: return DiscreteSet(self.elements) else: return DiscreteSet(self.elements.intersection(other.elements))
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "if", "self", ".", "everything", ":", "if", "other", ".", "everything", ":", "return", "DiscreteSet", "(", ")", "else", ":", "return", "DiscreteSet", "(", "other", ".", "elements", ")", "else", ...
Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet
[ "Return", "a", "new", "DiscreteSet", "with", "the", "intersection", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "both", "self", "and", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L492-L509
jreinhardt/constraining-order
src/constrainingorder/sets.py
DiscreteSet.difference
def difference(self,other): """ Return a new DiscreteSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param DiscreteSet other: Set to subtract :rtype: DiscreteSet :raises ValueError: if self is a set of everything """ if self.everything: raise ValueError("Can not remove from everything") elif other.everything: return DiscreteSet([]) else: return DiscreteSet(self.elements.difference(other.elements))
python
def difference(self,other): """ Return a new DiscreteSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param DiscreteSet other: Set to subtract :rtype: DiscreteSet :raises ValueError: if self is a set of everything """ if self.everything: raise ValueError("Can not remove from everything") elif other.everything: return DiscreteSet([]) else: return DiscreteSet(self.elements.difference(other.elements))
[ "def", "difference", "(", "self", ",", "other", ")", ":", "if", "self", ".", "everything", ":", "raise", "ValueError", "(", "\"Can not remove from everything\"", ")", "elif", "other", ".", "everything", ":", "return", "DiscreteSet", "(", "[", "]", ")", "else...
Return a new DiscreteSet with the difference of the two sets, i.e. all elements that are in self but not in other. :param DiscreteSet other: Set to subtract :rtype: DiscreteSet :raises ValueError: if self is a set of everything
[ "Return", "a", "new", "DiscreteSet", "with", "the", "difference", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "self", "but", "not", "in", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L511-L525
jreinhardt/constraining-order
src/constrainingorder/sets.py
DiscreteSet.union
def union(self,other): """ Return a new DiscreteSet with the union of the two sets, i.e. all elements that are in self or in other. :param DiscreteSet other: Set to unite with :rtype: DiscreteSet """ if self.everything: return self elif other.everything: return other else: return DiscreteSet(self.elements.union(other.elements))
python
def union(self,other): """ Return a new DiscreteSet with the union of the two sets, i.e. all elements that are in self or in other. :param DiscreteSet other: Set to unite with :rtype: DiscreteSet """ if self.everything: return self elif other.everything: return other else: return DiscreteSet(self.elements.union(other.elements))
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "self", ".", "everything", ":", "return", "self", "elif", "other", ".", "everything", ":", "return", "other", "else", ":", "return", "DiscreteSet", "(", "self", ".", "elements", ".", "union", "...
Return a new DiscreteSet with the union of the two sets, i.e. all elements that are in self or in other. :param DiscreteSet other: Set to unite with :rtype: DiscreteSet
[ "Return", "a", "new", "DiscreteSet", "with", "the", "union", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "self", "or", "in", "other", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L527-L540
jreinhardt/constraining-order
src/constrainingorder/sets.py
DiscreteSet.iter_members
def iter_members(self): """ Iterate over all elements of the set. :raises ValueError: if self is a set of everything """ if self.everything: raise ValueError("Can not iterate everything") for coord in sorted(self.elements): yield coord
python
def iter_members(self): """ Iterate over all elements of the set. :raises ValueError: if self is a set of everything """ if self.everything: raise ValueError("Can not iterate everything") for coord in sorted(self.elements): yield coord
[ "def", "iter_members", "(", "self", ")", ":", "if", "self", ".", "everything", ":", "raise", "ValueError", "(", "\"Can not iterate everything\"", ")", "for", "coord", "in", "sorted", "(", "self", ".", "elements", ")", ":", "yield", "coord" ]
Iterate over all elements of the set. :raises ValueError: if self is a set of everything
[ "Iterate", "over", "all", "elements", "of", "the", "set", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L542-L551
jreinhardt/constraining-order
src/constrainingorder/sets.py
Patch.intersection
def intersection(self,other): "intersection with another patch" res = {} if set(self.sets.keys()) != set(other.sets.keys()): raise KeyError('Incompatible patches in intersection') for name,s1 in self.sets.items(): s2 = other.sets[name] res[name] = s1.intersection(s2) return Patch(res)
python
def intersection(self,other): "intersection with another patch" res = {} if set(self.sets.keys()) != set(other.sets.keys()): raise KeyError('Incompatible patches in intersection') for name,s1 in self.sets.items(): s2 = other.sets[name] res[name] = s1.intersection(s2) return Patch(res)
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "res", "=", "{", "}", "if", "set", "(", "self", ".", "sets", ".", "keys", "(", ")", ")", "!=", "set", "(", "other", ".", "sets", ".", "keys", "(", ")", ")", ":", "raise", "KeyError", ...
intersection with another patch
[ "intersection", "with", "another", "patch" ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L602-L610
jreinhardt/constraining-order
src/constrainingorder/sets.py
Patch.iter_points
def iter_points(self): "returns a list of tuples of names and values" if not self.is_discrete(): raise ValueError("Patch is not discrete") names = sorted(self.sets.keys()) icoords = [self.sets[name].iter_members() for name in names] for coordinates in product(*icoords): yield tuple(zip(names,coordinates))
python
def iter_points(self): "returns a list of tuples of names and values" if not self.is_discrete(): raise ValueError("Patch is not discrete") names = sorted(self.sets.keys()) icoords = [self.sets[name].iter_members() for name in names] for coordinates in product(*icoords): yield tuple(zip(names,coordinates))
[ "def", "iter_points", "(", "self", ")", ":", "if", "not", "self", ".", "is_discrete", "(", ")", ":", "raise", "ValueError", "(", "\"Patch is not discrete\"", ")", "names", "=", "sorted", "(", "self", ".", "sets", ".", "keys", "(", ")", ")", "icoords", ...
returns a list of tuples of names and values
[ "returns", "a", "list", "of", "tuples", "of", "names", "and", "values" ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L612-L619
clinicedc/edc-form-label
edc_form_label/form_label_modeladmin_mixin.py
FormLabelModelAdminMixin.update_form_labels
def update_form_labels(self, request=None, obj=None, form=None): """Returns a form obj after modifying form labels referred to in custom_form_labels. """ for form_label in self.custom_form_labels: if form_label.field in form.base_fields: label = form_label.get_form_label( request=request, obj=obj, model=self.model, form=form ) if label: form.base_fields[form_label.field].label = mark_safe(label) return form
python
def update_form_labels(self, request=None, obj=None, form=None): """Returns a form obj after modifying form labels referred to in custom_form_labels. """ for form_label in self.custom_form_labels: if form_label.field in form.base_fields: label = form_label.get_form_label( request=request, obj=obj, model=self.model, form=form ) if label: form.base_fields[form_label.field].label = mark_safe(label) return form
[ "def", "update_form_labels", "(", "self", ",", "request", "=", "None", ",", "obj", "=", "None", ",", "form", "=", "None", ")", ":", "for", "form_label", "in", "self", ".", "custom_form_labels", ":", "if", "form_label", ".", "field", "in", "form", ".", ...
Returns a form obj after modifying form labels referred to in custom_form_labels.
[ "Returns", "a", "form", "obj", "after", "modifying", "form", "labels", "referred", "to", "in", "custom_form_labels", "." ]
train
https://github.com/clinicedc/edc-form-label/blob/9d90807ddf784045b3867d676bee6e384a8e9d71/edc_form_label/form_label_modeladmin_mixin.py#L18-L29
Datary/scrapbag
scrapbag/files.py
open_filezip
def open_filezip(file_path, find_str): """ Open the wrapped file. Read directly from the zip without extracting its content. """ if zipfile.is_zipfile(file_path): zipf = zipfile.ZipFile(file_path) interesting_files = [f for f in zipf.infolist() if find_str in f] for inside_file in interesting_files: yield zipf.open(inside_file)
python
def open_filezip(file_path, find_str): """ Open the wrapped file. Read directly from the zip without extracting its content. """ if zipfile.is_zipfile(file_path): zipf = zipfile.ZipFile(file_path) interesting_files = [f for f in zipf.infolist() if find_str in f] for inside_file in interesting_files: yield zipf.open(inside_file)
[ "def", "open_filezip", "(", "file_path", ",", "find_str", ")", ":", "if", "zipfile", ".", "is_zipfile", "(", "file_path", ")", ":", "zipf", "=", "zipfile", ".", "ZipFile", "(", "file_path", ")", "interesting_files", "=", "[", "f", "for", "f", "in", "zipf...
Open the wrapped file. Read directly from the zip without extracting its content.
[ "Open", "the", "wrapped", "file", ".", "Read", "directly", "from", "the", "zip", "without", "extracting", "its", "content", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L18-L28
Datary/scrapbag
scrapbag/files.py
extract_filezip
def extract_filezip(path_to_file, dest_path, target_zipfiles=None): """ Extract file zip to destiny path folder targeting only some kind of files. """ target_zipfiles = ['.*'] if target_zipfiles is None else target_zipfiles files = [] _, ext = os.path.splitext(path_to_file) if ext == '.zip': file = open(path_to_file, 'rb') with zipfile.ZipFile(file) as zip_file: regexp = '|'.join(target_zipfiles) if target_zipfiles else '.*' search_regex = re.compile(regexp) lista = [m.group() for x in zip_file.namelist() for m in [search_regex.search(x)] if m] for zp_file in lista: try: zip_file.extract(zp_file, dest_path) files.append(os.path.join(dest_path, zp_file)) except Exception as ex: msg = 'Fail to extract {} in {} to {} - {}'.format( zp_file, path_to_file, dest_path, ex) logger.error(msg) file.close() else: logger.warning('Not zipfile passed in args') return files
python
def extract_filezip(path_to_file, dest_path, target_zipfiles=None): """ Extract file zip to destiny path folder targeting only some kind of files. """ target_zipfiles = ['.*'] if target_zipfiles is None else target_zipfiles files = [] _, ext = os.path.splitext(path_to_file) if ext == '.zip': file = open(path_to_file, 'rb') with zipfile.ZipFile(file) as zip_file: regexp = '|'.join(target_zipfiles) if target_zipfiles else '.*' search_regex = re.compile(regexp) lista = [m.group() for x in zip_file.namelist() for m in [search_regex.search(x)] if m] for zp_file in lista: try: zip_file.extract(zp_file, dest_path) files.append(os.path.join(dest_path, zp_file)) except Exception as ex: msg = 'Fail to extract {} in {} to {} - {}'.format( zp_file, path_to_file, dest_path, ex) logger.error(msg) file.close() else: logger.warning('Not zipfile passed in args') return files
[ "def", "extract_filezip", "(", "path_to_file", ",", "dest_path", ",", "target_zipfiles", "=", "None", ")", ":", "target_zipfiles", "=", "[", "'.*'", "]", "if", "target_zipfiles", "is", "None", "else", "target_zipfiles", "files", "=", "[", "]", "_", ",", "ext...
Extract file zip to destiny path folder targeting only some kind of files.
[ "Extract", "file", "zip", "to", "destiny", "path", "folder", "targeting", "only", "some", "kind", "of", "files", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L31-L61
Datary/scrapbag
scrapbag/files.py
download_file
def download_file(url, destination, **kwargs): """ Download file process: - Open the url - Check if it has been downloaded and it hanged. - Download it to the destination folder. Args: :urls: url to take the file. :destionation: place to store the downloaded file. """ web_file = open_remote_url(url, **kwargs) file_size = 0 if not web_file: logger.error( "Remote file not found. Attempted URLs: {}".format(url)) return modified = is_remote_file_modified(web_file, destination) if modified: logger.info("Downloading: " + web_file.url) file_size = copy_remote_file(web_file, destination) else: logger.info("File up-to-date: " + destination) web_file.close() return file_size
python
def download_file(url, destination, **kwargs): """ Download file process: - Open the url - Check if it has been downloaded and it hanged. - Download it to the destination folder. Args: :urls: url to take the file. :destionation: place to store the downloaded file. """ web_file = open_remote_url(url, **kwargs) file_size = 0 if not web_file: logger.error( "Remote file not found. Attempted URLs: {}".format(url)) return modified = is_remote_file_modified(web_file, destination) if modified: logger.info("Downloading: " + web_file.url) file_size = copy_remote_file(web_file, destination) else: logger.info("File up-to-date: " + destination) web_file.close() return file_size
[ "def", "download_file", "(", "url", ",", "destination", ",", "*", "*", "kwargs", ")", ":", "web_file", "=", "open_remote_url", "(", "url", ",", "*", "*", "kwargs", ")", "file_size", "=", "0", "if", "not", "web_file", ":", "logger", ".", "error", "(", ...
Download file process: - Open the url - Check if it has been downloaded and it hanged. - Download it to the destination folder. Args: :urls: url to take the file. :destionation: place to store the downloaded file.
[ "Download", "file", "process", ":", "-", "Open", "the", "url", "-", "Check", "if", "it", "has", "been", "downloaded", "and", "it", "hanged", ".", "-", "Download", "it", "to", "the", "destination", "folder", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L64-L91
Datary/scrapbag
scrapbag/files.py
open_remote_url
def open_remote_url(urls, **kwargs): """Open the url and check that it stores a file. Args: :urls: Endpoint to take the file """ if isinstance(urls, str): urls = [urls] for url in urls: try: web_file = requests.get(url, stream=True, **kwargs) if 'html' in web_file.headers['content-type']: raise ValueError("HTML source file retrieved.") return web_file except Exception as ex: logger.error('Fail to open remote url - {}'.format(ex)) continue
python
def open_remote_url(urls, **kwargs): """Open the url and check that it stores a file. Args: :urls: Endpoint to take the file """ if isinstance(urls, str): urls = [urls] for url in urls: try: web_file = requests.get(url, stream=True, **kwargs) if 'html' in web_file.headers['content-type']: raise ValueError("HTML source file retrieved.") return web_file except Exception as ex: logger.error('Fail to open remote url - {}'.format(ex)) continue
[ "def", "open_remote_url", "(", "urls", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "urls", ",", "str", ")", ":", "urls", "=", "[", "urls", "]", "for", "url", "in", "urls", ":", "try", ":", "web_file", "=", "requests", ".", "get", ...
Open the url and check that it stores a file. Args: :urls: Endpoint to take the file
[ "Open", "the", "url", "and", "check", "that", "it", "stores", "a", "file", ".", "Args", ":", ":", "urls", ":", "Endpoint", "to", "take", "the", "file" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L94-L109
Datary/scrapbag
scrapbag/files.py
is_remote_file_modified
def is_remote_file_modified(web_file, destination): """ Check if online file has been modified. Args: :web_file: online file to check. :destination: path of the offline file to compare. """ try: # check datetime of last modified in file. last_mod = web_file.headers.get('last-modified') if last_mod: web_file_time = time.strptime( web_file.headers.get( 'last-modified'), '%a, %d %b %Y %H:%M:%S %Z') else: web_file_time = time.gmtime() web_file_size = int(web_file.headers.get('content-length', -1)) if os.path.exists(destination): file_time = time.gmtime(os.path.getmtime(destination)) file_size = os.path.getsize(destination) if file_time >= web_file_time and file_size == web_file_size: return False except Exception as ex: msg = ('Fail checking if remote file is modified default returns TRUE' ' - {}'.format(ex)) logger.debug(msg) return True
python
def is_remote_file_modified(web_file, destination): """ Check if online file has been modified. Args: :web_file: online file to check. :destination: path of the offline file to compare. """ try: # check datetime of last modified in file. last_mod = web_file.headers.get('last-modified') if last_mod: web_file_time = time.strptime( web_file.headers.get( 'last-modified'), '%a, %d %b %Y %H:%M:%S %Z') else: web_file_time = time.gmtime() web_file_size = int(web_file.headers.get('content-length', -1)) if os.path.exists(destination): file_time = time.gmtime(os.path.getmtime(destination)) file_size = os.path.getsize(destination) if file_time >= web_file_time and file_size == web_file_size: return False except Exception as ex: msg = ('Fail checking if remote file is modified default returns TRUE' ' - {}'.format(ex)) logger.debug(msg) return True
[ "def", "is_remote_file_modified", "(", "web_file", ",", "destination", ")", ":", "try", ":", "# check datetime of last modified in file.", "last_mod", "=", "web_file", ".", "headers", ".", "get", "(", "'last-modified'", ")", "if", "last_mod", ":", "web_file_time", "...
Check if online file has been modified. Args: :web_file: online file to check. :destination: path of the offline file to compare.
[ "Check", "if", "online", "file", "has", "been", "modified", ".", "Args", ":", ":", "web_file", ":", "online", "file", "to", "check", ".", ":", "destination", ":", "path", "of", "the", "offline", "file", "to", "compare", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L112-L141