id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
236,000
splunk/splunk-sdk-python
splunklib/modularinput/input_definition.py
InputDefinition.parse
def parse(stream): """Parse a stream containing XML into an ``InputDefinition``. :param stream: stream containing XML to parse. :return: definition: an ``InputDefinition`` object. """ definition = InputDefinition() # parse XML from the stream, then get the root node root = ET.parse(stream).getroot() for node in root: if node.tag == "configuration": # get config for each stanza definition.inputs = parse_xml_data(node, "stanza") else: definition.metadata[node.tag] = node.text return definition
python
def parse(stream): definition = InputDefinition() # parse XML from the stream, then get the root node root = ET.parse(stream).getroot() for node in root: if node.tag == "configuration": # get config for each stanza definition.inputs = parse_xml_data(node, "stanza") else: definition.metadata[node.tag] = node.text return definition
[ "def", "parse", "(", "stream", ")", ":", "definition", "=", "InputDefinition", "(", ")", "# parse XML from the stream, then get the root node", "root", "=", "ET", ".", "parse", "(", "stream", ")", ".", "getroot", "(", ")", "for", "node", "in", "root", ":", "...
Parse a stream containing XML into an ``InputDefinition``. :param stream: stream containing XML to parse. :return: definition: an ``InputDefinition`` object.
[ "Parse", "a", "stream", "containing", "XML", "into", "an", "InputDefinition", "." ]
a245a4eeb93b3621730418008e31715912bcdcd8
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/modularinput/input_definition.py#L42-L60
236,001
splunk/splunk-sdk-python
splunklib/searchcommands/environment.py
configure_logging
def configure_logging(logger_name, filename=None): """ Configure logging and return the named logger and the location of the logging configuration file loaded. This function expects a Splunk app directory structure:: <app-root> bin ... default ... local ... This function looks for a logging configuration file at each of these locations, loading the first, if any, logging configuration file that it finds:: local/{name}.logging.conf default/{name}.logging.conf local/logging.conf default/logging.conf The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths in the logging configuration file are relative to *<app-root>*. The current directory is reset before return. You may short circuit the search for a logging configuration file by providing an alternative file location in `path`. Logging configuration files must be in `ConfigParser format`_. #Arguments: :param logger_name: Logger name :type logger_name: bytes, unicode :param filename: Location of an alternative logging configuration file or `None`. :type filename: bytes, unicode or NoneType :returns: The named logger and the location of the logging configuration file loaded. :rtype: tuple .. 
_ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format """ if filename is None: if logger_name is None: probing_paths = [path.join('local', 'logging.conf'), path.join('default', 'logging.conf')] else: probing_paths = [ path.join('local', logger_name + '.logging.conf'), path.join('default', logger_name + '.logging.conf'), path.join('local', 'logging.conf'), path.join('default', 'logging.conf')] for relative_path in probing_paths: configuration_file = path.join(app_root, relative_path) if path.exists(configuration_file): filename = configuration_file break elif not path.isabs(filename): found = False for conf in 'local', 'default': configuration_file = path.join(app_root, conf, filename) if path.exists(configuration_file): filename = configuration_file found = True break if not found: raise ValueError('Logging configuration file "{}" not found in local or default directory'.format(filename)) elif not path.exists(filename): raise ValueError('Logging configuration file "{}" not found'.format(filename)) if filename is not None: global _current_logging_configuration_file filename = path.realpath(filename) if filename != _current_logging_configuration_file: working_directory = getcwd() chdir(app_root) try: fileConfig(filename, {'SPLUNK_HOME': splunk_home}) finally: chdir(working_directory) _current_logging_configuration_file = filename if len(root.handlers) == 0: root.addHandler(StreamHandler()) return None if logger_name is None else getLogger(logger_name), filename
python
def configure_logging(logger_name, filename=None): if filename is None: if logger_name is None: probing_paths = [path.join('local', 'logging.conf'), path.join('default', 'logging.conf')] else: probing_paths = [ path.join('local', logger_name + '.logging.conf'), path.join('default', logger_name + '.logging.conf'), path.join('local', 'logging.conf'), path.join('default', 'logging.conf')] for relative_path in probing_paths: configuration_file = path.join(app_root, relative_path) if path.exists(configuration_file): filename = configuration_file break elif not path.isabs(filename): found = False for conf in 'local', 'default': configuration_file = path.join(app_root, conf, filename) if path.exists(configuration_file): filename = configuration_file found = True break if not found: raise ValueError('Logging configuration file "{}" not found in local or default directory'.format(filename)) elif not path.exists(filename): raise ValueError('Logging configuration file "{}" not found'.format(filename)) if filename is not None: global _current_logging_configuration_file filename = path.realpath(filename) if filename != _current_logging_configuration_file: working_directory = getcwd() chdir(app_root) try: fileConfig(filename, {'SPLUNK_HOME': splunk_home}) finally: chdir(working_directory) _current_logging_configuration_file = filename if len(root.handlers) == 0: root.addHandler(StreamHandler()) return None if logger_name is None else getLogger(logger_name), filename
[ "def", "configure_logging", "(", "logger_name", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "if", "logger_name", "is", "None", ":", "probing_paths", "=", "[", "path", ".", "join", "(", "'local'", ",", "'logging.conf'", ")",...
Configure logging and return the named logger and the location of the logging configuration file loaded. This function expects a Splunk app directory structure:: <app-root> bin ... default ... local ... This function looks for a logging configuration file at each of these locations, loading the first, if any, logging configuration file that it finds:: local/{name}.logging.conf default/{name}.logging.conf local/logging.conf default/logging.conf The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths in the logging configuration file are relative to *<app-root>*. The current directory is reset before return. You may short circuit the search for a logging configuration file by providing an alternative file location in `path`. Logging configuration files must be in `ConfigParser format`_. #Arguments: :param logger_name: Logger name :type logger_name: bytes, unicode :param filename: Location of an alternative logging configuration file or `None`. :type filename: bytes, unicode or NoneType :returns: The named logger and the location of the logging configuration file loaded. :rtype: tuple .. _ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format
[ "Configure", "logging", "and", "return", "the", "named", "logger", "and", "the", "location", "of", "the", "logging", "configuration", "file", "loaded", "." ]
a245a4eeb93b3621730418008e31715912bcdcd8
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/searchcommands/environment.py#L27-L111
236,002
kennethreitz/legit
legit/scm.py
SCMRepo.git_exec
def git_exec(self, command, **kwargs): """Execute git commands""" from .cli import verbose_echo command.insert(0, self.git) if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user verbose = False else: verbose = self.verbose verbose_echo(' '.join(command), verbose, self.fake) if not self.fake: result = self.repo.git.execute(command, **kwargs) else: if 'with_extended_output' in kwargs: result = (0, '', '') else: result = '' return result
python
def git_exec(self, command, **kwargs): from .cli import verbose_echo command.insert(0, self.git) if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user verbose = False else: verbose = self.verbose verbose_echo(' '.join(command), verbose, self.fake) if not self.fake: result = self.repo.git.execute(command, **kwargs) else: if 'with_extended_output' in kwargs: result = (0, '', '') else: result = '' return result
[ "def", "git_exec", "(", "self", ",", "command", ",", "*", "*", "kwargs", ")", ":", "from", ".", "cli", "import", "verbose_echo", "command", ".", "insert", "(", "0", ",", "self", ".", "git", ")", "if", "kwargs", ".", "pop", "(", "'no_verbose'", ",", ...
Execute git commands
[ "Execute", "git", "commands" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L47-L65
236,003
kennethreitz/legit
legit/scm.py
SCMRepo.unstash_index
def unstash_index(self, sync=False, branch=None): """Returns an unstash index if one is available.""" stash_list = self.git_exec(['stash', 'list'], no_verbose=True) if branch is None: branch = self.get_current_branch_name() for stash in stash_list.splitlines(): verb = 'syncing' if sync else 'switching' if ( (('Legit' in stash) and ('On {0}:'.format(branch) in stash) and (verb in stash) ) or (('GitHub' in stash) and ('On {0}:'.format(branch) in stash) and (verb in stash) ) ): return stash[7]
python
def unstash_index(self, sync=False, branch=None): stash_list = self.git_exec(['stash', 'list'], no_verbose=True) if branch is None: branch = self.get_current_branch_name() for stash in stash_list.splitlines(): verb = 'syncing' if sync else 'switching' if ( (('Legit' in stash) and ('On {0}:'.format(branch) in stash) and (verb in stash) ) or (('GitHub' in stash) and ('On {0}:'.format(branch) in stash) and (verb in stash) ) ): return stash[7]
[ "def", "unstash_index", "(", "self", ",", "sync", "=", "False", ",", "branch", "=", "None", ")", ":", "stash_list", "=", "self", ".", "git_exec", "(", "[", "'stash'", ",", "'list'", "]", ",", "no_verbose", "=", "True", ")", "if", "branch", "is", "Non...
Returns an unstash index if one is available.
[ "Returns", "an", "unstash", "index", "if", "one", "is", "available", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L88-L110
236,004
kennethreitz/legit
legit/scm.py
SCMRepo.unstash_it
def unstash_it(self, sync=False): """ Unstashes changes from current branch for branch sync. Requires prior code setting self.stash_index. """ if self.stash_index is not None: return self.git_exec( ['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
python
def unstash_it(self, sync=False): if self.stash_index is not None: return self.git_exec( ['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
[ "def", "unstash_it", "(", "self", ",", "sync", "=", "False", ")", ":", "if", "self", ".", "stash_index", "is", "not", "None", ":", "return", "self", ".", "git_exec", "(", "[", "'stash'", ",", "'pop'", ",", "'stash@{{{0}}}'", ".", "format", "(", "self",...
Unstashes changes from current branch for branch sync. Requires prior code setting self.stash_index.
[ "Unstashes", "changes", "from", "current", "branch", "for", "branch", "sync", ".", "Requires", "prior", "code", "setting", "self", ".", "stash_index", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L118-L125
236,005
kennethreitz/legit
legit/scm.py
SCMRepo.checkout_branch
def checkout_branch(self, branch): """Checks out given branch.""" _, stdout, stderr = self.git_exec( ['checkout', branch], with_extended_output=True) return '\n'.join([stderr, stdout])
python
def checkout_branch(self, branch): _, stdout, stderr = self.git_exec( ['checkout', branch], with_extended_output=True) return '\n'.join([stderr, stdout])
[ "def", "checkout_branch", "(", "self", ",", "branch", ")", ":", "_", ",", "stdout", ",", "stderr", "=", "self", ".", "git_exec", "(", "[", "'checkout'", ",", "branch", "]", ",", "with_extended_output", "=", "True", ")", "return", "'\\n'", ".", "join", ...
Checks out given branch.
[ "Checks", "out", "given", "branch", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L194-L200
236,006
kennethreitz/legit
legit/scm.py
SCMRepo.unpublish_branch
def unpublish_branch(self, branch): """Unpublishes given branch.""" try: return self.git_exec( ['push', self.remote.name, ':{0}'.format(branch)]) except GitCommandError: _, _, log = self.git_exec( ['fetch', self.remote.name, '--prune'], with_extended_output=True) abort('Unpublish failed. Fetching.', log=log, type='unpublish')
python
def unpublish_branch(self, branch): try: return self.git_exec( ['push', self.remote.name, ':{0}'.format(branch)]) except GitCommandError: _, _, log = self.git_exec( ['fetch', self.remote.name, '--prune'], with_extended_output=True) abort('Unpublish failed. Fetching.', log=log, type='unpublish')
[ "def", "unpublish_branch", "(", "self", ",", "branch", ")", ":", "try", ":", "return", "self", ".", "git_exec", "(", "[", "'push'", ",", "self", ".", "remote", ".", "name", ",", "':{0}'", ".", "format", "(", "branch", ")", "]", ")", "except", "GitCom...
Unpublishes given branch.
[ "Unpublishes", "given", "branch", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L202-L212
236,007
kennethreitz/legit
legit/scm.py
SCMRepo.undo
def undo(self, hard=False): """Makes last commit not exist""" if not self.fake: return self.repo.git.reset('HEAD^', working_tree=hard) else: click.echo(crayons.red('Faked! >>> git reset {}{}' .format('--hard ' if hard else '', 'HEAD^'))) return 0
python
def undo(self, hard=False): if not self.fake: return self.repo.git.reset('HEAD^', working_tree=hard) else: click.echo(crayons.red('Faked! >>> git reset {}{}' .format('--hard ' if hard else '', 'HEAD^'))) return 0
[ "def", "undo", "(", "self", ",", "hard", "=", "False", ")", ":", "if", "not", "self", ".", "fake", ":", "return", "self", ".", "repo", ".", "git", ".", "reset", "(", "'HEAD^'", ",", "working_tree", "=", "hard", ")", "else", ":", "click", ".", "ec...
Makes last commit not exist
[ "Makes", "last", "commit", "not", "exist" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L220-L228
236,008
kennethreitz/legit
legit/scm.py
SCMRepo.get_branches
def get_branches(self, local=True, remote_branches=True): """Returns a list of local and remote branches.""" if not self.repo.remotes: remote_branches = False branches = [] if remote_branches: # Remote refs. try: for b in self.remote.refs: name = '/'.join(b.name.split('/')[1:]) if name not in legit_settings.forbidden_branches: branches.append(Branch(name, is_published=True)) except (IndexError, AssertionError): pass if local: # Local refs. for b in [h.name for h in self.repo.heads]: if (not remote_branches) or (b not in [br.name for br in branches]): if b not in legit_settings.forbidden_branches: branches.append(Branch(b, is_published=False)) return sorted(branches, key=attrgetter('name'))
python
def get_branches(self, local=True, remote_branches=True): if not self.repo.remotes: remote_branches = False branches = [] if remote_branches: # Remote refs. try: for b in self.remote.refs: name = '/'.join(b.name.split('/')[1:]) if name not in legit_settings.forbidden_branches: branches.append(Branch(name, is_published=True)) except (IndexError, AssertionError): pass if local: # Local refs. for b in [h.name for h in self.repo.heads]: if (not remote_branches) or (b not in [br.name for br in branches]): if b not in legit_settings.forbidden_branches: branches.append(Branch(b, is_published=False)) return sorted(branches, key=attrgetter('name'))
[ "def", "get_branches", "(", "self", ",", "local", "=", "True", ",", "remote_branches", "=", "True", ")", ":", "if", "not", "self", ".", "repo", ".", "remotes", ":", "remote_branches", "=", "False", "branches", "=", "[", "]", "if", "remote_branches", ":",...
Returns a list of local and remote branches.
[ "Returns", "a", "list", "of", "local", "and", "remote", "branches", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L289-L318
236,009
kennethreitz/legit
legit/scm.py
SCMRepo.display_available_branches
def display_available_branches(self): """Displays available branches.""" if not self.repo.remotes: remote_branches = False else: remote_branches = True branches = self.get_branches(local=True, remote_branches=remote_branches) if not branches: click.echo(crayons.red('No branches available')) return branch_col = len(max([b.name for b in branches], key=len)) + 1 for branch in branches: try: branch_is_selected = (branch.name == self.get_current_branch_name()) except TypeError: branch_is_selected = False marker = '*' if branch_is_selected else ' ' color = colored.green if branch_is_selected else colored.yellow pub = '(published)' if branch.is_published else '(unpublished)' click.echo(columns( [colored.red(marker), 2], [color(branch.name, bold=True), branch_col], [black(pub), 14] ))
python
def display_available_branches(self): if not self.repo.remotes: remote_branches = False else: remote_branches = True branches = self.get_branches(local=True, remote_branches=remote_branches) if not branches: click.echo(crayons.red('No branches available')) return branch_col = len(max([b.name for b in branches], key=len)) + 1 for branch in branches: try: branch_is_selected = (branch.name == self.get_current_branch_name()) except TypeError: branch_is_selected = False marker = '*' if branch_is_selected else ' ' color = colored.green if branch_is_selected else colored.yellow pub = '(published)' if branch.is_published else '(unpublished)' click.echo(columns( [colored.red(marker), 2], [color(branch.name, bold=True), branch_col], [black(pub), 14] ))
[ "def", "display_available_branches", "(", "self", ")", ":", "if", "not", "self", ".", "repo", ".", "remotes", ":", "remote_branches", "=", "False", "else", ":", "remote_branches", "=", "True", "branches", "=", "self", ".", "get_branches", "(", "local", "=", ...
Displays available branches.
[ "Displays", "available", "branches", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L326-L356
236,010
kennethreitz/legit
legit/utils.py
status_log
def status_log(func, message, *args, **kwargs): """Emits header message, executes a callable, and echoes the return strings.""" click.echo(message) log = func(*args, **kwargs) if log: out = [] for line in log.split('\n'): if not line.startswith('#'): out.append(line) click.echo(black('\n'.join(out)))
python
def status_log(func, message, *args, **kwargs): click.echo(message) log = func(*args, **kwargs) if log: out = [] for line in log.split('\n'): if not line.startswith('#'): out.append(line) click.echo(black('\n'.join(out)))
[ "def", "status_log", "(", "func", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "click", ".", "echo", "(", "message", ")", "log", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "log", ":", "out", "=", ...
Emits header message, executes a callable, and echoes the return strings.
[ "Emits", "header", "message", "executes", "a", "callable", "and", "echoes", "the", "return", "strings", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L8-L20
236,011
kennethreitz/legit
legit/utils.py
verbose_echo
def verbose_echo(str, verbose=False, fake=False): """Selectively output ``str``, with special formatting if ``fake`` is True""" verbose = fake or verbose if verbose: color = crayons.green prefix = '' if fake: color = crayons.red prefix = 'Faked!' click.echo(color('{} >>> {}'.format(prefix, str)))
python
def verbose_echo(str, verbose=False, fake=False): verbose = fake or verbose if verbose: color = crayons.green prefix = '' if fake: color = crayons.red prefix = 'Faked!' click.echo(color('{} >>> {}'.format(prefix, str)))
[ "def", "verbose_echo", "(", "str", ",", "verbose", "=", "False", ",", "fake", "=", "False", ")", ":", "verbose", "=", "fake", "or", "verbose", "if", "verbose", ":", "color", "=", "crayons", ".", "green", "prefix", "=", "''", "if", "fake", ":", "color...
Selectively output ``str``, with special formatting if ``fake`` is True
[ "Selectively", "output", "str", "with", "special", "formatting", "if", "fake", "is", "True" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L23-L33
236,012
kennethreitz/legit
legit/utils.py
output_aliases
def output_aliases(aliases): """Display git aliases""" for alias in aliases: cmd = '!legit ' + alias click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))
python
def output_aliases(aliases): for alias in aliases: cmd = '!legit ' + alias click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))
[ "def", "output_aliases", "(", "aliases", ")", ":", "for", "alias", "in", "aliases", ":", "cmd", "=", "'!legit '", "+", "alias", "click", ".", "echo", "(", "columns", "(", "[", "colored", ".", "yellow", "(", "'git '", "+", "alias", ")", ",", "20", "]"...
Display git aliases
[ "Display", "git", "aliases" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L36-L40
236,013
kennethreitz/legit
legit/cli.py
cli
def cli(ctx, verbose, fake, install, uninstall, config): """legit command line interface""" # Create a repo object and remember it as as the context object. From # this point onwards other commands can refer to it by using the # @pass_scm decorator. ctx.obj = SCMRepo() ctx.obj.fake = fake ctx.obj.verbose = fake or verbose if install: do_install(ctx, verbose, fake) ctx.exit() elif uninstall: do_uninstall(ctx, verbose, fake) ctx.exit() elif config: do_edit_settings(fake) ctx.exit() else: if ctx.invoked_subcommand is None: # Display help to user if no commands were passed. click.echo(format_help(ctx.get_help()))
python
def cli(ctx, verbose, fake, install, uninstall, config): # Create a repo object and remember it as as the context object. From # this point onwards other commands can refer to it by using the # @pass_scm decorator. ctx.obj = SCMRepo() ctx.obj.fake = fake ctx.obj.verbose = fake or verbose if install: do_install(ctx, verbose, fake) ctx.exit() elif uninstall: do_uninstall(ctx, verbose, fake) ctx.exit() elif config: do_edit_settings(fake) ctx.exit() else: if ctx.invoked_subcommand is None: # Display help to user if no commands were passed. click.echo(format_help(ctx.get_help()))
[ "def", "cli", "(", "ctx", ",", "verbose", ",", "fake", ",", "install", ",", "uninstall", ",", "config", ")", ":", "# Create a repo object and remember it as as the context object. From", "# this point onwards other commands can refer to it by using the", "# @pass_scm decorator."...
legit command line interface
[ "legit", "command", "line", "interface" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L82-L103
236,014
kennethreitz/legit
legit/cli.py
switch
def switch(scm, to_branch, verbose, fake): """Switches from one branch to another, safely stashing and restoring local changes. """ scm.fake = fake scm.verbose = fake or verbose scm.repo_check() if to_branch is None: scm.display_available_branches() raise click.BadArgumentUsage('Please specify a branch to switch to') scm.stash_log() status_log(scm.checkout_branch, 'Switching to {0}.'.format( crayons.yellow(to_branch)), to_branch) scm.unstash_log()
python
def switch(scm, to_branch, verbose, fake): scm.fake = fake scm.verbose = fake or verbose scm.repo_check() if to_branch is None: scm.display_available_branches() raise click.BadArgumentUsage('Please specify a branch to switch to') scm.stash_log() status_log(scm.checkout_branch, 'Switching to {0}.'.format( crayons.yellow(to_branch)), to_branch) scm.unstash_log()
[ "def", "switch", "(", "scm", ",", "to_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", ")", "if", "to_branch", "is", "None", ":", "s...
Switches from one branch to another, safely stashing and restoring local changes.
[ "Switches", "from", "one", "branch", "to", "another", "safely", "stashing", "and", "restoring", "local", "changes", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L111-L126
236,015
kennethreitz/legit
legit/cli.py
sync
def sync(ctx, scm, to_branch, verbose, fake): """Stashes unstaged changes, Fetches remote data, Performs smart pull+merge, Pushes local commits up, and Unstashes changes. Defaults to current branch. """ scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) if to_branch: # Optional branch specifier. branch = scm.fuzzy_match_branch(to_branch) if branch: is_external = True original_branch = scm.get_current_branch_name() else: raise click.BadArgumentUsage( "Branch {0} does not exist. Use an existing branch." .format(crayons.yellow(branch))) else: # Sync current branch. branch = scm.get_current_branch_name() is_external = False if branch in scm.get_branch_names(local=False): if is_external: ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake) scm.stash_log(sync=True) status_log(scm.smart_pull, 'Pulling commits from the server.') status_log(scm.push, 'Pushing commits to the server.', branch) scm.unstash_log(sync=True) if is_external: ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake) else: raise click.BadArgumentUsage( "Branch {0} is not published. Publish before syncing." .format(crayons.yellow(branch)))
python
def sync(ctx, scm, to_branch, verbose, fake): scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) if to_branch: # Optional branch specifier. branch = scm.fuzzy_match_branch(to_branch) if branch: is_external = True original_branch = scm.get_current_branch_name() else: raise click.BadArgumentUsage( "Branch {0} does not exist. Use an existing branch." .format(crayons.yellow(branch))) else: # Sync current branch. branch = scm.get_current_branch_name() is_external = False if branch in scm.get_branch_names(local=False): if is_external: ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake) scm.stash_log(sync=True) status_log(scm.smart_pull, 'Pulling commits from the server.') status_log(scm.push, 'Pushing commits to the server.', branch) scm.unstash_log(sync=True) if is_external: ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake) else: raise click.BadArgumentUsage( "Branch {0} is not published. Publish before syncing." .format(crayons.yellow(branch)))
[ "def", "sync", "(", "ctx", ",", "scm", ",", "to_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", "require_remote", "=", "True", ")", ...
Stashes unstaged changes, Fetches remote data, Performs smart pull+merge, Pushes local commits up, and Unstashes changes. Defaults to current branch.
[ "Stashes", "unstaged", "changes", "Fetches", "remote", "data", "Performs", "smart", "pull", "+", "merge", "Pushes", "local", "commits", "up", "and", "Unstashes", "changes", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L135-L173
236,016
kennethreitz/legit
legit/cli.py
publish
def publish(scm, to_branch, verbose, fake): """Pushes an unpublished branch to a remote repository.""" scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) branch = scm.fuzzy_match_branch(to_branch) if not branch: branch = scm.get_current_branch_name() scm.display_available_branches() if to_branch is None: click.echo("Using current branch {0}".format(crayons.yellow(branch))) else: click.echo( "Branch {0} not found, using current branch {1}" .format(crayons.red(to_branch), crayons.yellow(branch))) branch_names = scm.get_branch_names(local=False) if branch in branch_names: raise click.BadArgumentUsage( "Branch {0} is already published. Use a branch that is not published." .format(crayons.yellow(branch))) status_log(scm.publish_branch, 'Publishing {0}.'.format( crayons.yellow(branch)), branch)
python
def publish(scm, to_branch, verbose, fake): scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) branch = scm.fuzzy_match_branch(to_branch) if not branch: branch = scm.get_current_branch_name() scm.display_available_branches() if to_branch is None: click.echo("Using current branch {0}".format(crayons.yellow(branch))) else: click.echo( "Branch {0} not found, using current branch {1}" .format(crayons.red(to_branch), crayons.yellow(branch))) branch_names = scm.get_branch_names(local=False) if branch in branch_names: raise click.BadArgumentUsage( "Branch {0} is already published. Use a branch that is not published." .format(crayons.yellow(branch))) status_log(scm.publish_branch, 'Publishing {0}.'.format( crayons.yellow(branch)), branch)
[ "def", "publish", "(", "scm", ",", "to_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", "require_remote", "=", "True", ")", "branch", ...
Pushes an unpublished branch to a remote repository.
[ "Pushes", "an", "unpublished", "branch", "to", "a", "remote", "repository", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L181-L207
236,017
kennethreitz/legit
legit/cli.py
unpublish
def unpublish(scm, published_branch, verbose, fake): """Removes a published branch from the remote repository.""" scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) branch = scm.fuzzy_match_branch(published_branch) if not branch: scm.display_available_branches() raise click.BadArgumentUsage('Please specify a branch to unpublish') branch_names = scm.get_branch_names(local=False) if branch not in branch_names: raise click.BadArgumentUsage( "Branch {0} is not published. Use a branch that is published." .format(crayons.yellow(branch))) status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format( crayons.yellow(branch)), branch)
python
def unpublish(scm, published_branch, verbose, fake): scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) branch = scm.fuzzy_match_branch(published_branch) if not branch: scm.display_available_branches() raise click.BadArgumentUsage('Please specify a branch to unpublish') branch_names = scm.get_branch_names(local=False) if branch not in branch_names: raise click.BadArgumentUsage( "Branch {0} is not published. Use a branch that is published." .format(crayons.yellow(branch))) status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format( crayons.yellow(branch)), branch)
[ "def", "unpublish", "(", "scm", ",", "published_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", "require_remote", "=", "True", ")", "br...
Removes a published branch from the remote repository.
[ "Removes", "a", "published", "branch", "from", "the", "remote", "repository", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L215-L235
236,018
kennethreitz/legit
legit/cli.py
undo
def undo(scm, verbose, fake, hard): """Removes the last commit from history.""" scm.fake = fake scm.verbose = fake or verbose scm.repo_check() status_log(scm.undo, 'Last commit removed from history.', hard)
python
def undo(scm, verbose, fake, hard): scm.fake = fake scm.verbose = fake or verbose scm.repo_check() status_log(scm.undo, 'Last commit removed from history.', hard)
[ "def", "undo", "(", "scm", ",", "verbose", ",", "fake", ",", "hard", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", ")", "status_log", "(", "scm", ".", "undo", ",", "...
Removes the last commit from history.
[ "Removes", "the", "last", "commit", "from", "history", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L243-L250
236,019
kennethreitz/legit
legit/cli.py
do_install
def do_install(ctx, verbose, fake): """Installs legit git aliases.""" click.echo('The following git aliases will be installed:\n') aliases = cli.list_commands(ctx) output_aliases(aliases) if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake): for alias in aliases: cmd = '!legit ' + alias system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd) verbose_echo(system_command, verbose, fake) if not fake: os.system(system_command) if not fake: click.echo("\nAliases installed.") else: click.echo("\nAliases will not be installed.")
python
def do_install(ctx, verbose, fake): click.echo('The following git aliases will be installed:\n') aliases = cli.list_commands(ctx) output_aliases(aliases) if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake): for alias in aliases: cmd = '!legit ' + alias system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd) verbose_echo(system_command, verbose, fake) if not fake: os.system(system_command) if not fake: click.echo("\nAliases installed.") else: click.echo("\nAliases will not be installed.")
[ "def", "do_install", "(", "ctx", ",", "verbose", ",", "fake", ")", ":", "click", ".", "echo", "(", "'The following git aliases will be installed:\\n'", ")", "aliases", "=", "cli", ".", "list_commands", "(", "ctx", ")", "output_aliases", "(", "aliases", ")", "i...
Installs legit git aliases.
[ "Installs", "legit", "git", "aliases", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L262-L278
236,020
kennethreitz/legit
legit/cli.py
do_uninstall
def do_uninstall(ctx, verbose, fake): """Uninstalls legit git aliases, including deprecated legit sub-commands.""" aliases = cli.list_commands(ctx) # Add deprecated aliases aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall']) for alias in aliases: system_command = 'git config --global --unset-all alias.{0}'.format(alias) verbose_echo(system_command, verbose, fake) if not fake: os.system(system_command) if not fake: click.echo('\nThe following git aliases are uninstalled:\n') output_aliases(aliases)
python
def do_uninstall(ctx, verbose, fake): aliases = cli.list_commands(ctx) # Add deprecated aliases aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall']) for alias in aliases: system_command = 'git config --global --unset-all alias.{0}'.format(alias) verbose_echo(system_command, verbose, fake) if not fake: os.system(system_command) if not fake: click.echo('\nThe following git aliases are uninstalled:\n') output_aliases(aliases)
[ "def", "do_uninstall", "(", "ctx", ",", "verbose", ",", "fake", ")", ":", "aliases", "=", "cli", ".", "list_commands", "(", "ctx", ")", "# Add deprecated aliases", "aliases", ".", "extend", "(", "[", "'graft'", ",", "'harvest'", ",", "'sprout'", ",", "'res...
Uninstalls legit git aliases, including deprecated legit sub-commands.
[ "Uninstalls", "legit", "git", "aliases", "including", "deprecated", "legit", "sub", "-", "commands", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L281-L293
236,021
kennethreitz/legit
legit/cli.py
do_edit_settings
def do_edit_settings(fake): """Opens legit settings in editor.""" path = resources.user.open('config.ini').name click.echo('Legit Settings:\n') for (option, _, description) in legit_settings.config_defaults: click.echo(columns([crayons.yellow(option), 25], [description, None])) click.echo("") # separate settings info from os output if fake: click.echo(crayons.red('Faked! >>> edit {}'.format(path))) else: click.edit(path)
python
def do_edit_settings(fake): path = resources.user.open('config.ini').name click.echo('Legit Settings:\n') for (option, _, description) in legit_settings.config_defaults: click.echo(columns([crayons.yellow(option), 25], [description, None])) click.echo("") # separate settings info from os output if fake: click.echo(crayons.red('Faked! >>> edit {}'.format(path))) else: click.edit(path)
[ "def", "do_edit_settings", "(", "fake", ")", ":", "path", "=", "resources", ".", "user", ".", "open", "(", "'config.ini'", ")", ".", "name", "click", ".", "echo", "(", "'Legit Settings:\\n'", ")", "for", "(", "option", ",", "_", ",", "description", ")", ...
Opens legit settings in editor.
[ "Opens", "legit", "settings", "in", "editor", "." ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L296-L310
236,022
kennethreitz/legit
legit/cli.py
LegitGroup.get_command
def get_command(self, ctx, cmd_name): """Override to handle command aliases""" rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv cmd_name = self.command_aliases.get(cmd_name, "") return click.Group.get_command(self, ctx, cmd_name)
python
def get_command(self, ctx, cmd_name): rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv cmd_name = self.command_aliases.get(cmd_name, "") return click.Group.get_command(self, ctx, cmd_name)
[ "def", "get_command", "(", "self", ",", "ctx", ",", "cmd_name", ")", ":", "rv", "=", "click", ".", "Group", ".", "get_command", "(", "self", ",", "ctx", ",", "cmd_name", ")", "if", "rv", "is", "not", "None", ":", "return", "rv", "cmd_name", "=", "s...
Override to handle command aliases
[ "Override", "to", "handle", "command", "aliases" ]
699802c5be665bd358456a940953b5c1d8672754
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L42-L48
236,023
nschloe/meshio
meshio/cli.py
_get_parser
def _get_parser(): """Parse input options.""" import argparse parser = argparse.ArgumentParser(description=("Convert between mesh formats.")) parser.add_argument("infile", type=str, help="mesh file to be read from") parser.add_argument( "--input-format", "-i", type=str, choices=input_filetypes, help="input file format", default=None, ) parser.add_argument( "--output-format", "-o", type=str, choices=output_filetypes, help="output file format", default=None, ) parser.add_argument("outfile", type=str, help="mesh file to be written to") parser.add_argument( "--prune", "-p", action="store_true", help="remove lower order cells, remove orphaned nodes", ) parser.add_argument( "--prune-z-0", "-z", action="store_true", help="remove third (z) dimension if all points are 0", ) parser.add_argument( "--version", "-v", action="version", version="%(prog)s {}, Python {}".format(__version__, sys.version), help="display version information", ) return parser
python
def _get_parser(): import argparse parser = argparse.ArgumentParser(description=("Convert between mesh formats.")) parser.add_argument("infile", type=str, help="mesh file to be read from") parser.add_argument( "--input-format", "-i", type=str, choices=input_filetypes, help="input file format", default=None, ) parser.add_argument( "--output-format", "-o", type=str, choices=output_filetypes, help="output file format", default=None, ) parser.add_argument("outfile", type=str, help="mesh file to be written to") parser.add_argument( "--prune", "-p", action="store_true", help="remove lower order cells, remove orphaned nodes", ) parser.add_argument( "--prune-z-0", "-z", action="store_true", help="remove third (z) dimension if all points are 0", ) parser.add_argument( "--version", "-v", action="version", version="%(prog)s {}, Python {}".format(__version__, sys.version), help="display version information", ) return parser
[ "def", "_get_parser", "(", ")", ":", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "(", "\"Convert between mesh formats.\"", ")", ")", "parser", ".", "add_argument", "(", "\"infile\"", ",", "type", "=", "str", ...
Parse input options.
[ "Parse", "input", "options", "." ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/cli.py#L43-L93
236,024
nschloe/meshio
meshio/msh_io/main.py
_read_header
def _read_header(f): """Read the mesh format block specified as version(ASCII double; currently 4.1) file-type(ASCII int; 0 for ASCII mode, 1 for binary mode) data-size(ASCII int; sizeof(size_t)) < int with value one; only in binary mode, to detect endianness > though here the version is left as str """ # http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format line = f.readline().decode("utf-8") # Split the line # 4.1 0 8 # into its components. str_list = list(filter(None, line.split())) fmt_version = str_list[0] assert str_list[1] in ["0", "1"] is_ascii = str_list[1] == "0" data_size = int(str_list[2]) if not is_ascii: # The next line is the integer 1 in bytes. Useful for checking # endianness. Just assert that we get 1 here. one = f.read(struct.calcsize("i")) assert struct.unpack("i", one)[0] == 1 line = f.readline().decode("utf-8") assert line == "\n" line = f.readline().decode("utf-8") assert line.strip() == "$EndMeshFormat" return fmt_version, data_size, is_ascii
python
def _read_header(f): # http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format line = f.readline().decode("utf-8") # Split the line # 4.1 0 8 # into its components. str_list = list(filter(None, line.split())) fmt_version = str_list[0] assert str_list[1] in ["0", "1"] is_ascii = str_list[1] == "0" data_size = int(str_list[2]) if not is_ascii: # The next line is the integer 1 in bytes. Useful for checking # endianness. Just assert that we get 1 here. one = f.read(struct.calcsize("i")) assert struct.unpack("i", one)[0] == 1 line = f.readline().decode("utf-8") assert line == "\n" line = f.readline().decode("utf-8") assert line.strip() == "$EndMeshFormat" return fmt_version, data_size, is_ascii
[ "def", "_read_header", "(", "f", ")", ":", "# http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format", "line", "=", "f", ".", "readline", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "# Split the line", "# 4.1 0 8", "# into its components.", "str_list", "=", "list"...
Read the mesh format block specified as version(ASCII double; currently 4.1) file-type(ASCII int; 0 for ASCII mode, 1 for binary mode) data-size(ASCII int; sizeof(size_t)) < int with value one; only in binary mode, to detect endianness > though here the version is left as str
[ "Read", "the", "mesh", "format", "block" ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/msh_io/main.py#L48-L81
236,025
nschloe/meshio
meshio/msh_io/main.py
write
def write(filename, mesh, fmt_version, write_binary=True): """Writes a Gmsh msh file. """ try: writer = _writers[fmt_version] except KeyError: try: writer = _writers[fmt_version.split(".")[0]] except KeyError: raise ValueError( "Need mesh format in {} (got {})".format( sorted(_writers.keys()), fmt_version ) ) writer.write(filename, mesh, write_binary=write_binary)
python
def write(filename, mesh, fmt_version, write_binary=True): try: writer = _writers[fmt_version] except KeyError: try: writer = _writers[fmt_version.split(".")[0]] except KeyError: raise ValueError( "Need mesh format in {} (got {})".format( sorted(_writers.keys()), fmt_version ) ) writer.write(filename, mesh, write_binary=write_binary)
[ "def", "write", "(", "filename", ",", "mesh", ",", "fmt_version", ",", "write_binary", "=", "True", ")", ":", "try", ":", "writer", "=", "_writers", "[", "fmt_version", "]", "except", "KeyError", ":", "try", ":", "writer", "=", "_writers", "[", "fmt_vers...
Writes a Gmsh msh file.
[ "Writes", "a", "Gmsh", "msh", "file", "." ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/msh_io/main.py#L84-L99
236,026
nschloe/meshio
meshio/helpers.py
write
def write(filename, mesh, file_format=None, **kwargs): """Writes mesh together with data to a file. :params filename: File to write to. :type filename: str :params point_data: Named additional point data to write to the file. :type point_data: dict """ if not file_format: # deduce file format from extension file_format = _filetype_from_filename(filename) # check cells for sanity for key, value in mesh.cells.items(): if key[:7] == "polygon": assert value.shape[1] == int(key[7:]) else: assert value.shape[1] == num_nodes_per_cell[key] try: interface, args, default_kwargs = _writer_map[file_format] except KeyError: raise KeyError( "Unknown format '{}'. Pick one of {}".format( file_format, sorted(list(_writer_map.keys())) ) ) # Build kwargs _kwargs = default_kwargs.copy() _kwargs.update(kwargs) # Write return interface.write(filename, mesh, *args, **_kwargs)
python
def write(filename, mesh, file_format=None, **kwargs): if not file_format: # deduce file format from extension file_format = _filetype_from_filename(filename) # check cells for sanity for key, value in mesh.cells.items(): if key[:7] == "polygon": assert value.shape[1] == int(key[7:]) else: assert value.shape[1] == num_nodes_per_cell[key] try: interface, args, default_kwargs = _writer_map[file_format] except KeyError: raise KeyError( "Unknown format '{}'. Pick one of {}".format( file_format, sorted(list(_writer_map.keys())) ) ) # Build kwargs _kwargs = default_kwargs.copy() _kwargs.update(kwargs) # Write return interface.write(filename, mesh, *args, **_kwargs)
[ "def", "write", "(", "filename", ",", "mesh", ",", "file_format", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "file_format", ":", "# deduce file format from extension", "file_format", "=", "_filetype_from_filename", "(", "filename", ")", "# chec...
Writes mesh together with data to a file. :params filename: File to write to. :type filename: str :params point_data: Named additional point data to write to the file. :type point_data: dict
[ "Writes", "mesh", "together", "with", "data", "to", "a", "file", "." ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/helpers.py#L193-L227
236,027
nschloe/meshio
meshio/medit_io.py
_ItemReader.next_items
def next_items(self, n): """Returns the next n items. Throws StopIteration when there is not enough data to return n items. """ items = [] while len(items) < n: if self._line_ptr >= len(self._line): # Load the next line. line = next(self._file).strip() # Skip all comment and empty lines. while not line or line[0] == "#": line = next(self._file).strip() self._line = self._re_delimiter.split(line) self._line_ptr = 0 n_read = min(n - len(items), len(self._line) - self._line_ptr) items.extend(self._line[self._line_ptr : self._line_ptr + n_read]) self._line_ptr += n_read return items
python
def next_items(self, n): items = [] while len(items) < n: if self._line_ptr >= len(self._line): # Load the next line. line = next(self._file).strip() # Skip all comment and empty lines. while not line or line[0] == "#": line = next(self._file).strip() self._line = self._re_delimiter.split(line) self._line_ptr = 0 n_read = min(n - len(items), len(self._line) - self._line_ptr) items.extend(self._line[self._line_ptr : self._line_ptr + n_read]) self._line_ptr += n_read return items
[ "def", "next_items", "(", "self", ",", "n", ")", ":", "items", "=", "[", "]", "while", "len", "(", "items", ")", "<", "n", ":", "if", "self", ".", "_line_ptr", ">=", "len", "(", "self", ".", "_line", ")", ":", "# Load the next line.", "line", "=", ...
Returns the next n items. Throws StopIteration when there is not enough data to return n items.
[ "Returns", "the", "next", "n", "items", "." ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/medit_io.py#L35-L53
236,028
nschloe/meshio
meshio/abaqus_io.py
get_param_map
def get_param_map(word, required_keys=None): """ get the optional arguments on a line Example ------- >>> iline = 0 >>> word = 'elset,instance=dummy2,generate' >>> params = get_param_map(iline, word, required_keys=['instance']) params = { 'elset' : None, 'instance' : 'dummy2, 'generate' : None, } """ if required_keys is None: required_keys = [] words = word.split(",") param_map = {} for wordi in words: if "=" not in wordi: key = wordi.strip() value = None else: sword = wordi.split("=") assert len(sword) == 2, sword key = sword[0].strip() value = sword[1].strip() param_map[key] = value msg = "" for key in required_keys: if key not in param_map: msg += "%r not found in %r\n" % (key, word) if msg: raise RuntimeError(msg) return param_map
python
def get_param_map(word, required_keys=None): if required_keys is None: required_keys = [] words = word.split(",") param_map = {} for wordi in words: if "=" not in wordi: key = wordi.strip() value = None else: sword = wordi.split("=") assert len(sword) == 2, sword key = sword[0].strip() value = sword[1].strip() param_map[key] = value msg = "" for key in required_keys: if key not in param_map: msg += "%r not found in %r\n" % (key, word) if msg: raise RuntimeError(msg) return param_map
[ "def", "get_param_map", "(", "word", ",", "required_keys", "=", "None", ")", ":", "if", "required_keys", "is", "None", ":", "required_keys", "=", "[", "]", "words", "=", "word", ".", "split", "(", "\",\"", ")", "param_map", "=", "{", "}", "for", "wordi...
get the optional arguments on a line Example ------- >>> iline = 0 >>> word = 'elset,instance=dummy2,generate' >>> params = get_param_map(iline, word, required_keys=['instance']) params = { 'elset' : None, 'instance' : 'dummy2, 'generate' : None, }
[ "get", "the", "optional", "arguments", "on", "a", "line" ]
cb78285962b573fb46a4f3f54276206d922bdbcb
https://github.com/nschloe/meshio/blob/cb78285962b573fb46a4f3f54276206d922bdbcb/meshio/abaqus_io.py#L193-L229
236,029
spulec/moto
moto/ssm/models.py
SimpleSystemManagerBackend.get_parameters_by_path
def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): """Implement the get-parameters-by-path-API in the backend.""" result = [] # path could be with or without a trailing /. we handle this # difference here. path = path.rstrip('/') + '/' for param in self._parameters: if path != '/' and not param.startswith(path): continue if '/' in param[len(path) + 1:] and not recursive: continue if not self._match_filters(self._parameters[param], filters): continue result.append(self._parameters[param]) return result
python
def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): result = [] # path could be with or without a trailing /. we handle this # difference here. path = path.rstrip('/') + '/' for param in self._parameters: if path != '/' and not param.startswith(path): continue if '/' in param[len(path) + 1:] and not recursive: continue if not self._match_filters(self._parameters[param], filters): continue result.append(self._parameters[param]) return result
[ "def", "get_parameters_by_path", "(", "self", ",", "path", ",", "with_decryption", ",", "recursive", ",", "filters", "=", "None", ")", ":", "result", "=", "[", "]", "# path could be with or without a trailing /. we handle this", "# difference here.", "path", "=", "pat...
Implement the get-parameters-by-path-API in the backend.
[ "Implement", "the", "get", "-", "parameters", "-", "by", "-", "path", "-", "API", "in", "the", "backend", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ssm/models.py#L255-L270
236,030
spulec/moto
moto/ssm/models.py
SimpleSystemManagerBackend._match_filters
def _match_filters(parameter, filters=None): """Return True if the given parameter matches all the filters""" for filter_obj in (filters or []): key = filter_obj['Key'] option = filter_obj.get('Option', 'Equals') values = filter_obj.get('Values', []) what = None if key == 'Type': what = parameter.type elif key == 'KeyId': what = parameter.keyid if option == 'Equals'\ and not any(what == value for value in values): return False elif option == 'BeginsWith'\ and not any(what.startswith(value) for value in values): return False # True if no false match (or no filters at all) return True
python
def _match_filters(parameter, filters=None): for filter_obj in (filters or []): key = filter_obj['Key'] option = filter_obj.get('Option', 'Equals') values = filter_obj.get('Values', []) what = None if key == 'Type': what = parameter.type elif key == 'KeyId': what = parameter.keyid if option == 'Equals'\ and not any(what == value for value in values): return False elif option == 'BeginsWith'\ and not any(what.startswith(value) for value in values): return False # True if no false match (or no filters at all) return True
[ "def", "_match_filters", "(", "parameter", ",", "filters", "=", "None", ")", ":", "for", "filter_obj", "in", "(", "filters", "or", "[", "]", ")", ":", "key", "=", "filter_obj", "[", "'Key'", "]", "option", "=", "filter_obj", ".", "get", "(", "'Option'"...
Return True if the given parameter matches all the filters
[ "Return", "True", "if", "the", "given", "parameter", "matches", "all", "the", "filters" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ssm/models.py#L273-L293
236,031
spulec/moto
scripts/scaffold.py
initialize_service
def initialize_service(service, operation, api_protocol): """create lib and test dirs if not exist """ lib_dir = get_lib_dir(service) test_dir = get_test_dir(service) print_progress('Initializing service', service, 'green') client = boto3.client(service) service_class = client.__class__.__name__ endpoint_prefix = client._service_model.endpoint_prefix tmpl_context = { 'service': service, 'service_class': service_class, 'endpoint_prefix': endpoint_prefix, 'api_protocol': api_protocol, 'escaped_service': get_escaped_service(service) } # initialize service directory if os.path.exists(lib_dir): print_progress('skip creating', lib_dir, 'yellow') else: print_progress('creating', lib_dir, 'green') os.makedirs(lib_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') for tmpl_filename in os.listdir(tmpl_dir): render_template( tmpl_dir, tmpl_filename, tmpl_context, service ) # initialize test directory if os.path.exists(test_dir): print_progress('skip creating', test_dir, 'yellow') else: print_progress('creating', test_dir, 'green') os.makedirs(test_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') for tmpl_filename in os.listdir(tmpl_dir): alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None render_template( tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) # append mock to init files append_mock_to_init_py(service) append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service)
python
def initialize_service(service, operation, api_protocol): lib_dir = get_lib_dir(service) test_dir = get_test_dir(service) print_progress('Initializing service', service, 'green') client = boto3.client(service) service_class = client.__class__.__name__ endpoint_prefix = client._service_model.endpoint_prefix tmpl_context = { 'service': service, 'service_class': service_class, 'endpoint_prefix': endpoint_prefix, 'api_protocol': api_protocol, 'escaped_service': get_escaped_service(service) } # initialize service directory if os.path.exists(lib_dir): print_progress('skip creating', lib_dir, 'yellow') else: print_progress('creating', lib_dir, 'green') os.makedirs(lib_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') for tmpl_filename in os.listdir(tmpl_dir): render_template( tmpl_dir, tmpl_filename, tmpl_context, service ) # initialize test directory if os.path.exists(test_dir): print_progress('skip creating', test_dir, 'yellow') else: print_progress('creating', test_dir, 'green') os.makedirs(test_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') for tmpl_filename in os.listdir(tmpl_dir): alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None render_template( tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) # append mock to init files append_mock_to_init_py(service) append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service)
[ "def", "initialize_service", "(", "service", ",", "operation", ",", "api_protocol", ")", ":", "lib_dir", "=", "get_lib_dir", "(", "service", ")", "test_dir", "=", "get_test_dir", "(", "service", ")", "print_progress", "(", "'Initializing service'", ",", "service",...
create lib and test dirs if not exist
[ "create", "lib", "and", "test", "dirs", "if", "not", "exist" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/scripts/scaffold.py#L167-L216
236,032
spulec/moto
scripts/scaffold.py
get_response_query_template
def get_response_query_template(service, operation): """refers to definition of API in botocore, and autogenerates template Assume that response format is xml when protocol is query You can see example of elbv2 from link below. https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ client = boto3.client(service) aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) result_wrapper = op_model.output_shape.serialization['resultWrapper'] response_wrapper = result_wrapper.replace('Result', 'Response') metadata = op_model.metadata xml_namespace = metadata['xmlNamespace'] # build xml tree t_root = etree.Element(response_wrapper, xmlns=xml_namespace) # build metadata t_metadata = etree.Element('ResponseMetadata') t_request_id = etree.Element('RequestId') t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE' t_metadata.append(t_request_id) t_root.append(t_metadata) # build result t_result = etree.Element(result_wrapper) outputs = op_model.output_shape.members replace_list = [] for output_name, output_shape in outputs.items(): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8') xml_body_lines = xml_body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] singular_name = singularize(name) start_tag = '<%s>' % name iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name) end_tag = '</%s>' % name loop_end = '{{ endfor }}' start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l] if len(start_tag_indexes) != 1: raise Exception('tag %s not found in response body' % start_tag) start_tag_index = start_tag_indexes[0] xml_body_lines.insert(start_tag_index + 1, loop_start) end_tag_indexes = [i for i, l in 
enumerate(xml_body_lines) if end_tag in l] if len(end_tag_indexes) != 1: raise Exception('tag %s not found in response body' % end_tag) end_tag_index = end_tag_indexes[0] xml_body_lines.insert(end_tag_index, loop_end) xml_body = '\n'.join(xml_body_lines) body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body) return body
python
def get_response_query_template(service, operation): client = boto3.client(service) aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) result_wrapper = op_model.output_shape.serialization['resultWrapper'] response_wrapper = result_wrapper.replace('Result', 'Response') metadata = op_model.metadata xml_namespace = metadata['xmlNamespace'] # build xml tree t_root = etree.Element(response_wrapper, xmlns=xml_namespace) # build metadata t_metadata = etree.Element('ResponseMetadata') t_request_id = etree.Element('RequestId') t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE' t_metadata.append(t_request_id) t_root.append(t_metadata) # build result t_result = etree.Element(result_wrapper) outputs = op_model.output_shape.members replace_list = [] for output_name, output_shape in outputs.items(): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8') xml_body_lines = xml_body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] singular_name = singularize(name) start_tag = '<%s>' % name iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name) end_tag = '</%s>' % name loop_end = '{{ endfor }}' start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l] if len(start_tag_indexes) != 1: raise Exception('tag %s not found in response body' % start_tag) start_tag_index = start_tag_indexes[0] xml_body_lines.insert(start_tag_index + 1, loop_start) end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l] if len(end_tag_indexes) != 1: raise Exception('tag %s not found in response body' % end_tag) end_tag_index = end_tag_indexes[0] xml_body_lines.insert(end_tag_index, loop_end) xml_body = '\n'.join(xml_body_lines) body = '\n{}_TEMPLATE = 
"""{}"""'.format(operation.upper(), xml_body) return body
[ "def", "get_response_query_template", "(", "service", ",", "operation", ")", ":", "client", "=", "boto3", ".", "client", "(", "service", ")", "aws_operation_name", "=", "to_upper_camel_case", "(", "operation", ")", "op_model", "=", "client", ".", "_service_model",...
refers to definition of API in botocore, and autogenerates template Assume that response format is xml when protocol is query You can see example of elbv2 from link below. https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
[ "refers", "to", "definition", "of", "API", "in", "botocore", "and", "autogenerates", "template", "Assume", "that", "response", "format", "is", "xml", "when", "protocol", "is", "query" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/scripts/scaffold.py#L324-L382
236,033
spulec/moto
moto/core/utils.py
camelcase_to_underscores
def camelcase_to_underscores(argument): ''' Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute''' result = '' prev_char_title = True if not argument: return argument for index, char in enumerate(argument): try: next_char_title = argument[index + 1].istitle() except IndexError: next_char_title = True upper_to_lower = char.istitle() and not next_char_title lower_to_upper = char.istitle() and not prev_char_title if index and (upper_to_lower or lower_to_upper): # Only add underscore if char is capital, not first letter, and next # char is not capital result += "_" prev_char_title = char.istitle() if not char.isspace(): # Only add non-whitespace result += char.lower() return result
python
def camelcase_to_underscores(argument): ''' Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute''' result = '' prev_char_title = True if not argument: return argument for index, char in enumerate(argument): try: next_char_title = argument[index + 1].istitle() except IndexError: next_char_title = True upper_to_lower = char.istitle() and not next_char_title lower_to_upper = char.istitle() and not prev_char_title if index and (upper_to_lower or lower_to_upper): # Only add underscore if char is capital, not first letter, and next # char is not capital result += "_" prev_char_title = char.istitle() if not char.isspace(): # Only add non-whitespace result += char.lower() return result
[ "def", "camelcase_to_underscores", "(", "argument", ")", ":", "result", "=", "''", "prev_char_title", "=", "True", "if", "not", "argument", ":", "return", "argument", "for", "index", ",", "char", "in", "enumerate", "(", "argument", ")", ":", "try", ":", "n...
Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute
[ "Converts", "a", "camelcase", "param", "like", "theNewAttribute", "to", "the", "equivalent", "python", "underscore", "variable", "like", "the_new_attribute" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/utils.py#L17-L40
236,034
spulec/moto
moto/core/utils.py
underscores_to_camelcase
def underscores_to_camelcase(argument): ''' Converts a camelcase param like the_new_attribute to the equivalent camelcase version like theNewAttribute. Note that the first letter is NOT capitalized by this function ''' result = '' previous_was_underscore = False for char in argument: if char != '_': if previous_was_underscore: result += char.upper() else: result += char previous_was_underscore = char == '_' return result
python
def underscores_to_camelcase(argument): ''' Converts a camelcase param like the_new_attribute to the equivalent camelcase version like theNewAttribute. Note that the first letter is NOT capitalized by this function ''' result = '' previous_was_underscore = False for char in argument: if char != '_': if previous_was_underscore: result += char.upper() else: result += char previous_was_underscore = char == '_' return result
[ "def", "underscores_to_camelcase", "(", "argument", ")", ":", "result", "=", "''", "previous_was_underscore", "=", "False", "for", "char", "in", "argument", ":", "if", "char", "!=", "'_'", ":", "if", "previous_was_underscore", ":", "result", "+=", "char", ".",...
Converts a camelcase param like the_new_attribute to the equivalent camelcase version like theNewAttribute. Note that the first letter is NOT capitalized by this function
[ "Converts", "a", "camelcase", "param", "like", "the_new_attribute", "to", "the", "equivalent", "camelcase", "version", "like", "theNewAttribute", ".", "Note", "that", "the", "first", "letter", "is", "NOT", "capitalized", "by", "this", "function" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/utils.py#L43-L56
236,035
spulec/moto
moto/core/utils.py
convert_regex_to_flask_path
def convert_regex_to_flask_path(url_path): """ Converts a regex matching url to one that can be used with flask """ for token in ["$"]: url_path = url_path.replace(token, "") def caller(reg): match_name, match_pattern = reg.groups() return '<regex("{0}"):{1}>'.format(match_pattern, match_name) url_path = re.sub("\(\?P<(.*?)>(.*?)\)", caller, url_path) if url_path.endswith("/?"): # Flask does own handling of trailing slashes url_path = url_path.rstrip("/?") return url_path
python
def convert_regex_to_flask_path(url_path): for token in ["$"]: url_path = url_path.replace(token, "") def caller(reg): match_name, match_pattern = reg.groups() return '<regex("{0}"):{1}>'.format(match_pattern, match_name) url_path = re.sub("\(\?P<(.*?)>(.*?)\)", caller, url_path) if url_path.endswith("/?"): # Flask does own handling of trailing slashes url_path = url_path.rstrip("/?") return url_path
[ "def", "convert_regex_to_flask_path", "(", "url_path", ")", ":", "for", "token", "in", "[", "\"$\"", "]", ":", "url_path", "=", "url_path", ".", "replace", "(", "token", ",", "\"\"", ")", "def", "caller", "(", "reg", ")", ":", "match_name", ",", "match_p...
Converts a regex matching url to one that can be used with flask
[ "Converts", "a", "regex", "matching", "url", "to", "one", "that", "can", "be", "used", "with", "flask" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/utils.py#L80-L96
236,036
spulec/moto
moto/emr/responses.py
generate_boto3_response
def generate_boto3_response(operation): """The decorator to convert an XML response to JSON, if the request is determined to be from boto3. Pass the API action as a parameter. """ def _boto3_request(method): @wraps(method) def f(self, *args, **kwargs): rendered = method(self, *args, **kwargs) if 'json' in self.headers.get('Content-Type', []): self.response_headers.update( {'x-amzn-requestid': '2690d7eb-ed86-11dd-9877-6fad448a8419', 'date': datetime.now(pytz.utc).strftime('%a, %d %b %Y %H:%M:%S %Z'), 'content-type': 'application/x-amz-json-1.1'}) resp = xml_to_json_response( self.aws_service_spec, operation, rendered) return '' if resp is None else json.dumps(resp) return rendered return f return _boto3_request
python
def generate_boto3_response(operation): def _boto3_request(method): @wraps(method) def f(self, *args, **kwargs): rendered = method(self, *args, **kwargs) if 'json' in self.headers.get('Content-Type', []): self.response_headers.update( {'x-amzn-requestid': '2690d7eb-ed86-11dd-9877-6fad448a8419', 'date': datetime.now(pytz.utc).strftime('%a, %d %b %Y %H:%M:%S %Z'), 'content-type': 'application/x-amz-json-1.1'}) resp = xml_to_json_response( self.aws_service_spec, operation, rendered) return '' if resp is None else json.dumps(resp) return rendered return f return _boto3_request
[ "def", "generate_boto3_response", "(", "operation", ")", ":", "def", "_boto3_request", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rendered", "=", "method", ...
The decorator to convert an XML response to JSON, if the request is determined to be from boto3. Pass the API action as a parameter.
[ "The", "decorator", "to", "convert", "an", "XML", "response", "to", "JSON", "if", "the", "request", "is", "determined", "to", "be", "from", "boto3", ".", "Pass", "the", "API", "action", "as", "a", "parameter", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/emr/responses.py#L18-L37
236,037
spulec/moto
moto/ecr/models.py
ECRBackend.describe_repositories
def describe_repositories(self, registry_id=None, repository_names=None): """ maxResults and nextToken not implemented """ if repository_names: for repository_name in repository_names: if repository_name not in self.repositories: raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) repositories = [] for repository in self.repositories.values(): # If a registry_id was supplied, ensure this repository matches if registry_id: if repository.registry_id != registry_id: continue # If a list of repository names was supplied, esure this repository # is in that list if repository_names: if repository.name not in repository_names: continue repositories.append(repository.response_object) return repositories
python
def describe_repositories(self, registry_id=None, repository_names=None): if repository_names: for repository_name in repository_names: if repository_name not in self.repositories: raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) repositories = [] for repository in self.repositories.values(): # If a registry_id was supplied, ensure this repository matches if registry_id: if repository.registry_id != registry_id: continue # If a list of repository names was supplied, esure this repository # is in that list if repository_names: if repository.name not in repository_names: continue repositories.append(repository.response_object) return repositories
[ "def", "describe_repositories", "(", "self", ",", "registry_id", "=", "None", ",", "repository_names", "=", "None", ")", ":", "if", "repository_names", ":", "for", "repository_name", "in", "repository_names", ":", "if", "repository_name", "not", "in", "self", "....
maxResults and nextToken not implemented
[ "maxResults", "and", "nextToken", "not", "implemented" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ecr/models.py#L174-L195
236,038
spulec/moto
moto/ecr/models.py
ECRBackend.list_images
def list_images(self, repository_name, registry_id=None): """ maxResults and filtering not implemented """ repository = None found = False if repository_name in self.repositories: repository = self.repositories[repository_name] if registry_id: if repository.registry_id == registry_id: found = True else: found = True if not found: raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) images = [] for image in repository.images: images.append(image) return images
python
def list_images(self, repository_name, registry_id=None): repository = None found = False if repository_name in self.repositories: repository = self.repositories[repository_name] if registry_id: if repository.registry_id == registry_id: found = True else: found = True if not found: raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) images = [] for image in repository.images: images.append(image) return images
[ "def", "list_images", "(", "self", ",", "repository_name", ",", "registry_id", "=", "None", ")", ":", "repository", "=", "None", "found", "=", "False", "if", "repository_name", "in", "self", ".", "repositories", ":", "repository", "=", "self", ".", "reposito...
maxResults and filtering not implemented
[ "maxResults", "and", "filtering", "not", "implemented" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ecr/models.py#L208-L228
236,039
spulec/moto
moto/packages/httpretty/core.py
httprettified
def httprettified(test): "A decorator tests that use HTTPretty" def decorate_class(klass): for attr in dir(klass): if not attr.startswith('test_'): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue setattr(klass, attr, decorate_callable(attr_value)) return klass def decorate_callable(test): @functools.wraps(test) def wrapper(*args, **kw): httpretty.reset() httpretty.enable() try: return test(*args, **kw) finally: httpretty.disable() return wrapper if isinstance(test, ClassTypes): return decorate_class(test) return decorate_callable(test)
python
def httprettified(test): "A decorator tests that use HTTPretty" def decorate_class(klass): for attr in dir(klass): if not attr.startswith('test_'): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue setattr(klass, attr, decorate_callable(attr_value)) return klass def decorate_callable(test): @functools.wraps(test) def wrapper(*args, **kw): httpretty.reset() httpretty.enable() try: return test(*args, **kw) finally: httpretty.disable() return wrapper if isinstance(test, ClassTypes): return decorate_class(test) return decorate_callable(test)
[ "def", "httprettified", "(", "test", ")", ":", "def", "decorate_class", "(", "klass", ")", ":", "for", "attr", "in", "dir", "(", "klass", ")", ":", "if", "not", "attr", ".", "startswith", "(", "'test_'", ")", ":", "continue", "attr_value", "=", "getatt...
A decorator tests that use HTTPretty
[ "A", "decorator", "tests", "that", "use", "HTTPretty" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/packages/httpretty/core.py#L1089-L1116
236,040
spulec/moto
moto/packages/httpretty/core.py
URIMatcher.get_next_entry
def get_next_entry(self, method, info, request): """Cycle through available responses, but only once. Any subsequent requests will receive the last response""" if method not in self.current_entries: self.current_entries[method] = 0 # restrict selection to entries that match the requested method entries_for_method = [e for e in self.entries if e.method == method] if self.current_entries[method] >= len(entries_for_method): self.current_entries[method] = -1 if not self.entries or not entries_for_method: raise ValueError('I have no entries for method %s: %s' % (method, self)) entry = entries_for_method[self.current_entries[method]] if self.current_entries[method] != -1: self.current_entries[method] += 1 # Attach more info to the entry # So the callback can be more clever about what to do # This does also fix the case where the callback # would be handed a compiled regex as uri instead of the # real uri entry.info = info entry.request = request return entry
python
def get_next_entry(self, method, info, request): if method not in self.current_entries: self.current_entries[method] = 0 # restrict selection to entries that match the requested method entries_for_method = [e for e in self.entries if e.method == method] if self.current_entries[method] >= len(entries_for_method): self.current_entries[method] = -1 if not self.entries or not entries_for_method: raise ValueError('I have no entries for method %s: %s' % (method, self)) entry = entries_for_method[self.current_entries[method]] if self.current_entries[method] != -1: self.current_entries[method] += 1 # Attach more info to the entry # So the callback can be more clever about what to do # This does also fix the case where the callback # would be handed a compiled regex as uri instead of the # real uri entry.info = info entry.request = request return entry
[ "def", "get_next_entry", "(", "self", ",", "method", ",", "info", ",", "request", ")", ":", "if", "method", "not", "in", "self", ".", "current_entries", ":", "self", ".", "current_entries", "[", "method", "]", "=", "0", "# restrict selection to entries that ma...
Cycle through available responses, but only once. Any subsequent requests will receive the last response
[ "Cycle", "through", "available", "responses", "but", "only", "once", ".", "Any", "subsequent", "requests", "will", "receive", "the", "last", "response" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/packages/httpretty/core.py#L809-L837
236,041
spulec/moto
moto/xray/mock_client.py
mock_xray_client
def mock_xray_client(f): """ Mocks the X-Ray sdk by pwning its evil singleton with our methods The X-Ray SDK has normally been imported and `patched()` called long before we start mocking. This means the Context() will be very unhappy if an env var isnt present, so we set that, save the old context, then supply our new context. We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing that itno the recorder instance. """ @wraps(f) def _wrapped(*args, **kwargs): print("Starting X-Ray Patch") old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING') os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR' old_xray_context = aws_xray_sdk.core.xray_recorder._context old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter aws_xray_sdk.core.xray_recorder._context = AWSContext() aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: return f(*args, **kwargs) finally: if old_xray_context_var is None: del os.environ['AWS_XRAY_CONTEXT_MISSING'] else: os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter aws_xray_sdk.core.xray_recorder._context = old_xray_context return _wrapped
python
def mock_xray_client(f): @wraps(f) def _wrapped(*args, **kwargs): print("Starting X-Ray Patch") old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING') os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR' old_xray_context = aws_xray_sdk.core.xray_recorder._context old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter aws_xray_sdk.core.xray_recorder._context = AWSContext() aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: return f(*args, **kwargs) finally: if old_xray_context_var is None: del os.environ['AWS_XRAY_CONTEXT_MISSING'] else: os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter aws_xray_sdk.core.xray_recorder._context = old_xray_context return _wrapped
[ "def", "mock_xray_client", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "print", "(", "\"Starting X-Ray Patch\"", ")", "old_xray_context_var", "=", "os", ".", "environ", ".", "...
Mocks the X-Ray sdk by pwning its evil singleton with our methods The X-Ray SDK has normally been imported and `patched()` called long before we start mocking. This means the Context() will be very unhappy if an env var isnt present, so we set that, save the old context, then supply our new context. We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing that itno the recorder instance.
[ "Mocks", "the", "X", "-", "Ray", "sdk", "by", "pwning", "its", "evil", "singleton", "with", "our", "methods" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/xray/mock_client.py#L32-L65
236,042
spulec/moto
moto/iot/models.py
FakeCertificate.to_description_dict
def to_description_dict(self): """ You might need keys below in some situation - caCertificateId - previousOwnedBy """ return { 'certificateArn': self.arn, 'certificateId': self.certificate_id, 'status': self.status, 'certificatePem': self.certificate_pem, 'ownedBy': self.owner, 'creationDate': self.creation_date, 'lastModifiedDate': self.last_modified_date, 'transferData': self.transfer_data }
python
def to_description_dict(self): return { 'certificateArn': self.arn, 'certificateId': self.certificate_id, 'status': self.status, 'certificatePem': self.certificate_pem, 'ownedBy': self.owner, 'creationDate': self.creation_date, 'lastModifiedDate': self.last_modified_date, 'transferData': self.transfer_data }
[ "def", "to_description_dict", "(", "self", ")", ":", "return", "{", "'certificateArn'", ":", "self", ".", "arn", ",", "'certificateId'", ":", "self", ".", "certificate_id", ",", "'status'", ":", "self", ".", "status", ",", "'certificatePem'", ":", "self", "....
You might need keys below in some situation - caCertificateId - previousOwnedBy
[ "You", "might", "need", "keys", "below", "in", "some", "situation", "-", "caCertificateId", "-", "previousOwnedBy" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iot/models.py#L122-L137
236,043
spulec/moto
moto/opsworks/models.py
OpsworkInstance.start
def start(self): """ create an ec2 reservation if one doesn't already exist and call start_instance. Update instance attributes to the newly created instance attributes """ if self.instance is None: reservation = self.ec2_backend.add_instances( image_id=self.ami_id, count=1, user_data="", security_group_names=[], security_group_ids=self.security_group_ids, instance_type=self.instance_type, key_name=self.ssh_keyname, ebs_optimized=self.ebs_optimized, subnet_id=self.subnet_id, associate_public_ip=self.associate_public_ip, ) self.instance = reservation.instances[0] self.reported_os = { 'Family': 'rhel (fixed)', 'Name': 'amazon (fixed)', 'Version': '2016.03 (fixed)' } self.platform = self.instance.platform self.security_group_ids = self.instance.security_groups self.architecture = self.instance.architecture self.virtualization_type = self.instance.virtualization_type self.subnet_id = self.instance.subnet_id self.root_device_type = self.instance.root_device_type self.ec2_backend.start_instances([self.instance.id])
python
def start(self): if self.instance is None: reservation = self.ec2_backend.add_instances( image_id=self.ami_id, count=1, user_data="", security_group_names=[], security_group_ids=self.security_group_ids, instance_type=self.instance_type, key_name=self.ssh_keyname, ebs_optimized=self.ebs_optimized, subnet_id=self.subnet_id, associate_public_ip=self.associate_public_ip, ) self.instance = reservation.instances[0] self.reported_os = { 'Family': 'rhel (fixed)', 'Name': 'amazon (fixed)', 'Version': '2016.03 (fixed)' } self.platform = self.instance.platform self.security_group_ids = self.instance.security_groups self.architecture = self.instance.architecture self.virtualization_type = self.instance.virtualization_type self.subnet_id = self.instance.subnet_id self.root_device_type = self.instance.root_device_type self.ec2_backend.start_instances([self.instance.id])
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "instance", "is", "None", ":", "reservation", "=", "self", ".", "ec2_backend", ".", "add_instances", "(", "image_id", "=", "self", ".", "ami_id", ",", "count", "=", "1", ",", "user_data", "=", ...
create an ec2 reservation if one doesn't already exist and call start_instance. Update instance attributes to the newly created instance attributes
[ "create", "an", "ec2", "reservation", "if", "one", "doesn", "t", "already", "exist", "and", "call", "start_instance", ".", "Update", "instance", "attributes", "to", "the", "newly", "created", "instance", "attributes" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/opsworks/models.py#L84-L116
236,044
spulec/moto
moto/core/responses.py
flatten_json_request_body
def flatten_json_request_body(prefix, dict_body, spec): """Convert a JSON request body into query params.""" if len(spec) == 1 and 'type' in spec: return {prefix: to_str(dict_body, spec)} flat = {} for key, value in dict_body.items(): node_type = spec[key]['type'] if node_type == 'list': for idx, v in enumerate(value, 1): pref = key + '.member.' + str(idx) flat.update(flatten_json_request_body( pref, v, spec[key]['member'])) elif node_type == 'map': for idx, (k, v) in enumerate(value.items(), 1): pref = key + '.entry.' + str(idx) flat.update(flatten_json_request_body( pref + '.key', k, spec[key]['key'])) flat.update(flatten_json_request_body( pref + '.value', v, spec[key]['value'])) else: flat.update(flatten_json_request_body(key, value, spec[key])) if prefix: prefix = prefix + '.' return dict((prefix + k, v) for k, v in flat.items())
python
def flatten_json_request_body(prefix, dict_body, spec): if len(spec) == 1 and 'type' in spec: return {prefix: to_str(dict_body, spec)} flat = {} for key, value in dict_body.items(): node_type = spec[key]['type'] if node_type == 'list': for idx, v in enumerate(value, 1): pref = key + '.member.' + str(idx) flat.update(flatten_json_request_body( pref, v, spec[key]['member'])) elif node_type == 'map': for idx, (k, v) in enumerate(value.items(), 1): pref = key + '.entry.' + str(idx) flat.update(flatten_json_request_body( pref + '.key', k, spec[key]['key'])) flat.update(flatten_json_request_body( pref + '.value', v, spec[key]['value'])) else: flat.update(flatten_json_request_body(key, value, spec[key])) if prefix: prefix = prefix + '.' return dict((prefix + k, v) for k, v in flat.items())
[ "def", "flatten_json_request_body", "(", "prefix", ",", "dict_body", ",", "spec", ")", ":", "if", "len", "(", "spec", ")", "==", "1", "and", "'type'", "in", "spec", ":", "return", "{", "prefix", ":", "to_str", "(", "dict_body", ",", "spec", ")", "}", ...
Convert a JSON request body into query params.
[ "Convert", "a", "JSON", "request", "body", "into", "query", "params", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/responses.py#L751-L776
236,045
spulec/moto
moto/core/responses.py
xml_to_json_response
def xml_to_json_response(service_spec, operation, xml, result_node=None): """Convert rendered XML response to JSON for use with boto3.""" def transform(value, spec): """Apply transformations to make the output JSON comply with the expected form. This function applies: (1) Type cast to nodes with "type" property (e.g., 'true' to True). XML field values are all in text so this step is necessary to convert it to valid JSON objects. (2) Squashes "member" nodes to lists. """ if len(spec) == 1: return from_str(value, spec) od = OrderedDict() for k, v in value.items(): if k.startswith('@'): continue if k not in spec: # this can happen when with an older version of # botocore for which the node in XML template is not # defined in service spec. log.warning( 'Field %s is not defined by the botocore version in use', k) continue if spec[k]['type'] == 'list': if v is None: od[k] = [] elif len(spec[k]['member']) == 1: if isinstance(v['member'], list): od[k] = transform(v['member'], spec[k]['member']) else: od[k] = [transform(v['member'], spec[k]['member'])] elif isinstance(v['member'], list): od[k] = [transform(o, spec[k]['member']) for o in v['member']] elif isinstance(v['member'], OrderedDict): od[k] = [transform(v['member'], spec[k]['member'])] else: raise ValueError('Malformatted input') elif spec[k]['type'] == 'map': if v is None: od[k] = {} else: items = ([v['entry']] if not isinstance(v['entry'], list) else v['entry']) for item in items: key = from_str(item['key'], spec[k]['key']) val = from_str(item['value'], spec[k]['value']) if k not in od: od[k] = {} od[k][key] = val else: if v is None: od[k] = None else: od[k] = transform(v, spec[k]) return od dic = xmltodict.parse(xml) output_spec = service_spec.output_spec(operation) try: for k in (result_node or (operation + 'Response', operation + 'Result')): dic = dic[k] except KeyError: return None else: return transform(dic, output_spec) return None
python
def xml_to_json_response(service_spec, operation, xml, result_node=None): def transform(value, spec): """Apply transformations to make the output JSON comply with the expected form. This function applies: (1) Type cast to nodes with "type" property (e.g., 'true' to True). XML field values are all in text so this step is necessary to convert it to valid JSON objects. (2) Squashes "member" nodes to lists. """ if len(spec) == 1: return from_str(value, spec) od = OrderedDict() for k, v in value.items(): if k.startswith('@'): continue if k not in spec: # this can happen when with an older version of # botocore for which the node in XML template is not # defined in service spec. log.warning( 'Field %s is not defined by the botocore version in use', k) continue if spec[k]['type'] == 'list': if v is None: od[k] = [] elif len(spec[k]['member']) == 1: if isinstance(v['member'], list): od[k] = transform(v['member'], spec[k]['member']) else: od[k] = [transform(v['member'], spec[k]['member'])] elif isinstance(v['member'], list): od[k] = [transform(o, spec[k]['member']) for o in v['member']] elif isinstance(v['member'], OrderedDict): od[k] = [transform(v['member'], spec[k]['member'])] else: raise ValueError('Malformatted input') elif spec[k]['type'] == 'map': if v is None: od[k] = {} else: items = ([v['entry']] if not isinstance(v['entry'], list) else v['entry']) for item in items: key = from_str(item['key'], spec[k]['key']) val = from_str(item['value'], spec[k]['value']) if k not in od: od[k] = {} od[k][key] = val else: if v is None: od[k] = None else: od[k] = transform(v, spec[k]) return od dic = xmltodict.parse(xml) output_spec = service_spec.output_spec(operation) try: for k in (result_node or (operation + 'Response', operation + 'Result')): dic = dic[k] except KeyError: return None else: return transform(dic, output_spec) return None
[ "def", "xml_to_json_response", "(", "service_spec", ",", "operation", ",", "xml", ",", "result_node", "=", "None", ")", ":", "def", "transform", "(", "value", ",", "spec", ")", ":", "\"\"\"Apply transformations to make the output JSON comply with the\n expected for...
Convert rendered XML response to JSON for use with boto3.
[ "Convert", "rendered", "XML", "response", "to", "JSON", "for", "use", "with", "boto3", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/responses.py#L779-L852
236,046
spulec/moto
moto/core/responses.py
BaseResponse.get_current_user
def get_current_user(self): """ Returns the access key id used in this request as the current user id """ if 'Authorization' in self.headers: match = self.access_key_regex.search(self.headers['Authorization']) if match: return match.group(1) if self.querystring.get('AWSAccessKeyId'): return self.querystring.get('AWSAccessKeyId') else: # Should we raise an unauthorized exception instead? return '111122223333'
python
def get_current_user(self): if 'Authorization' in self.headers: match = self.access_key_regex.search(self.headers['Authorization']) if match: return match.group(1) if self.querystring.get('AWSAccessKeyId'): return self.querystring.get('AWSAccessKeyId') else: # Should we raise an unauthorized exception instead? return '111122223333'
[ "def", "get_current_user", "(", "self", ")", ":", "if", "'Authorization'", "in", "self", ".", "headers", ":", "match", "=", "self", ".", "access_key_regex", ".", "search", "(", "self", ".", "headers", "[", "'Authorization'", "]", ")", "if", "match", ":", ...
Returns the access key id used in this request as the current user id
[ "Returns", "the", "access", "key", "id", "used", "in", "this", "request", "as", "the", "current", "user", "id" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/responses.py#L183-L196
236,047
spulec/moto
moto/kms/models.py
KmsBackend.delete_alias
def delete_alias(self, alias_name): """Delete the alias.""" for aliases in self.key_to_aliases.values(): if alias_name in aliases: aliases.remove(alias_name)
python
def delete_alias(self, alias_name): for aliases in self.key_to_aliases.values(): if alias_name in aliases: aliases.remove(alias_name)
[ "def", "delete_alias", "(", "self", ",", "alias_name", ")", ":", "for", "aliases", "in", "self", ".", "key_to_aliases", ".", "values", "(", ")", ":", "if", "alias_name", "in", "aliases", ":", "aliases", ".", "remove", "(", "alias_name", ")" ]
Delete the alias.
[ "Delete", "the", "alias", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/kms/models.py#L132-L136
236,048
spulec/moto
moto/dynamodb/models.py
DynamoType.compare
def compare(self, range_comparison, range_objs): """ Compares this type against comparison filters """ range_values = [obj.value for obj in range_objs] comparison_func = get_comparison_func(range_comparison) return comparison_func(self.value, *range_values)
python
def compare(self, range_comparison, range_objs): range_values = [obj.value for obj in range_objs] comparison_func = get_comparison_func(range_comparison) return comparison_func(self.value, *range_values)
[ "def", "compare", "(", "self", ",", "range_comparison", ",", "range_objs", ")", ":", "range_values", "=", "[", "obj", ".", "value", "for", "obj", "in", "range_objs", "]", "comparison_func", "=", "get_comparison_func", "(", "range_comparison", ")", "return", "c...
Compares this type against comparison filters
[ "Compares", "this", "type", "against", "comparison", "filters" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/dynamodb/models.py#L47-L53
236,049
spulec/moto
moto/cognitoidp/models.py
paginate
def paginate(limit, start_arg="next_token", limit_arg="max_results"): """Returns a limited result list, and an offset into list of remaining items Takes the next_token, and max_results kwargs given to a function and handles the slicing of the results. The kwarg `next_token` is the offset into the list to begin slicing from. `max_results` is the size of the result required If the max_results is not supplied then the `limit` parameter is used as a default :param limit_arg: the name of argument in the decorated function that controls amount of items returned :param start_arg: the name of the argument in the decorated that provides the starting offset :param limit: A default maximum items to return :return: a tuple containing a list of items, and the offset into the list """ default_start = 0 def outer_wrapper(func): @functools.wraps(func) def wrapper(*args, **kwargs): start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg]) lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg]) stop = start + lim result = func(*args, **kwargs) limited_results = list(itertools.islice(result, start, stop)) next_token = stop if stop < len(result) else None return limited_results, next_token return wrapper return outer_wrapper
python
def paginate(limit, start_arg="next_token", limit_arg="max_results"): default_start = 0 def outer_wrapper(func): @functools.wraps(func) def wrapper(*args, **kwargs): start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg]) lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg]) stop = start + lim result = func(*args, **kwargs) limited_results = list(itertools.islice(result, start, stop)) next_token = stop if stop < len(result) else None return limited_results, next_token return wrapper return outer_wrapper
[ "def", "paginate", "(", "limit", ",", "start_arg", "=", "\"next_token\"", ",", "limit_arg", "=", "\"max_results\"", ")", ":", "default_start", "=", "0", "def", "outer_wrapper", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def",...
Returns a limited result list, and an offset into list of remaining items Takes the next_token, and max_results kwargs given to a function and handles the slicing of the results. The kwarg `next_token` is the offset into the list to begin slicing from. `max_results` is the size of the result required If the max_results is not supplied then the `limit` parameter is used as a default :param limit_arg: the name of argument in the decorated function that controls amount of items returned :param start_arg: the name of the argument in the decorated that provides the starting offset :param limit: A default maximum items to return :return: a tuple containing a list of items, and the offset into the list
[ "Returns", "a", "limited", "result", "list", "and", "an", "offset", "into", "list", "of", "remaining", "items" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/cognitoidp/models.py#L24-L54
236,050
spulec/moto
moto/core/models.py
CallbackResponse._url_matches
def _url_matches(self, url, other, match_querystring=False): ''' Need to override this so we can fix querystrings breaking regex matching ''' if not match_querystring: other = other.split('?', 1)[0] if responses._is_string(url): if responses._has_unicode(url): url = responses._clean_unicode(url) if not isinstance(other, six.text_type): other = other.encode('ascii').decode('utf8') return self._url_matches_strict(url, other) elif isinstance(url, responses.Pattern) and url.match(other): return True else: return False
python
def _url_matches(self, url, other, match_querystring=False): ''' Need to override this so we can fix querystrings breaking regex matching ''' if not match_querystring: other = other.split('?', 1)[0] if responses._is_string(url): if responses._has_unicode(url): url = responses._clean_unicode(url) if not isinstance(other, six.text_type): other = other.encode('ascii').decode('utf8') return self._url_matches_strict(url, other) elif isinstance(url, responses.Pattern) and url.match(other): return True else: return False
[ "def", "_url_matches", "(", "self", ",", "url", ",", "other", ",", "match_querystring", "=", "False", ")", ":", "if", "not", "match_querystring", ":", "other", "=", "other", ".", "split", "(", "'?'", ",", "1", ")", "[", "0", "]", "if", "responses", "...
Need to override this so we can fix querystrings breaking regex matching
[ "Need", "to", "override", "this", "so", "we", "can", "fix", "querystrings", "breaking", "regex", "matching" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/models.py#L175-L191
236,051
spulec/moto
moto/core/models.py
BaseBackend.urls
def urls(self): """ A dictionary of the urls to be mocked with this service and the handlers that should be called in their place """ url_bases = self._url_module.url_bases unformatted_paths = self._url_module.url_paths urls = {} for url_base in url_bases: for url_path, handler in unformatted_paths.items(): url = url_path.format(url_base) urls[url] = handler return urls
python
def urls(self): url_bases = self._url_module.url_bases unformatted_paths = self._url_module.url_paths urls = {} for url_base in url_bases: for url_path, handler in unformatted_paths.items(): url = url_path.format(url_base) urls[url] = handler return urls
[ "def", "urls", "(", "self", ")", ":", "url_bases", "=", "self", ".", "_url_module", ".", "url_bases", "unformatted_paths", "=", "self", ".", "_url_module", ".", "url_paths", "urls", "=", "{", "}", "for", "url_base", "in", "url_bases", ":", "for", "url_path...
A dictionary of the urls to be mocked with this service and the handlers that should be called in their place
[ "A", "dictionary", "of", "the", "urls", "to", "be", "mocked", "with", "this", "service", "and", "the", "handlers", "that", "should", "be", "called", "in", "their", "place" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/models.py#L484-L498
236,052
spulec/moto
moto/core/models.py
BaseBackend.url_paths
def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths
python
def url_paths(self): unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths
[ "def", "url_paths", "(", "self", ")", ":", "unformatted_paths", "=", "self", ".", "_url_module", ".", "url_paths", "paths", "=", "{", "}", "for", "unformatted_path", ",", "handler", "in", "unformatted_paths", ".", "items", "(", ")", ":", "path", "=", "unfo...
A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place
[ "A", "dictionary", "of", "the", "paths", "of", "the", "urls", "to", "be", "mocked", "with", "this", "service", "and", "the", "handlers", "that", "should", "be", "called", "in", "their", "place" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/models.py#L501-L513
236,053
spulec/moto
moto/core/models.py
BaseBackend.flask_paths
def flask_paths(self): """ The url paths that will be used for the flask server """ paths = {} for url_path, handler in self.url_paths.items(): url_path = convert_regex_to_flask_path(url_path) paths[url_path] = handler return paths
python
def flask_paths(self): paths = {} for url_path, handler in self.url_paths.items(): url_path = convert_regex_to_flask_path(url_path) paths[url_path] = handler return paths
[ "def", "flask_paths", "(", "self", ")", ":", "paths", "=", "{", "}", "for", "url_path", ",", "handler", "in", "self", ".", "url_paths", ".", "items", "(", ")", ":", "url_path", "=", "convert_regex_to_flask_path", "(", "url_path", ")", "paths", "[", "url_...
The url paths that will be used for the flask server
[ "The", "url", "paths", "that", "will", "be", "used", "for", "the", "flask", "server" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/models.py#L523-L532
236,054
spulec/moto
moto/cloudformation/utils.py
yaml_tag_constructor
def yaml_tag_constructor(loader, tag, node): """convert shorthand intrinsic function to full name """ def _f(loader, tag, node): if tag == '!GetAtt': return node.value.split('.') elif type(node) == yaml.SequenceNode: return loader.construct_sequence(node) else: return node.value if tag == '!Ref': key = 'Ref' else: key = 'Fn::{}'.format(tag[1:]) return {key: _f(loader, tag, node)}
python
def yaml_tag_constructor(loader, tag, node): def _f(loader, tag, node): if tag == '!GetAtt': return node.value.split('.') elif type(node) == yaml.SequenceNode: return loader.construct_sequence(node) else: return node.value if tag == '!Ref': key = 'Ref' else: key = 'Fn::{}'.format(tag[1:]) return {key: _f(loader, tag, node)}
[ "def", "yaml_tag_constructor", "(", "loader", ",", "tag", ",", "node", ")", ":", "def", "_f", "(", "loader", ",", "tag", ",", "node", ")", ":", "if", "tag", "==", "'!GetAtt'", ":", "return", "node", ".", "value", ".", "split", "(", "'.'", ")", "eli...
convert shorthand intrinsic function to full name
[ "convert", "shorthand", "intrinsic", "function", "to", "full", "name" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/cloudformation/utils.py#L36-L52
236,055
spulec/moto
moto/dynamodb/responses.py
DynamoHandler.get_endpoint_name
def get_endpoint_name(self, headers): """Parses request headers and extracts part od the X-Amz-Target that corresponds to a method of DynamoHandler ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables """ # Headers are case-insensitive. Probably a better way to do this. match = headers.get('x-amz-target') or headers.get('X-Amz-Target') if match: return match.split(".")[1]
python
def get_endpoint_name(self, headers): # Headers are case-insensitive. Probably a better way to do this. match = headers.get('x-amz-target') or headers.get('X-Amz-Target') if match: return match.split(".")[1]
[ "def", "get_endpoint_name", "(", "self", ",", "headers", ")", ":", "# Headers are case-insensitive. Probably a better way to do this.", "match", "=", "headers", ".", "get", "(", "'x-amz-target'", ")", "or", "headers", ".", "get", "(", "'X-Amz-Target'", ")", "if", "m...
Parses request headers and extracts part od the X-Amz-Target that corresponds to a method of DynamoHandler ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
[ "Parses", "request", "headers", "and", "extracts", "part", "od", "the", "X", "-", "Amz", "-", "Target", "that", "corresponds", "to", "a", "method", "of", "DynamoHandler" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/dynamodb/responses.py#L12-L21
236,056
spulec/moto
moto/ec2/models.py
InstanceBackend.get_reservations_by_instance_ids
def get_reservations_by_instance_ids(self, instance_ids, filters=None): """ Go through all of the reservations and filter to only return those associated with the given instance_ids. """ reservations = [] for reservation in self.all_reservations(): reservation_instance_ids = [ instance.id for instance in reservation.instances] matching_reservation = any( instance_id in reservation_instance_ids for instance_id in instance_ids) if matching_reservation: reservation.instances = [ instance for instance in reservation.instances if instance.id in instance_ids] reservations.append(reservation) found_instance_ids = [ instance.id for reservation in reservations for instance in reservation.instances] if len(found_instance_ids) != len(instance_ids): invalid_id = list(set(instance_ids).difference( set(found_instance_ids)))[0] raise InvalidInstanceIdError(invalid_id) if filters is not None: reservations = filter_reservations(reservations, filters) return reservations
python
def get_reservations_by_instance_ids(self, instance_ids, filters=None): reservations = [] for reservation in self.all_reservations(): reservation_instance_ids = [ instance.id for instance in reservation.instances] matching_reservation = any( instance_id in reservation_instance_ids for instance_id in instance_ids) if matching_reservation: reservation.instances = [ instance for instance in reservation.instances if instance.id in instance_ids] reservations.append(reservation) found_instance_ids = [ instance.id for reservation in reservations for instance in reservation.instances] if len(found_instance_ids) != len(instance_ids): invalid_id = list(set(instance_ids).difference( set(found_instance_ids)))[0] raise InvalidInstanceIdError(invalid_id) if filters is not None: reservations = filter_reservations(reservations, filters) return reservations
[ "def", "get_reservations_by_instance_ids", "(", "self", ",", "instance_ids", ",", "filters", "=", "None", ")", ":", "reservations", "=", "[", "]", "for", "reservation", "in", "self", ".", "all_reservations", "(", ")", ":", "reservation_instance_ids", "=", "[", ...
Go through all of the reservations and filter to only return those associated with the given instance_ids.
[ "Go", "through", "all", "of", "the", "reservations", "and", "filter", "to", "only", "return", "those", "associated", "with", "the", "given", "instance_ids", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ec2/models.py#L829-L851
236,057
spulec/moto
moto/ecs/models.py
EC2ContainerServiceBackend.list_task_definitions
def list_task_definitions(self): """ Filtering not implemented """ task_arns = [] for task_definition_list in self.task_definitions.values(): task_arns.extend( [task_definition.arn for task_definition in task_definition_list]) return task_arns
python
def list_task_definitions(self): task_arns = [] for task_definition_list in self.task_definitions.values(): task_arns.extend( [task_definition.arn for task_definition in task_definition_list]) return task_arns
[ "def", "list_task_definitions", "(", "self", ")", ":", "task_arns", "=", "[", "]", "for", "task_definition_list", "in", "self", ".", "task_definitions", ".", "values", "(", ")", ":", "task_arns", ".", "extend", "(", "[", "task_definition", ".", "arn", "for",...
Filtering not implemented
[ "Filtering", "not", "implemented" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ecs/models.py#L481-L489
236,058
spulec/moto
moto/iotdata/models.py
FakeShadow.create_from_previous_version
def create_from_previous_version(cls, previous_shadow, payload): """ set None to payload when you want to delete shadow """ version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {}) if payload is None: # if given payload is None, delete existing payload # this means the request was delete_thing_shadow shadow = FakeShadow(None, None, None, version, deleted=True) return shadow # we can make sure that payload has 'state' key desired = payload['state'].get( 'desired', previous_payload.get('state', {}).get('desired', None) ) reported = payload['state'].get( 'reported', previous_payload.get('state', {}).get('reported', None) ) shadow = FakeShadow(desired, reported, payload, version) return shadow
python
def create_from_previous_version(cls, previous_shadow, payload): version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {}) if payload is None: # if given payload is None, delete existing payload # this means the request was delete_thing_shadow shadow = FakeShadow(None, None, None, version, deleted=True) return shadow # we can make sure that payload has 'state' key desired = payload['state'].get( 'desired', previous_payload.get('state', {}).get('desired', None) ) reported = payload['state'].get( 'reported', previous_payload.get('state', {}).get('reported', None) ) shadow = FakeShadow(desired, reported, payload, version) return shadow
[ "def", "create_from_previous_version", "(", "cls", ",", "previous_shadow", ",", "payload", ")", ":", "version", ",", "previous_payload", "=", "(", "previous_shadow", ".", "version", "+", "1", ",", "previous_shadow", ".", "to_dict", "(", "include_delta", "=", "Fa...
set None to payload when you want to delete shadow
[ "set", "None", "to", "payload", "when", "you", "want", "to", "delete", "shadow" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iotdata/models.py#L30-L52
236,059
spulec/moto
moto/iotdata/models.py
FakeShadow.to_dict
def to_dict(self, include_delta=True): """returning nothing except for just top-level keys for now. """ if self.deleted: return { 'timestamp': self.timestamp, 'version': self.version } delta = self.parse_payload(self.desired, self.reported) payload = {} if self.desired is not None: payload['desired'] = self.desired if self.reported is not None: payload['reported'] = self.reported if include_delta and (delta is not None and len(delta.keys()) != 0): payload['delta'] = delta metadata = {} if self.metadata_desired is not None: metadata['desired'] = self.metadata_desired if self.metadata_reported is not None: metadata['reported'] = self.metadata_reported return { 'state': payload, 'metadata': metadata, 'timestamp': self.timestamp, 'version': self.version }
python
def to_dict(self, include_delta=True): if self.deleted: return { 'timestamp': self.timestamp, 'version': self.version } delta = self.parse_payload(self.desired, self.reported) payload = {} if self.desired is not None: payload['desired'] = self.desired if self.reported is not None: payload['reported'] = self.reported if include_delta and (delta is not None and len(delta.keys()) != 0): payload['delta'] = delta metadata = {} if self.metadata_desired is not None: metadata['desired'] = self.metadata_desired if self.metadata_reported is not None: metadata['reported'] = self.metadata_reported return { 'state': payload, 'metadata': metadata, 'timestamp': self.timestamp, 'version': self.version }
[ "def", "to_dict", "(", "self", ",", "include_delta", "=", "True", ")", ":", "if", "self", ".", "deleted", ":", "return", "{", "'timestamp'", ":", "self", ".", "timestamp", ",", "'version'", ":", "self", ".", "version", "}", "delta", "=", "self", ".", ...
returning nothing except for just top-level keys for now.
[ "returning", "nothing", "except", "for", "just", "top", "-", "level", "keys", "for", "now", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iotdata/models.py#L102-L130
236,060
spulec/moto
moto/iotdata/models.py
IoTDataPlaneBackend.delete_thing_shadow
def delete_thing_shadow(self, thing_name): """after deleting, get_thing_shadow will raise ResourceNotFound. But version of the shadow keep increasing... """ thing = iot_backends[self.region_name].describe_thing(thing_name) if thing.thing_shadow is None: raise ResourceNotFoundException() payload = None new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) thing.thing_shadow = new_shadow return thing.thing_shadow
python
def delete_thing_shadow(self, thing_name): thing = iot_backends[self.region_name].describe_thing(thing_name) if thing.thing_shadow is None: raise ResourceNotFoundException() payload = None new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) thing.thing_shadow = new_shadow return thing.thing_shadow
[ "def", "delete_thing_shadow", "(", "self", ",", "thing_name", ")", ":", "thing", "=", "iot_backends", "[", "self", ".", "region_name", "]", ".", "describe_thing", "(", "thing_name", ")", "if", "thing", ".", "thing_shadow", "is", "None", ":", "raise", "Resour...
after deleting, get_thing_shadow will raise ResourceNotFound. But version of the shadow keep increasing...
[ "after", "deleting", "get_thing_shadow", "will", "raise", "ResourceNotFound", ".", "But", "version", "of", "the", "shadow", "keep", "increasing", "..." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iotdata/models.py#L175-L185
236,061
spulec/moto
moto/acm/models.py
AWSCertificateManagerBackend._get_arn_from_idempotency_token
def _get_arn_from_idempotency_token(self, token): """ If token doesnt exist, return None, later it will be set with an expiry and arn. If token expiry has passed, delete entry and return None Else return ARN :param token: String token :return: None or ARN """ now = datetime.datetime.now() if token in self._idempotency_tokens: if self._idempotency_tokens[token]['expires'] < now: # Token has expired, new request del self._idempotency_tokens[token] return None else: return self._idempotency_tokens[token]['arn'] return None
python
def _get_arn_from_idempotency_token(self, token): now = datetime.datetime.now() if token in self._idempotency_tokens: if self._idempotency_tokens[token]['expires'] < now: # Token has expired, new request del self._idempotency_tokens[token] return None else: return self._idempotency_tokens[token]['arn'] return None
[ "def", "_get_arn_from_idempotency_token", "(", "self", ",", "token", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "token", "in", "self", ".", "_idempotency_tokens", ":", "if", "self", ".", "_idempotency_tokens", "[", "token"...
If token doesnt exist, return None, later it will be set with an expiry and arn. If token expiry has passed, delete entry and return None Else return ARN :param token: String token :return: None or ARN
[ "If", "token", "doesnt", "exist", "return", "None", "later", "it", "will", "be", "set", "with", "an", "expiry", "and", "arn", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/acm/models.py#L287-L308
236,062
spulec/moto
moto/iam/models.py
IAMBackend._validate_tag_key
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'): """Validates the tag key. :param all_tags: Dict to check if there is a duplicate tag. :param tag_key: The tag key to check against. :param exception_param: The exception parameter to send over to help format the message. This is to reflect the difference between the tag and untag APIs. :return: """ # Validate that the key length is correct: if len(tag_key) > 128: raise TagKeyTooBig(tag_key, param=exception_param) # Validate that the tag key fits the proper Regex: # [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+ match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key) # Kudos if you can come up with a better way of doing a global search :) if not len(match) or len(match[0]) < len(tag_key): raise InvalidTagCharacters(tag_key, param=exception_param)
python
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'): # Validate that the key length is correct: if len(tag_key) > 128: raise TagKeyTooBig(tag_key, param=exception_param) # Validate that the tag key fits the proper Regex: # [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+ match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key) # Kudos if you can come up with a better way of doing a global search :) if not len(match) or len(match[0]) < len(tag_key): raise InvalidTagCharacters(tag_key, param=exception_param)
[ "def", "_validate_tag_key", "(", "self", ",", "tag_key", ",", "exception_param", "=", "'tags.X.member.key'", ")", ":", "# Validate that the key length is correct:", "if", "len", "(", "tag_key", ")", ">", "128", ":", "raise", "TagKeyTooBig", "(", "tag_key", ",", "p...
Validates the tag key. :param all_tags: Dict to check if there is a duplicate tag. :param tag_key: The tag key to check against. :param exception_param: The exception parameter to send over to help format the message. This is to reflect the difference between the tag and untag APIs. :return:
[ "Validates", "the", "tag", "key", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iam/models.py#L639-L657
236,063
spulec/moto
moto/iam/models.py
IAMBackend.enable_mfa_device
def enable_mfa_device(self, user_name, serial_number, authentication_code_1, authentication_code_2): """Enable MFA Device for user.""" user = self.get_user(user_name) if serial_number in user.mfa_devices: raise IAMConflictException( "EntityAlreadyExists", "Device {0} already exists".format(serial_number) ) user.enable_mfa_device( serial_number, authentication_code_1, authentication_code_2 )
python
def enable_mfa_device(self, user_name, serial_number, authentication_code_1, authentication_code_2): user = self.get_user(user_name) if serial_number in user.mfa_devices: raise IAMConflictException( "EntityAlreadyExists", "Device {0} already exists".format(serial_number) ) user.enable_mfa_device( serial_number, authentication_code_1, authentication_code_2 )
[ "def", "enable_mfa_device", "(", "self", ",", "user_name", ",", "serial_number", ",", "authentication_code_1", ",", "authentication_code_2", ")", ":", "user", "=", "self", ".", "get_user", "(", "user_name", ")", "if", "serial_number", "in", "user", ".", "mfa_dev...
Enable MFA Device for user.
[ "Enable", "MFA", "Device", "for", "user", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iam/models.py#L1066-L1083
236,064
spulec/moto
moto/iam/models.py
IAMBackend.deactivate_mfa_device
def deactivate_mfa_device(self, user_name, serial_number): """Deactivate and detach MFA Device from user if device exists.""" user = self.get_user(user_name) if serial_number not in user.mfa_devices: raise IAMNotFoundException( "Device {0} not found".format(serial_number) ) user.deactivate_mfa_device(serial_number)
python
def deactivate_mfa_device(self, user_name, serial_number): user = self.get_user(user_name) if serial_number not in user.mfa_devices: raise IAMNotFoundException( "Device {0} not found".format(serial_number) ) user.deactivate_mfa_device(serial_number)
[ "def", "deactivate_mfa_device", "(", "self", ",", "user_name", ",", "serial_number", ")", ":", "user", "=", "self", ".", "get_user", "(", "user_name", ")", "if", "serial_number", "not", "in", "user", ".", "mfa_devices", ":", "raise", "IAMNotFoundException", "(...
Deactivate and detach MFA Device from user if device exists.
[ "Deactivate", "and", "detach", "MFA", "Device", "from", "user", "if", "device", "exists", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/iam/models.py#L1085-L1093
236,065
spulec/moto
moto/route53/models.py
RecordSet.delete
def delete(self, *args, **kwargs): ''' Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored ''' hosted_zone = route53_backend.get_hosted_zone_by_name( self.hosted_zone_name) if not hosted_zone: hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id) hosted_zone.delete_rrset_by_name(self.name)
python
def delete(self, *args, **kwargs): ''' Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored ''' hosted_zone = route53_backend.get_hosted_zone_by_name( self.hosted_zone_name) if not hosted_zone: hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id) hosted_zone.delete_rrset_by_name(self.name)
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "hosted_zone", "=", "route53_backend", ".", "get_hosted_zone_by_name", "(", "self", ".", "hosted_zone_name", ")", "if", "not", "hosted_zone", ":", "hosted_zone", "=", "route53...
Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored
[ "Not", "exposed", "as", "part", "of", "the", "Route", "53", "API", "-", "used", "for", "CloudFormation", ".", "args", "are", "ignored" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/route53/models.py#L159-L165
236,066
spulec/moto
moto/packages/httpretty/http.py
last_requestline
def last_requestline(sent_data): """ Find the last line in sent_data that can be parsed with parse_requestline """ for line in reversed(sent_data): try: parse_requestline(decode_utf8(line)) except ValueError: pass else: return line
python
def last_requestline(sent_data): for line in reversed(sent_data): try: parse_requestline(decode_utf8(line)) except ValueError: pass else: return line
[ "def", "last_requestline", "(", "sent_data", ")", ":", "for", "line", "in", "reversed", "(", "sent_data", ")", ":", "try", ":", "parse_requestline", "(", "decode_utf8", "(", "line", ")", ")", "except", "ValueError", ":", "pass", "else", ":", "return", "lin...
Find the last line in sent_data that can be parsed with parse_requestline
[ "Find", "the", "last", "line", "in", "sent_data", "that", "can", "be", "parsed", "with", "parse_requestline" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/packages/httpretty/http.py#L144-L154
236,067
spulec/moto
moto/sqs/models.py
Message.attribute_md5
def attribute_md5(self): """ The MD5 of all attributes is calculated by first generating a utf-8 string from each attribute and MD5-ing the concatenation of them all. Each attribute is encoded with some bytes that describe the length of each part and the type of attribute. Not yet implemented: List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k) """ def utf8(str): if isinstance(str, six.string_types): return str.encode('utf-8') return str md5 = hashlib.md5() struct_format = "!I".encode('ascii') # ensure it's a bytestring for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] data_type = attr['data_type'] encoded = utf8('') # Each part of each attribute is encoded right after it's # own length is packed into a 4-byte integer # 'timestamp' -> b'\x00\x00\x00\t' encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name) # The datatype is additionally given a final byte # representing which type it is encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) encoded += TRANSPORT_TYPE_ENCODINGS[data_type] if data_type == 'String' or data_type == 'Number': value = attr['string_value'] elif data_type == 'Binary': print(data_type, attr['binary_value'], type(attr['binary_value'])) value = base64.b64decode(attr['binary_value']) else: print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type)) # The following should be enough of a clue to users that # they are not, in fact, looking at a correct MD5 while # also following the character and length constraints of # MD5 so as not to break client softwre return('deadbeefdeadbeefdeadbeefdeadbeef') encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value) md5.update(encoded) return md5.hexdigest()
python
def attribute_md5(self): def utf8(str): if isinstance(str, six.string_types): return str.encode('utf-8') return str md5 = hashlib.md5() struct_format = "!I".encode('ascii') # ensure it's a bytestring for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] data_type = attr['data_type'] encoded = utf8('') # Each part of each attribute is encoded right after it's # own length is packed into a 4-byte integer # 'timestamp' -> b'\x00\x00\x00\t' encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name) # The datatype is additionally given a final byte # representing which type it is encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) encoded += TRANSPORT_TYPE_ENCODINGS[data_type] if data_type == 'String' or data_type == 'Number': value = attr['string_value'] elif data_type == 'Binary': print(data_type, attr['binary_value'], type(attr['binary_value'])) value = base64.b64decode(attr['binary_value']) else: print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type)) # The following should be enough of a clue to users that # they are not, in fact, looking at a correct MD5 while # also following the character and length constraints of # MD5 so as not to break client softwre return('deadbeefdeadbeefdeadbeefdeadbeef') encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value) md5.update(encoded) return md5.hexdigest()
[ "def", "attribute_md5", "(", "self", ")", ":", "def", "utf8", "(", "str", ")", ":", "if", "isinstance", "(", "str", ",", "six", ".", "string_types", ")", ":", "return", "str", ".", "encode", "(", "'utf-8'", ")", "return", "str", "md5", "=", "hashlib"...
The MD5 of all attributes is calculated by first generating a utf-8 string from each attribute and MD5-ing the concatenation of them all. Each attribute is encoded with some bytes that describe the length of each part and the type of attribute. Not yet implemented: List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k)
[ "The", "MD5", "of", "all", "attributes", "is", "calculated", "by", "first", "generating", "a", "utf", "-", "8", "string", "from", "each", "attribute", "and", "MD5", "-", "ing", "the", "concatenation", "of", "them", "all", ".", "Each", "attribute", "is", ...
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/sqs/models.py#L54-L100
236,068
spulec/moto
moto/sqs/models.py
Message.mark_received
def mark_received(self, visibility_timeout=None): """ When a message is received we will set the first receive timestamp, tap the ``approximate_receive_count`` and the ``visible_at`` time. """ if visibility_timeout: visibility_timeout = int(visibility_timeout) else: visibility_timeout = 0 if not self.approximate_first_receive_timestamp: self.approximate_first_receive_timestamp = int(unix_time_millis()) self.approximate_receive_count += 1 # Make message visible again in the future unless its # destroyed. if visibility_timeout: self.change_visibility(visibility_timeout) self.receipt_handle = generate_receipt_handle()
python
def mark_received(self, visibility_timeout=None): if visibility_timeout: visibility_timeout = int(visibility_timeout) else: visibility_timeout = 0 if not self.approximate_first_receive_timestamp: self.approximate_first_receive_timestamp = int(unix_time_millis()) self.approximate_receive_count += 1 # Make message visible again in the future unless its # destroyed. if visibility_timeout: self.change_visibility(visibility_timeout) self.receipt_handle = generate_receipt_handle()
[ "def", "mark_received", "(", "self", ",", "visibility_timeout", "=", "None", ")", ":", "if", "visibility_timeout", ":", "visibility_timeout", "=", "int", "(", "visibility_timeout", ")", "else", ":", "visibility_timeout", "=", "0", "if", "not", "self", ".", "ap...
When a message is received we will set the first receive timestamp, tap the ``approximate_receive_count`` and the ``visible_at`` time.
[ "When", "a", "message", "is", "received", "we", "will", "set", "the", "first", "receive", "timestamp", "tap", "the", "approximate_receive_count", "and", "the", "visible_at", "time", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/sqs/models.py#L111-L131
236,069
spulec/moto
moto/sqs/models.py
SQSBackend.receive_messages
def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout): """ Attempt to retrieve visible messages from a queue. If a message was read by client and not deleted it is considered to be "inflight" and cannot be read. We make attempts to obtain ``count`` messages but we may return less if messages are in-flight or there are simple not enough messages in the queue. :param string queue_name: The name of the queue to read from. :param int count: The maximum amount of messages to retrieve. :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers. :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds """ queue = self.get_queue(queue_name) result = [] previous_result_count = len(result) polling_end = unix_time() + wait_seconds_timeout # queue.messages only contains visible messages while True: if result or (wait_seconds_timeout and unix_time() > polling_end): break messages_to_dlq = [] for message in queue.messages: if not message.visible: continue if message in queue.pending_messages: # The message is pending but is visible again, so the # consumer must have timed out. queue.pending_messages.remove(message) if message.group_id and queue.fifo_queue: if message.group_id in queue.pending_message_groups: # There is already one active message with the same # group, so we cannot deliver this one. 
continue queue.pending_messages.add(message) if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']: messages_to_dlq.append(message) continue message.mark_received( visibility_timeout=visibility_timeout ) result.append(message) if len(result) >= count: break for message in messages_to_dlq: queue._messages.remove(message) queue.dead_letter_queue.add_message(message) if previous_result_count == len(result): if wait_seconds_timeout == 0: # There is timeout and we have added no additional results, # so break to avoid an infinite loop. break import time time.sleep(0.01) continue previous_result_count = len(result) return result
python
def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout): queue = self.get_queue(queue_name) result = [] previous_result_count = len(result) polling_end = unix_time() + wait_seconds_timeout # queue.messages only contains visible messages while True: if result or (wait_seconds_timeout and unix_time() > polling_end): break messages_to_dlq = [] for message in queue.messages: if not message.visible: continue if message in queue.pending_messages: # The message is pending but is visible again, so the # consumer must have timed out. queue.pending_messages.remove(message) if message.group_id and queue.fifo_queue: if message.group_id in queue.pending_message_groups: # There is already one active message with the same # group, so we cannot deliver this one. continue queue.pending_messages.add(message) if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']: messages_to_dlq.append(message) continue message.mark_received( visibility_timeout=visibility_timeout ) result.append(message) if len(result) >= count: break for message in messages_to_dlq: queue._messages.remove(message) queue.dead_letter_queue.add_message(message) if previous_result_count == len(result): if wait_seconds_timeout == 0: # There is timeout and we have added no additional results, # so break to avoid an infinite loop. break import time time.sleep(0.01) continue previous_result_count = len(result) return result
[ "def", "receive_messages", "(", "self", ",", "queue_name", ",", "count", ",", "wait_seconds_timeout", ",", "visibility_timeout", ")", ":", "queue", "=", "self", ".", "get_queue", "(", "queue_name", ")", "result", "=", "[", "]", "previous_result_count", "=", "l...
Attempt to retrieve visible messages from a queue. If a message was read by client and not deleted it is considered to be "inflight" and cannot be read. We make attempts to obtain ``count`` messages but we may return less if messages are in-flight or there are simple not enough messages in the queue. :param string queue_name: The name of the queue to read from. :param int count: The maximum amount of messages to retrieve. :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers. :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds
[ "Attempt", "to", "retrieve", "visible", "messages", "from", "a", "queue", "." ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/sqs/models.py#L469-L542
236,070
spulec/moto
moto/dynamodb2/models.py
DynamoDBBackend.get_table_keys_name
def get_table_keys_name(self, table_name, keys): """ Given a set of keys, extracts the key and range key """ table = self.tables.get(table_name) if not table: return None, None else: if len(keys) == 1: for key in keys: if key in table.hash_key_names: return key, None # for potential_hash, potential_range in zip(table.hash_key_names, table.range_key_names): # if set([potential_hash, potential_range]) == set(keys): # return potential_hash, potential_range potential_hash, potential_range = None, None for key in set(keys): if key in table.hash_key_names: potential_hash = key elif key in table.range_key_names: potential_range = key return potential_hash, potential_range
python
def get_table_keys_name(self, table_name, keys): table = self.tables.get(table_name) if not table: return None, None else: if len(keys) == 1: for key in keys: if key in table.hash_key_names: return key, None # for potential_hash, potential_range in zip(table.hash_key_names, table.range_key_names): # if set([potential_hash, potential_range]) == set(keys): # return potential_hash, potential_range potential_hash, potential_range = None, None for key in set(keys): if key in table.hash_key_names: potential_hash = key elif key in table.range_key_names: potential_range = key return potential_hash, potential_range
[ "def", "get_table_keys_name", "(", "self", ",", "table_name", ",", "keys", ")", ":", "table", "=", "self", ".", "tables", ".", "get", "(", "table_name", ")", "if", "not", "table", ":", "return", "None", ",", "None", "else", ":", "if", "len", "(", "ke...
Given a set of keys, extracts the key and range key
[ "Given", "a", "set", "of", "keys", "extracts", "the", "key", "and", "range", "key" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/dynamodb2/models.py#L834-L855
236,071
spulec/moto
moto/instance_metadata/responses.py
InstanceMetadataResponse.metadata_response
def metadata_response(self, request, full_url, headers): """ Mock response for localhost metadata http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html """ parsed_url = urlparse(full_url) tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1) credentials = dict( AccessKeyId="test-key", SecretAccessKey="test-secret-key", Token="test-session-token", Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") ) path = parsed_url.path meta_data_prefix = "/latest/meta-data/" # Strip prefix if it is there if path.startswith(meta_data_prefix): path = path[len(meta_data_prefix):] if path == '': result = 'iam' elif path == 'iam': result = json.dumps({ 'security-credentials': { 'default-role': credentials } }) elif path == 'iam/security-credentials/': result = 'default-role' elif path == 'iam/security-credentials/default-role': result = json.dumps(credentials) else: raise NotImplementedError( "The {0} metadata path has not been implemented".format(path)) return 200, headers, result
python
def metadata_response(self, request, full_url, headers): parsed_url = urlparse(full_url) tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1) credentials = dict( AccessKeyId="test-key", SecretAccessKey="test-secret-key", Token="test-session-token", Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") ) path = parsed_url.path meta_data_prefix = "/latest/meta-data/" # Strip prefix if it is there if path.startswith(meta_data_prefix): path = path[len(meta_data_prefix):] if path == '': result = 'iam' elif path == 'iam': result = json.dumps({ 'security-credentials': { 'default-role': credentials } }) elif path == 'iam/security-credentials/': result = 'default-role' elif path == 'iam/security-credentials/default-role': result = json.dumps(credentials) else: raise NotImplementedError( "The {0} metadata path has not been implemented".format(path)) return 200, headers, result
[ "def", "metadata_response", "(", "self", ",", "request", ",", "full_url", ",", "headers", ")", ":", "parsed_url", "=", "urlparse", "(", "full_url", ")", "tomorrow", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "+", "datetime", ".", "timedelta...
Mock response for localhost metadata http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
[ "Mock", "response", "for", "localhost", "metadata" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/instance_metadata/responses.py#L11-L49
236,072
spulec/moto
moto/cloudwatch/models.py
CloudWatchBackend._list_element_starts_with
def _list_element_starts_with(items, needle): """True of any of the list elements starts with needle""" for item in items: if item.startswith(needle): return True return False
python
def _list_element_starts_with(items, needle): for item in items: if item.startswith(needle): return True return False
[ "def", "_list_element_starts_with", "(", "items", ",", "needle", ")", ":", "for", "item", "in", "items", ":", "if", "item", ".", "startswith", "(", "needle", ")", ":", "return", "True", "return", "False" ]
True of any of the list elements starts with needle
[ "True", "of", "any", "of", "the", "list", "elements", "starts", "with", "needle" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/cloudwatch/models.py#L193-L198
236,073
spulec/moto
moto/batch/models.py
BatchBackend._validate_compute_resources
def _validate_compute_resources(self, cr): """ Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict """ for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): if param not in cr: raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: raise InvalidParameterValueException('minVCpus must be positive') if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: if instance_type == 'optimal': pass # Optimal should pick from latest of current gen elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) except InvalidSubnetIdError: raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT 
SUPPORTED YET')
python
def _validate_compute_resources(self, cr): for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): if param not in cr: raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: raise InvalidParameterValueException('minVCpus must be positive') if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: if instance_type == 'optimal': pass # Optimal should pick from latest of current gen elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) except InvalidSubnetIdError: raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT SUPPORTED YET')
[ "def", "_validate_compute_resources", "(", "self", ",", "cr", ")", ":", "for", "param", "in", "(", "'instanceRole'", ",", "'maxvCpus'", ",", "'minvCpus'", ",", "'instanceTypes'", ",", "'securityGroupIds'", ",", "'subnets'", ",", "'type'", ")", ":", "if", "para...
Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict
[ "Checks", "contents", "of", "sub", "dictionary", "for", "managed", "clusters" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L669-L716
236,074
spulec/moto
moto/batch/models.py
BatchBackend.find_min_instances_to_meet_vcpus
def find_min_instances_to_meet_vcpus(instance_types, target): """ Finds the minimum needed instances to meed a vcpu target :param instance_types: Instance types, like ['t2.medium', 't2.small'] :type instance_types: list of str :param target: VCPU target :type target: float :return: List of instance types :rtype: list of str """ # vcpus = [ (vcpus, instance_type), (vcpus, instance_type), ... ] instance_vcpus = [] instances = [] for instance_type in instance_types: if instance_type == 'optimal': instance_type = 'm4.4xlarge' instance_vcpus.append( (EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type) ) instance_vcpus = sorted(instance_vcpus, key=lambda item: item[0], reverse=True) # Loop through, # if biggest instance type smaller than target, and len(instance_types)> 1, then use biggest type # if biggest instance type bigger than target, and len(instance_types)> 1, then remove it and move on # if biggest instance type bigger than target and len(instan_types) == 1 then add instance and finish # if biggest instance type smaller than target and len(instan_types) == 1 then loop adding instances until target == 0 # ^^ boils down to keep adding last till target vcpus is negative # #Algorithm ;-) ... Could probably be done better with some quality lambdas while target > 0: current_vcpu, current_instance = instance_vcpus[0] if len(instance_vcpus) > 1: if current_vcpu <= target: target -= current_vcpu instances.append(current_instance) else: # try next biggest instance instance_vcpus.pop(0) else: # Were on the last instance target -= current_vcpu instances.append(current_instance) return instances
python
def find_min_instances_to_meet_vcpus(instance_types, target): # vcpus = [ (vcpus, instance_type), (vcpus, instance_type), ... ] instance_vcpus = [] instances = [] for instance_type in instance_types: if instance_type == 'optimal': instance_type = 'm4.4xlarge' instance_vcpus.append( (EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type) ) instance_vcpus = sorted(instance_vcpus, key=lambda item: item[0], reverse=True) # Loop through, # if biggest instance type smaller than target, and len(instance_types)> 1, then use biggest type # if biggest instance type bigger than target, and len(instance_types)> 1, then remove it and move on # if biggest instance type bigger than target and len(instan_types) == 1 then add instance and finish # if biggest instance type smaller than target and len(instan_types) == 1 then loop adding instances until target == 0 # ^^ boils down to keep adding last till target vcpus is negative # #Algorithm ;-) ... Could probably be done better with some quality lambdas while target > 0: current_vcpu, current_instance = instance_vcpus[0] if len(instance_vcpus) > 1: if current_vcpu <= target: target -= current_vcpu instances.append(current_instance) else: # try next biggest instance instance_vcpus.pop(0) else: # Were on the last instance target -= current_vcpu instances.append(current_instance) return instances
[ "def", "find_min_instances_to_meet_vcpus", "(", "instance_types", ",", "target", ")", ":", "# vcpus = [ (vcpus, instance_type), (vcpus, instance_type), ... ]", "instance_vcpus", "=", "[", "]", "instances", "=", "[", "]", "for", "instance_type", "in", "instance_types", ":", ...
Finds the minimum needed instances to meed a vcpu target :param instance_types: Instance types, like ['t2.medium', 't2.small'] :type instance_types: list of str :param target: VCPU target :type target: float :return: List of instance types :rtype: list of str
[ "Finds", "the", "minimum", "needed", "instances", "to", "meed", "a", "vcpu", "target" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L719-L766
236,075
spulec/moto
moto/batch/models.py
BatchBackend.create_job_queue
def create_job_queue(self, queue_name, priority, state, compute_env_order): """ Create a job queue :param queue_name: Queue name :type queue_name: str :param priority: Queue priority :type priority: int :param state: Queue state :type state: string :param compute_env_order: Compute environment list :type compute_env_order: list of dict :return: Tuple of Name, ARN :rtype: tuple of str """ for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')): if variable is None: raise ClientException('{0} must be provided'.format(var_name)) if state not in ('ENABLED', 'DISABLED'): raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) if self.get_job_queue_by_name(queue_name) is not None: raise ClientException('Job queue {0} already exists'.format(queue_name)) if len(compute_env_order) == 0: raise ClientException('At least 1 compute environment must be provided') try: # orders and extracts computeEnvironment names ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] env_objects = [] # Check each ARN exists, then make a list of compute env's for arn in ordered_compute_environments: env = self.get_compute_environment_by_arn(arn) if env is None: raise ClientException('Compute environment {0} does not exist'.format(arn)) env_objects.append(env) except Exception: raise ClientException('computeEnvironmentOrder is malformed') # Create new Job Queue queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name) self._job_queues[queue.arn] = queue return queue_name, queue.arn
python
def create_job_queue(self, queue_name, priority, state, compute_env_order): for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')): if variable is None: raise ClientException('{0} must be provided'.format(var_name)) if state not in ('ENABLED', 'DISABLED'): raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) if self.get_job_queue_by_name(queue_name) is not None: raise ClientException('Job queue {0} already exists'.format(queue_name)) if len(compute_env_order) == 0: raise ClientException('At least 1 compute environment must be provided') try: # orders and extracts computeEnvironment names ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] env_objects = [] # Check each ARN exists, then make a list of compute env's for arn in ordered_compute_environments: env = self.get_compute_environment_by_arn(arn) if env is None: raise ClientException('Compute environment {0} does not exist'.format(arn)) env_objects.append(env) except Exception: raise ClientException('computeEnvironmentOrder is malformed') # Create new Job Queue queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name) self._job_queues[queue.arn] = queue return queue_name, queue.arn
[ "def", "create_job_queue", "(", "self", ",", "queue_name", ",", "priority", ",", "state", ",", "compute_env_order", ")", ":", "for", "variable", ",", "var_name", "in", "(", "(", "queue_name", ",", "'jobQueueName'", ")", ",", "(", "priority", ",", "'priority'...
Create a job queue :param queue_name: Queue name :type queue_name: str :param priority: Queue priority :type priority: int :param state: Queue state :type state: string :param compute_env_order: Compute environment list :type compute_env_order: list of dict :return: Tuple of Name, ARN :rtype: tuple of str
[ "Create", "a", "job", "queue" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L814-L857
236,076
spulec/moto
moto/batch/models.py
BatchBackend.update_job_queue
def update_job_queue(self, queue_name, priority, state, compute_env_order): """ Update a job queue :param queue_name: Queue name :type queue_name: str :param priority: Queue priority :type priority: int :param state: Queue state :type state: string :param compute_env_order: Compute environment list :type compute_env_order: list of dict :return: Tuple of Name, ARN :rtype: tuple of str """ if queue_name is None: raise ClientException('jobQueueName must be provided') job_queue = self.get_job_queue(queue_name) if job_queue is None: raise ClientException('Job queue {0} does not exist'.format(queue_name)) if state is not None: if state not in ('ENABLED', 'DISABLED'): raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) job_queue.state = state if compute_env_order is not None: if len(compute_env_order) == 0: raise ClientException('At least 1 compute environment must be provided') try: # orders and extracts computeEnvironment names ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] env_objects = [] # Check each ARN exists, then make a list of compute env's for arn in ordered_compute_environments: env = self.get_compute_environment_by_arn(arn) if env is None: raise ClientException('Compute environment {0} does not exist'.format(arn)) env_objects.append(env) except Exception: raise ClientException('computeEnvironmentOrder is malformed') job_queue.env_order_json = compute_env_order job_queue.environments = env_objects if priority is not None: job_queue.priority = priority return queue_name, job_queue.arn
python
def update_job_queue(self, queue_name, priority, state, compute_env_order): if queue_name is None: raise ClientException('jobQueueName must be provided') job_queue = self.get_job_queue(queue_name) if job_queue is None: raise ClientException('Job queue {0} does not exist'.format(queue_name)) if state is not None: if state not in ('ENABLED', 'DISABLED'): raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) job_queue.state = state if compute_env_order is not None: if len(compute_env_order) == 0: raise ClientException('At least 1 compute environment must be provided') try: # orders and extracts computeEnvironment names ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] env_objects = [] # Check each ARN exists, then make a list of compute env's for arn in ordered_compute_environments: env = self.get_compute_environment_by_arn(arn) if env is None: raise ClientException('Compute environment {0} does not exist'.format(arn)) env_objects.append(env) except Exception: raise ClientException('computeEnvironmentOrder is malformed') job_queue.env_order_json = compute_env_order job_queue.environments = env_objects if priority is not None: job_queue.priority = priority return queue_name, job_queue.arn
[ "def", "update_job_queue", "(", "self", ",", "queue_name", ",", "priority", ",", "state", ",", "compute_env_order", ")", ":", "if", "queue_name", "is", "None", ":", "raise", "ClientException", "(", "'jobQueueName must be provided'", ")", "job_queue", "=", "self", ...
Update a job queue :param queue_name: Queue name :type queue_name: str :param priority: Queue priority :type priority: int :param state: Queue state :type state: string :param compute_env_order: Compute environment list :type compute_env_order: list of dict :return: Tuple of Name, ARN :rtype: tuple of str
[ "Update", "a", "job", "queue" ]
4a286c4bc288933bb023396e2784a6fdbb966bc9
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L874-L924
236,077
chrismattmann/tika-python
tika/tika.py
toFilename
def toFilename(url): ''' gets url and returns filename ''' urlp = urlparse(url) path = urlp.path if not path: path = "file_{}".format(int(time.time())) value = re.sub(r'[^\w\s\.\-]', '-', path).strip().lower() return re.sub(r'[-\s]+', '-', value).strip("-")[-200:]
python
def toFilename(url): ''' gets url and returns filename ''' urlp = urlparse(url) path = urlp.path if not path: path = "file_{}".format(int(time.time())) value = re.sub(r'[^\w\s\.\-]', '-', path).strip().lower() return re.sub(r'[-\s]+', '-', value).strip("-")[-200:]
[ "def", "toFilename", "(", "url", ")", ":", "urlp", "=", "urlparse", "(", "url", ")", "path", "=", "urlp", ".", "path", "if", "not", "path", ":", "path", "=", "\"file_{}\"", ".", "format", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", ...
gets url and returns filename
[ "gets", "url", "and", "returns", "filename" ]
ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L672-L681
236,078
chrismattmann/tika-python
tika/tika.py
main
def main(argv=None): """Run Tika from command line according to USAGE.""" global Verbose global EncodeUtf8 global csvOutput if argv is None: argv = sys.argv if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))): log.exception('Bad args') raise TikaException('Bad args') try: opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:v:e:c', ['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode', 'csv']) except getopt.GetoptError as opt_error: msg, bad_opt = opt_error log.exception("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) raise TikaException("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) tikaServerJar = TikaServerJar serverHost = ServerHost outDir = '.' port = Port for opt, val in opts: if opt in ('-h', '--help'): echo2(USAGE); sys.exit() elif opt in ('--install'): tikaServerJar = val elif opt in ('--server'): serverHost = val elif opt in ('-o', '--output'): outDir = val elif opt in ('--port'): port = val elif opt in ('-v', '--verbose'): Verbose = 1 elif opt in ('-e', '--encode'): EncodeUtf8 = 1 elif opt in ('-c', '--csv'): csvOutput = 1 else: raise TikaException(USAGE) cmd = argv[0] option = argv[1] try: paths = argv[2:] except: paths = None return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8)
python
def main(argv=None): global Verbose global EncodeUtf8 global csvOutput if argv is None: argv = sys.argv if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))): log.exception('Bad args') raise TikaException('Bad args') try: opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:v:e:c', ['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode', 'csv']) except getopt.GetoptError as opt_error: msg, bad_opt = opt_error log.exception("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) raise TikaException("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) tikaServerJar = TikaServerJar serverHost = ServerHost outDir = '.' port = Port for opt, val in opts: if opt in ('-h', '--help'): echo2(USAGE); sys.exit() elif opt in ('--install'): tikaServerJar = val elif opt in ('--server'): serverHost = val elif opt in ('-o', '--output'): outDir = val elif opt in ('--port'): port = val elif opt in ('-v', '--verbose'): Verbose = 1 elif opt in ('-e', '--encode'): EncodeUtf8 = 1 elif opt in ('-c', '--csv'): csvOutput = 1 else: raise TikaException(USAGE) cmd = argv[0] option = argv[1] try: paths = argv[2:] except: paths = None return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "global", "Verbose", "global", "EncodeUtf8", "global", "csvOutput", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "if", "(", "len", "(", "argv", ")", "<", "3", "and", "not", "("...
Run Tika from command line according to USAGE.
[ "Run", "Tika", "from", "command", "line", "according", "to", "USAGE", "." ]
ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L771-L812
236,079
ahupp/python-magic
magic.py
from_file
def from_file(filename, mime=False): """" Accepts a filename and returns the detected filetype. Return value is the mimetype if mime=True, otherwise a human readable name. >>> magic.from_file("testdata/test.pdf", mime=True) 'application/pdf' """ m = _get_magic_type(mime) return m.from_file(filename)
python
def from_file(filename, mime=False): "m = _get_magic_type(mime) return m.from_file(filename)
[ "def", "from_file", "(", "filename", ",", "mime", "=", "False", ")", ":", "m", "=", "_get_magic_type", "(", "mime", ")", "return", "m", ".", "from_file", "(", "filename", ")" ]
Accepts a filename and returns the detected filetype. Return value is the mimetype if mime=True, otherwise a human readable name. >>> magic.from_file("testdata/test.pdf", mime=True) 'application/pdf'
[ "Accepts", "a", "filename", "and", "returns", "the", "detected", "filetype", ".", "Return", "value", "is", "the", "mimetype", "if", "mime", "=", "True", "otherwise", "a", "human", "readable", "name", "." ]
c5b386b08bfbc01330e2ba836d97749d242429dc
https://github.com/ahupp/python-magic/blob/c5b386b08bfbc01330e2ba836d97749d242429dc/magic.py#L133-L143
236,080
ahupp/python-magic
magic.py
from_buffer
def from_buffer(buffer, mime=False): """ Accepts a binary string and returns the detected filetype. Return value is the mimetype if mime=True, otherwise a human readable name. >>> magic.from_buffer(open("testdata/test.pdf").read(1024)) 'PDF document, version 1.2' """ m = _get_magic_type(mime) return m.from_buffer(buffer)
python
def from_buffer(buffer, mime=False): m = _get_magic_type(mime) return m.from_buffer(buffer)
[ "def", "from_buffer", "(", "buffer", ",", "mime", "=", "False", ")", ":", "m", "=", "_get_magic_type", "(", "mime", ")", "return", "m", ".", "from_buffer", "(", "buffer", ")" ]
Accepts a binary string and returns the detected filetype. Return value is the mimetype if mime=True, otherwise a human readable name. >>> magic.from_buffer(open("testdata/test.pdf").read(1024)) 'PDF document, version 1.2'
[ "Accepts", "a", "binary", "string", "and", "returns", "the", "detected", "filetype", ".", "Return", "value", "is", "the", "mimetype", "if", "mime", "=", "True", "otherwise", "a", "human", "readable", "name", "." ]
c5b386b08bfbc01330e2ba836d97749d242429dc
https://github.com/ahupp/python-magic/blob/c5b386b08bfbc01330e2ba836d97749d242429dc/magic.py#L146-L156
236,081
yhat/ggpy
ggplot/colors/palettes.py
desaturate
def desaturate(color, prop): """Decrease the saturation channel of a color by some percent. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name prop : float saturation channel of color will be multiplied by this value Returns ------- new_color : rgb tuple desaturated color code in RGB tuple representation """ # Check inputs if not 0 <= prop <= 1: raise ValueError("prop must be between 0 and 1") # Get rgb tuple rep rgb = mplcol.colorConverter.to_rgb(color) # Convert to hls h, l, s = colorsys.rgb_to_hls(*rgb) # Desaturate the saturation channel s *= prop # Convert back to rgb new_color = colorsys.hls_to_rgb(h, l, s) return new_color
python
def desaturate(color, prop): # Check inputs if not 0 <= prop <= 1: raise ValueError("prop must be between 0 and 1") # Get rgb tuple rep rgb = mplcol.colorConverter.to_rgb(color) # Convert to hls h, l, s = colorsys.rgb_to_hls(*rgb) # Desaturate the saturation channel s *= prop # Convert back to rgb new_color = colorsys.hls_to_rgb(h, l, s) return new_color
[ "def", "desaturate", "(", "color", ",", "prop", ")", ":", "# Check inputs", "if", "not", "0", "<=", "prop", "<=", "1", ":", "raise", "ValueError", "(", "\"prop must be between 0 and 1\"", ")", "# Get rgb tuple rep", "rgb", "=", "mplcol", ".", "colorConverter", ...
Decrease the saturation channel of a color by some percent. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name prop : float saturation channel of color will be multiplied by this value Returns ------- new_color : rgb tuple desaturated color code in RGB tuple representation
[ "Decrease", "the", "saturation", "channel", "of", "a", "color", "by", "some", "percent", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L17-L49
236,082
yhat/ggpy
ggplot/colors/palettes.py
color_palette
def color_palette(name=None, n_colors=6, desat=None): """Return a list of colors defining a color palette. Availible seaborn palette names: deep, muted, bright, pastel, dark, colorblind Other options: hls, husl, any matplotlib palette Matplotlib paletes can be specified as reversed palettes by appending "_r" to the name or as dark palettes by appending "_d" to the name. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. Parameters ---------- name: None, string, or sequence Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int Number of colors in the palette. If larger than the number of colors in the palette, they will cycle. desat : float Value to desaturate each color by. Returns ------- palette : list of RGB tuples. Color palette. Examples -------- >>> p = color_palette("muted") >>> p = color_palette("Blues_d", 10) >>> p = color_palette("Set1", desat=.7) >>> import matplotlib.pyplot as plt >>> with color_palette("husl", 8): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_palette : set the default color cycle for all plots. 
axes_style : define parameters to set the style of plots plotting_context : define parameters to scale plot elements """ seaborn_palettes = dict( deep=["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"], muted=["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"], pastel=["#92C6FF", "#97F0AA", "#FF9F9A", "#D0BBFF", "#FFFEA3", "#B0E0E6"], bright=["#003FFF", "#03ED3A", "#E8000B", "#8A2BE2", "#FFC400", "#00D7FF"], dark=["#001C7F", "#017517", "#8C0900", "#7600A1", "#B8860B", "#006374"], colorblind=["#0072B2", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#56B4E9"], ) if name is None: palette = mpl.rcParams["axes.color_cycle"] elif not isinstance(name, string_types): palette = name elif name == "hls": palette = hls_palette(n_colors) elif name == "husl": palette = husl_palette(n_colors) elif name in seaborn_palettes: palette = seaborn_palettes[name] elif name in dir(mpl.cm): palette = mpl_palette(name, n_colors) elif name[:-2] in dir(mpl.cm): palette = mpl_palette(name, n_colors) else: raise ValueError("%s is not a valid palette name" % name) if desat is not None: palette = [desaturate(c, desat) for c in palette] # Always return as many colors as we asked for pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] # Always return in r, g, b tuple format try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError("Could not generate a palette for %s" % str(name)) return palette
python
def color_palette(name=None, n_colors=6, desat=None): seaborn_palettes = dict( deep=["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"], muted=["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"], pastel=["#92C6FF", "#97F0AA", "#FF9F9A", "#D0BBFF", "#FFFEA3", "#B0E0E6"], bright=["#003FFF", "#03ED3A", "#E8000B", "#8A2BE2", "#FFC400", "#00D7FF"], dark=["#001C7F", "#017517", "#8C0900", "#7600A1", "#B8860B", "#006374"], colorblind=["#0072B2", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#56B4E9"], ) if name is None: palette = mpl.rcParams["axes.color_cycle"] elif not isinstance(name, string_types): palette = name elif name == "hls": palette = hls_palette(n_colors) elif name == "husl": palette = husl_palette(n_colors) elif name in seaborn_palettes: palette = seaborn_palettes[name] elif name in dir(mpl.cm): palette = mpl_palette(name, n_colors) elif name[:-2] in dir(mpl.cm): palette = mpl_palette(name, n_colors) else: raise ValueError("%s is not a valid palette name" % name) if desat is not None: palette = [desaturate(c, desat) for c in palette] # Always return as many colors as we asked for pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] # Always return in r, g, b tuple format try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError("Could not generate a palette for %s" % str(name)) return palette
[ "def", "color_palette", "(", "name", "=", "None", ",", "n_colors", "=", "6", ",", "desat", "=", "None", ")", ":", "seaborn_palettes", "=", "dict", "(", "deep", "=", "[", "\"#4C72B0\"", ",", "\"#55A868\"", ",", "\"#C44E52\"", ",", "\"#8172B2\"", ",", "\"#...
Return a list of colors defining a color palette. Availible seaborn palette names: deep, muted, bright, pastel, dark, colorblind Other options: hls, husl, any matplotlib palette Matplotlib paletes can be specified as reversed palettes by appending "_r" to the name or as dark palettes by appending "_d" to the name. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. Parameters ---------- name: None, string, or sequence Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int Number of colors in the palette. If larger than the number of colors in the palette, they will cycle. desat : float Value to desaturate each color by. Returns ------- palette : list of RGB tuples. Color palette. Examples -------- >>> p = color_palette("muted") >>> p = color_palette("Blues_d", 10) >>> p = color_palette("Set1", desat=.7) >>> import matplotlib.pyplot as plt >>> with color_palette("husl", 8): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_palette : set the default color cycle for all plots. axes_style : define parameters to set the style of plots plotting_context : define parameters to scale plot elements
[ "Return", "a", "list", "of", "colors", "defining", "a", "color", "palette", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L67-L165
236,083
yhat/ggpy
ggplot/colors/palettes.py
mpl_palette
def mpl_palette(name, n_colors=6): """Return discrete colors from a matplotlib palette. Note that this handles the qualitative colorbrewer palettes properly, although if you ask for more colors than a particular qualitative palette can provide you will fewer than you are expecting. Parameters ---------- name : string name of the palette n_colors : int number of colors in the palette Returns ------- palette : list of tuples palette colors in r, g, b format """ brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12, "Pastel1": 9, "Pastel2": 8, "Set1": 9, "Set2": 8, "Set3": 12} if name.endswith("_d"): pal = ["#333333"] pal.extend(color_palette(name.replace("_d", "_r"), 2)) cmap = blend_palette(pal, n_colors, as_cmap=True) else: cmap = getattr(mpl.cm, name) if name in brewer_qual_pals: bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors] else: bins = np.linspace(0, 1, n_colors + 2)[1:-1] palette = list(map(tuple, cmap(bins)[:, :3])) return palette
python
def mpl_palette(name, n_colors=6): brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12, "Pastel1": 9, "Pastel2": 8, "Set1": 9, "Set2": 8, "Set3": 12} if name.endswith("_d"): pal = ["#333333"] pal.extend(color_palette(name.replace("_d", "_r"), 2)) cmap = blend_palette(pal, n_colors, as_cmap=True) else: cmap = getattr(mpl.cm, name) if name in brewer_qual_pals: bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors] else: bins = np.linspace(0, 1, n_colors + 2)[1:-1] palette = list(map(tuple, cmap(bins)[:, :3])) return palette
[ "def", "mpl_palette", "(", "name", ",", "n_colors", "=", "6", ")", ":", "brewer_qual_pals", "=", "{", "\"Accent\"", ":", "8", ",", "\"Dark2\"", ":", "8", ",", "\"Paired\"", ":", "12", ",", "\"Pastel1\"", ":", "9", ",", "\"Pastel2\"", ":", "8", ",", "...
Return discrete colors from a matplotlib palette. Note that this handles the qualitative colorbrewer palettes properly, although if you ask for more colors than a particular qualitative palette can provide you will fewer than you are expecting. Parameters ---------- name : string name of the palette n_colors : int number of colors in the palette Returns ------- palette : list of tuples palette colors in r, g, b format
[ "Return", "discrete", "colors", "from", "a", "matplotlib", "palette", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L232-L269
236,084
yhat/ggpy
ggplot/colors/palettes.py
dark_palette
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False): """Make a palette that blends from a deep gray to `color`. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional if True, return as a matplotlib colormap instead of list Returns ------- palette : list or colormap """ gray = "#222222" colors = [color, gray] if reverse else [gray, color] return blend_palette(colors, n_colors, as_cmap)
python
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False): gray = "#222222" colors = [color, gray] if reverse else [gray, color] return blend_palette(colors, n_colors, as_cmap)
[ "def", "dark_palette", "(", "color", ",", "n_colors", "=", "6", ",", "reverse", "=", "False", ",", "as_cmap", "=", "False", ")", ":", "gray", "=", "\"#222222\"", "colors", "=", "[", "color", ",", "gray", "]", "if", "reverse", "else", "[", "gray", ","...
Make a palette that blends from a deep gray to `color`. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional if True, return as a matplotlib colormap instead of list Returns ------- palette : list or colormap
[ "Make", "a", "palette", "that", "blends", "from", "a", "deep", "gray", "to", "color", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L272-L293
236,085
yhat/ggpy
ggplot/colors/palettes.py
blend_palette
def blend_palette(colors, n_colors=6, as_cmap=False): """Make a palette that blends between a list of colors. Parameters ---------- colors : sequence of matplotlib colors hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette as_cmap : bool, optional if True, return as a matplotlib colormap instead of list Returns ------- palette : list or colormap """ name = "-".join(map(str, colors)) pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors) if not as_cmap: pal = pal(np.linspace(0, 1, n_colors)) return pal
python
def blend_palette(colors, n_colors=6, as_cmap=False): name = "-".join(map(str, colors)) pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors) if not as_cmap: pal = pal(np.linspace(0, 1, n_colors)) return pal
[ "def", "blend_palette", "(", "colors", ",", "n_colors", "=", "6", ",", "as_cmap", "=", "False", ")", ":", "name", "=", "\"-\"", ".", "join", "(", "map", "(", "str", ",", "colors", ")", ")", "pal", "=", "mpl", ".", "colors", ".", "LinearSegmentedColor...
Make a palette that blends between a list of colors. Parameters ---------- colors : sequence of matplotlib colors hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette as_cmap : bool, optional if True, return as a matplotlib colormap instead of list Returns ------- palette : list or colormap
[ "Make", "a", "palette", "that", "blends", "between", "a", "list", "of", "colors", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L296-L317
236,086
yhat/ggpy
ggplot/colors/palettes.py
xkcd_palette
def xkcd_palette(colors): """Make a palette with color names from the xkcd color survey. This is just a simple wrapper around the seaborn.xkcd_rbg dictionary. See xkcd for the full list of colors: http://xkcd.com/color/rgb/ """ palette = [xkcd_rgb[name] for name in colors] return color_palette(palette, len(palette))
python
def xkcd_palette(colors): palette = [xkcd_rgb[name] for name in colors] return color_palette(palette, len(palette))
[ "def", "xkcd_palette", "(", "colors", ")", ":", "palette", "=", "[", "xkcd_rgb", "[", "name", "]", "for", "name", "in", "colors", "]", "return", "color_palette", "(", "palette", ",", "len", "(", "palette", ")", ")" ]
Make a palette with color names from the xkcd color survey. This is just a simple wrapper around the seaborn.xkcd_rbg dictionary. See xkcd for the full list of colors: http://xkcd.com/color/rgb/
[ "Make", "a", "palette", "with", "color", "names", "from", "the", "xkcd", "color", "survey", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L320-L329
236,087
yhat/ggpy
ggplot/colors/palettes.py
cubehelix_palette
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8, light=.85, dark=.15, reverse=False, as_cmap=False): """Make a sequential palette from the cubehelix system. This produces a colormap with linearly-decreasing (or increasing) brightness. That means that information will be preserved if printed to black and white or viewed by someone who is colorblind. "cubehelix" is also availible as a matplotlib-based palette, but this function gives the user more control over the look of the palette and has a different set of defaults. Parameters ---------- n_colors : int Number of colors in the palette. start : float, 0 <= start <= 3 The hue at the start of the helix. rot : float Rotations around the hue wheel over the range of the palette. gamma : float 0 <= gamma Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1) colors. hue : float, 0 <= hue <= 1 Saturation of the colors. dark : float 0 <= dark <= 1 Intensity of the darkest color in the palette. light : float 0 <= light <= 1 Intensity of the lightest color in the palette. reverse : bool If True, the palette will go from dark to light. as_cmap : bool If True, return a matplotlib colormap instead of a list of colors. Returns ------- palette : list or colormap References ---------- Green, D. A. (2011). "A colour scheme for the display of astronomical intensity images". Bulletin of the Astromical Society of India, Vol. 39, p. 289-295. """ cdict = mpl._cm.cubehelix(gamma, start, rot, hue) cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict) x = np.linspace(light, dark, n_colors) pal = cmap(x)[:, :3].tolist() if reverse: pal = pal[::-1] if as_cmap: x_256 = np.linspace(light, dark, 256) if reverse: x_256 = x_256[::-1] pal_256 = cmap(x_256) cmap = mpl.colors.ListedColormap(pal_256) return cmap else: return pal
python
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8, light=.85, dark=.15, reverse=False, as_cmap=False): cdict = mpl._cm.cubehelix(gamma, start, rot, hue) cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict) x = np.linspace(light, dark, n_colors) pal = cmap(x)[:, :3].tolist() if reverse: pal = pal[::-1] if as_cmap: x_256 = np.linspace(light, dark, 256) if reverse: x_256 = x_256[::-1] pal_256 = cmap(x_256) cmap = mpl.colors.ListedColormap(pal_256) return cmap else: return pal
[ "def", "cubehelix_palette", "(", "n_colors", "=", "6", ",", "start", "=", "0", ",", "rot", "=", ".4", ",", "gamma", "=", "1.0", ",", "hue", "=", "0.8", ",", "light", "=", ".85", ",", "dark", "=", ".15", ",", "reverse", "=", "False", ",", "as_cmap...
Make a sequential palette from the cubehelix system. This produces a colormap with linearly-decreasing (or increasing) brightness. That means that information will be preserved if printed to black and white or viewed by someone who is colorblind. "cubehelix" is also availible as a matplotlib-based palette, but this function gives the user more control over the look of the palette and has a different set of defaults. Parameters ---------- n_colors : int Number of colors in the palette. start : float, 0 <= start <= 3 The hue at the start of the helix. rot : float Rotations around the hue wheel over the range of the palette. gamma : float 0 <= gamma Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1) colors. hue : float, 0 <= hue <= 1 Saturation of the colors. dark : float 0 <= dark <= 1 Intensity of the darkest color in the palette. light : float 0 <= light <= 1 Intensity of the lightest color in the palette. reverse : bool If True, the palette will go from dark to light. as_cmap : bool If True, return a matplotlib colormap instead of a list of colors. Returns ------- palette : list or colormap References ---------- Green, D. A. (2011). "A colour scheme for the display of astronomical intensity images". Bulletin of the Astromical Society of India, Vol. 39, p. 289-295.
[ "Make", "a", "sequential", "palette", "from", "the", "cubehelix", "system", "." ]
b6d23c22d52557b983da8ce7a3a6992501dadcd6
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L332-L392
236,088
kkroening/ffmpeg-python
ffmpeg/_run.py
get_args
def get_args(stream_spec, overwrite_output=False):
    """Build command-line arguments to be passed to ffmpeg.

    :param stream_spec: a Stream, list of Streams, or label-to-Stream mapping
        describing the ffmpeg filter graph.
    :param overwrite_output: if True, append ``-y`` so ffmpeg overwrites
        existing output files without prompting.
    :return: list of argument strings (without the leading ``ffmpeg``).
    """
    nodes = get_stream_spec_nodes(stream_spec)
    args = []
    # TODO: group nodes together, e.g. `-i somefile -r somerate`.
    # Topological order guarantees every node's inputs are emitted before
    # the node itself is referenced.
    sorted_nodes, outgoing_edge_maps = topo_sort(nodes)
    input_nodes = [node for node in sorted_nodes if isinstance(node, InputNode)]
    output_nodes = [node for node in sorted_nodes if isinstance(node, OutputNode)]
    global_nodes = [node for node in sorted_nodes if isinstance(node, GlobalNode)]
    filter_nodes = [node for node in sorted_nodes if isinstance(node, FilterNode)]
    # ffmpeg refers to inputs by position: the i-th `-i` is stream "i".
    stream_name_map = {(node, None): str(i) for i, node in enumerate(input_nodes)}
    # Built before the output args — the helpers share stream_name_map.
    filter_arg = _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map)
    args += reduce(operator.add, [_get_input_args(node) for node in input_nodes])
    if filter_arg:
        args += ['-filter_complex', filter_arg]
    args += reduce(operator.add, [_get_output_args(node, stream_name_map) for node in output_nodes])
    # Empty-list initializer: there may legitimately be no global nodes.
    args += reduce(operator.add, [_get_global_args(node) for node in global_nodes], [])
    if overwrite_output:
        args += ['-y']
    return args
python
def get_args(stream_spec, overwrite_output=False):
    """Build the ffmpeg command-line argument list for *stream_spec*.

    :param stream_spec: a Stream, list of Streams, or label-to-Stream mapping.
    :param overwrite_output: append ``-y`` when True.
    :return: list of argument strings (without the leading ``ffmpeg``).
    """
    all_nodes = get_stream_spec_nodes(stream_spec)
    # TODO: group nodes together, e.g. `-i somefile -r somerate`.
    ordered, edge_maps = topo_sort(all_nodes)

    def nodes_of(node_cls):
        # Preserve topological order while selecting one node category.
        return [n for n in ordered if isinstance(n, node_cls)]

    inputs = nodes_of(InputNode)
    outputs = nodes_of(OutputNode)
    global_opts = nodes_of(GlobalNode)
    filters = nodes_of(FilterNode)

    # ffmpeg addresses inputs positionally: the i-th `-i` is stream "i".
    name_map = {}
    for index, node in enumerate(inputs):
        name_map[(node, None)] = str(index)

    # Computed up front; the filter/output helpers share name_map.
    complex_arg = _get_filter_arg(filters, edge_maps, name_map)

    cmdline = []
    cmdline += reduce(operator.add, [_get_input_args(n) for n in inputs])
    if complex_arg:
        cmdline += ['-filter_complex', complex_arg]
    cmdline += reduce(operator.add, [_get_output_args(n, name_map) for n in outputs])
    cmdline += reduce(operator.add, [_get_global_args(n) for n in global_opts], [])
    if overwrite_output:
        cmdline += ['-y']
    return cmdline
[ "def", "get_args", "(", "stream_spec", ",", "overwrite_output", "=", "False", ")", ":", "nodes", "=", "get_stream_spec_nodes", "(", "stream_spec", ")", "args", "=", "[", "]", "# TODO: group nodes together, e.g. `-i somefile -r somerate`.", "sorted_nodes", ",", "outgoing...
Build command-line arguments to be passed to ffmpeg.
[ "Build", "command", "-", "line", "arguments", "to", "be", "passed", "to", "ffmpeg", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_run.py#L135-L154
236,089
kkroening/ffmpeg-python
ffmpeg/_run.py
compile
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
    """Build command-line for invoking ffmpeg.

    The :meth:`run` function uses this to build the command line arguments
    and should work in most cases, but calling this function directly is
    useful for debugging or if you need to invoke ffmpeg manually for
    whatever reason.

    This is the same as calling :meth:`get_args` except that it also
    includes the ``ffmpeg`` command as the first argument.

    :param stream_spec: a Stream, list of Streams, or label-to-Stream mapping.
    :param cmd: the ffmpeg executable — a string, or a sequence of leading
        argv entries (e.g. ``['ffmpeg', '-loglevel', 'quiet']``).
    :param overwrite_output: append ``-y`` when True.
    :return: full argv list, starting with the ffmpeg command.
    """
    if isinstance(cmd, basestring):
        cmd = [cmd]
    elif not isinstance(cmd, list):
        # Accept any sequence (tuple, etc.) by normalizing to a list; use
        # isinstance rather than the non-idiomatic `type(cmd) != list`.
        cmd = list(cmd)
    return cmd + get_args(stream_spec, overwrite_output=overwrite_output)
python
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
    """Build the full argv for invoking ffmpeg: the command itself followed
    by the arguments produced by :meth:`get_args`.

    :param cmd: the executable as a string, or a sequence of leading argv
        entries.
    """
    if isinstance(cmd, basestring):
        command = [cmd]
    elif type(cmd) != list:
        # Normalize any non-list sequence to a plain list.
        command = list(cmd)
    else:
        command = cmd
    return command + get_args(stream_spec, overwrite_output=overwrite_output)
[ "def", "compile", "(", "stream_spec", ",", "cmd", "=", "'ffmpeg'", ",", "overwrite_output", "=", "False", ")", ":", "if", "isinstance", "(", "cmd", ",", "basestring", ")", ":", "cmd", "=", "[", "cmd", "]", "elif", "type", "(", "cmd", ")", "!=", "list...
Build command-line for invoking ffmpeg. The :meth:`run` function uses this to build the command line arguments and should work in most cases, but calling this function directly is useful for debugging or if you need to invoke ffmpeg manually for whatever reason. This is the same as calling :meth:`get_args` except that it also includes the ``ffmpeg`` command as the first argument.
[ "Build", "command", "-", "line", "for", "invoking", "ffmpeg", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_run.py#L158-L173
236,090
kkroening/ffmpeg-python
ffmpeg/_run.py
run_async
def run_async(
        stream_spec, cmd='ffmpeg', pipe_stdin=False, pipe_stdout=False, pipe_stderr=False,
        quiet=False, overwrite_output=False):
    """Asynchronously invoke ffmpeg for the supplied node graph.

    Args:
        pipe_stdin: if True, connect pipe to subprocess stdin (to be used with ``pipe:``
            ffmpeg inputs).
        pipe_stdout: if True, connect pipe to subprocess stdout (to be used with ``pipe:``
            ffmpeg outputs).
        pipe_stderr: if True, connect pipe to subprocess stderr.
        quiet: shorthand for setting ``pipe_stdout`` and ``pipe_stderr``.
        overwrite_output: passed through to ``get_args()`` (adds ``-y``).

    Returns:
        A `subprocess Popen`_ object representing the child process.

    Examples:
        Run and stream input::

            process = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )
            process.communicate(input=input_data)

        Run and capture output::

            process = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True, pipe_stderr=True)
            )
            out, err = process.communicate()

        Process video frame-by-frame using numpy::

            process1 = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True)
            )
            process2 = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )
            while True:
                in_bytes = process1.stdout.read(width * height * 3)
                if not in_bytes:
                    break
                in_frame = (
                    np
                    .frombuffer(in_bytes, np.uint8)
                    .reshape([height, width, 3])
                )
                out_frame = in_frame * 0.3
                process2.stdin.write(
                    out_frame
                    .astype(np.uint8)
                    .tobytes()
                )
            process2.stdin.close()
            process1.wait()
            process2.wait()

    .. _subprocess Popen: https://docs.python.org/3/library/subprocess.html#popen-objects
    """
    args = compile(stream_spec, cmd, overwrite_output=overwrite_output)
    # `quiet` implies capturing both stdout and stderr; otherwise each pipe
    # is only connected when its flag is set.
    stdin_stream = subprocess.PIPE if pipe_stdin else None
    stdout_stream = subprocess.PIPE if pipe_stdout or quiet else None
    stderr_stream = subprocess.PIPE if pipe_stderr or quiet else None
    return subprocess.Popen(
        args, stdin=stdin_stream, stdout=stdout_stream, stderr=stderr_stream)
python
def run_async(
        stream_spec, cmd='ffmpeg', pipe_stdin=False, pipe_stdout=False, pipe_stderr=False,
        quiet=False, overwrite_output=False):
    """Launch ffmpeg for *stream_spec* without waiting for it to finish.

    Each ``pipe_*`` flag connects the corresponding standard stream of the
    child process to a pipe; ``quiet`` captures both stdout and stderr.

    :return: the ``subprocess.Popen`` handle for the child process.
    """
    full_cmd = compile(stream_spec, cmd, overwrite_output=overwrite_output)
    pipe = subprocess.PIPE
    return subprocess.Popen(
        full_cmd,
        stdin=pipe if pipe_stdin else None,
        stdout=pipe if (pipe_stdout or quiet) else None,
        stderr=pipe if (pipe_stderr or quiet) else None,
    )
[ "def", "run_async", "(", "stream_spec", ",", "cmd", "=", "'ffmpeg'", ",", "pipe_stdin", "=", "False", ",", "pipe_stdout", "=", "False", ",", "pipe_stderr", "=", "False", ",", "quiet", "=", "False", ",", "overwrite_output", "=", "False", ")", ":", "args", ...
Asynchronously invoke ffmpeg for the supplied node graph. Args: pipe_stdin: if True, connect pipe to subprocess stdin (to be used with ``pipe:`` ffmpeg inputs). pipe_stdout: if True, connect pipe to subprocess stdout (to be used with ``pipe:`` ffmpeg outputs). pipe_stderr: if True, connect pipe to subprocess stderr. quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``. **kwargs: keyword-arguments passed to ``get_args()`` (e.g. ``overwrite_output=True``). Returns: A `subprocess Popen`_ object representing the child process. Examples: Run and stream input:: process = ( ffmpeg .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height)) .output(out_filename, pix_fmt='yuv420p') .overwrite_output() .run_async(pipe_stdin=True) ) process.communicate(input=input_data) Run and capture output:: process = ( ffmpeg .input(in_filename) .output('pipe':, format='rawvideo', pix_fmt='rgb24') .run_async(pipe_stdout=True, pipe_stderr=True) ) out, err = process.communicate() Process video frame-by-frame using numpy:: process1 = ( ffmpeg .input(in_filename) .output('pipe:', format='rawvideo', pix_fmt='rgb24') .run_async(pipe_stdout=True) ) process2 = ( ffmpeg .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height)) .output(out_filename, pix_fmt='yuv420p') .overwrite_output() .run_async(pipe_stdin=True) ) while True: in_bytes = process1.stdout.read(width * height * 3) if not in_bytes: break in_frame = ( np .frombuffer(in_bytes, np.uint8) .reshape([height, width, 3]) ) out_frame = in_frame * 0.3 process2.stdin.write( frame .astype(np.uint8) .tobytes() ) process2.stdin.close() process1.wait() process2.wait() .. _subprocess Popen: https://docs.python.org/3/library/subprocess.html#popen-objects
[ "Asynchronously", "invoke", "ffmpeg", "for", "the", "supplied", "node", "graph", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_run.py#L177-L262
236,091
kkroening/ffmpeg-python
ffmpeg/_run.py
run
def run(
        stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None,
        quiet=False, overwrite_output=False):
    """Invoke ffmpeg for the supplied node graph.

    Args:
        capture_stdout: if True, capture stdout (to be used with ``pipe:`` ffmpeg outputs).
        capture_stderr: if True, capture stderr.
        quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
        input: text to be sent to stdin (to be used with ``pipe:`` ffmpeg inputs).
        overwrite_output: passed through to ``get_args()`` (adds ``-y``).

    Returns:
        (out, err) tuple containing captured stdout and stderr data.

    Raises:
        Error: if ffmpeg exits with a non-zero return code.
    """
    # stdin is only piped when there is actually input to send.
    process = run_async(
        stream_spec,
        cmd,
        pipe_stdin=input is not None,
        pipe_stdout=capture_stdout,
        pipe_stderr=capture_stderr,
        quiet=quiet,
        overwrite_output=overwrite_output,
    )
    out, err = process.communicate(input)
    retcode = process.poll()
    if retcode:
        raise Error('ffmpeg', out, err)
    return out, err
python
def run(
        stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None,
        quiet=False, overwrite_output=False):
    """Run ffmpeg synchronously and return its captured output.

    :param input: bytes/text sent to the child's stdin (enables the stdin pipe).
    :param capture_stdout: capture stdout (for ``pipe:`` outputs).
    :param capture_stderr: capture stderr.
    :param quiet: capture both stdout and stderr.
    :return: ``(out, err)`` tuple of captured data (``None`` when not piped).
    :raises Error: when ffmpeg exits non-zero.
    """
    proc = run_async(
        stream_spec,
        cmd,
        pipe_stdin=input is not None,
        pipe_stdout=capture_stdout,
        pipe_stderr=capture_stderr,
        quiet=quiet,
        overwrite_output=overwrite_output,
    )
    out, err = proc.communicate(input)
    # Non-zero exit status means ffmpeg failed.
    if proc.poll():
        raise Error('ffmpeg', out, err)
    return out, err
[ "def", "run", "(", "stream_spec", ",", "cmd", "=", "'ffmpeg'", ",", "capture_stdout", "=", "False", ",", "capture_stderr", "=", "False", ",", "input", "=", "None", ",", "quiet", "=", "False", ",", "overwrite_output", "=", "False", ")", ":", "process", "=...
Invoke ffmpeg for the supplied node graph. Args: capture_stdout: if True, capture stdout (to be used with ``pipe:`` ffmpeg outputs). capture_stderr: if True, capture stderr. quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``. input: text to be sent to stdin (to be used with ``pipe:`` ffmpeg inputs) **kwargs: keyword-arguments passed to ``get_args()`` (e.g. ``overwrite_output=True``). Returns: (out, err) tuple containing captured stdout and stderr data.
[ "Invoke", "ffmpeg", "for", "the", "supplied", "node", "graph", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_run.py#L266-L296
236,092
kkroening/ffmpeg-python
ffmpeg/_ffmpeg.py
output
def output(*streams_and_filename, **kwargs):
    """Output file URL

    Syntax:
        `ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)`

    Any supplied keyword arguments are passed to ffmpeg verbatim (e.g.
    ``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``, etc.).
    Some keyword-arguments are handled specially, as shown below.

    Args:
        video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``.
        audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``.
        format: alias for ``-f`` parameter, e.g. ``format='mp4'``
            (equivalent to ``f='mp4'``).

    If multiple streams are provided, they are mapped to the same output.

    To tell ffmpeg to write to stdout, use ``pipe:`` as the filename.

    Official documentation: `Synopsis <https://ffmpeg.org/ffmpeg.html#Synopsis>`__

    Raises:
        ValueError: when no filename is supplied, or when both ``f`` and
            ``format`` are given.
    """
    streams_and_filename = list(streams_and_filename)
    if 'filename' not in kwargs:
        # Bug fix: with no positional arguments at all, indexing [-1] used to
        # raise IndexError; guard emptiness first so callers always get the
        # intended ValueError.
        if not streams_and_filename or not isinstance(streams_and_filename[-1], basestring):
            raise ValueError('A filename must be provided')
        kwargs['filename'] = streams_and_filename.pop(-1)
    streams = streams_and_filename

    # `f` is accepted as an alias for `format`, but not both at once.
    fmt = kwargs.pop('f', None)
    if fmt:
        if 'format' in kwargs:
            raise ValueError("Can't specify both `format` and `f` kwargs")
        kwargs['format'] = fmt
    return OutputNode(streams, output.__name__, kwargs=kwargs).stream()
python
def output(*streams_and_filename, **kwargs):
    """Create an output-file node.

    The last positional argument is the output filename (unless a
    ``filename`` keyword is given); all preceding positional arguments are
    streams mapped to the same output. ``f`` is accepted as an alias for
    ``format``, but specifying both raises ``ValueError``.
    """
    items = list(streams_and_filename)
    if 'filename' not in kwargs:
        candidate = items[-1]
        if not isinstance(candidate, basestring):
            raise ValueError('A filename must be provided')
        kwargs['filename'] = items.pop(-1)
    streams = items

    fmt = kwargs.pop('f', None)
    if fmt:
        if 'format' in kwargs:
            raise ValueError("Can't specify both `format` and `f` kwargs")
        kwargs['format'] = fmt
    return OutputNode(streams, output.__name__, kwargs=kwargs).stream()
[ "def", "output", "(", "*", "streams_and_filename", ",", "*", "*", "kwargs", ")", ":", "streams_and_filename", "=", "list", "(", "streams_and_filename", ")", "if", "'filename'", "not", "in", "kwargs", ":", "if", "not", "isinstance", "(", "streams_and_filename", ...
Output file URL Syntax: `ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)` Any supplied keyword arguments are passed to ffmpeg verbatim (e.g. ``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``, etc.). Some keyword-arguments are handled specially, as shown below. Args: video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``. audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``. format: alias for ``-f`` parameter, e.g. ``format='mp4'`` (equivalent to ``f='mp4'``). If multiple streams are provided, they are mapped to the same output. To tell ffmpeg to write to stdout, use ``pipe:`` as the filename. Official documentation: `Synopsis <https://ffmpeg.org/ffmpeg.html#Synopsis>`__
[ "Output", "file", "URL" ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_ffmpeg.py#L59-L94
236,093
kkroening/ffmpeg-python
examples/show_progress.py
_do_watch_progress
def _do_watch_progress(filename, sock, handler):
    """Function to run in a separate gevent greenlet to read progress
    events from a unix-domain socket.

    :param filename: socket filename (unused here; kept for context).
    :param sock: a listening socket; this function accepts one connection.
    :param handler: callable invoked as ``handler(key, value)`` for each
        complete ``key=value`` line received (``value`` is None when the
        line has no ``=``).
    """
    connection, client_address = sock.accept()
    data = b''
    try:
        while True:
            more_data = connection.recv(16)
            if not more_data:
                # Peer closed the connection — stop reading.
                break
            data += more_data
            lines = data.split(b'\n')
            # All but the last element are complete lines; the tail is an
            # incomplete line carried over to the next recv.
            for line in lines[:-1]:
                line = line.decode()
                parts = line.split('=')
                key = parts[0] if len(parts) > 0 else None
                value = parts[1] if len(parts) > 1 else None
                handler(key, value)
            data = lines[-1]
    finally:
        connection.close()
python
def _do_watch_progress(filename, sock, handler): connection, client_address = sock.accept() data = b'' try: while True: more_data = connection.recv(16) if not more_data: break data += more_data lines = data.split(b'\n') for line in lines[:-1]: line = line.decode() parts = line.split('=') key = parts[0] if len(parts) > 0 else None value = parts[1] if len(parts) > 1 else None handler(key, value) data = lines[-1] finally: connection.close()
[ "def", "_do_watch_progress", "(", "filename", ",", "sock", ",", "handler", ")", ":", "connection", ",", "client_address", "=", "sock", ".", "accept", "(", ")", "data", "=", "b''", "try", ":", "while", "True", ":", "more_data", "=", "connection", ".", "re...
Function to run in a separate gevent greenlet to read progress events from a unix-domain socket.
[ "Function", "to", "run", "in", "a", "separate", "gevent", "greenlet", "to", "read", "progress", "events", "from", "a", "unix", "-", "domain", "socket", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/examples/show_progress.py#L42-L62
236,094
kkroening/ffmpeg-python
examples/show_progress.py
_watch_progress
def _watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket filename is yielded from the context manager and the
    socket is closed when the context manager is exited.

    NOTE(review): this is a generator used via ``with``, so in the full
    source it is presumably decorated with ``@contextlib.contextmanager``
    — confirm at the definition site.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value`` argument.
            (The example ``show_progress`` below uses tqdm)

    Yields:
        socket_filename: the name of the socket file.
    """
    with _tmpdir_scope() as tmpdir:
        socket_filename = os.path.join(tmpdir, 'sock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with contextlib.closing(sock):
            sock.bind(socket_filename)
            sock.listen(1)
            # The greenlet reads progress lines concurrently while the
            # caller's body runs.
            child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
            try:
                yield socket_filename
            except:
                # Propagate the caller's exception but stop the reader first.
                gevent.kill(child)
                raise
python
def _watch_progress(handler):
    """Yield the filename of a temporary unix-domain socket on which a
    background greenlet listens for ffmpeg progress events, forwarding each
    ``key``/``value`` pair to *handler*.

    The socket and its temporary directory are cleaned up on exit; if the
    caller's body raises, the reader greenlet is killed first.
    """
    with _tmpdir_scope() as tmpdir:
        sock_path = os.path.join(tmpdir, 'sock')
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with contextlib.closing(listener):
            listener.bind(sock_path)
            listener.listen(1)
            watcher = gevent.spawn(_do_watch_progress, sock_path, listener, handler)
            try:
                yield sock_path
            except:
                gevent.kill(watcher)
                raise
[ "def", "_watch_progress", "(", "handler", ")", ":", "with", "_tmpdir_scope", "(", ")", "as", "tmpdir", ":", "socket_filename", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'sock'", ")", "sock", "=", "socket", ".", "socket", "(", "socket", ...
Context manager for creating a unix-domain socket and listen for ffmpeg progress events. The socket filename is yielded from the context manager and the socket is closed when the context manager is exited. Args: handler: a function to be called when progress events are received; receives a ``key`` argument and ``value`` argument. (The example ``show_progress`` below uses tqdm) Yields: socket_filename: the name of the socket file.
[ "Context", "manager", "for", "creating", "a", "unix", "-", "domain", "socket", "and", "listen", "for", "ffmpeg", "progress", "events", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/examples/show_progress.py#L66-L92
236,095
kkroening/ffmpeg-python
examples/show_progress.py
show_progress
def show_progress(total_duration):
    """Create a unix-domain socket to watch progress and render tqdm
    progress bar.

    NOTE(review): a generator used via ``with`` — presumably decorated
    with ``@contextlib.contextmanager`` in the full source; confirm.

    :param total_duration: total media duration in seconds (sets the bar's
        upper bound).
    :yields: the progress-socket filename to pass to ffmpeg.
    """
    with tqdm(total=round(total_duration, 2)) as bar:

        def handler(key, value):
            # ffmpeg reports elapsed output time in microseconds.
            if key == 'out_time_ms':
                time = round(float(value) / 1000000., 2)
                bar.update(time - bar.n)
            elif key == 'progress' and value == 'end':
                # Snap the bar to 100% when ffmpeg signals completion.
                bar.update(bar.total - bar.n)

        with _watch_progress(handler) as socket_filename:
            yield socket_filename
python
def show_progress(total_duration):
    """Drive a tqdm progress bar from ffmpeg progress events.

    Yields the filename of the unix-domain socket that ffmpeg should write
    progress to; the bar tracks ``out_time_ms`` and jumps to 100% on the
    final ``progress=end`` event.
    """
    with tqdm(total=round(total_duration, 2)) as bar:

        def on_event(key, value):
            if key == 'out_time_ms':
                # Microseconds -> seconds, advanced by the delta since the
                # bar's current position.
                elapsed = round(float(value) / 1000000., 2)
                bar.update(elapsed - bar.n)
            elif key == 'progress' and value == 'end':
                bar.update(bar.total - bar.n)

        with _watch_progress(on_event) as socket_filename:
            yield socket_filename
[ "def", "show_progress", "(", "total_duration", ")", ":", "with", "tqdm", "(", "total", "=", "round", "(", "total_duration", ",", "2", ")", ")", "as", "bar", ":", "def", "handler", "(", "key", ",", "value", ")", ":", "if", "key", "==", "'out_time_ms'", ...
Create a unix-domain socket to watch progress and render tqdm progress bar.
[ "Create", "a", "unix", "-", "domain", "socket", "to", "watch", "progress", "and", "render", "tqdm", "progress", "bar", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/examples/show_progress.py#L97-L108
236,096
kkroening/ffmpeg-python
ffmpeg/_probe.py
probe
def probe(filename, cmd='ffprobe', **kwargs):
    """Run ffprobe on the specified file and return a JSON representation
    of the output.

    :param filename: path or URL of the media file to probe.
    :param cmd: the ffprobe executable to invoke.
    :param kwargs: extra ffprobe flags, converted verbatim to command-line
        arguments.
    :return: dict parsed from ffprobe's JSON output (``format`` and
        ``streams`` sections).

    Raises:
        :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code,
            an :class:`Error` is returned with a generic error message.
            The stderr output can be retrieved by accessing the
            ``stderr`` property of the exception.
    """
    args = [cmd, '-show_format', '-show_streams', '-of', 'json']
    args += convert_kwargs_to_cmd_line_args(kwargs)
    args += [filename]

    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Error('ffprobe', out, err)
    return json.loads(out.decode('utf-8'))
python
def probe(filename, cmd='ffprobe', **kwargs):
    """Run ffprobe on *filename* and return its JSON output as a dict.

    :param kwargs: additional ffprobe flags passed through verbatim.
    :raises Error: when ffprobe exits with a non-zero return code; stderr
        is available on the exception.
    """
    cmdline = [cmd, '-show_format', '-show_streams', '-of', 'json']
    cmdline.extend(convert_kwargs_to_cmd_line_args(kwargs))
    cmdline.append(filename)

    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise Error('ffprobe', out, err)
    return json.loads(out.decode('utf-8'))
[ "def", "probe", "(", "filename", ",", "cmd", "=", "'ffprobe'", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "cmd", ",", "'-show_format'", ",", "'-show_streams'", ",", "'-of'", ",", "'json'", "]", "args", "+=", "convert_kwargs_to_cmd_line_args", "("...
Run ffprobe on the specified file and return a JSON representation of the output. Raises: :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code, an :class:`Error` is returned with a generic error message. The stderr output can be retrieved by accessing the ``stderr`` property of the exception.
[ "Run", "ffprobe", "on", "the", "specified", "file", "and", "return", "a", "JSON", "representation", "of", "the", "output", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_probe.py#L7-L24
236,097
kkroening/ffmpeg-python
ffmpeg/_filters.py
filter_multi_output
def filter_multi_output(stream_spec, filter_name, *args, **kwargs):
    """Apply custom filter with one or more outputs.

    This is the same as ``filter_`` except that the filter can produce
    more than one output.

    To reference an output stream, use either the ``.stream`` operator or
    bracket shorthand:

    Example:

        ```
        split = ffmpeg.input('in.mp4').filter_multi_output('split')
        split0 = split.stream(0)
        split1 = split[1]
        ffmpeg.concat(split0, split1).output('out.mp4').run()
        ```

    :param stream_spec: a Stream, list of Streams, or label-to-Stream mapping.
    :param filter_name: ffmpeg filter name, e.g. ``split``.
    :return: the :class:`FilterNode` itself (not a single stream), so the
        caller can select individual outputs.
    """
    # max_inputs=None lifts the usual single-input restriction.
    return FilterNode(stream_spec, filter_name, args=args, kwargs=kwargs, max_inputs=None)
python
def filter_multi_output(stream_spec, filter_name, *args, **kwargs):
    """Apply a custom ffmpeg filter that may produce multiple outputs.

    Returns the filter node itself so callers can select individual output
    streams via ``.stream(i)`` or ``node[i]``.
    """
    node = FilterNode(
        stream_spec,
        filter_name,
        args=args,
        kwargs=kwargs,
        max_inputs=None,  # allow any number of inputs
    )
    return node
[ "def", "filter_multi_output", "(", "stream_spec", ",", "filter_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "FilterNode", "(", "stream_spec", ",", "filter_name", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "max_inp...
Apply custom filter with one or more outputs. This is the same as ``filter_`` except that the filter can produce more than one output. To reference an output stream, use either the ``.stream`` operator or bracket shorthand: Example: ``` split = ffmpeg.input('in.mp4').filter_multi_output('split') split0 = split.stream(0) split1 = split[1] ffmpeg.concat(split0, split1).output('out.mp4').run() ```
[ "Apply", "custom", "filter", "with", "one", "or", "more", "outputs", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_filters.py#L8-L24
236,098
kkroening/ffmpeg-python
ffmpeg/_filters.py
filter
def filter(stream_spec, filter_name, *args, **kwargs):
    """Apply custom filter.

    This is normally reached through higher-level filter functions such as
    ``hflip``, but if a filter implementation is missing from
    ``ffmpeg-python``, you can call it directly to have ``ffmpeg-python``
    pass the filter name and arguments to ffmpeg verbatim.

    Args:
        stream_spec: a Stream, list of Streams, or label-to-Stream dictionary mapping
        filter_name: ffmpeg filter name, e.g. `colorchannelmixer`
        *args: list of args to pass to ffmpeg verbatim
        **kwargs: list of keyword-args to pass to ffmpeg verbatim

    This function shadows the built-in python ``filter`` function; the
    ``filter_`` alias is provided to avoid that collision.

    Example:

        ``ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()``
    """
    return filter_multi_output(stream_spec, filter_name, *args, **kwargs).stream()
python
def filter(stream_spec, filter_name, *args, **kwargs):
    """Apply a custom single-output ffmpeg filter, passing *filter_name*
    and its arguments to ffmpeg verbatim.

    Example:
        ``ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()``
    """
    node = filter_multi_output(stream_spec, filter_name, *args, **kwargs)
    return node.stream()
[ "def", "filter", "(", "stream_spec", ",", "filter_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "filter_multi_output", "(", "stream_spec", ",", "filter_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "stream", "(", "...
Apply custom filter. ``filter_`` is normally used by higher-level filter functions such as ``hflip``, but if a filter implementation is missing from ``fmpeg-python``, you can call ``filter_`` directly to have ``fmpeg-python`` pass the filter name and arguments to ffmpeg verbatim. Args: stream_spec: a Stream, list of Streams, or label-to-Stream dictionary mapping filter_name: ffmpeg filter name, e.g. `colorchannelmixer` *args: list of args to pass to ffmpeg verbatim **kwargs: list of keyword-args to pass to ffmpeg verbatim The function name is suffixed with ``_`` in order avoid confusion with the standard python ``filter`` function. Example: ``ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()``
[ "Apply", "custom", "filter", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_filters.py#L28-L47
236,099
kkroening/ffmpeg-python
ffmpeg/_filters.py
filter_
def filter_(stream_spec, filter_name, *args, **kwargs):
    """Alternate name for ``filter``, so as to not collide with the
    built-in python ``filter`` operator.

    All arguments are forwarded unchanged to :func:`filter`.
    """
    return filter(stream_spec, filter_name, *args, **kwargs)
python
def filter_(stream_spec, filter_name, *args, **kwargs):
    """Alias for :func:`filter` that avoids shadowing the python built-in
    ``filter``; forwards all arguments unchanged.
    """
    return filter(stream_spec, filter_name, *args, **kwargs)
[ "def", "filter_", "(", "stream_spec", ",", "filter_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "filter", "(", "stream_spec", ",", "filter_name", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Alternate name for ``filter``, so as to not collide with the built-in python ``filter`` operator.
[ "Alternate", "name", "for", "filter", "so", "as", "to", "not", "collide", "with", "the", "built", "-", "in", "python", "filter", "operator", "." ]
ac111dc3a976ddbb872bc7d6d4fe24a267c1a956
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_filters.py#L51-L55