idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
22,700 | def parse_iptables_rule ( line ) : bits = line . split ( ) definition = { } key = None args = [ ] not_arg = False def add_args ( ) : arg_string = ' ' . join ( args ) if key in IPTABLES_ARGS : definition_key = ( 'not_{0}' . format ( IPTABLES_ARGS [ key ] ) if not_arg else IPTABLES_ARGS [ key ] ) definition [ definition_key ] = arg_string else : definition . setdefault ( 'extras' , [ ] ) . extend ( ( key , arg_string ) ) for bit in bits : if bit == '!' : if key : add_args ( ) args = [ ] key = None not_arg = True elif bit . startswith ( '-' ) : if key : add_args ( ) args = [ ] not_arg = False key = bit else : args . append ( bit ) if key : add_args ( ) if 'extras' in definition : definition [ 'extras' ] = set ( definition [ 'extras' ] ) return definition | Parse one iptables rule . Returns a dict where each iptables code argument is mapped to a name using IPTABLES_ARGS . | 247 | 32 |
22,701 | def add_op ( state , op_func , * args , * * kwargs ) : frameinfo = get_caller_frameinfo ( ) kwargs [ 'frameinfo' ] = frameinfo for host in state . inventory : op_func ( state , host , * args , * * kwargs ) | Prepare & add an operation to pyinfra . state by executing it on all hosts . | 69 | 19 |
22,702 | def add_deploy ( state , deploy_func , * args , * * kwargs ) : frameinfo = get_caller_frameinfo ( ) kwargs [ 'frameinfo' ] = frameinfo for host in state . inventory : deploy_func ( state , host , * args , * * kwargs ) | Prepare & add an deploy to pyinfra . state by executing it on all hosts . | 70 | 19 |
22,703 | def setup_arguments ( arguments ) : # Ensure parallel/port are numbers for key in ( '--parallel' , '--port' , '--fail-percent' ) : if arguments [ key ] : try : arguments [ key ] = int ( arguments [ key ] ) except ValueError : raise CliError ( '{0} is not a valid integer for {1}' . format ( arguments [ key ] , key , ) ) # Prep --run OP ARGS if arguments [ '--run' ] : op , args = setup_op_and_args ( arguments [ '--run' ] , arguments [ 'ARGS' ] ) else : op = args = None # Check deploy file exists if arguments [ 'DEPLOY' ] : if not path . exists ( arguments [ 'DEPLOY' ] ) : raise CliError ( 'Deploy file not found: {0}' . format ( arguments [ 'DEPLOY' ] ) ) # Check our key file exists if arguments [ '--key' ] : if not path . exists ( arguments [ '--key' ] ) : raise CliError ( 'Private key file not found: {0}' . format ( arguments [ '--key' ] ) ) # Setup the rest return { # Deploy options 'inventory' : arguments [ '-i' ] , 'deploy' : arguments [ 'DEPLOY' ] , 'verbose' : arguments [ '-v' ] , 'dry' : arguments [ '--dry' ] , 'serial' : arguments [ '--serial' ] , 'no_wait' : arguments [ '--no-wait' ] , 'debug' : arguments [ '--debug' ] , 'debug_data' : arguments [ '--debug-data' ] , 'debug_state' : arguments [ '--debug-state' ] , 'fact' : arguments [ '--fact' ] , 'limit' : arguments [ '--limit' ] , 'op' : op , 'op_args' : args , # Config options 'user' : arguments [ '--user' ] , 'key' : arguments [ '--key' ] , 'key_password' : arguments [ '--key-password' ] , 'password' : arguments [ '--password' ] , 'port' : arguments [ '--port' ] , 'sudo' : arguments [ '--sudo' ] , 'sudo_user' : arguments [ '--sudo-user' ] , 'su_user' : arguments [ '--su-user' ] , 'parallel' : arguments [ '--parallel' ] , 'fail_percent' : arguments [ '--fail-percent' ] , } | Prepares argumnents output by docopt . | 574 | 10 |
22,704 | def sql ( state , host , sql , database = None , # Details for speaking to MySQL via `mysql` CLI mysql_user = None , mysql_password = None , mysql_host = None , mysql_port = None , ) : yield make_execute_mysql_command ( sql , database = database , user = mysql_user , password = mysql_password , host = mysql_host , port = mysql_port , ) | Execute arbitrary SQL against MySQL . | 92 | 7 |
22,705 | def dump ( state , host , remote_filename , database = None , # Details for speaking to MySQL via `mysql` CLI mysql_user = None , mysql_password = None , mysql_host = None , mysql_port = None , ) : yield '{0} > {1}' . format ( make_mysql_command ( executable = 'mysqldump' , database = database , user = mysql_user , password = mysql_password , host = mysql_host , port = mysql_port , ) , remote_filename ) | Dump a MySQL database into a . sql file . Requires mysqldump . | 116 | 17 |
22,706 | def get_host ( self , name , default = NoHostError ) : if name in self . hosts : return self . hosts [ name ] if default is NoHostError : raise NoHostError ( 'No such host: {0}' . format ( name ) ) return default | Get a single host by name . | 59 | 7 |
22,707 | def get_group ( self , name , default = NoGroupError ) : if name in self . groups : return self . groups [ name ] if default is NoGroupError : raise NoGroupError ( 'No such group: {0}' . format ( name ) ) return default | Get a list of hosts belonging to a group . | 59 | 10 |
22,708 | def get_groups_data ( self , groups ) : data = { } for group in groups : data . update ( self . get_group_data ( group ) ) return data | Gets aggregated data from a list of groups . Vars are collected in order so for any groups which define the same var twice the last group s value will hold . | 38 | 35 |
22,709 | def get_deploy_data ( self ) : if self . state and self . state . deploy_data : return self . state . deploy_data return { } | Gets any default data attached to the current deploy if any . | 35 | 13 |
22,710 | def config ( state , host , key , value , repo = None , ) : existing_config = host . fact . git_config ( repo ) if key not in existing_config or existing_config [ key ] != value : if repo is None : yield 'git config --global {0} "{1}"' . format ( key , value ) else : yield 'cd {0} && git config --local {1} "{2}"' . format ( repo , key , value ) | Manage git config for a repository or globally . | 102 | 10 |
22,711 | def include ( filename , hosts = False , when = True ) : if not pyinfra . is_cli : raise PyinfraError ( 'local.include is only available in CLI mode.' ) if not when : return if hosts is not False : hosts = ensure_host_list ( hosts , inventory = pseudo_state . inventory ) if pseudo_host not in hosts : return if pseudo_state . deploy_dir : filename = path . join ( pseudo_state . deploy_dir , filename ) frameinfo = get_caller_frameinfo ( ) logger . debug ( 'Including local file: {0}' . format ( filename ) ) try : # Fixes a circular import because `pyinfra.local` is really a CLI # only thing (so should be `pyinfra_cli.local`). It is kept here # to maintain backwards compatability and the nicer public import # (ideally users never need to import from `pyinfra_cli`). from pyinfra_cli . config import extract_file_config from pyinfra_cli . util import exec_file # Load any config defined in the file and setup like a @deploy config_data = extract_file_config ( filename ) kwargs = { key . lower ( ) : value for key , value in six . iteritems ( config_data ) if key in [ 'SUDO' , 'SUDO_USER' , 'SU_USER' , 'PRESERVE_SUDO_ENV' , 'IGNORE_ERRORS' , ] } with pseudo_state . deploy ( filename , kwargs , None , frameinfo . lineno , in_deploy = False , ) : exec_file ( filename ) # One potential solution to the above is to add local as an actual # module, ie `pyinfra.modules.local`. except IOError as e : raise PyinfraError ( 'Could not include local file: {0}\n{1}' . format ( filename , e ) , ) | Executes a local python file within the pyinfra . pseudo_state . deploy_dir directory . | 431 | 21 |
22,712 | def send ( self , request , stem = None ) : if stem is not None : request . url = request . url + "/" + stem . lstrip ( "/" ) prepped = self . session . prepare_request ( request ) settings = self . session . merge_environment_settings ( url = prepped . url , proxies = { } , stream = None , verify = None , cert = None ) return self . session . send ( prepped , * * settings ) | Prepare and send a request | 100 | 6 |
22,713 | def list ( self ) : # Define the basic request. The per_page parameter is set to 100, which # is the maximum github allows. If the user has more than one page of # gists, this request object will be modified to retrieve each # successive page of gists. request = requests . Request ( 'GET' , 'https://api.github.com/gists' , headers = { 'Accept-Encoding' : 'identity, deflate, compress, gzip' , 'User-Agent' : 'python-requests/1.2.0' , 'Accept' : 'application/vnd.github.v3.base64' , } , params = { 'access_token' : self . token , 'per_page' : 100 , } , ) # Github provides a 'link' header that contains information to # navigate through a users page of gists. This regex is used to # extract the URLs contained in this header, and to find the next page # of gists. pattern = re . compile ( r'<([^>]*)>; rel="([^"]*)"' ) gists = [ ] while True : # Retrieve the next page of gists try : response = self . send ( request ) . json ( ) except Exception : break # Extract the list of gists for gist in response : try : gists . append ( GistInfo ( gist [ 'id' ] , gist [ 'public' ] , gist [ 'description' ] , ) ) except KeyError : continue try : link = response . headers [ 'link' ] # Search for the next page of gist. If a 'next' page is found, # the URL is set to this new page and the iteration continues. # If there is no next page, return the list of gists. for result in pattern . finditer ( link ) : url = result . group ( 1 ) rel = result . group ( 2 ) if rel == 'next' : request . url = url break else : return gists except Exception : break return gists | Returns a list of the users gists as GistInfo objects | 438 | 13 |
22,714 | def create ( self , request , desc , files , public = False ) : request . data = json . dumps ( { "description" : desc , "public" : public , "files" : files , } ) return self . send ( request ) . json ( ) [ 'html_url' ] | Creates a gist | 63 | 4 |
22,715 | def files ( self , request , id ) : gist = self . send ( request , id ) . json ( ) return gist [ 'files' ] | Returns a list of files in the gist | 31 | 8 |
22,716 | def content ( self , request , id ) : gist = self . send ( request , id ) . json ( ) def convert ( data ) : return base64 . b64decode ( data ) . decode ( 'utf-8' ) content = { } for name , data in gist [ 'files' ] . items ( ) : content [ name ] = convert ( data [ 'content' ] ) return content | Returns the content of the gist | 86 | 6 |
22,717 | def archive ( self , request , id ) : gist = self . send ( request , id ) . json ( ) with tarfile . open ( '{}.tar.gz' . format ( id ) , mode = 'w:gz' ) as archive : for name , data in gist [ 'files' ] . items ( ) : with tempfile . NamedTemporaryFile ( 'w+' ) as fp : fp . write ( data [ 'content' ] ) fp . flush ( ) archive . add ( fp . name , arcname = name ) | Create an archive of a gist | 120 | 6 |
22,718 | def edit ( self , request , id ) : with pushd ( tempfile . gettempdir ( ) ) : try : self . clone ( id ) with pushd ( id ) : files = [ f for f in os . listdir ( '.' ) if os . path . isfile ( f ) ] quoted = [ '"{}"' . format ( f ) for f in files ] os . system ( "{} {}" . format ( self . editor , ' ' . join ( quoted ) ) ) os . system ( 'git commit -av && git push' ) finally : shutil . rmtree ( id ) | Edit a gist | 132 | 3 |
22,719 | def description ( self , request , id , description ) : request . data = json . dumps ( { "description" : description } ) return self . send ( request , id ) . json ( ) [ 'html_url' ] | Updates the description of a gist | 48 | 7 |
22,720 | def clone ( self , id , name = None ) : url = 'git@gist.github.com:/{}' . format ( id ) if name is None : os . system ( 'git clone {}' . format ( url ) ) else : os . system ( 'git clone {} {}' . format ( url , name ) ) | Clone a gist | 72 | 4 |
22,721 | def command ( state , host , hostname , command , ssh_user = None ) : connection_target = hostname if ssh_user : connection_target = '@' . join ( ( ssh_user , hostname ) ) yield 'ssh {0} "{1}"' . format ( connection_target , command ) | Execute commands on other servers over SSH . | 68 | 9 |
22,722 | def upload ( state , host , hostname , filename , remote_filename = None , use_remote_sudo = False , ssh_keyscan = False , ssh_user = None , ) : remote_filename = remote_filename or filename # Figure out where we're connecting (host or user@host) connection_target = hostname if ssh_user : connection_target = '@' . join ( ( ssh_user , hostname ) ) if ssh_keyscan : yield keyscan ( state , host , hostname ) # If we're not using sudo on the remote side, just scp the file over if not use_remote_sudo : yield 'scp {0} {1}:{2}' . format ( filename , connection_target , remote_filename ) else : # Otherwise - we need a temporary location for the file temp_remote_filename = state . get_temp_filename ( ) # scp it to the temporary location upload_cmd = 'scp {0} {1}:{2}' . format ( filename , connection_target , temp_remote_filename , ) yield upload_cmd # And sudo sudo to move it yield command ( state , host , connection_target , 'sudo mv {0} {1}' . format ( temp_remote_filename , remote_filename , ) ) | Upload files to other servers using scp . | 281 | 9 |
22,723 | def download ( state , host , hostname , filename , local_filename = None , force = False , ssh_keyscan = False , ssh_user = None , ) : local_filename = local_filename or filename # Get local file info local_file_info = host . fact . file ( local_filename ) # Local file exists but isn't a file? if local_file_info is False : raise OperationError ( 'Local destination {0} already exists and is not a file' . format ( local_filename , ) , ) # If the local file exists and we're not forcing a re-download, no-op if local_file_info and not force : return # Figure out where we're connecting (host or user@host) connection_target = hostname if ssh_user : connection_target = '@' . join ( ( ssh_user , hostname ) ) if ssh_keyscan : yield keyscan ( state , host , hostname ) # Download the file with scp yield 'scp {0}:{1} {2}' . format ( connection_target , filename , local_filename ) | Download files from other servers using scp . | 239 | 9 |
22,724 | def pop_op_kwargs ( state , kwargs ) : meta_kwargs = state . deploy_kwargs or { } def get_kwarg ( key , default = None ) : return kwargs . pop ( key , meta_kwargs . get ( key , default ) ) # Get the env for this host: config env followed by command-level env env = state . config . ENV . copy ( ) env . update ( get_kwarg ( 'env' , { } ) ) hosts = get_kwarg ( 'hosts' ) hosts = ensure_host_list ( hosts , inventory = state . inventory ) # Filter out any hosts not in the meta kwargs (nested support) if meta_kwargs . get ( 'hosts' ) is not None : hosts = [ host for host in hosts if host in meta_kwargs [ 'hosts' ] ] return { # ENVars for commands in this operation 'env' : env , # Hosts to limit the op to 'hosts' : hosts , # When to limit the op (default always) 'when' : get_kwarg ( 'when' , True ) , # Locally & globally configurable 'sudo' : get_kwarg ( 'sudo' , state . config . SUDO ) , 'sudo_user' : get_kwarg ( 'sudo_user' , state . config . SUDO_USER ) , 'su_user' : get_kwarg ( 'su_user' , state . config . SU_USER ) , # Whether to preserve ENVars when sudoing (eg SSH forward agent socket) 'preserve_sudo_env' : get_kwarg ( 'preserve_sudo_env' , state . config . PRESERVE_SUDO_ENV , ) , # Ignore any errors during this operation 'ignore_errors' : get_kwarg ( 'ignore_errors' , state . config . IGNORE_ERRORS , ) , # Timeout on running the command 'timeout' : get_kwarg ( 'timeout' ) , # Get a PTY before executing commands 'get_pty' : get_kwarg ( 'get_pty' , False ) , # Forces serial mode for this operation (--serial for one op) 'serial' : get_kwarg ( 'serial' , False ) , # Only runs this operation once 'run_once' : get_kwarg ( 'run_once' , False ) , # Execute in batches of X hosts rather than all at once 'parallel' : get_kwarg ( 'parallel' ) , # Callbacks 'on_success' : get_kwarg ( 'on_success' ) , 'on_error' : get_kwarg ( 'on_error' ) , # Operation hash 'op' : get_kwarg ( 'op' ) , } | Pop and return operation global keyword arguments . | 617 | 8 |
22,725 | def get_template ( filename_or_string , is_string = False ) : # Cache against string sha or just the filename cache_key = sha1_hash ( filename_or_string ) if is_string else filename_or_string if cache_key in TEMPLATES : return TEMPLATES [ cache_key ] if is_string : # Set the input string as our template template_string = filename_or_string else : # Load template data into memory with open ( filename_or_string , 'r' ) as file_io : template_string = file_io . read ( ) TEMPLATES [ cache_key ] = Template ( template_string , keep_trailing_newline = True ) return TEMPLATES [ cache_key ] | Gets a jinja2 Template object for the input filename or string with caching based on the filename of the template or the SHA1 of the input string . | 169 | 33 |
22,726 | def underscore ( name ) : s1 = re . sub ( '(.)([A-Z][a-z]+)' , r'\1_\2' , name ) return re . sub ( '([a-z0-9])([A-Z])' , r'\1_\2' , s1 ) . lower ( ) | Transform CamelCase - > snake_case . | 76 | 9 |
22,727 | def sha1_hash ( string ) : hasher = sha1 ( ) hasher . update ( string . encode ( ) ) return hasher . hexdigest ( ) | Return the SHA1 of the input string . | 38 | 9 |
22,728 | def make_command ( command , env = None , su_user = None , sudo = False , sudo_user = None , preserve_sudo_env = False , ) : debug_meta = { } for key , value in ( ( 'sudo' , sudo ) , ( 'sudo_user' , sudo_user ) , ( 'su_user' , su_user ) , ( 'env' , env ) , ) : if value : debug_meta [ key ] = value logger . debug ( 'Building command ({0}): {1}' . format ( ' ' . join ( '{0}: {1}' . format ( key , value ) for key , value in six . iteritems ( debug_meta ) ) , command ) ) # Use env & build our actual command if env : env_string = ' ' . join ( [ '{0}={1}' . format ( key , value ) for key , value in six . iteritems ( env ) ] ) command = 'export {0}; {1}' . format ( env_string , command ) # Quote the command as a string command = shlex_quote ( command ) # Switch user with su if su_user : command = 'su {0} -c {1}' . format ( su_user , command ) # Otherwise just sh wrap the command else : command = 'sh -c {0}' . format ( command ) # Use sudo (w/user?) if sudo : sudo_bits = [ 'sudo' , '-H' ] if preserve_sudo_env : sudo_bits . append ( '-E' ) if sudo_user : sudo_bits . extend ( ( '-u' , sudo_user ) ) command = '{0} {1}' . format ( ' ' . join ( sudo_bits ) , command ) return command | Builds a shell command with various kwargs . | 393 | 11 |
22,729 | def make_hash ( obj ) : if isinstance ( obj , ( set , tuple , list ) ) : hash_string = '' . join ( [ make_hash ( e ) for e in obj ] ) elif isinstance ( obj , dict ) : hash_string = '' . join ( '' . join ( ( key , make_hash ( value ) ) ) for key , value in six . iteritems ( obj ) ) else : hash_string = ( # Constants - the values can change between hosts but we should still # group them under the same operation hash. '_PYINFRA_CONSTANT' if obj in ( True , False , None ) # Plain strings else obj if isinstance ( obj , six . string_types ) # Objects with __name__s else obj . __name__ if hasattr ( obj , '__name__' ) # Objects with names else obj . name if hasattr ( obj , 'name' ) # Repr anything else else repr ( obj ) ) return sha1_hash ( hash_string ) | Make a hash from an arbitrary nested dictionary list tuple or set used to generate ID s for operations based on their name & arguments . | 224 | 26 |
22,730 | def get_file_sha1 ( filename_or_io ) : file_data = get_file_io ( filename_or_io ) cache_key = file_data . cache_key if cache_key and cache_key in FILE_SHAS : return FILE_SHAS [ cache_key ] with file_data as file_io : hasher = sha1 ( ) buff = file_io . read ( BLOCKSIZE ) while len ( buff ) > 0 : if isinstance ( buff , six . text_type ) : buff = buff . encode ( 'utf-8' ) hasher . update ( buff ) buff = file_io . read ( BLOCKSIZE ) digest = hasher . hexdigest ( ) if cache_key : FILE_SHAS [ cache_key ] = digest return digest | Calculates the SHA1 of a file or file object using a buffer to handle larger files . | 175 | 20 |
22,731 | def read_buffer ( io , print_output = False , print_func = None ) : # TODO: research this further - some steps towards handling stdin (ie password requests # from programs that don't notice there's no TTY to accept passwords from!). This just # prints output as below, but stores partial lines in a buffer, which could be printed # when ready to accept input. Or detected and raise an error. # GitHub issue: https://github.com/Fizzadar/pyinfra/issues/40 # buff = '' # data = io.read(1) # while data: # # Append to the buffer # buff += data # # Newlines in the buffer? Break them out # if '\n' in buff: # lines = buff.split('\n') # # Set the buffer back to just the last line # buff = lines[-1] # # Get the other lines, strip them # lines = [ # line.strip() # for line in lines[:-1] # ] # out.extend(lines) # for line in lines: # _print(line) # # Get next data # data = io.read(1) # if buff: # line = buff.strip() # out.append(line) # _print(line) def _print ( line ) : if print_output : if print_func : formatted_line = print_func ( line ) else : formatted_line = line encoded_line = unicode ( formatted_line ) . encode ( 'utf-8' ) print ( encoded_line ) out = [ ] for line in io : # Handle local Popen shells returning list of bytes, not strings if not isinstance ( line , six . text_type ) : line = line . decode ( 'utf-8' ) line = line . strip ( ) out . append ( line ) _print ( line ) return out | Reads a file - like buffer object into lines and optionally prints the output . | 403 | 16 |
22,732 | def start ( state , host , ctid , force = False ) : args = [ '{0}' . format ( ctid ) ] if force : args . append ( '--force' ) yield 'vzctl start {0}' . format ( ' ' . join ( args ) ) | Start OpenVZ containers . | 65 | 6 |
22,733 | def stop ( state , host , ctid ) : args = [ '{0}' . format ( ctid ) ] yield 'vzctl stop {0}' . format ( ' ' . join ( args ) ) | Stop OpenVZ containers . | 49 | 6 |
22,734 | def restart ( state , host , ctid , force = False ) : yield stop ( state , host , ctid ) yield start ( state , host , ctid , force = force ) | Restart OpenVZ containers . | 42 | 7 |
22,735 | def create ( state , host , ctid , template = None ) : # Check we don't already have a container with this CTID current_containers = host . fact . openvz_containers if ctid in current_containers : raise OperationError ( 'An OpenVZ container with CTID {0} already exists' . format ( ctid ) , ) args = [ '{0}' . format ( ctid ) ] if template : args . append ( '--ostemplate {0}' . format ( template ) ) yield 'vzctl create {0}' . format ( ' ' . join ( args ) ) | Create OpenVZ containers . | 141 | 6 |
22,736 | def set ( state , host , ctid , save = True , * * settings ) : args = [ '{0}' . format ( ctid ) ] if save : args . append ( '--save' ) for key , value in six . iteritems ( settings ) : # Handle list values (eg --nameserver X --nameserver X) if isinstance ( value , list ) : args . extend ( '--{0} {1}' . format ( key , v ) for v in value ) else : args . append ( '--{0} {1}' . format ( key , value ) ) yield 'vzctl set {0}' . format ( ' ' . join ( args ) ) | Set OpenVZ container details . | 154 | 7 |
22,737 | def exec_file ( filename , return_locals = False , is_deploy_code = False ) : if filename not in PYTHON_CODES : with open ( filename , 'r' ) as f : code = f . read ( ) code = compile ( code , filename , 'exec' ) PYTHON_CODES [ filename ] = code # Create some base attributes for our "module" data = { '__file__' : filename , 'state' : pseudo_state , } # Execute the code with locals/globals going into the dict above exec ( PYTHON_CODES [ filename ] , data ) return data | Execute a Python file and optionally return it s attributes as a dict . | 145 | 15 |
22,738 | def shell ( state , host , commands , chdir = None ) : # Ensure we have a list if isinstance ( commands , six . string_types ) : commands = [ commands ] for command in commands : if chdir : yield 'cd {0} && ({1})' . format ( chdir , command ) else : yield command | Run raw shell code . | 71 | 5 |
22,739 | def script ( state , host , filename , chdir = None ) : temp_file = state . get_temp_filename ( filename ) yield files . put ( state , host , filename , temp_file ) yield chmod ( temp_file , '+x' ) if chdir : yield 'cd {0} && {1}' . format ( chdir , temp_file ) else : yield temp_file | Upload and execute a local script on the remote host . | 88 | 11 |
22,740 | def script_template ( state , host , template_filename , chdir = None , * * data ) : temp_file = state . get_temp_filename ( template_filename ) yield files . template ( state , host , template_filename , temp_file , * * data ) yield chmod ( temp_file , '+x' ) if chdir : yield 'cd {0} && {1}' . format ( chdir , temp_file ) else : yield temp_file | Generate upload and execute a local script template on the remote host . | 104 | 14 |
22,741 | def hostname ( state , host , hostname , hostname_file = None ) : if hostname_file is None : os = host . fact . os if os == 'Linux' : hostname_file = '/etc/hostname' elif os == 'OpenBSD' : hostname_file = '/etc/myname' current_hostname = host . fact . hostname if current_hostname != hostname : yield 'hostname {0}' . format ( hostname ) if hostname_file : # Create a whole new hostname file file = six . StringIO ( '{0}\n' . format ( hostname ) ) # And ensure it exists yield files . put ( state , host , file , hostname_file , ) | Set the system hostname . | 162 | 6 |
22,742 | def sysctl ( state , host , name , value , persist = False , persist_file = '/etc/sysctl.conf' , ) : string_value = ( ' ' . join ( value ) if isinstance ( value , list ) else value ) existing_value = host . fact . sysctl . get ( name ) if not existing_value or existing_value != value : yield 'sysctl {0}={1}' . format ( name , string_value ) if persist : yield files . line ( state , host , persist_file , '{0}[[:space:]]*=[[:space:]]*{1}' . format ( name , string_value ) , replace = '{0} = {1}' . format ( name , string_value ) , ) | Edit sysctl configuration . | 170 | 5 |
22,743 | def download ( state , host , source_url , destination , user = None , group = None , mode = None , cache_time = None , force = False , ) : # Get destination info info = host . fact . file ( destination ) # Destination is a directory? if info is False : raise OperationError ( 'Destination {0} already exists and is not a file' . format ( destination ) , ) # Do we download the file? Force by default download = force # Doesn't exist, lets download it if info is None : download = True # Destination file exists & cache_time: check when the file was last modified, # download if old elif cache_time : # Time on files is not tz-aware, and will be the same tz as the server's time, # so we can safely remove the tzinfo from host.fact.date before comparison. cache_time = host . fact . date . replace ( tzinfo = None ) - timedelta ( seconds = cache_time ) if info [ 'mtime' ] and info [ 'mtime' ] > cache_time : download = True # If we download, always do user/group/mode as SSH user may be different if download : yield 'wget -q {0} -O {1}' . format ( source_url , destination ) if user or group : yield chown ( destination , user , group ) if mode : yield chmod ( destination , mode ) | Download files from remote locations . | 309 | 6 |
22,744 | def replace ( state , host , name , match , replace , flags = None ) : yield sed_replace ( name , match , replace , flags = flags ) | A simple shortcut for replacing text in files with sed . | 33 | 11 |
22,745 | def sync ( state , host , source , destination , user = None , group = None , mode = None , delete = False , exclude = None , exclude_dir = None , add_deploy_dir = True , ) : # If we don't enforce the source ending with /, remote_dirname below might start with # a /, which makes the path.join cut off the destination bit. if not source . endswith ( path . sep ) : source = '{0}{1}' . format ( source , path . sep ) # Add deploy directory? if add_deploy_dir and state . deploy_dir : source = path . join ( state . deploy_dir , source ) # Ensure the source directory exists if not path . isdir ( source ) : raise IOError ( 'No such directory: {0}' . format ( source ) ) # Ensure exclude is a list/tuple if exclude is not None : if not isinstance ( exclude , ( list , tuple ) ) : exclude = [ exclude ] # Ensure exclude_dir is a list/tuple if exclude_dir is not None : if not isinstance ( exclude_dir , ( list , tuple ) ) : exclude_dir = [ exclude_dir ] put_files = [ ] ensure_dirnames = [ ] for dirname , _ , filenames in walk ( source ) : remote_dirname = dirname . replace ( source , '' ) # Should we exclude this dir? if exclude_dir and any ( fnmatch ( remote_dirname , match ) for match in exclude_dir ) : continue if remote_dirname : ensure_dirnames . append ( remote_dirname ) for filename in filenames : full_filename = path . join ( dirname , filename ) # Should we exclude this file? if exclude and any ( fnmatch ( full_filename , match ) for match in exclude ) : continue put_files . append ( ( # Join local as normal (unix, win) full_filename , # Join remote as unix like '/' . join ( item for item in ( destination , remote_dirname , filename ) if item ) , ) ) # Ensure the destination directory yield directory ( state , host , destination , user = user , group = group , ) # Ensure any remote dirnames for dirname in ensure_dirnames : yield directory ( state , host , '/' . 
join ( ( destination , dirname ) ) , user = user , group = group , ) # Put each file combination for local_filename , remote_filename in put_files : yield put ( state , host , local_filename , remote_filename , user = user , group = group , mode = mode , add_deploy_dir = False , ) # Delete any extra files if delete : remote_filenames = set ( host . fact . find_files ( destination ) or [ ] ) wanted_filenames = set ( [ remote_filename for _ , remote_filename in put_files ] ) files_to_delete = remote_filenames - wanted_filenames for filename in files_to_delete : # Should we exclude this file? if exclude and any ( fnmatch ( filename , match ) for match in exclude ) : continue yield file ( state , host , filename , present = False ) | Syncs a local directory with a remote one with delete support . Note that delete will remove extra files on the remote side but not extra directories . | 697 | 29 |
22,746 | def put ( state , host , local_filename , remote_filename , user = None , group = None , mode = None , add_deploy_dir = True , ) : # Upload IO objects as-is if hasattr ( local_filename , 'read' ) : local_file = local_filename # Assume string filename else : # Add deploy directory? if add_deploy_dir and state . deploy_dir : local_filename = path . join ( state . deploy_dir , local_filename ) local_file = local_filename if not path . isfile ( local_file ) : raise IOError ( 'No such file: {0}' . format ( local_file ) ) mode = ensure_mode_int ( mode ) remote_file = host . fact . file ( remote_filename ) # No remote file, always upload and user/group/mode if supplied if not remote_file : yield ( local_file , remote_filename ) if user or group : yield chown ( remote_filename , user , group ) if mode : yield chmod ( remote_filename , mode ) # File exists, check sum and check user/group/mode if supplied else : local_sum = get_file_sha1 ( local_filename ) remote_sum = host . fact . sha1_file ( remote_filename ) # Check sha1sum, upload if needed if local_sum != remote_sum : yield ( local_file , remote_filename ) if user or group : yield chown ( remote_filename , user , group ) if mode : yield chmod ( remote_filename , mode ) else : # Check mode if mode and remote_file [ 'mode' ] != mode : yield chmod ( remote_filename , mode ) # Check user/group if ( ( user and remote_file [ 'user' ] != user ) or ( group and remote_file [ 'group' ] != group ) ) : yield chown ( remote_filename , user , group ) | Copy a local file to the remote system . | 422 | 9 |
22,747 | def template ( state , host , template_filename , remote_filename , user = None , group = None , mode = None , * * data ) : if state . deploy_dir : template_filename = path . join ( state . deploy_dir , template_filename ) # Ensure host is always available inside templates data [ 'host' ] = host data [ 'inventory' ] = state . inventory # Render and make file-like it's output try : output = get_template ( template_filename ) . render ( data ) except ( TemplateSyntaxError , UndefinedError ) as e : _ , _ , trace = sys . exc_info ( ) # Jump through to the *second last* traceback, which contains the line number # of the error within the in-memory Template object while trace . tb_next : if trace . tb_next . tb_next : trace = trace . tb_next else : break line_number = trace . tb_frame . f_lineno # Quickly read the line in question and one above/below for nicer debugging template_lines = open ( template_filename , 'r' ) . readlines ( ) template_lines = [ line . strip ( ) for line in template_lines ] relevant_lines = template_lines [ max ( line_number - 2 , 0 ) : line_number + 1 ] raise OperationError ( 'Error in template: {0} (L{1}): {2}\n...\n{3}\n...' . format ( template_filename , line_number , e , '\n' . join ( relevant_lines ) , ) ) output_file = six . StringIO ( output ) # Set the template attribute for nicer debugging output_file . template = template_filename # Pass to the put function yield put ( state , host , output_file , remote_filename , user = user , group = group , mode = mode , add_deploy_dir = False , ) | Generate a template and write it to the remote system . | 418 | 12 |
22,748 | def sql ( state , host , sql , database = None , # Details for speaking to PostgreSQL via `psql` CLI postgresql_user = None , postgresql_password = None , postgresql_host = None , postgresql_port = None , ) : yield make_execute_psql_command ( sql , database = database , user = postgresql_user , password = postgresql_password , host = postgresql_host , port = postgresql_port , ) | Execute arbitrary SQL against PostgreSQL . | 109 | 8 |
22,749 | def dump ( state , host , remote_filename , database = None , # Details for speaking to PostgreSQL via `psql` CLI postgresql_user = None , postgresql_password = None , postgresql_host = None , postgresql_port = None , ) : yield '{0} > {1}' . format ( make_psql_command ( executable = 'pg_dump' , database = database , user = postgresql_user , password = postgresql_password , host = postgresql_host , port = postgresql_port , ) , remote_filename ) | Dump a PostgreSQL database into a . sql file . Requires pg_dump . | 132 | 18 |
22,750 | def get_fact ( state , host , name ) : # Expecting a function to return if callable ( getattr ( FACTS [ name ] , 'command' , None ) ) : def wrapper ( * args ) : fact_data = get_facts ( state , name , args = args , ensure_hosts = ( host , ) ) return fact_data . get ( host ) return wrapper # Expecting the fact as a return value else : # Get the fact fact_data = get_facts ( state , name , ensure_hosts = ( host , ) ) return fact_data . get ( host ) | Wrapper around get_facts returning facts for one host or a function that does . | 130 | 17 |
22,751 | def key ( state , host , key = None , keyserver = None , keyid = None ) : if key : # If URL, wget the key to stdout and pipe into apt-key, because the "adv" # apt-key passes to gpg which doesn't always support https! if urlparse ( key ) . scheme : yield 'wget -O- {0} | apt-key add -' . format ( key ) else : yield 'apt-key add {0}' . format ( key ) if keyserver and keyid : yield 'apt-key adv --keyserver {0} --recv-keys {1}' . format ( keyserver , keyid ) | Add apt gpg keys with apt - key . | 148 | 10 |
22,752 | def update ( state , host , cache_time = None , touch_periodic = False ) : # If cache_time check when apt was last updated, prevent updates if within time if cache_time : # Ubuntu provides this handy file cache_info = host . fact . file ( APT_UPDATE_FILENAME ) # Time on files is not tz-aware, and will be the same tz as the server's time, # so we can safely remove the tzinfo from host.fact.date before comparison. host_cache_time = host . fact . date . replace ( tzinfo = None ) - timedelta ( seconds = cache_time ) if cache_info and cache_info [ 'mtime' ] and cache_info [ 'mtime' ] > host_cache_time : return yield 'apt-get update' # Some apt systems (Debian) have the /var/lib/apt/periodic directory, but # don't bother touching anything in there - so pyinfra does it, enabling # cache_time to work. if cache_time : yield 'touch {0}' . format ( APT_UPDATE_FILENAME ) | Updates apt repos . | 250 | 6 |
22,753 | def run_shell_command ( state , host , command , get_pty = False , timeout = None , print_output = False , * * command_kwargs ) : command = make_command ( command , * * command_kwargs ) logger . debug ( '--> Running command on localhost: {0}' . format ( command ) ) if print_output : print ( '{0}>>> {1}' . format ( host . print_prefix , command ) ) process = Popen ( command , shell = True , stdout = PIPE , stderr = PIPE ) # Iterate through outputs to get an exit status and generate desired list # output, done in two greenlets so stdout isn't printed before stderr. Not # attached to state.pool to avoid blocking it with 2x n-hosts greenlets. stdout_reader = gevent . spawn ( read_buffer , process . stdout , print_output = print_output , print_func = lambda line : '{0}{1}' . format ( host . print_prefix , line ) , ) stderr_reader = gevent . spawn ( read_buffer , process . stderr , print_output = print_output , print_func = lambda line : '{0}{1}' . format ( host . print_prefix , click . style ( line , 'red' ) , ) , ) # Wait on output, with our timeout (or None) greenlets = gevent . wait ( ( stdout_reader , stderr_reader ) , timeout = timeout ) # Timeout doesn't raise an exception, but gevent.wait returns the greenlets # which did complete. So if both haven't completed, we kill them and fail # with a timeout. if len ( greenlets ) != 2 : stdout_reader . kill ( ) stderr_reader . kill ( ) raise timeout_error ( ) # Read the buffers into a list of lines stdout = stdout_reader . get ( ) stderr = stderr_reader . get ( ) logger . debug ( '--> Waiting for exit status...' ) process . wait ( ) # Close any open file descriptor process . stdout . close ( ) logger . debug ( '--> Command exit status: {0}' . format ( process . returncode ) ) return process . returncode == 0 , stdout , stderr | Execute a command on the local machine . | 513 | 9 |
22,754 | def upstart ( state , host , name , running = True , restarted = False , reloaded = False , command = None , enabled = None , ) : yield _handle_service_control ( name , host . fact . upstart_status , 'initctl {1} {0}' , running , restarted , reloaded , command , ) # Upstart jobs are setup w/runlevels etc in their config files, so here we just check # there's no override file. if enabled is True : yield files . file ( state , host , '/etc/init/{0}.override' . format ( name ) , present = False , ) # Set the override file to "manual" to disable automatic start elif enabled is False : yield 'echo "manual" > /etc/init/{0}.override' . format ( name ) | Manage the state of upstart managed services . | 183 | 10 |
22,755 | def service ( state , host , * args , * * kwargs ) : if host . fact . which ( 'systemctl' ) : yield systemd ( state , host , * args , * * kwargs ) return if host . fact . which ( 'initctl' ) : yield upstart ( state , host , * args , * * kwargs ) return if host . fact . directory ( '/etc/init.d' ) : yield d ( state , host , * args , * * kwargs ) return if host . fact . directory ( '/etc/rc.d' ) : yield rc ( state , host , * args , * * kwargs ) return raise OperationError ( ( 'No init system found ' '(no systemctl, initctl, /etc/init.d or /etc/rc.d found)' ) ) | Manage the state of services . This command checks for the presence of all the init systems pyinfra can handle and executes the relevant operation . See init system specific operation for arguments . | 180 | 38 |
22,756 | def connect ( state , host , for_fact = None ) : kwargs = _make_paramiko_kwargs ( state , host ) logger . debug ( 'Connecting to: {0} ({1})' . format ( host . name , kwargs ) ) # Hostname can be provided via SSH config (alias), data, or the hosts name hostname = kwargs . pop ( 'hostname' , host . data . ssh_hostname or host . name , ) try : # Create new client & connect to the host client = SSHClient ( ) client . set_missing_host_key_policy ( MissingHostKeyPolicy ( ) ) client . connect ( hostname , * * kwargs ) # Enable SSH forwarding session = client . get_transport ( ) . open_session ( ) AgentRequestHandler ( session ) # Log log_message = '{0}{1}' . format ( host . print_prefix , click . style ( 'Connected' , 'green' ) , ) if for_fact : log_message = '{0}{1}' . format ( log_message , ' (for {0} fact)' . format ( for_fact ) , ) logger . info ( log_message ) return client except AuthenticationException : auth_kwargs = { } for key , value in kwargs . items ( ) : if key in ( 'username' , 'password' ) : auth_kwargs [ key ] = value continue if key == 'pkey' and value : auth_kwargs [ 'key' ] = host . data . ssh_key auth_args = ', ' . join ( '{0}={1}' . format ( key , value ) for key , value in auth_kwargs . items ( ) ) _log_connect_error ( host , 'Authentication error' , auth_args ) except SSHException as e : _log_connect_error ( host , 'SSH error' , e ) except gaierror : _log_connect_error ( host , 'Could not resolve hostname' , hostname ) except socket_error as e : _log_connect_error ( host , 'Could not connect' , e ) except EOFError as e : _log_connect_error ( host , 'EOF error' , e ) | Connect to a single host . Returns the SSH client if succesful . Stateless by design so can be run in parallel . | 494 | 26 |
22,757 | def run_shell_command ( state , host , command , get_pty = False , timeout = None , print_output = False , * * command_kwargs ) : command = make_command ( command , * * command_kwargs ) logger . debug ( 'Running command on {0}: (pty={1}) {2}' . format ( host . name , get_pty , command , ) ) if print_output : print ( '{0}>>> {1}' . format ( host . print_prefix , command ) ) # Run it! Get stdout, stderr & the underlying channel _ , stdout_buffer , stderr_buffer = host . connection . exec_command ( command , get_pty = get_pty , ) channel = stdout_buffer . channel # Iterate through outputs to get an exit status and generate desired list # output, done in two greenlets so stdout isn't printed before stderr. Not # attached to state.pool to avoid blocking it with 2x n-hosts greenlets. stdout_reader = gevent . spawn ( read_buffer , stdout_buffer , print_output = print_output , print_func = lambda line : '{0}{1}' . format ( host . print_prefix , line ) , ) stderr_reader = gevent . spawn ( read_buffer , stderr_buffer , print_output = print_output , print_func = lambda line : '{0}{1}' . format ( host . print_prefix , click . style ( line , 'red' ) , ) , ) # Wait on output, with our timeout (or None) greenlets = gevent . wait ( ( stdout_reader , stderr_reader ) , timeout = timeout ) # Timeout doesn't raise an exception, but gevent.wait returns the greenlets # which did complete. So if both haven't completed, we kill them and fail # with a timeout. if len ( greenlets ) != 2 : stdout_reader . kill ( ) stderr_reader . kill ( ) raise timeout_error ( ) # Read the buffers into a list of lines stdout = stdout_reader . get ( ) stderr = stderr_reader . get ( ) logger . debug ( 'Waiting for exit status...' ) exit_status = channel . recv_exit_status ( ) logger . debug ( 'Command exit status: {0}' . format ( exit_status ) ) return exit_status == 0 , stdout , stderr | Execute a command on the specified host . | 547 | 9 |
22,758 | def put_file ( state , host , filename_or_io , remote_filename , sudo = False , sudo_user = None , su_user = None , print_output = False , ) : # sudo/su are a little more complicated, as you can only sftp with the SSH # user connected, so upload to tmp and copy/chown w/sudo and/or su_user if sudo or su_user : # Get temp file location temp_file = state . get_temp_filename ( remote_filename ) _put_file ( host , filename_or_io , temp_file ) if print_output : print ( '{0}file uploaded: {1}' . format ( host . print_prefix , remote_filename ) ) # Execute run_shell_command w/sudo and/or su_user command = 'mv {0} {1}' . format ( temp_file , remote_filename ) # Move it to the su_user if present if su_user : command = '{0} && chown {1} {2}' . format ( command , su_user , remote_filename ) # Otherwise any sudo_user elif sudo_user : command = '{0} && chown {1} {2}' . format ( command , sudo_user , remote_filename ) status , _ , stderr = run_shell_command ( state , host , command , sudo = sudo , sudo_user = sudo_user , su_user = su_user , print_output = print_output , ) if status is False : logger . error ( 'File error: {0}' . format ( '\n' . join ( stderr ) ) ) return False # No sudo and no su_user, so just upload it! else : _put_file ( host , filename_or_io , remote_filename ) if print_output : print ( '{0}file uploaded: {1}' . format ( host . print_prefix , remote_filename ) ) return True | Upload file - ios to the specified host using SFTP . Supports uploading files with sudo by uploading to a temporary directory then moving & chowning . | 438 | 31 |
22,759 | def deploy ( self , name , kwargs , data , line_number , in_deploy = True ) : # Handle nested deploy names if self . deploy_name : name = _make_name ( self . deploy_name , name ) # Store the previous values old_in_deploy = self . in_deploy old_deploy_name = self . deploy_name old_deploy_kwargs = self . deploy_kwargs old_deploy_data = self . deploy_data old_deploy_line_numbers = self . deploy_line_numbers self . in_deploy = in_deploy # Limit the new hosts to a subset of the old hosts if they existed if ( old_deploy_kwargs and old_deploy_kwargs . get ( 'hosts' ) is not None ) : # If we have hosts - subset them based on the old hosts if 'hosts' in kwargs : kwargs [ 'hosts' ] = [ host for host in kwargs [ 'hosts' ] if host in old_deploy_kwargs [ 'hosts' ] ] # Otherwise simply carry the previous hosts else : kwargs [ 'hosts' ] = old_deploy_kwargs [ 'hosts' ] # Make new line numbers - note convert from and back to tuple to avoid # keeping deploy_line_numbers mutable. new_line_numbers = list ( self . deploy_line_numbers or [ ] ) new_line_numbers . append ( line_number ) new_line_numbers = tuple ( new_line_numbers ) # Set the new values self . deploy_name = name self . deploy_kwargs = kwargs self . deploy_data = data self . deploy_line_numbers = new_line_numbers logger . debug ( 'Starting deploy {0} (args={1}, data={2})' . format ( name , kwargs , data , ) ) yield # Restore the previous values self . in_deploy = old_in_deploy self . deploy_name = old_deploy_name self . deploy_kwargs = old_deploy_kwargs self . deploy_data = old_deploy_data self . deploy_line_numbers = old_deploy_line_numbers logger . debug ( 'Reset deploy to {0} (args={1}, data={2})' . format ( old_deploy_name , old_deploy_kwargs , old_deploy_data , ) ) | Wraps a group of operations as a deploy this should not be used directly instead use pyinfra . api . deploy . deploy . | 555 | 27 |
22,760 | def activate_host ( self , host ) : logger . debug ( 'Activating host: {0}' . format ( host ) ) # Add to *both* activated and active - active will reduce as hosts fail # but connected will not, enabling us to track failed %. self . activated_hosts . add ( host ) self . active_hosts . add ( host ) | Flag a host as active . | 80 | 6 |
22,761 | def fail_hosts ( self , hosts_to_fail , activated_count = None ) : if not hosts_to_fail : return activated_count = activated_count or len ( self . activated_hosts ) logger . debug ( 'Failing hosts: {0}' . format ( ', ' . join ( ( host . name for host in hosts_to_fail ) , ) ) ) # Remove the failed hosts from the inventory self . active_hosts -= hosts_to_fail # Check we're not above the fail percent active_hosts = self . active_hosts # No hosts left! if not active_hosts : raise PyinfraError ( 'No hosts remaining!' ) if self . config . FAIL_PERCENT is not None : percent_failed = ( 1 - len ( active_hosts ) / activated_count ) * 100 if percent_failed > self . config . FAIL_PERCENT : raise PyinfraError ( 'Over {0}% of hosts failed ({1}%)' . format ( self . config . FAIL_PERCENT , int ( round ( percent_failed ) ) , ) ) | Flag a set of hosts as failed error for config . FAIL_PERCENT . | 244 | 17 |
22,762 | def is_host_in_limit ( self , host ) : limit_hosts = self . limit_hosts if not isinstance ( limit_hosts , list ) : return True return host in limit_hosts | Returns a boolean indicating if the host is within the current state limit . | 47 | 14 |
22,763 | def get_temp_filename ( self , hash_key = None ) : if not hash_key : hash_key = six . text_type ( uuid4 ( ) ) temp_filename = '{0}/{1}' . format ( self . config . TEMP_DIR , sha1_hash ( hash_key ) , ) return temp_filename | Generate a temporary filename for this deploy . | 79 | 9 |
22,764 | def _run_server_ops ( state , host , progress = None ) : logger . debug ( 'Running all ops on {0}' . format ( host ) ) for op_hash in state . get_op_order ( ) : op_meta = state . op_meta [ op_hash ] logger . info ( '--> {0} {1} on {2}' . format ( click . style ( '--> Starting operation:' , 'blue' ) , click . style ( ', ' . join ( op_meta [ 'names' ] ) , bold = True ) , click . style ( host . name , bold = True ) , ) ) result = _run_server_op ( state , host , op_hash ) # Trigger CLI progress if provided if progress : progress ( ( host , op_hash ) ) if result is False : raise PyinfraError ( 'Error in operation {0} on {1}' . format ( ', ' . join ( op_meta [ 'names' ] ) , host , ) ) if pyinfra . is_cli : print ( ) | Run all ops for a single server . | 232 | 8 |
22,765 | def _run_serial_ops ( state ) : for host in list ( state . inventory ) : host_operations = product ( [ host ] , state . get_op_order ( ) ) with progress_spinner ( host_operations ) as progress : try : _run_server_ops ( state , host , progress = progress , ) except PyinfraError : state . fail_hosts ( { host } ) | Run all ops for all servers one server at a time . | 91 | 12 |
22,766 | def _run_no_wait_ops ( state ) : hosts_operations = product ( state . inventory , state . get_op_order ( ) ) with progress_spinner ( hosts_operations ) as progress : # Spawn greenlet for each host to run *all* ops greenlets = [ state . pool . spawn ( _run_server_ops , state , host , progress = progress , ) for host in state . inventory ] gevent . joinall ( greenlets ) | Run all ops for all servers at once . | 103 | 9 |
22,767 | def _run_single_op ( state , op_hash ) : op_meta = state . op_meta [ op_hash ] op_types = [ ] if op_meta [ 'serial' ] : op_types . append ( 'serial' ) if op_meta [ 'run_once' ] : op_types . append ( 'run once' ) logger . info ( '{0} {1} {2}' . format ( click . style ( '--> Starting{0}operation:' . format ( ' {0} ' . format ( ', ' . join ( op_types ) ) if op_types else ' ' , ) , 'blue' ) , click . style ( ', ' . join ( op_meta [ 'names' ] ) , bold = True ) , tuple ( op_meta [ 'args' ] ) if op_meta [ 'args' ] else '' , ) ) failed_hosts = set ( ) if op_meta [ 'serial' ] : with progress_spinner ( state . inventory ) as progress : # For each host, run the op for host in state . inventory : result = _run_server_op ( state , host , op_hash ) progress ( host ) if not result : failed_hosts . add ( host ) else : # Start with the whole inventory in one batch batches = [ state . inventory ] # If parallel set break up the inventory into a series of batches if op_meta [ 'parallel' ] : parallel = op_meta [ 'parallel' ] hosts = list ( state . inventory ) batches = [ hosts [ i : i + parallel ] for i in range ( 0 , len ( hosts ) , parallel ) ] for batch in batches : with progress_spinner ( batch ) as progress : # Spawn greenlet for each host greenlet_to_host = { state . pool . spawn ( _run_server_op , state , host , op_hash ) : host for host in batch } # Trigger CLI progress as hosts complete if provided for greenlet in gevent . iwait ( greenlet_to_host . keys ( ) ) : host = greenlet_to_host [ greenlet ] progress ( host ) # Get all the results for greenlet , host in six . iteritems ( greenlet_to_host ) : if not greenlet . get ( ) : failed_hosts . add ( host ) # Now all the batches/hosts are complete, fail any failures if not op_meta [ 'ignore_errors' ] : state . fail_hosts ( failed_hosts ) if pyinfra . is_cli : print ( ) | Run a single operation for all servers . Can be configured to run in serial . | 559 | 16 |
22,768 | def run_ops ( state , serial = False , no_wait = False ) : # Flag state as deploy in process state . deploying = True # Run all ops, but server by server if serial : _run_serial_ops ( state ) # Run all the ops on each server in parallel (not waiting at each operation) elif no_wait : _run_no_wait_ops ( state ) # Default: run all ops in order, waiting at each for all servers to complete for op_hash in state . get_op_order ( ) : _run_single_op ( state , op_hash ) | Runs all operations across all servers in a configurable manner . | 130 | 13 |
22,769 | def serve ( service_brokers : Union [ List [ ServiceBroker ] , ServiceBroker ] , credentials : Union [ List [ BrokerCredentials ] , BrokerCredentials , None ] , logger : logging . Logger = logging . root , port = 5000 , debug = False ) : from gevent . pywsgi import WSGIServer from flask import Flask app = Flask ( __name__ ) app . debug = debug blueprint = get_blueprint ( service_brokers , credentials , logger ) logger . debug ( "Register openbrokerapi blueprint" ) app . register_blueprint ( blueprint ) logger . info ( "Start Flask on 0.0.0.0:%s" % port ) http_server = WSGIServer ( ( '0.0.0.0' , port ) , app ) http_server . serve_forever ( ) | Starts flask with the given brokers . You can provide a list or just one ServiceBroker | 188 | 19 |
22,770 | def multi_ping ( dest_addrs , timeout , retry = 0 , ignore_lookup_errors = False ) : retry = int ( retry ) if retry < 0 : retry = 0 timeout = float ( timeout ) if timeout < 0.1 : raise MultiPingError ( "Timeout < 0.1 seconds not allowed" ) retry_timeout = float ( timeout ) / ( retry + 1 ) if retry_timeout < 0.1 : raise MultiPingError ( "Time between ping retries < 0.1 seconds" ) mp = MultiPing ( dest_addrs , ignore_lookup_errors = ignore_lookup_errors ) results = { } retry_count = 0 while retry_count <= retry : # Send a batch of pings mp . send ( ) single_results , no_results = mp . receive ( retry_timeout ) # Add the results from the last sending of pings to the overall results results . update ( single_results ) if not no_results : # No addresses left? We are done. break retry_count += 1 return results , no_results | Combine send and receive measurement into single function . | 241 | 10 |
22,771 | def _checksum ( self , msg ) : def carry_around_add ( a , b ) : c = a + b return ( c & 0xffff ) + ( c >> 16 ) s = 0 for i in range ( 0 , len ( msg ) , 2 ) : w = ( msg [ i ] << 8 ) + msg [ i + 1 ] s = carry_around_add ( s , w ) s = ~ s & 0xffff return s | Calculate the checksum of a packet . | 98 | 10 |
22,772 | def send ( self ) : # Collect all the addresses for which we have not seen responses yet. if not self . _receive_has_been_called : all_addrs = self . _dest_addrs else : all_addrs = [ a for ( i , a ) in list ( self . _id_to_addr . items ( ) ) if i in self . _remaining_ids ] if self . _last_used_id is None : # Will attempt to continue at the last request ID we used. But if # we never sent anything before then we create a first ID # 'randomly' from the current time. ID is only a 16 bit field, so # need to trim it down. self . _last_used_id = int ( time . time ( ) ) & 0xffff # Send ICMPecho to all addresses... for addr in all_addrs : # Make a unique ID, wrapping around at 65535. self . _last_used_id = ( self . _last_used_id + 1 ) & 0xffff # Remember the address for each ID so we can produce meaningful # result lists later on. self . _id_to_addr [ self . _last_used_id ] = addr # Send an ICMPecho request packet. We specify a payload consisting # of the current time stamp. This is returned to us in the # response and allows us to calculate the 'ping time'. self . _send_ping ( addr , payload = struct . pack ( "d" , time . time ( ) ) ) | Send pings to multiple addresses ensuring unique IDs for each request . | 332 | 13 |
22,773 | def _read_all_from_socket ( self , timeout ) : pkts = [ ] try : self . _sock . settimeout ( timeout ) while True : p = self . _sock . recv ( 64 ) # Store the packet and the current time pkts . append ( ( bytearray ( p ) , time . time ( ) ) ) # Continue the loop to receive any additional packets that # may have arrived at this point. Changing the socket to # non-blocking (by setting the timeout to 0), so that we'll # only continue the loop until all current packets have been # read. self . _sock . settimeout ( 0 ) except socket . timeout : # In the first blocking read with timout, we may not receive # anything. This is not an error, it just means no data was # available in the specified time. pass except socket . error as e : # When we read in non-blocking mode, we may get this error with # errno 11 to indicate that no more data is available. That's ok, # just like the timeout. if e . errno == errno . EWOULDBLOCK : pass else : # We're not expecting any other socket exceptions, so we # re-raise in that case. raise if self . _ipv6_address_present : try : self . _sock6 . settimeout ( timeout ) while True : p = self . _sock6 . recv ( 128 ) pkts . append ( ( bytearray ( p ) , time . time ( ) ) ) self . _sock6 . settimeout ( 0 ) except socket . timeout : pass except socket . error as e : if e . errno == errno . EWOULDBLOCK : pass else : raise return pkts | Read all packets we currently can on the socket . | 381 | 10 |
22,774 | async def get ( self ) : try : return self . _parse ( await self . read_registers ( 0 , 16 ) ) except TimeoutError : return { 'ip' : self . ip , 'connected' : False } | Get current state from the Midas gas detector . | 50 | 10 |
22,775 | def _parse ( self , registers ) : result = { 'ip' : self . ip , 'connected' : True } decoder = BinaryPayloadDecoder . fromRegisters ( registers , byteorder = Endian . Big , wordorder = Endian . Little ) # Register 40001 is a collection of alarm status signals b = [ decoder . decode_bits ( ) , decoder . decode_bits ( ) ] reg_40001 = b [ 1 ] + b [ 0 ] # Bits 0-3 map to the monitor state monitor_integer = sum ( 1 << i for i , b in enumerate ( reg_40001 [ : 4 ] ) if b ) result [ 'state' ] = options [ 'monitor state' ] [ monitor_integer ] # Bits 4-5 map to fault status fault_integer = sum ( 1 << i for i , b in enumerate ( reg_40001 [ 4 : 6 ] ) if b ) result [ 'fault' ] = { 'status' : options [ 'fault status' ] [ fault_integer ] } # Bits 6 and 7 tell if low and high alarms are active low , high = reg_40001 [ 6 : 8 ] result [ 'alarm' ] = options [ 'alarm level' ] [ low + high ] # Bits 8-10 tell if internal sensor relays 1-3 are energized. Skipping. # Bit 11 is a heartbeat bit that toggles every two seconds. Skipping. # Bit 12 tells if relays are under modbus control. Skipping. # Remaining bits are empty. Skipping. # Register 40002 has a gas ID and a sensor cartridge ID. Skipping. decoder . _pointer += 2 # Registers 40003-40004 are the gas concentration as a float result [ 'concentration' ] = decoder . decode_32bit_float ( ) # Register 40005 is the concentration as an int. Skipping. decoder . _pointer += 2 # Register 40006 is the number of the most important fault. fault_number = decoder . decode_16bit_uint ( ) if fault_number != 0 : code = ( 'm' if fault_number < 30 else 'F' ) + str ( fault_number ) result [ 'fault' ] [ 'code' ] = code result [ 'fault' ] . update ( faults [ code ] ) # Register 40007 holds the concentration unit in the second byte # Instead of being an int, it's the position of the up bit unit_bit = decoder . decode_bits ( ) . index ( True ) result [ 'units' ] = options [ 'concentration unit' ] [ unit_bit ] decoder . 
_pointer += 1 # Register 40008 holds the sensor temperature in Celsius result [ 'temperature' ] = decoder . decode_16bit_int ( ) # Register 40009 holds number of hours remaining in cell life result [ 'life' ] = decoder . decode_16bit_uint ( ) / 24.0 # Register 40010 holds the number of heartbeats (16 LSB). Skipping. decoder . _pointer += 2 # Register 40011 is the sample flow rate in cc / min result [ 'flow' ] = decoder . decode_16bit_uint ( ) # Register 40012 is blank. Skipping. decoder . _pointer += 2 # Registers 40013-40016 are the alarm concentration thresholds result [ 'low-alarm threshold' ] = round ( decoder . decode_32bit_float ( ) , 6 ) result [ 'high-alarm threshold' ] = round ( decoder . decode_32bit_float ( ) , 6 ) # Despite what the manual says, thresholds are always reported in ppm. # Let's fix that to match the concentration units. if result [ 'units' ] == 'ppb' : result [ 'concentration' ] *= 1000 result [ 'low-alarm threshold' ] *= 1000 result [ 'high-alarm threshold' ] *= 1000 return result | Parse the response returning a dictionary . | 859 | 8 |
22,776 | async def _connect ( self ) : self . waiting = True await self . client . start ( self . ip ) self . waiting = False if self . client . protocol is None : raise IOError ( "Could not connect to '{}'." . format ( self . ip ) ) self . open = True | Start asynchronous reconnect loop . | 65 | 5 |
22,777 | async def read_registers ( self , address , count ) : registers = [ ] while count > 124 : r = await self . _request ( 'read_holding_registers' , address , 124 ) registers += r . registers address , count = address + 124 , count - 124 r = await self . _request ( 'read_holding_registers' , address , count ) registers += r . registers return registers | Read modbus registers . | 89 | 5 |
22,778 | async def write_register ( self , address , value , skip_encode = False ) : await self . _request ( 'write_registers' , address , value , skip_encode = skip_encode ) | Write a modbus register . | 48 | 6 |
22,779 | async def write_registers ( self , address , values , skip_encode = False ) : while len ( values ) > 62 : await self . _request ( 'write_registers' , address , values , skip_encode = skip_encode ) address , values = address + 124 , values [ 62 : ] await self . _request ( 'write_registers' , address , values , skip_encode = skip_encode ) | Write modbus registers . | 97 | 5 |
22,780 | async def _request ( self , method , * args , * * kwargs ) : if not self . open : await self . _connect ( ) while self . waiting : await asyncio . sleep ( 0.1 ) if self . client . protocol is None or not self . client . protocol . connected : raise TimeoutError ( "Not connected to device." ) try : future = getattr ( self . client . protocol , method ) ( * args , * * kwargs ) except AttributeError : raise TimeoutError ( "Not connected to device." ) self . waiting = True try : return await asyncio . wait_for ( future , timeout = self . timeout ) except asyncio . TimeoutError as e : if self . open : # This came from reading through the pymodbus@python3 source # Problem was that the driver was not detecting disconnect if hasattr ( self , 'modbus' ) : self . client . protocol_lost_connection ( self . modbus ) self . open = False raise TimeoutError ( e ) except pymodbus . exceptions . ConnectionException as e : raise ConnectionError ( e ) finally : self . waiting = False | Send a request to the device and awaits a response . | 247 | 11 |
22,781 | def _close ( self ) : self . client . stop ( ) self . open = False self . waiting = False | Close the TCP connection . | 24 | 5 |
22,782 | def command_line ( ) : import argparse import asyncio import json parser = argparse . ArgumentParser ( description = "Read a Honeywell Midas gas " "detector state from the command line." ) parser . add_argument ( 'address' , help = "The IP address of the gas detector." ) args = parser . parse_args ( ) async def get ( ) : async with GasDetector ( args . address ) as detector : print ( json . dumps ( await detector . get ( ) , indent = 4 , sort_keys = True ) ) loop = asyncio . get_event_loop ( ) loop . run_until_complete ( get ( ) ) loop . close ( ) | Command - line tool for Midas gas detector communication . | 147 | 11 |
22,783 | def build_masked_loss ( loss_function , mask_value ) : def masked_loss_function ( y_true , y_pred ) : mask = K . cast ( K . not_equal ( y_true , mask_value ) , K . floatx ( ) ) return loss_function ( y_true * mask , y_pred * mask ) return masked_loss_function | Builds a loss function that masks based on targets | 84 | 10 |
22,784 | def Run ( self ) : if not self . executable : logging . error ( 'Could not locate "%s"' % self . long_name ) return 0 finfo = os . stat ( self . executable ) self . date = time . localtime ( finfo [ stat . ST_MTIME ] ) logging . info ( 'Running: %s %s </dev/null 2>&1' % ( self . executable , FLAGS . help_flag ) ) # --help output is often routed to stderr, so we combine with stdout. # Re-direct stdin to /dev/null to encourage programs that # don't understand --help to exit. ( child_stdin , child_stdout_and_stderr ) = os . popen4 ( [ self . executable , FLAGS . help_flag ] ) child_stdin . close ( ) # '</dev/null' self . output = child_stdout_and_stderr . readlines ( ) child_stdout_and_stderr . close ( ) if len ( self . output ) < _MIN_VALID_USAGE_MSG : logging . error ( 'Error: "%s %s" returned only %d lines: %s' % ( self . name , FLAGS . help_flag , len ( self . output ) , self . output ) ) return 0 return 1 | Run it and collect output . | 298 | 6 |
22,785 | def Parse ( self ) : ( start_line , lang ) = self . ParseDesc ( ) if start_line < 0 : return if 'python' == lang : self . ParsePythonFlags ( start_line ) elif 'c' == lang : self . ParseCFlags ( start_line ) elif 'java' == lang : self . ParseJavaFlags ( start_line ) | Parse program output . | 86 | 5 |
22,786 | def ParseDesc ( self , start_line = 0 ) : exec_mod_start = self . executable + ':' after_blank = 0 start_line = 0 # ignore the passed-in arg for now (?) for start_line in range ( start_line , len ( self . output ) ) : # collect top description line = self . output [ start_line ] . rstrip ( ) # Python flags start with 'flags:\n' if ( 'flags:' == line and len ( self . output ) > start_line + 1 and '' == self . output [ start_line + 1 ] . rstrip ( ) ) : start_line += 2 logging . debug ( 'Flags start (python): %s' % line ) return ( start_line , 'python' ) # SWIG flags just have the module name followed by colon. if exec_mod_start == line : logging . debug ( 'Flags start (swig): %s' % line ) return ( start_line , 'python' ) # C++ flags begin after a blank line and with a constant string if after_blank and line . startswith ( ' Flags from ' ) : logging . debug ( 'Flags start (c): %s' % line ) return ( start_line , 'c' ) # java flags begin with a constant string if line == 'where flags are' : logging . debug ( 'Flags start (java): %s' % line ) start_line += 2 # skip "Standard flags:" return ( start_line , 'java' ) logging . debug ( 'Desc: %s' % line ) self . desc . append ( line ) after_blank = ( line == '' ) else : logging . warn ( 'Never found the start of the flags section for "%s"!' % self . long_name ) return ( - 1 , '' ) | Parse the initial description . | 390 | 6 |
22,787 | def ParseCFlags ( self , start_line = 0 ) : modname = None # name of current module modlist = [ ] flag = None for line_num in range ( start_line , len ( self . output ) ) : # collect flags line = self . output [ line_num ] . rstrip ( ) if not line : # blank lines terminate flags if flag : # save last flag modlist . append ( flag ) flag = None continue mobj = self . module_c_re . match ( line ) if mobj : # start of a new module modname = mobj . group ( 1 ) logging . debug ( 'Module: %s' % line ) if flag : modlist . append ( flag ) self . module_list . append ( modname ) self . modules . setdefault ( modname , [ ] ) modlist = self . modules [ modname ] flag = None continue mobj = self . flag_c_re . match ( line ) if mobj : # start of a new flag if flag : # save last flag modlist . append ( flag ) logging . debug ( 'Flag: %s' % line ) flag = Flag ( mobj . group ( 1 ) , mobj . group ( 2 ) ) continue # append to flag help. type and default are part of the main text if flag : flag . help += ' ' + line . strip ( ) else : logging . info ( 'Extra: %s' % line ) if flag : modlist . append ( flag ) | Parse C style flags . | 320 | 6 |
22,788 | def Filter ( self ) : if not self . desc : self . short_desc = '' return for i in range ( len ( self . desc ) ) : # replace full path with name if self . desc [ i ] . find ( self . executable ) >= 0 : self . desc [ i ] = self . desc [ i ] . replace ( self . executable , self . name ) self . short_desc = self . desc [ 0 ] word_list = self . short_desc . split ( ' ' ) all_names = [ self . name , self . short_name , ] # Since the short_desc is always listed right after the name, # trim it from the short_desc while word_list and ( word_list [ 0 ] in all_names or word_list [ 0 ] . lower ( ) in all_names ) : del word_list [ 0 ] self . short_desc = '' # signal need to reconstruct if not self . short_desc and word_list : self . short_desc = ' ' . join ( word_list ) | Filter parsed data to create derived fields . | 224 | 8 |
22,789 | def Output ( self ) : self . Open ( ) self . Header ( ) self . Body ( ) self . Footer ( ) | Output all sections of the page . | 27 | 7 |
22,790 | def GetFlagSuggestions ( attempt , longopt_list ) : # Don't suggest on very short strings, or if no longopts are specified. if len ( attempt ) <= 2 or not longopt_list : return [ ] option_names = [ v . split ( '=' ) [ 0 ] for v in longopt_list ] # Find close approximations in flag prefixes. # This also handles the case where the flag is spelled right but ambiguous. distances = [ ( _DamerauLevenshtein ( attempt , option [ 0 : len ( attempt ) ] ) , option ) for option in option_names ] distances . sort ( key = lambda t : t [ 0 ] ) least_errors , _ = distances [ 0 ] # Don't suggest excessively bad matches. if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len ( attempt ) : return [ ] suggestions = [ ] for errors , name in distances : if errors == least_errors : suggestions . append ( name ) else : break return suggestions | Get helpful similar matches for an invalid flag . | 224 | 9 |
22,791 | def _DamerauLevenshtein ( a , b ) : memo = { } def Distance ( x , y ) : """Recursively defined string distance with memoization.""" if ( x , y ) in memo : return memo [ x , y ] if not x : d = len ( y ) elif not y : d = len ( x ) else : d = min ( Distance ( x [ 1 : ] , y ) + 1 , # correct an insertion error Distance ( x , y [ 1 : ] ) + 1 , # correct a deletion error Distance ( x [ 1 : ] , y [ 1 : ] ) + ( x [ 0 ] != y [ 0 ] ) ) # correct a wrong character if len ( x ) >= 2 and len ( y ) >= 2 and x [ 0 ] == y [ 1 ] and x [ 1 ] == y [ 0 ] : # Correct a transposition. t = Distance ( x [ 2 : ] , y [ 2 : ] ) + 1 if d > t : d = t memo [ x , y ] = d return d return Distance ( a , b ) | Damerau - Levenshtein edit distance from a to b . | 234 | 15 |
22,792 | def FlagDictToArgs ( flag_map ) : for key , value in six . iteritems ( flag_map ) : if value is None : yield '--%s' % key elif isinstance ( value , bool ) : if value : yield '--%s' % key else : yield '--no%s' % key elif isinstance ( value , ( bytes , type ( u'' ) ) ) : # We don't want strings to be handled like python collections. yield '--%s=%s' % ( key , value ) else : # Now we attempt to deal with collections. try : yield '--%s=%s' % ( key , ',' . join ( str ( item ) for item in value ) ) except TypeError : # Default case. yield '--%s=%s' % ( key , value ) | Convert a dict of values into process call parameters . | 182 | 11 |
22,793 | def define_both_methods ( class_name , class_dict , old_name , new_name ) : # pylint: disable=invalid-name assert old_name not in class_dict or new_name not in class_dict , ( 'Class "{}" cannot define both "{}" and "{}" methods.' . format ( class_name , old_name , new_name ) ) if old_name in class_dict : class_dict [ new_name ] = class_dict [ old_name ] elif new_name in class_dict : class_dict [ old_name ] = class_dict [ new_name ] | Function to help CamelCase to PEP8 style class methods migration . | 139 | 14 |
22,794 | def _IsUnparsedFlagAccessAllowed ( self , name ) : if _UNPARSED_FLAG_ACCESS_ENV_NAME in os . environ : # We've been told explicitly what to do. allow_unparsed_flag_access = ( os . getenv ( _UNPARSED_FLAG_ACCESS_ENV_NAME ) == '1' ) elif self . __dict__ [ '__reset_called' ] : # Raise exception if .Reset() was called. This mostly happens in tests. allow_unparsed_flag_access = False elif _helpers . IsRunningTest ( ) : # Staged "rollout", based on name of the flag so that we don't break # everyone. Hashing the flag is a way of choosing a random but # consistent subset of flags to lock down which we can make larger # over time. name_bytes = name . encode ( 'utf8' ) if not isinstance ( name , bytes ) else name flag_percentile = ( struct . unpack ( '<I' , hashlib . md5 ( name_bytes ) . digest ( ) [ : 4 ] ) [ 0 ] % 100 ) allow_unparsed_flag_access = ( _UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile ) else : allow_unparsed_flag_access = True return allow_unparsed_flag_access | Determine whether to allow unparsed flag access or not . | 318 | 14 |
22,795 | def _AssertValidators ( self , validators ) : for validator in sorted ( validators , key = lambda validator : validator . insertion_index ) : try : validator . verify ( self ) except exceptions . ValidationError as e : message = validator . print_flags_with_values ( self ) raise exceptions . IllegalFlagValueError ( '%s: %s' % ( message , str ( e ) ) ) | Assert if all validators in the list are satisfied . | 94 | 12 |
22,796 | def _RemoveAllFlagAppearances ( self , name ) : flag_dict = self . FlagDict ( ) if name not in flag_dict : raise exceptions . UnrecognizedFlagError ( name ) flag = flag_dict [ name ] names_to_remove = { name } names_to_remove . add ( flag . name ) if flag . short_name : names_to_remove . add ( flag . short_name ) for n in names_to_remove : self . __delattr__ ( n ) | Removes flag with name for all appearances . | 110 | 9 |
22,797 | def GetHelp ( self , prefix = '' , include_special_flags = True ) : # TODO(vrusinov): this function needs a test. helplist = [ ] flags_by_module = self . FlagsByModuleDict ( ) if flags_by_module : modules = sorted ( flags_by_module ) # Print the help for the main module first, if possible. main_module = sys . argv [ 0 ] if main_module in modules : modules . remove ( main_module ) modules = [ main_module ] + modules for module in modules : self . __RenderOurModuleFlags ( module , helplist ) if include_special_flags : self . __RenderModuleFlags ( 'gflags' , _helpers . SPECIAL_FLAGS . FlagDict ( ) . values ( ) , helplist ) else : # Just print one long list of flags. values = self . FlagDict ( ) . values ( ) if include_special_flags : values . append ( _helpers . SPECIAL_FLAGS . FlagDict ( ) . values ( ) ) self . __RenderFlagList ( values , helplist , prefix ) return '\n' . join ( helplist ) | Generates a help string for all known flags . | 262 | 10 |
22,798 | def __RenderOurModuleKeyFlags ( self , module , output_lines , prefix = '' ) : key_flags = self . _GetKeyFlagsForModule ( module ) if key_flags : self . __RenderModuleFlags ( module , key_flags , output_lines , prefix ) | Generates a help string for the key flags of a given module . | 60 | 14 |
22,799 | def ModuleHelp ( self , module ) : helplist = [ ] self . __RenderOurModuleKeyFlags ( module , helplist ) return '\n' . join ( helplist ) | Describe the key flags of a module . | 42 | 9 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.