idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
244,600 | def execution_environment ( ) : context = { } context [ 'conf' ] = config ( ) if relation_id ( ) : context [ 'reltype' ] = relation_type ( ) context [ 'relid' ] = relation_id ( ) context [ 'rel' ] = relation_get ( ) context [ 'unit' ] = local_unit ( ) context [ 'rels' ] = relations ( ) context [ 'env' ] = os . environ return context | A convenient bundling of the current execution context | 102 | 9 |
244,601 | def relation_id ( relation_name = None , service_or_unit = None ) : if not relation_name and not service_or_unit : return os . environ . get ( 'JUJU_RELATION_ID' , None ) elif relation_name and service_or_unit : service_name = service_or_unit . split ( '/' ) [ 0 ] for relid in relation_ids ( relation_name ) : remote_service = remote_service_name ( relid ) if remote_service == service_name : return relid else : raise ValueError ( 'Must specify neither or both of relation_name and service_or_unit' ) | The relation ID for the current or a specified relation | 147 | 10 |
244,602 | def principal_unit ( ) : # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT principal_unit = os . environ . get ( 'JUJU_PRINCIPAL_UNIT' , None ) # If it's empty, then this unit is the principal if principal_unit == '' : return os . environ [ 'JUJU_UNIT_NAME' ] elif principal_unit is not None : return principal_unit # For Juju 2.1 and below, let's try work out the principle unit by # the various charms' metadata.yaml. for reltype in relation_types ( ) : for rid in relation_ids ( reltype ) : for unit in related_units ( rid ) : md = _metadata_unit ( unit ) if not md : continue subordinate = md . pop ( 'subordinate' , None ) if not subordinate : return unit return None | Returns the principal unit of this unit otherwise None | 201 | 9 |
244,603 | def relation_get ( attribute = None , unit = None , rid = None ) : _args = [ 'relation-get' , '--format=json' ] if rid : _args . append ( '-r' ) _args . append ( rid ) _args . append ( attribute or '-' ) if unit : _args . append ( unit ) try : return json . loads ( subprocess . check_output ( _args ) . decode ( 'UTF-8' ) ) except ValueError : return None except CalledProcessError as e : if e . returncode == 2 : return None raise | Get relation information | 126 | 3 |
244,604 | def relation_set ( relation_id = None , relation_settings = None , * * kwargs ) : relation_settings = relation_settings if relation_settings else { } relation_cmd_line = [ 'relation-set' ] accepts_file = "--file" in subprocess . check_output ( relation_cmd_line + [ "--help" ] , universal_newlines = True ) if relation_id is not None : relation_cmd_line . extend ( ( '-r' , relation_id ) ) settings = relation_settings . copy ( ) settings . update ( kwargs ) for key , value in settings . items ( ) : # Force value to be a string: it always should, but some call # sites pass in things like dicts or numbers. if value is not None : settings [ key ] = "{}" . format ( value ) if accepts_file : # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from # stdin, but that feature is broken in 1.23.2: Bug #1454678. with tempfile . NamedTemporaryFile ( delete = False ) as settings_file : settings_file . write ( yaml . safe_dump ( settings ) . encode ( "utf-8" ) ) subprocess . check_call ( relation_cmd_line + [ "--file" , settings_file . name ] ) os . remove ( settings_file . name ) else : for key , value in settings . items ( ) : if value is None : relation_cmd_line . append ( '{}=' . format ( key ) ) else : relation_cmd_line . append ( '{}={}' . format ( key , value ) ) subprocess . check_call ( relation_cmd_line ) # Flush cache of any relation-gets for local unit flush ( local_unit ( ) ) | Set relation information for the current unit | 433 | 7 |
244,605 | def relation_clear ( r_id = None ) : settings = relation_get ( rid = r_id , unit = local_unit ( ) ) for setting in settings : if setting not in [ 'public-address' , 'private-address' ] : settings [ setting ] = None relation_set ( relation_id = r_id , * * settings ) | Clears any relation data already set on relation r_id | 77 | 12 |
244,606 | def relation_ids ( reltype = None ) : reltype = reltype or relation_type ( ) relid_cmd_line = [ 'relation-ids' , '--format=json' ] if reltype is not None : relid_cmd_line . append ( reltype ) return json . loads ( subprocess . check_output ( relid_cmd_line ) . decode ( 'UTF-8' ) ) or [ ] return [ ] | A list of relation_ids | 97 | 6 |
244,607 | def related_units ( relid = None ) : relid = relid or relation_id ( ) units_cmd_line = [ 'relation-list' , '--format=json' ] if relid is not None : units_cmd_line . extend ( ( '-r' , relid ) ) return json . loads ( subprocess . check_output ( units_cmd_line ) . decode ( 'UTF-8' ) ) or [ ] | A list of related units | 98 | 5 |
244,608 | def expected_peer_units ( ) : if not has_juju_version ( "2.4.0" ) : # goal-state first appeared in 2.4.0. raise NotImplementedError ( "goal-state" ) _goal_state = goal_state ( ) return ( key for key in _goal_state [ 'units' ] if '/' in key and key != local_unit ( ) ) | Get a generator for units we expect to join peer relation based on goal - state . | 92 | 17 |
244,609 | def expected_related_units ( reltype = None ) : if not has_juju_version ( "2.4.4" ) : # goal-state existed in 2.4.0, but did not list individual units to # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) raise NotImplementedError ( "goal-state relation unit count" ) reltype = reltype or relation_type ( ) _goal_state = goal_state ( ) return ( key for key in _goal_state [ 'relations' ] [ reltype ] if '/' in key ) | Get a generator for units we expect to join relation based on goal - state . | 137 | 16 |
244,610 | def relation_for_unit ( unit = None , rid = None ) : unit = unit or remote_unit ( ) relation = relation_get ( unit = unit , rid = rid ) for key in relation : if key . endswith ( '-list' ) : relation [ key ] = relation [ key ] . split ( ) relation [ '__unit__' ] = unit return relation | Get the json represenation of a unit s relation | 82 | 12 |
244,611 | def relations_for_id ( relid = None ) : relation_data = [ ] relid = relid or relation_ids ( ) for unit in related_units ( relid ) : unit_data = relation_for_unit ( unit , relid ) unit_data [ '__relid__' ] = relid relation_data . append ( unit_data ) return relation_data | Get relations of a specific relation ID | 84 | 7 |
244,612 | def relations_of_type ( reltype = None ) : relation_data = [ ] reltype = reltype or relation_type ( ) for relid in relation_ids ( reltype ) : for relation in relations_for_id ( relid ) : relation [ '__relid__' ] = relid relation_data . append ( relation ) return relation_data | Get relations of a specific type | 79 | 6 |
244,613 | def metadata ( ) : with open ( os . path . join ( charm_dir ( ) , 'metadata.yaml' ) ) as md : return yaml . safe_load ( md ) | Get the current charm metadata . yaml contents as a python object | 41 | 13 |
244,614 | def relation_types ( ) : rel_types = [ ] md = metadata ( ) for key in ( 'provides' , 'requires' , 'peers' ) : section = md . get ( key ) if section : rel_types . extend ( section . keys ( ) ) return rel_types | Get a list of relation types supported by this charm | 64 | 10 |
244,615 | def peer_relation_id ( ) : md = metadata ( ) section = md . get ( 'peers' ) if section : for key in section : relids = relation_ids ( key ) if relids : return relids [ 0 ] return None | Get the peers relation id if a peers relation has been joined else None . | 54 | 15 |
244,616 | def interface_to_relations ( interface_name ) : results = [ ] for role in ( 'provides' , 'requires' , 'peers' ) : results . extend ( role_and_interface_to_relations ( role , interface_name ) ) return results | Given an interface return a list of relation names for the current charm that use that interface . | 58 | 18 |
244,617 | def relations ( ) : rels = { } for reltype in relation_types ( ) : relids = { } for relid in relation_ids ( reltype ) : units = { local_unit ( ) : relation_get ( unit = local_unit ( ) , rid = relid ) } for unit in related_units ( relid ) : reldata = relation_get ( unit = unit , rid = relid ) units [ unit ] = reldata relids [ relid ] = units rels [ reltype ] = relids return rels | Get a nested dictionary of relation data for all related units | 120 | 11 |
244,618 | def _port_op ( op_name , port , protocol = "TCP" ) : _args = [ op_name ] icmp = protocol . upper ( ) == "ICMP" if icmp : _args . append ( protocol ) else : _args . append ( '{}/{}' . format ( port , protocol ) ) try : subprocess . check_call ( _args ) except subprocess . CalledProcessError : # Older Juju pre 2.3 doesn't support ICMP # so treat it as a no-op if it fails. if not icmp : raise | Open or close a service network port | 126 | 7 |
244,619 | def open_ports ( start , end , protocol = "TCP" ) : _args = [ 'open-port' ] _args . append ( '{}-{}/{}' . format ( start , end , protocol ) ) subprocess . check_call ( _args ) | Opens a range of service network ports | 62 | 8 |
244,620 | def unit_get ( attribute ) : _args = [ 'unit-get' , '--format=json' , attribute ] try : return json . loads ( subprocess . check_output ( _args ) . decode ( 'UTF-8' ) ) except ValueError : return None | Get the unit ID for the remote unit | 60 | 8 |
244,621 | def storage_get ( attribute = None , storage_id = None ) : _args = [ 'storage-get' , '--format=json' ] if storage_id : _args . extend ( ( '-s' , storage_id ) ) if attribute : _args . append ( attribute ) try : return json . loads ( subprocess . check_output ( _args ) . decode ( 'UTF-8' ) ) except ValueError : return None | Get storage attributes | 97 | 3 |
244,622 | def storage_list ( storage_name = None ) : _args = [ 'storage-list' , '--format=json' ] if storage_name : _args . append ( storage_name ) try : return json . loads ( subprocess . check_output ( _args ) . decode ( 'UTF-8' ) ) except ValueError : return None except OSError as e : import errno if e . errno == errno . ENOENT : # storage-list does not exist return [ ] raise | List the storage IDs for the unit | 111 | 7 |
244,623 | def charm_dir ( ) : d = os . environ . get ( 'JUJU_CHARM_DIR' ) if d is not None : return d return os . environ . get ( 'CHARM_DIR' ) | Return the root directory of the current charm | 51 | 8 |
244,624 | def action_set ( values ) : cmd = [ 'action-set' ] for k , v in list ( values . items ( ) ) : cmd . append ( '{}={}' . format ( k , v ) ) subprocess . check_call ( cmd ) | Sets the values to be returned after the action finishes | 58 | 11 |
244,625 | def status_set ( workload_state , message ) : valid_states = [ 'maintenance' , 'blocked' , 'waiting' , 'active' ] if workload_state not in valid_states : raise ValueError ( '{!r} is not a valid workload state' . format ( workload_state ) ) cmd = [ 'status-set' , workload_state , message ] try : ret = subprocess . call ( cmd ) if ret == 0 : return except OSError as e : if e . errno != errno . ENOENT : raise log_message = 'status-set failed: {} {}' . format ( workload_state , message ) log ( log_message , level = 'INFO' ) | Set the workload state with a message | 158 | 7 |
244,626 | def status_get ( ) : cmd = [ 'status-get' , "--format=json" , "--include-data" ] try : raw_status = subprocess . check_output ( cmd ) except OSError as e : if e . errno == errno . ENOENT : return ( 'unknown' , "" ) else : raise else : status = json . loads ( raw_status . decode ( "UTF-8" ) ) return ( status [ "status" ] , status [ "message" ] ) | Retrieve the previously set juju workload state and message | 114 | 11 |
244,627 | def application_version_set ( version ) : cmd = [ 'application-version-set' ] cmd . append ( version ) try : subprocess . check_call ( cmd ) except OSError : log ( "Application Version: {}" . format ( version ) ) | Charm authors may trigger this command from any hook to output what version of the application is running . This could be a package version for instance postgres version 9 . 5 . It could also be a build number or version control revision identifier for instance git sha 6fb7ba68 . | 58 | 58 |
244,628 | def payload_register ( ptype , klass , pid ) : cmd = [ 'payload-register' ] for x in [ ptype , klass , pid ] : cmd . append ( x ) subprocess . check_call ( cmd ) | is used while a hook is running to let Juju know that a payload has been started . | 52 | 19 |
244,629 | def resource_get ( name ) : if not name : return False cmd = [ 'resource-get' , name ] try : return subprocess . check_output ( cmd ) . decode ( 'UTF-8' ) except subprocess . CalledProcessError : return False | used to fetch the resource path of the given name . | 56 | 11 |
244,630 | def atstart ( callback , * args , * * kwargs ) : global _atstart _atstart . append ( ( callback , args , kwargs ) ) | Schedule a callback to run before the main hook . | 36 | 11 |
244,631 | def _run_atstart ( ) : global _atstart for callback , args , kwargs in _atstart : callback ( * args , * * kwargs ) del _atstart [ : ] | Hook frameworks must invoke this before running the main hook body . | 44 | 13 |
244,632 | def _run_atexit ( ) : global _atexit for callback , args , kwargs in reversed ( _atexit ) : callback ( * args , * * kwargs ) del _atexit [ : ] | Hook frameworks must invoke this after the main hook body has successfully completed . Do not invoke it if the hook fails . | 47 | 24 |
244,633 | def network_get ( endpoint , relation_id = None ) : if not has_juju_version ( '2.2' ) : raise NotImplementedError ( juju_version ( ) ) # earlier versions require --primary-address if relation_id and not has_juju_version ( '2.3' ) : raise NotImplementedError # 2.3 added the -r option cmd = [ 'network-get' , endpoint , '--format' , 'yaml' ] if relation_id : cmd . append ( '-r' ) cmd . append ( relation_id ) response = subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) . decode ( 'UTF-8' ) . strip ( ) return yaml . safe_load ( response ) | Retrieve the network details for a relation endpoint | 174 | 9 |
244,634 | def add_metric ( * args , * * kwargs ) : _args = [ 'add-metric' ] _kvpairs = [ ] _kvpairs . extend ( args ) _kvpairs . extend ( [ '{}={}' . format ( k , v ) for k , v in kwargs . items ( ) ] ) _args . extend ( sorted ( _kvpairs ) ) try : subprocess . check_call ( _args ) return except EnvironmentError as e : if e . errno != errno . ENOENT : raise log_message = 'add-metric failed: {}' . format ( ' ' . join ( _kvpairs ) ) log ( log_message , level = 'INFO' ) | Add metric values . Values may be expressed with keyword arguments . For metric names containing dashes these may be expressed as one or more key = value positional arguments . May only be called from the collect - metrics hook . | 163 | 43 |
244,635 | def iter_units_for_relation_name ( relation_name ) : RelatedUnit = namedtuple ( 'RelatedUnit' , 'rid, unit' ) for rid in relation_ids ( relation_name ) : for unit in related_units ( rid ) : yield RelatedUnit ( rid , unit ) | Iterate through all units in a relation | 64 | 8 |
244,636 | def ingress_address ( rid = None , unit = None ) : settings = relation_get ( rid = rid , unit = unit ) return ( settings . get ( 'ingress-address' ) or settings . get ( 'private-address' ) ) | Retrieve the ingress - address from a relation when available . Otherwise return the private - address . | 54 | 20 |
244,637 | def egress_subnets ( rid = None , unit = None ) : def _to_range ( addr ) : if re . search ( r'^(?:\d{1,3}\.){3}\d{1,3}$' , addr ) is not None : addr += '/32' elif ':' in addr and '/' not in addr : # IPv6 addr += '/128' return addr settings = relation_get ( rid = rid , unit = unit ) if 'egress-subnets' in settings : return [ n . strip ( ) for n in settings [ 'egress-subnets' ] . split ( ',' ) if n . strip ( ) ] if 'ingress-address' in settings : return [ _to_range ( settings [ 'ingress-address' ] ) ] if 'private-address' in settings : return [ _to_range ( settings [ 'private-address' ] ) ] return [ ] | Retrieve the egress - subnets from a relation . | 204 | 12 |
244,638 | def unit_doomed ( unit = None ) : if not has_juju_version ( "2.4.1" ) : # We cannot risk blindly returning False for 'we don't know', # because that could cause data loss; if call sites don't # need an accurate answer, they likely don't need this helper # at all. # goal-state existed in 2.4.0, but did not handle removals # correctly until 2.4.1. raise NotImplementedError ( "is_doomed" ) if unit is None : unit = local_unit ( ) gs = goal_state ( ) units = gs . get ( 'units' , { } ) if unit not in units : return True # I don't think 'dead' units ever show up in the goal-state, but # check anyway in addition to 'dying'. return units [ unit ] [ 'status' ] in ( 'dying' , 'dead' ) | Determines if the unit is being removed from the model | 209 | 12 |
244,639 | def env_proxy_settings ( selected_settings = None ) : SUPPORTED_SETTINGS = { 'http' : 'HTTP_PROXY' , 'https' : 'HTTPS_PROXY' , 'no_proxy' : 'NO_PROXY' , 'ftp' : 'FTP_PROXY' } if selected_settings is None : selected_settings = SUPPORTED_SETTINGS selected_vars = [ v for k , v in SUPPORTED_SETTINGS . items ( ) if k in selected_settings ] proxy_settings = { } for var in selected_vars : var_val = os . getenv ( var ) if var_val : proxy_settings [ var ] = var_val proxy_settings [ var . lower ( ) ] = var_val # Now handle juju-prefixed environment variables. The legacy vs new # environment variable usage is mutually exclusive charm_var_val = os . getenv ( 'JUJU_CHARM_{}' . format ( var ) ) if charm_var_val : proxy_settings [ var ] = charm_var_val proxy_settings [ var . lower ( ) ] = charm_var_val if 'no_proxy' in proxy_settings : if _contains_range ( proxy_settings [ 'no_proxy' ] ) : log ( RANGE_WARNING , level = WARNING ) return proxy_settings if proxy_settings else None | Get proxy settings from process environment variables . | 307 | 8 |
244,640 | def load_previous ( self , path = None ) : self . path = path or self . path with open ( self . path ) as f : try : self . _prev_dict = json . load ( f ) except ValueError as e : log ( 'Unable to parse previous config data - {}' . format ( str ( e ) ) , level = ERROR ) for k , v in copy . deepcopy ( self . _prev_dict ) . items ( ) : if k not in self : self [ k ] = v | Load previous copy of config from disk . | 113 | 8 |
244,641 | def changed ( self , key ) : if self . _prev_dict is None : return True return self . previous ( key ) != self . get ( key ) | Return True if the current value for this key is different from the previous value . | 34 | 16 |
244,642 | def save ( self ) : with open ( self . path , 'w' ) as f : os . fchmod ( f . fileno ( ) , 0o600 ) json . dump ( self , f ) | Save this config to disk . | 45 | 6 |
244,643 | def hook ( self , * hook_names ) : def wrapper ( decorated ) : for hook_name in hook_names : self . register ( hook_name , decorated ) else : self . register ( decorated . __name__ , decorated ) if '_' in decorated . __name__ : self . register ( decorated . __name__ . replace ( '_' , '-' ) , decorated ) return decorated return wrapper | Decorator registering them as hooks | 87 | 7 |
244,644 | def shutdown ( self ) : sys . stdout = self . old_stdout sys . stdin = self . old_stdin self . skt . close ( ) self . set_continue ( ) | Revert stdin and stdout close the socket . | 43 | 12 |
244,645 | def start ( ) : action_set ( 'meta.start' , time . strftime ( '%Y-%m-%dT%H:%M:%SZ' ) ) COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' if os . path . exists ( COLLECT_PROFILE_DATA ) : subprocess . check_output ( [ COLLECT_PROFILE_DATA ] ) | If the collectd charm is also installed tell it to send a snapshot of the current profile data . | 96 | 20 |
244,646 | def get_os_codename_install_source ( src ) : ubuntu_rel = lsb_release ( ) [ 'DISTRIB_CODENAME' ] rel = '' if src is None : return rel if src in [ 'distro' , 'distro-proposed' , 'proposed' ] : try : rel = UBUNTU_OPENSTACK_RELEASE [ ubuntu_rel ] except KeyError : e = 'Could not derive openstack release for ' 'this Ubuntu release: %s' % ubuntu_rel error_out ( e ) return rel if src . startswith ( 'cloud:' ) : ca_rel = src . split ( ':' ) [ 1 ] ca_rel = ca_rel . split ( '-' ) [ 1 ] . split ( '/' ) [ 0 ] return ca_rel # Best guess match based on deb string provided if ( src . startswith ( 'deb' ) or src . startswith ( 'ppa' ) or src . startswith ( 'snap' ) ) : for v in OPENSTACK_CODENAMES . values ( ) : if v in src : return v | Derive OpenStack release codename from a given installation source . | 253 | 13 |
244,647 | def get_os_version_codename ( codename , version_map = OPENSTACK_CODENAMES ) : for k , v in six . iteritems ( version_map ) : if v == codename : return k e = 'Could not derive OpenStack version for ' 'codename: %s' % codename error_out ( e ) | Determine OpenStack version number from codename . | 77 | 11 |
244,648 | def get_os_version_codename_swift ( codename ) : for k , v in six . iteritems ( SWIFT_CODENAMES ) : if k == codename : return v [ - 1 ] e = 'Could not derive swift version for ' 'codename: %s' % codename error_out ( e ) | Determine OpenStack version number of swift from codename . | 74 | 13 |
244,649 | def get_swift_codename ( version ) : codenames = [ k for k , v in six . iteritems ( SWIFT_CODENAMES ) if version in v ] if len ( codenames ) > 1 : # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. for codename in reversed ( codenames ) : releases = UBUNTU_OPENSTACK_RELEASE release = [ k for k , v in six . iteritems ( releases ) if codename in v ] ret = subprocess . check_output ( [ 'apt-cache' , 'policy' , 'swift' ] ) if six . PY3 : ret = ret . decode ( 'UTF-8' ) if codename in ret or release [ 0 ] in ret : return codename elif len ( codenames ) == 1 : return codenames [ 0 ] # NOTE: fallback - attempt to match with just major.minor version match = re . match ( r'^(\d+)\.(\d+)' , version ) if match : major_minor_version = match . group ( 0 ) for codename , versions in six . iteritems ( SWIFT_CODENAMES ) : for release_version in versions : if release_version . startswith ( major_minor_version ) : return codename return None | Determine OpenStack codename that corresponds to swift version . | 306 | 13 |
244,650 | def get_os_codename_package ( package , fatal = True ) : if snap_install_requested ( ) : cmd = [ 'snap' , 'list' , package ] try : out = subprocess . check_output ( cmd ) if six . PY3 : out = out . decode ( 'UTF-8' ) except subprocess . CalledProcessError : return None lines = out . split ( '\n' ) for line in lines : if package in line : # Second item in list is Version return line . split ( ) [ 1 ] import apt_pkg as apt cache = apt_cache ( ) try : pkg = cache [ package ] except Exception : if not fatal : return None # the package is unknown to the current apt cache. e = 'Could not determine version of package with no installation ' 'candidate: %s' % package error_out ( e ) if not pkg . current_ver : if not fatal : return None # package is known, but no version is currently installed. e = 'Could not determine version of uninstalled package: %s' % package error_out ( e ) vers = apt . upstream_version ( pkg . current_ver . ver_str ) if 'swift' in pkg . name : # Fully x.y.z match for swift versions match = re . match ( r'^(\d+)\.(\d+)\.(\d+)' , vers ) else : # x.y match only for 20XX.X # and ignore patch level for other packages match = re . match ( r'^(\d+)\.(\d+)' , vers ) if match : vers = match . group ( 0 ) # Generate a major version number for newer semantic # versions of openstack projects major_vers = vers . split ( '.' ) [ 0 ] # >= Liberty independent project versions if ( package in PACKAGE_CODENAMES and major_vers in PACKAGE_CODENAMES [ package ] ) : return PACKAGE_CODENAMES [ package ] [ major_vers ] else : # < Liberty co-ordinated project versions try : if 'swift' in pkg . name : return get_swift_codename ( vers ) else : return OPENSTACK_CODENAMES [ vers ] except KeyError : if not fatal : return None e = 'Could not determine OpenStack codename for version %s' % vers error_out ( e ) | Derive OpenStack release codename from an installed package . | 521 | 12 |
244,651 | def get_os_version_package ( pkg , fatal = True ) : codename = get_os_codename_package ( pkg , fatal = fatal ) if not codename : return None if 'swift' in pkg : vers_map = SWIFT_CODENAMES for cname , version in six . iteritems ( vers_map ) : if cname == codename : return version [ - 1 ] else : vers_map = OPENSTACK_CODENAMES for version , cname in six . iteritems ( vers_map ) : if cname == codename : return version | Derive OpenStack version number from an installed package . | 131 | 11 |
244,652 | def os_release ( package , base = 'essex' , reset_cache = False ) : global _os_rel if reset_cache : reset_os_release ( ) if _os_rel : return _os_rel _os_rel = ( get_os_codename_package ( package , fatal = False ) or get_os_codename_install_source ( config ( 'openstack-origin' ) ) or base ) return _os_rel | Returns OpenStack release codename from a cached global . | 100 | 11 |
244,653 | def import_key ( keyid ) : try : return fetch_import_key ( keyid ) except GPGKeyError as e : error_out ( "Could not import key: {}" . format ( str ( e ) ) ) | Import a key either ASCII armored or a GPG key id . | 50 | 13 |
244,654 | def get_source_and_pgp_key ( source_and_key ) : try : source , key = source_and_key . split ( '|' , 2 ) return source , key or None except ValueError : return source_and_key , None | Look for a pgp key ID or ascii - armor key in the given input . | 57 | 19 |
244,655 | def configure_installation_source ( source_plus_key ) : if source_plus_key . startswith ( 'snap' ) : # Do nothing for snap installs return # extract the key if there is one, denoted by a '|' in the rel source , key = get_source_and_pgp_key ( source_plus_key ) # handle the ordinary sources via add_source try : fetch_add_source ( source , key , fail_invalid = True ) except SourceConfigError as se : error_out ( str ( se ) ) | Configure an installation source . | 122 | 6 |
244,656 | def config_value_changed ( option ) : hook_data = unitdata . HookData ( ) with hook_data ( ) : db = unitdata . kv ( ) current = config ( option ) saved = db . get ( option ) db . set ( option , current ) if saved is None : return False return current != saved | Determine if config value changed since last call to this function . | 70 | 14 |
244,657 | def save_script_rc ( script_path = "scripts/scriptrc" , * * env_vars ) : juju_rc_path = "%s/%s" % ( charm_dir ( ) , script_path ) if not os . path . exists ( os . path . dirname ( juju_rc_path ) ) : os . mkdir ( os . path . dirname ( juju_rc_path ) ) with open ( juju_rc_path , 'wt' ) as rc_script : rc_script . write ( "#!/bin/bash\n" ) [ rc_script . write ( 'export %s=%s\n' % ( u , p ) ) for u , p in six . iteritems ( env_vars ) if u != "script_path" ] | Write an rc file in the charm - delivered directory containing exported environment variables provided by env_vars . Any charm scripts run outside the juju hook environment can source this scriptrc to obtain updated config information necessary to perform health checks or service changes . | 177 | 50 |
244,658 | def openstack_upgrade_available ( package ) : import apt_pkg as apt src = config ( 'openstack-origin' ) cur_vers = get_os_version_package ( package ) if not cur_vers : # The package has not been installed yet do not attempt upgrade return False if "swift" in package : codename = get_os_codename_install_source ( src ) avail_vers = get_os_version_codename_swift ( codename ) else : avail_vers = get_os_version_install_source ( src ) apt . init ( ) return apt . version_compare ( avail_vers , cur_vers ) >= 1 | Determines if an OpenStack upgrade is available from installation source based on version of installed package . | 147 | 20 |
244,659 | def ensure_block_device ( block_device ) : _none = [ 'None' , 'none' , None ] if ( block_device in _none ) : error_out ( 'prepare_storage(): Missing required input: block_device=%s.' % block_device ) if block_device . startswith ( '/dev/' ) : bdev = block_device elif block_device . startswith ( '/' ) : _bd = block_device . split ( '|' ) if len ( _bd ) == 2 : bdev , size = _bd else : bdev = block_device size = DEFAULT_LOOPBACK_SIZE bdev = ensure_loopback_device ( bdev , size ) else : bdev = '/dev/%s' % block_device if not is_block_device ( bdev ) : error_out ( 'Failed to locate valid block device at %s' % bdev ) return bdev | Confirm block_device create as loopback if necessary . | 209 | 12 |
244,660 | def os_requires_version ( ostack_release , pkg ) : def wrap ( f ) : @ wraps ( f ) def wrapped_f ( * args ) : if os_release ( pkg ) < ostack_release : raise Exception ( "This hook is not supported on releases" " before %s" % ostack_release ) f ( * args ) return wrapped_f return wrap | Decorator for hook to specify minimum supported release | 84 | 10 |
244,661 | def os_workload_status ( configs , required_interfaces , charm_func = None ) : def wrap ( f ) : @ wraps ( f ) def wrapped_f ( * args , * * kwargs ) : # Run the original function first f ( * args , * * kwargs ) # Set workload status now that contexts have been # acted on set_os_workload_status ( configs , required_interfaces , charm_func ) return wrapped_f return wrap | Decorator to set workload status based on complete contexts | 105 | 11 |
244,662 | def set_os_workload_status ( configs , required_interfaces , charm_func = None , services = None , ports = None ) : state , message = _determine_os_workload_status ( configs , required_interfaces , charm_func , services , ports ) status_set ( state , message ) | Set the state of the workload status for the charm . | 73 | 11 |
244,663 | def _determine_os_workload_status ( configs , required_interfaces , charm_func = None , services = None , ports = None ) : state , message = _ows_check_if_paused ( services , ports ) if state is None : state , message = _ows_check_generic_interfaces ( configs , required_interfaces ) if state != 'maintenance' and charm_func : # _ows_check_charm_func() may modify the state, message state , message = _ows_check_charm_func ( state , message , lambda : charm_func ( configs ) ) if state is None : state , message = _ows_check_services_running ( services , ports ) if state is None : state = 'active' message = "Unit is ready" juju_log ( message , 'INFO' ) return state , message | Determine the state of the workload status for the charm . | 192 | 13 |
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Check the complete contexts to determine the workload status.

    For each generic interface with incomplete relation data, classify it
    as either missing entirely ('blocked') or related-but-incomplete
    ('waiting'), and build a human-readable status message.

    :param configs: config context object with complete_contexts()
    :param required_interfaces: {generic: [specific interfaces]} mapping
    :returns: (state, message) - both None when everything is complete
    """
    incomplete_rel_data = incomplete_relation_data(configs,
                                                   required_interfaces)
    state = None
    message = None
    missing_relations = set()
    incomplete_relations = set()
    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        # No relation ID for the generic_interface?
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case - relation ID exists but departing
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            # 'blocked' always wins over 'waiting' for the final state.
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'
    return state, message
def _ows_check_services_running(services, ports):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    :param services: services spec (string, list or dict forms accepted by
        _extract_services_list_helper) or None to skip service checks
    :param ports: list of port numbers that must have listeners, or None
    :returns: (state, message) - ('blocked', msg) on failure, else
        (None, None)
    """
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}"
                .format(", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}"
                .format(message))
            state = 'blocked'
    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
                .format(", ".join([str(p) for p, v in ports_open
                                   if not v])))
            state = 'blocked'
    if state is not None:
        message = "; ".join(messages)
        return state, message
    return None, None
def _check_listening_on_ports_list(ports):
    """Check that the ports list given are being listened to.

    :param ports: iterable of port numbers to check on 0.0.0.0
    :returns: ([(port, is_open), ...], [is_open, ...]) tuple

    Fix: the original returned a bare ``zip()`` object, which on Python 3
    is a one-shot iterator and is silently empty if iterated twice; the
    pairs are now materialized into a list so repeated iteration is safe.
    """
    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
    return list(zip(ports, ports_open)), ports_open
def workload_state_compare(current_workload_state, workload_state):
    """Return the higher-priority of two workload states.

    Priority (lowest to highest): unknown < active < maintenance <
    waiting < blocked.  Any unrecognised state is treated as 'unknown'.

    :param current_workload_state: str current state
    :param workload_state: str candidate new state
    :returns: str whichever state has the higher priority
    """
    hierarchy = {
        'unknown': -1,
        'active': 0,
        'maintenance': 1,
        'waiting': 2,
        'blocked': 3,
    }
    # Normalise anything outside the hierarchy to the lowest priority.
    if workload_state not in hierarchy:
        workload_state = 'unknown'
    if current_workload_state not in hierarchy:
        current_workload_state = 'unknown'
    # On a tie the candidate state wins, matching the original behaviour.
    if hierarchy[current_workload_state] > hierarchy[workload_state]:
        return current_workload_state
    return workload_state
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces.

    Return dictionary of incomplete relation data.

    :param configs: object exposing complete_contexts() and
        get_incomplete_context_data(interfaces)
    :param required_interfaces: {generic: [specific interfaces]} mapping
    :returns: {generic: incomplete-context-data} for every generic
        interface with no complete specific interface
    """
    complete = set(configs.complete_contexts())
    # A generic interface is incomplete when none of its specific
    # interfaces appear in the complete contexts.
    return {
        svc_type: configs.get_incomplete_context_data(interfaces)
        for svc_type, interfaces in required_interfaces.items()
        if not complete.intersection(interfaces)
    }
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades only proceed when a new release is available for ``package``
    and the 'action-managed-upgrade' config option is set; the outcome is
    reported back through action_set/action_fail.

    :param package: str package used to detect an available upgrade
    :param upgrade_callback: callable(configs=...) performing the upgrade
    :param configs: config renderer handed to the callback
    :returns: bool True only when the upgrade completed successfully
    """
    if not openstack_upgrade_available(package):
        action_set({'outcome': 'no upgrade available.'})
        return False
    if not config('action-managed-upgrade'):
        action_set({'outcome': 'action-managed-upgrade config is '
                               'False, skipped upgrade.'})
        return False
    juju_log('Upgrading OpenStack release')
    try:
        upgrade_callback(configs=configs)
        action_set({'outcome': 'success, upgrade completed.'})
        return True
    except Exception:
        action_set({'outcome': 'upgrade failed, see traceback.'})
        action_set({'traceback': traceback.format_exc()})
        action_fail('do_openstack_upgrade resulted in an '
                    'unexpected error')
        return False
def manage_payload_services(action, services=None, charm_func=None):
    """Run an action against all services.

    An optional charm_func() can be called. It should raise an Exception
    to indicate that the function failed. If it was successful it should
    return None or an optional message.

    :param action: one of 'pause', 'resume', 'start', 'stop'
    :param services: services spec accepted by _extract_services_list_helper
    :param charm_func: optional callable run after the services
    :returns: (success, messages) tuple
    :raises RuntimeError: if action is not one of the known actions
    """
    actions = {
        'pause': service_pause,
        'resume': service_resume,
        'start': service_start,
        'stop': service_stop,
    }
    action = action.lower()
    if action not in actions:
        raise RuntimeError("action: {} must be one of: {}".format(
            action, ', '.join(actions.keys())))
    services = _extract_services_list_helper(services)
    messages = []
    success = True
    if services:
        for name in services.keys():
            if not actions[action](name):
                success = False
                messages.append(
                    "{} didn't {} cleanly.".format(name, action))
    if charm_func:
        try:
            result = charm_func()
            if result:
                messages.append(result)
        except Exception as e:
            success = False
            messages.append(str(e))
    return success, messages
def pausable_restart_on_change(restart_map, stopstart=False,
                               restart_functions=None):
    """A restart_on_change decorator that checks to see if the unit is
    paused.  If it is paused then the decorated function doesn't fire.

    The restart_map may be a callable; it is then evaluated lazily on the
    first non-paused invocation and the result cached for later calls.

    :param restart_map: {file: [service, ...]} map, or a callable
        returning one
    :param stopstart: whether to stop/start services rather than restart
    :param restart_functions: optional per-service restart overrides
    :returns: decorator for the hook function
    """
    def wrap(f):
        # py27 compatible nonlocal variable.  When py3 only, replace with
        # nonlocal keyword
        __restart_map_cache = {'cache': None}

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            # Paused unit: run the function but skip restart handling.
            if is_unit_paused_set():
                return f(*args, **kwargs)
            if __restart_map_cache['cache'] is None:
                __restart_map_cache['cache'] = \
                    restart_map() if callable(restart_map) else restart_map
            # otherwise, normal restart_on_change functionality
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)),
                __restart_map_cache['cache'],
                stopstart,
                restart_functions)
        return wrapped_f
    return wrap
def ordered(orderme):
    """Converts the provided dictionary into a collections.OrderedDict.

    The items in the returned OrderedDict will be inserted based on the
    natural sort order of the keys.  Nested dictionaries are ordered
    recursively.

    :param orderme: the dict to order
    :returns: collections.OrderedDict with sorted keys
    :raises ValueError: when the argument is not a dict

    Note: six.iteritems() replaced with dict.items(), which behaves
    identically under sorted() on both Python 2 and 3 and drops the
    third-party dependency from this function.
    """
    if not isinstance(orderme, dict):
        raise ValueError('argument must be a dict type')

    result = OrderedDict()
    for k, v in sorted(orderme.items(), key=lambda x: x[0]):
        result[k] = ordered(v) if isinstance(v, dict) else v
    return result
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    Accepts either inline YAML (detected when a ':' appears before any
    '=') or a comma-separated key=value string where values themselves
    may contain commas.

    :param config_flags: str raw flags string
    :returns: OrderedDict of parsed key/value pairs
    :raises OSContextError: on malformed key=value input
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates
    # assignment for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return ordered(yaml.safe_load(config_flags))

    if config_flags.find('==') >= 0:
        juju_log("config_flags is not in expected format (key=value)",
                 level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = OrderedDict()
    for i in range(0, limit - 1):
        current = split[i]
        next = split[i + 1]
        vindex = next.rfind(',')
        # Last pair, or no trailing comma: the whole segment is the value;
        # otherwise the text after the final comma belongs to the next key.
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                juju_log("Invalid config value(s) at index %s" % (i),
                         level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
    return flags
def os_application_version_set(package):
    """Set version of application for Juju 2.0 and later.

    :param package: str package whose installed version is reported
    """
    application_version = get_upstream_version(package)
    # NOTE(jamespage) if not able to figure out package version, fallback
    # to openstack codename version detection.
    if application_version:
        application_version_set(application_version)
    else:
        application_version_set(os_release(package))
def enable_memcache(source=None, release=None, package=None):
    """Determine if memcache should be enabled on the local unit.

    Memcache is enabled from the 'mitaka' release onwards.  The release
    is taken from the explicit ``release`` argument first, then derived
    from the installed ``package``, and finally from the install
    ``source``.

    :param source: install source (e.g. 'cloud:xenial-newton')
    :param release: explicit OpenStack release name
    :param package: package used to detect the release
    :returns: bool True when the release is >= mitaka
    """
    _release = release if release else os_release(package, base='icehouse')
    if not _release:
        _release = get_os_codename_install_source(source)
    return CompareOpenStackReleases(_release) >= 'mitaka'
def token_cache_pkgs(source=None, release=None):
    """Determine additional packages needed for token caching.

    :param source: install source forwarded to enable_memcache
    :param release: OpenStack release forwarded to enable_memcache
    :returns: list of extra package names (empty when caching disabled)
    """
    if enable_memcache(source=source, release=release):
        return ['memcached', 'python-memcache']
    return []
def snap_install_requested():
    """Determine if installing from snaps.

    If openstack-origin is of the form snap:track/channel[/branch]
    and channel is in SNAP_CHANNELS, return True.

    :returns: bool True when a valid snap origin is configured
    """
    origin = config('openstack-origin') or ""
    if not origin.startswith('snap:'):
        return False
    _src = origin[5:]
    # A bare 'snap:track' with no '/' implies the 'stable' channel.
    channel = _src.split('/')[1] if '/' in _src else 'stable'
    return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
    """Generate a dictionary of snap install information from origin.

    :param snaps: iterable of snap names
    :param src: origin string; must start with 'snap:'
    :param mode: snap install mode flag (e.g. 'classic', 'jailmode')
    :returns: {snap: {'channel': '--channel=...', 'mode': mode}} or {}
        when src is not a snap origin
    """
    if not src.startswith('snap:'):
        juju_log("Snap source is not a snap origin", 'WARN')
        return {}

    channel = '--channel={}'.format(src[5:])
    return {snap: {'channel': channel, 'mode': mode} for snap in snaps}
def install_os_snaps(snaps, refresh=False):
    """Install OpenStack snaps from channel and with mode.

    :param snaps: {snap: {'channel': ..., 'mode': ...}} as produced by
        get_snaps_install_info_from_origin
    :param refresh: refresh already-installed snaps instead of installing
    """
    def _ensure_flag(flag):
        # Accept either a bare value ('classic') or a ready flag
        # ('--classic').
        if flag.startswith('--'):
            return flag
        return '--{}'.format(flag)

    operation = snap_refresh if refresh else snap_install
    for name in snaps.keys():
        operation(name,
                  _ensure_flag(snaps[name]['channel']),
                  _ensure_flag(snaps[name]['mode']))
def series_upgrade_complete(resume_unit_helper=None, configs=None):
    """Run common series upgrade complete tasks.

    Clears the paused and upgrading unit flags, then (when configs are
    supplied) rewrites all configuration files and optionally resumes
    the unit.

    :param resume_unit_helper: optional callable(configs) to resume unit
    :param configs: optional config renderer with write_all()
    """
    clear_unit_paused()
    clear_unit_upgrading()
    if configs:
        configs.write_all()
        if resume_unit_helper:
            resume_unit_helper(configs)
def get_certificate_request(json_encode=True):
    """Generate a certificate request based on the network configuration.

    Builds a CertRequest containing the unit hostname plus one entry per
    endpoint type (internal/admin/public) for which an os-hostname
    override is configured; addresses without an override are folded
    into the hostname entry's SAN list instead.

    :param json_encode: JSON-encode the request payload
    :returns: dict certificate request suitable for the relation
    """
    req = CertRequest(json_encode=json_encode)
    req.add_hostname_cn()
    # Add os-hostname entries
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        net_config = config(ADDRESS_MAP[net_type]['override'])
        try:
            net_addr = resolve_address(endpoint_type=net_type)
            ip = network_get_primary_address(
                ADDRESS_MAP[net_type]['binding'])
            addresses = [net_addr, ip]
            vip = get_vip_in_network(resolve_network_cidr(ip))
            if vip:
                addresses.append(vip)
            if net_config:
                req.add_entry(
                    net_type,
                    net_config,
                    addresses)
            else:
                # There is network address with no corresponding hostname.
                # Add the ip to the hostname cert to allow for this.
                req.add_hostname_cn_ip(addresses)
        except NoNetworkBinding:
            log("Skipping request for certificate for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    return req.get_request()
def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
    """Create symlinks for SAN records.

    For each endpoint address (and optional custom hostname) that lacks
    its own cert/key pair, symlink it to the unit hostname's cert/key so
    address-based lookups resolve to the hostname certificate.

    :param ssl_dir: str directory holding cert_*/key_* files
    :param custom_hostname_link: optional extra hostname to link
    """
    hostname = get_hostname(unit_get('private-address'))
    hostname_cert = os.path.join(
        ssl_dir,
        'cert_{}'.format(hostname))
    hostname_key = os.path.join(
        ssl_dir,
        'key_{}'.format(hostname))
    # Add links to hostname cert, used if os-hostname vars not set
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        try:
            addr = resolve_address(endpoint_type=net_type)
            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
            # Only link when the hostname cert exists and the address cert
            # does not; the key link follows the cert's guard.
            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
                os.symlink(hostname_cert, cert)
                os.symlink(hostname_key, key)
        except NoNetworkBinding:
            log("Skipping creating cert symlink for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    if custom_hostname_link:
        custom_cert = os.path.join(
            ssl_dir,
            'cert_{}'.format(custom_hostname_link))
        custom_key = os.path.join(
            ssl_dir,
            'key_{}'.format(custom_hostname_link))
        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
            os.symlink(hostname_cert, custom_cert)
            os.symlink(hostname_key, custom_key)
def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
    """Install the certs passed into the ssl dir and append the chain if
    provided.

    :param ssl_dir: str destination directory
    :param certs: {cn: {'cert': ..., 'key': ...}} bundles to write
    :param chain: optional CA chain appended to each certificate
    :param user: owner of the written files
    :param group: group of the written files
    """
    for cn, bundle in certs.items():
        cert_data = bundle['cert']
        if chain:
            # Append chain file so that clients that trust the root CA will
            # trust certs signed by an intermediate in the chain
            cert_data = cert_data + os.linesep + chain
        write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
                   owner=user, group=group,
                   content=cert_data, perms=0o640)
        write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
                   owner=user, group=group,
                   content=bundle['key'], perms=0o640)
def process_certificates(service_name, relation_id, unit,
                         custom_hostname_link=None, user='root',
                         group='root'):
    """Process the certificates supplied down the relation.

    :param service_name: str name used for the ssl directory
    :param relation_id: str certificates relation id
    :param unit: str remote unit supplying the data
    :param custom_hostname_link: optional extra hostname symlink
    :param user: owner for installed files
    :param group: group for installed files
    :returns: bool True when certificates were found and installed
    """
    data = relation_get(rid=relation_id, unit=unit)
    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
    mkdir(path=ssl_dir)
    name = local_unit().replace('/', '_')
    certs = data.get('{}.processed_requests'.format(name))
    chain = data.get('chain')
    ca = data.get('ca')
    if certs:
        certs = json.loads(certs)
        # NOTE(review): assumes 'ca' is always present alongside the
        # processed requests -- ca.encode() raises if it is missing; confirm
        # against the certificates interface.
        install_ca_cert(ca.encode())
        install_certs(ssl_dir, certs, chain, user=user, group=group)
        create_ip_cert_links(
            ssl_dir,
            custom_hostname_link=custom_hostname_link)
        return True
    return False
def get_requests_for_local_unit(relation_name=None):
    """Extract any certificates data targeted at this unit down
    relation_name.

    :param relation_name: str relation to inspect (default 'certificates')
    :returns: list of {'ca': ..., 'chain': ..., 'certs': {...}} bundles
    """
    local_name = local_unit().replace('/', '_')
    raw_certs_key = '{}.processed_requests'.format(local_name)
    relation_name = relation_name or 'certificates'
    bundles = []
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            data = relation_get(rid=rid, unit=unit)
            raw_certs = data.get(raw_certs_key)
            if not raw_certs:
                continue
            bundles.append({
                'ca': data['ca'],
                'chain': data.get('chain'),
                'certs': json.loads(raw_certs),
            })
    return bundles
def get_bundle_for_cn(cn, relation_name=None):
    """Extract certificates for the given cn.

    :param cn: str canonical name to look up
    :param relation_name: str relation to inspect (default 'certificates')
    :returns: {'cert', 'key', 'chain', 'ca'} dict, or {} when not found
    """
    for entry in get_requests_for_local_unit(relation_name):
        if cn in entry['certs']:
            bundle = entry['certs'][cn]
            # First match wins, mirroring the double-break in the
            # original implementation.
            return {
                'cert': bundle['cert'],
                'key': bundle['key'],
                'chain': entry['chain'],
                'ca': entry['ca'],
            }
    return {}
def add_entry(self, net_type, cn, addresses):
    """Add a request to the batch.

    Note: ``net_type`` is accepted but not recorded in the entry; only
    the CN and its SAN addresses are stored.

    :param cn: str Canonical Name for the certificate
    :param addresses: list of addresses used as SANs
    """
    entry = {'cn': cn, 'addresses': addresses}
    self.entries.append(entry)
def add_hostname_cn(self):
    """Add a request for the hostname of the machine.

    The unit's private address is always a SAN.  If a vip is being used
    without os-hostname config or network spaces then we need to ensure
    the local unit's cert has the appropriate vip in the SAN list.
    """
    private_ip = unit_get('private-address')
    san_list = [private_ip]
    vip = get_vip_in_network(resolve_network_cidr(private_ip))
    if vip:
        san_list.append(vip)
    self.hostname_entry = {
        'cn': get_hostname(private_ip),
        'addresses': san_list,
    }
def add_hostname_cn_ip(self, addresses):
    """Add addresses to the SAN list for the hostname request.

    Each address is appended only if not already present, preserving
    insertion order.

    :param addresses: list of addresses to merge in
    """
    existing = self.hostname_entry['addresses']
    for address in addresses:
        if address not in existing:
            existing.append(address)
def get_request(self):
    """Generate request from the batched up entries.

    The hostname entry (when present) is appended to the batch first.
    Each CN maps to a de-duplicated, sorted SAN list; the payload is
    JSON-encoded when ``self.json_encode`` is set.

    :returns: {'cert_requests': ...} dict
    """
    if self.hostname_entry:
        self.entries.append(self.hostname_entry)
    request = {
        entry['cn']: {'sans': sorted(set(entry['addresses']))}
        for entry in self.entries
    }
    if self.json_encode:
        return {'cert_requests': json.dumps(request, sort_keys=True)}
    return {'cert_requests': request}
def get_audits():
    """Get OS hardening sysctl audits.

    :returns: list of audit objects to apply
    """
    settings = utils.get_settings('os')
    audits = [
        # Apply the sysctl settings which are configured to be applied.
        SysctlConf(),
        # Make sure that only root has access to the sysctl.conf file, and
        # that it is read-only.
        FilePermissionAudit('/etc/sysctl.conf',
                            user='root',
                            group='root', mode=0o0440),
    ]
    # If module loading is not enabled, then ensure that the modules
    # file has the appropriate permissions and rebuild the initramfs
    if not settings['security']['kernel_enable_module_loading']:
        audits.append(ModulesTemplate())
    return audits
def _stat(file):
    """Get the Ownership information from a file.

    Shells out to stat(1) for user, group and octal mode.

    :param file: path to the file to stat
    :returns: Ownership(owner, group, mode) tuple of strings
    """
    fields = subprocess.check_output(
        ['stat', '-c', '%U %G %a', file]).decode('utf-8').strip().split(' ')
    return Ownership(*fields)
244,693 | def _config_ini ( path ) : conf = configparser . ConfigParser ( ) conf . read ( path ) return dict ( conf ) | Parse an ini file | 29 | 6 |
def _validate_file_mode(mode, file_name, optional=False):
    """Validate that a specified file has the specified permissions.

    :param mode: str octal mode the file must have (e.g. '600')
    :param file_name: path to the file to check
    :param optional: when truthy, a missing file is tolerated

    Fix: the original fell through after a stat failure on an optional
    file and raised NameError on the unbound 'ownership'; we now return
    early since there is nothing to validate for an absent optional file.
    """
    try:
        ownership = _stat(file_name)
    except subprocess.CalledProcessError as e:
        print("Error reading file: {}".format(e))
        if not optional:
            assert False, "Specified file does not exist: {}".format(file_name)
        return
    assert mode == ownership.mode, \
        "{} has an incorrect mode: {} should be {}".format(
            file_name, ownership.mode, mode)
    print("Validate mode of {}: PASS".format(file_name))
def _config_section(config, section):
    """Read the configuration file and return a section.

    :param config: dict with 'config_path' and 'config_file' entries
    :param section: str section name to extract
    :returns: the section mapping, or None when absent
    """
    path = os.path.join(config.get('config_path'),
                        config.get('config_file'))
    return _config_ini(path).get(section)
def validate_file_permissions(config):
    """Verify that permissions on configuration files are secure enough.

    Validates each configured file (globs expanded) against the mode
    from its options or the config-wide 'permissions' default.

    :param config: audit options dict with a 'files' mapping
    :raises RuntimeError: on an unrecognised per-file option key
    """
    files = config.get('files', {})
    for file_name, options in files.items():
        for key in options.keys():
            # NOTE(review): 'optional' is read below from options but is
            # rejected here -- confirm whether it should be an allowed key.
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError("Invalid ownership configuration: {}"
                                   .format(key))
        mode = options.get('mode', config.get('permissions', '600'))
        # NOTE(review): the fallback default is the *string* 'False', which
        # is truthy when passed on as the 'optional' flag -- confirm a
        # boolean was intended.
        optional = options.get('optional', config.get('optional', 'False'))
        if '*' in file_name:
            # Glob pattern: validate each match unless it is also listed
            # explicitly (explicit entries are handled on their own pass).
            for file in glob.glob(file_name):
                if file not in files.keys():
                    if os.path.isfile(file):
                        _validate_file_mode(mode, file, optional)
        else:
            if os.path.isfile(file_name):
                _validate_file_mode(mode, file_name, optional)
def validate_uses_tls_for_keystone(audit_options):
    """Verify that TLS is used to communicate with Keystone.

    :param audit_options: audit options locating the service config file
    :raises AssertionError: when the section is missing or insecure
    """
    section = _config_section(audit_options, 'keystone_authtoken')
    assert section is not None, "Missing section 'keystone_authtoken'"
    insecure = section.get('insecure')
    assert not insecure and "https://" in section.get("auth_uri"), \
        "TLS is not used for Keystone"
def validate_uses_tls_for_glance(audit_options):
    """Verify that TLS is used to communicate with Glance.

    :param audit_options: audit options locating the service config file
    :raises AssertionError: when the section is missing or insecure
    """
    section = _config_section(audit_options, 'glance')
    assert section is not None, "Missing section 'glance'"
    insecure = section.get('insecure')
    assert not insecure and "https://" in section.get("api_servers"), \
        "TLS is not used for Glance"
def is_ready(self):
    """Returns True if all of the required_keys are available from any
    units.

    Logs the incomplete relation at DEBUG level otherwise.
    """
    units = self.get(self.name, [])
    if len(units) > 0:
        return True
    hookenv.log(
        'Incomplete relation: {}'.format(type(self).__name__),
        hookenv.DEBUG)
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.