idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
28,300
def _reload_if_necessary(self, env):
    """Check the timestamp of the loaded python provider module and, if it
    has changed since load, reload the provider module.

    Parameters:
      env: Provider environment object; supplies the logger and is passed
           to the provider's shutdown/init callbacks.

    Raises:
      pywbem.CIMError: CIM_ERR_FAILED if the provider source cannot be
        re-loaded.
    """
    try:
        mod = sys.modules[self.provider_module_name]
    except KeyError:
        mod = None
    if (mod is None or
            mod.provmod_timestamp != os.path.getmtime(self.provid)):
        logger = env.get_logger()
        logger.log_debug("Need to reload provider at %s" % self.provid)

        # first unload the module, giving it a chance to clean up
        if self.provmod and hasattr(self.provmod, "shutdown"):
            self.provmod.shutdown(env)

        # now reload and reinit module
        try:
            del sys.modules[self.provider_module_name]
        except KeyError:
            pass
        try:
            self._load_provider_source(logger)
            self._init_provider(env)
        except IOError as exc:
            # Bug fix: the original referenced the undefined name 'provid'
            # here, raising NameError instead of the intended CIMError.
            raise pywbem.CIMError(
                pywbem.CIM_ERR_FAILED,
                "Error loading provider %s: %s" % (self.provid, exc))
Check timestamp of loaded python provider module and if it has changed since load then reload the provider module .
230
20
28,301
def _real_key(self, key):
    """Return the normalized (lower-cased) key to be used for the internal
    dictionary, given the input key.

    A None key (unnamed key) is only accepted when this object allows
    unnamed keys; otherwise TypeError is raised.
    """
    if key is None:
        if self.allow_unnamed_keys:
            return None
        raise TypeError(
            _format("NocaseDict key None (unnamed key) is not "
                    "allowed for this object"))
    try:
        return key.lower()
    except AttributeError:
        raise TypeError(
            _format("NocaseDict key {0!A} must be a string, "
                    "but is {1}", key, type(key)))
Return the normalized key to be used for the internal dictionary from the input key .
113
16
28,302
def copy(self):
    """Return a shallow copy of this dictionary, with an independently
    copied internal data dictionary."""
    clone = NocaseDict()
    # pylint: disable=protected-access
    clone._data = self._data.copy()
    return clone
Return a copy of the dictionary .
40
7
28,303
def t_error(t):
    """Lexer error callback from the PLY lexer, with the token in error.

    Records the error message on the lexer, skips the offending character,
    and returns the token for the parser to handle.
    """
    t.lexer.last_msg = _format("Illegal character {0!A}", t.value[0])
    t.lexer.skip(1)
    return t
Lexer error callback from PLY Lexer with token in error .
51
14
28,304
def p_error(p):
    """YACC error callback from the parser.

    The parameter is the token in error and carries file/position
    information. If p is None, PLY is reporting an end-of-file error.
    """
    if p is None:
        raise MOFParseError(msg='Unexpected end of file')
    last_msg = p.lexer.last_msg
    p.lexer.last_msg = None
    raise MOFParseError(parser_token=p, msg=last_msg)
YACC Error Callback from the parser . The parameter is the token in error and contains information on the file and position of the error . If p is None PLY is returning eof error .
65
40
28,305
def _fixStringValue(s, p):
    """Clean up a MOF string literal: strip the surrounding quotes and
    resolve backslash escape sequences, including \\xNNNN unicode escapes.

    Parameters:
      s: The quoted string token value (still including its quotes).
      p: The PLY parser token, used only for error reporting.

    Raises:
      MOFParseError: If a unicode escape sequence contains no hex digits.
    """
    # pylint: disable=too-many-branches
    s = s[1:-1]  # drop the surrounding quote characters
    rv = ''
    esc = False  # True while handling the character after a backslash
    i = -1
    while i < len(s) - 1:
        i += 1
        ch = s[i]
        if ch == '\\' and not esc:
            esc = True
            continue
        if not esc:
            rv += ch
            continue
        # ch is the character following a backslash
        if ch == '"':
            rv += '"'
        elif ch == 'n':
            rv += '\n'
        elif ch == 't':
            rv += '\t'
        elif ch == 'b':
            rv += '\b'
        elif ch == 'f':
            rv += '\f'
        elif ch == 'r':
            rv += '\r'
        elif ch == '\\':
            rv += '\\'
        elif ch in ['x', 'X']:
            # Unicode escape: \x followed by 1..4 hex digits (DSP0004).
            # NOTE(review): an \x escape at the very end of the string
            # indexes past the end of s — confirm inputs cannot do that.
            hexc = 0
            j = 0
            i += 1
            while j < 4:
                c = s[i + j]
                c = c.upper()
                if not c.isdigit() and c not in 'ABCDEF':
                    break
                hexc <<= 4
                if c.isdigit():
                    hexc |= ord(c) - ord('0')
                else:
                    hexc |= ord(c) - ord('A') + 0XA
                j += 1
            if j == 0:
                # DSP0004 requires 1..4 hex chars - we have 0
                raise MOFParseError(
                    parser_token=p,
                    msg="Unicode escape sequence (e.g. '\\x12AB') requires "
                        "at least one hex character")
            rv += six.unichr(hexc)
            i += j - 1
        esc = False
    return rv
Clean up string value including special characters etc .
384
9
28,306
def _build_flavors(p, flist, qualdecl=None):
    """Build and return a dictionary defining the flavors from the flist
    argument, optionally seeded from a qualifier declaration.

    Parameters:
      p: Parser token, for error reporting.
      flist: List of flavor keyword strings (lower case).
      qualdecl: Optional qualifier declaration supplying default flavors.

    Raises:
      MOFParseError: If conflicting flavors are specified.
    """
    flavors = {}
    if ('disableoverride' in flist and 'enableoverride' in flist) or \
            ('restricted' in flist and 'tosubclass' in flist):
        # Bug fix: the two implicitly-concatenated message fragments were
        # missing a separating space ("...areinvalid").
        raise MOFParseError(parser_token=p,
                            msg="Conflicting flavors are invalid")
    if qualdecl is not None:
        flavors = {'overridable': qualdecl.overridable,
                   'translatable': qualdecl.translatable,
                   'tosubclass': qualdecl.tosubclass,
                   'toinstance': qualdecl.toinstance}
    if 'disableoverride' in flist:
        flavors['overridable'] = False
    if 'enableoverride' in flist:
        flavors['overridable'] = True
    if 'translatable' in flist:
        flavors['translatable'] = True
    if 'restricted' in flist:
        flavors['tosubclass'] = False
    if 'tosubclass' in flist:
        flavors['tosubclass'] = True
    if 'toinstance' in flist:
        flavors['toinstance'] = True
    # issue #193 ks 5/16 removed tosubclass & set toinstance.
    return flavors
Build and return a dictionary defining the flavors from the flist argument .
297
14
28,307
def _find_column(input_, token):
    """Find the 0-based column in the input where the error token starts.

    Computed from token.lexpos by locating the previous end-of-line.

    Parameters:
      input_: The complete input text.
      token: PLY token with a 'lexpos' attribute.
    """
    # Bug fix: the original scanned backwards with "while i > 0" and then
    # computed "lexpos - i - 1", which is off by one for tokens on the
    # first line (i stops at 0 even though no newline is there).
    # str.rfind returns -1 when no newline precedes the token, which
    # makes "lexpos - (-1) - 1 == lexpos" — the correct first-line column.
    last_cr = input_.rfind('\n', 0, token.lexpos)
    return token.lexpos - last_cr - 1
Find the column in the file where the error occurred. This is computed from token.lexpos, converted to the position on the current line by finding the previous EOL.
51
33
28,308
def _get_error_context(input_, token):
    """Build a list of context lines showing where in the input the error
    occurred.

    Returns up to five source lines ending with the error line, plus one
    final line of '^' characters positioned under the error token
    (position from token.lexpos, length from the token value).
    """
    try:
        # Text from the token to the end of its line.
        line = input_[token.lexpos: input_.index('\n', token.lexpos)]
    except ValueError:
        # Token is on the last line (no trailing newline).
        line = input_[token.lexpos:]
    # Prepend the part of the error line that precedes the token.
    i = input_.rfind('\n', 0, token.lexpos)
    if i < 0:
        i = 0
    line = input_[i:token.lexpos] + line
    lines = [line.strip('\r\n')]
    col = token.lexpos - i
    # Collect up to four preceding lines for context.
    while len(lines) < 5 and i > 0:
        end = i
        i = input_.rfind('\n', 0, i)
        if i < 0:
            i = 0
        lines.insert(0, input_[i:end].strip('\r\n'))
    # One '^' per character of the token value.
    pointer = ''
    for dummy_ch in str(token.value):
        pointer += '^'
    # Build the padding before the pointer, copying whitespace characters
    # from the error line so tabs keep the pointer aligned.
    pointline = ''
    i = 0
    while i < col - 1:
        if lines[-1][i].isspace():
            pointline += lines[-1][i]
        # otherwise, tabs complicate the alignment
        else:
            pointline += ' '
        i += 1
    lines.append(pointline + pointer)
    return lines
Build a context string that defines where on the line the defined error occurs . This consists of the characters ^ at the position and for the length defined by the lexer position and token length
274
37
28,309
def _build(verbose=False):
    """Build the LEX and YACC table modules for the MOF compiler, if they
    do not exist yet or if their table versions do not match the installed
    version of the ply package."""
    if verbose:
        msg = _format("Building LEX/YACC modules for MOF compiler in: {0}",
                      _tabdir)
        print(msg)
    _yacc(verbose)
    _lex(verbose)
Build the LEX and YACC table modules for the MOF compiler if they do not exist yet or if their table versions do not match the installed version of the ply package .
55
36
28,310
def _yacc(verbose=False):
    """Return the YACC parser object for the MOF compiler."""
    # In yacc(), the 'debug' parameter controls the main error messages to
    # the 'errorlog' in addition to the debug messages to the 'debuglog'.
    # Because we want to see the error messages, debug is enabled but the
    # debuglog is discarded via NullLogger. To enable debug logging, pass
    # another logger (e.g. PlyLogger(sys.stdout)) as debuglog.
    parser_options = {
        'optimize': _optimize,
        'tabmodule': _tabmodule,
        'outputdir': _tabdir,
        'debug': True,
        'debuglog': yacc.NullLogger(),
        'errorlog': yacc.PlyLogger(sys.stdout),
    }
    return yacc.yacc(**parser_options)
Return YACC parser object for the MOF compiler .
160
11
28,311
def _lex(verbose=False):
    """Return the LEX analyzer object for the MOF compiler."""
    lexer_options = {
        'optimize': _optimize,
        'lextab': _lextab,
        'outputdir': _tabdir,
        'debug': False,
        # 'debuglog': lex.PlyLogger(sys.stdout),
        'errorlog': lex.PlyLogger(sys.stdout),
    }
    return lex.lex(**lexer_options)
Return LEX analyzer object for the MOF Compiler .
72
13
28,312
def _setns(self, value):
    """Set the default repository namespace to be used.

    When a connection exists its default namespace is updated; otherwise
    the value is stored locally on this object.
    """
    if self.conn is None:
        self.__default_namespace = value
    else:
        self.conn.default_namespace = value
Set the default repository namespace to be used .
39
9
28,313
def CreateInstance(self, *args, **kwargs):
    """Create a CIM instance in the local repository of this class.

    Returns the path of the new instance.
    """
    inst = args[0] if args else kwargs['NewInstance']
    ns = self.default_namespace
    if ns in self.instances:
        self.instances[ns].append(inst)
    else:
        self.instances[ns] = [inst]
    return inst.path
Create a CIM instance in the local repository of this class .
75
13
28,314
def GetClass(self, *args, **kwargs):
    """Retrieve a CIM class from the local repository of this class,
    fetching it from the real connection (and caching it locally) when it
    is not present locally.

    When LocalOnly=False is requested, properties and methods inherited
    from superclasses are merged into the returned class.

    Raises:
      CIMError: CIM_ERR_NOT_FOUND if the class is not in the local
        repository and there is no connection to fall back to.
    """
    cname = args[0] if args else kwargs['ClassName']
    try:
        cc = self.classes[self.default_namespace][cname]
    except KeyError:
        if self.conn is None:
            ce = CIMError(CIM_ERR_NOT_FOUND, cname)
            raise ce
        cc = self.conn.GetClass(*args, **kwargs)
        # Cache the retrieved class, creating the per-namespace dict on
        # first use.
        try:
            self.classes[self.default_namespace][cc.classname] = cc
        except KeyError:
            self.classes[self.default_namespace] = \
                NocaseDict({cc.classname: cc})
    if 'LocalOnly' in kwargs and not kwargs['LocalOnly']:
        if cc.superclass:
            # Recurse up the inheritance chain; remove the class name from
            # the arguments so the superclass name is used instead.
            try:
                del kwargs['ClassName']
            except KeyError:
                pass
            if args:
                args = args[1:]
            super_ = self.GetClass(cc.superclass, *args, **kwargs)
            # Merge inherited properties/methods not overridden locally.
            for prop in super_.properties.values():
                if prop.name not in cc.properties:
                    cc.properties[prop.name] = prop
            for meth in super_.methods.values():
                if meth.name not in cc.methods:
                    cc.methods[meth.name] = meth
    return cc
Retrieve a CIM class from the local repository of this class .
299
14
28,315
def EnumerateQualifiers(self, *args, **kwargs):
    """Enumerate the qualifier types in the local repository of this
    class, combined with those from the real connection when one exists."""
    if self.conn is None:
        result = []
    else:
        result = self.conn.EnumerateQualifiers(*args, **kwargs)
    try:
        result += list(self.qualifiers[self.default_namespace].values())
    except KeyError:
        pass
    return result
Enumerate the qualifier types in the local repository of this class .
87
14
28,316
def GetQualifier(self, *args, **kwargs):
    """Retrieve a qualifier type from the local repository of this class,
    falling back to the real connection when it is not found locally.

    Raises CIMError(CIM_ERR_NOT_FOUND) when not found locally and no
    connection exists.
    """
    qualname = args[0] if args else kwargs['QualifierName']
    try:
        return self.qualifiers[self.default_namespace][qualname]
    except KeyError:
        if self.conn is None:
            raise CIMError(CIM_ERR_NOT_FOUND, qualname,
                           conn_id=self.conn_id)
        return self.conn.GetQualifier(*args, **kwargs)
Retrieve a qualifier type from the local repository of this class .
117
13
28,317
def SetQualifier(self, *args, **kwargs):
    """Create or modify a qualifier type in the local repository of this
    class."""
    qual = args[0] if args else kwargs['QualifierDeclaration']
    ns = self.default_namespace
    try:
        self.qualifiers[ns][qual.name] = qual
    except KeyError:
        # First qualifier for this namespace.
        self.qualifiers[ns] = NocaseDict({qual.name: qual})
Create or modify a qualifier type in the local repository of this class .
87
14
28,318
def rollback(self, verbose=False):
    """Remove classes and instances from the underlying repository that
    have been created in the local repository of this class.

    Deletion happens in reverse order of creation so dependents are
    removed before their dependencies; errors are reported but do not
    stop the rollback.
    """
    for ns, insts in self.instances.items():
        insts.reverse()
        for inst in insts:
            try:
                if verbose:
                    print(_format("Deleting instance {0}", inst.path))
                self.conn.DeleteInstance(inst.path)
            except CIMError as ce:
                print(_format("Error deleting instance {0}", inst.path))
                print(_format(" {0} {1}", ce.status_code,
                              ce.status_description))
    for ns, cnames in self.class_names.items():
        # Switch to the namespace the classes were created in.
        self.default_namespace = ns
        cnames.reverse()
        for cname in cnames:
            try:
                if verbose:
                    print(_format("Deleting class {0!A}:{1!A}",
                                  ns, cname))
                self.conn.DeleteClass(cname)
            except CIMError as ce:
                print(_format("Error deleting class {0!A}:{1!A}",
                              ns, cname))
                print(_format(" {0} {1}", ce.status_code,
                              ce.status_description))
Remove classes and instances from the underlying repository that have been created in the local repository of this class .
264
20
28,319
def compile_string(self, mof, ns, filename=None):
    """Compile a string of MOF statements into a namespace of the
    associated CIM repository.

    Parameters:
      mof: String with the MOF statements to compile.
      ns: Target namespace name.
      filename: Optional file name, used for error reporting.

    Returns the parser result. MOFParseError and CIMError are logged and
    re-raised.
    """
    lexer = self.lexer.clone()
    lexer.parser = self.parser
    # Save and replace the parser's current file/mof context so it can be
    # restored after this (possibly nested) compile.
    try:
        oldfile = self.parser.file
    except AttributeError:
        oldfile = None
    self.parser.file = filename
    try:
        oldmof = self.parser.mof
    except AttributeError:
        oldmof = None
    self.parser.mof = mof
    self.parser.handle.default_namespace = ns
    # Ensure the per-namespace caches exist.
    if ns not in self.parser.qualcache:
        self.parser.qualcache[ns] = NocaseDict()
    if ns not in self.parser.classnames:
        self.parser.classnames[ns] = []
    try:
        # Call the parser. To generate detailed output of states
        # add debug=... to following line where debug may be a
        # constant (ex. 1) or may be a log definition, ex..
        # log = logging.getLogger()
        # logging.basicConfig(level=logging.DEBUG)
        rv = self.parser.parse(mof, lexer=lexer)
        self.parser.file = oldfile
        self.parser.mof = oldmof
        return rv
    except MOFParseError as pe:
        # Generate the error message into log and reraise error
        self.parser.log(pe.get_err_msg())
        raise
    except CIMError as ce:
        if hasattr(ce, 'file_line'):
            self.parser.log(
                _format("Fatal Error: {0}:{1}",
                        ce.file_line[0], ce.file_line[1]))
        else:
            self.parser.log("Fatal Error:")
        description = _format(":{0}", ce.status_description) \
            if ce.status_description else ""
        self.parser.log(
            _format("{0}{1}", _statuscode2string(ce.status_code),
                    description))
        raise
Compile a string of MOF statements into a namespace of the associated CIM repository .
426
18
28,320
def compile_file(self, filename, ns):
    """Compile a MOF file into a namespace of the associated CIM
    repository.

    If the file does not exist, a file named after the class (derived
    from the file name) is searched for in the compiler search path.

    Raises IOError when no file can be found.
    """
    if self.parser.verbose:
        self.parser.log(_format("Compiling file {0!A}", filename))
    if not os.path.exists(filename):
        # try to find in search path
        found = self.find_mof(os.path.basename(filename[:-4]).lower())
        if found is None:
            raise IOError(_format("No such file: {0!A}", filename))
        filename = found
    with open(filename, "r") as f:
        mof = f.read()
    return self.compile_string(mof, ns, filename=filename)
Compile a MOF file into a namespace of the associated CIM repository .
156
16
28,321
def find_mof(self, classname):
    """Find the MOF file that defines a particular CIM class by walking
    the search path of the MOF compiler.

    Returns the full path of the first '<classname>.mof' file found
    (case-insensitive match), or None when there is no match.
    """
    target = classname.lower()
    for search in self.parser.search_paths:
        for root, dummy_dirs, files in os.walk(search):
            for candidate in files:
                if (candidate.endswith('.mof') and
                        candidate[:-4].lower() == target):
                    return os.path.join(root, candidate)
    return None
Find the MOF file that defines a particular CIM class in the search path of the MOF compiler .
99
22
28,322
def configure_loggers_from_string(log_configuration_str,
                                  log_filename=DEFAULT_LOG_FILENAME,
                                  connection=None, propagate=False):
    """Configure the pywbem loggers from a log configuration string, and
    optionally activate WBEM connections for logging and set a log detail
    level.

    The string is a comma-separated list of log specs of the form
    'name[=dest[:detail]]', e.g. 'api=file:summary,http=stderr'.

    Raises:
      ValueError: If a log spec is malformed.
    """
    log_specs = log_configuration_str.split(',')
    for log_spec in log_specs:
        # Strip stray '=' so 'api=' parses like 'api'.
        spec_split = log_spec.strip('=').split("=")
        simple_name = spec_split[0]
        if not simple_name:
            raise ValueError(
                _format("Simple logger name missing in log spec: {0}",
                        log_spec))
        if len(spec_split) == 1:
            # Only the logger name was given; use the defaults.
            log_dest = DEFAULT_LOG_DESTINATION
            detail_level = DEFAULT_LOG_DETAIL_LEVEL
        elif len(spec_split) == 2:
            # 'name=dest[:detail]'
            val_split = spec_split[1].strip(':').split(':')
            log_dest = val_split[0] or None
            if len(val_split) == 1:
                detail_level = DEFAULT_LOG_DETAIL_LEVEL
            elif len(val_split) == 2:
                detail_level = val_split[1] or None
            else:  # len(val_split) > 2
                raise ValueError(
                    _format("Too many components separated by : in log spec: "
                            "{0}", log_spec))
        else:  # len(spec_split) > 2:
            raise ValueError(
                _format("Too many components separated by = in log spec: "
                        "{0}", log_spec))
        # Convert to integer, if possible
        if detail_level:
            try:
                detail_level = int(detail_level)
            except ValueError:
                pass
        configure_logger(simple_name, log_dest=log_dest,
                         detail_level=detail_level,
                         log_filename=log_filename,
                         connection=connection, propagate=propagate)
Configure the pywbem loggers and optionally activate WBEM connections for logging and setting a log detail level from a log configuration string .
436
28
28,323
def display_paths(instances, type_str):
    """Display the count and the path of each instance in the list.

    Parameters:
      instances: List of CIM instances, each with a 'path' attribute.
      type_str: Label used as the prefix of each printed line.
    """
    print('%ss: count=%s' % (type_str, len(instances)))
    # Idiom fix: iterate directly instead of building an intermediate
    # list of paths that is only iterated once.
    for instance in instances:
        print('%s: %s' % (type_str, instance.path))
    # Idiom fix: truthiness test instead of len().
    if instances:
        print('')
Display the count and paths for the list of instances in instances .
78
13
28,324
def get_default_ca_certs():
    """Return a system path containing CA certificates, or None when no
    such path exists. The result is computed once and cached on the
    function object."""
    # pylint: disable=protected-access
    if not hasattr(get_default_ca_certs, '_path'):
        found = None
        for path in get_default_ca_cert_paths():
            if os.path.exists(path):
                found = path
                break
        get_default_ca_certs._path = found
    return get_default_ca_certs._path
Try to find out system path with ca certificates . This path is cached and returned . If no path is found out None is returned .
109
27
28,325
def get_cimobject_header(obj):
    """Return the value for the CIM-XML extension header field 'CIMObject'
    from the given object.

    Accepts a namespace path string, a local class path, or a local
    instance path; raises TypeError for anything else.
    """
    # A plain string is a local namespace path and is used as-is.
    if isinstance(obj, six.string_types):
        return obj
    # Local class and instance paths use their 'cimobject' URI form.
    if isinstance(obj, (CIMClassName, CIMInstanceName)):
        return obj.to_wbem_uri(format='cimobject')
    raise TypeError(
        _format("Invalid object type {0} to generate CIMObject header value "
                "from", type(obj)))
Return the value for the CIM - XML extension header field CIMObject using the given object .
131
20
28,326
def print_profile_info(org_vm, inst):
    """Print the registered organization, name and version for the
    profile defined by instance inst."""
    org = org_vm.tovalues(inst['RegisteredOrganization'])
    print(" %s %s Profile %s"
          % (org, inst['RegisteredName'], inst['RegisteredVersion']))
Print the registered org name version for the profile defined by inst
70
12
28,327
def explore_server(server_url, username, password):
    """Demo of exploring a WBEM server for the characteristics surfaced
    by the WBEMServer class: brand, version, namespaces, advertised
    management profiles, and central instances of selected profiles.
    """
    print("WBEM server URL:\n %s" % server_url)
    conn = WBEMConnection(server_url, (username, password),
                          no_verification=True)
    server = WBEMServer(conn)
    print("Brand:\n %s" % server.brand)
    print("Version:\n %s" % server.version)
    print("Interop namespace:\n %s" % server.interop_ns)
    print("All namespaces:")
    for ns in server.namespaces:
        print(" %s" % ns)
    print("Advertised management profiles:")
    org_vm = ValueMapping.for_property(server, server.interop_ns,
                                       'CIM_RegisteredProfile',
                                       'RegisteredOrganization')
    for inst in server.profiles:
        print_profile_info(org_vm, inst)
    indication_profiles = server.get_selected_profiles('DMTF',
                                                       'Indications')
    print('Profiles for DMTF:Indications')
    for inst in indication_profiles:
        print_profile_info(org_vm, inst)
    server_profiles = server.get_selected_profiles('SNIA', 'Server')
    print('Profiles for SNIA:Server')
    for inst in server_profiles:
        print_profile_info(org_vm, inst)
    # get Central Instances
    # Component profiles: central instances via the central-class
    # methodology with explicit scoping information.
    for inst in indication_profiles:
        org = org_vm.tovalues(inst['RegisteredOrganization'])
        name = inst['RegisteredName']
        vers = inst['RegisteredVersion']
        print("Central instances for profile %s:%s:%s (component):"
              % (org, name, vers))
        try:
            ci_paths = server.get_central_instances(
                inst.path, "CIM_IndicationService", "CIM_System",
                ["CIM_HostedService"])
        except Exception as exc:
            print("Error: %s" % str(exc))
            ci_paths = []
        for ip in ci_paths:
            print(" %s" % str(ip))
    # Autonomous profiles: no scoping information needed.
    for inst in server_profiles:
        org = org_vm.tovalues(inst['RegisteredOrganization'])
        name = inst['RegisteredName']
        vers = inst['RegisteredVersion']
        print("Central instances for profile %s:%s:%s(autonomous):"
              % (org, name, vers))
        try:
            ci_paths = server.get_central_instances(inst.path)
        except Exception as exc:
            print("Error: %s" % str(exc))
            ci_paths = []
        for ip in ci_paths:
            print(" %s" % str(ip))
Demo of exploring a cim server for characteristics defined by the server class
625
15
28,328
def delete_namespace ( self , namespace ) : std_namespace = _ensure_unicode ( namespace . strip ( '/' ) ) # Use approach 1: DeleteInstance of CIM class for namespaces # Refresh the list of namespaces in this object to make sure # it is up to date. self . _determine_namespaces ( ) if std_namespace not in self . namespaces : raise CIMError ( CIM_ERR_NOT_FOUND , _format ( "Specified namespace does not exist: {0!A}" , std_namespace ) , conn_id = self . conn . conn_id ) ns_path = None for p in self . namespace_paths : if p . keybindings [ 'Name' ] == std_namespace : ns_path = p assert ns_path is not None # Ensure the namespace is empty. We do not check for instances, because # classes are a prerequisite for instances, so if no classes exist, # no instances will exist. # WBEM servers that do not support class operations (e.g. SFCB) will # raise a CIMError with status CIM_ERR_NOT_SUPPORTED. class_paths = self . conn . EnumerateClassNames ( namespace = std_namespace , ClassName = None , DeepInheritance = False ) quals = self . conn . EnumerateQualifiers ( namespace = std_namespace ) if class_paths or quals : raise CIMError ( CIM_ERR_NAMESPACE_NOT_EMPTY , _format ( "Specified namespace {0!A} is not empty; it contains " "{1} top-level classes and {2} qualifier types" , std_namespace , len ( class_paths ) , len ( quals ) ) , conn_id = self . conn . conn_id ) self . conn . DeleteInstance ( ns_path ) # Refresh the list of namespaces in this object to remove the one # we just deleted. self . _determine_namespaces ( ) return std_namespace
Delete the specified CIM namespace in the WBEM server and update this WBEMServer object to reflect the removed namespace there .
454
25
28,329
def _traverse(self, start_paths, traversal_path):
    """Traverse a multi-hop association path from a list of start
    instance paths and return the resulting list of instance paths.

    traversal_path is a flat list of alternating (association class,
    far-end class) names, one pair per hop.
    """
    paths = start_paths
    remaining = traversal_path
    while remaining:
        assert len(remaining) >= 2
        assoc_class, far_class = remaining[0], remaining[1]
        hop_result = []
        for path in paths:
            hop_result.extend(
                self._conn.AssociatorNames(ObjectName=path,
                                           AssocClass=assoc_class,
                                           ResultClass=far_class))
        paths = hop_result
        remaining = remaining[2:]
    return paths
Traverse a multi - hop traversal path from a list of start instance paths and return the resulting list of instance paths .
168
25
28,330
def _validate_interop_ns(self, interop_ns):
    """Validate that the specified Interop namespace exists in the WBEM
    server by attempting an enumeration in it; on success remember it."""
    test_classname = 'CIM_Namespace'
    try:
        self._conn.EnumerateInstanceNames(test_classname,
                                          namespace=interop_ns)
    except CIMError as exc:
        # We tolerate it if the WBEM server does not implement this class,
        # as long as it does not return CIM_ERR_INVALID_NAMESPACE.
        if exc.status_code not in (CIM_ERR_INVALID_CLASS,
                                   CIM_ERR_NOT_FOUND):
            raise
    self._interop_ns = interop_ns
Validate whether the specified Interop namespace exists in the WBEM server by communicating with it .
148
19
28,331
def _determine_profiles(self):
    """Determine the WBEM management profiles advertised by the WBEM
    server, by enumerating the instances of CIM_RegisteredProfile in the
    Interop namespace."""
    self._profiles = self._conn.EnumerateInstances(
        "CIM_RegisteredProfile", namespace=self.interop_ns)
Determine the WBEM management profiles advertised by the WBEM server by communicating with it and enumerating the instances of CIM_RegisteredProfile .
55
30
28,332
def _represent_undefined(self, data):
    """Raise RepresenterError for an object that cannot be represented."""
    msg = _format(
        "Cannot represent an object: {0!A} of type: {1}; "
        "yaml_representers: {2!A}, "
        "yaml_multi_representers: {3!A}",
        data, type(data), self.yaml_representers.keys(),
        self.yaml_multi_representers.keys())
    raise RepresenterError(msg)
Raises flag for objects that cannot be represented
101
9
28,333
def open_file(filename, file_mode='w'):
    """Convenience function that opens the recorder file correctly for
    the running Python version, always as UTF-8 text."""
    if not six.PY2:
        return open(filename, file_mode, encoding='utf8')
    # Open with codecs to define text mode
    return codecs.open(filename, mode=file_mode, encoding='utf-8')
A static convenience function that performs the open of the recorder file correctly for different versions of Python .
70
19
28,334
def reset(self, pull_op=None):
    """Reset all staged request/response attributes of this recorder.

    Also allows setting the pull_op attribute, which defines whether the
    operation is a traditional or a pull operation. The connection id is
    deliberately NOT reset, as it exists through the life of the
    connection.
    """
    for attr in ('_pywbem_method', '_pywbem_args',
                 '_pywbem_result_ret', '_pywbem_result_exc',
                 '_http_request_version', '_http_request_url',
                 '_http_request_target', '_http_request_method',
                 '_http_request_headers', '_http_request_payload',
                 '_http_response_version', '_http_response_status',
                 '_http_response_reason', '_http_response_headers',
                 '_http_response_payload'):
        setattr(self, attr, None)
    self._pull_op = pull_op
Reset all the attributes in the class . This also allows setting the pull_op attribute that defines whether the operation is to be a traditional or pull operation . This does NOT reset _conn . id as that exists through the life of the connection .
178
50
28,335
def stage_pywbem_args(self, method, **kwargs):
    """Record the request method name and all of its arguments.

    Normally called before the operation is executed, to record the
    request parameters.
    """
    # pylint: disable=attribute-defined-outside-init
    self._pywbem_args = kwargs
    self._pywbem_method = method
Set request method and all args. Normally called before the cmd is executed to record request parameters.
56
19
28,336
def stage_pywbem_result(self, ret, exc):
    """Record the operation result: the return value and any exception
    information."""
    # pylint: disable=attribute-defined-outside-init
    self._pywbem_result_exc = exc
    self._pywbem_result_ret = ret
Set Result return info or exception info
54
7
28,337
def stage_http_request(self, conn_id, version, url, target, method,
                       headers, payload):
    """Record HTTP request information: connection id, HTTP version,
    url, target, method, headers and payload."""
    # pylint: disable=attribute-defined-outside-init
    self._http_request_conn_id = conn_id
    self._http_request_version = version
    self._http_request_url = url
    self._http_request_target = target
    self._http_request_method = method
    self._http_request_headers = headers
    self._http_request_payload = payload
Set request HTTP information including url headers etc .
115
9
28,338
def stage_http_response1(self, conn_id, version, status, reason, headers):
    """Record HTTP response information: version, status, reason and
    headers. conn_id is unused here; it is used by the log recorder."""
    # pylint: disable=attribute-defined-outside-init
    (self._http_response_version,
     self._http_response_status,
     self._http_response_reason,
     self._http_response_headers) = version, status, reason, headers
Set response http info including headers status etc . conn_id unused here . Used in log
77
18
28,339
def record_staged(self):
    """Encode the staged request and result information to the output,
    if this recorder is enabled."""
    if not self.enabled:
        return
    pwargs = OpArgs(self._pywbem_method, self._pywbem_args)
    pwresult = OpResult(self._pywbem_result_ret, self._pywbem_result_exc)
    httpreq = HttpRequest(
        self._http_request_version, self._http_request_url,
        self._http_request_target, self._http_request_method,
        self._http_request_headers, self._http_request_payload)
    httpresp = HttpResponse(
        self._http_response_version, self._http_response_status,
        self._http_response_reason, self._http_response_headers,
        self._http_response_payload)
    self.record(pwargs, pwresult, httpreq, httpresp)
Encode staged information on request and result to output
201
10
28,340
def set_detail_level(self, detail_levels):
    """Set the api/http detail levels from the detail_levels dictionary.

    An integer detail level also sets the corresponding maximum output
    length; None input is a no-op.
    """
    if detail_levels is None:
        return
    self.detail_levels = detail_levels
    for kind in ('api', 'http'):
        if kind in detail_levels:
            setattr(self, kind + '_detail_level', detail_levels[kind])
    if isinstance(self.api_detail_level, int):
        self.api_maxlen = self.api_detail_level
    if isinstance(self.http_detail_level, int):
        self.http_maxlen = self.http_detail_level
Sets the detail levels from the input dictionary in detail_levels .
139
14
28,341
def stage_pywbem_args(self, method, **kwargs):
    """Log the request method and all its arguments at debug level, if
    this logger is enabled.

    Normally called before the operation is executed to record the
    request parameters. This method does not support the summary
    detail_level because that would add little information that is not
    also in the response.
    """
    # pylint: disable=attribute-defined-outside-init
    self._pywbem_method = method
    if self.enabled and self.api_detail_level is not None and \
            self.apilogger.isEnabledFor(logging.DEBUG):
        # TODO: future bypassed code to only output name and method if the
        # detail is summary. We are not doing this because this is
        # effectively the same information in the response so the only
        # additional information is the time stamp.
        # if self.api_detail_level == summary:
        #     self.apilogger.debug('Request:%s %s', self._conn_id, method)
        #     return
        # Order kwargs. Note that this is done automatically starting
        # with python 3.6
        kwstr = ', '.join([('{0}={1!r}'.format(key, kwargs[key]))
                           for key in sorted(six.iterkeys(kwargs))])
        # Truncate to the configured maximum output length.
        if self.api_maxlen and (len(kwstr) > self.api_maxlen):
            kwstr = kwstr[:self.api_maxlen] + '...'
        # pylint: disable=bad-continuation
        self.apilogger.debug('Request:%s %s(%s)', self._conn_id, method,
                             kwstr)
Log request method and all args . Normally called before the cmd is executed to record request parameters . This method does not support the summary detail_level because that seems to add little info to the log that is not also in the response .
325
47
28,342
def stage_http_request(self, conn_id, version, url, target, method,
                       headers, payload):
    """Log HTTP request information (url, headers, payload, etc.) at
    debug level, if this logger is enabled.

    Credentials in an Authorization header are masked, and the payload
    is truncated to the configured maximum output length.
    """
    if self.enabled and self.http_detail_level is not None and \
            self.httplogger.isEnabledFor(logging.DEBUG):
        # pylint: disable=attribute-defined-outside-init

        # if Auth header, mask data. Bug fix: work on a copy so the
        # caller's headers dict is not mutated by the masking.
        headers = dict(headers)
        if 'Authorization' in headers:
            authtype, cred = headers['Authorization'].split(' ')
            headers['Authorization'] = _format(
                "{0} {1}", authtype, 'X' * len(cred))

        header_str = ' '.join('{0}:{1!r}'.format(k, v)
                              for k, v in headers.items())

        if self.http_detail_level == 'summary':
            upayload = ""
        elif isinstance(payload, six.binary_type):
            upayload = payload.decode('utf-8')
        else:
            upayload = payload

        # Bug fix: truncate based on the string actually being logged
        # (upayload), not the raw payload. The original appended '...'
        # to an empty summary payload and raised TypeError when payload
        # was None.
        if self.http_maxlen and upayload and \
                len(upayload) > self.http_maxlen:
            upayload = upayload[:self.http_maxlen] + '...'

        self.httplogger.debug('Request:%s %s %s %s %s %s\n %s',
                              conn_id, method, target, version, url,
                              header_str, upayload)
Log request HTTP information including url headers etc .
316
9
28,343
def stage_http_response2(self, payload):
    """Log the complete HTTP response (the information staged by
    stage_http_response1 plus the payload), if this logger is enabled."""
    # required because http code uses sending all None to reset
    # parameters. We ignore that
    if not self._http_response_version and not payload:
        return
    if self.enabled and self.http_detail_level is not None and \
            self.httplogger.isEnabledFor(logging.DEBUG):
        if self._http_response_headers:
            header_str = ' '.join('{0}:{1!r}'.format(k, v)
                                  for k, v in
                                  self._http_response_headers.items())
        else:
            header_str = ''
        # Summary logs no payload; otherwise truncate to the configured
        # maximum output length.
        if self.http_detail_level == 'summary':
            upayload = ""
        elif self.http_maxlen and (len(payload) > self.http_maxlen):
            upayload = (_ensure_unicode(payload[:self.http_maxlen])
                        + '...')
        else:
            upayload = _ensure_unicode(payload)
        self.httplogger.debug('Response:%s %s:%s %s %s\n %s',
                              self._http_response_conn_id,
                              self._http_response_status,
                              self._http_response_reason,
                              self._http_response_version,
                              header_str, upayload)
Log complete http response including response1 and payload
296
9
28,344
def _to_int(self, val_str):
    """Convert val_str to an integer, raising ValueError when it is not
    a valid integer representation for a ValueMap entry."""
    val = _integerValue_to_int(val_str)
    if val is not None:
        return val
    raise ValueError(
        _format("The value-mapped {0} has an invalid integer "
                "representation in a ValueMap entry: {1!A}",
                self._element_str(), val_str))
Convert val_str to an integer or raise ValueError.
84
12
28,345
def _element_str(self):
    """Return a string that identifies the value-mapped element
    (property, method, or parameter)."""
    if isinstance(self.element, CIMProperty):
        return _format("property {0!A} in class {1!A} (in {2!A})",
                       self.propname, self.classname, self.namespace)
    if isinstance(self.element, CIMMethod):
        return _format("method {0!A} in class {1!A} (in {2!A})",
                       self.methodname, self.classname, self.namespace)
    assert isinstance(self.element, CIMParameter)
    return _format("parameter {0!A} of method {1!A} in class {2!A} "
                   "(in {3!A})",
                   self.parametername, self.methodname, self.classname,
                   self.namespace)
Return a string that identifies the value - mapped element .
198
11
28,346
def tovalues(self, element_value):
    """Return the Values string for an integer element value, based upon
    this value mapping.

    Raises TypeError for non-integer input and ValueError when the value
    is not covered by the ValueMap qualifier.
    """
    if not isinstance(element_value, (six.integer_types, CIMInt)):
        raise TypeError(
            _format("The value for value-mapped {0} is not "
                    "integer-typed, but has Python type: {1}",
                    self._element_str(), type(element_value)))

    # Direct single-value match first.
    if element_value in self._b2v_single_dict:
        return self._b2v_single_dict[element_value]

    # Then the value ranges.
    for lo, hi, values_str in self._b2v_range_tuple_list:
        if lo <= element_value <= hi:
            return values_str

    # Finally the catch-all '..' entry, if any.
    if self._b2v_unclaimed is not None:
        return self._b2v_unclaimed

    raise ValueError(
        _format("The value for value-mapped {0} is outside of the set "
                "defined by its ValueMap qualifier: {1!A}",
                self._element_str(), element_value))
Return the Values string for an element value based upon this value mapping .
246
14
28,347
def tobinary(self, values_str):
    """Return the integer value (or values) for a Values string, based
    upon this value mapping.

    Raises TypeError for non-string input and ValueError when the string
    is not covered by the Values qualifier.
    """
    if not isinstance(values_str, six.string_types):
        raise TypeError(
            _format("The values string for value-mapped {0} is not "
                    "string-typed, but has Python type: {1}",
                    self._element_str(), type(values_str)))
    try:
        return self._v2b_dict[values_str]
    except KeyError:
        raise ValueError(
            _format("The values string for value-mapped {0} is outside "
                    "of the set defined by its Values qualifier: {1!A}",
                    self._element_str(), values_str))
Return the integer value or values for a Values string based upon this value mapping .
153
16
28,348
def _get_server ( self , server_id ) : if server_id not in self . _servers : raise ValueError ( _format ( "WBEM server {0!A} not known by subscription manager" , server_id ) ) return self . _servers [ server_id ]
Internal method to get the server object given a server_id .
64
13
28,349
def add_server(self, server):
    """Register a WBEM server with the subscription manager. This is a
    prerequisite for adding listener destinations, indication filters and
    indication subscriptions to the server.

    Also recovers any owned destination, filter and subscription instances
    that this subscription manager previously created on the server (they
    are recognized by the encoding of host and manager id in their 'Name'
    key).

    Returns:
        The server ID (the server URL) identifying the server in
        subsequent calls.

    Raises:
        TypeError: server is not a WBEMServer.
        ValueError: server is already registered.
    """
    if not isinstance(server, WBEMServer):
        raise TypeError("Server argument of add_server() must be a "
                        "WBEMServer object")
    server_id = server.url
    if server_id in self._servers:
        raise ValueError(
            _format("WBEM server already known by listener: {0!A}",
                    server_id))

    # Create dictionary entries for this server
    self._servers[server_id] = server
    self._owned_subscriptions[server_id] = []
    self._owned_filters[server_id] = []
    self._owned_destinations[server_id] = []

    # Recover any owned destination, filter, and subscription instances
    # that exist on this server
    this_host = getfqdn()

    dest_name_pattern = re.compile(
        _format(r'^pywbemdestination:owned:{0}:{1}:[^:]*$',
                this_host, self._subscription_manager_id))
    dest_insts = server.conn.EnumerateInstances(
        DESTINATION_CLASSNAME, namespace=server.interop_ns)
    for inst in dest_insts:
        # Only recover instances created by this manager on this host;
        # both facts are encoded in the 'Name' key.
        if re.match(dest_name_pattern, inst.path.keybindings['Name']) \
                and inst.path.keybindings['SystemName'] == this_host:
            self._owned_destinations[server_id].append(inst)

    filter_name_pattern = re.compile(
        _format(r'^pywbemfilter:owned:{0}:{1}:[^:]*:[^:]*$',
                this_host, self._subscription_manager_id))
    filter_insts = server.conn.EnumerateInstances(
        FILTER_CLASSNAME, namespace=server.interop_ns)
    for inst in filter_insts:
        if re.match(filter_name_pattern, inst.path.keybindings['Name']) \
                and inst.path.keybindings['SystemName'] == this_host:
            self._owned_filters[server_id].append(inst)

    sub_insts = server.conn.EnumerateInstances(
        SUBSCRIPTION_CLASSNAME, namespace=server.interop_ns)
    owned_filter_paths = [inst.path
                          for inst in self._owned_filters[server_id]]
    owned_destination_paths = [inst.path
                               for inst in
                               self._owned_destinations[server_id]]
    for inst in sub_insts:
        # A subscription is owned if it references an owned filter or an
        # owned destination.
        if inst.path.keybindings['Filter'] in owned_filter_paths \
                or inst.path.keybindings['Handler'] in \
                owned_destination_paths:
            self._owned_subscriptions[server_id].append(inst)

    return server_id
Register a WBEM server with the subscription manager . This is a prerequisite for adding listener destinations indication filters and indication subscriptions to the server .
643
27
28,350
def remove_server(self, server_id):
    """Remove a registered WBEM server from the subscription manager.

    This also unregisters listeners from that server and removes all owned
    indication subscriptions, owned indication filters and owned listener
    destinations.
    """
    # Validate server_id
    server = self._get_server(server_id)

    # Delete any instances we recorded to be cleaned up, in dependency
    # order: subscriptions first, then the filters and destinations they
    # reference.
    owned_dicts = (self._owned_subscriptions,
                   self._owned_filters,
                   self._owned_destinations)
    for owned in owned_dicts:
        if server_id not in owned:
            continue
        inst_list = owned[server_id]
        # We iterate backwards because we change the list
        for idx in six.moves.range(len(inst_list) - 1, -1, -1):
            server.conn.DeleteInstance(inst_list[idx].path)
            del inst_list[idx]
        del owned[server_id]

    # Remove server from this listener
    del self._servers[server_id]
Remove a registered WBEM server from the subscription manager . This also unregisters listeners from that server and removes all owned indication subscriptions owned indication filters and owned listener destinations .
370
34
28,351
def remove_all_servers(self):
    """Remove all registered WBEM servers from the subscription manager.

    This also unregisters listeners from these servers and removes all
    owned indication subscriptions, owned indication filters and owned
    listener destinations.
    """
    # Snapshot the keys up front; remove_server() mutates self._servers.
    server_ids = list(self._servers.keys())
    for server_id in server_ids:
        self.remove_server(server_id)
Remove all registered WBEM servers from the subscription manager . This also unregisters listeners from these servers and removes all owned indication subscriptions owned indication filters and owned listener destinations .
39
34
28,352
def add_listener_destinations(self, server_id, listener_urls, owned=True):
    """Register WBEM listeners to be the target of indications sent by a
    WBEM server.

    Parameters:
        server_id: Server ID of the target WBEM server.
        listener_urls: A single listener URL string, or a list of them.
        owned (bool): Whether the created destinations are owned by this
            subscription manager.

    Returns:
        List of created listener destination instances.
    """
    # server_id is validated in _create_...() method.

    # If list, recursively call this function with each list item.
    if isinstance(listener_urls, list):
        dest_insts = []
        for listener_url in listener_urls:
            # Bug fix: propagate the 'owned' flag to the recursive call.
            # Previously it was dropped, so list input always created
            # owned destinations regardless of the caller's choice.
            new_dest_insts = self.add_listener_destinations(
                server_id, listener_url, owned)
            dest_insts.extend(new_dest_insts)
        return dest_insts

    # Here, the variable will be a single list item.
    listener_url = listener_urls
    dest_inst = self._create_destination(server_id, listener_url, owned)
    return [dest_inst]
Register WBEM listeners to be the target of indications sent by a WBEM server .
175
17
28,353
def get_owned_destinations(self, server_id):
    """Return the listener destinations in a WBEM server that are owned by
    this subscription manager.

    A shallow copy is returned so callers cannot mutate the internal
    bookkeeping list.
    """
    self._get_server(server_id)  # validates server_id
    owned = self._owned_destinations[server_id]
    return list(owned)
Return the listener destinations in a WBEM server owned by this subscription manager .
48
15
28,354
def get_all_destinations(self, server_id):
    """Return all listener destination instances in the Interop namespace
    of the specified WBEM server, regardless of ownership."""
    server = self._get_server(server_id)  # validates server_id
    return server.conn.EnumerateInstances(
        DESTINATION_CLASSNAME, namespace=server.interop_ns)
Return all listener destinations in a WBEM server .
62
10
28,355
def remove_destinations(self, server_id, destination_paths):
    # pylint: disable=line-too-long  # noqa: E501
    """Remove listener destinations from a WBEM server, by deleting the
    listener destination instances in the server.

    destination_paths may be a single instance path or a list of them.

    Raises:
        CIMError: CIM_ERR_FAILED if a destination is still referenced by
            subscriptions (simulating the DSP1054 1.2 server behavior).
    """
    # Validate server_id
    server = self._get_server(server_id)
    conn_id = server.conn.conn_id if server.conn is not None else None

    # If list, recursively call this function with each list item.
    if isinstance(destination_paths, list):
        for dest_path in destination_paths:
            self.remove_destinations(server_id, dest_path)
        return

    # Here, the variable will be a single list item.
    dest_path = destination_paths

    # Verify referencing subscriptions.
    ref_paths = server.conn.ReferenceNames(
        dest_path, ResultClass=SUBSCRIPTION_CLASSNAME)
    if ref_paths:
        # DSP1054 1.2 defines that this CIM error is raised by the server
        # in that case, so we simulate that behavior on the client side.
        raise CIMError(
            CIM_ERR_FAILED,
            "The listener destination is referenced by subscriptions.",
            conn_id=conn_id)

    server.conn.DeleteInstance(dest_path)

    inst_list = self._owned_destinations[server_id]
    # We iterate backwards because we change the list
    for i in six.moves.range(len(inst_list) - 1, -1, -1):
        inst = inst_list[i]
        if inst.path == dest_path:
            del inst_list[i]
Remove listener destinations from a WBEM server by deleting the listener destination instances in the server .
338
18
28,356
def get_owned_filters(self, server_id):
    """Return the indication filters in a WBEM server that are owned by
    this subscription manager.

    A shallow copy is returned so callers cannot mutate the internal
    bookkeeping list.
    """
    self._get_server(server_id)  # validates server_id
    owned = self._owned_filters[server_id]
    return list(owned)
Return the indication filters in a WBEM server owned by this subscription manager .
48
15
28,357
def get_all_filters(self, server_id):
    """Return all indication filter instances in the Interop namespace of
    the specified WBEM server, regardless of ownership."""
    server = self._get_server(server_id)  # validates server_id
    return server.conn.EnumerateInstances(
        'CIM_IndicationFilter', namespace=server.interop_ns)
Return all indication filters in a WBEM server .
63
10
28,358
def remove_filter(self, server_id, filter_path):
    """Remove an indication filter from a WBEM server, by deleting the
    indication filter instance in the WBEM server.

    Raises:
        CIMError: CIM_ERR_FAILED if the filter is still referenced by
            subscriptions (simulating the DSP1054 1.2 server behavior).
    """
    # Validate server_id
    server = self._get_server(server_id)
    conn_id = server.conn.conn_id if server.conn is not None else None

    # Verify referencing subscriptions.
    ref_paths = server.conn.ReferenceNames(
        filter_path, ResultClass=SUBSCRIPTION_CLASSNAME)
    if ref_paths:
        # DSP1054 1.2 defines that this CIM error is raised by the server
        # in that case, so we simulate that behavior on the client side.
        raise CIMError(
            CIM_ERR_FAILED,
            "The indication filter is referenced by subscriptions.",
            conn_id=conn_id)

    server.conn.DeleteInstance(filter_path)

    inst_list = self._owned_filters[server_id]
    # We iterate backwards because we change the list
    for i in six.moves.range(len(inst_list) - 1, -1, -1):
        inst = inst_list[i]
        if inst.path == filter_path:
            del inst_list[i]
Remove an indication filter from a WBEM server by deleting the indication filter instance in the WBEM server .
245
21
28,359
def get_owned_subscriptions(self, server_id):
    """Return the indication subscriptions in a WBEM server that are owned
    by this subscription manager.

    A shallow copy is returned so callers cannot mutate the internal
    bookkeeping list.
    """
    self._get_server(server_id)  # validates server_id
    owned = self._owned_subscriptions[server_id]
    return list(owned)
Return the indication subscriptions in a WBEM server owned by this subscription manager .
50
15
28,360
def get_all_subscriptions(self, server_id):
    """Return all indication subscription instances in the Interop
    namespace of the specified WBEM server, regardless of ownership."""
    server = self._get_server(server_id)  # validates server_id
    return server.conn.EnumerateInstances(
        SUBSCRIPTION_CLASSNAME, namespace=server.interop_ns)
Return all indication subscriptions in a WBEM server .
62
10
28,361
def _create_destination(self, server_id, dest_url, owned):
    """Create a listener destination instance in the Interop namespace of
    a WBEM server and return that instance.

    For owned destinations, an existing instance with the same path is
    reused (and modified if its properties differ); otherwise a new
    instance is created on the server.
    """
    # Validate server_id
    server = self._get_server(server_id)

    # validate the URL by reconstructing it. Do not allow defaults
    host, port, ssl = parse_url(dest_url, allow_defaults=False)
    schema = 'https' if ssl else 'http'
    listener_url = '{0}://{1}:{2}'.format(schema, host, port)

    this_host = getfqdn()
    ownership = "owned" if owned else "permanent"

    dest_path = CIMInstanceName(DESTINATION_CLASSNAME,
                                namespace=server.interop_ns)

    dest_inst = CIMInstance(DESTINATION_CLASSNAME)
    dest_inst.path = dest_path
    dest_inst['CreationClassName'] = DESTINATION_CLASSNAME
    dest_inst['SystemCreationClassName'] = SYSTEM_CREATION_CLASSNAME
    dest_inst['SystemName'] = this_host
    # The Name key encodes ownership, manager id, and a UUID so owned
    # instances can be recognized later (see add_server()).
    dest_inst['Name'] = _format('pywbemdestination:{0}:{1}:{2}',
                                ownership, self._subscription_manager_id,
                                uuid.uuid4())
    dest_inst['Destination'] = listener_url

    if owned:
        for i, inst in enumerate(self._owned_destinations[server_id]):
            if inst.path == dest_path:
                # It already exists, now check its properties
                if inst != dest_inst:
                    server.conn.ModifyInstance(dest_inst)
                    dest_inst = server.conn.GetInstance(dest_path)
                    self._owned_destinations[server_id][i] = dest_inst
                return dest_inst
        dest_path = server.conn.CreateInstance(dest_inst)
        dest_inst = server.conn.GetInstance(dest_path)
        self._owned_destinations[server_id].append(dest_inst)
    else:
        # Responsibility to ensure it does not exist yet is with the user
        dest_path = server.conn.CreateInstance(dest_inst)
        dest_inst = server.conn.GetInstance(dest_path)
    return dest_inst
Create a listener destination instance in the Interop namespace of a WBEM server and return that instance .
497
20
28,362
def _create_subscription(self, server_id, dest_path, filter_path, owned):
    """Create an indication subscription instance in the Interop namespace
    of a WBEM server and return that instance.

    For owned subscriptions, an existing instance with the same path is
    simply returned; otherwise a new instance is created on the server.
    """
    # Validate server_id
    server = self._get_server(server_id)

    sub_path = CIMInstanceName(SUBSCRIPTION_CLASSNAME,
                               namespace=server.interop_ns)

    sub_inst = CIMInstance(SUBSCRIPTION_CLASSNAME)
    sub_inst.path = sub_path
    sub_inst['Filter'] = filter_path
    sub_inst['Handler'] = dest_path

    if owned:
        for inst in self._owned_subscriptions[server_id]:
            if inst.path == sub_path:
                # It does not have any properties besides its keys,
                # so checking the path is sufficient.
                return sub_inst
        sub_path = server.conn.CreateInstance(sub_inst)
        sub_inst = server.conn.GetInstance(sub_path)
        self._owned_subscriptions[server_id].append(sub_inst)
    else:
        # Responsibility to ensure it does not exist yet is with the user
        sub_path = server.conn.CreateInstance(sub_inst)
        sub_inst = server.conn.GetInstance(sub_path)
    return sub_inst
Create an indication subscription instance in the Interop namespace of a WBEM server and return that instance .
267
20
28,363
def CreateClass(self, *args, **kwargs):
    """Override the CreateClass method in MOFWBEMConnection.

    Records the new class in the local repository and validates that all
    classes referenced by reference properties/parameters and by
    EmbeddedInstance qualifiers exist in the repository.

    Raises:
        CIMError: CIM_ERR_INVALID_PARAMETER if a referenced class does
            not exist in the repository.
    """
    cc = args[0] if args else kwargs['NewClass']
    namespace = self.getns()
    try:
        self.compile_ordered_classnames.append(cc.classname)
        # The following generates an exception for each new ns
        self.classes[self.default_namespace][cc.classname] = cc
    except KeyError:
        self.classes[namespace] = NocaseDict({cc.classname: cc})

    # Validate that references and embedded instance properties, methods,
    # etc. have classes that exist in repo. This also instantiates the
    # mechanism that ensures that prerequisite classes are inserted
    # into the repo.
    objects = list(cc.properties.values())
    for meth in cc.methods.values():
        objects += list(meth.parameters.values())

    for obj in objects:
        # Validate that reference_class exists in repo
        if obj.type == 'reference':
            try:
                self.GetClass(obj.reference_class, LocalOnly=True,
                              IncludeQualifiers=True)
            except CIMError as ce:
                if ce.status_code == CIM_ERR_NOT_FOUND:
                    raise CIMError(
                        CIM_ERR_INVALID_PARAMETER,
                        _format("Class {0!A} referenced by element {1!A} "
                                "of class {2!A} in namespace {3!A} does "
                                "not exist",
                                obj.reference_class, obj.name,
                                cc.classname, self.getns()),
                        conn_id=self.conn_id)
                raise
        elif obj.type == 'string':
            if 'EmbeddedInstance' in obj.qualifiers:
                eiqualifier = obj.qualifiers['EmbeddedInstance']
                try:
                    self.GetClass(eiqualifier.value, LocalOnly=True,
                                  IncludeQualifiers=False)
                except CIMError as ce:
                    if ce.status_code == CIM_ERR_NOT_FOUND:
                        # Note: fixed typo in this error message
                        # ("EmbeddInstance" -> "EmbeddedInstance").
                        raise CIMError(
                            CIM_ERR_INVALID_PARAMETER,
                            _format("Class {0!A} specified by "
                                    "EmbeddedInstance qualifier on "
                                    "element {1!A} of class {2!A} in "
                                    "namespace {3!A} does not exist",
                                    eiqualifier.value, obj.name,
                                    cc.classname, self.getns()),
                            conn_id=self.conn_id)
                    raise

    ccr = self.conn._resolve_class(  # pylint: disable=protected-access
        cc, namespace, self.qualifiers[namespace])
    if namespace not in self.classes:
        self.classes[namespace] = NocaseDict()
    self.classes[namespace][ccr.classname] = ccr
    try:
        self.class_names[namespace].append(ccr.classname)
    except KeyError:
        self.class_names[namespace] = [ccr.classname]
Override the CreateClass method in MOFWBEMConnection
659
11
28,364
def _get_class ( self , superclass , namespace = None , local_only = False , include_qualifiers = True , include_classorigin = True ) : return self . GetClass ( superclass , namespace = namespace , local_only = local_only , include_qualifiers = include_qualifiers , include_classorigin = include_classorigin )
This method is just rename of GetClass to support same method with both MOFWBEMConnection and FakedWBEMConnection
77
25
28,365
def xml_to_tupletree_sax ( xml_string , meaning , conn_id = None ) : handler = CIMContentHandler ( ) # The following conversion to a byte string is required for two reasons: # 1. xml.sax.parseString() raises UnicodeEncodeError for unicode strings # that contain any non-ASCII characters (despite its Python 2.7 # documentation which states that would be supported). # 2. The SAX parser in Python 2.6 and 3.4 (pywbem does not support 3.1 - # 3.3) does not accept unicode strings, raising: # SAXParseException: "<unknown>:1:1: not well-formed (invalid token)" # or: # TypeError: 'str' does not support the buffer interface xml_string = _ensure_bytes ( xml_string ) try : xml . sax . parseString ( xml_string , handler , None ) except xml . sax . SAXParseException as exc : # xml.sax.parse() is documented to only raise SAXParseException. In # earlier versions of this code, xml.sax.parseString() has been found # to raise UnicodeEncodeError when unicode strings were passed, but # that is no longer done, so that exception is no longer caught. # Other exception types are unexpected and will perculate upwards. # Traceback of the exception that was caught org_tb = sys . 
exc_info ( ) [ 2 ] # Improve quality of exception info (the check...() functions may # raise XMLParseError): _chk_str = check_invalid_utf8_sequences ( xml_string , meaning , conn_id ) check_invalid_xml_chars ( _chk_str , meaning , conn_id ) # If the checks above pass, re-raise the SAX exception info, with its # original traceback info: lineno , colno , new_colno , line = get_failing_line ( xml_string , str ( exc ) ) if lineno is not None : marker_line = ' ' * ( new_colno - 1 ) + '^' xml_msg = _format ( "Line {0} column {1} of XML string (as binary UTF-8 string):\n" "{2}\n" "{3}" , lineno , colno , line , marker_line ) else : xml_msg = _format ( "XML string (as binary UTF-8 string):\n" "{0}" , line ) pe = XMLParseError ( _format ( "XML parsing error encountered in {0}: {1}\n{2}\n" , meaning , exc , xml_msg ) , conn_id = conn_id ) six . reraise ( type ( pe ) , pe , org_tb ) # ignore this call in traceback! return handler . root
Parse an XML string into tupletree with SAX parser .
627
15
28,366
def check_invalid_xml_chars ( xml_string , meaning , conn_id = None ) : context_before = 16 # number of chars to print before any bad chars context_after = 16 # number of chars to print after any bad chars try : assert isinstance ( xml_string , six . text_type ) except AssertionError : raise TypeError ( _format ( "xml_string parameter is not a unicode string, but has " "type {0}" , type ( xml_string ) ) ) # Check for Unicode characters that cannot legally be represented as XML # characters. ixc_list = list ( ) last_ixc_pos = - 2 for m in _ILLEGAL_XML_CHARS_RE . finditer ( xml_string ) : ixc_pos = m . start ( 1 ) ixc_char = m . group ( 1 ) if ixc_pos > last_ixc_pos + 1 : ixc_list . append ( ( ixc_pos , ixc_char ) ) last_ixc_pos = ixc_pos if ixc_list : exc_txt = "Invalid XML characters found in {0}:" . format ( meaning ) for ( ixc_pos , ixc_char ) in ixc_list : cpos1 = max ( ixc_pos - context_before , 0 ) cpos2 = min ( ixc_pos + context_after , len ( xml_string ) ) exc_txt += _format ( "\n At offset {0}: U+{1:04X}, " "CIM-XML snippet: {2!A}" , ixc_pos , ord ( ixc_char ) , xml_string [ cpos1 : cpos2 ] ) raise XMLParseError ( exc_txt , conn_id = conn_id )
Examine an XML string and raise a pywbem . XMLParseError exception if the string contains characters that cannot legally be represented as XML characters .
414
31
28,367
def wrapped_spawn(self, cmdElements, tag):
    """Wrap spawn with unique-ish Travis fold markers around its output."""
    import uuid
    fold_id = uuid.uuid1()
    print("travis_fold:start:%s-%s" % (tag, fold_id))
    try:
        spawn0(self, cmdElements)
    finally:
        # Always close the fold, even when the spawned command fails.
        print("travis_fold:end:%s-%s" % (tag, fold_id))
Wrap spawn with unique-ish Travis fold prints.
86
11
28,368
def _build(self, build_method):
    """Build an image from the provided build_args.

    Writes build_args as JSON into a temporary directory, starts the build
    container via build_method, streams its logs, and returns a
    BuildResults object. On Ctrl-C the build container is force-removed
    and a BuildResults with return_code 1 is returned. The temporary
    directory is always cleaned up.
    """
    logger.info("building image '%s'", self.image)
    self.ensure_not_built()
    self.temp_dir = tempfile.mkdtemp()
    temp_path = os.path.join(self.temp_dir, BUILD_JSON)
    try:
        with open(temp_path, 'w') as build_json:
            json.dump(self.build_args, build_json)
        self.build_container_id = build_method(self.build_image,
                                               self.temp_dir)
        try:
            logs_gen = self.dt.logs(self.build_container_id, stream=True)
            wait_for_command(logs_gen)
            return_code = self.dt.wait(self.build_container_id)
        except KeyboardInterrupt:
            logger.info("killing build container on user's request")
            self.dt.remove_container(self.build_container_id, force=True)
            results = BuildResults()
            results.return_code = 1
            return results
        else:
            results = self._load_results(self.build_container_id)
            results.return_code = return_code
            return results
    finally:
        shutil.rmtree(self.temp_dir)
build image from provided build_args
281
7
28,369
def _load_results(self, container_id):
    """Load results from the most recent build in *container_id*.

    Returns a BuildResults with the container's logs and id, or None when
    no temp dir was set up (i.e. no build was started).
    """
    if self.temp_dir:
        dt = DockerTasker()
        # FIXME: load results only when requested
        # results_path = os.path.join(self.temp_dir, RESULTS_JSON)
        # df_path = os.path.join(self.temp_dir, 'Dockerfile')
        # try:
        #     with open(results_path, 'r') as results_fp:
        #         results = json.load(results_fp, cls=BuildResultsJSONDecoder)
        # except (IOError, OSError) as ex:
        #     logger.error("Can't open results: '%s'", repr(ex))
        #     for l in self.dt.logs(self.build_container_id, stream=False):
        #         logger.debug(l.strip())
        #     raise RuntimeError("Can't open results: '%s'" % repr(ex))
        # results.dockerfile = open(df_path, 'r').read()
        results = BuildResults()
        results.build_logs = dt.logs(container_id, stream=False)
        results.container_id = container_id
        return results
load results from recent build
258
5
28,370
def commit_buildroot(self):
    """Create an image from the current build container ("buildroot")
    and return the committed image id."""
    logger.info("committing buildroot")
    self.ensure_is_built()

    commit_message = "docker build of '%s' (%s)" % (self.image, self.uri)
    # save the time when image was built
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    self.buildroot_image_name = ImageName(
        repo="buildroot-%s" % self.image, tag=timestamp)
    self.buildroot_image_id = self.dt.commit_container(
        self.build_container_id, commit_message)
    return self.buildroot_image_id
create image from buildroot
164
5
28,371
def create_main_synopsis(self, parser):
    """Create the man-page SYNOPSIS section from the main argparse parser.

    NOTE(review): relies on argparse private attributes (parser._actions,
    parser._mutually_exclusive_groups) — confirm against the supported
    argparse version.
    """
    self.add_usage(parser.usage, parser._actions,
                   parser._mutually_exclusive_groups, prefix='')
    usage = self._format_usage(None, parser._actions,
                               parser._mutually_exclusive_groups, '')
    # Strip the program name; it is re-added below in bold roff markup.
    usage = usage.replace('%s ' % self._prog, '')
    usage = '.SH SYNOPSIS\n \\fB%s\\fR %s\n' % (self._markup(self._prog),
                                                usage)
    return usage
create synopsis from main parser
131
5
28,372
def create_subcommand_synopsis(self, parser):
    """Show usage (positional arguments only) for a subcommand, in bold."""
    positionals = parser._get_positional_actions()
    self.add_usage(parser.usage, positionals, None, prefix='')
    usage = self._format_usage(parser.usage, positionals, None, '')
    return self._bold(usage)
show usage with description for commands
79
6
28,373
def get_worker_build_info(workflow, platform):
    """Obtain worker build information for a given platform."""
    workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
    build_infos = workspace[WORKSPACE_KEY_BUILD_INFO]
    return build_infos[platform]
Obtain worker build information for a given platform
47
9
28,374
def override_build_kwarg(workflow, k, v, platform=None):
    """Override a build-kwarg for worker builds.

    A platform of None applies the override to all platforms.
    """
    key = OrchestrateBuildPlugin.key
    # Use None to indicate an override for all platforms
    workspace = workflow.plugin_workspace.setdefault(key, {})
    overrides = workspace.setdefault(WORKSPACE_KEY_OVERRIDE_KWARGS, {})
    platform_overrides = overrides.setdefault(platform, {})
    platform_overrides[k] = v
Override a build-kwarg for all worker builds.
106
11
28,375
def wait_for_any_cluster(contexts):
    """Sleep until the earliest cluster retry deadline has passed.

    Parameters:
        contexts: dict mapping cluster name to a retry context with
            'retry_at' (datetime) and 'failed' (bool) attributes.

    Raises:
        AllClustersFailedException: every cluster has failed.
    """
    try:
        earliest_retry_at = min(ctx.retry_at for ctx in contexts.values()
                                if not ctx.failed)
    except ValueError:  # can't take min() of empty sequence
        raise AllClustersFailedException(
            "Could not find appropriate cluster for worker build.")

    time_until_next = earliest_retry_at - dt.datetime.now()
    # Bug fix: timedelta.seconds is only the seconds *component*
    # (0-86399, whole seconds). Use total_seconds() so waits longer than
    # a day and sub-second remainders are handled correctly.
    time.sleep(max(timedelta(seconds=0), time_until_next).total_seconds())
Wait until any of the clusters is out of retry-wait.
124
13
28,376
def validate_arrangement_version(self):
    """Validate that the requested arrangement_version is supported.

    Versions <= 5 are deprecated; for now only a warning is logged.
    """
    arrangement_version = self.build_kwargs['arrangement_version']
    if arrangement_version is None:
        return
    if arrangement_version > 5:
        return
    # TODO: raise as ValueError in release 1.6.38+
    self.log.warning("arrangement_version <= 5 is deprecated and will be"
                     " removed in release 1.6.38")
Validate if the arrangement_version is supported
93
9
28,377
def get_clusters(self, platform, retry_contexts, all_clusters):
    """Return cluster infos for the platform, sorted by load.

    Ties in load are broken by cluster priority (the second sort below is
    stable, so the priority order from the first sort is preserved).
    Clusters that fail to report are scheduled for retry; fully failed
    clusters are dropped from the candidate set.
    """
    possible_cluster_info = {}
    candidates = set(copy.copy(all_clusters))
    while candidates and not possible_cluster_info:
        # Block until at least one candidate is out of its retry-wait.
        wait_for_any_cluster(retry_contexts)

        for cluster in sorted(candidates, key=attrgetter('priority')):
            ctx = retry_contexts[cluster.name]
            if ctx.in_retry_wait:
                continue
            if ctx.failed:
                continue
            try:
                cluster_info = self.get_cluster_info(cluster, platform)
                possible_cluster_info[cluster] = cluster_info
            except OsbsException:
                ctx.try_again_later(self.find_cluster_retry_delay)
        candidates -= set([c for c in candidates
                           if retry_contexts[c.name].failed])

    # Sort by priority first, then (stable) by load.
    ret = sorted(possible_cluster_info.values(),
                 key=lambda c: c.cluster.priority)
    ret = sorted(ret, key=lambda c: c.load)
    return ret
return clusters sorted by load .
249
6
28,378
def get_koji_upload_dir():
    """Create a unique path name for uploading files to Koji.

    Combines the current wall-clock time with random letters so that
    concurrent builds get distinct upload directories.
    """
    dir_prefix = 'koji-upload'
    random_chars = ''.join(random.choice(ascii_letters) for _ in range(8))
    unique_fragment = '%r.%s' % (time.time(), random_chars)
    return os.path.join(dir_prefix, unique_fragment)
Create a path name for uploading files to
94
8
28,379
def select_and_start_cluster(self, platform):
    """Choose a cluster and start a build on it.

    Tries clusters in the order returned by get_clusters(); on failure the
    cluster is scheduled for retry. When all clusters have failed, a
    WorkerBuildInfo recording the failure is appended instead.
    """
    clusters = self.reactor_config.get_enabled_clusters_for_platform(
        platform)

    if not clusters:
        raise UnknownPlatformException('No clusters found for platform {}!'
                                       .format(platform))

    retry_contexts = {
        cluster.name: ClusterRetryContext(self.max_cluster_fails)
        for cluster in clusters
    }

    while True:
        try:
            possible_cluster_info = self.get_clusters(platform,
                                                      retry_contexts,
                                                      clusters)
        except AllClustersFailedException as ex:
            # Record the failure as a build info so callers can report it.
            cluster = ClusterInfo(None, platform, None, None)
            build_info = WorkerBuildInfo(build=None,
                                         cluster_info=cluster,
                                         logger=self.log)
            build_info.monitor_exception = repr(ex)
            self.worker_builds.append(build_info)
            return

        for cluster_info in possible_cluster_info:
            ctx = retry_contexts[cluster_info.cluster.name]
            try:
                self.log.info('Attempting to start build for platform %s on cluster %s',
                              platform, cluster_info.cluster.name)
                self.do_worker_build(cluster_info)
                return
            except OsbsException:
                ctx.try_again_later(self.failure_retry_delay)
Choose a cluster and start a build on it
292
9
28,380
def set_build_image(self):
    """Override build_image for workers to be the same as the orchestrator
    build image.

    Per-platform overrides from build_image_override take precedence; the
    remaining platforms are covered either directly (when they match the
    orchestrator's platform) or via a manifest-list check on the build
    image discovered from the BuildConfig, annotations, or image stream.
    """
    current_platform = platform.processor()
    # assumes empty processor() means x86_64 — TODO confirm
    orchestrator_platform = current_platform or 'x86_64'
    current_buildimage = self.get_current_buildimage()

    for plat, build_image in self.build_image_override.items():
        self.log.debug('Overriding build image for %s platform to %s',
                       plat, build_image)
        self.build_image_digests[plat] = build_image

    manifest_list_platforms = self.platforms - \
        set(self.build_image_override.keys())
    if not manifest_list_platforms:
        self.log.debug('Build image override used for all platforms, '
                       'skipping build image manifest list checks')
        return

    # orchestrator platform is same as platform on which we want to built on,
    # so we can use the same image
    if manifest_list_platforms == set([orchestrator_platform]):
        self.build_image_digests[orchestrator_platform] = \
            current_buildimage
        return

    # BuildConfig exists
    build_image, imagestream = self.get_image_info_from_buildconfig()
    if not (build_image or imagestream):
        # get image build from build metadata, which is set for direct
        # builds; this is explicitly set by osbs-client, it isn't default
        # OpenShift behaviour
        build_image, imagestream = self.get_image_info_from_annotations()

    # if imageStream is used
    if imagestream:
        build_image = self.get_build_image_from_imagestream(imagestream)

    # we have build_image with tag, so we can check for manifest list
    if build_image:
        self.check_manifest_list(build_image, orchestrator_platform,
                                 manifest_list_platforms,
                                 current_buildimage)
Overrides build_image for worker to be same as in orchestrator build
411
16
28,381
def get_manifest(self, session, repository, ref):
    """Download a manifest from a registry; *ref* can be a digest or a tag.

    Returns a tuple (content, digest, media_type, size).
    """
    self.log.debug("%s: Retrieving manifest for %s:%s",
                   session.registry, repository, ref)

    accept_types = ', '.join(self.manifest_media_types)
    url = '/v2/{}/manifests/{}'.format(repository, ref)
    response = session.get(url, headers={'Accept': accept_types})
    response.raise_for_status()

    content = response.content
    digest = response.headers['Docker-Content-Digest']
    media_type = response.headers['Content-Type']
    size = int(response.headers['Content-Length'])
    return content, digest, media_type, size
Downloads a manifest from a registry . ref can be a digest or a tag .
154
17
28,382
def link_manifest_references_into_repository(self, session, manifest,
                                             media_type, source_repo,
                                             target_repo):
    """Link all the blobs referenced by the manifest from source_repo
    into target_repo."""
    if source_repo == target_repo:
        # Blobs are already in the right repository.
        return

    parsed = json.loads(manifest.decode('utf-8'))

    if media_type not in (MEDIA_TYPE_DOCKER_V2_SCHEMA2, MEDIA_TYPE_OCI_V1):
        # manifest list support could be added here, but isn't needed
        # currently, since we never copy a manifest list as a whole
        # between repositories
        raise RuntimeError("Unhandled media-type {}".format(media_type))

    digests = [parsed['config']['digest']]
    digests.extend(layer['digest'] for layer in parsed['layers'])

    for digest in digests:
        self.link_blob_into_repository(session, digest, source_repo,
                                       target_repo)
Links all the blobs referenced by the manifest from source_repo into target_repo .
226
20
28,383
def store_manifest_in_repository(self, session, manifest, media_type,
                                 source_repo, target_repo,
                                 digest=None, tag=None):
    """Store the manifest into target_repo, possibly tagging it.

    This may involve copying referenced blobs from source_repo.

    Raises:
        RuntimeError: neither digest nor tag was specified.
    """
    if tag:
        self.log.debug("%s: Tagging manifest (or list) from %s as %s:%s",
                       session.registry, source_repo, target_repo, tag)
        ref = tag
    elif digest:
        self.log.debug("%s: Storing manifest (or list) %s from %s in %s",
                       session.registry, digest, source_repo, target_repo)
        ref = digest
    else:
        raise RuntimeError("Either digest or tag must be specified")

    # Make sure every blob the manifest references exists in target_repo
    # before uploading the manifest itself.
    self.link_manifest_references_into_repository(
        session, manifest, media_type, source_repo, target_repo)

    url = '/v2/{}/manifests/{}'.format(target_repo, ref)
    response = session.put(url, data=manifest,
                           headers={'Content-Type': media_type})
    response.raise_for_status()
Stores the manifest into target_repo possibly tagging it . This may involve copying referenced blobs from source_repo .
253
26
28,384
def build_list(self, manifests):
    """Build a manifest list (or OCI image index) out of the given worker
    manifests.

    Returns a tuple (list_media_type, list_json).

    Raises:
        PluginFailedException: manifests have inconsistent or unsupported
            media types.
    """
    media_type = manifests[0]['media_type']
    if not all(m['media_type'] == media_type for m in manifests):
        raise PluginFailedException(
            'worker manifests have inconsistent types: {}'
            .format(manifests))

    if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
        list_type = MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST
    elif media_type == MEDIA_TYPE_OCI_V1:
        list_type = MEDIA_TYPE_OCI_V1_INDEX
    else:
        raise PluginFailedException(
            'worker manifests have unsupported type: {}'
            .format(media_type))

    entries = [{
        "mediaType": media_type,
        "size": m['size'],
        "digest": m['digest'],
        "platform": {
            "architecture": m['architecture'],
            "os": "linux"
        }
    } for m in manifests]

    list_json = json.dumps({
        "schemaVersion": 2,
        "mediaType": list_type,
        "manifests": entries,
    }, indent=4)
    return list_type, list_json
Builds a manifest list or OCI image out of the given manifests
281
14
28,385
def group_manifests_and_tag(self, session, worker_digests):
    """Create a manifest list (or OCI image index) grouping the worker manifests.

    The grouped manifest is pushed to every configured tag, and its digest is
    recorded in the workflow's push_conf.

    :param session: registry session to operate on
    :param worker_digests: dict mapping platform name to a dict with
        'repository', 'digest' and 'version' keys describing a worker image
    :return: tuple of (repository name without namespace, ManifestDigest)
    """
    self.log.info("%s: Creating manifest list", session.registry)

    # Extract information about the manifests that we will group - we get the
    # size and content type of the manifest by querying the registry
    manifests = []
    for platform, worker_image in worker_digests.items():
        repository = worker_image['repository']
        digest = worker_image['digest']
        media_type = get_manifest_media_type(worker_image['version'])
        # Manifests whose media type is not accepted by this plugin are skipped.
        if media_type not in self.manifest_media_types:
            continue
        content, _, media_type, size = self.get_manifest(session, repository, digest)

        manifests.append({
            'content': content,
            'repository': repository,
            'digest': digest,
            'size': size,
            'media_type': media_type,
            # map the platform name to a Go architecture name; fall back to
            # the platform name itself when no mapping is configured
            'architecture': self.goarch.get(platform, platform),
        })

    list_type, list_json = self.build_list(manifests)
    self.log.info("%s: Created manifest, Content-Type=%s\n%s",
                  session.registry, list_type, list_json)

    # Now push the manifest list to the registry once per each tag
    self.log.info("%s: Tagging manifest list", session.registry)
    for image in self.workflow.tag_conf.images:
        target_repo = image.to_str(registry=False, tag=False)
        # We have to call store_manifest_in_repository directly for each
        # referenced manifest, since they potentially come from different repos
        for manifest in manifests:
            self.store_manifest_in_repository(session, manifest['content'],
                                              manifest['media_type'],
                                              manifest['repository'],
                                              target_repo,
                                              digest=manifest['digest'])
        # Store the grouped list itself under the tag; source == target so no
        # extra blob linking happens inside store_manifest_in_repository.
        self.store_manifest_in_repository(session, list_json, list_type,
                                          target_repo, target_repo, tag=image.tag)

    # Get the digest of the manifest list using one of the tags
    registry_image = self.workflow.tag_conf.unique_images[0]
    _, digest_str, _, _ = self.get_manifest(
        session, registry_image.to_str(registry=False, tag=False),
        registry_image.tag)

    if list_type == MEDIA_TYPE_OCI_V1_INDEX:
        digest = ManifestDigest(oci_index=digest_str)
    else:
        digest = ManifestDigest(v2_list=digest_str)

    # And store the manifest list in the push_conf
    push_conf_registry = self.workflow.push_conf.add_docker_registry(
        session.registry, insecure=session.insecure)
    for image in self.workflow.tag_conf.images:
        push_conf_registry.digests[image.tag] = digest

    self.log.info("%s: Manifest list digest is %s", session.registry, digest_str)
    return registry_image.get_repo(explicit_namespace=False), digest
Creates a manifest list or OCI image index that groups the different manifests in worker_digests then tags the result with all the configured tags found in workflow . tag_conf .
716
38
28,386
def tag_manifest_into_registry(self, session, worker_digest):
    """Tag the manifest identified by worker_digest under every configured tag.

    :param session: registry session to operate on
    :param worker_digest: dict with 'digest' and 'repository' keys
    :raises RuntimeError: on an unexpected manifest media type
    """
    self.log.info("%s: Tagging manifest", session.registry)

    digest = worker_digest['digest']
    source_repo = worker_digest['repository']

    image_manifest, _, media_type, _ = self.get_manifest(session, source_repo, digest)

    if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
        # NOTE(review): the schema-2 digest is recorded under the 'v1' keyword
        # here; confirm downstream consumers expect that keying.
        digests = ManifestDigest(v1=digest)
    elif media_type == MEDIA_TYPE_OCI_V1:
        digests = ManifestDigest(oci=digest)
    else:
        raise RuntimeError("Unexpected media type found in worker repository: {}"
                           .format(media_type))

    push_conf_registry = self.workflow.push_conf.add_docker_registry(
        session.registry, insecure=session.insecure)

    for image in self.workflow.tag_conf.images:
        target_repo = image.to_str(registry=False, tag=False)
        self.store_manifest_in_repository(session, image_manifest, media_type,
                                          source_repo, target_repo, tag=image.tag)
        # add a tag for any plugins running later that expect it
        push_conf_registry.digests[image.tag] = digests
Tags the manifest identified by worker_digest into session . registry with all the configured tags found in workflow . tag_conf .
318
26
28,387
def detect_parent_image_nvr(self, image_name, inspect_data=None):
    """Derive the Koji NVR of image_name from its component/version/release labels.

    :param image_name: image whose labels are examined
    :param inspect_data: optional pre-fetched inspect data; inspected on
                         demand via the workflow builder when None
    :return: "component-version-release" string, or None if any label is missing
    """
    if inspect_data is None:
        inspect_data = self.workflow.builder.parent_image_inspect(image_name)

    labels = Labels(inspect_data[INSPECT_CONFIG].get('Labels', {}))

    required = [Labels.LABEL_TYPE_COMPONENT,
                Labels.LABEL_TYPE_VERSION,
                Labels.LABEL_TYPE_RELEASE]
    found = []
    for label_type in required:
        try:
            _, value = labels.get_name_and_value(label_type)
        except KeyError:
            self.log.info("Failed to find label '%s' in parent image '%s'.",
                          labels.get_name(label_type), image_name)
        else:
            found.append(value)

    # All three labels are needed to assemble an NVR.
    if len(found) != len(required):
        self.log.info("Image '%s' NVR missing; not searching for Koji build.",
                      image_name)
        return None

    return '-'.join(found)
Look for the NVR labels if any in the image .
279
12
28,388
def wait_for_parent_image_build(self, nvr):
    """Poll Koji until the build that produced nvr appears.

    :param nvr: NVR string of the parent image build to wait for
    :return: the Koji build info dict
    :raises KojiParentBuildMissing: if the build never appears within
        poll_timeout, or appears in a state other than COMPLETE
    """
    self.log.info('Waiting for Koji build for parent image %s', nvr)
    deadline = time.time() + self.poll_timeout
    while time.time() < deadline:
        build = self.koji_session.getBuild(nvr)
        if build:
            self.log.info('Parent image Koji build found with id %s', build.get('id'))
            if build['state'] != koji.BUILD_STATES['COMPLETE']:
                raise KojiParentBuildMissing(
                    'Parent image Koji build for {} with id {} state is not COMPLETE.'
                    .format(nvr, build.get('id')))
            return build
        time.sleep(self.poll_interval)
    raise KojiParentBuildMissing(
        'Parent image Koji build NOT found for {}!'.format(nvr))
Given image NVR wait for the build that produced it to show up in koji . If it doesn't within the timeout raise an error .
222
29
28,389
def make_result(self):
    """Build the result dict preserved in the build metadata.

    :return: dict with base-image and parent-image build entries (only the
             non-empty ones), or None when there is nothing to report
    """
    candidates = (
        (BASE_IMAGE_KOJI_BUILD, self._base_image_build),
        (PARENT_IMAGES_KOJI_BUILDS, self._parent_builds),
    )
    result = {key: value for key, value in candidates if value}
    return result or None
Construct the result dict to be preserved in the build metadata .
84
12
28,390
def wait_for_command(logs_generator):
    """Drain logs_generator into a CommandResult and return it.

    :param logs_generator: iterator yielding log items to be parsed
    :return: CommandResult built from all items
    """
    logger.info("wait_for_command")
    command_result = CommandResult()
    for log_item in logs_generator:
        command_result.parse_item(log_item)
    logger.info("no more logs")
    return command_result
Create a CommandResult from given iterator
59
7
28,391
def print_version_of_tools():
    """Log the name, version and path of every tool reported by get_version_of_tools."""
    logger.info("Using these tools:")
    for tool_info in get_version_of_tools():
        logger.info("%s-%s at %s",
                    tool_info["name"], tool_info["version"], tool_info["path"])
print versions of used tools to logger
69
7
28,392
def guess_manifest_media_type(content):
    """Infer the media type of a manifest from its raw bytes.

    :param content: manifest content as bytes
    :return: the manifest's 'mediaType' value; the v1 media type when only
             schemaVersion=1 is present; otherwise None (undecodable content
             or no recognizable keys)
    """
    encoding = guess_json_utf(content)
    try:
        manifest = json.loads(content.decode(encoding))
    except (ValueError,           # Not valid JSON
            TypeError,            # Not an object
            UnicodeDecodeError):  # Unable to decode the bytes
        logger.exception("Unable to decode JSON")
        logger.debug("response content (%s): %r", encoding, content)
        return None

    if 'mediaType' in manifest:
        return manifest['mediaType']

    # no mediaType key
    if manifest.get('schemaVersion') == 1:
        return get_manifest_media_type('v1')
    logger.warning("no mediaType or schemaVersion=1 in manifest, keys: %s",
                   manifest.keys())
Guess the media type for the given manifest content
174
10
28,393
def manifest_is_media_type(response, media_type):
    """Attempt to confirm the returned manifest is of a given media type.

    :param response: registry response object (requests-style) for a manifest
    :param media_type: expected media type string, or None to test that the
                       media type could not be determined
    :return: bool
    """
    try:
        received_media_type = response.headers['Content-Type']
    except KeyError:
        # Guess media type from content
        logger.debug("No Content-Type header; inspecting content")
        received_media_type = guess_manifest_media_type(response.content)
        logger.debug("guessed media type: %s", received_media_type)

    if received_media_type is None:
        return media_type is None
    if media_type is None:
        # BUGFIX: previously fell through to media_type.rsplit and raised
        # AttributeError when a media type was received but None was expected.
        return False

    # Only compare prefix as response may use +prettyjws suffix
    # which is the case for signed manifest
    response_h_prefix = received_media_type.rsplit('+', 1)[0]
    request_h_prefix = media_type.rsplit('+', 1)[0]
    return response_h_prefix == request_h_prefix
Attempt to confirm the returned manifest is of a given media type
192
12
28,394
def get_manifest_list(image, registry, insecure=False, dockercfg_path=None):
    """Return the manifest-list response for image.

    :param image: image to query
    :param registry: registry to query
    :param insecure: whether to skip TLS verification
    :param dockercfg_path: optional path to registry credentials
    :return: registry response for the v2_list manifest (may be falsy if absent)
    """
    registry_session = RegistrySession(registry, insecure=insecure,
                                       dockercfg_path=dockercfg_path)
    response, _ = get_manifest(image, registry_session, 'v2_list')
    return response
Return manifest list for image .
77
6
28,395
def get_all_manifests(image, registry, insecure=False, dockercfg_path=None,
                      versions=('v1', 'v2', 'v2_list')):
    """Fetch the manifest response for each requested version of image.

    :param image: image to query
    :param registry: registry to query
    :param insecure: whether to skip TLS verification
    :param dockercfg_path: optional path to registry credentials
    :param versions: manifest versions to try
    :return: dict mapping version string to registry response; versions with
             no manifest available are omitted
    """
    registry_session = RegistrySession(registry, insecure=insecure,
                                       dockercfg_path=dockercfg_path)
    responses = ((version, get_manifest(image, registry_session, version)[0])
                 for version in versions)
    return {version: response for version, response in responses if response}
Return manifest digests for image .
112
7
28,396
def get_inspect_for_image(image, registry, insecure=False, dockercfg_path=None):
    """Return docker-inspect-style metadata for image, built from registry data.

    Prefers a manifest list (first platform entry), then a v2 manifest, then a
    v1 manifest, to locate the image config blob.

    :param image: image to inspect
    :param registry: registry to query
    :param insecure: whether to skip TLS verification
    :param dockercfg_path: optional path to registry credentials
    :return: dict with inspect-style keys (Id, RootFS, Created, Os, ...)
    :raises RuntimeError: when the image is missing, the manifest list contains
        a v2 schema 1 entry, or the config blob cannot be retrieved
    """
    all_man_digests = get_all_manifests(image, registry, insecure=insecure,
                                        dockercfg_path=dockercfg_path)
    blob_config = None
    config_digest = None
    image_inspect = {}

    # we have manifest list (get digest for 1st platform)
    if 'v2_list' in all_man_digests:
        man_list_json = all_man_digests['v2_list'].json()
        if man_list_json['manifests'][0]['mediaType'] != MEDIA_TYPE_DOCKER_V2_SCHEMA2:
            raise RuntimeError('Image {image_name}: v2 schema 1 '
                               'in manifest list'.format(image_name=image))
        v2_digest = man_list_json['manifests'][0]['digest']
        blob_config, config_digest = get_config_and_id_from_registry(
            image, registry, v2_digest, insecure=insecure,
            version='v2', dockercfg_path=dockercfg_path)
    # get config for v2 digest
    elif 'v2' in all_man_digests:
        blob_config, config_digest = get_config_and_id_from_registry(
            image, registry, image.tag, insecure=insecure,
            version='v2', dockercfg_path=dockercfg_path)
    # read config from v1
    elif 'v1' in all_man_digests:
        v1_json = all_man_digests['v1'].json()
        # Python 2 yields bytes for the embedded v1Compatibility JSON.
        if PY2:
            blob_config = json.loads(v1_json['history'][0]['v1Compatibility'].decode('utf-8'))
        else:
            blob_config = json.loads(v1_json['history'][0]['v1Compatibility'])
    else:
        raise RuntimeError("Image {image_name} not found: No v2 schema 1 image, or v2 schema 2 "
                           "image or list, found".format(image_name=image))

    # dictionary to convert config keys to inspect keys
    config_2_inspect = {
        'created': 'Created',
        'os': 'Os',
        'container_config': 'ContainerConfig',
        'architecture': 'Architecture',
        'docker_version': 'DockerVersion',
        'config': 'Config',
    }

    if not blob_config:
        raise RuntimeError("Image {image_name}: Couldn't get inspect data "
                           "from digest config".format(image_name=image))

    # set Id, which isn't in config blob, won't be set for v1, as for that image has to be pulled
    image_inspect['Id'] = config_digest
    # only v2 has rootfs, not v1
    if 'rootfs' in blob_config:
        image_inspect['RootFS'] = blob_config['rootfs']

    for old_key, new_key in config_2_inspect.items():
        image_inspect[new_key] = blob_config[old_key]

    return image_inspect
Return inspect for image .
761
5
28,397
def df_parser(df_path, workflow=None, cache_content=False, env_replace=True,
              parent_env=None):
    """Wrap dockerfile_parse's DockerfileParser, honoring parent ENV inheritance.

    :param df_path: path to the Dockerfile
    :param workflow: optional workflow; inspected for the parent environment
                     when parent_env is not supplied
    :param cache_content: passed through to DockerfileParser
    :param env_replace: passed through to DockerfileParser
    :param parent_env: explicit parent environment (dict); overrides workflow
    :return: DockerfileParser instance
    """
    p_env = {}

    if parent_env:
        # If parent_env passed in, just use that
        p_env = parent_env
    elif workflow:
        # If parent_env is not provided, but workflow is then attempt to inspect
        # the workflow for the parent_env
        try:
            parent_config = workflow.builder.base_image_inspect[INSPECT_CONFIG]
        except (AttributeError, TypeError, KeyError):
            logger.debug("base image unable to be inspected")
        else:
            try:
                tmp_env = parent_config["Env"]
                # FIX: lazy %-args instead of eager string interpolation,
                # so formatting is skipped when debug logging is disabled.
                logger.debug("Parent Config ENV: %s", tmp_env)

                if isinstance(tmp_env, dict):
                    p_env = tmp_env
                elif isinstance(tmp_env, list):
                    # "KEY=VALUE" entries; split on the first '=' only so
                    # values may themselves contain '='.
                    try:
                        for key_val in tmp_env:
                            key, val = key_val.split("=", 1)
                            p_env[key] = val
                    except ValueError:
                        logger.debug("Unable to parse all of Parent Config ENV")
            except KeyError:
                logger.debug("Parent Environment not found, not applied to Dockerfile")

    try:
        dfparser = DockerfileParser(
            df_path,
            cache_content=cache_content,
            env_replace=env_replace,
            parent_env=p_env
        )
    except TypeError:
        # Older dockerfile-parse has no parent_env keyword; fall back.
        logger.debug("Old version of dockerfile-parse detected, unable to set inherited parent "
                     "ENVs")
        dfparser = DockerfileParser(
            df_path,
            cache_content=cache_content,
            env_replace=env_replace,
        )

    return dfparser
Wrapper for dockerfile_parse's DockerfileParser that takes into account parent_env inheritance .
380
20
28,398
def are_plugins_in_order(plugins_conf, *plugins_names):
    """Check whether the named plugins appear in the given relative order.

    :param plugins_conf: list of plugin config dicts (each with a 'name' key),
                         or a falsy value for "no plugins configured"
    :param plugins_names: plugin names in the expected order
    :return: True when every name is found at or after the previous match
    """
    configured = [entry['name'] for entry in plugins_conf or []]
    position = 0
    for wanted in plugins_names:
        remaining = configured[position:]
        if wanted not in remaining:
            return False
        position = configured.index(wanted, position)
    return True
Check if plugins are configured in given order .
86
9
28,399
def get_parent_image_koji_data(workflow):
    """Transform koji_parent plugin results into a metadata dict.

    :param workflow: workflow whose prebuild_results and builder are consulted
    :return: dict with parent image builds, ordered parent image list, and
             (except for scratch builds) the base image build id when valid
    """
    koji_parent = workflow.prebuild_results.get(PLUGIN_KOJI_PARENT_KEY) or {}

    parent_builds = {}
    for img, build_info in (koji_parent.get(PARENT_IMAGES_KOJI_BUILDS) or {}).items():
        if build_info:
            parent_builds[str(img)] = {k: v for k, v in build_info.items()
                                       if k in ('id', 'nvr')}
        else:
            parent_builds[str(img)] = None

    image_metadata = {
        PARENT_IMAGE_BUILDS_KEY: parent_builds,
        # ordered list of parent images
        PARENT_IMAGES_KEY: workflow.builder.parents_ordered,
    }

    # don't add parent image id key for scratch
    if workflow.builder.base_from_scratch:
        return image_metadata

    base_build = koji_parent.get(BASE_IMAGE_KOJI_BUILD) or {}
    base_id = base_build.get('id')
    if base_id is not None:
        try:
            image_metadata[BASE_IMAGE_BUILD_ID_KEY] = int(base_id)
        except ValueError:
            logger.exception("invalid koji parent id %r", base_id)

    return image_metadata
Transform koji_parent plugin results into metadata dict .
321
11