idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,200
def make_path(phase) -> str:
    """Build the output folder path where the phase's metadata and optimizer pickle are saved."""
    path_parts = (phase.phase_path, phase.phase_name, phase.phase_tag)
    return "{}/{}{}{}".format(conf.instance.output_path, *path_parts)
Create the path to the folder at which the metadata and optimizer pickle should be saved
47
18
12,201
def save_optimizer_for_phase(phase):
    """Persist the phase's optimizer as a pickle at its pickle path."""
    with open(make_optimizer_pickle_path(phase), "w+b") as pickle_file:
        pickle.dump(phase.optimizer, pickle_file)
Save the optimizer associated with the phase as a pickle
53
12
12,202
def assert_optimizer_pickle_matches_for_phase(phase):
    """Raise if a previously pickled optimizer exists and differs from the phase's optimizer."""
    path = make_optimizer_pickle_path(phase)
    if not os.path.exists(path):
        # Nothing saved yet, nothing to compare against.
        return
    with open(path, "r+b") as pickle_file:
        saved_optimizer = pickle.loads(pickle_file.read())
    if phase.optimizer != saved_optimizer:
        raise exc.PipelineException(
            f"Can't restart phase at path {path} because settings don't match. "
            f"Did you change the optimizer settings or model?"
        )
Assert that the previously saved optimizer is equal to the phase's optimizer, if a saved optimizer is found.
121
24
12,203
def add(self, phase_name, result):
    """Record the result of a phase, rejecting duplicate phase names."""
    if phase_name in self.__result_dict:
        # A phase name maps to exactly one result.
        raise exc.PipelineException(
            "Results from a phase called {} already exist in the pipeline".format(
                phase_name
            )
        )
    self.__result_list.append(result)
    self.__result_dict[phase_name] = result
Add the result of a phase .
75
7
12,204
def from_phase(self, phase_name):
    """Returns the result of a previous phase by its name."""
    try:
        return self.__result_dict[phase_name]
    except KeyError:
        known_phases = ", ".join(self.__result_dict.keys())
        raise exc.PipelineException(
            "No previous phase named {} found in results ({})".format(
                phase_name, known_phases
            )
        )
Returns the result of a previous phase by its name
73
10
12,205
def save_metadata(self, phase, data_name):
    """Write the pipeline, phase and data names to the phase's .metadata file."""
    metadata = "pipeline={}\nphase={}\ndata={}".format(
        self.pipeline_name, phase.phase_name, data_name
    )
    with open("{}/.metadata".format(make_path(phase)), "w+") as metadata_file:
        metadata_file.write(metadata)
Save metadata associated with the phase such as the name of the pipeline the name of the phase and the name of the data being fit
79
26
12,206
def run_function(self, func, data_name=None, assert_optimizer_pickle_matches=True):
    """Run ``func`` for each phase in the pipeline, collecting results by phase name."""
    results = ResultsCollection()
    for index, phase in enumerate(self.phases):
        logger.info(
            "Running Phase {} (Number {})".format(phase.optimizer.phase_name, index)
        )
        if assert_optimizer_pickle_matches:
            # Refuse to restart a phase whose optimizer settings changed.
            assert_optimizer_pickle_matches_for_phase(phase)
        save_optimizer_for_phase(phase)
        self.save_metadata(phase, data_name)
        results.add(phase.phase_name, func(phase, results))
    return results
Run the function for each phase in the pipeline .
146
10
12,207
def strtobytes(input, encoding):
    """Take a str and transform it into a byte array."""
    if sys.version_info[0] >= 3:
        return _strtobytes_py3(input, encoding)
    # Python 2 fallback.
    return _strtobytes_py2(input, encoding)
Take a str and transform it into a byte array .
58
11
12,208
def index_impute2(fn):
    """Indexes an IMPUTE2 file."""
    logger.info("Indexing {} (IMPUTE2)".format(fn))
    column_indexes = [0, 1, 2]
    column_names = ["chrom", "name", "pos"]
    impute2_index(fn, cols=column_indexes, names=column_names, sep=" ")
    logger.info("Index generated")
Indexes an IMPUTE2 file .
79
8
12,209
def index_bgen(fn, legacy=False):
    """Indexes a BGEN file using the external 'bgenix' tool.

    Args:
        fn: path to the BGEN file.
        legacy: if True, pass '-with-rowid' for legacy-mode indexes.

    Exits the process if bgenix is missing or fails.
    """
    logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
        fn,
        " (legacy mode)" if legacy else "",
    ))
    command = ["bgenix", "-g", fn, "-index"]
    if legacy:
        command.append("-with-rowid")
    try:
        logger.info("Executing '{}'".format(" ".join(command)))
        proc = subprocess.Popen(command)
        proc.communicate()
    except FileNotFoundError:
        logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
        sys.exit(1)
    # Bug fix: previously a failing bgenix run still logged "Index generated".
    if proc.returncode != 0:
        logger.error(
            "'bgenix' exited with status {}, index for {} may be invalid".format(
                proc.returncode, fn
            )
        )
        sys.exit(1)
    logger.info("Index generated")
Indexes a BGEN file .
166
7
12,210
def create_untl_xml_subelement(parent, element, prefix=''):
    """Create a UNTL XML subelement under ``parent`` for ``element``.

    Bug fix: the original tested ``element.children > 0``, comparing a list
    with an int, which raises TypeError on Python 3; the list's truthiness
    is used instead (same outcome where the original worked).
    """
    subelement = SubElement(parent, prefix + element.tag)
    if element.content is not None:
        subelement.text = element.content
    if element.qualifier is not None:
        subelement.attrib["qualifier"] = element.qualifier
    if element.children:
        for child in element.children:
            SubElement(subelement, prefix + child.tag).text = child.content
    else:
        subelement.text = element.content
    return subelement
Create a UNTL XML subelement .
119
8
12,211
def add_missing_children(required_children, element_children):
    """Append blank elements for required children missing from the form.

    Args:
        required_children: tags that must be present in the form.
        element_children: existing child elements (mutated in place).

    Returns:
        The (extended) list of child elements.
    """
    element_tags = [element.tag for element in element_children]
    # Loop through the elements that should be in the form.
    for contained_element in required_children:
        # If the element doesn't exist in the form,
        # add the element to the children.
        if contained_element not in element_tags:
            try:
                added_child = PYUNTL_DISPATCH[contained_element](content='')
            except TypeError:
                # Bug fix: narrowed from a bare except. Some dispatch classes
                # do not accept a 'content' kwarg; fall back to no-arg
                # construction without swallowing unrelated errors.
                added_child = PYUNTL_DISPATCH[contained_element]()
            element_children.append(added_child)
    return element_children
Determine if there are elements not in the children that need to be included as blank elements in the form .
140
23
12,212
def set_qualifier(self, value):
    """Set the qualifier for the element, stripping surrounding whitespace."""
    if not self.allows_qualifier:
        raise UNTLStructureException(
            'Element "%s" does not allow a qualifier' % (self.tag,)
        )
    self.qualifier = value.strip()
Set the qualifier for the element .
56
7
12,213
def add_form(self, **kwargs):
    """Add the form attribute to the UNTL Python object.

    Keyword Args:
        vocabularies: controlled vocabularies used for qualifier choices.
        qualifier: the element's qualifier, if any.
        content: the element's content, if any.
        parent_tag: tag of the parent element when this is a child element.
        superuser (bool): whether the form is rendered for a superuser.
    """
    vocabularies = kwargs.get('vocabularies', None)
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', None)
    parent_tag = kwargs.get('parent_tag', None)
    superuser = kwargs.get('superuser', False)
    # Element has both the qualifier and content.
    if qualifier is not None and content is not None:
        # Create the form attribute.
        self.form = UNTL_FORM_DISPATCH[self.tag](
            vocabularies=vocabularies,
            qualifier_value=qualifier,
            input_value=content,
            untl_object=self,
            superuser=superuser,
        )
    # Element just has a qualifier.
    elif qualifier is not None:
        # Create the form attribute.
        self.form = UNTL_FORM_DISPATCH[self.tag](
            vocabularies=vocabularies,
            qualifier_value=qualifier,
            untl_object=self,
            superuser=superuser,
        )
    # Element just has content.
    elif content is not None:
        # NOTE(review): parent_tag is None appears to mean "not a child
        # element" here — confirm against callers.
        if parent_tag is None:
            self.form = UNTL_FORM_DISPATCH[self.tag](
                vocabularies=vocabularies,
                input_value=content,
                untl_object=self,
                superuser=superuser,
            )
        else:
            # Create the form attribute.
            self.form = UNTL_FORM_DISPATCH[self.tag](
                vocabularies=vocabularies,
                input_value=content,
                untl_object=self,
                parent_tag=parent_tag,
                superuser=superuser,
            )
    # Element has children and no qualifiers or content
    # or is blank (not originally in the UNTL record).
    else:
        # Element is a child element.
        if parent_tag is None:
            # Create the form attribute.
            self.form = UNTL_FORM_DISPATCH[self.tag](
                vocabularies=vocabularies,
                untl_object=self,
                superuser=superuser,
            )
        else:
            # Create the form attribute.
            self.form = UNTL_FORM_DISPATCH[self.tag](
                vocabularies=vocabularies,
                untl_object=self,
                parent_tag=parent_tag,
                superuser=superuser,
            )
Add the form attribute to the UNTL Python object .
536
11
12,214
def record_content_length(self):
    """Calculate the length of the record, excluding its 'meta' metadata."""
    record_dict = py2dict(self)
    record_dict.pop('meta', None)
    return len(str(record_dict))
Calculate length of record excluding metadata .
42
9
12,215
def create_form_data(self, **kwargs):
    """Create groupings of form elements for the UNTL edit form.

    Keyword Args:
        children: UNTL child elements to build form objects for.
        sort_order: pre-ordered list of group names used to sort the output.
        solr_response: optional Solr response passed through to the groups.
        superuser (bool): whether the form is rendered for a superuser.

    Returns:
        A list of element group objects with form data attached.
    """
    # Get the specified keyword arguments.
    children = kwargs.get('children', [])
    sort_order = kwargs.get('sort_order', None)
    solr_response = kwargs.get('solr_response', None)
    superuser = kwargs.get('superuser', False)
    # Get the vocabularies to pull the qualifiers from.
    vocabularies = self.get_vocabularies()
    # Loop through all UNTL elements in the Python object.
    for element in children:
        # Add children that are missing from the form.
        element.children = add_missing_children(
            element.contained_children,
            element.children,
        )
        # Add the form attribute to the element.
        element.add_form(
            vocabularies=vocabularies,
            qualifier=element.qualifier,
            content=element.content,
            superuser=superuser,
        )
        # Element can contain children.
        if element.form.has_children:
            # If the parent has a qualifier,
            # create a representative form element for the parent.
            if getattr(element.form, 'qualifier_name', False):
                add_parent = PARENT_FORM[element.form.qualifier_name](
                    content=element.qualifier,
                )
                # Add the parent to the list of child elements.
                element.children.append(add_parent)
            # Sort the elements by the index of child sort.
            element.children.sort(
                key=lambda obj: element.form.child_sort.index(obj.tag)
            )
            # Loop through the element's children (if it has any).
            for child in element.children:
                # Add the form attribute to the element.
                child.add_form(
                    vocabularies=vocabularies,
                    qualifier=child.qualifier,
                    content=child.content,
                    parent_tag=element.tag,
                    superuser=superuser,
                )
    element_group_dict = {}
    # Group related objects together.
    for element in children:
        # Make meta-hidden its own group.
        if element.form.name == 'meta' and element.qualifier == 'hidden':
            element_group_dict['hidden'] = [element]
        # Element is not meta-hidden.
        else:
            # Make sure the dictionary key exists.
            if element.form.name not in element_group_dict:
                element_group_dict[element.form.name] = []
            element_group_dict[element.form.name].append(element)
    # If the hidden meta element doesn't exist, add it to its own group.
    if 'hidden' not in element_group_dict:
        hidden_element = PYUNTL_DISPATCH['meta'](
            qualifier='hidden',
            content='False',
        )
        hidden_element.add_form(
            vocabularies=vocabularies,
            qualifier=hidden_element.qualifier,
            content=hidden_element.content,
            superuser=superuser,
        )
        element_group_dict['hidden'] = [hidden_element]
    # Create a list of group object elements.
    element_list = self.create_form_groupings(
        vocabularies,
        solr_response,
        element_group_dict,
        sort_order,
    )
    # Return the list of UNTL elements with form data added.
    return element_list
Create groupings of form elements .
719
7
12,216
def create_form_groupings ( self , vocabularies , solr_response , element_group_dict , sort_order ) : element_list = [ ] # Loop through the group dictionary. for group_name , group_list in element_group_dict . items ( ) : # Create the element group. element_group = UNTL_GROUP_DISPATCH [ group_name ] ( vocabularies = vocabularies , solr_response = solr_response , group_name = group_name , group_list = group_list , ) # Loop through the adjustable forms of the group if they exist. if element_group . adjustable_form is not None : for adj_name , form_dict in element_group . adjustable_form . items ( ) : # If an item has an adjustable form, # append it to the adjustable list. if form_dict [ 'value_py' ] is not None : self . adjustable_items . append ( adj_name ) # Append the group to the element group list. element_list . append ( element_group ) # Sort the elements by the index of sort_order pre-ordered list. element_list . sort ( key = lambda obj : sort_order . index ( obj . group_name ) ) return element_list
Create a group object from groupings of element objects .
279
11
12,217
def get_vocabularies(self):
    """Fetch the verbose vocabularies dictionary used for qualifier choices.

    Raises:
        UNTLStructureException: if the vocabularies cannot be retrieved
            or parsed.
    """
    import ast
    # Timeout in seconds.
    timeout = 15
    socket.setdefaulttimeout(timeout)
    # Create the ordered vocabulary URL.
    vocab_url = VOCABULARIES_URL.replace('all', 'all-verbose')
    # Request the vocabularies dictionary.
    try:
        # Security fix: the response was previously passed to eval(), which
        # executes arbitrary code received over the network. literal_eval
        # only accepts Python literals (dicts, lists, strings, numbers).
        vocab_dict = ast.literal_eval(urllib2.urlopen(vocab_url).read())
    except Exception:
        raise UNTLStructureException('Could not retrieve the vocabularies')
    return vocab_dict
Get the vocabularies to pull the qualifiers from .
122
12
12,218
def create_xml_string(self):
    """Create a UNTL document in a string from a UNTL metadata root object.

    Returns:
        The serialized XML prefixed with an XML declaration.
    """
    root = self.create_xml()
    # NOTE(review): lxml's tostring() returns bytes; this concatenation only
    # works on Python 2 (where str is bytes). Confirm before running on
    # Python 3 — a .decode('utf-8') would be required there.
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(root, pretty_print=True)
    return xml
Create a UNTL document in a string from a UNTL metadata root object .
54
16
12,219
def create_xml(self, useNamespace=False):
    """Create an ElementTree representation of the object."""
    UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
    UNTL = '{%s}' % UNTL_NAMESPACE
    NSMAP = {'untl': UNTL_NAMESPACE}
    if useNamespace:
        root = Element(UNTL + self.tag, nsmap=NSMAP)
    else:
        root = Element(self.tag)
    # Order children per the canonical UNTL_XML_ORDER list before writing.
    self.sort_untl(UNTL_XML_ORDER)
    tag_prefix = UNTL if useNamespace else ''
    for element in self.children:
        create_untl_xml_subelement(root, element, tag_prefix)
    return root
Create an ElementTree representation of the object .
201
9
12,220
def create_element_dict(self):
    """Convert a UNTL Python object into a UNTL Python dictionary."""
    untl_dict = {}
    for element in self.children:
        # Start an empty list for tags we haven't seen yet.
        untl_dict.setdefault(element.tag, [])
        entry = {}
        if element.qualifier is not None:
            entry['qualifier'] = element.qualifier
        if len(element.contained_children) > 0:
            # The element's content becomes a dict of its children
            # that carry content.
            entry['content'] = {
                child.tag: child.content
                for child in element.children
                if child.content is not None
            }
        elif element.content is not None:
            # The element has content, but no children.
            entry['content'] = element.content
        untl_dict[element.tag].append(entry)
    return untl_dict
Convert a UNTL Python object into a UNTL Python dictionary .
266
14
12,221
def create_xml_file(self, untl_filename):
    """Write the UNTL record to ``untl_filename`` as UTF-8 encoded XML.

    Raises:
        UNTLStructureException: if the file cannot be written.
    """
    try:
        # Bug fix: open in binary mode since encoded bytes are written
        # (text mode breaks on Python 3), and use a context manager so the
        # handle is closed even when the write fails.
        with open(untl_filename, 'wb') as xml_file:
            xml_file.write(self.create_xml_string().encode('utf-8'))
    except OSError:
        # Narrowed from a bare except: only I/O failures are wrapped.
        raise UNTLStructureException(
            'Failed to create UNTL XML file. File: %s' % (untl_filename)
        )
Create a UNTL file .
90
6
12,222
def sort_untl(self, sort_structure):
    """Sort children in place by their tag's position in ``sort_structure``."""
    position_of = sort_structure.index
    self.children.sort(key=lambda child: position_of(child.tag))
Sort the UNTL Python object by the index of a pre-ordered sort structure list.
37
18
12,223
def generate_form_data(self, **kwargs):
    """Build a FormGenerator for this object after filling in missing children."""
    # Add elements that are missing from the form.
    self.children = add_missing_children(self.contained_children, self.children)
    # Hand the (now complete) children to the form generator.
    kwargs['children'] = self.children
    return FormGenerator(**kwargs)
Create a form dictionary with the key being the element name and the value being a list of form element objects .
83
22
12,224
def contributor_director(**kwargs):
    """Expand a contributor qualifier into a role, or return None if unknown."""
    qualifier = kwargs.get('qualifier')
    if qualifier not in ETD_MS_CONTRIBUTOR_EXPANSION:
        return None
    # Return the element object with its expanded role name.
    return ETD_MSContributor(
        role=ETD_MS_CONTRIBUTOR_EXPANSION[qualifier], **kwargs
    )
Define the expanded qualifier name .
94
7
12,225
def date_director(**kwargs):
    """Direct which class should be used based on the date qualifier,
    or whether the date should be converted at all.

    Returns None for 'digitized' dates; otherwise an ETD_MSDate built from
    the stripped content.
    """
    # Dead-code fix: the original 'creation' branch and the generic
    # non-'digitized' branch returned identical objects, so the two
    # conditions collapse into one.
    if kwargs.get('qualifier') != 'digitized':
        return ETD_MSDate(content=kwargs.get('content').strip())
    return None
Direct which class should be used based on the date qualifier or if the date should be converted at all .
120
21
12,226
def subject_director(**kwargs):
    """Direct how to handle a subject element."""
    qualifier = kwargs.get('qualifier')
    if qualifier in ['KWD', '']:
        # Plain keyword subjects carry no scheme.
        return ETD_MSSubject(content=kwargs.get('content'))
    return ETD_MSSubject(scheme=qualifier, **kwargs)
Direct how to handle a subject element .
88
8
12,227
def get_child_content(self, children, element_name):
    """Get the requested element content from a list of children.

    Returns the first matching child's content, or '' when absent.
    """
    return next(
        (child.content for child in children if child.tag == element_name),
        '',
    )
Get the requested element content from a list of children .
60
11
12,228
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    """Offset the center of a colormap.

    Useful for data with a negative min and positive max where the middle
    of the colormap's dynamic range should sit at zero.
    """
    cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
    # Regular index used to sample the source colormap.
    reg_index = np.linspace(start, stop, 257)
    # Shifted index to match the data: midpoint lands exactly at entry 128.
    shift_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True),
    ])
    channels = ('red', 'green', 'blue', 'alpha')
    for reg, shifted in zip(reg_index, shift_index):
        for channel, value in zip(channels, cmap(reg)):
            cdict[channel].append((shifted, value, value))
    newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
Function to offset the center of a colormap. Useful for data with a negative min and positive max when you want the middle of the colormap's dynamic range to be at zero.
291
38
12,229
def read_lastmodfile(directory):
    """Return the number of the final inversion result, or None.

    Reads <directory>/exe/inv.lastmod and strips the path/extension parts
    from its first line. Returns None when the file is missing (i.e. the
    inversion did not finish).
    """
    filename = '{0}/exe/inv.lastmod'.format(directory)
    # filename HAS to exist. Otherwise the inversion was not finished.
    if not os.path.isfile(filename):
        return None
    # Bug fix: the original leaked the file handle; use a context manager.
    with open(filename, 'r') as fid:
        linestring = fid.readline().strip()
    linestring = linestring.replace("\n", '')
    linestring = linestring.replace(".mag", '')
    linestring = linestring.replace("../inv/rho", '')
    return linestring
Return the number of the final inversion result .
135
10
12,230
def setHandler(self, event_name, callback):
    """Set a handler for the given event.

    Raises:
        ValueError: if ``event_name`` is not a known event.
        TypeError: if ``callback`` is not callable.
    """
    if event_name not in self.handlers:
        raise ValueError('{} is not a valid event'.format(event_name))
    # Bug fix: the original tested callable(event_name) — a string, never
    # callable — so invalid callbacks were silently accepted and the
    # TypeError branch was unreachable.
    if not callable(callback):
        raise TypeError('{} is not callable'.format(callback))
    self.handlers[event_name] = callback
Set a handler for a given event.
81
7
12,231
def isHandlerPresent(self, event_name):
    """Check whether the given event currently has a handler."""
    if event_name not in self.handlers:
        raise ValueError('{} is not a valid event'.format(event_name))
    handler = self.handlers[event_name]
    return handler is not None
Check if an event has a handler.
55
8
12,232
def removeHandler(self, event_name):
    """Remove the handler for the given event (reset it to None)."""
    if event_name in self.handlers:
        self.handlers[event_name] = None
    else:
        raise ValueError('{} is not a valid event'.format(event_name))
Remove handler for given event .
52
6
12,233
def _get_fct_number_of_arg ( self , fct ) : py_version = sys . version_info [ 0 ] if py_version >= 3 : return len ( inspect . signature ( fct ) . parameters ) return len ( inspect . getargspec ( fct ) [ 0 ] )
Get the number of arguments of a function.
67
10
12,234
def event_tracker(func):
    """Decorator that records start/completion of an event in the tracker.

    Builds an Event from the wrapped coroutine's first positional argument,
    reads the service name from the required ``session`` keyword argument,
    and emits a 'started' state before and a 'completed' state after
    awaiting the wrapped coroutine.
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        """ Wraps function to provide redis tracking """
        # NOTE(review): assumes args[0] is the event payload and that a
        # 'session' kwarg with a .name attribute is always supplied —
        # confirm against callers.
        event = Event(args[0])
        session = kwargs['session']
        service_name = session.name
        await track_event(event, EventState.started, service_name)
        await func(*args, **kwargs)
        await track_event(event, EventState.completed, service_name)
    return wrapper
Event tracking handler
110
3
12,235
def ensure_ajax(valid_request_methods, error_response_context=None):
    """Ensure the request is an AJAX request using an allowed HTTP method.

    Returns a decorator that yields a JSON error response when the request
    is not a django HttpRequest, not AJAX, or uses a disallowed method.
    """
    from functools import wraps

    def real_decorator(view_func):
        # Improvement: functools.wraps preserves __module__, __qualname__
        # and __wrapped__ in addition to the __doc__/__name__ the original
        # copied by hand.
        @wraps(view_func)
        def wrap_func(request, *args, **kwargs):
            if not isinstance(request, HttpRequest):
                # make sure the request is a django httprequest
                return generate_error_json_response(
                    "Invalid request!", error_response_context)
            elif not request.is_ajax():
                # ensure the request is an ajax request
                return generate_error_json_response(
                    "Invalid request type!", error_response_context)
            elif request.method not in valid_request_methods:
                # check if the request method is in allowed request methods
                return generate_error_json_response(
                    "Invalid request method!", error_response_context)
            return view_func(request, *args, **kwargs)
        return wrap_func
    return real_decorator
Intends to ensure the received the request is ajax request and it is included in the valid request methods
241
22
12,236
def generate_error_json_response(error_dict, error_response_context=None):
    """Build an error JSON response.

    When no context is supplied, a DataTables-formatted default is used.
    """
    response = {"error": error_dict} if isinstance(error_dict, str) else error_dict
    if error_response_context is None:
        error_response_context = {
            'draw': 0,
            'recordsTotal': 0,
            'recordsFiltered': 0,
            'data': [],
        }
    response.update(error_response_context)
    return JsonResponse(response)
Intends to build an error json response . If the error_response_context is None then we generate this response using data tables format
110
27
12,237
def _mergeGoSymbols ( self , jsons = [ ] ) : # <siXy> imports are per file, exports are per package # on the highest level we have: pkgname, types, funcs, vars, imports. symbols = { } symbols [ "types" ] = [ ] symbols [ "funcs" ] = [ ] symbols [ "vars" ] = [ ] for file_json in jsons : symbols [ "types" ] += file_json [ "types" ] symbols [ "funcs" ] += file_json [ "funcs" ] symbols [ "vars" ] += file_json [ "vars" ] return symbols
Exported symbols for a given package does not have any prefix . So I can drop all import paths that are file specific and merge all symbols . Assuming all files in the given package has mutual exclusive symbols .
147
41
12,238
def read(self, n):
    """Read n bytes from the mapped view and advance the position.

    Copies ``n`` bytes from the Windows memory-mapped view at the current
    offset into a fresh buffer via kernel32's RtlMoveMemory.
    """
    out = ctypes.create_string_buffer(n)
    # self.view + self.pos is the source address inside the mapped region.
    ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
    self.pos += n
    return out.raw
Read n bytes from mapped view .
56
7
12,239
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    """Converts fluents to tensors with datatype tf.float32."""
    tensors = []
    for _, fluent in fluents:
        value = fluent.tensor
        if value.dtype != tf.float32:
            # Normalize every tensor to float32.
            value = tf.cast(value, tf.float32)
        tensors.append(value)
    return tuple(tensors)
Converts fluents to tensors with datatype tf . float32 .
85
16
12,240
def set(self, key, value):
    """Sets a hyperparameter and returns the full store."""
    self.store.update({key: value})
    return self.store
Sets a hyperparameter . Can be used to set an array of hyperparameters .
22
19
12,241
def config_at(self, i):
    """Gets the ith config from the cartesian product of list-valued options."""
    selections = {}
    for key, value in self.store.items():
        if isinstance(value, list):
            # Treat i as a mixed-radix number over the list lengths.
            i, selected = divmod(i, len(value))
            selections[key] = value[selected]
        else:
            selections[key] = value
    return Config(selections)
Gets the ith config
76
6
12,242
def top(self, sort_by):
    """Get the results ordered by the supplied key function."""
    return sorted(self.results, key=sort_by)
Get the best results according to your custom sort method .
26
11
12,243
def load_or_create_config(self, filename, config=None):
    """Load a config from disk, creating (and saving) one if absent.

    Args:
        filename: path of the config file; '~' is expanded.
        config: optional config to save when the file does not exist;
            a random config is generated when omitted.
    """
    # Bug fix: expand '~' once and use the expanded path consistently — the
    # original created directories under the expanded path but checked,
    # loaded and saved the unexpanded one.
    filename = os.path.expanduser(filename)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if os.path.exists(filename):
        return self.load(filename)
    # Idiom fix: compare with None using 'is', not '=='.
    if config is None:
        config = self.random_config()
    self.save(filename, config)
    return config
Loads a config from disk . Defaults to a random config if none is specified
91
17
12,244
def configure(self, repositories):
    """Prepare the system to be ready for an undercloud installation.

    Steps run in order: enable the given package repositories, create the
    stack user, install base packages, clean the system, run a yum update
    (allowing a reboot), install OSP, set SELinux to permissive and fix
    the hostname.
    """
    self.enable_repositories(repositories)
    self.create_stack_user()
    self.install_base_packages()
    self.clean_system()
    # A reboot may be required to boot into an updated kernel.
    self.yum_update(allow_reboot=True)
    self.install_osp()
    self.set_selinux('permissive')
    self.fix_hostname()
Prepare the system to be ready for an undercloud installation .
86
13
12,245
def openstack_undercloud_install(self):
    """Deploy an undercloud on the host.

    Applies version-specific workarounds (BZ1298189 before, BZ1297796
    after) around 'openstack undercloud install' run as the 'stack' user,
    then sanity-checks the deployment with 'heat stack-list'.
    """
    instack_undercloud_ver, _ = self.run('repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp')
    # Workaround only applies to this exact broken package build.
    if instack_undercloud_ver.rstrip('\n') == 'instack-undercloud-0:2.2.0-1.el7ost.noarch':
        LOG.warn('Workaround for BZ1298189')
        self.run("sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp")
    self.run('OS_PASSWORD=bob openstack undercloud install', user='stack')
    # NOTE(Gonéri): we also need this after the overcloud deployment
    if self.run('rpm -qa openstack-ironic-api')[0].rstrip('\n') == 'openstack-ironic-api-4.2.2-3.el7ost.noarch':
        LOG.warn('Workaround for BZ1297796')
        self.run('systemctl start openstack-ironic-api.service')
    self.add_environment_file(user='stack', filename='stackrc')
    self.run('heat stack-list', user='stack')
Deploy an undercloud on the host .
383
8
12,246
def create_flavor(self, name):
    """Create a new baremetal flavor.

    NOTE(review): the ``name`` argument is currently unused — the flavor is
    always called 'baremetal'; confirm whether ``name`` was meant to be
    interpolated into the commands.
    """
    self.add_environment_file(user='stack', filename='stackrc')
    # success_status (0, 1) tolerates "flavor already exists".
    self.run('openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal', user='stack', success_status=(0, 1))
    self.run('openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal', user='stack')
    self.run('openstack flavor set --property "capabilities:profile"="baremetal" baremetal', user='stack')
Create a new baremetal flavor .
151
7
12,247
def list_nodes(self):
    """List the Ironic nodes UUID.

    Returns:
        List of UUID strings parsed from the 'ironic node-list' output.
    """
    self.add_environment_file(user='stack', filename='stackrc')
    # awk keeps only the UUID column of rows that contain a UUID.
    ret, _ = self.run("ironic node-list --fields uuid|awk '/-.*-/ {print $2}'", user='stack')
    # NOTE(Gonéri): the good new is, the order of the nodes is preserved and follow the one from
    # the instackenv.json, BUT it may be interesting to add a check.
    return ret.split()
List the Ironic nodes UUID .
117
8
12,248
def set_flavor(self, node, flavor):
    """Set a flavor to a given ironic node."""
    node.flavor = flavor
    update_cmd = (
        'ironic node-update {uuid} add '
        'properties/capabilities=profile:{flavor},boot_option:local'
    ).format(uuid=node.uuid, flavor=flavor)
    self.add_environment_file(user='stack', filename='stackrc')
    self.run(update_cmd, user='stack')
Set a flavor to a given ironic node .
99
9
12,249
def read_by(cls, removed=False, **kwargs):
    """filter_by query helper that applies the soft-delete filter by default.

    If your query conditions require expressions, use ``read`` instead.
    """
    if not removed:
        # Soft-deleted rows carry a non-zero time_removed.
        kwargs['time_removed'] = 0
    return cls.query.filter_by(**kwargs)
filter_by query helper that handles soft delete logic . If your query conditions require expressions use read .
52
20
12,250
def read(cls, *criteria, **kwargs):
    """filter query helper that applies the soft-delete filter by default.

    If your conditions do not require expressions, consider ``read_by``.
    """
    if kwargs.get('removed', False):
        return cls.query.filter(*criteria)
    # Default: exclude soft-deleted rows.
    return cls.query.filter(cls.time_removed == 0, *criteria)
filter query helper that handles soft delete logic . If your query conditions do not require expressions consider using read_by .
64
23
12,251
def delete(self, session, commit=True, soft=True):
    """Delete a row from the DB (soft delete by default)."""
    if not soft:
        session.delete(self)
    else:
        # Soft delete: stamp the removal time instead of deleting the row.
        self.time_removed = sqlalchemy.func.unix_timestamp()
    if commit:
        session.commit()
Delete a row from the DB .
55
7
12,252
def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) -> Iterator[pathlib.PurePath]:
    """Recursively traverse all paths inside this entity, including itself.

    Subclasses must override this.
    """
    raise NotImplementedError
Recursively traverse all paths inside this entity including the entity itself .
46
14
12,253
def _walk_paths(self, base: pathlib.PurePath) -> Iterator[pathlib.PurePath]:
    """Internal helper for walking paths.

    Exists so the name of the root entity can be excluded from the walk;
    simply delegates to walk_paths with the given base.
    """
    return self.walk_paths(base)
Internal helper for walking paths . This is required to exclude the name of the root entity from the walk .
38
21
12,254
def from_path(cls, path: pathlib.Path) -> 'Entity':
    """Create an entity from a local path (file or directory)."""
    factory = File.from_path if path.is_file() else Directory.from_path
    return factory(path)
Create an entity from a local path .
47
8
12,255
def _md5 ( path : pathlib . PurePath ) : hash_ = hashlib . md5 ( ) with open ( path , 'rb' ) as f : for chunk in iter ( lambda : f . read ( 4096 ) , b'' ) : hash_ . update ( chunk ) return hash_ . hexdigest ( )
Calculate the MD5 checksum of a file .
70
12
12,256
def from_path(cls, path: pathlib.Path) -> 'File':
    """Create a file entity from a file path."""
    if not path.is_file():
        raise ValueError('Path does not point to a file')
    stat_result = path.stat()
    return File(path.name, stat_result.st_size, cls._md5(path))
Create a file entity from a file path .
71
9
12,257
def from_path(cls, path: pathlib.Path) -> 'Directory':
    """Create a directory entity from a directory path."""
    if not path.is_dir():
        raise ValueError('Path does not point to a directory')
    contents = {
        entity.name: Entity.from_path(entity)
        for entity in path.iterdir()
    }
    return Directory(path.name, contents)
Create a directory entity from a directory path .
75
9
12,258
def best_result(self):
    """The result with the highest maximum figure of merit.

    Returns None when there are no results.
    """
    return max(
        self.results,
        key=lambda result: result.figure_of_merit,
        default=None,
    )
The best result of the grid search . That is the result output by the non linear search that had the highest maximum figure of merit .
56
27
12,259
def make_lists(self, grid_priors):
    """Produce the per-dimension value lists for each grid-search step."""
    return optimizer.make_lists(
        len(grid_priors),
        step_size=self.hyper_step_size,
        centre_steps=False,
    )
Produces a list of lists of floats where each list of floats represents the values in each dimension for one step of the grid search .
47
27
12,260
def fit(self, analysis, grid_priors):
    """Fit an analysis over a grid of prior steps.

    The given grid priors (de-duplicated) are replaced by uniform priors
    for each step of the grid search; one optimizer run is performed per
    grid cell and the results file is rewritten after each cell completes.

    Args:
        analysis: the analysis fitted by each optimizer instance.
        grid_priors: priors of this instance's model mapper that define
            the grid dimensions.

    Returns:
        GridSearchResult holding every per-cell result plus the step lists.
    """
    # NOTE(review): set() de-duplicates but makes the prior order
    # non-deterministic across runs — confirm this is intended.
    grid_priors = list(set(grid_priors))
    results = []
    lists = self.make_lists(grid_priors)
    # Header row: one column per prior plus the figure of merit.
    results_list = [list(map(self.variable.name_for_prior, grid_priors))
                    + ["figure_of_merit"]]

    def write_results():
        # Rewrite the full results file after every completed grid cell,
        # formatting floats with two decimals.
        with open("{}/results".format(self.phase_output_path), "w+") as f:
            f.write("\n".join(map(lambda ls: ", ".join(
                map(lambda value: "{:.2f}".format(value)
                    if isinstance(value, float) else str(value), ls)),
                results_list)))

    for values in lists:
        arguments = self.make_arguments(values, grid_priors)
        model_mapper = self.variable.mapper_from_partial_prior_arguments(arguments)
        # Build a unique folder name from each prior's name and limits.
        labels = []
        for prior in arguments.values():
            labels.append("{}_{:.2f}_{:.2f}".format(
                model_mapper.name_for_prior(prior),
                prior.lower_limit,
                prior.upper_limit))
        name_path = "{}{}/{}".format(self.phase_name, self.phase_tag,
                                     "_".join(labels))
        optimizer_instance = self.optimizer_instance(model_mapper, name_path)
        optimizer_instance.constant = self.constant
        result = optimizer_instance.fit(analysis)
        results.append(result)
        # Record the cell's lower limits and its figure of merit.
        results_list.append([*[prior.lower_limit for prior in arguments.values()],
                             result.figure_of_merit])
        write_results()
    return GridSearchResult(results, lists)
Fit an analysis with a set of grid priors . The grid priors are priors associated with the model mapper of this instance that are replaced by uniform priors for each step of the grid search .
414
42
12,261
def portTryReduce ( root : LNode , port : LPort ) : if not port . children : return for p in port . children : portTryReduce ( root , p ) target_nodes = { } ch_cnt = countDirectlyConnected ( port , target_nodes ) if not target_nodes : # disconnected port return new_target , children_edge_to_destroy = max ( target_nodes . items ( ) , key = lambda x : len ( x [ 1 ] ) ) cnt = len ( children_edge_to_destroy ) if cnt < ch_cnt / 2 or cnt == 1 and ch_cnt == 2 : # too small to few shared connection to reduce return children_to_destroy = set ( ) on_target_children_to_destroy = set ( ) for child , edge in children_edge_to_destroy : if child . direction == PortType . OUTPUT : target_ch = edge . dsts elif child . direction == PortType . INPUT : target_ch = edge . srcs else : raise ValueError ( child . direction ) if len ( target_ch ) != 1 : raise NotImplementedError ( "multiple connected nodes" , target_ch ) target_ch = target_ch [ 0 ] try : assert target_ch . parent is new_target , ( target_ch , target_ch . parent , new_target ) except AssertionError : print ( 'Wrong target:\n' , edge . src , "\n" , edge . dst , "\n" , target_ch . parent , "\n" , new_target ) raise if child . direction == PortType . OUTPUT : edge . removeTarget ( target_ch ) elif child . direction == PortType . INPUT : edge . removeTarget ( child ) if not edge . srcs or not edge . dsts : edge . remove ( ) if not target_ch . incomingEdges and not target_ch . outgoingEdges : # disconnect selected children from this port and target on_target_children_to_destroy . add ( target_ch ) if not child . incomingEdges and not child . outgoingEdges : children_to_destroy . add ( child ) # destroy children of new target and this port if possible port . children = [ ch for ch in port . children if ch not in children_to_destroy ] new_target . children = [ ch for ch in new_target . 
children if ch not in on_target_children_to_destroy ] # connect this port to new target as it was connected by children before # [TODO] names for new edges if port . direction == PortType . OUTPUT : root . addEdge ( port , new_target ) elif port . direction == PortType . INPUT : root . addEdge ( new_target , port ) else : raise NotImplementedError ( port . direction )
Check if majority of children is connected to same port if it is the case reduce children and connect this port instead children
625
23
12,262
def resolveSharedConnections ( root : LNode ) : for ch in root . children : resolveSharedConnections ( ch ) for ch in root . children : for p in ch . iterPorts ( ) : portTryReduce ( root , p )
Walk all ports on all nodes and group subinterface connections to only parent interface connection if it is possible
55
20
12,263
def countDirectlyConnected ( port : LPort , result : dict ) -> int : inEdges = port . incomingEdges outEdges = port . outgoingEdges if port . children : ch_cnt = 0 # try: # assert not inEdges, (port, port.children, inEdges) # assert not outEdges, (port, port.children, outEdges) # except AssertionError: # raise for ch in port . children : ch_cnt += countDirectlyConnected ( ch , result ) return ch_cnt elif not inEdges and not outEdges : # this port is not connected, just check if it expected state if port . direction == PortType . INPUT : if port . originObj is not None : assert not port . originObj . src . drivers , port . originObj else : print ( "Warning" , port , "not connected" ) return 0 else : connectedElemCnt = 0 for e in inEdges : connectedElemCnt += len ( e . srcs ) if connectedElemCnt > 1 : return 0 for e in outEdges : connectedElemCnt += len ( e . dsts ) if connectedElemCnt > 1 : return 0 if connectedElemCnt != 1 : return 0 if inEdges : e = inEdges [ 0 ] else : e = outEdges [ 0 ] # if is connected to different port if e . srcs [ 0 ] . name != e . dsts [ 0 ] . name : return 0 if e . srcs [ 0 ] is port : p = e . dsts [ 0 ] . parent else : # (can be hyperedge and then this does not have to be) # assert e.dsts[0] is port, (e, port) p = e . srcs [ 0 ] . parent # if is part of interface which can be reduced if not isinstance ( p , LNode ) : connections = result . get ( p , [ ] ) connections . append ( ( port , e ) ) result [ p ] = connections return 1
Count how many ports are directly connected to other nodes
453
10
12,264
def deploy ( self , image_name , ip , flavor = 'm1.small' ) : body_value = { "port" : { "admin_state_up" : True , "name" : self . name + '_provision' , "network_id" : os_utils . get_network_id ( self . nova_api , 'provision_bob' ) , 'fixed_ips' : [ { 'ip_address' : ip } ] } } response = self . neutron . create_port ( body = body_value ) self . _provision_port_id = response [ 'port' ] [ 'id' ] self . mac = response [ 'port' ] [ 'mac_address' ] image_id_to_boot_from = os_utils . get_image_id ( self . nova_api , image_name ) flavor_id = os_utils . get_flavor_id ( self . nova_api , flavor ) # TODO(Gonéri): We don't need keypair for the BM nodes keypair_id = os_utils . get_keypair_id ( self . nova_api , self . _keypair ) # Ensure with get DHCP lease on the provision network first nics = [ { 'port-id' : self . _provision_port_id } ] self . _os_instance = os_provisioner . build_openstack_instance ( self . nova_api , self . name , image_id_to_boot_from , flavor_id , keypair_id , nics ) if not self . _os_instance : LOG . error ( "deployment has failed" ) raise Exception ( ) os_provisioner . add_provision_security_group ( self . nova_api ) os_utils . add_security_groups ( self . _os_instance , [ 'provision' ] ) os_utils . add_security_groups ( self . _os_instance , self . _security_groups ) LOG . info ( "add security groups '%s'" % self . _security_groups ) LOG . info ( "instance '%s' ready to use" % self . name ) # the instance should be off for Ironic self . _os_instance . stop ( )
Create the node .
502
4
12,265
def pxe_netboot ( self , filename ) : new_port = { 'extra_dhcp_opts' : [ { 'opt_name' : 'bootfile-name' , 'opt_value' : 'http://192.0.2.240:8088/' + filename , 'ip_version' : 4 , } , { 'opt_name' : 'tftp-server' , 'opt_value' : '192.0.2.240' , 'ip_version' : '4' } , { 'opt_name' : 'server-ip-address' , 'opt_value' : '192.0.2.240' , 'ip_version' : '4' } ] } self . neutron . update_port ( self . _provision_port_id , { 'port' : new_port } )
Specify which file ipxe should load during the netboot .
189
13
12,266
def initialize ( self , size = 2 ) : # The IP should be in this range, this is the default DHCP range used by the introspection. # inspection_iprange = 192.0.2.100,192.0.2.120 for i in range ( 0 , size ) : self . nodes . append ( Baremetal ( self . nova_api , self . neutron , self . _keypair , self . _key_filename , self . _security_groups , name = 'baremetal_%d' % i ) ) with concurrent . futures . ThreadPoolExecutor ( max_workers = 5 ) as executor : for bm_node in self . nodes : future = executor . submit ( bm_node . deploy , 'ipxe.usb' , '192.0.2.%d' % self . _idx , flavor = 'm1.large' ) self . _idx += 1 bm_node . _future = future for bm_node in self . nodes : bm_node . _future . result ( ) pm_addr = self . bmc . register_host ( bm_node . name ) self . instackenv . append ( { "pm_type" : "pxe_ipmitool" , "mac" : [ bm_node . mac ] , # TODO(Gonéri): We should get these informations from the baremetal node's flavor "cpu" : "4" , "memory" : "8196" , "disk" : "80" , "arch" : "x86_64" , "pm_user" : "admin" , "pm_password" : "password" , "pm_addr" : pm_addr } ) self . bmc . ssh_pool . stop_all ( )
Populate the node poll .
389
6
12,267
def create_bmc ( self , os_username , os_password , os_project_id , os_auth_url ) : bmc = ovb_bmc . OvbBmc ( nova_api = self . nova_api , neutron = self . neutron , keypair = self . _keypair , key_filename = self . _key_filename , security_groups = self . _security_groups , image_name = 'Fedora 23 x86_64' , ip = '192.0.2.254' , os_username = os_username , os_password = os_password , os_project_id = os_project_id , os_auth_url = os_auth_url ) return bmc
Deploy the BMC machine .
161
5
12,268
def untlxml2py ( untl_filename ) : # Create a stack to hold parents. parent_stack = [ ] # Use iterparse to open the file and loop through elements. for event , element in iterparse ( untl_filename , events = ( 'start' , 'end' ) ) : if NAMESPACE_REGEX . search ( element . tag , 0 ) : element_tag = NAMESPACE_REGEX . search ( element . tag , 0 ) . group ( 1 ) else : element_tag = element . tag # Process the element if it exists in UNTL. if element_tag in PYUNTL_DISPATCH : # If it is the element's opening tag, # add it to the parent stack. if event == 'start' : parent_stack . append ( PYUNTL_DISPATCH [ element_tag ] ( ) ) # If it is the element's closing tag, # remove element from stack. Add qualifier and content. elif event == 'end' : child = parent_stack . pop ( ) if element . text is not None : content = element . text . strip ( ) if content != '' : child . set_content ( element . text ) if element . get ( 'qualifier' , False ) : child . set_qualifier ( element . get ( 'qualifier' ) ) # Add the element to its parent. if len ( parent_stack ) > 0 : parent_stack [ - 1 ] . add_child ( child ) # If it doesn't have a parent, it is the root element, # so return it. else : return child else : raise PyuntlException ( 'Element "%s" not in UNTL dispatch.' % ( element_tag ) )
Parse a UNTL XML file object into a pyuntl element tree .
372
16
12,269
def untldict2py ( untl_dict ) : # Create the root element. untl_root = PYUNTL_DISPATCH [ 'metadata' ] ( ) untl_py_list = [ ] for element_name , element_list in untl_dict . items ( ) : # Loop through the element dictionaries in the element list. for element_dict in element_list : qualifier = element_dict . get ( 'qualifier' , None ) content = element_dict . get ( 'content' , None ) child_list = [ ] # Handle content that is children elements. if isinstance ( content , dict ) : for key , value in content . items ( ) : child_list . append ( PYUNTL_DISPATCH [ key ] ( content = value ) , ) # Create the UNTL element that will have children elements # added to it. if qualifier is not None : untl_element = PYUNTL_DISPATCH [ element_name ] ( qualifier = qualifier ) else : untl_element = PYUNTL_DISPATCH [ element_name ] ( ) # Add the element's children to the element. for child in child_list : untl_element . add_child ( child ) # If not child element, create the element and # add qualifier and content as available. elif content is not None and qualifier is not None : untl_element = PYUNTL_DISPATCH [ element_name ] ( qualifier = qualifier , content = content , ) elif qualifier is not None : untl_element = PYUNTL_DISPATCH [ element_name ] ( qualifier = qualifier , ) elif content is not None : untl_element = PYUNTL_DISPATCH [ element_name ] ( content = content , ) # Create element that only has children. elif len ( child_list ) > 0 : untl_element = PYUNTL_DISPATCH [ element_name ] ( ) # Add the UNTL element to the Python element list. untl_py_list . append ( untl_element ) # Add the UNTL elements to the root element. for untl_element in untl_py_list : untl_root . add_child ( untl_element ) return untl_root
Convert a UNTL dictionary into a Python object .
497
11
12,270
def untlpy2dcpy ( untl_elements , * * kwargs ) : sDate = None eDate = None ark = kwargs . get ( 'ark' , None ) domain_name = kwargs . get ( 'domain_name' , None ) scheme = kwargs . get ( 'scheme' , 'http' ) resolve_values = kwargs . get ( 'resolve_values' , None ) resolve_urls = kwargs . get ( 'resolve_urls' , None ) verbose_vocabularies = kwargs . get ( 'verbose_vocabularies' , None ) # If either resolvers were requested, get the vocabulary data. if resolve_values or resolve_urls : if verbose_vocabularies : # If the vocabularies were passed to the function, use them. vocab_data = verbose_vocabularies else : # Otherwise, retrieve them using the pyuntl method. vocab_data = retrieve_vocab ( ) else : vocab_data = None # Create the DC parent element. dc_root = DC_CONVERSION_DISPATCH [ 'dc' ] ( ) for element in untl_elements . children : # Check if the UNTL element should be converted to DC. if element . tag in DC_CONVERSION_DISPATCH : # Check if the element has its content stored in children nodes. if element . children : dc_element = DC_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , children = element . children , resolve_values = resolve_values , resolve_urls = resolve_urls , vocab_data = vocab_data , ) # It is a normal element. else : dc_element = DC_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , content = element . content , resolve_values = resolve_values , resolve_urls = resolve_urls , vocab_data = vocab_data , ) if element . tag == 'coverage' : # Handle start and end dates. if element . qualifier == 'sDate' : sDate = dc_element elif element . qualifier == 'eDate' : eDate = dc_element # Otherwise, add the coverage element to the structure. else : dc_root . add_child ( dc_element ) # Add non coverage DC element to the structure. elif dc_element : dc_root . add_child ( dc_element ) # If the domain and ark were specified # try to turn them into indentifier elements. 
if ark and domain_name : # Create and add the permalink identifier. permalink_identifier = DC_CONVERSION_DISPATCH [ 'identifier' ] ( qualifier = 'permalink' , domain_name = domain_name , ark = ark , scheme = scheme ) dc_root . add_child ( permalink_identifier ) # Create and add the ark identifier. ark_identifier = DC_CONVERSION_DISPATCH [ 'identifier' ] ( qualifier = 'ark' , content = ark , ) dc_root . add_child ( ark_identifier ) if sDate and eDate : # If a start and end date exist, combine them into one element. dc_element = DC_CONVERSION_DISPATCH [ 'coverage' ] ( content = '%s-%s' % ( sDate . content , eDate . content ) , ) dc_root . add_child ( dc_element ) elif sDate : dc_root . add_child ( sDate ) elif eDate : dc_root . add_child ( eDate ) return dc_root
Convert the UNTL elements structure into a DC structure .
815
12
12,271
def untlpy2highwirepy ( untl_elements , * * kwargs ) : highwire_list = [ ] title = None publisher = None creation = None escape = kwargs . get ( 'escape' , False ) for element in untl_elements . children : # If the UNTL element should be converted to highwire, # create highwire element. if element . tag in HIGHWIRE_CONVERSION_DISPATCH : highwire_element = HIGHWIRE_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , content = element . content , children = element . children , escape = escape , ) if highwire_element : if element . tag == 'title' : if element . qualifier != 'officialtitle' and not title : title = highwire_element elif element . qualifier == 'officialtitle' : title = highwire_element elif element . tag == 'publisher' : if not publisher : # This is the first publisher element. publisher = highwire_element highwire_list . append ( publisher ) elif element . tag == 'date' : # If a creation date hasn't been found yet, # verify this date is acceptable. if not creation and element . qualifier == 'creation' : if highwire_element . content : creation = highwire_element if creation : highwire_list . append ( creation ) # Otherwise, add the element to the list if it has content. elif highwire_element . content : highwire_list . append ( highwire_element ) # If the title was found, add it to the list. if title : highwire_list . append ( title ) return highwire_list
Convert a UNTL Python object to a highwire Python object .
362
14
12,272
def untlpydict2dcformatteddict ( untl_dict , * * kwargs ) : ark = kwargs . get ( 'ark' , None ) domain_name = kwargs . get ( 'domain_name' , None ) scheme = kwargs . get ( 'scheme' , 'http' ) resolve_values = kwargs . get ( 'resolve_values' , None ) resolve_urls = kwargs . get ( 'resolve_urls' , None ) verbose_vocabularies = kwargs . get ( 'verbose_vocabularies' , None ) # Get the UNTL object. untl_py = untldict2py ( untl_dict ) # Convert it to a DC object. dc_py = untlpy2dcpy ( untl_py , ark = ark , domain_name = domain_name , resolve_values = resolve_values , resolve_urls = resolve_urls , verbose_vocabularies = verbose_vocabularies , scheme = scheme ) # Return a formatted DC dictionary. return dcpy2formatteddcdict ( dc_py )
Convert a UNTL data dictionary to a formatted DC data dictionary .
260
14
12,273
def formatted_dc_dict ( dc_dict ) : for key , element_list in dc_dict . items ( ) : new_element_list = [ ] # Add the content for each element to the new element list. for element in element_list : new_element_list . append ( element [ 'content' ] ) dc_dict [ key ] = new_element_list return dc_dict
Change the formatting of the DC data dictionary .
86
9
12,274
def generate_dc_xml ( dc_dict ) : # Define the root namespace. root_namespace = '{%s}' % DC_NAMESPACES [ 'oai_dc' ] # Set the elements namespace URL. elements_namespace = '{%s}' % DC_NAMESPACES [ 'dc' ] schema_location = ( 'http://www.openarchives.org/OAI/2.0/oai_dc/ ' 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd' ) root_attributes = { '{%s}schemaLocation' % XSI : schema_location , } # Return the DC XML string. return pydict2xmlstring ( dc_dict , ordering = DC_ORDER , root_label = 'dc' , root_namespace = root_namespace , elements_namespace = elements_namespace , namespace_map = DC_NAMESPACES , root_attributes = root_attributes , )
Generate a DC XML string .
229
7
12,275
def generate_dc_json ( dc_dict ) : formatted_dict = formatted_dc_dict ( dc_dict ) return json . dumps ( formatted_dict , sort_keys = True , indent = 4 )
Generate DC JSON data .
45
6
12,276
def highwirepy2dict ( highwire_elements ) : highwire_dict = { } # Make a list of content dictionaries for each element name. for element in highwire_elements : if element . name not in highwire_dict : highwire_dict [ element . name ] = [ ] highwire_dict [ element . name ] . append ( { 'content' : element . content } ) return highwire_dict
Convert a list of highwire elements into a dictionary .
93
12
12,277
def generate_highwire_json ( highwire_elements ) : highwire_dict = highwirepy2dict ( highwire_elements ) return json . dumps ( highwire_dict , sort_keys = True , indent = 4 )
Convert highwire elements into a JSON structure .
52
10
12,278
def dcdict2rdfpy ( dc_dict ) : ark_prefix = 'ark: ark:' uri = URIRef ( '' ) # Create the RDF Python object. rdf_py = ConjunctiveGraph ( ) # Set DC namespace definition. DC = Namespace ( 'http://purl.org/dc/elements/1.1/' ) # Get the ark for the subject URI from the ark identifier. for element_value in dc_dict [ 'identifier' ] : if element_value [ 'content' ] . startswith ( ark_prefix ) : uri = URIRef ( element_value [ 'content' ] . replace ( ark_prefix , 'info:ark' ) ) # Bind the prefix/namespace pair. rdf_py . bind ( 'dc' , DC ) # Get the values for each element in the ordered DC elements. for element_name in DC_ORDER : element_value_list = dc_dict . get ( element_name , [ ] ) # Add the values to the RDF object. for element_value in element_value_list : # Handle URL values differently. if ( 'http' in element_value [ 'content' ] and ' ' not in element_value [ 'content' ] ) : rdf_py . add ( ( uri , DC [ element_name ] , URIRef ( element_value [ 'content' ] ) ) ) else : rdf_py . add ( ( uri , DC [ element_name ] , Literal ( element_value [ 'content' ] ) ) ) return rdf_py
Convert a DC dictionary into an RDF Python object .
355
12
12,279
def add_empty_fields ( untl_dict ) : # Iterate the ordered UNTL XML element list to determine # which elements are missing from the untl_dict. for element in UNTL_XML_ORDER : if element not in untl_dict : # Try to create an element with content and qualifier. try : py_object = PYUNTL_DISPATCH [ element ] ( content = '' , qualifier = '' , ) except : # Try to create an element with content. try : py_object = PYUNTL_DISPATCH [ element ] ( content = '' ) except : # Try to create an element without content. try : py_object = PYUNTL_DISPATCH [ element ] ( ) except : raise PyuntlException ( 'Could not add empty element field.' ) else : untl_dict [ element ] = [ { 'content' : { } } ] else : # Handle element without children. if not py_object . contained_children : untl_dict [ element ] = [ { 'content' : '' } ] else : untl_dict [ element ] = [ { 'content' : { } } ] else : # Handle element without children. if not py_object . contained_children : untl_dict [ element ] = [ { 'content' : '' , 'qualifier' : '' } ] else : untl_dict [ element ] = [ { 'content' : { } , 'qualifier' : '' } ] # Add empty contained children. for child in py_object . contained_children : untl_dict [ element ] [ 0 ] . setdefault ( 'content' , { } ) untl_dict [ element ] [ 0 ] [ 'content' ] [ child ] = '' return untl_dict
Add empty values if UNTL fields don t have values .
383
12
12,280
def add_empty_etd_ms_fields ( etd_ms_dict ) : # Determine which ETD MS elements are missing from the etd_ms_dict. for element in ETD_MS_ORDER : if element not in etd_ms_dict : # Try to create an element with content and qualifier. try : py_object = ETD_MS_CONVERSION_DISPATCH [ element ] ( content = '' , qualifier = '' , ) except : # Try to create an element with content. try : py_object = ETD_MS_CONVERSION_DISPATCH [ element ] ( content = '' ) except : # Try to create an element without content. try : py_object = ETD_MS_CONVERSION_DISPATCH [ element ] ( ) except : raise PyuntlException ( 'Could not add empty element field.' ) else : etd_ms_dict [ element ] = [ { 'content' : { } } ] else : # Handle element without children. if not py_object . contained_children : etd_ms_dict [ element ] = [ { 'content' : '' } ] else : etd_ms_dict [ element ] = [ { 'content' : { } } ] else : # Handle element without children. if py_object : if not py_object . contained_children : etd_ms_dict [ element ] = [ { 'content' : '' , 'qualifier' : '' } ] else : etd_ms_dict [ element ] = [ { 'content' : { } , 'qualifier' : '' } ] # Add empty contained children. if py_object : for child in py_object . contained_children : etd_ms_dict [ element ] [ 0 ] . setdefault ( 'content' , { } ) etd_ms_dict [ element ] [ 0 ] [ 'content' ] [ child ] = '' return etd_ms_dict
Add empty values for ETD_MS fields that don t have values .
421
15
12,281
def find_untl_errors ( untl_dict , * * kwargs ) : fix_errors = kwargs . get ( 'fix_errors' , False ) error_dict = { } # Loop through all elements that require qualifiers. for element_name in REQUIRES_QUALIFIER : # Loop through the existing elements that require qualifers. for element in untl_dict . get ( element_name , [ ] ) : error_dict [ element_name ] = 'no_qualifier' # If it should be fixed, set an empty qualifier # if it doesn't have one. if fix_errors : element . setdefault ( 'qualifier' , '' ) # Combine the error dict and UNTL dict into a dict. found_data = { 'untl_dict' : untl_dict , 'error_dict' : error_dict , } return found_data
Add empty required qualifiers to create valid UNTL .
194
10
12,282
def untlpy2etd_ms ( untl_elements , * * kwargs ) : degree_children = { } date_exists = False seen_creation = False # Make the root element. etd_ms_root = ETD_MS_CONVERSION_DISPATCH [ 'thesis' ] ( ) for element in untl_elements . children : etd_ms_element = None # Convert the UNTL element to etd_ms where applicable. if element . tag in ETD_MS_CONVERSION_DISPATCH : # Create the etd_ms_element if the element's content # is stored in children nodes. if element . children : etd_ms_element = ETD_MS_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , children = element . children , ) # If we hit a degree element, make just that one. elif element . tag == 'degree' : # Make a dict of the degree children information. if element . qualifier in [ 'name' , 'level' , 'discipline' , 'grantor' ] : degree_children [ element . qualifier ] = element . content # For date elements, limit to first instance of creation date. elif element . tag == 'date' : if element . qualifier == 'creation' : # If the root already has a date, delete the child. for child in etd_ms_root . children : if child . tag == 'date' : del child if not seen_creation : date_exists = False seen_creation = True if not date_exists : # Create the etd_ms element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , content = element . content , ) date_exists = True # It is a normal element. elif element . tag not in [ 'date' , 'degree' ] : # Create the etd_ms_element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH [ element . tag ] ( qualifier = element . qualifier , content = element . content , ) # Add the element to the structure if the element exists. if etd_ms_element : etd_ms_root . add_child ( etd_ms_element ) if element . tag == 'meta' : # Initialize ark to False because it may not exist yet. ark = False # Iterate through children and look for ark. for i in etd_ms_root . children : if i . tag == 'identifier' and i . content . 
startswith ( 'http://digital.library.unt.edu/' ) : ark = True # If the ark doesn't yet exist, try and create it. if not ark : # Reset for future tests. ark = False if element . qualifier == 'ark' : ark = element . content if ark is not None : # Create the ark identifier element and add it. ark_identifier = ETD_MS_CONVERSION_DISPATCH [ 'identifier' ] ( ark = ark , ) etd_ms_root . add_child ( ark_identifier ) # If children exist for the degree, make a degree element. if degree_children : degree_element = ETD_MS_CONVERSION_DISPATCH [ 'degree' ] ( ) # When we have all the elements stored, add the children to the # degree node. degree_child_element = None for k , v in degree_children . iteritems ( ) : # Create the individual classes for degrees. degree_child_element = ETD_MS_DEGREE_DISPATCH [ k ] ( content = v , ) # If the keys in degree_children are valid, # add it to the child. if degree_child_element : degree_element . add_child ( degree_child_element ) etd_ms_root . add_child ( degree_element ) return etd_ms_root
Convert the UNTL elements structure into an ETD_MS structure .
881
15
12,283
def etd_ms_dict2xmlfile ( filename , metadata_dict ) : try : f = open ( filename , 'w' ) f . write ( generate_etd_ms_xml ( metadata_dict ) . encode ( "utf-8" ) ) f . close ( ) except : raise MetadataGeneratorException ( 'Failed to create an XML file. Filename: %s' % ( filename ) )
Create an ETD MS XML file .
93
8
12,284
def signal_to_noise_map ( self ) : signal_to_noise_map = np . divide ( self . data , self . noise_map ) signal_to_noise_map [ signal_to_noise_map < 0 ] = 0 return signal_to_noise_map
The signal - to - noise_map of the data and noise - map which are fitted .
67
19
12,285
def structure ( cls ) : # type: () -> Text if cls . signature is NotImplemented : raise NotImplementedError ( "no signature defined" ) up = cls . cutter . elucidate ( ) down = str ( Seq ( up ) . reverse_complement ( ) ) ovhg = cls . cutter . ovhgseq upsig , downsig = cls . signature if cls . cutter . is_5overhang ( ) : upsite = "^{}_" . format ( ovhg ) downsite = "_{}^" . format ( Seq ( ovhg ) . reverse_complement ( ) ) else : upsite = "_{}^" . format ( ovhg ) downsite = "^{}_" . format ( Seq ( ovhg ) . reverse_complement ( ) ) if issubclass ( cls , AbstractModule ) : return "" . join ( [ up . replace ( upsite , "({})(" . format ( upsig ) ) , "N*" , down . replace ( downsite , ")({})" . format ( downsig ) ) , ] ) elif issubclass ( cls , AbstractVector ) : return "" . join ( [ down . replace ( downsite , "({})(" . format ( downsig ) ) , "N*" , up . replace ( upsite , ")({})" . format ( upsig ) ) , ] ) else : raise RuntimeError ( "Part must be either a module or a vector!" )
Get the part structure as a DNA regex pattern .
335
10
12,286
def characterize ( cls , record ) : classes = list ( cls . __subclasses__ ( ) ) if not isabstract ( cls ) : classes . append ( cls ) for subclass in classes : entity = subclass ( record ) if entity . is_valid ( ) : return entity raise RuntimeError ( "could not find the type for '{}'" . format ( record . id ) )
Load the record in a concrete subclass of this type .
85
11
12,287
def global_request ( self , kind , data = None , wait = True ) : if wait : self . completion_event = threading . Event ( ) m = Message ( ) m . add_byte ( cMSG_GLOBAL_REQUEST ) m . add_string ( kind ) m . add_boolean ( wait ) if data is not None : m . add ( * data ) self . _log ( DEBUG , 'Sending global request "%s"' % kind ) self . _send_user_message ( m ) if not wait : return None while True : self . completion_event . wait ( 0.1 ) if not self . active : return None if self . completion_event . isSet ( ) : break return self . global_response
Make a global request to the remote host . These are normally extensions to the SSH2 protocol .
162
19
12,288
def _activate_inbound ( self ) : block_size = self . _cipher_info [ self . remote_cipher ] [ 'block-size' ] if self . server_mode : IV_in = self . _compute_key ( 'A' , block_size ) key_in = self . _compute_key ( 'C' , self . _cipher_info [ self . remote_cipher ] [ 'key-size' ] ) else : IV_in = self . _compute_key ( 'B' , block_size ) key_in = self . _compute_key ( 'D' , self . _cipher_info [ self . remote_cipher ] [ 'key-size' ] ) engine = self . _get_cipher ( self . remote_cipher , key_in , IV_in ) mac_size = self . _mac_info [ self . remote_mac ] [ 'size' ] mac_engine = self . _mac_info [ self . remote_mac ] [ 'class' ] # initial mac keys are done in the hash's natural size (not the potentially truncated # transmission size) if self . server_mode : mac_key = self . _compute_key ( 'E' , mac_engine ( ) . digest_size ) else : mac_key = self . _compute_key ( 'F' , mac_engine ( ) . digest_size ) self . packetizer . set_inbound_cipher ( engine , block_size , mac_engine , mac_size , mac_key ) compress_in = self . _compression_info [ self . remote_compression ] [ 1 ] if ( compress_in is not None ) and ( ( self . remote_compression != 'zlib@openssh.com' ) or self . authenticated ) : self . _log ( DEBUG , 'Switching on inbound compression ...' ) self . packetizer . set_inbound_compressor ( compress_in ( ) )
switch on newly negotiated encryption parameters for inbound traffic
439
10
12,289
def enable_user ( self , user ) : if user in self . ssh_pool . _ssh_clients : return if user == 'root' : _root_ssh_client = ssh . SshClient ( hostname = self . hostname , user = 'root' , key_filename = self . _key_filename , via_ip = self . via_ip ) # connect as a root user _root_ssh_client . start ( ) result , _ = _root_ssh_client . run ( 'uname -a' ) image_user = None # check if root is not allowed if 'Please login as the user "cloud-user"' in result : image_user = 'cloud-user' _root_ssh_client . stop ( ) elif 'Please login as the user "fedora" rather than the user "root"' in result : image_user = 'fedora' _root_ssh_client . stop ( ) elif 'Please login as the user "centos" rather than the user "root"' in result : image_user = 'centos' _root_ssh_client . stop ( ) if image_user : self . enable_user ( image_user ) LOG . info ( 'enabling the root user' ) _cmd = "sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys" self . ssh_pool . run ( image_user , _cmd ) _root_ssh_client . start ( ) self . ssh_pool . add_ssh_client ( 'root' , _root_ssh_client ) return # add the cloud user to the ssh pool self . ssh_pool . build_ssh_client ( hostname = self . hostname , user = user , key_filename = self . _key_filename , via_ip = self . via_ip )
Enable the root account on the remote host .
404
9
12,290
def send_file ( self , local_path , remote_path , user = 'root' , unix_mode = None ) : self . enable_user ( user ) return self . ssh_pool . send_file ( user , local_path , remote_path , unix_mode = unix_mode )
Upload a local file on the remote host .
68
9
12,291
def send_dir ( self , local_path , remote_path , user = 'root' ) : self . enable_user ( user ) return self . ssh_pool . send_dir ( user , local_path , remote_path )
Upload a directory on the remote host .
51
8
12,292
def create_file ( self , path , content , mode = 'w' , user = 'root' ) : self . enable_user ( user ) return self . ssh_pool . create_file ( user , path , content , mode )
Create a file on the remote host .
51
8
12,293
def yum_install(self, packages, ignore_error=False):
    """Install some packages on the remote host.

    :param packages: iterable of package names to install
    :param ignore_error: when True, a failing install is not fatal
    :return: result of the remote `yum install` command (retried up to 5x)
    """
    cmd = 'yum install -y --quiet ' + ' '.join(packages)
    return self.run(cmd, ignore_error=ignore_error, retry=5)
Install some packages on the remote host .
53
8
12,294
def rhsm_register(self, rhsm):
    """Register the host on the RHSM.

    :param rhsm: dict with 'login', optional 'password' (falls back to
        the RHN_PW environment variable) and optional 'pool_id'
    """
    # Get rhsm credentials
    login = rhsm.get('login')
    password = rhsm.get('password', os.environ.get('RHN_PW'))
    pool_id = rhsm.get('pool_id')

    # Ensure the RHEL beta channel are disabled
    self.run('rm /etc/pki/product/69.pem', ignore_error=True)

    # never write the real password in the logs
    custom_log = ('subscription-manager register '
                  '--username %s --password *******' % login)
    self.run(
        'subscription-manager register --username %s --password "%s"' % (
            login, password),
        success_status=(0, 64),
        custom_log=custom_log,
        retry=3)

    if pool_id:
        self.run('subscription-manager attach --pool %s' % pool_id)
    else:
        self.run('subscription-manager attach --auto')
    self.rhsm_active = True
Register the host on the RHSM .
224
9
12,295
def enable_repositories(self, repositories):
    """Enable a list of RHSM repositories.

    Handles three entry types: 'rhsm_channel' (collected and enabled via
    subscription-manager when RHSM is active), 'yum_repo' (repo file
    written on the host) and 'package' (installed with yum).

    :param repositories: iterable of dicts, each with at least a 'type'
        key and the keys that type requires ('name', or 'dest'/'content')
    """
    for entry in repositories:
        if entry['type'] != 'rhsm_channel':
            continue
        if entry['name'] not in self.rhsm_channels:
            self.rhsm_channels.append(entry['name'])

    if self.rhsm_active:
        subscription_cmd = (
            "subscription-manager repos '--disable=*' --enable=" +
            ' --enable='.join(self.rhsm_channels))
        self.run(subscription_cmd)

    for repo_file in (r for r in repositories if r['type'] == 'yum_repo'):
        self.create_file(repo_file['dest'], repo_file['content'])

    packages = [r['name'] for r in repositories if r['type'] == 'package']
    if packages:
        self.yum_install(packages)
Enable a list of RHSM repositories .
216
9
12,296
def create_stack_user(self):
    """Create the stack user on the machine.

    Gives 'stack' passwordless sudo, copies root's authorized_keys into
    its home and opens an SSH client for it in the pool.
    """
    # exit status 9 means the user already exists — that is fine
    self.run('adduser -m stack', success_status=(0, 9))
    self.create_file('/etc/sudoers.d/stack',
                     'stack ALL=(root) NOPASSWD:ALL\n')
    for cmd in (
            'mkdir -p /home/stack/.ssh',
            'cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys',
            'chown -R stack:stack /home/stack/.ssh',
            'chmod 700 /home/stack/.ssh',
            'chmod 600 /home/stack/.ssh/authorized_keys'):
        self.run(cmd)
    self.ssh_pool.build_ssh_client(
        self.hostname, 'stack', self._key_filename, self.via_ip)
Create the stack user on the machine .
198
8
12,297
def fetch_image(self, path, dest, user='root'):
    """Store in the user home directory an image from a remote location.

    The download is skipped when `dest` already exists on the host.

    :param path: URL (or remote location) of the image to download
    :param dest: destination path on the remote host
    :param user: remote account running the download (default 'root')
    """
    cmd = 'test -f %s || curl -L -s -o %s %s' % (dest, dest, path)
    self.run(cmd, user=user, ignore_error=True)
Store an image from a remote location in the user's home directory.
60
13
12,298
def clean_system(self):
    """Clean up unnecessary packages from the system.

    Disables NetworkManager and cloud-init, kills dhclient and switches
    back to the classic 'network' service.
    """
    # non-zero statuses here just mean the unit/process was already gone
    teardown = (
        ('systemctl disable NetworkManager', (0, 1)),
        ('systemctl stop NetworkManager', (0, 5)),
        ('pkill -9 dhclient', (0, 1)),
    )
    for cmd, ok_statuses in teardown:
        self.run(cmd, success_status=ok_statuses)
    self.yum_remove(['cloud-init', 'NetworkManager'])
    self.run('systemctl enable network')
    self.run('systemctl restart network')
Clean up unnecessary packages from the system .
117
8
12,299
def yum_update(self, allow_reboot=False):
    """Do a yum update on the system.

    :param allow_reboot: when True, make the newest installed kernel the
        default grub entry and reboot the host if the running kernel is
        not that default.
    """
    self.run('yum clean all')
    self.run('test -f /usr/bin/subscription-manager && '
             'subscription-manager repos --list-enabled',
             ignore_error=True)
    self.run('yum repolist')
    self.run('yum update -y --quiet', retry=3)
    # reboot if a new initrd has been generated since the boot
    if not allow_reboot:
        return
    self.run('grubby --set-default $(ls /boot/vmlinuz-*.x86_64|tail -1)')
    default_kernel = self.run('grubby --default-kernel')[0].rstrip()
    running_kernel = self.run('uname -r')[0].rstrip()
    if running_kernel not in default_kernel:
        self.run('reboot', ignore_error=True)
        self.ssh_pool.stop_all()