idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,600
def is_tomodir(directory):
    """Check if the supplied directory is a tomodir.

    A tomodir must contain all of the subdirectories: exe, config, rho,
    inv, mod.

    Parameters
    ----------
    directory : str
        Path to check.

    Returns
    -------
    bool
        True if the directory is a valid tomodir, False otherwise.
    """
    if not os.path.isdir(directory):
        return False
    # all required subdirectories must be present
    required_subdirs = ('exe', 'config', 'rho', 'inv', 'mod')
    return all(
        os.path.isdir(os.path.join(directory, subdir))
        for subdir in required_subdirs
    )
Check if the supplied directory is a tomodir
105
10
12,601
def td_is_finished(tomodir):
    """Return the state of modeling and inversion for a given tomodir.

    The result does not take into account sensitivities or potentials as
    optionally generated by CRMod.

    Parameters
    ----------
    tomodir : str
        Directory to check.

    Returns
    -------
    crmod_is_finished : bool
        True if a CRMod result (volt.dat) and its inputs are present.
    crtomo_is_finished : bool
        True if a finished CRTomo inversion is present.

    Raises
    ------
    Exception
        If the supplied directory is not a tomodir.
    """
    if not is_tomodir(tomodir):
        raise Exception('Supplied directory is not a tomodir!')

    # crmod finished is determined by:
    # config.dat/rho.dat/crmod.cfg are present
    # volt.dat is present
    crmod_required = (
        'config/config.dat',
        'rho/rho.dat',
        'grid/elem.dat',
        'grid/elec.dat',
        'exe/crmod.cfg',
        'mod/volt.dat',
    )
    crmod_is_finished = all(
        os.path.isfile(tomodir + os.sep + filename)
        for filename in crmod_required
    )

    # crtomo is finished if
    # crtomo.cfg/volt.dat/elem.dat/elec.dat are present
    # inv/run.ctr contains the word "CPU" in one of the last lines
    crtomo_required = (
        'grid/elem.dat',
        'grid/elec.dat',
        'exe/crtomo.cfg',
        'inv/inv.ctr',
        'inv/run.ctr',
        'mod/volt.dat',
    )
    # BUG FIX: initialize before the file check so the return statement
    # cannot raise UnboundLocalError when the required files are missing
    crtomo_is_finished = False
    if all(os.path.isfile(tomodir + os.sep + f) for f in crtomo_required):
        with open(tomodir + os.sep + 'inv/run.ctr', 'r') as fid:
            lines = fid.readlines()
        # check the last 5 lines
        regex = re.compile('CPU')
        for line in lines[-5:]:
            # BUG FIX: stop at the first match; previously a later
            # non-matching line reset the flag back to False
            if regex.match(line.strip()) is not None:
                crtomo_is_finished = True
                break
    return crmod_is_finished, crtomo_is_finished
Return the state of modeling and inversion for a given tomodir . The result does not take into account sensitivities or potentials as optionally generated by CRMod .
565
34
12,602
def is_sipdir(directory):
    """Simple check if the supplied directory is a SIP directory.

    A SIP directory must contain a 'frequencies.dat' file and an
    'invmod' subdirectory.

    Parameters
    ----------
    directory : str
        Path to check.

    Returns
    -------
    bool
        True if the directory looks like a SIP directory.
    """
    has_frequencies = os.path.isfile(directory + os.sep + 'frequencies.dat')
    has_invmod = os.path.isdir(directory + os.sep + 'invmod')
    return has_frequencies and has_invmod
Simple check if the supplied directory is a SIP directory .
86
12
12,603
def sipdir_is_finished(sipdir):
    """Return the state of modeling and inversion for a given SIP dir.

    The result does not take into account sensitivities or potentials as
    optionally generated by CRMod.

    Parameters
    ----------
    sipdir : str
        SIP directory to check.

    Returns
    -------
    crmod_finished : bool
        True only if CRMod finished in every frequency tomodir.
    crtomo_finished : bool
        True only if CRTomo finished in every frequency tomodir.

    Raises
    ------
    Exception
        If the supplied directory is not a SIP directory.
    """
    if not is_sipdir(sipdir):
        raise Exception('Directory is not a valid SIP directory!')

    # each subdirectory of invmod/ is one frequency tomodir
    candidates = sorted(glob.glob(sipdir + os.sep + 'invmod' + os.sep + '*'))
    frequency_dirs = [entry for entry in candidates if os.path.isdir(entry)]

    crmod_finished = True
    crtomo_finished = True
    for frequency_dir in frequency_dirs:
        sub_crmod, sub_crtomo = td_is_finished(frequency_dir)
        crmod_finished = crmod_finished and sub_crmod
        crtomo_finished = crtomo_finished and sub_crtomo
    return crmod_finished, crtomo_finished
Return the state of modeling and inversion for a given SIP dir . The result does not take into account sensitivities or potentials as optionally generated by CRMod .
172
34
12,604
def enable_neutron_hack(self, os_username, os_password, os_project_id, os_auth_url):
    """Enable the neutron hack on the undercloud.

    Installs the neutron client, uploads the ovb_fix_neutron_addr helper
    script, writes a systemd unit that keeps it running and enables/starts
    that unit on the remote host.

    :param os_username: OpenStack user name passed to the helper script.
    :param os_password: OpenStack password (written protected into the unit).
    :param os_project_id: OpenStack project id.
    :param os_auth_url: Keystone auth URL.
    """
    self.yum_install(['python-neutronclient'])
    self.send_file(
        pkg_data_filename('static', 'ovb_fix_neutron_addr'),
        '/usr/local/bin/ovb_fix_neutron_addr',
        unix_mode=0o755)
    # systemd unit template; credentials are substituted below
    content = """
[Unit]
Description=OVB neutron hack Service

[Service]
ExecStart=/usr/local/bin/ovb_fix_neutron_addr --os-user {os_username} --os-password {os_password} --os-project-id {os_project_id} --os-auth-url {os_auth_url}
User=root
StandardOutput=kmsg+console
StandardError=inherit
Restart=always

[Install]
WantedBy=multi-user.target
"""
    unit = 'ovb_fix_neutron_addr.service'
    # protect_password presumably escapes/obfuscates the secret — TODO confirm
    self.create_file(
        '/usr/lib/systemd/system/%s' % unit,
        content.format(
            os_username=os_username,
            os_password=protect_password(os_password),
            os_project_id=os_project_id,
            os_auth_url=os_auth_url))
    self.run('systemctl enable %s' % unit)
    self.run('systemctl start %s' % unit)
Enable the neutron hack on the undercloud .
336
9
12,605
def patch_ironic_ramdisk(self):
    """Clean the disk before flushing the new image.

    Unpacks the ironic-python-agent initramfs into a temporary directory on
    the remote host, applies the ironic-wipefs patch and repacks the
    initramfs in place.
    """
    # mktemp output ends with a newline; strip it to get the raw path
    tmpdir = self.run('mktemp -d')[0].rstrip('\n')
    # unpack the compressed cpio archive
    self.run('cd {tmpdir}; zcat /home/stack/ironic-python-agent.initramfs| cpio -id'.format(tmpdir=tmpdir))
    self.send_file(pkg_data_filename('static', 'ironic-wipefs.patch'), '/tmp/ironic-wipefs.patch')
    self.run('cd {tmpdir}; patch -p0 < /tmp/ironic-wipefs.patch'.format(tmpdir=tmpdir))
    # repack the patched tree over the original initramfs (uncompressed newc format)
    self.run('cd {tmpdir}; find . | cpio --create --format=newc > /home/stack/ironic-python-agent.initramfs'.format(tmpdir=tmpdir))
Clean the disk before flushing the new image .
209
10
12,606
def show_menu(title, options, default=None, height=None, width=None,
              multiselect=False, precolored=False):
    """Shows an interactive menu in the terminal.

    Builds the plugin list according to the given options (filtering is
    always enabled; grouping, title and precolored support are added on
    demand) and runs the menu.

    :param title: Optional menu title (adds a TitlePlugin when truthy).
    :param options: Iterable of options; may contain OptionGroup items.
    :param default: Initially selected option.
    :param height: Menu height in lines.
    :param width: Menu width in columns.
    :param multiselect: Allow selecting several options.
    :param precolored: Treat option strings as already colored.
    :return: Whatever Termenu.show() returns for the user's selection.
    """
    # filtering is always available; order of plugin registration matters
    plugins = [FilterPlugin()]
    uses_groups = any(isinstance(option, OptionGroup) for option in options)
    if uses_groups:
        plugins.append(OptionGroupPlugin())
    if title:
        plugins.append(TitlePlugin(title))
    if precolored:
        plugins.append(PrecoloredPlugin())
    menu = Termenu(options, default=default, height=height,
                   width=width, multiselect=multiselect, plugins=plugins)
    return menu.show()
Shows an interactive menu in the terminal .
134
9
12,607
def pluggable(method):
    """Mark a class method as extendable with plugins.

    The returned wrapper dispatches to the last registered plugin when the
    instance carries a ``_plugins`` list; otherwise it calls the original
    method directly. The undecorated method stays reachable via the
    ``original`` attribute on the wrapper.
    """
    def wrapped(self, *args, **kwargs):
        if not hasattr(self, "_plugins"):
            return method(self, *args, **kwargs)
        # dispatch to the last plugin; it may call the previous one via
        # self.parent.method, creating a call chain
        handler = getattr(self._plugins[-1], method.__name__)
        return handler(*args, **kwargs)
    wrapped.original = method
    return wrapped
Mark a class method as extendable with plugins .
109
10
12,608
def register_plugin(host, plugin):
    """Register a plugin with a host object.

    The first registration installs an OriginalMethods sentinel at the
    bottom of the chain so that plugins can always fall through to the
    host's undecorated methods via ``self.parent``. Each new plugin is
    linked to the previous top of the chain.
    """
    class OriginalMethods(object):
        def __getattr__(self, name):
            # fall through to the undecorated method stored by @pluggable
            def call_original(*args, **kwargs):
                return getattr(host, name).original(host, *args, **kwargs)
            return call_original

    if not hasattr(host, "_plugins"):
        host._plugins = [OriginalMethods()]
    plugin.parent = host._plugins[-1]
    plugin.host = host
    host._plugins.append(plugin)
Register a plugin with a host object . Some
109
9
12,609
def chdir(self, path=None):
    """Change the "current directory" of this SFTP session.

    SFTP itself has no notion of a working directory, so it is emulated
    client-side. Passing ``None`` stops using a current working directory.

    :param path: New current working directory, or ``None`` to unset it.
    :raises SFTPError: If ``path`` exists but is not a directory.
    """
    if path is None:
        self._cwd = None
        return
    mode = self.stat(path).st_mode
    if not stat.S_ISDIR(mode):
        message = "%s: %s" % (os.strerror(errno.ENOTDIR), path)
        raise SFTPError(errno.ENOTDIR, message)
    self._cwd = b(self.normalize(path))
Change the current directory of this SFTP session . Since SFTP doesn't really have the concept of a current working directory this is emulated by Paramiko . Once you use this method to set a working directory all operations on this SFTPClient object will be relative to that path . You can pass in None to stop using a current working directory .
98
71
12,610
def get_int(self):
    """Fetch an int from the stream.

    A leading 0xff byte marks a multi-precision integer encoded as a
    binary string; otherwise the value is a plain big-endian uint32.
    """
    prefix = self.get_bytes(1)
    if prefix == max_byte:
        return util.inflate_long(self.get_binary())
    raw = prefix + self.get_bytes(3)
    return struct.unpack('>I', raw)[0]
Fetch an int from the stream .
67
8
12,611
def set_logger(name, level='INFO', fmt=None, datefmt=None, propagate=1,
               remove_handlers=False):
    """Configure a named logger with a single StreamHandler.

    :param name: Logger name passed to ``logging.getLogger``.
    :param level: Level name, e.g. ``'INFO'`` or ``'DEBUG'``.
    :param fmt: Optional format string for the handler's formatter.
    :param datefmt: Optional date format string for the formatter.
    :param propagate: Value assigned to ``logger.propagate``.
    :param remove_handlers: If True, clear all handlers and return without
        installing a new one.
    """
    logger = logging.getLogger(name)
    logger.setLevel(getattr(logging, level))
    logger.propagate = propagate
    if remove_handlers:
        logger.handlers = []
        return
    handler = None
    for existing in logger.handlers:
        if isinstance(existing, logging.StreamHandler):
            # use existing instead of clean and create
            handler = existing
            break
    if not handler:
        handler = logging.StreamHandler()
        logger.addHandler(handler)
    # FIX: pass only explicitly-provided formatter arguments directly
    # instead of the fragile locals()[i] lookup
    formatter_kwgs = {}
    if fmt is not None:
        formatter_kwgs['fmt'] = fmt
    if datefmt is not None:
        formatter_kwgs['datefmt'] = datefmt
    handler.setFormatter(BaseFormatter(**formatter_kwgs))
This function will clear the previous handlers and set only one handler which will only be StreamHandler for the logger .
199
22
12,612
def format(self, record):
    """Return the formatted log record as a unicode string.

    Decodes all record attributes to unicode, injects terminal color
    codes when the format string asks for them, appends any exception
    text and indents continuation lines with ``self.tab``.
    """
    self._format_record(record)
    record_dict = {}
    # normalize every attribute of the record to unicode
    for k, v in record.__dict__.items():
        if isinstance(k, str):
            k = decode_(k, 'utf8')
        if isinstance(v, str):
            v = decode_(v, 'utf8', 'replace')
        record_dict[k] = v
    # only compute color codes when the format string references them
    if 'color' in self.fmt or 'end_color' in self.fmt:
        record_dict['color'], record_dict['end_color'] = _color(record.levelno)
    log = self.ufmt % record_dict
    if record.exc_text:
        # ensure the traceback starts on its own line
        if log[-1:] != '\n':
            log += '\n'
        log += decode_(record.exc_text, 'utf8', 'replace')
    # indent continuation lines for readability
    log = log.replace('\n', '\n' + self.tab)
    return log
return log in unicode
218
5
12,613
def list(self, source_ids=None, seniority="all", stage=None,
         date_start="1494539999", date_end=TIMESTAMP_NOW, filter_id=None,
         page=1, limit=30, sort_by='ranking', filter_reference=None,
         order_by=None):
    """Retrieve all profiles matching the query parameters.

    Each argument is validated before being placed into the query; note
    that validation errors therefore surface in the order the parameters
    are processed below, not in signature order.

    :param source_ids: List of source ids (JSON-encoded for the request).
    :param seniority: Seniority filter, default "all".
    :param stage: Optional stage filter.
    :param date_start: Start timestamp (string or int).
    :param date_end: End timestamp, defaults to TIMESTAMP_NOW.
    :param filter_id: Optional filter id.
    :param page: Result page number.
    :param limit: Maximum results per page.
    :param sort_by: Sort criterion, default 'ranking'.
    :param filter_reference: Optional filter reference.
    :param order_by: Optional ordering direction (not validated).
    :return: Decoded JSON response from the "profiles" endpoint.
    """
    query_params = {}
    query_params["date_end"] = _validate_timestamp(date_end, "date_end")
    query_params["date_start"] = _validate_timestamp(date_start, "date_start")
    if filter_id:
        query_params["filter_id"] = _validate_filter_id(filter_id)
    if filter_reference:
        query_params["filter_reference"] = _validate_filter_reference(filter_reference)
    query_params["limit"] = _validate_limit(limit)
    query_params["page"] = _validate_page(page)
    query_params["seniority"] = _validate_seniority(seniority)
    query_params["sort_by"] = _validate_sort_by(sort_by)
    query_params["source_ids"] = json.dumps(_validate_source_ids(source_ids))
    query_params["stage"] = _validate_stage(stage)
    query_params["order_by"] = order_by
    response = self.client.get("profiles", query_params)
    return response.json()
Retrieve all profiles that match the query params .
356
11
12,614
def add(self, source_id=None, file_path=None, profile_reference="",
        timestamp_reception=None, training_metadata=None):
    """Add a profile resume to a source id.

    :param source_id: Id of the source the profile is attached to.
    :param file_path: Path of the resume file to upload.
    :param profile_reference: Optional client-side profile reference.
    :param timestamp_reception: Optional reception timestamp.
    :param training_metadata: Optional list of training metadata; defaults
        to an empty list.
    :return: Decoded JSON response from the "profile" endpoint.
    """
    # FIX: avoid the shared mutable default argument ([]) by normalizing
    # None to a fresh list per call
    if training_metadata is None:
        training_metadata = []
    data = {}
    data["source_id"] = _validate_source_id(source_id)
    data["profile_reference"] = _validate_profile_reference(profile_reference)
    data["timestamp_reception"] = _validate_timestamp(timestamp_reception, "timestamp_reception")
    data["training_metadata"] = _validate_training_metadata(training_metadata)
    files = _get_file_metadata(file_path, profile_reference)
    response = None
    with open(file_path, 'rb') as in_file:
        files = (files[0], in_file, files[2])
        response = self.client.post("profile", data=data, files={"file": files})
    return response.json()
Add a profile resume to a source id .
221
9
12,615
def addList(self, source_id, dir_path, is_recurcive=False,
            timestamp_reception=None, training_metadata=None):
    """Add all profiles from a given directory.

    Every file found in the directory is uploaded via :meth:`add`;
    failures are collected instead of aborting the batch.

    :param source_id: Id of the source the profiles are attached to.
    :param dir_path: Directory containing resume files.
    :param is_recurcive: Whether to descend into subdirectories.
    :param timestamp_reception: Optional reception timestamp.
    :param training_metadata: Optional list of training metadata; defaults
        to an empty list.
    :return: Dict with 'success' (path -> response) and 'fail'
        (path -> exception) mappings.
    :raises ValueError: If ``dir_path`` is not a directory.
    """
    # FIX: avoid the shared mutable default argument ([]) by normalizing
    # None to a fresh list per call
    if training_metadata is None:
        training_metadata = []
    if not path.isdir(dir_path):
        raise ValueError(dir_path + ' is not a directory')
    files_to_send = _get_files_from_dir(dir_path, is_recurcive)
    succeed_upload = {}
    failed_upload = {}
    for file_path in files_to_send:
        try:
            resp = self.add(source_id=source_id, file_path=file_path,
                            profile_reference="",
                            timestamp_reception=timestamp_reception,
                            training_metadata=training_metadata)
            if resp['code'] != 200 and resp['code'] != 201:
                failed_upload[file_path] = ValueError('Invalid response: ' + str(resp))
            else:
                succeed_upload[file_path] = resp
        except BaseException as e:
            # record the failure but keep uploading the rest of the batch
            failed_upload[file_path] = e
    result = {'success': succeed_upload, 'fail': failed_upload}
    return result
Add all profiles from a given directory .
253
8
12,616
def get(self, source_id=None, profile_id=None, profile_reference=None):
    """Retrieve the profile information associated with a profile id.

    :param source_id: Id of the source holding the profile.
    :param profile_id: Optional profile id.
    :param profile_reference: Optional client-side profile reference.
    :return: Decoded JSON response from the 'profile' endpoint.
    """
    params = {"source_id": _validate_source_id(source_id)}
    if profile_id:
        params["profile_id"] = _validate_profile_id(profile_id)
    if profile_reference:
        params["profile_reference"] = _validate_profile_reference(profile_reference)
    response = self.client.get('profile', params)
    return response.json()
Retrieve the profile information associated with profile id .
131
10
12,617
def set(self, source_id=None, profile_id=None, filter_id=None, stage=None,
        profile_reference=None, filter_reference=None):
    """Edit the profile stage given a filter.

    :param source_id: Id of the source holding the profile.
    :param profile_id: Optional profile id.
    :param filter_id: Optional filter id.
    :param stage: New stage value.
    :param profile_reference: Optional client-side profile reference.
    :param filter_reference: Optional client-side filter reference.
    :return: Decoded JSON response from the 'profile/stage' endpoint.
    """
    payload = {"source_id": _validate_source_id(source_id)}
    if profile_id:
        payload["profile_id"] = _validate_profile_id(profile_id)
    if filter_id:
        payload["filter_id"] = _validate_filter_id(filter_id)
    if profile_reference:
        payload["profile_reference"] = _validate_profile_reference(profile_reference)
    if filter_reference:
        payload["filter_reference"] = _validate_filter_reference(filter_reference)
    payload["stage"] = _validate_stage(stage)
    response = self.client.patch('profile/stage', data=payload)
    return response.json()
Edit the profile stage given a filter .
208
8
12,618
def get(self, source_id=None, profile_id=None, profile_reference=None,
        filter_id=None, filter_reference=None):
    """Retrieve the interpretability information.

    :param source_id: Id of the source holding the profile.
    :param profile_id: Optional profile id.
    :param profile_reference: Optional client-side profile reference.
    :param filter_id: Optional filter id.
    :param filter_reference: Optional client-side filter reference.
    :return: Raw response object from the 'profile/revealing' endpoint
        (note: not decoded to JSON).
    """
    params = {"source_id": _validate_source_id(source_id)}
    if profile_id:
        params["profile_id"] = _validate_profile_id(profile_id)
    if profile_reference:
        params["profile_reference"] = _validate_profile_reference(profile_reference)
    if filter_id:
        params["filter_id"] = _validate_filter_id(filter_id)
    if filter_reference:
        params["filter_reference"] = _validate_filter_reference(filter_reference)
    return self.client.get('profile/revealing', params)
Retrieve the interpretability information .
198
7
12,619
def check(self, profile_data, training_metadata=None):
    """Use the api to check whether the profile_data is valid.

    :param profile_data: Dict of profile data to validate server-side.
    :param training_metadata: Optional list of training metadata; defaults
        to an empty list.
    :return: Decoded JSON response from the 'profile/json/check' endpoint.
    """
    # FIX: avoid the shared mutable default argument ([]) by normalizing
    # None to a fresh list per call
    if training_metadata is None:
        training_metadata = []
    data = {
        "profile_json": _validate_dict(profile_data, "profile_data"),
        "training_metadata": _validate_training_metadata(training_metadata),
    }
    response = self.client.post("profile/json/check", data=data)
    return response.json()
Use the api to check whether the profile_data is valid .
89
14
12,620
def add(self, source_id, profile_data, training_metadata=None,
        profile_reference=None, timestamp_reception=None):
    """Use the api to add a new profile using profile_data.

    :param source_id: Id of the source the profile is attached to.
    :param profile_data: Dict of profile data to submit.
    :param training_metadata: Optional list of training metadata; defaults
        to an empty list.
    :param profile_reference: Optional client-side profile reference.
    :param timestamp_reception: Optional reception timestamp.
    :return: Decoded JSON response from the 'profile/json' endpoint.
    """
    # FIX: avoid the shared mutable default argument ([]) by normalizing
    # None to a fresh list per call
    if training_metadata is None:
        training_metadata = []
    data = {
        "source_id": _validate_source_id(source_id),
        "profile_json": _validate_dict(profile_data, "profile_data"),
        "training_metadata": _validate_training_metadata(training_metadata),
        "profile_reference": profile_reference
    }
    # some enrichment for profile_json
    if timestamp_reception is not None:
        data['timestamp_reception'] = _validate_timestamp(timestamp_reception, 'timestamp_reception')
    response = self.client.post("profile/json", data=data)
    return response.json()
Use the api to add a new profile using profile_data .
181
13
12,621
def md5sum(self, f):
    """md5sum a file, returning the hex digest.

    :param f: Path of the file to hash.
    :return: Hex digest string of the file's MD5 hash.
    """
    m = hashlib.md5()
    # FIX: open in binary mode — hashlib.update requires bytes, and text
    # mode would also fail on non-UTF-8 content under Python 3; the
    # context manager guarantees the handle is closed on error
    with open(f, 'rb') as fh:
        while True:
            chunk = fh.read(BUF_SIZE)
            if not chunk:
                break
            m.update(chunk)
    return m.hexdigest()
md5sums a file returning the hex digest
70
10
12,622
def iterdupes(self, compare=None, filt=None):
    """Streaming item iterator with low-overhead duplicate file detection.

    Files are first bucketed by size; checksums (via ``compare``, default
    :meth:`md5sum`) are only computed once a size collision occurs.
    Yields ``(checksum, path)`` pairs; the first file of a duplicate group
    is yielded lazily, only when its first duplicate is found.

    :param compare: Callable computing a checksum for a path; defaults to
        ``self.md5sum``.
    :param filt: Filter forwarded to ``self.iteritems``.
    """
    if not compare:
        compare = self.md5sum
    seen_siz = {}  ## store size -> first seen filename
    seen_sum = {}  ## store chksum -> first seen filename
    size_func = lambda x: os.stat(x).st_size
    for (fsize, f) in self.iteritems(want_dirs=False, func=size_func, filt=filt):
        if fsize not in seen_siz:
            ## state 1: no previous size collisions
            seen_siz[fsize] = f
            continue
        else:
            if seen_siz[fsize]:
                ## state 2: defined key => str (initial, unscanned path)
                chksum = compare(seen_siz[fsize])
                if chksum in seen_sum:
                    yield (chksum, seen_siz[fsize])
                else:
                    seen_sum[chksum] = seen_siz[fsize]
                seen_siz[fsize] = None
            ## state 3: defined key => None (already scanned path, no-op)
            chksum = compare(f)
            if chksum in seen_sum:
                ## if it's a dupe, check if the first one was ever yielded, then yield
                if seen_sum[chksum]:
                    yield (chksum, seen_sum[chksum])
                    seen_sum[chksum] = None
                yield (chksum, f)
            else:
                ## if not, set the initial filename
                seen_sum[chksum] = f
streaming item iterator with low overhead duplicate file detection
347
10
12,623
def objects_to_root(objects: List) -> Root:
    """Convert a list of S3 ``ObjectSummary``s into a directory tree.

    :param objects: Flat bucket listing of S3 object summaries.
    :return: A ``Root`` whose children mirror the bucket's directory
        structure as ``Directory``/``File`` entities.
    """
    def _to_tree(objs: Iterable) -> Dict:
        """
        Build a tree structure from a flat list of objects.

        :param objs: The raw iterable of S3 `ObjectSummary`s, as returned by a bucket listing.
        :return: The listing as a nested dictionary where keys are directory and file names.
            The values of directories will in turn be a dict. The values of keys
            representing files will be the `ObjectSummary` instance.
        """
        path_tree = {}
        for obj in objs:
            # keys ending in '/' denote directory placeholder objects
            is_dir = obj.key.endswith('/')
            chunks = [chunk for chunk in obj.key.split('/') if chunk]
            chunk_count = len(chunks)
            tmp = path_tree
            for i, chunk in enumerate(chunks):
                is_last_chunk = i == chunk_count - 1
                if is_last_chunk and not is_dir:
                    tmp[chunk] = obj
                else:
                    # must be a directory
                    if chunk not in tmp:
                        # it doesn't exist - create it
                        tmp[chunk] = {}
                    tmp = tmp[chunk]
        return path_tree

    def _to_entity(key: str, value: Union[Dict, Any]) -> Entity:
        """
        Turn a nested dictionary representing an S3 bucket into the correct `Entity` object.

        :param key: The name of the entity.
        :param value: If the entity is a directory, the nested dict representing its
            contents. Otherwise, the `ObjectSummary` instance representing the file.
        :return: The entity representing the entity name and value pair.
        """
        if isinstance(value, dict):
            return Directory(key, {key_: _to_entity(key_, value_) for key_, value_ in value.items()})
        # strip the quotes S3 wraps around the ETag value
        return File(pathlib.PurePath(value.key).name, value.size, value.e_tag.strip('"'))

    tree = _to_tree(objects)
    return Root({pathlib.PurePath(key).name: _to_entity(key, value) for key, value in tree.items()})
Convert a list of s3 ObjectSummaries into a directory tree .
467
16
12,624
def _delete(self, paths: Iterable[str]) -> None:
    """Delete a collection of paths from S3.

    Deletions are issued in batches capped at the per-request API limit;
    keys are prefixed with the configured bucket prefix.

    :param paths: Iterable of object paths (without prefix) to delete.
    """
    for batch in util.chunk(paths, self._MAX_DELETES_PER_REQUEST):
        keys = [self._prefix + path for path in batch]
        logger.info('Deleting %d objects (%s)', len(keys), ', '.join(keys))
        delete_request = {
            'Objects': [{'Key': key} for key in keys],
            'Quiet': True,
        }
        response = self._bucket.delete_objects(Delete=delete_request)
        logger.debug('Delete objects response: %s', response)
Delete a collection of paths from S3 .
143
9
12,625
def _upload(self, items: Iterable[Tuple[str, str]]) -> None:
    """Upload a collection of paths to S3.

    Each item is a ``(local_path, object_key)`` pair. The Content-Type is
    guessed from the file name and falls back to octet-stream.

    :param items: Iterable of (source path, destination key) tuples.
    """
    for src, key in items:
        logger.info(f'Uploading {src} to {key}')
        mimetype, _ = mimetypes.guess_type(src)
        if mimetype is None:
            # unknown extension: fall back to the generic binary type
            logger.warning(f'Could not guess MIME type for {src}')
            mimetype = 'application/octet-stream'
        logger.debug(f'Deduced MIME type: {mimetype}')
        extra_args = {'ContentType': mimetype}
        self._bucket.upload_file(src, key, ExtraArgs=extra_args)
Upload a collection of paths to S3 .
149
9
12,626
def rotmat(alpha):
    """Return the 2x2 rotation matrix for a rotation around the z-axis.

    Parameters
    ----------
    alpha : float
        Rotation angle in radians.

    Returns
    -------
    numpy.ndarray
        2x2 counter-clockwise rotation matrix.
    """
    cos_a = np.cos(alpha)
    sin_a = np.sin(alpha)
    return np.array(((cos_a, -sin_a),
                     (sin_a, cos_a)))
Rotate around z - axis
50
6
12,627
def apply_async(self, args=None, kwargs=None, **options):  # pylint: disable=arguments-differ
    """Emit a log statement when the task is submitted.

    Delegates submission to the parent task class, then logs the task
    name, id and arguments.

    :return: The AsyncResult returned by the parent implementation.
    """
    result = super(LoggedTask, self).apply_async(args=args, kwargs=kwargs, **options)
    log.info('Task {}[{}] submitted with arguments {}, {}'.format(
        self.name, result.id, args, kwargs))
    return result
Emit a log statement when the task is submitted .
104
11
12,628
def on_retry(self, exc, task_id, args, kwargs, einfo):
    """Capture the exception that caused the task to be retried, if any.

    Calls the parent hook first, then logs the traceback (or None when
    no exception info is available).
    """
    super(LoggedTask, self).on_retry(exc, task_id, args, kwargs, einfo)
    log.warning('[{}] retried due to {}'.format(
        task_id, getattr(einfo, 'traceback', None)))
Capture the exception that caused the task to be retried if any .
88
14
12,629
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """Capture the exception that caused the task to fail, if any.

    Logs the traceback (or None when no exception info is available)
    before delegating to the parent hook.
    """
    log.error('[{}] failed due to {}'.format(
        task_id, getattr(einfo, 'traceback', None)))
    super(LoggedTask, self).on_failure(exc, task_id, args, kwargs, einfo)
Capture the exception that caused the task to fail if any .
87
12
12,630
def nodes_to_object(self, node, object):
    """Map all child nodes to one object's attributes.

    :param node: XML element whose children are mapped.
    :param object: Target object receiving one attribute per child node.
    """
    for child in list(node):
        self.node_to_object(child, object)
Map all child nodes to one object s attributes
34
9
12,631
def node_to_object(self, node, object):
    """Map a single XML node to one attribute of ``object``.

    The node's tag (lower-cased) becomes the attribute name; numeric text
    is converted to float, other text is stored as-is.

    :param node: XML element providing tag and text.
    :param object: Target object to set the attribute on.
    """
    attribute = self.to_lower(node.tag)
    # Yield is a protected keyword in Python, so let's rename it
    attribute = "_yield" if attribute == "yield" else attribute
    try:
        valueString = node.text or ""
        value = float(valueString)
    except ValueError:
        # not numeric: keep the raw text (may be None)
        value = node.text
    try:
        setattr(object, attribute, value)
    except AttributeError:
        # FIX: was ``except AttributeError():`` — excepting an exception
        # *instance* raises TypeError instead of catching the error
        sys.stderr.write("Attribute <%s> not supported." % attribute)
Map a single node to one object s attributes
120
9
12,632
def parse(self, xml_file):
    """Get a list of parsed recipes from BeerXML input.

    Iterates over all <RECIPE> elements in the file, dispatching each
    child element by tag name to build Fermentable/Yeast/Hop/Misc/Style/
    Mash sub-objects; any unrecognized child is mapped directly onto the
    recipe via :meth:`node_to_object`.

    :param xml_file: Path of the BeerXML file to parse.
    :return: List of ``Recipe`` objects.
    """
    recipes = []
    with open(xml_file, "rt") as f:
        tree = ElementTree.parse(f)
    for recipeNode in tree.iter():
        # tags are matched case-insensitively
        if self.to_lower(recipeNode.tag) != "recipe":
            continue
        recipe = Recipe()
        recipes.append(recipe)
        for recipeProperty in list(recipeNode):
            tag_name = self.to_lower(recipeProperty.tag)
            if tag_name == "fermentables":
                for fermentable_node in list(recipeProperty):
                    fermentable = Fermentable()
                    self.nodes_to_object(fermentable_node, fermentable)
                    recipe.fermentables.append(fermentable)
            elif tag_name == "yeasts":
                for yeast_node in list(recipeProperty):
                    yeast = Yeast()
                    self.nodes_to_object(yeast_node, yeast)
                    recipe.yeasts.append(yeast)
            elif tag_name == "hops":
                for hop_node in list(recipeProperty):
                    hop = Hop()
                    self.nodes_to_object(hop_node, hop)
                    recipe.hops.append(hop)
            elif tag_name == "miscs":
                for misc_node in list(recipeProperty):
                    misc = Misc()
                    self.nodes_to_object(misc_node, misc)
                    recipe.miscs.append(misc)
            elif tag_name == "style":
                style = Style()
                recipe.style = style
                self.nodes_to_object(recipeProperty, style)
            elif tag_name == "mash":
                # NOTE(review): a fresh Mash is created per child node, so
                # only the last child's Mash survives on recipe.mash —
                # looks suspicious but preserved as-is; verify intent
                for mash_node in list(recipeProperty):
                    mash = Mash()
                    recipe.mash = mash
                    if self.to_lower(mash_node.tag) == "mash_steps":
                        for mash_step_node in list(mash_node):
                            mash_step = MashStep()
                            self.nodes_to_object(mash_step_node, mash_step)
                            mash.steps.append(mash_step)
                    else:
                        self.nodes_to_object(mash_node, mash)
            else:
                self.node_to_object(recipeProperty, recipe)
    return recipes
Get a list of parsed recipes from BeerXML input
477
11
12,633
def to_lower(self, string):
    """Helper function to transform strings to lower case.

    Non-string input (e.g. ``None``) yields an empty string instead of
    raising.

    :param string: Value to lower-case.
    :return: Lower-cased string, or '' for non-string input.
    """
    try:
        return string.lower()
    except AttributeError:
        # FIX: the original used ``return`` inside ``finally``, which
        # silently suppresses any in-flight exception (pylint W0150);
        # the explicit two-path return preserves the same results
        return ""
Helper function to transform strings to lower case
34
8
12,634
def _to_dot_key(cls, section, key=None):
    """Return the section and key in dot notation format.

    Non-alphanumeric characters are replaced by underscores and names
    are lower-cased.

    :param section: Section name.
    :param key: Optional key name within the section.
    :return: Normalized section string, or a (section, key) tuple when a
        key is given.
    """
    normalized_section = NON_ALPHA_NUM.sub('_', section.lower())
    if key:
        return (normalized_section, NON_ALPHA_NUM.sub('_', key.lower()))
    return normalized_section
Return the section and key in dot notation format .
88
10
12,635
def save(self, target_file=None, as_template=False):
    """Save the config.

    :param target_file: Destination path; defaults to the last source
        file when available.
    :param as_template: When True, comment out every non-comment line so
        the output can serve as a template.
    :raises AttributeError: If no target file is given and no last source
        was set during instantiation.
    """
    self._read_sources()
    if not target_file:
        if not self._last_source:
            raise AttributeError('Target file is required when last source is not set during instantiation')
        target_file = self._last_source
    output = str(self)
    if as_template:
        # prefix every non-empty, non-comment line with '# '
        template_lines = []
        for line in output.split('\n'):
            if line and not line.startswith('#'):
                line = '# %s' % line
            template_lines.append(line)
        output = '\n'.join(template_lines)
    with open(target_file, 'w') as fp:
        fp.write(output)
Save the config
173
3
12,636
def _parse_extra(self, fp):
    """Parse and store the config comments and create maps for dot notation lookup.

    Accumulates consecutive comment lines and attaches them to the next
    section header or config key encountered; a trailing comment with no
    following entry is stored under ``LAST_COMMENT_KEY``.

    :param fp: Open file object for the config source (rewound first).
    """
    comment = ''
    section = ''
    fp.seek(0)
    for line in fp:
        line = line.rstrip()
        if not line:
            # blank line: keep a paragraph break inside a pending comment
            if comment:
                comment += '\n'
            continue
        if line.startswith('#'):  # Comment
            comment += line + '\n'
            continue
        if line.startswith('['):  # Section
            section = line.strip('[]')
            self._add_dot_key(section)
            if comment:
                self._comments[section] = comment.rstrip()
        elif CONFIG_KEY_RE.match(line):  # Config
            key = line.split('=', 1)[0].strip()
            self._add_dot_key(section, key)
            if comment:
                self._comments[(section, key)] = comment.rstrip()
        # the pending comment is consumed by whichever entry followed it
        comment = ''
    if comment:
        self._comments[self.LAST_COMMENT_KEY] = comment
Parse and store the config comments and create maps for dot notation lookup
216
14
12,637
def _typed_value(self, value):
    """Transform string value to an actual data type of the same value.

    Conversion results are memoized in ``self._value_cache``.

    :param value: Raw string value from the config.
    :return: int, float, bool, None or the original string.
    """
    if value not in self._value_cache:
        if is_int(value):
            converted = int(value)
        elif is_float(value):
            converted = float(value)
        elif is_bool(value):
            converted = to_bool(value)
        elif is_none(value):
            converted = None
        else:
            # not a recognized scalar: keep the string as-is
            converted = value
        self._value_cache[value] = converted
    return self._value_cache[value]
Transform string value to an actual data type of the same value .
117
13
12,638
def add_section(self, section, comment=None):
    """Add a section.

    :param section: Name of the section to add.
    :param comment: Optional comment to attach to the new section.
    :raises DuplicateSectionError: If the section already exists
        (compared in dot-key form).
    """
    self._read_sources()
    if self._to_dot_key(section) in self._dot_keys:
        raise DuplicateSectionError(section)
    self._parser.add_section(section)
    self._add_dot_key(section)
    if comment:
        self._set_comment(section, comment)
Add a section
87
3
12,639
def _set_comment(self, section, comment, key=None):
    """Set a comment for a section or key.

    Each line of the comment is prefixed with '# ' before storing.

    :param section: Section the comment belongs to.
    :param comment: Comment text, possibly multi-line.
    :param key: Optional key within the section; when given the comment
        is stored under the (section, key) tuple.
    """
    if '\n' in comment:
        comment = '\n# '.join(comment.split('\n'))
    comment = '# ' + comment
    target = (section, key) if key else section
    self._comments[target] = comment
Set a comment for section or key
79
7
12,640
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]:
    """Returns sampled action fluents and tensors related to the sampling.

    Samples an initial action under the compiled bound constraints, then
    resamples until preconditions hold (see ``_check_preconditions``).

    :param state: Current state fluents.
    :return: Tuple of (action fluents, iteration-count tensor, final
        precondition-checking tensor).
    """
    default = self.compiler.compile_default_action(self.batch_size)
    bound_constraints = self.compiler.compile_action_bound_constraints(state)
    action = self._sample_action(bound_constraints, default)
    n, action, checking = self._check_preconditions(state, action, bound_constraints, default)
    return action, n, checking
Returns sampled action fluents and tensors related to the sampling .
131
13
12,641
def _check_preconditions(self,
                         state: Sequence[tf.Tensor],
                         action: Sequence[tf.Tensor],
                         bound_constraints: Dict[str, Constraints],
                         default: Sequence[tf.Tensor]) -> Tuple[tf.Tensor, Sequence[tf.Tensor], tf.Tensor]:
    """Samples action fluents until all preconditions are satisfied.

    Runs a ``tf.while_loop`` that keeps resampling actions for the batch
    entries whose preconditions do not yet hold, keeping already-valid
    entries unchanged via ``tf.where``.

    :param state: Current state fluents.
    :param action: Initially sampled action fluents.
    :param bound_constraints: Per-fluent bound constraints.
    :param default: Default action fluents.
    :return: (iteration counter, final action fluents, checking tensor).
    """
    def condition(i, a, checking):
        # keep looping while any batch entry still fails its preconditions
        not_checking = tf.reduce_any(tf.logical_not(checking))
        return not_checking

    def body(i, a, checking):
        new_action = []
        new_sampled_action = self._sample_action(bound_constraints, default)
        new_preconds_checking = self.compiler.compile_action_preconditions_checking(state, new_sampled_action)
        for action_fluent, new_sampled_action_fluent in zip(a, new_sampled_action):
            # keep entries that already satisfied preconditions, replace the rest
            new_action_fluent = tf.where(checking, action_fluent, new_sampled_action_fluent)
            new_action.append(new_action_fluent)
        new_action = tuple(new_action)
        # an entry stays "checked" once its preconditions have held
        new_checking = tf.logical_or(checking, new_preconds_checking)
        return (i + 1, new_action, new_checking)

    i0 = tf.constant(0)
    preconds_checking = self.compiler.compile_action_preconditions_checking(state, action)
    return tf.while_loop(condition, body, loop_vars=[i0, action, preconds_checking])
Samples action fluents until all preconditions are satisfied .
354
13
12,642
def _sample_action(self,
                   constraints: Dict[str, Constraints],
                   default: Sequence[tf.Tensor],
                   prob: float = 0.3) -> Sequence[tf.Tensor]:
    """Samples action fluents respecting the given bound constraints.

    :param constraints: Per-fluent bound constraints.
    :param default: Default action fluents, aligned with the fluent ordering.
    :param prob: Probability of keeping the default value per batch entry.
    :return: Tuple of sampled action fluent tensors.
    """
    ordering = self.compiler.rddl.domain.action_fluent_ordering
    dtypes = map(rddl2tf.utils.range_type_to_dtype, self.compiler.rddl.action_range_type)
    size = self.compiler.rddl.action_size
    action = []
    # NOTE(review): the loop variable deliberately shadows the outer
    # `size` (the per-fluent size from zip replaces the full list)
    for name, dtype, size, default_value in zip(ordering, dtypes, size, default):
        action_fluent = self._sample_action_fluent(name, dtype, size, constraints, default_value, prob)
        action.append(action_fluent)
    return tuple(action)
Samples action fluents respecting the given bound constraints .
187
11
12,643
def _sample_action_fluent(self,
                          name: str,
                          dtype: tf.DType,
                          size: Sequence[int],
                          constraints: Dict[str, Constraints],
                          default_value: tf.Tensor,
                          prob: float) -> tf.Tensor:
    """Samples the action fluent with given name, dtype and size.

    Real fluents are drawn uniformly within their (possibly batched)
    bounds, integer fluents from a categorical over [0, MAX_INT_VALUE),
    boolean fluents as fair Bernoulli draws. Each batch entry keeps the
    default value with probability ``prob``.

    :param name: Fluent name used to look up bound constraints.
    :param dtype: Fluent dtype (tf.float32, tf.int32 or tf.bool).
    :param size: Fluent shape without the batch dimension.
    :param constraints: Per-fluent bound constraints.
    :param default_value: Default value tensor for this fluent.
    :param prob: Probability of selecting the default per batch entry.
    :return: Sampled fluent tensor with leading batch dimension.
    :raises ValueError: If tensor-valued bounds don't match the fluent shape.
    """
    shape = [self.batch_size] + list(size)
    if dtype == tf.float32:
        bounds = constraints.get(name)
        if bounds is None:
            # unconstrained: sample uniformly over the full real range
            low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            sampled_fluent = dist.sample(shape)
        else:
            low, high = bounds
            # whether either bound already carries a batch dimension
            batch = (low is not None and low.batch) or (high is not None and high.batch)
            low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
            high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            if batch:
                # bounds are batched: the distribution shape already includes it
                sampled_fluent = dist.sample()
            elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
                if (low + high).shape.as_list() == list(size):
                    sampled_fluent = dist.sample([self.batch_size])
                else:
                    raise ValueError('bounds are not compatible with action fluent.')
            else:
                sampled_fluent = dist.sample(shape)
    elif dtype == tf.int32:
        # uniform categorical over [0, MAX_INT_VALUE)
        logits = [1.0] * self.MAX_INT_VALUE
        dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
        sampled_fluent = dist.sample(shape)
    elif dtype == tf.bool:
        probs = 0.5
        dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
        sampled_fluent = dist.sample(shape)
    # per-batch-entry coin flip: keep the default with probability `prob`
    select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)
    action_fluent = tf.where(select_default, default_value, sampled_fluent)
    return action_fluent
Samples the action fluent with given name dtype and size .
524
13
12,644
def UnitToLNode ( u : Unit , node : Optional [ LNode ] = None , toL : Optional [ dict ] = None , optimizations = [ ] ) -> LNode : if toL is None : toL = { } if node is None : root = LNode ( name = u . _name , originObj = u , node2lnode = toL ) else : root = node stmPorts = { } # {RtlSignal: NetCtx} netCtx = NetCtxs ( root ) # create subunits for su in u . _units : n = root . addNode ( name = su . _name , originObj = su ) UnitToLNode ( su , n , toL , optimizations ) # create subunits from statements for stm in u . _ctx . statements : n = addStmAsLNode ( root , stm , stmPorts , netCtx ) # create ports for this unit for intf in u . _interfaces : addPort ( root , intf ) # render content of statements for stm in u . _ctx . statements : n = toL . get ( stm , None ) if n is not None : if isinstance ( n , VirtualLNode ) : # statement is not in wrap and does not need any port context p = None else : # statement is in wrap and needs a port context # to resolve port connections to wrap p = stmPorts [ n ] r = StatementRenderer ( n , toL , p , netCtx ) r . renderContent ( ) # connect nets inside this unit for s in u . _ctx . signals : if not s . hidden : net , _ = netCtx . getDefault ( s ) for e in s . endpoints : if isinstance ( e , PortItem ) : net . addEndpoint ( toL [ e ] ) for d in s . drivers : if isinstance ( d , PortItem ) : net . addDriver ( toL [ d ] ) netCtx . applyConnections ( root ) for opt in optimizations : opt ( root ) isRootOfWholeGraph = root . parent is None if not isRootOfWholeGraph : for intf in u . _interfaces : # connect my external port to port on my container on parent # also override toL to use this new port ext_p = toL [ originObjOfPort ( intf ) ] . parentNode nodePort = addPortToLNode ( root , intf ) # connect this node which represents port to port of this node if intf . _direction == INTF_DIRECTION . SLAVE : src = nodePort dst = ext_p . addPort ( "" , PortType . INPUT , PortSide . WEST ) else : src = ext_p . addPort ( "" , PortType . 
OUTPUT , PortSide . EAST ) dst = nodePort root . addEdge ( src , dst , name = repr ( intf ) , originObj = intf ) return root
Build LNode instance from Unit instance
645
7
12,645
def configure ( self , rhsm = None , repositories = None ) : if rhsm is not None : self . rhsm_register ( rhsm ) if repositories is not None : self . enable_repositories ( repositories ) self . create_stack_user ( ) self . deploy_hypervisor ( )
This method will configure the host0 and run the hypervisor .
66
13
12,646
def deploy_hypervisor ( self ) : self . yum_install ( [ 'libvirt-daemon-driver-nwfilter' , 'libvirt-client' , 'libvirt-daemon-config-network' , 'libvirt-daemon-driver-nodedev' , 'libvirt-daemon-kvm' , 'libvirt-python' , 'libvirt-daemon-config-nwfilter' , 'libvirt-glib' , 'libvirt-daemon' , 'libvirt-daemon-driver-storage' , 'libvirt' , 'libvirt-daemon-driver-network' , 'libvirt-devel' , 'libvirt-gobject' , 'libvirt-daemon-driver-secret' , 'libvirt-daemon-driver-qemu' , 'libvirt-daemon-driver-interface' , 'libguestfs-tools' , 'virt-install' , 'genisoimage' , 'openstack-tripleo' , 'instack-undercloud' ] ) self . run ( 'sed -i "s,#auth_unix_rw,auth_unix_rw," /etc/libvirt/libvirtd.conf' ) self . run ( 'systemctl start libvirtd' ) self . run ( 'systemctl status libvirtd' ) self . install_base_packages ( ) self . clean_system ( ) self . yum_update ( )
Install the libvirtd and instack - undercloud packages .
321
13
12,647
def build_undercloud_on_libvirt ( self , image_path , rhsm = None , repositories = [ ] ) : self . run ( 'sysctl net.ipv4.ip_forward=1' ) self . fetch_image ( path = image_path , dest = '/home/stack/guest_image.qcow2' , user = 'stack' ) # NOTE(Gonéri): this is a hack for our OpenStack, the MTU of its outgoing route # is 1400 and libvirt do not provide a mechanism to adjust the guests MTU. self . run ( "LIBGUESTFS_BACKEND=direct virt-customize -a /home/stack/guest_image.qcow2 --run-command 'echo MTU=\"1400\" >> /etc/sysconfig/network-scripts/ifcfg-eth0'" ) env = Environment ( ) env . loader = FileSystemLoader ( pkg_data_filename ( 'template' ) ) template = env . get_template ( 'virt-setup-env.j2' ) self . run ( 'mkdir -p /home/stack/DIB' , user = 'stack' ) self . run ( 'cp -v /etc/yum.repos.d/*.repo /home/stack/DIB' , user = 'stack' ) # NOTE(Gonéri): Hack to be sure DIB won't complain because of missing gpg files # self.run('sed -i "s,gpgcheck=1,gpgcheck=0," /home/stack/DIB/*.repo', user='stack') dib_yum_repo_conf = self . run ( 'find /home/stack/DIB -type f' , user = 'stack' ) [ 0 ] . split ( ) virt_setup_template = { 'dib_yum_repo_conf' : dib_yum_repo_conf , 'node' : { 'count' : 2 , 'mem' : 6144 , 'cpu' : 2 } , 'undercloud_node_mem' : 8192 , 'guest_image_name' : '/home/stack/guest_image.qcow2' } if rhsm is not None : virt_setup_template [ 'rhsm' ] = { 'login' : rhsm . get ( 'login' ) , 'password' : rhsm . get ( 'password' , os . environ . get ( 'RHN_PW' ) ) , 'pool_id' : rhsm . get ( 'pool_id' , '' ) , 'repositories' : [ i [ 'name' ] for i in repositories if i [ 'type' ] == 'rhsm_channel' ] } virt_setup_env = template . render ( virt_setup_template ) self . create_file ( 'virt-setup-env' , virt_setup_env , user = 'stack' ) self . run ( 'virsh destroy instack' , ignore_error = True ) self . run ( 'virsh undefine instack --remove-all-storage' , ignore_error = True ) self . run ( 'source virt-setup-env; instack-virt-setup' , user = 'stack' ) undercloud_ip = self . 
run ( '/sbin/ip n | grep $(tripleo get-vm-mac instack) | awk \'{print $1;}\'' , user = 'stack' ) [ 0 ] assert undercloud_ip , 'undercloud should have an IP' undercloud = Undercloud ( hostname = undercloud_ip , via_ip = self . hostname , user = 'root' , key_filename = self . _key_filename ) return undercloud
Build the Undercloud by using instack - virt - setup script .
826
14
12,648
def login ( config , api_key = "" ) : if not api_key : info_out ( "If you don't have an API Key, go to:\n" "https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n" ) api_key = getpass . getpass ( "API Key: " ) # Before we store it, let's test it. url = urllib . parse . urljoin ( config . bugzilla_url , "/rest/whoami" ) assert url . startswith ( "https://" ) , url response = requests . get ( url , params = { "api_key" : api_key } ) if response . status_code == 200 : if response . json ( ) . get ( "error" ) : error_out ( "Failed - {}" . format ( response . json ( ) ) ) else : update ( config . configfile , { "BUGZILLA" : { "bugzilla_url" : config . bugzilla_url , "api_key" : api_key , # "login": login, } } , ) success_out ( "Yay! It worked!" ) else : error_out ( "Failed - {} ({})" . format ( response . status_code , response . json ( ) ) )
Store your Bugzilla API Key
288
6
12,649
def logout ( config ) : state = read ( config . configfile ) if state . get ( "BUGZILLA" ) : remove ( config . configfile , "BUGZILLA" ) success_out ( "Forgotten" ) else : error_out ( "No stored Bugzilla credentials" )
Remove and forget your Bugzilla credentials
66
7
12,650
def get_hypergeometric_stats ( N , indices ) : assert isinstance ( N , ( int , np . integer ) ) assert isinstance ( indices , np . ndarray ) and np . issubdtype ( indices . dtype , np . uint16 ) K = indices . size pvals = np . empty ( N + 1 , dtype = np . float64 ) folds = np . empty ( N + 1 , dtype = np . float64 ) pvals [ 0 ] = 1.0 folds [ 0 ] = 1.0 n = 0 k = 0 p = 1.0 while n < N : if k < K and indices [ k ] == n : # "add one" # calculate f(k+1; N,K,n+1) from f(k; N,K,n) p *= ( float ( ( n + 1 ) * ( K - k ) ) / float ( ( N - n ) * ( k + 1 ) ) ) k += 1 else : # "add zero" # calculate f(k; N,K,n+1) from f(k; N,K,n) p *= ( float ( ( n + 1 ) * ( N - K - n + k ) ) / float ( ( N - n ) * ( n - k + 1 ) ) ) n += 1 # calculate hypergeometric p-value pvals [ n ] = get_hgp ( p , k , N , K , n ) # calculate fold enrichment folds [ n ] = k / ( K * ( n / float ( N ) ) ) return pvals , folds
Calculates hypergeom . p - values and fold enrichments for all cutoffs .
346
19
12,651
def parse ( self , prefix ) : # reset to default values self . _prefix = "" url = re . sub ( r'http://' , '' , prefix ) url = re . sub ( r'https://' , '' , url ) # any prefix customization before parsing? custom_prefix = self . detectCustomImportPaths ( url ) if custom_prefix != { } : url = custom_prefix [ "provider_prefix" ] info = self . _parsePrefix ( url ) self . _signature = info [ "signature" ] self . _prefix = info [ "prefix" ] return self
Parse import path into provider project repository and other recognizable parts
130
12
12,652
def detectKnownRepo ( self , url ) : if url . startswith ( 'github.com' ) : return GITHUB if url . startswith ( 'code.google.com/p' ) : return GOOGLECODE if url . startswith ( 'golang.org/x' ) : return GOLANGORG if url . startswith ( 'gopkg.in' ) : return GOPKG if url . startswith ( 'bitbucket.org' ) : return BITBUCKET if url . startswith ( 'google.golang.org' ) : return GOOGLEGOLANGORG return UNKNOWN
For given import path detect provider .
150
7
12,653
def get_qualifier_dict ( vocabularies , qualifier_vocab ) : # Raise exception if the vocabulary can't be found. if vocabularies . get ( qualifier_vocab , None ) is None : raise UNTLFormException ( 'Could not retrieve qualifier vocabulary "%s" for the form.' % ( qualifier_vocab ) ) else : # Return the sorted vocabulary. return vocabularies . get ( qualifier_vocab )
Get the qualifier dictionary based on the element s qualifier vocabulary .
96
12
12,654
def get_content_dict ( vocabularies , content_vocab ) : # Raise exception if the vocabulary can't be found. if vocabularies . get ( content_vocab , None ) is None : raise UNTLFormException ( 'Could not retrieve content vocabulary "%s" for the form.' % ( content_vocab ) ) else : # Return the sorted vocabulary. return vocabularies . get ( content_vocab )
Get the content dictionary based on the element s content vocabulary .
95
12
12,655
def get_group_usage_link ( self ) : first_element = self . group_list [ 0 ] usage_link = getattr ( first_element . form , 'usage_link' , None ) return usage_link
Get the usage link for the group element .
49
9
12,656
def get_adjustable_form ( self , element_dispatch ) : adjustable_form = { } # Loop through the qualifiers to create the adjustable form. for key in element_dispatch . keys ( ) : adjustable_form [ key ] = element_dispatch [ key ] ( ) return adjustable_form
Create an adjustable form from an element dispatch table .
65
10
12,657
def set_coverage_placeName ( self ) : if ( self . solr_response and self . solr_response != 'error' and self . solr_response . response != 'error' ) : location_list = self . solr_response . get_location_list_facet ( ) . facet_list else : location_list = [ ] form_dict = { 'view_type' : 'prefill' , 'value_json' : json . dumps ( location_list , ensure_ascii = False ) , 'value_py' : location_list , } return form_dict
Determine the properties for the placeName coverage field .
133
12
12,658
def get_meta_attributes ( self , * * kwargs ) : superuser = kwargs . get ( 'superuser' , False ) if ( self . untl_object . qualifier == 'recordStatus' or self . untl_object . qualifier == 'system' ) : if superuser : self . editable = True self . repeatable = True else : self . editable = False self . view_type = 'qualified-input' elif self . untl_object . qualifier == 'hidden' : self . label = 'Object Hidden' self . view_type = 'radio' else : self . editable = False self . view_type = 'qualified-input'
Determine the form attributes for the meta field .
149
11
12,659
def _bit_mismatch ( int1 : int , int2 : int ) -> int : for i in range ( max ( int1 . bit_length ( ) , int2 . bit_length ( ) ) ) : if ( int1 >> i ) & 1 != ( int2 >> i ) & 1 : return i return - 1
Returns the index of the first different bit or - 1 if the values are the same .
72
18
12,660
def searchRootOfTree ( reducibleChildren : Set [ LNode ] , nodeFromTree : LNode ) : while True : out_e = nodeFromTree . east [ 0 ] . outgoingEdges # node has no successors if not out_e : return nodeFromTree nextNode = out_e [ 0 ] . dsts [ 0 ] . parentNode if nextNode in reducibleChildren : # can reduce node, walk the tree to root nodeFromTree = nextNode else : # can not reduce, return last root of tree return nodeFromTree
Walk tree of nodes to root
116
6
12,661
def collectNodesInTree ( treeRoot : LNode , reducibleChildren : Set [ LNode ] ) : # List[Tuple[LNode, LPort, LEdge]] inputEdges = [ ] # List[LNode] reducedNodes = [ ] # Set[LNode] reducedNodesSet = set ( ) # An iterative process to print preorder traveral of tree # List[Typle[LNode, LPort, LEdge]] nodeStack = [ ] nodeStack . append ( ( treeRoot , None , None ) ) # collect nodes in tree and input edges while nodeStack : # pop the node from stack and try to find it's children node , p , e = nodeStack . pop ( ) if node in reducibleChildren and node not in reducedNodesSet : reducedNodes . append ( node ) reducedNodesSet . add ( node ) # walk inputs and add child nodes to stack for _p in node . west : for _e in _p . iterEdges ( ) : # assert len(e.srcs) == 1 and len(e.dsts) == 1 nodeStack . append ( ( _e . srcs [ 0 ] . parentNode , _p , _e ) ) else : inputEdges . append ( ( node , p , e ) ) return reducedNodes , inputEdges
Collect nodes which will be reduced and input nodes of tree for tree of nodes .
288
16
12,662
def __initLock ( self ) : self . _isLocked = False self . _timer = 0 self . _operation = False
Init lock for sending request to projector when it is busy .
28
12
12,663
def __setLock ( self , command ) : if command in ( TURN_ON , TURN_OFF ) : self . _operation = command elif command in INV_SOURCES : self . _operation = SOURCE else : self . _operation = ALL self . _isLocked = True self . _timer = time . time ( )
Set lock on requests .
75
5
12,664
def __unLock ( self ) : self . _operation = False self . _timer = 0 self . _isLocked = False
Unlock sending requests to projector .
28
7
12,665
def __checkLock ( self ) : if self . _isLocked : if ( time . time ( ) - self . _timer ) > TIMEOUT_TIMES [ self . _operation ] : self . __unLock ( ) return False return True return False
Lock checking .
56
3
12,666
async def get_property ( self , command ) : _LOGGER . debug ( "Getting property %s" , command ) if self . __checkLock ( ) : return BUSY timeout = self . __get_timeout ( command ) response = await self . send_request ( timeout = timeout , params = EPSON_KEY_COMMANDS [ command ] , type = 'json_query' ) if not response : return False try : return response [ 'projector' ] [ 'feature' ] [ 'reply' ] except KeyError : return BUSY
Get property state from device .
118
6
12,667
async def send_command ( self , command ) : _LOGGER . debug ( "Sending command to projector %s" , command ) if self . __checkLock ( ) : return False self . __setLock ( command ) response = await self . send_request ( timeout = self . __get_timeout ( command ) , params = EPSON_KEY_COMMANDS [ command ] , type = 'directsend' , command = command ) return response
Send command to Epson .
97
6
12,668
async def send_request ( self , params , timeout , type = 'json_query' , command = False ) : try : with async_timeout . timeout ( timeout ) : url = '{url}{type}' . format ( url = self . _http_url , type = type ) async with self . websession . get ( url = url , params = params , headers = self . _headers ) as response : if response . status != HTTP_OK : _LOGGER . warning ( "Error message %d from Epson." , response . status ) return False if command == TURN_ON and self . _powering_on : self . _powering_on = False if type == 'json_query' : return await response . json ( ) return response except ( aiohttp . ClientError , aiohttp . ClientConnectionError ) : _LOGGER . error ( "Error request" ) return False
Send request to Epson .
195
6
12,669
def remove_instances_by_prefix ( nova_api , prefix ) : for server in nova_api . servers . list ( ) : if server . name . startswith ( prefix ) : LOG . info ( "Remove instance '%s'" % server . name ) server . delete ( )
Remove all the instances on which their name start by a prefix .
65
13
12,670
def purge_existing_ovb ( nova_api , neutron ) : LOG . info ( 'Cleaning up OVB environment from the tenant.' ) for server in nova_api . servers . list ( ) : if server . name in ( 'bmc' , 'undercloud' ) : server . delete ( ) if server . name . startswith ( 'baremetal_' ) : server . delete ( ) for router in neutron . list_routers ( ) . get ( 'routers' ) : if router [ 'name' ] not in ( 'router' , 'bmc_router' ) : continue for subnet in neutron . list_subnets ( ) . get ( 'subnets' ) : if not ( subnet [ 'name' ] . startswith ( 'bmc_eth' ) or subnet [ 'name' ] == 'rdo-m-subnet' ) : continue try : neutron . remove_interface_router ( router [ 'id' ] , { 'subnet_id' : subnet [ 'id' ] } ) except neutronclient . common . exceptions . NotFound : pass try : bmc_router = neutron . list_routers ( name = 'bmc_router' ) . get ( 'routers' ) [ 0 ] for port in neutron . list_ports ( device_id = bmc_router [ 'id' ] ) [ 'ports' ] : if port . get ( 'device_owner' ) == 'network:router_gateway' : continue info = { 'id' : router [ 'id' ] , 'port_id' : port [ 'id' ] , 'tenant_id' : bmc_router . get ( 'tenant_id' ) , } neutron . remove_interface_router ( bmc_router [ 'id' ] , info ) neutron . delete_router ( bmc_router [ 'id' ] ) except IndexError : # already doesnt exist pass for _ in range ( 0 , 5 ) : try : for port in neutron . list_ports ( ) [ 'ports' ] : if port [ 'name' ] . endswith ( '_provision' ) : neutron . delete_port ( port [ 'id' ] ) for net in neutron . list_networks ( ) . get ( 'networks' ) : if not net [ 'name' ] . startswith ( 'provision_' ) : continue for port in neutron . list_ports ( network_id = net [ 'id' ] ) [ 'ports' ] : if port . get ( 'device_owner' ) == 'network:router_interface' : continue try : neutron . delete_port ( port [ 'id' ] ) except neutronclient . common . exceptions . PortNotFoundClient : pass for subnet in neutron . list_subnets ( network_id = net [ 'id' ] ) [ 'subnets' ] : neutron . delete_subnet ( subnet [ 'id' ] ) neutron . 
delete_network ( net [ 'id' ] ) except neutronclient . common . exceptions . Conflict : LOG . debug ( 'waiting for all the ports to be freed...' ) time . sleep ( 5 ) else : return
Purge any trace of an existing OVB deployment .
705
11
12,671
def initialize_network ( neutron ) : body_sample = { "network" : { "name" : 'provision_bob' , "admin_state_up" : True , } } netw = neutron . create_network ( body = body_sample ) [ 'network' ] body_create_subnet = { 'subnets' : [ { 'name' : 'rdo-m-subnet' , 'cidr' : '192.0.2.0/24' , 'ip_version' : 4 , 'network_id' : netw [ 'id' ] , 'host_routes' : [ { 'destination' : '169.254.169.254/32' , 'nexthop' : '192.0.2.240' } ] , 'gateway_ip' : '192.0.2.1' , 'dns_nameservers' : [ '8.8.8.8' , '8.8.4.4' ] , 'allocation_pools' : [ { 'start' : '192.0.2.30' , 'end' : '192.0.2.199' } ] } ] } response = neutron . create_subnet ( body = body_create_subnet ) subnet_id = response [ 'subnets' ] [ 0 ] [ 'id' ] router = neutron . list_routers ( name = 'router' ) . get ( 'routers' ) [ 0 ] response = neutron . add_interface_router ( router [ 'id' ] , { 'subnet_id' : subnet_id } )
Initialize an OVB network called provision_bob .
363
12
12,672
def description_director ( * * kwargs ) : description_type = { 'physical' : DCFormat } qualifier = kwargs . get ( 'qualifier' ) # Determine the type of element needed, based on the qualifier. element_class = description_type . get ( qualifier , DCDescription ) # Create the element object of that element type. element = element_class ( qualifier = qualifier , content = kwargs . get ( 'content' ) , ) return element
Direct which class should be used based on the director qualifier .
102
12
12,673
def add_child ( self , child ) : # Make sure the child exists before adding it. if child : # Append child if it is allowed to exist under the parent. if child . tag in self . contained_children : self . children . append ( child ) else : raise DC_StructureException ( 'Invalid child "%s" for parent "%s"' % ( child . tag , self . tag ) )
This adds a child object to the current one . It will check the contained_children list to make sure that the object is allowable and throw an exception if not .
87
33
12,674
def determine_vocab ( self , qualifier ) : vocab_value = VOCAB_INDEX . get ( self . tag , None ) if isinstance ( vocab_value , dict ) : if qualifier is None : qualifier = 'None' # Find the value based on the qualifier. return vocab_value . get ( qualifier , None ) elif vocab_value is not None : return vocab_value else : return None
Determine the vocab from the qualifier .
93
10
12,675
def resolver ( self , vocab_data , attribute ) : term_list = vocab_data . get ( self . content_vocab , [ ] ) # Loop through the terms from the vocabulary. for term_dict in term_list : # Match the name to the current content. if term_dict [ 'name' ] == self . content : return term_dict [ attribute ] return self . content
Pull the requested attribute based on the given vocabulary and content .
87
12
12,676
def check_separator ( self , data ) : sep_list = [ r'\t' , r';' , r',' , r'|' , r'\s+' ] data_copy = data for sep in sep_list : # Check if the count matches each line splitted = data_copy . split ( "\n" ) parts = [ len ( re . split ( sep , line ) ) for line in splitted ] # If we did not split anything continue if sum ( parts ) == len ( splitted ) : continue diff = 0 for i in range ( len ( parts [ 1 : - 1 ] ) ) : diff += abs ( parts [ i ] - parts [ i + 1 ] ) if diff == 0 : return sep , parts [ 0 ] # If we reach this point we did not find a separator return None
THis method evaluates a list of separators on the input data to check which one is correct . This is done by first splitting the input by newline and then checking if the split by separator is equal for each input row except the last that might be incomplete due to the limited input data
178
59
12,677
def head ( self , file_path ) : processor = lambda path , node , tail_only = True , append = False : self . _handle_head ( path , node ) # Find items and go for item in self . _client . _find_items ( [ file_path ] , processor , include_toplevel = True , include_children = False , recurse = False ) : if item : return item
Onlye read the first packets that come try to max out at 1024kb
89
15
12,678
def packageExists ( self , package ) : url = "%s/packages" % self . base_url params = { "pattern" : package } response = requests . get ( url , params = params ) if response . status_code != requests . codes . ok : return False return True
Check if the package already exists
61
6
12,679
def getGolangPackages ( self ) : packages = { } # get all packages url = "%s/packages" % self . base_url params = { "pattern" : "golang-*" , "limit" : 200 } response = requests . get ( url , params = params ) if response . status_code != requests . codes . ok : return { } data = response . json ( ) for package in data [ "packages" ] : packages [ package [ "name" ] ] = self . _processPackageData ( package ) # accumulate packages from all pages for page in range ( 2 , data [ "page_total" ] + 1 ) : params = { "pattern" : "golang-*" , "limit" : 200 , "page" : page } response = requests . get ( url , params = params ) if response . status_code != requests . codes . ok : continue data = response . json ( ) for package in data [ "packages" ] : packages [ package [ "name" ] ] = self . _processPackageData ( package ) # get branches of all packages MAX_LEN = 30 # break the list of packages into lists of at most 50 packages package_names = packages . keys ( ) packages_total = len ( package_names ) packages_counter = 0 logger . info ( "%s packages to process" % packages_total ) for i in range ( 0 , packages_total , MAX_LEN ) : sublist = package_names [ i : i + MAX_LEN ] branches = self . _getPackageBranches ( sublist ) for package in sublist : packages [ package ] [ "branches" ] = branches [ package ] packages_counter = packages_counter + len ( branches ) logger . info ( "%s/%s packages processed" % ( packages_counter , packages_total ) ) return packages
Get a list of all golang packages for all available branches
399
12
12,680
def onClose ( self , wasClean ) : self . log . error ( 'lost connection to crossbar on session %' + str ( self . session_id ) ) for task in asyncio . Task . all_tasks ( ) : task . cancel ( ) asyncio . get_event_loop ( ) . stop ( )
Disconnect when connection to message broker is lost
70
9
12,681
def onUserError ( self , fail , message ) : self . log . error ( fail ) self . log . error ( message )
Handle user errors
28
3
12,682
async def show_sessions ( self ) : res = await self . call ( "wamp.session.list" ) for session_id in res : session = await self . call ( "wamp.session.get" , session_id ) self . log . info ( session )
Returns an object with a lists of the session IDs for all sessions currently attached to the realm
62
18
12,683
async def lookup_session ( self , topic_name ) : res = await self . call ( "wamp.subscription.lookup" , topic_name ) self . log . info ( res )
Attempts to find the session id for a given topic
44
10
12,684
def setup_runner ( self ) : runner = ApplicationRunner ( url = self . config [ 'transport_host' ] , realm = u'realm1' , extra = { 'config' : self . config , 'handlers' : self . handlers , } ) return runner
Setup instance of runner var
59
5
12,685
def reconnect ( self ) : connect_attempt = 0 max_retries = self . config [ 'max_reconnect_retries' ] logging . info ( 'attempting to reconnect to crossbar' ) runner = self . setup_runner ( ) while True : if connect_attempt == max_retries : logging . info ( 'max retries reached; stopping service' ) sys . exit ( 1 ) self . check_event_loop ( ) try : logging . info ( 'waiting 5 seconds' ) time . sleep ( 5 ) if self . check_transport_host ( ) : logging . info ( 'waiting 10 seconds to ensure that crossbar has initialized before reconnecting' ) time . sleep ( 10 ) runner . run ( Component ) else : logging . error ( 'crossbar host port 8080 not available...' ) except RuntimeError as error : logging . error ( error ) except ConnectionRefusedError as error : logging . error ( error ) except ConnectionError as error : logging . error ( error ) except KeyboardInterrupt : logging . info ( 'User initiated shutdown' ) loop = asyncio . get_event_loop ( ) loop . stop ( ) sys . exit ( 1 ) connect_attempt += 1
Handle reconnect logic if connection to crossbar is lost
262
10
12,686
def reduceUselessAssignments ( root : LNode ) : for n in root . children : if n . children : reduceUselessAssignments ( n ) do_update = False for n in root . children : if isinstance ( n . originObj , Assignment ) and not n . originObj . indexes and len ( n . west ) == 1 : src = n . originObj . src if isinstance ( src , RtlSignalBase ) and src . hidden : continue if not do_update : nodes = set ( root . children ) do_update = True nodes . remove ( n ) srcPorts = [ ] dstPorts = [ ] edgesToRemove = [ ] inP = getSinglePort ( n . west ) outP = getSinglePort ( n . east ) for e in inP . incomingEdges : sPort = e . src srcPorts . append ( ( sPort , e . originObj ) ) edgesToRemove . append ( e ) for e in outP . outgoingEdges : dPort = e . dst dstPorts . append ( dPort ) edgesToRemove . append ( e ) for e in edgesToRemove : e . remove ( ) for srcPort , originObj in srcPorts : for dstPort in dstPorts : root . addEdge ( srcPort , dstPort , originObj = originObj ) if do_update : root . children = list ( nodes )
Remove assignments if it is only a direct connection and can be replaced with direct link
301
16
12,687
def _constructTypeQualifiedName ( self , type , full = False ) : t = type [ "type" ] if t == TYPE_IDENT : return type [ "def" ] elif t == TYPE_POINTER : return self . _constructTypeQualifiedName ( type [ "def" ] ) elif t == TYPE_SELECTOR : if full : return "%s.%s" % ( type [ "prefix" ] , type [ "item" ] ) else : return type [ "item" ] else : raise ValueError ( "Type %s can not be used for FQN" % t )
For given type construct its full qualified name .
131
9
12,688
def crop_to_bounding_box ( image , offset_height , offset_width , target_height , target_width , dynamic_shape = False ) : image = ops . convert_to_tensor ( image , name = 'image' ) _Check3DImage ( image , require_static = ( not dynamic_shape ) ) height , width , _ = _ImageDimensions ( image , dynamic_shape = dynamic_shape ) if not dynamic_shape : if offset_width < 0 : raise ValueError ( 'offset_width must be >= 0.' ) if offset_height < 0 : raise ValueError ( 'offset_height must be >= 0.' ) if width < ( target_width + offset_width ) : raise ValueError ( 'width must be >= target + offset.' ) if height < ( target_height + offset_height ) : raise ValueError ( 'height must be >= target + offset.' ) cropped = array_ops . slice ( image , array_ops . pack ( [ offset_height , offset_width , 0 ] ) , array_ops . pack ( [ target_height , target_width , - 1 ] ) ) return cropped
Crops an image to a specified bounding box .
247
11
12,689
def pad_to_bounding_box ( image , offset_height , offset_width , target_height , target_width , dynamic_shape = False ) : image = ops . convert_to_tensor ( image , name = 'image' ) _Check3DImage ( image , require_static = ( not dynamic_shape ) ) height , width , depth = _ImageDimensions ( image , dynamic_shape = dynamic_shape ) after_padding_width = target_width - offset_width - width after_padding_height = target_height - offset_height - height if not dynamic_shape : if target_width < width : raise ValueError ( 'target_width must be >= width' ) if target_height < height : raise ValueError ( 'target_height must be >= height' ) if after_padding_width < 0 : raise ValueError ( 'target_width not possible given ' 'offset_width and image width' ) if after_padding_height < 0 : raise ValueError ( 'target_height not possible given ' 'offset_height and image height' ) # Do not pad on the depth dimensions. if ( dynamic_shape or offset_width or offset_height or after_padding_width or after_padding_height ) : paddings = array_ops . reshape ( array_ops . pack ( [ offset_height , after_padding_height , offset_width , after_padding_width , 0 , 0 ] ) , [ 3 , 2 ] ) padded = array_ops . pad ( image , paddings ) if not dynamic_shape : padded . set_shape ( [ target_height , target_width , depth ] ) else : padded = image return padded
Pad image with zeros to the specified height and width .
361
12
12,690
def determine_completeness(py_untl):
    """Take a pyuntl record and calculate its completeness score.

    Each scorable attribute carries a weight; an attribute counts only if
    it is present with real content (not a known placeholder value). The
    score is the fraction of total weight earned, in [0.0, 1.0].

    Args:
        py_untl: pyuntl record object exposing `children`, each child
            having `tag`, `content` and `qualifier` attributes.

    Returns:
        float: earned weight divided by total possible weight.
    """
    # Default values for the completeness dictionary.
    completeness_dict = {
        'title': {'present': False, 'weight': 10},
        'description': {'present': False, 'weight': 1},
        'language': {'present': False, 'weight': 1},
        'collection': {'present': False, 'weight': 10},
        'institution': {'present': False, 'weight': 10},
        'resourceType': {'present': False, 'weight': 5},
        'format': {'present': False, 'weight': 1},
        'subject': {'present': False, 'weight': 1},
        'meta': {'present': False, 'weight': 20},
    }
    total_points = sum(item['weight'] for item in completeness_dict.values())
    py_untl_object_score = 0.0

    # Toggle the presence flag for each scorable attribute with content.
    for child in py_untl.children:
        # Skip attributes that are not scored or have no content.
        if child.tag not in PYUNTL_COMPLETENESS_SCORED_ATTRIBUTES:
            continue
        if not child.content:
            continue
        content = child.content.lower()
        # Skip new-style placeholder defaults.
        if DEFAULT_VALUE_REGEX.search(content):
            continue
        # Skip legacy placeholder defaults.
        if content in COMMON_DEFAULT_ATTRIBUTE_VALUES:
            continue
        # Only <meta qualifier="system"> records count toward 'meta'.
        if child.tag == 'meta' and child.qualifier != 'system':
            continue
        completeness_dict[child.tag]['present'] = True

    # Sum the weights of all attributes that were found present.
    # NOTE: was dict.iteritems() (Python 2 only); .items() works on 2 and 3.
    for value in completeness_dict.values():
        if value['present']:
            py_untl_object_score += value['weight']

    # Calculate the float score completeness.
    completeness = py_untl_object_score / total_points
    return completeness
Take a Python untl and calculate the completeness .
547
11
12,691
def init_app(self, app, config_prefix=None):
    """Read JIRA settings from the app configuration and initialize the
    JIRA instance.

    Args:
        app: the Flask application object.
        config_prefix: optional config-key prefix; defaults to 'JIRA'.

    Raises:
        ValueError: if the prefix was already registered on this app.
        ConnectionError: if the initial connection fails and the
            *_IGNORE_INITIAL_CONNECTION_FAILURE flag is not set.
    """
    # Restore self.kill_session().
    self.kill_session = self.original_kill_session

    # Normalize the prefix and register this instance in app.extensions.
    config_prefix = (config_prefix or 'JIRA').rstrip('_').upper()
    ext_key = config_prefix.lower()
    if not hasattr(app, 'extensions'):
        app.extensions = dict()
    if ext_key in app.extensions:
        raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
    app.extensions[ext_key] = _JIRAState(self, app)

    # Read config and initialize fully.
    jira_settings = read_config(app.config, config_prefix)
    try:
        super(JIRA, self).__init__(**jira_settings)
    except ConnectionError:
        # Swallow the failure only when the app explicitly opts in.
        ignore_key = '{0}_IGNORE_INITIAL_CONNECTION_FAILURE'.format(config_prefix)
        if not app.config.get(ignore_key):
            raise
        LOG.exception('Ignoring ConnectionError.')
Actual method to read JIRA settings from app configuration and initialize the JIRA instance .
253
20
12,692
def zip_dict(a: Dict[str, A], b: Dict[str, B]) -> Dict[str, Tuple[Optional[A], Optional[B]]]:
    """Combine the values within two dictionaries by key.

    Every key present in either mapping appears in the result, paired as
    (value from `a`, value from `b`); a missing side yields None.
    """
    combined = {}
    # Union of the two key sets covers keys unique to either mapping.
    for key in a.keys() | b.keys():
        combined[key] = (a.get(key), b.get(key))
    return combined
Combine the values within two dictionaries by key .
80
11
12,693
def flattenPort(port: LPort):
    """Flatten hierarchical ports.

    Yields `port` itself and then, depth-first, every descendant port;
    afterwards the children list is cleared so the hierarchy is flat.
    """
    yield port
    children = port.children
    if children:
        for child in children:
            yield from flattenPort(child)
        # Emptied only after all descendants have been yielded.
        children.clear()
Flatten hierarchical ports
40
4
12,694
def _flattenPortsSide(side: List[LNode]) -> List[LNode]:
    """Flatten hierarchical ports on one node side.

    Args:
        side: list of (possibly hierarchical) ports on a node side.

    Returns:
        A new flat list containing every port followed by all of its
        descendants, in flattenPort's depth-first order.
    """
    # flattenPort yields each port and its recursively flattened children,
    # so a nested comprehension replaces the manual append loop.
    return [flat for port in side for flat in flattenPort(port)]
Flatten hierarchical ports on node side
60
7
12,695
def flattenPorts(root: LNode):
    """Flatten ports on every side of root's children to simplify layout
    generation."""
    for node in root.children:
        # Rebuild each side list with its hierarchy flattened away.
        for side_name in ('west', 'east', 'north', 'south'):
            setattr(node, side_name,
                    _flattenPortsSide(getattr(node, side_name)))
Flatten ports to simplify layout generation
79
7
12,696
def set_missing_defaults(self):
    """Ensure minimal configuration is set up, filling in defaults for
    missing values.

    Keys already present in self.config are left untouched; only absent
    keys receive their default (dict.setdefault semantics, identical to
    the previous explicit `not in` checks).
    """
    config = self.config
    config.setdefault('pub_options', {'acknowledge': True, 'retain': True})
    config.setdefault('sub_options', {'get_retained': False})
    config.setdefault('subscribed_topics', None)
    config.setdefault('replay_events', False)
    config.setdefault('max_reconnect_retries', 10)
Ensure that minimal configuration is setup and set defaults for missing values
169
13
12,697
def config_sanity_check(self):
    """Base configuration sanity checks.

    Raises:
        EventifyConfigError: if a required configuration key is missing.
    """
    if 'name' not in self.config:
        raise EventifyConfigError(
            'Required configuration parameter missing! Please configure '
            '"name" as a string in your configuration.')
    if 'publish_topic' not in self.config:
        # BUGFIX: message previously told users to configure "public_topic"
        # although the key actually checked is 'publish_topic'.
        raise EventifyConfigError(
            'Required configuration parameter missing! Please configure '
            '"publish_topic" as an object in your configuration.')
    if 'topic' not in self.config['publish_topic']:
        raise EventifyConfigError(
            'Required configuration parameter missing! Please configure '
            '"topic" as a key in your "publish_topic" object.')
Base configuration sanity checks
136
4
12,698
def load_config(self):
    """Load configuration for the service.

    Returns:
        dict: the configuration parsed from the JSON file.

    Raises:
        EventifyConfigError: if the configuration file does not exist.
    """
    logger.debug('loading config file: %s', self.config_file)
    # Guard clause: fail loudly when the config file is absent.
    if not os.path.exists(self.config_file):
        logger.error('configuration file is required for eventify')
        logger.error('unable to load configuration for service')
        raise EventifyConfigError(
            'Configuration is required! Missing: %s' % self.config_file)
    with open(self.config_file) as file_handle:
        return json.load(file_handle)
Load configuration for the service
118
5
12,699
def check_event_loop():
    """Replace the current asyncio event loop with a fresh one if it has
    been closed."""
    if asyncio.get_event_loop().is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
Check if event loop is closed and create a new event loop
50
12