signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def MakeDeployableBinary(self, template_path, output_path):
    """Repackage the template zip with the installer.

    Reads the prebuilt client template zip, renames the client and nanny
    service binaries to their configured names, optionally adds the
    Fleetspeak service config, signs/copies the remaining libraries, and
    finally wraps everything into a self-extracting zip.

    Args:
        template_path: Path to the client template zip on disk.
        output_path: Destination path for the produced installer.

    Returns:
        Whatever self.MakeSelfExtractingZip returns (presumably the output
        path — confirm against that method).
    """
    context = self.context + ["Client Context"]
    # Build the output zip entirely in memory.
    zip_data = io.BytesIO()
    output_zip = zipfile.ZipFile(zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
    z_template = zipfile.ZipFile(open(template_path, "rb"))
    # Track which files we've copied already.
    completed_files = [
        "grr-client.exe", "GRRservice.exe",
        "dbg_grr-client.exe", "dbg_GRRservice.exe"
    ]
    # Change the name of the main binary to the configured name.
    client_bin_name = config.CONFIG.Get("Client.binary_name", context=context)
    console_build = config.CONFIG.Get("ClientBuilder.console", context=context)
    # Console builds use the dbg_ variants of both binaries.
    if console_build:
        client_filename = "dbg_grr-client.exe"
        service_filename = "dbg_GRRservice.exe"
    else:
        client_filename = "grr-client.exe"
        service_filename = "GRRservice.exe"
    bin_name = z_template.getinfo(client_filename)
    output_zip.writestr(client_bin_name, z_template.read(bin_name))
    # The manifest must be named after the (renamed) client binary.
    CopyFileInZip(z_template, "grr-client.exe.manifest", output_zip,
                  "%s.manifest" % client_bin_name)
    completed_files.append("grr-client.exe.manifest")
    # Change the name of the service binary to the configured name.
    service_template = z_template.getinfo(service_filename)
    service_bin_name = config.CONFIG.Get("Nanny.service_binary_name", context=context)
    output_zip.writestr(service_bin_name, z_template.read(service_template))
    if config.CONFIG["Client.fleetspeak_enabled"]:
        self._GenerateFleetspeakServiceConfig(output_zip)
    if self.signed_template:
        # If the template libs were already signed we can skip signing
        CreateNewZipWithSignedLibs(z_template, output_zip,
                                   ignore_files=completed_files)
    else:
        CreateNewZipWithSignedLibs(z_template, output_zip,
                                   ignore_files=completed_files,
                                   signer=self.signer)
    output_zip.close()
    return self.MakeSelfExtractingZip(zip_data.getvalue(), output_path)
def get(self, **kwargs):
    """Return the related model instances for this relation."""
    conn = type(self.obj).get_redis()
    # Decode each related id from bytes, then load the model instance.
    return [
        self.model().get(debyte_string(rel_id))
        for rel_id in self.get_related_ids(conn, **kwargs)
    ]
def load_schema(schema_ref,  # type: Union[CommentedMap, CommentedSeq, Text]
                cache=None  # type: Dict
                ):
    # type: (...) -> Tuple[Loader, Union[Names, SchemaParseException], Dict[Text, Any], Loader]
    """Load a schema that can be used to validate documents via load_and_validate.

    :return: document_loader, avsc_names, schema_metadata, metaschema_loader
    """
    metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema()
    if cache is not None:
        # Seed the metaschema loader with any previously fetched documents.
        metaschema_loader.cache.update(cache)

    # Resolve the schema reference and validate it against the metaschema.
    schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
    if not isinstance(schema_doc, MutableSequence):
        raise ValueError("Schema reference must resolve to a list.")
    validate_doc(metaschema_names, schema_doc, metaschema_loader, True)

    # Build the JSON-LD context for target documents from the schema metadata.
    meta_context = schema_metadata.get("@context", {})
    meta_context.update(collect_namespaces(schema_metadata))
    schema_context = jsonld_context.salad_to_jsonld_context(schema_doc, meta_context)[0]

    # Create the loader that will be used to load the target document.
    document_loader = Loader(schema_context, cache=cache)

    # Make the Avro validation that will be used to validate the target document.
    avsc_names = make_avro_schema(schema_doc, document_loader)

    return document_loader, avsc_names, schema_metadata, metaschema_loader
def install_python_module_locally(name):
    """Install a python module locally using pip."""
    quiet = hide('warnings', 'running', 'stdout', 'stderr')
    # Suppress fabric output; fail loudly if pip fails (warn_only=False).
    with settings(quiet, warn_only=False, capture=True):
        local('pip --quiet install %s' % name)
def to_argv_schema(data, arg_names=None, arg_abbrevs=None, filters=None, defaults=None):
    '''to_argv_schema(instructions) yields a valid tuple of CommandLineParser
    instructions for the given instructions tuple; by itself, this will only
    return the instructions as they are, but optional arguments (below) will
    override the values in the instructions if provided.

    to_argv_schema(plan) yields a valid tuple of CommandLineParser
    instructions for the given plan object. The schema returned by this
    function will parse a command-line list (sys.argv) for parameters either
    listed in the instructions (see help(CommandLineParser)) or the afferent
    parameters of the plan. Generally, this should be called by the
    argv_parse() function and not directly.

    If a plan is given as the first argument, then the following rules are
    used to determine how arguments are parsed:
      * An argument that begins with -- (e.g., --quux-factor=10) is checked
        for a matching plan parameter; the argument name "quux-factor" will
        match either a parameter called "quux-factor" or "quux_factor"
        (dashes in command-line arguments are auto-translated into
        underscores).
      * If "quux_factor" is a parameter to the plan and is the only parameter
        that starts with a 'q', then -q10 or -q 10 are both equivalent to
        --quux-factor=10. If other parameters also start with a 'q' then
        neither "quux_factor" nor the other parameter(s) will be matched with
        the -q flag unless it is specified explicitly via the arg_abbrevs
        option.
      * Argument values are parsed using Python's ast.literal_eval(); if this
        raises an exception then the value is left as a string.
      * If an argument or flag is provided without an argument (e.g.
        "--quuxztize" or "-q") then it is interpreted as a boolean flag and
        is given the value True.
      * Arguments that come after the flag "--" are never processed.

    The following options may be given:
      * arg_names (default: None) may be a dictionary that specifies explicit
        command-line argument names for the plan parameters; plan parameters
        should be keys and the argument names should be values. Any parameter
        not listed in this option will be interpreted according to the above
        rules. If a parameter is mapped to None then it will not be filled
        from the command-line arguments.
      * arg_abbrevs (default: None) may be a dictionary that is handled
        identically to that of arg_names except that its values must be
        single letters, which are used for the abbreviated flag names.
      * defaults (default: None) may specify the default values for the plan
        parameters; this dictionary overrides the default values of the plan
        itself.
    '''
    # Internally `data` is a dict mapping parameter name -> instruction row.
    # Rows are 3-tuples (abbrev, arg_name, param) or 4-tuples with a trailing
    # default value appended.
    if is_plan(data):
        # First we must convert it to a valid instruction list
        (plan, data) = (data, {})
        # we go through the afferent parameters...
        for aff in plan.afferents:
            # these are provided by the parsing mechanism and shouldn't be processed
            if aff in ['argv', 'argv_parsed', 'stdout', 'stderr', 'stdin']:
                continue
            # we ignore defaults for now
            data[aff] = (None, aff.replace('_', '-'), aff)
        # and let's try to guess at abbreviation names: a parameter gets its
        # first letter as an abbreviation only if no alphabetical neighbor
        # shares that letter (i.e. the letter is unambiguous).
        entries = sorted(data.keys())
        n = len(entries)
        for (ii, entry) in enumerate(entries):
            if ii > 0 and entry[0] == entries[ii - 1][0]:
                continue
            if ii < n - 1 and entry[0] == entries[ii + 1][0]:
                continue
            r = data[entry]
            data[entry] = (entry[0], r[1], entry, r[3]) if len(r) == 4 else (entry[0], r[1], entry)
        # now go through and fix defaults...
        for (entry, dflt) in six.iteritems(plan.defaults):
            if entry not in data:
                continue
            r = data[entry]
            data[entry] = (r[0], r[1], r[2], dflt)
    elif arg_names is None and arg_abbrevs is None and defaults is None:
        # return the same object if there are no changes to a schema
        return data
    else:
        # `data` was an instruction sequence; key the rows by parameter name
        # (element 2 of each row) so the override passes below can find them.
        data = {r[2]: r for r in data}
    # Now we go through and make updates based on the optional arguments.
    # Each pass rewrites one slot of the row, preserving the optional default
    # in slot 3 when present.
    if arg_names is None:
        arg_names = {}
    for (entry, arg_name) in six.iteritems(arg_names):
        if entry not in data:
            continue
        r = data[entry]
        data[entry] = (r[0], arg_name, entry) if len(r) == 3 else (r[0], arg_name, entry, r[3])
    if arg_abbrevs is None:
        arg_abbrevs = {}
    for (entry, arg_abbrev) in six.iteritems(arg_abbrevs):
        if entry not in data:
            continue
        r = data[entry]
        data[entry] = (arg_abbrev, r[1], entry) if len(r) == 3 else (arg_abbrev, r[1], entry, r[3])
    if defaults is None:
        defaults = {}
    for (entry, dflt) in six.iteritems(defaults):
        if entry not in data:
            continue
        r = data[entry]
        data[entry] = (r[0], r[1], entry, dflt)
    # return the list-ified version of this
    return [tuple(row) for row in six.itervalues(data)]
def _get_files(file_patterns, top=HERE):
    """Expand file patterns to a list of paths.

    Parameters
    ----------
    file_patterns: list or str
        A list of glob patterns for the data file locations. The globs can
        be recursive if they include a `**`. They should be relative paths
        from the top directory or absolute paths.
    top: str
        The directory to consider for data files.

    Note: Files in `node_modules` are ignored.
    """
    if not isinstance(file_patterns, (list, tuple)):
        file_patterns = [file_patterns]

    # Normalize absolute patterns to be relative to `top`. Build a new list
    # instead of assigning into `file_patterns`, which would mutate the
    # caller's list in place (the original implementation did exactly that).
    file_patterns = [
        os.path.relpath(p, top) if os.path.isabs(p) else p
        for p in file_patterns
    ]
    matchers = [_compile_pattern(p) for p in file_patterns]

    files = set()
    for root, dirnames, filenames in os.walk(top):
        # Don't recurse into node_modules.
        if 'node_modules' in dirnames:
            dirnames.remove('node_modules')
        for m in matchers:
            for filename in filenames:
                # Compare and store paths relative to `top`, with forward
                # slashes regardless of platform.
                fn = os.path.relpath(_glob_pjoin(root, filename), top)
                fn = fn.replace(os.sep, '/')
                if m(fn):
                    # `fn` is already normalized; no second replace needed.
                    files.add(fn)
    return list(files)
def get_selected_submissions(self, course, filter_type, selected_tasks, users, aggregations, stype):
    """Returns the submissions that have been selected by the admin

    :param course: course
    :param filter_type: "users" or "aggregations"
    :param selected_tasks: selected task ids
    :param users: selected usernames
    :param aggregations: selected aggregation ids
    :param stype: "single" (evaluated submission per user/task) or all submissions
    :return: (submissions, aggregations) where aggregations maps username ->
        aggregation document (or None when the user is not grouped and
        classrooms are disabled)
    """
    if filter_type == "users":
        self._validate_list(users)
        # Find every aggregation containing one of the selected users.
        aggregations = list(self.database.aggregations.find({"courseid": course.get_id(),
                                                             "students": {"$in": users}}))
        # Tweak if not using classrooms: classroom['students'] may contain ungrouped users
        # NOTE(review): this cross-product maps each username against every
        # matched aggregation; the last aggregation iterated wins — confirm
        # that each user belongs to at most one aggregation per course.
        aggregations = dict([(username,
                              aggregation if course.use_classrooms() or
                              (len(aggregation['groups']) and
                               username in aggregation['groups'][0]["students"]) else None)
                             for aggregation in aggregations
                             for username in users])
    else:
        self._validate_list(aggregations)
        # Resolve the selected aggregation ids to their documents.
        aggregations = list(self.database.aggregations.find(
            {"_id": {"$in": [ObjectId(cid) for cid in aggregations]}}))
        # Tweak if not using classrooms: classroom['students'] may contain ungrouped users
        aggregations = dict([(username,
                              aggregation if course.use_classrooms() or
                              (len(aggregation['groups']) and
                               username in aggregation['groups'][0]["students"]) else None)
                             for aggregation in aggregations
                             for username in aggregation["students"]])

    if stype == "single":
        # Only the submission referenced by each user_task entry (the one
        # that counts for grading), skipping users with no submission yet.
        user_tasks = list(self.database.user_tasks.find({"username": {"$in": list(aggregations.keys())},
                                                         "taskid": {"$in": selected_tasks},
                                                         "courseid": course.get_id()}))
        submissionsid = [user_task['submissionid'] for user_task in user_tasks
                         if user_task['submissionid'] is not None]
        submissions = list(self.database.submissions.find({"_id": {"$in": submissionsid}}))
    else:
        # All finished submissions (done or error) for the selected users/tasks.
        submissions = list(self.database.submissions.find({"username": {"$in": list(aggregations.keys())},
                                                           "taskid": {"$in": selected_tasks},
                                                           "courseid": course.get_id(),
                                                           "status": {"$in": ["done", "error"]}}))
    return submissions, aggregations
def _column_names(self):
    """Return the column names, excluding any index columns."""
    excluded = set(_normalize_index_names(self._index_names))
    return [name for name in self._schema_rdd.columns if name not in excluded]
def calc_qigz1_v1(self):
    """Aggregate the amount of the first interflow component released by
    all HRUs.

    Basic equation: QIGZ1 = sum(FHRU * QIB1) over all HRUs.

    Reads control parameters NHRU and FHRU and flux sequence QIB1; writes
    state sequence QIGZ1.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    # Area-weighted sum of the per-HRU interflow contributions.
    total = 0.
    for idx in range(control.nhru):
        total += control.fhru[idx] * fluxes.qib1[idx]
    states.qigz1 = total
def clockIsBroken():
    """Return whether twisted.internet.task.Clock has the bug that returns
    the wrong DelayedCall or not.

    Schedules two calls and checks whether callLater handed back the same
    DelayedCall object for both — a correct Clock returns distinct objects.
    """
    clock = Clock()
    dc1 = clock.callLater(10, lambda: None)
    dc2 = clock.callLater(1, lambda: None)
    # `if x: return True else: return False` collapsed to the expression.
    return dc1 is dc2
def run(bam_file, data, out_dir):
    """Run viral QC analysis:
    1. Extract the unmapped reads
    2. BWA-MEM to the viral sequences from GDC database
       https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files
    3. Report viruses that are in more than 50% covered by at least 5x
    """
    source_link = 'https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files'
    viral_target = "gdc-viral"
    out = {}
    # Only run for paired (tumor/normal) samples.
    if vcfutils.get_paired_phenotype(data):
        # Locate the gdc-viral reference FASTA among the configured viral files.
        viral_refs = [x for x in dd.get_viral_files(data)
                      if os.path.basename(x) == "%s.fa" % viral_target]
        if viral_refs and utils.file_exists(viral_refs[0]):
            viral_ref = viral_refs[0]
            viral_bam = os.path.join(utils.safe_makedir(out_dir),
                                     "%s-%s.bam" % (dd.get_sample_name(data),
                                                    utils.splitext_plus(os.path.basename(viral_ref))[0]))
            out_file = "%s-completeness.txt" % utils.splitext_plus(viral_bam)[0]
            cores = dd.get_num_cores(data)
            if not utils.file_uptodate(out_file, bam_file):
                # Step 1+2: realign unmapped reads against the viral reference.
                if not utils.file_uptodate(viral_bam, bam_file):
                    with file_transaction(data, viral_bam) as tx_out_file:
                        tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
                        cmd = ("samtools view -u -f 4 {bam_file} | "
                               "bamtofastq collate=0 | "
                               "bwa mem -t {cores} {viral_ref} - | "
                               "bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
                               "inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
                        do.run(cmd.format(**locals()), "Align unmapped reads to viral genome")
                # Step 3: compute per-virus coverage/completeness with mosdepth
                # and assemble the report, sorted by 5x completeness.
                with file_transaction(data, out_file) as tx_out_file:
                    sample_name = dd.get_sample_name(data)
                    mosdepth_prefix = os.path.splitext(viral_bam)[0]
                    cmd = ("mosdepth -t {cores} {mosdepth_prefix} {viral_bam} -n --thresholds 1,5,25 --by "
                           "<(awk 'BEGIN {{FS=\"\\t\"}}; {{print $1 FS \"0\" FS $2}}' {viral_ref}.fai) && "
                           "echo '## Viral sequences (from {source_link}) found in unmapped reads' > {tx_out_file} &&"
                           "echo '## Sample: {sample_name}' >> {tx_out_file} && "
                           "echo '#virus\tsize\tdepth\t1x\t5x\t25x' >> {tx_out_file} && "
                           "paste <(zcat {mosdepth_prefix}.regions.bed.gz) <(zgrep -v ^# {mosdepth_prefix}.thresholds.bed.gz) | "
                           "awk 'BEGIN {{FS=\"\\t\"}} {{ print $1 FS $3 FS $4 FS $10/$3 FS $11/$3 FS $12/$3}}' | "
                           "sort -n -r -k 5,5 >> {tx_out_file}")
                    do.run(cmd.format(**locals()), "Analyse coverage of viral genomes")
            out["base"] = out_file
            out["secondary"] = []
    return out
def _adjust ( hsl , attribute , percent ) : """Internal adjust function"""
hsl = list ( hsl ) if attribute > 0 : hsl [ attribute ] = _clamp ( hsl [ attribute ] + percent ) else : hsl [ attribute ] += percent return hsl
def _set_group_type(self, v, load=False):
    """Setter method for group_type, mapped from YANG variable
    /openflow_state/group/group_info_list/group_type (group-type).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_group_type is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_group_type() directly.

    YANG Description: Group type

    NOTE: generated pyangbind binding code — do not hand-edit the type
    restriction dictionaries; regenerate from the YANG module instead.
    """
    # Typed wrapper objects coerce themselves before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate the value against the YANG enumeration (dict_key restriction).
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-group-type-select': {'value': 2}, u'dcm-group-type-invalid': {'value': 0}, u'dcm-group-type-fast-failover': {'value': 4}, u'dcm-group-type-indirect': {'value': 3}, u'dcm-group-type-all': {'value': 1}},), is_leaf=True, yang_name="group-type", rest_name="group-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='group-type', is_config=False)
    except (TypeError, ValueError):
        # Surface the generated type expression so callers can see the
        # accepted value space.
        raise ValueError({
            'error-string': """group_type must be of a type compatible with group-type""",
            'defined-type': "brocade-openflow-operational:group-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-group-type-select': {'value': 2}, u'dcm-group-type-invalid': {'value': 0}, u'dcm-group-type-fast-failover': {'value': 4}, u'dcm-group-type-indirect': {'value': 3}, u'dcm-group-type-all': {'value': 1}},), is_leaf=True, yang_name="group-type", rest_name="group-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='group-type', is_config=False)""",
        })
    self.__group_type = t
    if hasattr(self, '_set'):
        self._set()
def notify(
    self,
    method_name: str,
    *args: Any,
    trim_log_values: Optional[bool] = None,
    validate_against_schema: Optional[bool] = None,
    **kwargs: Any,
) -> Response:
    """Send a JSON-RPC notification — a request without an expected response.

    Args:
        method_name: The remote procedure's method name.
        args: Positional arguments passed to the remote procedure.
        kwargs: Keyword arguments passed to the remote procedure.
        trim_log_values: Abbreviate the log entries of requests and responses.
        validate_against_schema: Validate response against the JSON-RPC schema.
    """
    notification = Notification(method_name, *args, **kwargs)
    return self.send(
        notification,
        trim_log_values=trim_log_values,
        validate_against_schema=validate_against_schema,
    )
def send_state_event(self, room_id, event_type, content, state_key="", timestamp=None):
    """Perform PUT /rooms/$room_id/state/$event_type

    Args:
        room_id (str): The room ID to send the state event in.
        event_type (str): The state event type to send.
        content (dict): The JSON content to send.
        state_key (str): Optional. The state key for the event.
        timestamp (int): Set origin_server_ts (For application services only)
    """
    path = "/rooms/{}/state/{}".format(quote(room_id), quote(event_type))
    if state_key:
        path = "{}/{}".format(path, quote(state_key))
    # "ts" is only attached when a timestamp was supplied.
    params = {"ts": timestamp} if timestamp else {}
    return self._send("PUT", path, content, query_params=params)
def recur(self, klass, data, interval, offset=0, priority=None, tags=None,
          retries=None, jid=None):
    """Place a recurring job in this queue.

    A fresh uuid4 hex id is generated when no jid is supplied; priority
    defaults to 0, tags to an empty list and retries to 5.
    """
    job_id = jid or uuid.uuid4().hex
    return self.client(
        'recur', self.name, job_id, self.class_string(klass),
        json.dumps(data),
        'interval', interval, offset,
        'priority', priority or 0,
        'tags', json.dumps(tags or []),
        'retries', retries or 5)
def add_classdiff_optgroup(parser):
    """Option group specific to class checking."""
    group = parser.add_argument_group("Class Checking Options")

    # Simple boolean switches, all off by default.
    for flag in ("--ignore-version-up",
                 "--ignore-version-down",
                 "--ignore-platform-up",
                 "--ignore-platform-down",
                 "--ignore-absolute-lines",
                 "--ignore-relative-lines",
                 "--ignore-deprecated",
                 "--ignore-added",
                 "--ignore-pool"):
        group.add_argument(flag, action="store_true", default=False)

    # Composite flags implemented via custom argparse actions.
    group.add_argument("--ignore-lines", nargs=0,
                       help="ignore relative and absolute line-number changes",
                       action=_opt_cb_ign_lines)
    group.add_argument("--ignore-platform", nargs=0,
                       help="ignore platform changes",
                       action=_opt_cb_ign_platform)
    group.add_argument("--ignore-version", nargs=0,
                       help="ignore version changes",
                       action=_opt_cb_ign_version)
def mode_collection():
    """Manage an existing collection node."""
    # Python 2 interactive mode: show this mode's docstring as its banner.
    print globals()['mode_collection'].__doc__
    collection_node_id = existing_node_input()
    value = render_value_for_node(collection_node_id)
    if not value:
        return None
    print "Collection length: {0}".format(len(value))
    print safe_dump(value, default_flow_style=False)
    # Derive the attribute names from the first item; keys look like
    # "<name> (<node_id>)" — assumption based on the regex below, confirm
    # against render_value_for_node.
    item_attr_list = []
    if len(value):
        for key in value.items()[0][1].keys():
            m = re.match(r'(.*) \((\d+)\)', key)
            item_attr_list.append(m.group(1))
    # Menu loop: an empty selection (falsy) exits.
    selection = True
    while selection:
        selection = select(['View collection', 'Add item', 'Add attribute', 'Remove item', 'Remove attribute', 'Purge collection'])
        if selection == 'View collection':
            print safe_dump(value, default_flow_style=False)
        elif selection == 'Purge collection':
            confirm = raw_input("Delete all {0} items and their {1} attributes from the collection? y/n\n".format(len(value.keys()), len(item_attr_list)))
            if confirm == 'y':
                delete_node(node_id=collection_node_id)
                purge_collection(value.keys())
        elif selection == 'Remove item':
            item_node_id = existing_node_input()
            if item_node_id < 0:
                return
            value = render_value_for_node(item_node_id)
            print safe_dump(value, default_flow_style=False)
            # NOTE(review): .format() is applied to the raw_input() RESULT
            # here, not to the prompt string — the placeholders are never
            # substituted and this only works because the typed answer has no
            # braces. Likely a misplaced call; verify intent before changing.
            confirm = raw_input("Delete this node and it's attributes? y/n\n").format(len(value.keys()), len(item_attr_list))
            if confirm == 'y':
                delete_node(node_id=item_node_id)
                purge_collection(value.keys())
        elif selection == 'Add item':
            result = select_node(node_id=collection_node_id)
            collection_name = result[0].get('name')
            add_item_with_attributes_to_collection(collection_name=collection_name, collection_node_id=collection_node_id, item_attr_list=item_attr_list)
        elif selection == 'Remove attribute':
            print "Select the attribute that will be removed:"
            attribute_selection = select(item_attr_list)
            if attribute_selection:
                confirm = raw_input("Delete attribute '{0}' from all {1} items in the collection? y/n\n".format(attribute_selection, len(value.keys())))
                if confirm == 'y':
                    # Delete the matching attribute node from every item.
                    for item_key, item in value.items():
                        for key in item.keys():
                            m = re.match(r'(.*) \((\d+)\)', key)
                            if m.group(1) == attribute_selection:
                                delete_node(node_id=m.group(2))
                                break
        elif selection == 'Add attribute':
            item_attr = raw_input("Add a collection item attribute name: ")
            if item_attr:
                # Prompt for a value of the new attribute on every item.
                item_index = 0
                for item_key, item in value.items():
                    item_index += 1
                    m = re.match(r'(.*) \((\d+)\)', item_key)
                    item_value = render_value_for_node(m.group(2))
                    print "item {0} of {1} items".format(item_index, len(value))
                    print safe_dump(item_value, default_flow_style=False)
                    new_attr_value = raw_input("Enter item attribute value for '{0}': ".format(item_attr))
                    # set value to none if it's an empty string
                    new_attr_value = new_attr_value if len(new_attr_value) else None
                    item_attr_node_id = insert_node(name=item_attr, value=new_attr_value)
                    insert_node_node(node_id=m.group(2), target_node_id=item_attr_node_id)
        # Update the value after each operation
        value = render_value_for_node(collection_node_id)
def mine_sub_trees(self, threshold):
    """Generate subtrees and mine them for patterns.

    FP-growth step: for each frequent item (least-frequent first), build the
    item's conditional pattern base, construct a conditional FP-tree from it,
    and recursively mine that subtree. Pattern counts from all subtrees are
    merged into one dictionary.

    :param threshold: minimum support count for a pattern to be kept.
    :return: dict mapping pattern (tuple of items) to its support count.
    """
    patterns = {}
    mining_order = sorted(self.frequent.keys(), key=lambda x: self.frequent[x])

    # Get items in tree in reverse order of occurrences.
    for item in mining_order:
        suffixes = []
        conditional_tree_input = []
        node = self.headers[item]

        # Follow node links to get a list of
        # all occurrences of a certain item.
        while node is not None:
            suffixes.append(node)
            node = node.link

        # For each occurrence of the item,
        # trace the path back to the root node.
        for suffix in suffixes:
            frequency = suffix.count
            path = []
            parent = suffix.parent
            while parent.parent is not None:
                path.append(parent.value)
                parent = parent.parent
            # The path is repeated once per occurrence of the suffix node.
            for i in range(frequency):
                conditional_tree_input.append(path)

        # Now we have the input for a subtree,
        # so construct it and grab the patterns.
        subtree = FPTree(conditional_tree_input, threshold, item, self.frequent[item])
        subtree_patterns = subtree.mine_patterns(threshold)

        # Insert subtree patterns into main patterns dictionary,
        # accumulating counts for patterns seen in multiple subtrees.
        for pattern in subtree_patterns.keys():
            if pattern in patterns:
                patterns[pattern] += subtree_patterns[pattern]
            else:
                patterns[pattern] = subtree_patterns[pattern]

    return patterns
def from_service_account_file(cls, filename, *args, **kwargs):
    """Creates an instance of this client using the provided credentials file.

    Args:
        filename (str): The path to the service account private key json file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        dialogflow_v2.SessionEntityTypesClient: The constructed client.
    """
    creds = service_account.Credentials.from_service_account_file(filename)
    # Inject (and deliberately overwrite) the credentials keyword.
    kwargs['credentials'] = creds
    return cls(*args, **kwargs)
def timeFormat(time_from, time_to=None, prefix="", infix=None):
    """Format the times time_from and optionally time_to, e.g. 10am.

    time_from is rendered with Django's "fA" time format, lowercased; when
    time_to is also given the two are joined either by *infix* or by the
    localised "{fromTime} to {toTime}" template. The result is stripped of
    surrounding whitespace.

    NOTE(review): the nesting of the time_to branch relative to the
    time_from branch was reconstructed from whitespace-mangled source —
    confirm whether time_to should be rendered when time_from is empty.
    """
    retval = ""
    if time_from != "" and time_from is not None:
        retval += prefix
        retval += dateformat.time_format(time_from, "fA").lower()
    if time_to != "" and time_to is not None:
        to = format(dateformat.time_format(time_to, "fA").lower())
        if infix is not None:
            retval = "{} {} {}".format(retval, infix, to)
        else:
            # Translatable joining template.
            retval = _("{fromTime} to {toTime}").format(fromTime=retval, toTime=to)
    return retval.strip()
def get_state_machine_m(self, two_factor_check=True):
    """Get respective state machine model

    Get a reference of the state machine model the state model belongs to.
    As long as the root state model has no direct reference to its state
    machine model the state machine manager model is checked for the
    respective model.

    :param two_factor_check: when True, additionally verify that the manager's
        model for this state's path is this very object before returning.
    :rtype: rafcon.gui.models.state_machine.StateMachineModel
    :return: respective state machine model, or None when it cannot be resolved
    """
    # Imported locally — presumably to avoid a circular import; confirm.
    from rafcon.gui.singleton import state_machine_manager_model
    state_machine = self.state.get_state_machine()
    if state_machine:
        if state_machine.state_machine_id in state_machine_manager_model.state_machines:
            sm_m = state_machine_manager_model.state_machines[state_machine.state_machine_id]
            # Optional identity check: the manager's model for this path must
            # be this very model instance, otherwise we are a stale duplicate.
            if not two_factor_check or sm_m.get_state_model_by_path(self.state.get_path()) is self:
                return sm_m
            else:
                logger.debug("State model requesting its state machine model parent seems to be obsolete. "
                             "This is a hint to duplicated models and dirty coding")
    return None
def plot(self, leaf_separation=1, cmap='viridis', select_clusters=False,
         label_clusters=False, selection_palette=None,
         axis=None, colorbar=True, log_size=False,
         max_rectangles_per_icicle=20):
    """Use matplotlib to plot an 'icicle plot' dendrogram of the condensed tree.

    Effectively this is a dendrogram where the width of each cluster bar is
    equal to the number of points (or log of the number of points) in the
    cluster at the given lambda value. Thus bars narrow as points
    progressively drop out of clusters. To make the effect more apparent the
    bars are also colored according to the number of points (or log of the
    number of points).

    Parameters
    ----------
    leaf_separation : float, optional (default 1)
        How far apart to space the final leaves of the dendrogram.
    cmap : string or matplotlib colormap, optional (default viridis)
        The matplotlib colormap to use to color the cluster bars.
    select_clusters : boolean, optional (default False)
        Whether to draw ovals highlighting which cluster bars represent the
        clusters that were selected by HDBSCAN as the final clusters.
    label_clusters : boolean, optional (default False)
        If select_clusters is True then this determines whether to draw text
        labels on the clusters.
    selection_palette : list of colors, optional (default None)
        If not None, and at least as long as the number of clusters, draw
        ovals in colors iterating through this palette. This can aid in
        cluster identification when plotting.
    axis : matplotlib axis or None, optional (default None)
        The matplotlib axis to render to. If None then a new axis will be
        generated. The rendered axis will be returned.
    colorbar : boolean, optional (default True)
        Whether to draw a matplotlib colorbar displaying the range of cluster
        sizes as per the colormap.
    log_size : boolean, optional (default False)
        Use log scale for the 'size' of clusters (i.e. number of points in
        the cluster at a given lambda value).
    max_rectangles_per_icicle : int, optional (default 20)
        To simplify the plot this method will only emit
        ``max_rectangles_per_icicle`` bars per branch of the dendrogram. This
        ensures that we don't suffer from massive overplotting in cases with
        a lot of data points.

    Returns
    -------
    axis : matplotlib axis
        The axis on which the 'icicle plot' has been rendered.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError(
            'You must install the matplotlib library to plot the condensed tree.'
            'Use get_plot_data to calculate the relevant data without plotting.')

    plot_data = self.get_plot_data(leaf_separation=leaf_separation,
                                   log_size=log_size,
                                   max_rectangle_per_icicle=max_rectangles_per_icicle)

    if cmap != 'none':
        # Color bars by their width (cluster size) via a scalar mappable.
        sm = plt.cm.ScalarMappable(cmap=cmap,
                                   norm=plt.Normalize(0, max(plot_data['bar_widths'])))
        sm.set_array(plot_data['bar_widths'])
        bar_colors = [sm.to_rgba(x) for x in plot_data['bar_widths']]
    else:
        bar_colors = 'black'

    if axis is None:
        axis = plt.gca()

    axis.bar(plot_data['bar_centers'], plot_data['bar_tops'],
             bottom=plot_data['bar_bottoms'], width=plot_data['bar_widths'],
             color=bar_colors, align='center', linewidth=0)

    # Draw all connector lines in a single plot() call for speed.
    drawlines = []
    for xs, ys in zip(plot_data['line_xs'], plot_data['line_ys']):
        drawlines.append(xs)
        drawlines.append(ys)
    axis.plot(*drawlines, color='black', linewidth=1)
    # for xs, ys in zip(plot_data['line_xs'], plot_data['line_ys']):
    #     axis.plot(xs, ys, color='black', linewidth=1)

    if select_clusters:
        try:
            from matplotlib.patches import Ellipse
        except ImportError:
            raise ImportError('You must have matplotlib.patches available to plot selected clusters.')

        chosen_clusters = self._select_clusters()

        # Extract the chosen cluster bounds. If enough duplicate data points exist in the
        # data the lambda value might be infinite. This breaks labeling and highlighting
        # the chosen clusters.
        cluster_bounds = np.array([plot_data['cluster_bounds'][c] for c in chosen_clusters])
        if not np.isfinite(cluster_bounds).all():
            warn('Infinite lambda values encountered in chosen clusters.'
                 ' This might be due to duplicates in the data.')

        # Extract the plot range of the y-axis and set default center and height values for ellipses.
        # Extremly dense clusters might result in near infinite lambda values. Setting max_height
        # based on the percentile should alleviate the impact on plotting.
        plot_range = np.hstack([plot_data['bar_tops'], plot_data['bar_bottoms']])
        plot_range = plot_range[np.isfinite(plot_range)]
        mean_y_center = np.mean([np.max(plot_range), np.min(plot_range)])
        max_height = np.diff(np.percentile(plot_range, q=[10, 90]))

        for i, c in enumerate(chosen_clusters):
            c_bounds = plot_data['cluster_bounds'][c]
            width = (c_bounds[CB_RIGHT] - c_bounds[CB_LEFT])
            height = (c_bounds[CB_TOP] - c_bounds[CB_BOTTOM])
            center = (np.mean([c_bounds[CB_LEFT], c_bounds[CB_RIGHT]]),
                      np.mean([c_bounds[CB_TOP], c_bounds[CB_BOTTOM]]),)

            # Set center and height to default values if necessary
            if not np.isfinite(center[1]):
                center = (center[0], mean_y_center)
            if not np.isfinite(height):
                height = max_height

            # Ensure the ellipse is visible
            min_height = 0.1 * max_height
            if height < min_height:
                height = min_height

            if selection_palette is not None and \
                    len(selection_palette) >= len(chosen_clusters):
                oval_color = selection_palette[i]
            else:
                oval_color = 'r'

            box = Ellipse(center,
                          2.0 * width,
                          1.2 * height,
                          facecolor='none',
                          edgecolor=oval_color,
                          linewidth=2)

            if label_clusters:
                axis.annotate(str(i), xy=center,
                              xytext=(center[0] - 4.0 * width, center[1] + 0.65 * height),
                              horizontalalignment='left',
                              verticalalignment='bottom')

            axis.add_artist(box)

    if colorbar:
        # NOTE(review): `sm` is only defined when cmap != 'none'; calling
        # with colorbar=True and cmap='none' would raise — confirm intended.
        cb = plt.colorbar(sm)
        if log_size:
            cb.ax.set_ylabel('log(Number of points)')
        else:
            cb.ax.set_ylabel('Number of points')

    axis.set_xticks([])
    for side in ('right', 'top', 'bottom'):
        axis.spines[side].set_visible(False)
    axis.invert_yaxis()
    axis.set_ylabel('$\lambda$ value')

    return axis
def fit(self, X, u=None, sv=None, v=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Data to embed.
    u, sv, v : arrays, optional
        Precomputed SVD factors of X; when any is missing the SVD is
        recomputed here.

    Returns
    -------
    self
    """
    # BUG FIX: the mode checks below used `is 'parallel'`, an identity
    # comparison against a string literal whose truth depends on interning.
    # Use equality instead.
    if self.mode == 'parallel':
        Xall = X.copy()
        X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
    # X -= X.mean(axis=-1)[:, np.newaxis]
    if (u is None) or (sv is None) or (v is None):
        # compute svd and keep iPC's of data
        nmin = min([X.shape[0], X.shape[1]])
        nmin = np.minimum(nmin - 1, self.nPC)
        u, sv, v = svdecon(np.float64(X), k=nmin)
        # u, sv, v = np.float32(u), np.float32(sv), np.float32(v)
    self.nPC = sv.size

    # first smooth in Y (if n_Y > 0) -- this is a 1-D fit
    isort2 = []
    if self.n_Y > 0:
        vsort = np.argsort(v[:, 0])[:, np.newaxis]
        isort2, iclustup = self._map(v * sv, 1, self.n_Y, vsort)
        # X = gaussian_filter1d(X[:, isort2], self.sig_Y, axis=1)
        # u, sv, v = svdecon(np.float64(X), k=nmin)

    self.u = u
    self.sv = sv
    self.v = v

    if self.mode == 'parallel':
        NN = Xall.shape[1]
        X = np.zeros((2, NN, u.shape[1]), 'float64')
        for j in range(2):
            Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
            X[j] = Xall[j] @ self.v
    else:
        NN = X.shape[0]
        X = X @ self.v

    if self.init == 'pca':
        # flip PC signs so each has positive skew before ranking
        u = u * np.sign(skew(u, axis=0))
        init_sort = np.argsort(u[:NN, :self.n_components], axis=0)
        # (removed an `if False:` dead branch that adjusted init_sort)
    elif self.init == 'random':
        init_sort = np.random.permutation(NN)[:, np.newaxis]
        for j in range(1, self.n_components):
            init_sort = np.concatenate(
                (init_sort, np.random.permutation(NN)[:, np.newaxis]), axis=-1)
    else:
        # assumes a user-provided initial sorting -- TODO confirm shape
        init_sort = self.init

    if self.n_components == 1 and init_sort.ndim == 1:
        init_sort = init_sort[:, np.newaxis]

    # now sort in X
    isort1, iclustup = self._map(X, self.n_components, self.n_X, init_sort)
    self.isort2 = isort2
    self.isort1 = isort1
    self.embedding = iclustup
    return self
def make_repo(repodir,
              keyid=None,
              env=None,
              use_passphrase=False,
              gnupghome='/etc/salt/gpgkeys',
              runas='root',
              timeout=15.0):
    '''
    Make a package repository and optionally sign packages present

    Given the repodir, create a ``yum`` repository out of the rpms therein
    and optionally sign it and packages present, the name is directory to
    turn into a repo. This state is best used with onchanges linked to
    your package building states.

    repodir
        The directory to find packages that will be in the repository.

    keyid
        .. versionchanged:: 2016.3.0
        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data under
        ``gpg_pkg_pub_key``/``gpg_pkg_pub_keyname`` and
        ``gpg_pkg_priv_key``/``gpg_pkg_priv_keyname``.

    env
        .. versionchanged:: 2016.3.0
        A dictionary of environment variables to be utilized in creating
        the repository.

        .. note::
            This parameter is not used for making ``yum`` repositories.

    use_passphrase : False
        .. versionadded:: 2016.3.0
        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from the ``gpg_passphrase`` Pillar value,
        which could be passed on the command line with the ``pillar``
        parameter.

    gnupghome : /etc/salt/gpgkeys
        .. versionadded:: 2016.3.0
        Location where GPG related files are stored, used with ``keyid``.

    runas : root
        .. versionadded:: 2016.3.0
        User to create the repository as, and optionally sign packages.

        .. note::
            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    timeout : 15.0
        .. versionadded:: 2016.3.4
        Timeout in seconds to wait for the prompt for inputting the
        passphrase.

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_repo /var/www/html/
    '''
    SIGN_PROMPT_RE = re.compile(r'Enter pass phrase: ', re.M)

    define_gpg_name = ''
    local_keyid = None
    local_uids = None
    phrase = ''

    if keyid is not None:
        # import_keys
        # BUG FIX: the old code formatted the pillar values into paths
        # *before* checking for None, so the None check compared a string
        # like '/etc/salt/gpgkeys/None' and could never fire.  Validate the
        # raw pillar values first.
        pub_keyname = __salt__['pillar.get']('gpg_pkg_pub_keyname', None)
        priv_keyname = __salt__['pillar.get']('gpg_pkg_priv_keyname', None)
        if pub_keyname is None or priv_keyname is None:
            raise SaltInvocationError(
                'Pillar data should contain Public and Private keys associated with \'keyid\''
            )
        pkg_pub_key_file = '{0}/{1}'.format(gnupghome, pub_keyname)
        pkg_priv_key_file = '{0}/{1}'.format(gnupghome, priv_keyname)

        try:
            __salt__['gpg.import_key'](user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome)
            __salt__['gpg.import_key'](user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome)
        except SaltInvocationError:
            raise SaltInvocationError(
                'Public and Private key files associated with Pillar data and \'keyid\' '
                '{0} could not be found'.format(keyid)
            )

        # gpg keys should have been loaded as part of setup
        # retrieve specified key and preset passphrase
        local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
        for gpg_key in local_keys:
            # match on the short (last 8 chars stripped off the front) key id
            if keyid == gpg_key['keyid'][8:]:
                local_uids = gpg_key['uids']
                local_keyid = gpg_key['keyid']
                break

        if local_keyid is None:
            raise SaltInvocationError(
                'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''.format(keyid, gnupghome)
            )

        if use_passphrase:
            phrase = __salt__['pillar.get']('gpg_passphrase')

        if local_uids:
            define_gpg_name = '--define=\'%_signature gpg\' --define=\'%_gpg_name {0}\''.format(
                local_uids[0])

        # need to update rpm with public key
        cmd = 'rpm --import {0}'.format(pkg_pub_key_file)
        retrc = __salt__['cmd.retcode'](cmd, runas=runas, use_vt=True)
        if retrc != 0:
            raise SaltInvocationError(
                'Failed to import public key from file {0} with return '
                'error {1}, check logs for further details'.format(pkg_pub_key_file, retrc)
            )

        # sign_it_here
        # interval of 0.125 is really too fast on some systems
        interval = 0.5
        for fileused in os.listdir(repodir):
            if fileused.endswith('.rpm'):
                abs_file = os.path.join(repodir, fileused)
                number_retries = timeout / interval
                times_looped = 0
                error_msg = 'Failed to sign file {0}'.format(abs_file)
                cmd = 'rpm {0} --addsign {1}'.format(define_gpg_name, abs_file)
                preexec_fn = functools.partial(salt.utils.user.chugid_and_umask, runas, None)
                try:
                    stdout, stderr = None, None
                    proc = salt.utils.vt.Terminal(
                        cmd,
                        shell=True,
                        preexec_fn=preexec_fn,
                        stream_stdout=True,
                        stream_stderr=True
                    )
                    while proc.has_unread_data:
                        stdout, stderr = proc.recv()
                        if stdout and SIGN_PROMPT_RE.search(stdout):
                            # have the prompt for inputting the passphrase
                            proc.sendline(phrase)
                        else:
                            times_looped += 1

                        if times_looped > number_retries:
                            raise SaltInvocationError(
                                'Attemping to sign file {0} failed, timed out after {1} seconds'.format(
                                    abs_file, int(times_looped * interval))
                            )
                        time.sleep(interval)

                    proc_exitstatus = proc.exitstatus
                    if proc_exitstatus != 0:
                        raise SaltInvocationError(
                            'Signing file {0} failed with proc.status {1}'.format(
                                abs_file, proc_exitstatus)
                        )
                except salt.utils.vt.TerminalException as err:
                    trace = traceback.format_exc()
                    # BUG FIX: log.error was previously called with extra
                    # arguments but no format placeholders in the message;
                    # use lazy %-formatting so err and trace are rendered.
                    log.error('%s: %s\n%s', error_msg, err, trace)
                finally:
                    proc.close(terminate=True, kill=True)

    cmd = 'createrepo --update {0}'.format(repodir)
    return __salt__['cmd.run_all'](cmd, runas=runas)
def _parse_keyvals(self, line_iter):
    """Generate dictionary from key/value pairs."""
    out = None
    line = None
    for line in line_iter:
        # A single all-uppercase cell marks the start of the next section:
        # stop and hand that line back to the caller.
        if len(line) == 1 and line[0].upper() == line[0]:
            break
        if out is None:
            # First data row: trim trailing blank columns, then create one
            # output dict per remaining value column.
            while not line[-1]:
                line = line[:-1]
            out = [{} for _ in line[1:]]
        # Pad rows that were stripped short with empty values.
        while len(line) < len(out) + 1:
            line.append("")
        for col, slot in enumerate(out):
            slot[line[0]] = line[col + 1].strip()
        line = None
    return out, line
def fromjson(cls, json_string: str) -> 'Event':
    """Create a new Event from a psycopg2-pgevent event JSON payload.

    Parameters
    ----------
    json_string: str
        Valid psycopg2-pgevent event JSON.

    Returns
    -------
    Event
        Event created from JSON deserialization.
    """
    payload = json.loads(json_string)
    event_id = UUID(payload['event_id'])
    return cls(
        event_id,
        payload['event_type'],
        payload['schema_name'],
        payload['table_name'],
        payload['row_id'],
    )
def partition(lst, n):
    """Divide list into n equal parts.

    Returns a pair ``(chunks, index_chunks)``: ``chunks`` splits ``lst``
    into ``n`` contiguous pieces whose lengths differ by at most one, and
    ``index_chunks`` holds the corresponding index lists.
    """
    # q items per part, with the first r parts receiving one extra item.
    # BUG FIX: replaced Python-2-only `xrange` with `range`, which behaves
    # identically here and also works on Python 3.
    q, r = divmod(len(lst), n)
    indices = [q * i + min(i, r) for i in range(n + 1)]
    chunks = [lst[indices[i]:indices[i + 1]] for i in range(n)]
    index_chunks = [list(range(indices[i], indices[i + 1])) for i in range(n)]
    return chunks, index_chunks
def all ( self ) : """Synchronize all registered plugins and plugin points to database ."""
# Django > = 1.9 changed something with the migration logic causing # plugins to be executed before the corresponding database tables # exist . This method will only return something if the database # tables have already been created . # XXX : I don ' t fully understand the issue and there should be # another way but this appears to work fine . if django_version >= ( 1 , 9 ) and ( not db_table_exists ( Plugin . _meta . db_table ) or not db_table_exists ( PluginPoint . _meta . db_table ) ) : return self . points ( )
def create_koji_session(hub_url, auth_info=None):
    """Creates and returns a Koji session. If auth_info
    is provided, the session will be authenticated.

    :param hub_url: str, Koji hub URL
    :param auth_info: dict, authentication parameters used for koji_login
    :return: koji.ClientSession instance
    """
    # krb_rdns=False avoids reverse-DNS lookups during Kerberos auth.
    raw_session = koji.ClientSession(hub_url, opts={'krb_rdns': False})
    session = KojiSessionWrapper(raw_session)
    if auth_info is not None:
        koji_login(session, **auth_info)
    return session
def get_config():
    """Prepare and return alembic config

    These configurations used to live in alembic config initialiser, but
    that was just tight coupling. Ideally we should move that to userspace
    and find a way to pass these into alembic commands.
    @todo: think about it
    """
    from boiler.migrations.config import MigrationsConfig

    # Human-readable names used in error messages (also doubles as the
    # app-config key for path and db_url lookups).
    error_names = dict(
        path='MIGRATIONS_PATH',
        db_url='SQLALCHEMY_DATABASE_URI',
        metadata='SQLAlchemy metadata',
    )

    app = bootstrap.get_app()
    params = dict(
        path=app.config.get(error_names['path'], 'migrations'),
        db_url=app.config.get(error_names['db_url']),
        metadata=db.metadata,
    )

    # Every parameter must be truthy, otherwise migrations cannot run.
    for name, value in params.items():
        if not value:
            msg = 'Configuration error: [{}] is undefined'
            raise Exception(msg.format(error_names[name]))

    return MigrationsConfig(**params)
def trim(self):
    """Clear not used counters"""
    # Snapshot the matching keys first so deletion does not disturb
    # the iteration over the counters mapping.
    empty_keys = [key for key, counter in list(iteritems(self.counters))
                  if counter.empty()]
    for key in empty_keys:
        del self.counters[key]
def create_table_from_orm_class(engine: Engine,
                                ormclass: DeclarativeMeta,
                                without_constraints: bool = False) -> None:
    """From an SQLAlchemy ORM class, creates the database table via the
    specified engine, using a ``CREATE TABLE`` SQL (DDL) statement.

    Args:
        engine: SQLAlchemy :class:`Engine` object
        ormclass: SQLAlchemy ORM class
        without_constraints: don't add foreign key constraints
    """
    table = ormclass.__table__  # type: Table
    suffix = " (omitting constraints)" if without_constraints else ""
    log.info("Creating table {} on engine {}{}",
             table.name, get_safe_url_from_engine(engine), suffix)
    # https://stackoverflow.com/questions/19175311/how-to-create-only-one-table-with-sqlalchemy  # noqa
    # An explicit empty list suppresses FK constraints; None keeps the
    # default behaviour of emitting them.
    fk_constraints = [] if without_constraints else None
    creator = CreateTable(table, include_foreign_key_constraints=fk_constraints)
    creator.execute(bind=engine)
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    # getattr with a default mirrors the hasattr/None double-check:
    # both treat a missing attribute and an explicit None the same way.
    match_value = getattr(self, 'match', None)
    if match_value is not None:
        result['match'] = match_value
    return result
def replace(self, key, value):
    """Replaces the entry for a key only if it is currently mapped to some
    value. This is equivalent to::

        >>> if map.contains_key(key):
        >>>     return map.put(key, value)
        >>> else:
        >>>     return None

    except that the action is performed atomically.

    **Warning: This method uses __hash__ and __eq__ methods of binary form
    of the key, not the actual implementations of __hash__ and __eq__
    defined in key's class.**

    **Warning 2: This method returns a clone of the previous value, not the
    original (identically equal) value previously put into the map.**

    :param key: (object), the specified key.
    :param value: (object), the value to replace the previous value.
    :return: (object), previous value associated with key, or ``None`` if
        there was no mapping for key.
    """
    check_not_none(key, "key can't be None")
    check_not_none(value, "value can't be None")
    serialized_key = self._to_data(key)
    serialized_value = self._to_data(value)
    return self._replace_internal(serialized_key, serialized_value)
def _get_corresponding_parsers(self, func):
    """Get the parser that has been set up by the given `function`"""
    # Yield this parser itself if the function was registered on it.
    if func in self._used_functions:
        yield self
    if self._subparsers_action is None:
        return
    # Recurse into every subparser and surface its matches.
    for subparser in self._subparsers_action.choices.values():
        for match in subparser._get_corresponding_parsers(func):
            yield match
def method_name(func):
    """Method wrapper that adds the name of the method being called to its
    arguments list in Pascal case
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Inject the wrapped function's own name, converted to PascalCase,
        # as the `name` keyword argument.
        pascal_name = to_pascal_case(func.__name__)
        return func(*args, name=pascal_name, **kwargs)
    return wrapper
def Move_to(self, x, y, dl=0):
    """Move the mouse pointer to coordinates (x, y).

    :param x: target x coordinate
    :param y: target y coordinate
    :param dl: delay passed to ``self.Delay`` before moving
               (units as interpreted by Delay -- TODO confirm); default 0
    """
    # Wait for the requested delay, then perform the move.
    self.Delay(dl)
    self.mouse.move(x, y)
def augmentation_transform(self, data, label):  # pylint: disable=arguments-differ
    """Override Transforms input data with specified augmentations."""
    # Thread the (data, label) pair through every configured augmenter,
    # in order, each one consuming the previous one's output.
    pair = (data, label)
    for transform in self.auglist:
        new_data, new_label = transform(*pair)
        pair = (new_data, new_label)
    return pair
def mass_3d(self, R, Rs, rho0, r_trunc):
    """Mass enclosed in a 3d sphere of radius R for the truncated profile.

    :param R: radius at which the enclosed mass is evaluated
    :param Rs: scale radius
    :param rho0: density normalisation
    :param r_trunc: truncation radius (presumably in units of Rs -- TODO confirm)
    :return: enclosed 3d mass
    """
    x = R * Rs ** -1
    t = r_trunc
    # Dimensionless enclosed-mass integral; term order kept exactly as in
    # the analytic expression.
    numerator = t ** 2 * (
        -2 * x * (1 + t ** 2)
        + 4 * (1 + x) * t * np.arctan(x / t)
        - 2 * (1 + x) * (-1 + t ** 2) * np.log(Rs)
        + 2 * (1 + x) * (-1 + t ** 2) * np.log(Rs * (1 + x))
        + 2 * (1 + x) * (-1 + t ** 2) * np.log(Rs * t)
        - (1 + x) * (-1 + t ** 2) * np.log(Rs ** 2 * (x ** 2 + t ** 2))
    )
    func = numerator / (2. * (1 + x) * (1 + t ** 2) ** 2)
    return 4 * np.pi * Rs ** 3 * rho0 * func
def hostname(self, hostname):
    """hostname setter

    Accepts any string; the empty string is allowed because it is the
    docker default. Raises TypeError for non-strings and ValueError for a
    non-empty string that is not a valid hostname.
    """
    if not isinstance(hostname, six.string_types):
        raise TypeError("hostname must be a string. {0} was passed.".format(type(hostname)))
    # if a host name is passed and its not valid raise, else set hostname;
    # empty strings are the docker default.
    if hostname and not is_valid_hostname(hostname):
        # BUG FIX: `.format()` was previously applied to the ValueError
        # *instance* (raising AttributeError) instead of the message string.
        raise ValueError("{0} isn't a valid hostname".format(hostname))
    self._hostname = hostname
def switch_inline(text, query='', same_peer=False):
    """Creates a new button to switch to inline query.

    If `query` is given, it will be the default text to be used when
    making the inline query.

    If ``same_peer is True`` the inline query will directly be set under
    the currently opened chat. Otherwise, the user will have to select a
    different dialog to make the query.
    """
    button = types.KeyboardButtonSwitchInline(text, query, same_peer)
    return button
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None):
    """Function to read the weights from checkpoint based on json description.

    Args:
      checkpoint: tensorflow checkpoint with trained model to verify
      model_json: path of json file with model description of the network --
        a list of dictionary items per layer containing 'type', 'weight_var',
        'bias_var' (and, for conv layers, 'stride' and 'padding').
        'type' is one of {'ff', 'ff_relu' or 'conv'};
        'weight_var' is the name of the tf variable for the layer's weights;
        'bias_var' is the name of the tf variable for the layer's bias.
        Note that the last layer is always feedforward.
      input_shape: optional shape of the network input.

    Returns:
      NeuralNetwork built from:
        net_weights: list of weight matrices (convention: x[i+1] = W[i] x[i])
        net_biases: list of bias column vectors
        net_layer_types: per-layer type strings
        cnn_params: per-layer conv parameters (None for ff layers)

    Raises:
      ValueError: If layer types are invalid, variable names are not found
        in the checkpoint, or conv layers lack stride/padding.
    """
    # Load checkpoint and collect the set of variable names it contains.
    reader = tf.train.load_checkpoint(checkpoint)
    available_names = reader.get_variable_to_shape_map().keys()

    # Parse JSON file for the per-layer variable names.
    with tf.gfile.Open(model_json) as f:
        layer_descriptions = json.load(f)

    net_layer_types = []
    net_weights = []
    net_biases = []
    cnn_params = []

    # Validate each layer description and pull its tensors.
    for description in layer_descriptions:
        layer_type = description['type']
        if layer_type not in {'ff', 'ff_relu', 'conv'}:
            raise ValueError('Invalid layer type in description')
        if (description['weight_var'] not in available_names
                or description['bias_var'] not in available_names):
            raise ValueError('Variable names not found in checkpoint')
        net_layer_types.append(layer_type)
        weights = reader.get_tensor(description['weight_var'])
        bias = reader.get_tensor(description['bias_var'])
        # TODO(aditirag): is there a way to automatically check when to
        # transpose?  We want weights W such that x^{i+1} = W^i x^i + b^i.
        # Matching shapes is ambiguous when the matrix is square.
        if layer_type in {'ff', 'ff_relu'}:
            weights = np.transpose(weights)
            cnn_params.append(None)
        if layer_type == 'conv':
            if 'stride' not in description or 'padding' not in description:
                raise ValueError('Please define stride and padding for conv layers.')
            cnn_params.append({'stride': description['stride'],
                               'padding': description['padding']})
        net_weights.append(weights)
        # Biases are stored as column vectors.
        net_biases.append(np.reshape(bias, (np.size(bias), 1)))

    return NeuralNetwork(net_weights, net_biases, net_layer_types,
                         input_shape, cnn_params)
def binary_to_spin(linear, quadratic, offset):
    """convert linear, quadratic and offset from binary to spin.
    Does no checking of vartype. Copies all of the values into new objects.
    """
    h = {}
    J = {}
    linear_offset = 0.0
    quadratic_offset = 0.0

    # Each binary bias b on a variable maps to a field of b/2, with b
    # accumulated into the constant shift.
    for variable, bias in iteritems(linear):
        h[variable] = .5 * bias
        linear_offset += bias

    # Each binary coupling maps to J/4, with b/4 pushed onto both incident
    # fields and b accumulated into the constant shift.
    for (u, v), bias in iteritems(quadratic):
        J[(u, v)] = .25 * bias
        h[u] += .25 * bias
        h[v] += .25 * bias
        quadratic_offset += bias

    offset += .5 * linear_offset + .25 * quadratic_offset
    return h, J, offset
def _extract_image_urls(self):
    """Retrieves image URLs from the current page"""
    page_source = self._chromeDriver.page_source
    soup = BeautifulSoup(page_source, 'html.parser')
    # Result metadata is embedded as JSON inside div.rg_meta elements.
    metadata_divs = soup.find_all('div', class_='rg_meta')
    images = [json.loads(div.contents[0]) for div in metadata_divs]
    # BUG FIX: a list comprehension was previously used purely for its
    # append side effect; use extend instead.
    self._imageURLs.extend(image['ou'] for image in images)
    self._imageURLsExtractedCount += len(images)
def is_carrying_minerals(self) -> bool:
    """Checks if a worker or MULE is carrying (gold-)minerals."""
    carry_buffs = (
        BuffId.CARRYMINERALFIELDMINERALS,
        BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS,
    )
    # The proto stores raw buff id values, so compare against .value.
    return any(buff.value in self._proto.buff_ids for buff in carry_buffs)
def renew_session(self):
    """Clears all session data and starts a new session using the same
    settings as before.

    This method can be used to clear session data, e.g., cookies. Future
    requests will use a new session initiated with the same settings and
    authentication method.
    """
    logger.debug("API session renewed")
    self.session = self.authentication.get_session()
    default_headers = {
        'User-Agent': 'MoneyBird for Python %s' % VERSION,
        'Accept': 'application/json',
    }
    self.session.headers.update(default_headers)
def unlock(self):
    """Unlock the doors and extend handles where applicable."""
    # Nothing to do when the vehicle is already unlocked.
    if not self.__lock_state:
        return
    data = self._controller.command(self._id, 'door_unlock',
                                    wake_if_asleep=True)
    if data['response']['result']:
        # Command succeeded: record the new state and when we set it.
        self.__lock_state = False
        self.__manual_update_time = time.time()
def update_health_monitor(self, health_monitor, body=None):
    """Updates a load balancer health monitor."""
    # Interpolate the monitor id into the resource path, then PUT.
    path = self.health_monitor_path % (health_monitor)
    return self.put(path, body=body)
def choose_args(metadata, config):
    """Choose database connection arguments."""
    return {
        'connect_args': choose_connect_args(metadata, config),
        'echo': config.echo,
        'max_overflow': config.max_overflow,
        'pool_size': config.pool_size,
        'pool_timeout': config.pool_timeout,
    }
def _explore(self, pop):
    """Exploration phase: Find a set of candidate states based on the
    current population.

    :param pop: The starting population for this generation.
    """
    candidates = set(pop)
    # Split the total exploration budget evenly across the population.
    per_state_budget = self.args.max_exploration // len(pop)

    mutations = []
    if self.args.brokers:
        mutations.append(self._move_partition)
    if self.args.leaders:
        mutations.append(self._move_leadership)

    for state in pop:
        for _ in range(per_state_budget):
            # Apply a randomly chosen mutation; keep only non-falsy results.
            mutated = random.choice(mutations)(state)
            if mutated:
                candidates.add(mutated)
    return candidates
def get_relationships_for_idents(self, cid, idents):
    '''Get relationships between ``idents`` and a ``cid``.

    Returns a dictionary mapping identifiers in ``idents`` to the
    relationship strength stored between that identifier and ``cid``.
    NOTE(review): identifiers with no stored relationship label are simply
    absent from the mapping -- the previous docstring claimed they map to
    None, but the code never inserts None; confirm which contract callers
    rely on.
    '''
    keys = [(cid, ident,) for ident in idents]
    # Degenerate ranges (start == end) scan exact keys.
    key_ranges = zip(keys, keys)
    mapping = {}
    for k, v in self.kvl.scan(self.TABLE, *key_ranges):
        label = self._label_from_kvlayer(k, v)
        ident = label.other(cid)
        # (removed an unused local that merely aliased label.rel_strength)
        mapping[ident] = label.rel_strength
    return mapping
def serial_wire_viewer(jlink_serial, device):
    """Implements a Serial Wire Viewer (SWV).

    A Serial Wire Viewer (SWV) allows us implement real-time logging of
    output from a connected device over Serial Wire Output (SWO).

    Args:
      jlink_serial (str): the J-Link serial number
      device (str): the target CPU

    Returns:
      Always returns ``0``.

    Raises:
      JLinkException: on error
    """
    # Collect the DLL's log output in an in-memory buffer.
    # NOTE(review): this uses the Python 2 StringIO module; not Python 3
    # compatible as written.
    buf = StringIO.StringIO()
    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
    jlink.open(serial_no=jlink_serial)

    # Use Serial Wire Debug as the target interface. Need this in order to use
    # Serial Wire Output.
    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
    jlink.connect(device, verbose=True)
    jlink.coresight_configure()
    jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN)

    # Have to halt the CPU before getting its speed.
    jlink.reset()
    jlink.halt()

    # Output the information about the program.
    sys.stdout.write('Serial Wire Viewer\n')
    sys.stdout.write('Press Ctrl-C to Exit\n')
    sys.stdout.write('Reading data from port 0:\n\n')

    # Reset the core without halting so that it runs.
    jlink.reset(ms=10, halt=False)

    # Use the `try` loop to catch a keyboard interrupt in order to stop logging
    # serial wire output.
    try:
        while True:
            # Check the vector catch.
            if jlink.register_read(0x0) != 0x05:
                continue

            # Read the print-buffer descriptor: handle, data pointer, length.
            offset = jlink.register_read(0x1)
            handle, ptr, num_bytes = jlink.memory_read32(offset, 3)
            read = ''.join(map(chr, jlink.memory_read8(ptr, num_bytes)))

            if num_bytes == 0:
                # If no bytes exist, sleep for a bit before trying again.
                time.sleep(1)
                continue

            # Acknowledge the data and let the target continue running.
            jlink.register_write(0x0, 0)
            jlink.step(thumb=True)
            jlink.restart(2, skip_breakpoints=True)

            sys.stdout.write(read)
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass

    sys.stdout.write('\n')
    return 0
def _initBlockMajorMap(self):
    """Parses /proc/devices to initialize device class - major number map
    for block devices.
    """
    self._mapMajorDevclass = {}
    try:
        # BUG FIX: use a context manager so the file is closed even when
        # read() fails, and catch OSError/IOError rather than a bare except.
        with open(devicesFile, 'r') as fp:
            data = fp.read()
    except (IOError, OSError):
        raise IOError('Failed reading device information from file: %s'
                      % devicesFile)
    skip = True
    for line in data.splitlines():
        if skip:
            # Skip everything until the "Block devices:" section header.
            if re.match(r'block.*:', line, re.IGNORECASE):
                skip = False
        else:
            # Lines look like "  8 sd": major number then device class.
            mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line)
            if mobj:
                major = int(mobj.group(1))
                devtype = mobj.group(2)
                self._mapMajorDevclass[major] = devtype
                if devtype == 'device-mapper':
                    self._dmMajorNum = major
def _do_get(cnf, get_path):
    """
    :param cnf: Configuration object to print out
    :param get_path: key path given in --get option
    :return: updated Configuration object if no error
    """
    result, err = API.get(cnf, get_path)
    if result is None:
        # Lookup failed; report the error and exit with status 1.
        _exit_with_output("Failed to get result: err=%s" % err, 1)
    return result
def create_slab_label(self):
    """Returns a label (str) for this particular slab based on
    composition, coverage and Miller index.
    """
    # Honour an explicitly stored label first.
    # (idiom fix: membership test on the dict itself instead of .keys())
    if "label" in self.data:
        return self.data["label"]

    label = str(self.miller_index)
    ads_strs = list(self.ads_entries_dict.keys())

    cleaned = self.cleaned_up_slab
    label += " %s" % (cleaned.composition.reduced_composition)

    if self.adsorbates:
        for ads in ads_strs:
            label += r"+%s" % (ads)
        # NOTE(review): get_monolayer is used without calling it, so it
        # appears to be a property -- confirm.
        label += r", %.3f ML" % (self.get_monolayer)
    return label
def features(self):
    """lazy fetch and cache features"""
    if self._features is None:
        # First access: pull the metadata once and cache the feature list,
        # defaulting to an empty list when no "features" entry exists.
        metadata = self.metadata()
        self._features = metadata["features"] if "features" in metadata else []
    return self._features
def unregister_handle_func(self, _handle_func_name, topic):
    """Unregister a handler function from ``topic``.

    Removes every handler in the topic's list that either *is* the given
    object or whose ``__name__`` matches it, then drops the topic entry
    entirely once its handler list is empty.
    """
    handler_list = self._handle_funcs.get(topic, [])
    # BUG FIX: the old code popped items while iterating with enumerate,
    # which skips the element immediately following each removal.  Rebuild
    # the list in place instead so every match is removed.
    handler_list[:] = [
        h for h in handler_list
        if not (h is _handle_func_name or h.__name__ == _handle_func_name)
    ]
    if self._handle_funcs.get(topic) == []:
        self._handle_funcs.pop(topic)
def supports_version_type(self, version_type):
    """Tests if the given version type is supported.

    arg:    version_type (osid.type.Type): a version Type
    return: (boolean) - ``true`` if the type is supported, ``false``
            otherwise
    raise:  IllegalState - syntax is not a ``VERSION``
    raise:  NullArgument - ``version_type`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.Metadata.supports_coordinate_type
    if self._kwargs['syntax'] != '``VERSION``':
        raise errors.IllegalState()
    # NOTE(review): get_version_types is used as an attribute (no call),
    # matching the original -- presumably a property; confirm.
    return version_type in self.get_version_types
def free_parameters(self):
    """Returns a dictionary of free parameters for this function

    :return: dictionary of free parameters
    """
    # Preserve insertion order while keeping only parameters flagged free.
    free = collections.OrderedDict()
    for name, parameter in self.parameters.iteritems():
        if parameter.free:
            free[name] = parameter
    return free
def phon(self, cls='current', previousdelimiter="", strict=False, correctionhandling=CorrectionHandling.CURRENT):
    """See :meth:`AbstractElement.phon`"""
    if cls == 'original':
        correctionhandling = CorrectionHandling.ORIGINAL  # backward compatibility
    if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
        for e in self:
            if isinstance(e, (New, Current)):
                return previousdelimiter + e.phon(cls, "", strict, correctionhandling)
    if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
        for e in self:
            if isinstance(e, Original):
                # BUG FIX: the recursive call previously omitted ``strict``,
                # so ``correctionhandling`` landed in the ``strict`` slot
                # and the handling mode silently fell back to the default.
                return previousdelimiter + e.phon(cls, "", strict, correctionhandling)
    raise NoSuchPhon
def switch_to_output(self, value=False, **kwargs):
    """Switch the pin state to a digital output with the provided starting
    value (True/False for high or low, default is False/low).

    NOTE(review): extra keyword arguments are accepted but ignored --
    presumably for signature compatibility with other pin classes; confirm.
    """
    # Configure the direction first, then drive the initial value.
    self.direction = digitalio.Direction.OUTPUT
    self.value = value
def _setTypecodeList(self):
    """generates ofwhat content, minOccurs/maxOccurs facet generation.

    Flattens the model-group content (``self.mgContent``) into a list of
    element declarations/references, then builds one
    ``TcListComponentContainer`` per element with its occurrence facets.

    Dependency instance attribute:
        mgContent -- expected to be either a complex definition with model
            group content, a model group, or model group content.
            TODO: should only support the first two.
        localTypes -- produce local class definitions later
        tcListElements -- elements, local/global
    """
    self.logger.debug("_setTypecodeList(%r): %s" % (self.mgContent, self._item.getItemTrace()))
    flat = []
    content = self.mgContent
    # TODO: too much slop permitted here, impossible
    # to tell what is going on.
    if type(content) is not tuple:
        mg = content
        if not mg.isModelGroup():
            raise Wsdl2PythonErr("Expecting ModelGroup: %s" % mg.getItemTrace())
        self.logger.debug("ModelGroup(%r) contents(%r): %s" % (mg, mg.content, mg.getItemTrace()))
        # <group ref> -- references are expected to have been resolved upstream
        if mg.isReference():
            raise RuntimeError("Unexpected modelGroup reference: %s" % mg.getItemTrace())
        # <group name> -- unwrap the definition to its inner model group
        if mg.isDefinition():
            mg = mg.content
        if mg.isAll():
            # <all>: every child is an element; nothing further to flatten.
            flat = mg.content
            content = []
        elif mg.isSequence():
            content = mg.content
        elif mg.isChoice():
            content = mg.content
        else:
            raise RuntimeError("Unknown schema item")
    idx = 0
    content = list(content)
    self.logger.debug("content: %r" % content)
    # Flatten nested sequence/choice groups in place: elements go to
    # 'flat', inner group contents are spliced into 'content'.
    while idx < len(content):
        c = orig = content[idx]
        if c.isElement():
            flat.append(c)
            idx += 1
            continue
        if c.isReference() and c.isModelGroup():
            c = c.getModelGroupReference()
        if c.isDefinition() and c.isModelGroup():
            c = c.content
        if c.isSequence() or c.isChoice():
            # Splice the group's children where the group itself was,
            # then rescan from the same index.
            begIdx = idx
            endIdx = begIdx + len(c.content)
            for i in range(begIdx, endIdx):
                content.insert(i, c.content[i - begIdx])
            content.remove(orig)
            continue
        raise ContainerError, 'unexpected schema item: %s' % c.getItemTrace()
    # TODO: Need to store "parents" in a dict[id]=list(),
    # because cannot follow references, but not currently
    # a big concern.
    self.logger.debug("flat: %r" % list(flat))
    for c in flat:
        tc = TcListComponentContainer()
        # TODO: Remove _getOccurs
        min, max, nil = self._getOccurs(c)
        min = max = None
        maxOccurs = 1
        # Walk up enclosing model groups, multiplying maxOccurs facets;
        # 'unbounded' short-circuits (stored as a quoted string).
        parent = c
        defs = []
        # stop recursion via global ModelGroupDefinition
        while defs.count(parent) <= 1:
            max = parent.getAttribute('maxOccurs')
            if max == 'unbounded':
                maxOccurs = '"%s"' % max
                break
            maxOccurs = int(max) * maxOccurs
            parent = parent._parent()
            if not parent.isModelGroup():
                break
            if parent.isReference():
                parent = parent.getModelGroupReference()
            if parent.isDefinition():
                parent = parent.content
                defs.append(parent)
        del defs
        # Walk up again for minOccurs: a zero or a choice anywhere in the
        # chain makes the element optional.
        parent = c
        while 1:
            minOccurs = int(parent.getAttribute('minOccurs'))
            if minOccurs == 0 or parent.isChoice():
                minOccurs = 0
                break
            parent = parent._parent()
            if not parent.isModelGroup():
                minOccurs = int(c.getAttribute('minOccurs'))
                break
            if parent.isReference():
                parent = parent.getModelGroupReference()
            if parent.isDefinition():
                parent = parent.content
        tc.setOccurs(minOccurs, maxOccurs, nil)
        processContents = self._getProcessContents(c)
        tc.setProcessContents(processContents)
        if c.isDeclaration() and c.isElement():
            global_type = c.getAttribute('type')
            content = getattr(c, 'content', None)
            if c.isLocal() and c.isQualified() is False:
                tc.unQualified()
            if c.isWildCard():
                tc.setStyleAnyElement()
            elif global_type is not None:
                tc.name = c.getAttribute('name')
                ns = global_type[0]
                if ns in SCHEMA.XSD_LIST:
                    # Built-in XSD type: resolve to its typecode class.
                    tpc = BTI.get_typeclass(global_type[1], global_type[0])
                    tc.klass = tpc
                # elif (self.ns, self.name) == global_type:
                #     # elif self._isRecursiveElement(c)
                #     # TODO: Remove this, it only works for 1 level.
                #     tc.setStyleRecursion()
                else:
                    tc.setGlobalType(*global_type)
                    # tc.klass = '%s.%s' % (NAD.getAlias(ns),
                    #     type_class_name(global_type[1]))
                del ns
            elif content is not None and content.isLocal() and content.isComplex():
                tc.name = c.getAttribute('name')
                tc.klass = 'self.__class__.%s' % (element_class_name(tc.name))
                # TODO: Not an element reference, confusing nomenclature
                tc.setStyleElementReference()
                self.localTypes.append(c)
            elif content is not None and content.isLocal() and content.isSimple():
                # Local Simple Type
                tc.name = c.getAttribute('name')
                tc.klass = 'self.__class__.%s' % (element_class_name(tc.name))
                # TODO: Not an element reference, confusing nomenclature
                tc.setStyleElementReference()
                self.localTypes.append(c)
            else:
                raise ContainerError, 'unexpected item: %s' % c.getItemTrace()
        elif c.isReference():
            # element references
            ref = c.getAttribute('ref')
            # tc.klass = '%s.%s' % (NAD.getAlias(ref[0]),
            #     element_class_name(ref[1]))
            tc.setStyleElementReference()
            tc.setGlobalType(*ref)
        else:
            raise ContainerError, 'unexpected item: %s' % c.getItemTrace()
        self.tcListElements.append(tc)
def get(block_id):
    """Processing block detail resource.

    Looks up the processing block identified by ``block_id`` and
    decorates it with HATEOAS-style links. On failure, returns a
    NOT_FOUND response carrying an error message.

    :param block_id: identifier of the processing block
    """
    _url = get_root_url()
    try:
        block = DB.get_block_details([block_id]).__next__()
        response = block
        response['links'] = {
            'self': '{}'.format(request.url),
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return block
    except (IndexError, StopIteration) as error:
        # BUG FIX: get_block_details() returns an iterator, so an unknown
        # block id surfaces as StopIteration from __next__(), which the
        # previous 'except IndexError' never caught. Catch both.
        response = dict(message='Unable to GET Processing Block',
                        id='{}'.format(block_id),
                        error=error.__str__())
        response['links'] = {
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return response, HTTPStatus.NOT_FOUND
def scrub(zpool, stop=False, pause=False):
    '''Scrub a storage pool

    zpool : string
        Name of storage pool
    stop : boolean
        If ``True``, cancel ongoing scrub
    pause : boolean
        If ``True``, pause ongoing scrub

        .. versionadded:: 2018.3.0

        .. note::

            Pause is only available on recent versions of ZFS.

            If both ``pause`` and ``stop`` are ``True``, then ``stop`` will
            win.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.scrub myzpool
    '''
    ## select correct action (stop takes precedence over pause)
    flags = None
    if stop:
        flags = ['-s']
    elif pause:
        flags = ['-p']

    ## Scrub storage pool
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='scrub',
            flags=flags,
            target=zpool,
        ),
        python_shell=False,
    )

    if res['retcode'] != 0:
        return __utils__['zfs.parse_command_result'](res, 'scrubbing')

    # Starting a scrub reports scrubbing=True; stopping/pausing reports False.
    return OrderedDict([('scrubbing', not (stop or pause))])
def get_service_packages(self, **kwargs):  # noqa: E501
    """Get all service packages.  # noqa: E501

    Get information of all service packages for the currently authenticated
    commercial account. The response is returned in descending order by
    service package created timestamp: first the pending package, then the
    active one, and finally the previous packages.

    **Example usage:** curl -X GET
    https://api.us-east-1.mbedcloud.com/v3/service-packages
    -H 'authorization: Bearer {api-key}'

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_service_packages(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :return: ServicePackagesResponse
             If the method is called asynchronously, returns the request
             thread.
    """
    # Request only the response body; the _with_http_info variant already
    # returns the request thread itself when asynchronous=True, so a
    # single call covers both the sync and async modes.
    kwargs['_return_http_data_only'] = True
    return self.get_service_packages_with_http_info(**kwargs)  # noqa: E501
def next(self):
    """Return the next row of a query result set, respecting if cursor was
    closed.

    :raises ProgrammingError: if no query has been executed yet, or the
        cursor has been closed
    """
    # Guard clauses: no result set yet, then closed cursor.
    if self.rows is None:
        raise ProgrammingError(
            "No result available. " +
            "execute() or executemany() must be called first."
        )
    if self._closed:
        raise ProgrammingError("Cursor closed")
    return next(self.rows)
def convert_type(self, value, spec):
    """Some well-educated format guessing.

    Converts a raw (string) ``value`` according to the ``type`` entry of
    ``spec``. Unparseable ints/floats/dates yield ``None``; unknown types
    return the value unchanged.

    :param value: raw value to convert
    :param spec: mapping describing the target type; keys: ``type``, plus
        optional ``format``/``column`` for date types
    :raises MappingException: if a date ``format`` mapping is invalid
    """
    data_type = spec.get('type', 'string').lower().strip()
    if data_type in ['bool', 'boolean']:
        return value.lower() in BOOL_TRUISH
    elif data_type in ['int', 'integer']:
        try:
            return int(value)
        except (ValueError, TypeError):
            return None
    elif data_type in ['float', 'decimal', 'real']:
        try:
            return float(value)
        except (ValueError, TypeError):
            return None
    elif data_type in ['date', 'datetime', 'timestamp']:
        if 'format' in spec:
            format_list = self._get_date_format_list(spec.get('format'))
            if format_list is None:
                raise MappingException('%s format mapping is not valid: %r'
                                       % (spec.get('column'), spec.get('format')))
            # Try each candidate format until one parses.
            # (Renamed 'format' -> 'fmt' to stop shadowing the builtin.)
            for fmt, precision in format_list:
                try:
                    return {'value': datetime.strptime(value, fmt),
                            'value_precision': precision}
                except (ValueError, TypeError):
                    pass
            return None
        else:
            # No explicit format: fall back to dateutil's fuzzy parser.
            try:
                return parser.parse(value)
            except (ValueError, TypeError):
                return None
    elif data_type == 'file':
        # The previous 'try: ... except: raise' was a no-op wrapper;
        # let any exception from _get_file propagate naturally.
        return self._get_file(value)
    return value
def validate(self):
    """Validate the whole document.

    Runs every registered field validator, collecting all errors before
    raising so the caller sees every problem at once.

    :returns: ``True`` when validation passes (or is disabled)
    :raises InvalidDocument: mapping of field -> error for all failures
    """
    if not self.mustValidate:
        return True

    failures = {}
    for field in list(self.validators):
        try:
            validator = self.validators[field]
            # Nested validator dicts get their own sub-store on demand.
            if isinstance(validator, dict) and field not in self.store:
                self.store[field] = DocumentStore(
                    self.collection,
                    validators=validator,
                    initDct={},
                    subStore=True,
                    validateInit=self.validateInit,
                )
            self.validateField(field)
        except InvalidDocument as exc:
            failures.update(exc.errors)
        except (ValidationError, SchemaViolation) as exc:
            failures[field] = str(exc)

    if failures:
        raise InvalidDocument(failures)
    return True
def remove_in_progress_notifications(self, master=True):
    """Remove all notifications from notifications_in_progress.

    Preserves some specific notifications (downtime, ...)

    :param master: remove master notifications only if True (default value)
    :type master: bool
    :return: None
    """
    preserved_types = (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
                       u'CUSTOM', u'ACKNOWLEDGEMENT')
    for pending in list(self.notifications_in_progress.values()):
        # Master mode: leave child (contact) notifications untouched.
        if master and pending.contact:
            continue
        # Keep downtime / custom / acknowledgement notifications.
        if pending.type in preserved_types:
            continue
        self.remove_in_progress_notification(pending)
def handler(self, data):
    '''Function to handle notification data as part of Callback URL handler.

    :param str data: data posted to Callback URL by connector.
    :return: nothing on success/ignored input, ``False`` for invalid input
    '''
    # Normalize input: accept a requests Response or a JSON string.
    if isinstance(data, r.models.Response):
        self.log.debug("data is request object = %s", str(data.content))
        data = data.content
    elif isinstance(data, str):
        self.log.info("data is json string with len %d", len(data))
        if len(data) == 0:
            self.log.warn("Handler received data of 0 length, exiting handler.")
            return
    else:
        self.log.error("Input is not valid request object or json string : %s" % str(data))
        return False
    try:
        data = json.loads(data)
        # Dispatch each known notification category to its callback.
        if 'async-responses' in data.keys():
            self.async_responses_callback(data)
        if 'notifications' in data.keys():
            self.notifications_callback(data)
        if 'registrations' in data.keys():
            self.registrations_callback(data)
        if 'reg-updates' in data.keys():
            self.reg_updates_callback(data)
        if 'de-registrations' in data.keys():
            self.de_registrations_callback(data)
        if 'registrations-expired' in data.keys():
            self.registrations_expired_callback(data)
    except Exception:
        # BUG FIX: a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception lets those propagate while
        # still logging any callback/JSON failure.
        self.log.error("handle router had an issue and threw an exception")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
def save_spectral_lines_ds9(rectwv_coeff, debugplot=0):
    """Save expected location of arc and OH airglow to ds9 region files.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.
    """
    # One region file per (line catalogue, geometry) combination.
    combinations = (
        ('arc', False, 'rawimage'),
        ('arc', True, 'rectified'),
        ('oh', False, 'rawimage'),
        ('oh', True, 'rectified'),
    )
    for spectral_lines, rectified, suffix in combinations:
        output = spectral_lines_to_ds9(rectwv_coeff=rectwv_coeff,
                                       spectral_lines=spectral_lines,
                                       rectified=rectified)
        filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg'
        if abs(debugplot) >= 10:
            print('>>> Saving: ', filename)
        save_ds9(output, filename)
def _handleBulletWidth ( bulletText , style , maxWidths ) : """work out bullet width and adjust maxWidths [ 0 ] if neccessary"""
if bulletText : if isinstance ( bulletText , basestring ) : bulletWidth = stringWidth ( bulletText , style . bulletFontName , style . bulletFontSize ) else : # it ' s a list of fragments bulletWidth = 0 for f in bulletText : bulletWidth = bulletWidth + stringWidth ( f . text , f . fontName , f . fontSize ) bulletRight = style . bulletIndent + bulletWidth + 0.6 * style . bulletFontSize indent = style . leftIndent + style . firstLineIndent if bulletRight > indent : # . . then it overruns , and we have less space available on line 1 maxWidths [ 0 ] -= ( bulletRight - indent )
def register_rp_hook(r, *args, **kwargs):
    """This is a requests hook to register RP automatically.

    You should not use this command manually, this is added automatically
    by the SDK.

    See requests documentation for details of the signature of this function.
    http://docs.python-requests.org/en/master/user/advanced/#event-hooks
    """
    # Only act on a 409 conflict, and only when an msrest session is
    # available to retry the request with.
    if r.status_code != 409 or 'msrest' not in kwargs:
        return
    rp_name = _check_rp_not_registered_err(r)
    if not rp_name:
        return
    session = kwargs['msrest']['session']
    url_prefix = _extract_subscription_url(r.request.url)
    if not _register_rp(session, url_prefix, rp_name):
        return
    req = r.request
    # Change the 'x-ms-client-request-id' otherwise the Azure endpoint
    # just returns the same 409 payload without looking at the actual query
    if 'x-ms-client-request-id' in req.headers:
        req.headers['x-ms-client-request-id'] = str(uuid.uuid1())
    return session.send(req)
def get_process_mapping():
    """Try to look up the process tree via Linux's /proc"""
    # Find our own controlling tty so we can keep only processes on it.
    with open('/proc/{0}/stat'.format(os.getpid())) as f:
        self_tty = f.read().split()[STAT_TTY]
    processes = {}
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        stat_path = '/proc/{0}/stat'.format(pid)
        cmdline_path = '/proc/{0}/cmdline'.format(pid)
        try:
            with open(stat_path) as fstat, open(cmdline_path) as fcmdline:
                # The comm field is parenthesized and may contain spaces,
                # so tokenize with a regex instead of a plain split.
                fields = re.findall(r'\(.+\)|\S+', fstat.read())
                cmd = fcmdline.read().split('\x00')[:-1]
        except IOError:
            # Process has disappeared - just ignore it.
            continue
        if fields[STAT_TTY] != self_tty:
            continue
        processes[pid] = Process(
            args=tuple(cmd), pid=pid, ppid=fields[STAT_PPID],
        )
    return processes
def filter_by(self, **kwargs):
    """Apply the given filtering criterion to a copy of this Query,
    using keyword expressions.
    """
    # Work on a copy so the original query is left untouched.
    clone = self._copy()
    clone.domain.extend(
        (field, '=', value) for field, value in kwargs.items()
    )
    return clone
def dict_values(src):
    """Recursively get values in dict.

    Unlike the builtin dict.values() function, this method will descend
    into nested dicts, returning all nested values.

    Arguments:
        src (dict): Source dict.

    Returns:
        list: List of values.
    """
    for value in src.values():
        if isinstance(value, dict):
            # Descend into nested dicts and yield their leaves.
            for nested in dict_values(value):
                yield nested
        else:
            yield value
def flightmode_colours():
    '''return mapping of flight mode to colours'''
    from MAVProxy.modules.lib.grapher import flightmode_colours as palette
    mapping = {}
    next_idx = 0
    for (mode, _t0, _t1) in flightmodes:
        if mode in mapping:
            continue
        # Assign palette colours round-robin, wrapping when exhausted.
        mapping[mode] = palette[next_idx]
        next_idx = (next_idx + 1) % len(palette)
    return mapping
def var(ctx, clear_target, clear_all):
    """Install variable data to /var/[lib,cache]/hfos"""
    instance_name = str(ctx.obj['instance'])
    install_var(instance_name, clear_target, clear_all)
def H_iso(x, params):
    """Isochrone Hamiltonian = -GM/(b+sqrt(b**2+(r-r0)**2))"""
    # params = (M, b, r0). NOTE(review): params[2] (r0) is unused here;
    # the commented-out line suggests r was once offset by it — confirm.
    # r = (np.sqrt(np.sum(x[:3]**2)) - params[2])**2
    r_sq = np.sum(x[:3] ** 2)
    kinetic = 0.5 * np.sum(x[3:] ** 2)
    potential = -Grav * params[0] / (params[1] + np.sqrt(params[1] ** 2 + r_sq))
    return kinetic + potential
def delete_api_service(self, name, **kwargs):
    """delete an APIService

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_api_service(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the APIService (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param V1DeleteOptions body:
    :param str dry_run: When present, indicates that modifications should
        not be persisted. Valid values are: - All: all dry run stages will
        be processed
    :param int grace_period_seconds: The duration in seconds before the
        object should be deleted. Must be a non-negative integer; zero
        means delete immediately. Defaults to a per object value if not
        specified.
    :param bool orphan_dependents: Deprecated: please use the
        PropagationPolicy. Should the dependent objects be orphaned.
        Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection
        will be performed. Acceptable values are: 'Orphan', 'Background',
        'Foreground'.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Request only the response body; the _with_http_info variant already
    # returns the request thread itself when async_req is set, so a single
    # call covers both the sync and async modes.
    kwargs['_return_http_data_only'] = True
    return self.delete_api_service_with_http_info(name, **kwargs)
def scope_choices(self, exclude_internal=True):
    """Return list of scope choices.

    :param exclude_internal: Exclude internal scopes or not.
        (Default: ``True``)
    :returns: A list of tuples (id, scope).
    """
    choices = []
    # Iterate in key order so the output is deterministic.
    for scope_id, scope in sorted(self.scopes.items()):
        if exclude_internal and scope.is_internal:
            continue
        choices.append((scope_id, scope))
    return choices
def parse_env_zones(self):
    '''returns a list of comma separated zones parsed from the GCE_ZONE
    environment variable. If provided, this will be used to filter the
    results of the grouped_instances call'''
    import csv
    raw = os.environ.get('GCE_ZONE', "")
    # Use the csv module so quoted zone names and leading spaces are
    # handled correctly; the input is a single "line".
    rows = list(csv.reader([raw], skipinitialspace=True))
    return list(rows[0])
def config(*args, **attrs):
    """Override configuration"""
    # Present as a repeatable KEY=VALUE option unless the caller says
    # otherwise.
    defaults = {"metavar": "KEY=VALUE", "multiple": True}
    for key, value in defaults.items():
        attrs.setdefault(key, value)
    return option(config, *args, **attrs)
def _save_rest_method ( self , method_name , api_name , version , method ) : """Store Rest api methods in a list for lookup at call time . The list is self . _ rest _ methods , a list of tuples : [ ( < compiled _ path > , < path _ pattern > , < method _ dict > ) , . . . ] where : < compiled _ path > is a compiled regex to match against the incoming URL < path _ pattern > is a string representing the original path pattern , checked on insertion to prevent duplicates . - and - < method _ dict > is a dict of httpMethod = > ( method _ name , method ) This structure is a bit complex , it supports use in two contexts : Creation time : - SaveRestMethod is called repeatedly , each method will have a path , which we want to be compiled for fast lookup at call time - We want to prevent duplicate incoming path patterns , so store the un - compiled path , not counting on a compiled regex being a stable comparison as it is not documented as being stable for this use . - Need to store the method that will be mapped at calltime . - Different methods may have the same path but different http method . Call time : - Quickly scan through the list attempting . match ( path ) on each compiled regex to find the path that matches . - When a path is matched , look up the API method from the request and get the method name and method config for the matching API method and method name . Args : method _ name : A string containing the name of the API method . api _ name : A string containing the name of the API . version : A string containing the version of the API . method : A dict containing the method descriptor ( as in the api config file ) ."""
path_pattern = '/' . join ( ( api_name , version , method . get ( 'path' , '' ) ) ) http_method = method . get ( 'httpMethod' , '' ) . lower ( ) for _ , path , methods in self . _rest_methods : if path == path_pattern : methods [ http_method ] = method_name , method break else : self . _rest_methods . append ( ( self . _compile_path_pattern ( path_pattern ) , path_pattern , { http_method : ( method_name , method ) } ) )
def ensure_outdir_exists(filepath):
    """Make dir to dump 'filepath' if that dir does not exist.

    :param filepath: path of file to dump
    """
    outdir = os.path.dirname(filepath)
    if outdir and not os.path.exists(outdir):
        LOGGER.debug("Making output dir: %s", outdir)
        try:
            os.makedirs(outdir)
        except OSError:
            # BUG FIX (TOCTOU): another process may create the directory
            # between the exists() check and makedirs(); only re-raise if
            # the directory is still missing.
            if not os.path.isdir(outdir):
                raise
def GetPackages(classification, visibility):
    """Gets a list of Blueprint Packages filtered by classification and
    visibility.

    https://t3n.zendesk.com/entries/20411357-Get-Packages

    :param classification: package type filter (System, Script, Software)
    :param visibility: package visibility filter (Public, Private, Shared)
    """
    payload = {
        'Classification': Blueprint.classification_stoi[classification],
        'Visibility': Blueprint.visibility_stoi[visibility],
    }
    r = clc.v1.API.Call('post', 'Blueprint/GetPackages', payload)
    # NOTE(review): implicitly returns None on a non-zero StatusCode —
    # presumably callers rely on that; confirm before changing.
    if int(r['StatusCode']) == 0:
        return r['Packages']
def _subArrayShape ( self ) : """Returns the shape of the sub - array . An empty tuple is returned for regular fields , which have no sub array ."""
fieldName = self . nodeName fieldDtype = self . _array . dtype . fields [ fieldName ] [ 0 ] return fieldDtype . shape
def get_clamav_conf(filename):
    """Initialize clamav configuration.

    Returns a ClamavConfig for an existing config file; otherwise logs a
    warning and returns None.
    """
    if not os.path.isfile(filename):
        log.warn(LOG_PLUGIN, "No ClamAV config file found at %r.", filename)
        return None
    return ClamavConfig(filename)
def get_namedtuple_factory(cls, field_mappings, name="Record"):
    """Gets a method that will convert a dictionary to a namedtuple, as
    defined by get_namedtuple(field_mappings).
    """
    record_type = cls.get_namedtuple(field_mappings, name)

    def factory(data):
        # Expand the dict into the namedtuple's keyword arguments.
        return record_type(**data)

    return factory
def fold_all(self):
    """Folds/unfolds all levels in the editor.

    Mirrors the classic Scintilla "fold all" behavior: the direction
    (fold vs. unfold) is decided by the expansion state of the first
    fold header found, then applied to every top-level fold point.
    """
    line_count = self.GetLineCount()
    expanding = True

    # find out if we are folding or unfolding: the first fold-header
    # line's current state decides the direction for the whole pass
    for line_num in range(line_count):
        if self.GetFoldLevel(line_num) & stc.STC_FOLDLEVELHEADERFLAG:
            expanding = not self.GetFoldExpanded(line_num)
            break

    line_num = 0
    while line_num < line_count:
        level = self.GetFoldLevel(line_num)
        # Only act on top-level fold headers (base fold level); nested
        # headers are handled recursively by expand()/HideLines().
        if level & stc.STC_FOLDLEVELHEADERFLAG and \
           (level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE:
            if expanding:
                self.SetFoldExpanded(line_num, True)
                # expand() returns the next line after the expanded block;
                # step back one because the loop increments below.
                line_num = self.expand(line_num, True)
                line_num = line_num - 1
            else:
                # Collapse: hide everything between the header and its
                # last child line.
                last_child = self.GetLastChild(line_num, -1)
                self.SetFoldExpanded(line_num, False)
                if last_child > line_num:
                    self.HideLines(line_num + 1, last_child)
        line_num = line_num + 1
def solve_filter(expr, vars):
    """Filter values on the LHS by evaluating RHS with each value.

    Returns any LHS values for which RHS evaluates to a true value.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def _matching_values():
        # Evaluate the RHS once per LHS value, in a scope where that
        # value is nested under the LHS expression.
        for candidate in repeated.getvalues(lhs_values):
            scoped = __nest_scope(expr.lhs, vars, candidate)
            if solve(expr.rhs, scoped).value:
                yield candidate

    return Result(repeated.lazy(_matching_values), ())
def getReffs(self, level: int = 1, subreference: CtsReference = None) -> CtsReferenceSet:
    """CtsReference available at a given level

    :param level: Depth required. If not set, should retrieve first
        encountered level (1 based)
    :param subreference: Subreference (optional)
    :returns: List of levels
    """
    if not subreference:
        # Default to this passage's own reference when available.
        if hasattr(self, "reference"):
            subreference = self.reference
    elif not isinstance(subreference, CtsReference):
        # Coerce a plain value (e.g. a string) into a CtsReference.
        subreference = CtsReference(subreference)
    return self.getValidReff(level=level, reference=subreference)
def _build_native_function_call(fn):
    """
    If fn can be interpreted and handled as a native function: i.e. fn is
    one of the extensions, or fn is a simple lambda closure using one of
    the extensions.

        fn = tc.extensions.add
        fn = lambda x: tc.extensions.add(5)

    Then, this returns a closure object, which describes the function call
    which can then be passed to C++.

    Returns a _Closure object on success, raises an exception on failure.

    Argument-list encoding used throughout: each entry of arglist is a
    2-element list [kind, payload] where kind 0 = lambda parameter
    (payload is the parameter index), kind 1 = captured value (payload is
    the value itself), and -1 marks a still-unfilled placeholder.
    """
    # See if fn is the native function itself
    native_function_name = _get_toolkit_function_name_from_function(fn)
    if native_function_name != "":
        # yup!
        # generate an "identity" argument list: each parameter maps
        # straight through to the corresponding lambda argument
        argnames = _get_argument_list_from_toolkit_function_name(native_function_name)
        arglist = [[0, i] for i in range(len(argnames))]
        return _Closure(native_function_name, arglist)

    # ok. its not a native function
    from .util.lambda_closure_capture import translate
    from .util.lambda_closure_capture import Parameter

    # Lets see if it is a simple lambda
    capture = translate(fn)

    # ok. build up the closure arguments
    # Try to pick up the lambda
    function = _descend_namespace(capture.caller_globals, capture.closure_fn_name)
    native_function_name = _get_toolkit_function_name_from_function(function)
    if native_function_name == "":
        raise RuntimeError("Lambda does not contain a native function")
    argnames = _get_argument_list_from_toolkit_function_name(native_function_name)

    # ok. build up the argument list. this is mildly annoying due to the mix of
    # positional and named arguments
    # make an argument list with a placeholder for everything first
    arglist = [[-1, i] for i in argnames]
    # loop through the positional arguments
    for i in range(len(capture.positional_args)):
        arg = capture.positional_args[i]
        if type(arg) is Parameter:
            # This is a lambda argument
            # arg.name is the actual string of the argument
            # here we need the index
            arglist[i] = [0, capture.input_arg_names.index(arg.name)]
        else:
            # this is a captured value
            arglist[i] = [1, arg]
    # now. the named arguments are somewhat annoying: they are placed by
    # looking up the parameter's position in the native argument list
    for i in capture.named_args:
        arg = capture.named_args[i]
        if type(arg) is Parameter:
            # This is a lambda argument
            # arg.name is the actual string of the argument
            # here we need the index
            arglist[argnames.index(i)] = [0, capture.input_arg_names.index(arg.name)]
        else:
            # this is a captured value
            arglist[argnames.index(i)] = [1, arg]

    # done. Make sure all arguments are filled
    for i in arglist:
        if i[0] == -1:
            raise RuntimeError("Incomplete function specification")

    # attempt to recursively break down any other functions: captured
    # values that are themselves functions become nested closures where
    # possible (best-effort; failures leave the raw function in place)
    import inspect
    for i in range(len(arglist)):
        if arglist[i][0] == 1 and inspect.isfunction(arglist[i][1]):
            try:
                arglist[i][1] = _build_native_function_call(arglist[i][1])
            except:
                pass
    return _Closure(native_function_name, arglist)
def list_namespaced_secret(self, namespace, **kwargs):
    """List or watch objects of kind Secret.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result:

    >>> thread = api.list_namespaced_secret(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute the request asynchronously.
    :param str namespace: object name and auth scope, such as for teams
        and projects (required).
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continuation token from a previous chunked list
        call with identical query parameters.
    :param str field_selector: restrict returned objects by their fields.
    :param str label_selector: restrict returned objects by their labels.
    :param int limit: maximum number of responses for a list call; the
        server sets ``continue`` on the list metadata when more items exist.
    :param str resource_version: only show changes after this resource
        version (watch), or control read freshness (list).
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream add/update/remove notifications instead of
        returning a single list; specify ``resource_version``.
    :return: V1SecretList, or the request thread when called asynchronously.
    """
    # Callers of this convenience wrapper only want the deserialized body,
    # not the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With async_req=True the *_with_http_info variant already returns the
    # request thread; otherwise it returns the deserialized object. Either
    # way its result is what this wrapper hands back.
    result = self.list_namespaced_secret_with_http_info(namespace, **kwargs)
    return result
def keep_recent_datasets(max_dataset_history, info=None):
    """Keep track of the most recent recordings.

    Parameters
    ----------
    max_dataset_history : int
        maximum number of datasets to remember
    info : object with a ``filename`` attribute, optional
        dataset to record; when omitted (or when ``info.filename`` is
        None) the stored history is returned unchanged.

    Returns
    -------
    list of str or None
        paths to most recent datasets when no new dataset is given,
        otherwise None (the updated history is persisted via ``settings``).
    """
    history = settings.value('recent_recordings', [])
    # The settings backend returns a bare str when only one entry is stored.
    if isinstance(history, str):
        history = [history]

    if info is not None and info.filename is not None:
        new_dataset = info.filename

        if new_dataset in history:
            lg.debug(new_dataset + ' already present, will be replaced')
            history.remove(new_dataset)
        # BUG FIX: trim with >= (was >), so that after the insert below the
        # history never exceeds max_dataset_history entries; the old check
        # allowed the list to grow to max_dataset_history + 1.
        while len(history) >= max_dataset_history:
            lg.debug('Removing last dataset ' + history[-1])
            history.pop()

        lg.debug('Adding ' + new_dataset + ' to list of recent datasets')
        history.insert(0, new_dataset)

        settings.setValue('recent_recordings', history)
        return None
    else:
        return history
def _execute ( self , stmt , * values ) : """Gets a cursor , executes ` stmt ` and closes the cursor , fetching one row afterwards and returning its result ."""
c = self . _cursor ( ) try : return c . execute ( stmt , values ) . fetchone ( ) finally : c . close ( )
def call_proxy(self, engine, payload, method, analyze_json_error_param,
               retry_request_substr_variants, stream=False):
    """Forward a request to the API proxy.

    :param engine: target system/engine
    :param payload: request payload
    :param method: one of native_call | tsv | json_newline
    :param analyze_json_error_param: whether to inspect the ``error``
        parameter of the proxy response
    :param retry_request_substr_variants: substrings whose presence in the
        response triggers a retry of the request
    :param stream: whether to stream the response
    :return: the proxy response
    """
    # Thin delegation: retries and error analysis live in the private helper.
    return self.__api_proxy_call(engine, payload, method,
                                 analyze_json_error_param,
                                 retry_request_substr_variants, stream)
def AddArguments(cls, argument_group):
  """Adds command line arguments to an argument group.

  This function takes an argument parser or an argument group object and
  adds to it all the command line arguments this helper supports.

  Args:
    argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
        argparse group.
  """
  argument_group.add_argument(
      '--analysis', metavar='PLUGIN_LIST', dest='analysis_plugins',
      default='', action='store', type=str, help=(
          'A comma separated list of analysis plugin names to be loaded '
          'or "--analysis list" to see a list of available plugins.'))

  # Peek at the raw command line to discover which analysis plugins were
  # requested, so that their plugin-specific arguments can be registered
  # on the same group before the actual parse happens.
  raw_arguments = sys.argv[1:]
  plugin_names = None
  if '--analysis' in raw_arguments:
    value_index = raw_arguments.index('--analysis') + 1
    if 0 < value_index < len(raw_arguments):
      plugin_names = [
          name.strip() for name in raw_arguments[value_index].split(',')]

  if plugin_names and plugin_names != ['list']:
    manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, category='analysis', names=plugin_names)
def get_service_details(self, service_id):
    """Get the details of an individual service and return a ServiceDetails
    instance.

    Positional arguments:
    service_id: A Darwin LDB service id

    Raises:
    WebServiceError: when the underlying SOAP call fails.
    """
    service_query = self._soap_client.service['LDBServiceSoap']['GetServiceDetails']
    try:
        soap_response = service_query(serviceID=service_id)
    except WebFault as exc:
        # Chain the original SOAP fault so the root cause is preserved in
        # the traceback (previously the WebFault was silently discarded).
        raise WebServiceError from exc
    return ServiceDetails(soap_response)