def targets(tgt, tgt_type='glob', **kwargs):
    '''
    Return the targets from the flat yaml file, checks opts for location but
    defaults to /etc/salt/roster
    '''
    template = get_roster_file(__opts__)
    rend = salt.loader.render(__opts__, {})
    raw = compile_template(template,
                           rend,
                           __opts__['renderer'],
                           __opts__['renderer_blacklist'],
                           __opts__['renderer_whitelist'],
                           mask_value='passw*',
                           **kwargs)
    conditioned_raw = {}
    for minion in raw:
        conditioned_raw[six.text_type(minion)] = salt.config.apply_sdb(raw[minion])
    return __utils__['roster_matcher.targets'](conditioned_raw, tgt, tgt_type, 'ipv4')

def _base64_encode(self, string_to_encode):
    """Base64-encodes a string, with either Python 2 or 3.

    :param string_to_encode: the string to encode
    """
    try:
        # Python 2
        return base64.b64encode(string_to_encode)
    except TypeError:
        # Python 3
        encoding = sys.getdefaultencoding()
        base64_bytes = base64.b64encode(bytes(string_to_encode, encoding))
        return base64_bytes.decode(encoding)

def enumerate(self, **kwargs):
    '''
    Iterate through all possible sequences (lists).

    By default, stops after 50 items have been yielded. This limit can be
    changed by supplying a different value via the ``max_enumerate`` kwarg.
    '''
    for item in self.set.enumerate(**kwargs):
        yield flattened(item)

def enable_mac(self, app=None):
    """Enable event loop integration with MacOSX.

    We call pyplot.pause, which updates and displays the active figure
    during the pause. It is not MacOSX-specific, but it lets us avoid input
    hooks in the native MacOSX backend. We also should not import pyplot
    until the user does, because the backend can only be chosen before
    pyplot is imported for the first time.
    """
    def inputhook_mac(app=None):
        if self.pyplot_imported:
            pyplot = sys.modules['matplotlib.pyplot']
            try:
                pyplot.pause(0.01)
            except Exception:
                # Never let a backend error escape the input hook.
                pass
        else:
            if 'matplotlib.pyplot' in sys.modules:
                self.pyplot_imported = True

    self.set_inputhook(inputhook_mac)
    self._current_gui = GUI_OSX

def _disconnect_locked(self):
    """Closes the current connection. Assumes self._lock is held."""
    self._connected = False
    self._connect_cond.notify_all()
    self._telnet = None
    _LOGGER.warning("Disconnected")

def gas_liquid_viscosity(x, mul, mug, rhol=None, rhog=None, Method=None,
                         AvailableMethods=False):
    r'''This function handles the calculation of two-phase liquid-gas
    viscosity. Six calculation methods are available; three of them require
    only `x`, `mul`, and `mug`; the other three require `rhol` and `rhog` as
    well.

    The 'McAdams' method will be used if no method is specified.
    The full list of correlations can be obtained with the `AvailableMethods`
    flag.

    **ALL OF THESE METHODS ARE ONLY SUGGESTED DEFINITIONS, POTENTIALLY
    USEFUL FOR EMPIRICAL WORK ONLY!**

    Parameters
    ----------
    x : float
        Quality of fluid, [-]
    mul : float
        Viscosity of liquid, [Pa*s]
    mug : float
        Viscosity of gas, [Pa*s]
    rhol : float, optional
        Liquid density, [kg/m^3]
    rhog : float, optional
        Gas density, [kg/m^3]

    Returns
    -------
    mu_lg : float
        Liquid-gas viscosity (**a suggested definition, potentially useful
        for empirical work only!**) [Pa*s]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to calculate two-phase liquid-gas
        viscosity with the given inputs.

    Other Parameters
    ----------------
    Method : string, optional
        A string of the function name to use, as in the dictionary
        liquid_gas_viscosity_correlations.
    AvailableMethods : bool, optional
        If True, function will consider which methods can be used to
        calculate two-phase liquid-gas viscosity with the given inputs and
        return them as a list instead of performing a calculation.

    Notes
    -----
    All of these models converge to the liquid or gas viscosity as the
    quality approaches either limit. Other definitions have been proposed,
    such as using only the liquid viscosity. These values cannot simply be
    plugged into single-phase correlations!

    Examples
    --------
    >>> gas_liquid_viscosity(x=0.4, mul=1E-3, mug=1E-5, rhol=850, rhog=1.2,
    ... Method='Duckler')
    1.2092040385066917e-05
    >>> gas_liquid_viscosity(x=0.4, mul=1E-3, mug=1E-5)
    2.4630541871921184e-05
    '''
    def list_methods():
        methods = ['McAdams', 'Cicchitti', 'Lin Kwok']
        if rhol is not None and rhog is not None:
            methods = list(liquid_gas_viscosity_correlations.keys())
        return methods

    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = 'McAdams'

    if Method in liquid_gas_viscosity_correlations:
        f, i = liquid_gas_viscosity_correlations[Method]
        if i == 0:
            return f(x, mul, mug)
        elif i == 1:
            return f(x, mul, mug, rhol=rhol, rhog=rhog)
    else:
        raise Exception('Method not recognized; available methods are %s'
                        % list(liquid_gas_viscosity_correlations.keys()))

def set(ctx, key, value):
    """Set configuration parameters"""
    if key == "default_account" and value[0] == "@":
        value = value[1:]
    ctx.blockchain.config[key] = value

def ctime(self, fpath=None):
    """
    Returns:
        str: strftime-formatted ctime (creation time) of fpath
    """
    return dtformat(
        datetime.datetime.utcfromtimestamp(
            os.path.getctime(fpath or self.fpath)))

def get_dict_of_all_args(self):
    """Generates a dictionary from a handler path's query string and returns it

    :returns: Dictionary of all key/values in the arguments list
    :rtype: dict
    """
    dictionary = {}
    for arg in [arg for arg in self.request.arguments
                if arg not in self.settings.get("reserved_query_string_params", [])]:
        val = self.get_argument(arg, default=None)
        if val:
            dictionary[arg] = val
    return dictionary

def clear_created_date(self):
    """Removes the created date.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.assessment.AssessmentOfferedForm.clear_start_time_template
    if (self.get_created_date_metadata().is_read_only() or
            self.get_created_date_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['createdDate'] = self._created_date_default

def set(self, data, path=KISSmetrics.SET_PATH, resp=False):
    """Set the properties provided in `data` for the identity.

    :param data: key-value pairs to associate with the identity
    :type data: dict
    :param path: endpoint path; defaults to ``KISSmetrics.SET_PATH``
    :param resp: indicate whether to return the response
    :type resp: boolean
    :returns: an HTTP response for the request if `resp=True`
    :rtype: `urllib3.response.HTTPResponse`
    :raises: Exception if either `identity` or `key` is not set
    """
    self.check_id_key()
    timestamp = None
    response = self.client.set(person=self.identity, properties=data,
                               timestamp=timestamp, path=path)
    if resp:
        return response

def tee(iterable, n=2):
    """Return n independent iterators from a single iterable.

    Once tee() has made a split, the original iterable should not be used
    anywhere else; otherwise, the iterable could get advanced without the
    tee objects being informed.

    This itertool may require significant auxiliary storage (depending on
    how much temporary data needs to be stored). In general, if one iterator
    uses most or all of the data before another iterator starts, it is
    faster to use list() instead of tee().
    """
    tees = tuple(AsyncTeeIterable(iterable) for _ in range(n))
    for tee in tees:
        tee._siblings = tees
    return tees

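# Usage sketch (not from the source): this mirrors itertools.tee, but the
# returned iterators are async (AsyncTeeIterable), so each copy would
# presumably be consumed with `async for`.
#
#   a, b = tee(source, n=2)
#   # async for item in a: ...
#   # async for item in b: ...
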
def extract_keywords(func):
    """Parses the keywords from the given function.

    :param func | <function>
    """
    if hasattr(func, 'im_func'):
        func = func.im_func
    try:
        return func.func_code.co_varnames[-len(func.func_defaults):]
    except (TypeError, ValueError, IndexError):
        return tuple()

def sample_train_batch(self):
    """Sample a training batch (data and label)."""
    batch = []
    labels = []
    num_groups = self.batch_size // self.batch_k

    # For CUB200, we use the first 100 classes for training.
    sampled_classes = np.random.choice(100, num_groups, replace=False)
    for i in range(num_groups):
        img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
                                      self.batch_k,
                                      replace=False)
        batch += [self.get_image(img_fname, is_train=True)
                  for img_fname in img_fnames]
        labels += [sampled_classes[i] for _ in range(self.batch_k)]

    return nd.concatenate(batch, axis=0), labels

def on_success(self, retval, task_id, args, kwargs):
    """Store results in the backend even if we're always eager.

    This ensures the `delay_or_run` calls always at least have results.
    """
    if self.request.is_eager:
        # Store the result because celery wouldn't otherwise.
        self.update_state(task_id, SUCCESS, retval)

def present(
        name,
        user,
        enc='ssh-rsa',
        comment='',
        source='',
        options=None,
        config='.ssh/authorized_keys',
        fingerprint_hash_type=None,
        **kwargs):
    '''
    Verifies that the specified SSH key is present for the specified user

    name
        The SSH key to manage

    user
        The user who owns the SSH authorized keys file to modify

    enc
        Defines what type of key is being used; can be ed25519, ecdsa,
        ssh-rsa or ssh-dss

    comment
        The comment to be placed with the SSH public key

    source
        The source file for the key(s). Can contain any number of public
        keys, in standard "authorized_keys" format. If this is set, comment
        and enc will be ignored.

        .. note::
            The source file must contain keys in the format ``<enc> <key>
            <comment>``. If you have generated a keypair using PuTTYgen,
            then you will need to do the following to retrieve an
            OpenSSH-compatible public key.

            1. In PuTTYgen, click ``Load``, and select the *private* key
               file (not the public key), and click ``Open``.
            2. Copy the public key from the box labeled ``Public key for
               pasting into OpenSSH authorized_keys file``.
            3. Paste it into a new file.

    options
        The options passed to the key, pass a list object

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/authorized_keys". Token expansion %u
        and %h for username and home path supported.

    fingerprint_hash_type
        The public key fingerprint hash type that the public key fingerprint
        was originally hashed with. This defaults to ``sha256`` if not
        specified.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if source == '':
        # check if this is of form {options} {enc} {key} {comment}
        sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
        fullkey = sshre.search(name)
        # if it is {key} [comment]
        if not fullkey:
            key_and_comment = name.split(None, 1)
            name = key_and_comment[0]
            if len(key_and_comment) == 2:
                comment = key_and_comment[1]
        else:
            # if there are options, set them
            if fullkey.group(1):
                options = fullkey.group(1).split(',')
            # key is of format: {enc} {key} [comment]
            comps = fullkey.group(2).split(None, 2)
            enc = comps[0]
            name = comps[1]
            if len(comps) == 3:
                comment = comps[2]

    if __opts__['test']:
        ret['result'], ret['comment'] = _present_test(
            user,
            name,
            enc,
            comment,
            options or [],
            source,
            config,
            fingerprint_hash_type)
        return ret

    # Get only the path to the file without env references to check if it exists
    if source != '':
        source_path = __salt__['cp.get_url'](source, None, saltenv=__env__)

    if source != '' and not source_path:
        data = 'no key'
    elif source != '' and source_path:
        key = __salt__['cp.get_file_str'](source, saltenv=__env__)
        filehasoptions = False
        # check if this is of form {options} {enc} {key} {comment}
        sshre = re.compile(r'^(ssh\-|ecds).*')
        key = key.rstrip().split('\n')
        for keyline in key:
            filehasoptions = sshre.match(keyline)
            if not filehasoptions:
                data = __salt__['ssh.set_auth_key_from_file'](
                    user,
                    source,
                    config=config,
                    saltenv=__env__,
                    fingerprint_hash_type=fingerprint_hash_type)
            else:
                # Split keyline to get key and comment
                keyline = keyline.split(' ')
                key_type = keyline[0]
                key_value = keyline[1]
                key_comment = keyline[2] if len(keyline) > 2 else ''
                data = __salt__['ssh.set_auth_key'](
                    user,
                    key_value,
                    enc=key_type,
                    comment=key_comment,
                    options=options or [],
                    config=config,
                    fingerprint_hash_type=fingerprint_hash_type)
    else:
        data = __salt__['ssh.set_auth_key'](
            user,
            name,
            enc=enc,
            comment=comment,
            options=options or [],
            config=config,
            fingerprint_hash_type=fingerprint_hash_type)

    if data == 'replace':
        ret['changes'][name] = 'Updated'
        ret['comment'] = ('The authorized host key {0} for user {1} was '
                          'updated'.format(name, user))
        return ret
    elif data == 'no change':
        ret['comment'] = ('The authorized host key {0} is already present '
                          'for user {1}'.format(name, user))
    elif data == 'new':
        ret['changes'][name] = 'New'
        ret['comment'] = ('The authorized host key {0} for user {1} was '
                          'added'.format(name, user))
    elif data == 'no key':
        ret['result'] = False
        ret['comment'] = ('Failed to add the ssh key. Source file {0} is '
                          'missing'.format(source))
    elif data == 'fail':
        ret['result'] = False
        err = sys.modules[
            __salt__['test.ping'].__module__
        ].__context__.pop('ssh_auth.error', None)
        if err:
            ret['comment'] = err
        else:
            ret['comment'] = ('Failed to add the ssh key. Is the home '
                              'directory available, and/or does the key file '
                              'exist?')
    elif data == 'invalid' or data == 'Invalid public key':
        ret['result'] = False
        ret['comment'] = ('Invalid public ssh key, most likely has spaces '
                          'or invalid syntax')

    return ret

def from_name(cls, name, all_fallback=True):
    """Gets a vocation filter from a vocation's name.

    Parameters
    ----------
    name: :class:`str`
        The name of the vocation.
    all_fallback: :class:`bool`
        Whether to return :py:attr:`ALL` if no match is found. Otherwise,
        ``None`` will be returned.

    Returns
    -------
    VocationFilter, optional:
        The matching vocation filter.
    """
    name = name.upper()
    for vocation in cls:  # type: VocationFilter
        # Parentheses added: without them `and` binds tighter than `or`,
        # so ALL could be returned before the fallback check below.
        if ((vocation.name in name or vocation.name[:-1] in name)
                and vocation != cls.ALL):
            return vocation
    if all_fallback or name == "ALL":
        return cls.ALL
    return None

def do_set(self, line):
    """set [parameter [value]]

    set (without parameters): Display the value of all session variables.
    set <session variable>: Display the value of a single session variable.
    set <session variable> <value>: Set the value of a session variable.
    """
    session_parameter, value = self._split_args(line, 0, 2)
    if value is None:
        self._command_processor.get_session().print_variable(session_parameter)
    else:
        self._command_processor.get_session().set_with_conversion(
            session_parameter, value)
        self._print_info_if_verbose(
            'Set session variable {} to "{}"'.format(session_parameter, value))

def random_like(ary=None, shape=None, dtype=None):
    """Returns a random array of the same shape and type as the supplied
    array argument, or of the supplied shape and dtype."""
    if ary is not None:
        shape, dtype = ary.shape, ary.dtype
    elif shape is None or dtype is None:
        raise ValueError(('random_like(ary, shape, dtype) must be supplied '
                          'with either an array argument, or the shape and dtype '
                          'of the desired random array.'))

    if np.issubdtype(dtype, np.complexfloating):
        return (np.random.random(size=shape) +
                np.random.random(size=shape) * 1j).astype(dtype)
    else:
        return np.random.random(size=shape).astype(dtype)

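# Usage sketch (not from the source), with numpy imported as np:
#
#   a = random_like(np.zeros((4, 4), dtype=np.complex64))  # match an array
#   b = random_like(shape=(2, 3), dtype=np.float32)        # explicit shape/dtype
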
def fail_to(future):
    """A decorator for function callbacks to catch uncaught non-async
    exceptions and forward them to the given future.

    The primary use for this is to catch exceptions in async callbacks and
    propagate them to futures. For example, consider,

    .. code-block:: python

        answer = Future()

        def on_done(future):
            foo = bar()
            answer.set_result(foo)

        some_async_operation().add_done_callback(on_done)

    If ``bar()`` fails, ``answer`` will never get filled with an exception
    or a result. Now if we change ``on_done`` to,

    .. code-block:: python

        @fail_to(answer)
        def on_done(future):
            foo = bar()
            answer.set_result(foo)

    Uncaught exceptions in ``on_done`` will be caught and propagated to
    ``answer``. Note that ``on_done`` will return None if an exception was
    caught.

    :param future: Future to which the result will be written.
    """
    assert is_future(future), 'you forgot to pass a future'

    def decorator(f):
        @wraps(f)
        def new_f(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception:
                future.set_exc_info(sys.exc_info())
        return new_f

    return decorator

def nearby_faces(mesh, points):
    """
    For each point find nearby faces relatively quickly.

    The closest point on the mesh to the queried point is guaranteed to be
    on one of the faces listed.

    Does this by finding the nearest vertex on the mesh to each point, and
    then returns all the faces that intersect the axis aligned bounding box
    centered at the queried point and extending to the nearest vertex.

    Parameters
    ----------
    mesh : Trimesh object
    points : (n, 3) float, points in space

    Returns
    -------
    candidates : (points,) int, sequence of indexes for mesh.faces
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')

    # an r-tree containing the axis aligned bounding box for every triangle
    rtree = mesh.triangles_tree
    # a kd-tree containing every vertex of the mesh
    kdtree = mesh.kdtree

    # query the distance to the nearest vertex to get the AABB of a sphere
    distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
    distance_vertex += tol.merge

    # axis aligned bounds
    bounds = np.column_stack((points - distance_vertex,
                              points + distance_vertex))

    # faces that intersect the axis aligned bounding box
    candidates = [list(rtree.intersection(b)) for b in bounds]

    return candidates

def set_problem_feedback(feedback, problem_id, append=False):
    """Set problem-specific feedback"""
    rdict = load_feedback()
    if 'problems' not in rdict:
        rdict['problems'] = {}
    cur_val = rdict['problems'].get(problem_id, '')
    if type(cur_val) == str:
        rdict['problems'][problem_id] = (cur_val + feedback) if append else feedback
    else:
        # cur_val is a two-element list; only the second element is
        # appended to or replaced
        rdict['problems'][problem_id] = [
            cur_val[0],
            (cur_val[1] + feedback) if append else feedback]
    save_feedback(rdict)

def adjustPitchmarkers(self):
    '''Adjusts the location and orientation of pitch markers.'''
    pitchdiff = self.dist10deg * (self.pitch / 10.0)
    rollRotate = mpl.transforms.Affine2D().rotate_deg_around(
        0.0, -pitchdiff, self.roll) + self.axes.transData
    j = 0
    for i in [-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
        width = self.calcPitchMarkerWidth(i)
        self.pitchPatches[j].set_xy(
            (-width / 2.0, self.dist10deg * i - (self.thick / 2.0) - pitchdiff))
        self.pitchPatches[j].set_transform(rollRotate)
        j += 1
    # Adjust text size and rotation
    i = 0
    for j in [-9, -6, -3, 3, 6, 9]:
        self.pitchLabelsLeft[i].set_y(j * self.dist10deg - pitchdiff)
        self.pitchLabelsRight[i].set_y(j * self.dist10deg - pitchdiff)
        self.pitchLabelsLeft[i].set_size(self.fontSize)
        self.pitchLabelsRight[i].set_size(self.fontSize)
        self.pitchLabelsLeft[i].set_rotation(self.roll)
        self.pitchLabelsRight[i].set_rotation(self.roll)
        self.pitchLabelsLeft[i].set_transform(rollRotate)
        self.pitchLabelsRight[i].set_transform(rollRotate)
        i += 1

def _get_entity_prop(self, entity, prop):
    """Returns a Wikidata entity property value"""
    variant = self.params.get('variant')
    lang = self.params.get('lang')
    if entity.get(prop):
        ent = entity[prop]
        try:
            return ent[variant or lang].get('value')
        except AttributeError:
            return ent.get('value')

def isdir(self, path):
    """Return `True` if a directory at `path` exists, False otherwise."""
    try:
        self.remote_context.check_output(["test", "-d", path])
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:
            return False
        else:
            raise
    return True

def get_default_config(self):
    """Returns the default collector settings"""
    config = super(EntropyStatCollector, self).get_default_config()
    config.update({
        'path': 'entropy'
    })
    return config

def compare(dicts):
    """Compare dicts by iteration: return the keys common to all dicts,
    each mapped to the intersection of its values across the dicts."""
    common_members = {}
    common_keys = reduce(lambda x, y: x & y, map(dict.keys, dicts))
    for k in common_keys:
        common_members[k] = list(
            reduce(lambda x, y: x & y, [set(d[k]) for d in dicts]))
    return common_members

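# Usage sketch (not from the source): assumes functools.reduce is in scope
# and the dict values are iterables (Python 3, where dict keys views
# support `&`):
#
#   compare([{'a': [1, 2], 'b': [3]}, {'a': [2, 4], 'c': [5]}])
#   # -> {'a': [2]}
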
def antonym(phrase, format="json"):
    """Queries the bighugelabs API for the antonym. The results include
     - "syn" (synonym)
     - "ant" (antonym)
     - "rel" (related terms)
     - "sim" (similar terms)
     - "usr" (user suggestions)

    But currently parsing only the antonym, as I have already done
    synonym (using the glosbe API).

    :param phrase: word for which antonym is to be found
    :param format: response structure type. Defaults to: "json"

    :returns: returns a json object
    :raises KeyError: returns False when no antonyms are found
    """
    base_url = Vocabulary.__get_api_link("bighugelabs")
    url = base_url.format(word=phrase)
    json_obj = Vocabulary.__return_json(url)

    if not json_obj:
        return False

    result = []
    visited = {}
    idx = 0
    for key in json_obj.keys():
        antonyms = json_obj[key].get('ant', False)
        if not antonyms:
            continue
        for antonym in antonyms:
            if visited.get(antonym, False):
                continue
            result.append({'seq': idx, 'text': antonym})
            idx += 1
            visited[antonym] = True

    if not result:
        return False

    return Response().respond(result, format)

def scheduled(self) -> ScheduledTxsAggregate:
    """Scheduled Transactions"""
    if not self.__scheduled_tx_aggregate:
        self.__scheduled_tx_aggregate = ScheduledTxsAggregate(self.book)
    return self.__scheduled_tx_aggregate

def _run_internal(self,
                  context,
                  pipeline_key,
                  root_pipeline_key,
                  caller_output):
    """Used by the Pipeline evaluator to execute this Pipeline."""
    self._set_values_internal(
        context, pipeline_key, root_pipeline_key, caller_output,
        _PipelineRecord.RUN)
    logging.debug('Running %s(*%s, **%s)#%s',
                  self._class_path, _short_repr(self.args),
                  _short_repr(self.kwargs), self._pipeline_key.name())
    return self.run(*self.args, **self.kwargs)

def add_warning(self, s, tag=None):
    """Add a warning string."""
    item = (tag, s)
    if (item not in self.warnings and
            tag not in self.aggregate.config["ignorewarnings"]):
        self.warnings.append(item)

def rna_transcript_expression_dict_from_args(args):
    """
    Returns a dictionary mapping Ensembl transcript IDs to FPKM expression
    values, or None if neither a Cufflinks tracking file nor a StringTie GTF
    file was specified.
    """
    if args.rna_transcript_fpkm_tracking_file:
        return load_cufflinks_fpkm_dict(args.rna_transcript_fpkm_tracking_file)
    elif args.rna_transcript_fpkm_gtf_file:
        return load_transcript_fpkm_dict_from_gtf(args.rna_transcript_fpkm_gtf_file)
    else:
        return None

def _hide_loading_page(self):
    """Hide the animation shown while the kernel is loading."""
    self.infowidget.hide()
    self.shellwidget.show()
    self.info_page = self.blank_page
    self.set_info_page()
    self.shellwidget.sig_prompt_ready.disconnect(self._hide_loading_page)

def get_since_until(time_range: Optional[str] = None,
                    since: Optional[str] = None,
                    until: Optional[str] = None,
                    time_shift: Optional[str] = None,
                    relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
    """Return a `since` and `until` datetime tuple from string representations
    of time_range, since, until and time_shift.

    This function supports both reading the keys separately (from `since`
    and `until`), as well as the new `time_range` key. Valid formats are:

        - ISO 8601
        - X days/years/hours/day/year/weeks
        - X days/years/hours/day/year/weeks ago
        - X days/years/hours/day/year/weeks from now
        - freeform

    Additionally, for `time_range` (these specify both `since` and `until`):

        - Last day
        - Last week
        - Last month
        - Last quarter
        - Last year
        - No filter
        - Last X seconds/minutes/hours/days/weeks/months/years
        - Next X seconds/minutes/hours/days/weeks/months/years
    """
    separator = ' : '
    relative_end = parse_human_datetime(relative_end if relative_end else 'today')
    common_time_frames = {
        'Last day': (relative_end - relativedelta(days=1), relative_end),  # noqa: T400
        'Last week': (relative_end - relativedelta(weeks=1), relative_end),  # noqa: T400
        'Last month': (relative_end - relativedelta(months=1), relative_end),  # noqa: E501, T400
        'Last quarter': (relative_end - relativedelta(months=3), relative_end),  # noqa: E501, T400
        'Last year': (relative_end - relativedelta(years=1), relative_end),  # noqa: T400
    }

    if time_range:
        if separator in time_range:
            since, until = time_range.split(separator, 1)
            if since and since not in common_time_frames:
                since = add_ago_to_since(since)
            since = parse_human_datetime(since)
            until = parse_human_datetime(until)
        elif time_range in common_time_frames:
            since, until = common_time_frames[time_range]
        elif time_range == 'No filter':
            since = until = None
        else:
            rel, num, grain = time_range.split()
            if rel == 'Last':
                since = relative_end - relativedelta(**{grain: int(num)})  # noqa: T400
                until = relative_end
            else:  # rel == 'Next'
                since = relative_end
                until = relative_end + relativedelta(**{grain: int(num)})  # noqa: T400
    else:
        since = since or ''
        if since:
            since = add_ago_to_since(since)
        since = parse_human_datetime(since)
        until = parse_human_datetime(until) if until else relative_end

    if time_shift:
        time_shift = parse_human_timedelta(time_shift)
        since = since if since is None else (since - time_shift)  # noqa: T400
        until = until if until is None else (until - time_shift)  # noqa: T400

    if since and until and since > until:
        raise ValueError(_('From date cannot be larger than to date'))

    return since, until

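# Usage sketch (not from the source); the separator for explicit ranges is
# ' : ' with surrounding spaces, per the code above:
#
#   since, until = get_since_until('Last week')
#   since, until = get_since_until('100 years ago : now')
#   since, until = get_since_until('Next 5 days')
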
def main():
    """This demo shows the main features of the pythondialog Dialog class."""
    try:
        demo()
    except dialog.error as exc_instance:
        sys.stderr.write("Error:\n\n%s\n" % exc_instance.complete_message())
        sys.exit(1)
    sys.exit(0)

def count(args):
    """Count occurrences in a list of lists

    >>> count([['a', 'b'], ['a']])
    defaultdict(int, {'a': 2, 'b': 1})
    """
    counts = defaultdict(int)
    for arg in args:
        for item in arg:
            counts[item] = counts[item] + 1
    return counts

def get_snapshot_brok(self, snap_output, exit_status):
    """Create a snapshot (check_result type) brok

    :param snap_output: value of output
    :type snap_output: str
    :param exit_status: status of exit
    :type exit_status: integer
    :return: Brok object
    :rtype: alignak.Brok
    """
    data = {
        'uuid': self.uuid,
        'snapshot_output': snap_output,
        'snapshot_time': int(time.time()),
        'snapshot_exit_status': exit_status,
    }
    self.fill_data_brok_from(data, 'check_result')
    return Brok({'type': self.my_type + '_snapshot', 'data': data})

def needsEncoding(self, s):
    """Get whether string I{s} contains special characters.

    @param s: A string to check.
    @type s: str
    @return: True if needs encoding.
    @rtype: boolean
    """
    if isinstance(s, str):
        for c in self.special:
            if c in s:
                return True
    return False

def Deserialize(self, reader):
    """Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):
    """
    self.HashStart = reader.ReadSerializableArray('neocore.UInt256.UInt256')
    self.HashStop = reader.ReadUInt256()

def remove(self, path):
    """Remove the file at the given path. This only works on files; for
    removing folders (directories), use L{rmdir}.

    @param path: path (absolute or relative) of the file to remove
    @type path: str

    @raise IOError: if the path refers to a folder (directory)
    """
    path = self._adjust_cwd(path)
    self._log(DEBUG, 'remove(%r)' % path)
    self._request(CMD_REMOVE, path)

def _value_ref(self, column, value, *, dumped=False, inner=False):
    """inner=True uses column.typedef.inner_typedef instead of column.typedef"""
    ref = ":v{}".format(self.next_index)

    # Need to dump this value
    if not dumped:
        typedef = column.typedef
        for segment in path_of(column):
            typedef = typedef[segment]
        if inner:
            typedef = typedef.inner_typedef
        value = self.engine._dump(typedef, value)

    self.attr_values[ref] = value
    self.counts[ref] += 1
    return ref, value

def expand_dict(flat_dict, sep='_'):
    """Expand a flattened dictionary.

    :param dict flat_dict: a nested dictionary that has been flattened so
        the keys are composite
    :param str sep: the separator between concatenated keys
    :rtype: dict
    """
    res = {}
    rdict = defaultdict(list)
    for flat_key, value in flat_dict.items():
        key = flat_key.split(sep, 1)
        if 1 == len(key):
            res[key[0]] = value
        else:
            rdict[key[0]].append((key[1:], value))
    for k, v in rdict.items():
        res[k] = expand_dict({ik: iv for (ik,), iv in v})
    return res

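# Usage sketch (not from the source); the recursion handles arbitrarily
# deep nesting:
#
#   expand_dict({'a_b': 1, 'a_c_d': 2, 'e': 3})
#   # -> {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
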
def mean(data, units=False, time=False):
    """Function to compute the mean of data

    Parameters
    ----------
    data : numpy.ndarray
        1st axis unit, 2nd axis time
    units : bool
        Average over units
    time : bool
        Average over time

    Returns
    -------
    if units=False and time=False: error
    if units=True: 1 dim numpy.ndarray; time series
    if time=True: 1 dim numpy.ndarray; series of unit means across time
    if units=True and time=True: float; unit and time mean

    Examples
    --------
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
    array([ 2.5,  3.5,  4.5])
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
    array([ 2.,  5.])
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True, time=True)
    3.5
    """
    assert (units is not False or time is not False)
    if units is True and time is False:
        return np.mean(data, axis=0)
    elif units is False and time is True:
        return np.mean(data, axis=1)
    elif units is True and time is True:
        return np.mean(data)

def extract_pdf(file_name):
    """Extract text from a pdf file

    :param file_name: path to pdf to read
    :return: text from pdf
    """
    rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
    sio = StringIO()
    laparams = LAParams()
    device = TextConverter(rsrcmgr, sio, codec='utf-8', laparams=laparams)
    interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr, device)

    # Extract text from the pdf file
    with open(file_name, 'rb') as fp:
        for page in PDFPage.get_pages(fp, maxpages=20):
            interpreter.process_page(page)
        text = sio.getvalue()

    # Cleanup
    device.close()
    sio.close()

    return text

def _get_kernel_from_bayesian_model(self, model):
    """Computes the Gibbs transition models from a Bayesian Network.
    'Probabilistic Graphical Models: Principles and Techniques', Koller and
    Friedman, Section 12.3.3, pp. 512-513.

    Parameters
    ----------
    model : BayesianModel
        The model from which probabilities will be computed.
    """
    self.variables = np.array(model.nodes())
    self.cardinalities = {
        var: model.get_cpds(var).variable_card for var in self.variables}

    for var in self.variables:
        other_vars = [v for v in self.variables if var != v]
        other_cards = [self.cardinalities[v] for v in other_vars]
        cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
        prod_cpd = factor_product(*cpds)
        kernel = {}
        scope = set(prod_cpd.scope())
        for tup in itertools.product(*[range(card) for card in other_cards]):
            states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
            prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
            kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
        self.transition_models[var] = kernel

def dump(self, dump_file_name=None):  # pragma: no cover, never called
    # pylint: disable=unused-argument
    """Dump Item object properties

    :return: dictionary with properties
    :rtype: dict
    """
    dump = {}
    for prop in self.properties:
        if not hasattr(self, prop):
            continue
        attr = getattr(self, prop)
        if isinstance(attr, list) and attr and isinstance(attr[0], Item):
            dump[prop] = [i.dump() for i in attr]
        elif isinstance(attr, Item):
            dump[prop] = attr.dump()
        elif attr:
            dump[prop] = getattr(self, prop)
    return dump

def _is_contiguous(positions):
    """Given a non-empty list, does it consist of contiguous integers?"""
    previous = positions[0]
    for current in positions[1:]:
        if current != previous + 1:
            return False
        previous = current
    return True

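# Usage sketch (not from the source):
#
#   _is_contiguous([3, 4, 5])  # -> True
#   _is_contiguous([3, 5, 6])  # -> False
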
def trim_decimals(s, precision=-3):
    """Convert from scientific notation using precision"""
    encoded = s.encode('ascii', 'ignore')
    str_val = ""
    if six.PY3:
        str_val = str(encoded, encoding='ascii', errors='ignore')[:precision]
    else:
        # If precision is 0, this must be handled separately
        if precision == 0:
            str_val = str(encoded)
        else:
            str_val = str(encoded)[:precision]
    if len(str_val) > 0:
        return float(str_val)
    else:
        return 0

def init(*args, **kwargs):
    """Initializes the SDK and optionally integrations.

    This takes the same arguments as the client constructor.
    """
    global _initial_client
    client = Client(*args, **kwargs)
    Hub.current.bind_client(client)
    rv = _InitGuard(client)
    if client is not None:
        _initial_client = weakref.ref(client)
    return rv

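# Usage sketch (not from the source): this mirrors the sentry_sdk.init
# entry point; the DSN below is a placeholder, and the returned guard
# can be used to flush/close the client.
#
#   init(dsn="https://examplePublicKey@o0.ingest.example.invalid/0")
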
def get_all_last_24h_kline(self, _async=False):
    """Get the 24-hour summary for all symbols.

    :param _async:
    :return:
    """
    params = {}
    url = u.MARKET_URL + '/market/tickers'
    return http_get_request(url, params, _async=_async)

def sliding(self, size, step=1):
    """Groups elements in fixed size blocks by passing a sliding window over
    them.

    The last window has at least one element but may have fewer than size
    elements.

    :param size: size of sliding window
    :param step: step size between windows
    :return: sequence of sliding windows
    """
    return self._transform(transformations.sliding_t(_wrap, size, step))

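# Usage sketch (not from the source), assuming a PyFunctional-style `seq`
# factory for the sequence type this method lives on:
#
#   seq([1, 2, 3, 4]).sliding(2)          # -> [[1, 2], [2, 3], [3, 4]]
#   seq([1, 2, 3, 4]).sliding(2, step=2)  # -> [[1, 2], [3, 4]]
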
def _ConvertCollectionsCounterToDict(cls, collections_counter):
    """Converts a collections.Counter object into a JSON dictionary.

    The resulting dictionary of the JSON serialized objects consists of:
        '__type__': 'collections.Counter'

    Here '__type__' indicates the object base type. In this case
    'collections.Counter'. The rest of the elements of the dictionary make up
    the collections.Counter object attributes.

    Args:
        collections_counter (collections.Counter): counter.

    Returns:
        dict[str, object]: JSON serialized objects.

    Raises:
        TypeError: if not an instance of collections.Counter.
    """
    if not isinstance(collections_counter, collections.Counter):
        raise TypeError

    json_dict = {'__type__': 'collections.Counter'}
    for attribute_name, attribute_value in iter(collections_counter.items()):
        if attribute_value is None:
            continue
        if isinstance(attribute_value, py2to3.BYTES_TYPE):
            attribute_value = {
                '__type__': 'bytes',
                'stream': '{0:s}'.format(binascii.b2a_qp(attribute_value))
            }
        json_dict[attribute_name] = attribute_value

    return json_dict

def _validate_method_decoration(meta, class_):
    """Validate the usage of ``@override`` and ``@final`` modifiers
    on methods of the given ``class_``.
    """
    # TODO(xion): employ some code inspection tricks to serve ClassErrors
    # as if they were thrown at the offending class's/method's definition
    super_mro = class_.__mro__[1:]
    own_methods = ((name, member)
                   for name, member in class_.__dict__.items()
                   if is_method(member))

    # check that the ``@override`` modifier is present where it should be
    # and absent where it shouldn't (e.g. ``@final`` methods)
    for name, method in own_methods:
        shadowed_method, base_class = next(
            ((getattr(base, name), base)
             for base in super_mro if hasattr(base, name)),
            (None, None))
        if meta._is_override(method):
            # ``@override`` is legal only when the method actually shadows
            # a method from a superclass, and that method is not ``@final``
            if not shadowed_method:
                raise ClassError(
                    "unnecessary @override on %s.%s" % (class_.__name__, name),
                    class_=class_)
            if meta._is_final(shadowed_method):
                raise ClassError(
                    "illegal @override on a @final method %s.%s" % (
                        base_class.__name__, name),
                    class_=class_)

            # if @override had a parameter supplied, verify that it was
            # the same class as the base of the shadowed method
            override_base = meta._get_override_base(method)
            if override_base and base_class is not override_base:
                if is_class(override_base):
                    raise ClassError(
                        "incorrect override base: expected %s, got %s" % (
                            base_class.__name__, override_base.__name__))
                else:
                    raise ClassError(
                        "invalid override base specified: %s" % (
                            override_base,))

            setattr(class_, name, method.method)
        else:
            if shadowed_method and name not in meta.OVERRIDE_EXEMPTIONS:
                if meta._is_final(shadowed_method):
                    msg = "%s.%s is hiding a @final method %s.%s" % (
                        class_.__name__, name, base_class.__name__, name)
                else:
                    msg = ("overridden method %s.%s "
                           "must be marked with @override" % (
                               class_.__name__, name))
                raise ClassError(msg, class_=class_)

def config(p_path=None, p_overrides=None):
    """
    Retrieve the config instance.

    If a path is given, the instance is overwritten by the one that supplies
    an additional filename (for testability). Moreover, no other
    configuration files will be read when a path is given.

    Overrides will discard a setting in any configuration file and use the
    passed value instead. Structure: (section, option) => value
    The previous configuration instance will be discarded.
    """
    if not config.instance or p_path is not None or p_overrides is not None:
        try:
            config.instance = _Config(p_path, p_overrides)
        except configparser.ParsingError as perr:
            raise ConfigError(str(perr)) from perr

    return config.instance

def log_to_logger(fn):
    """Wrap a Bottle request so that a log line is emitted after it's
    handled."""
    @wraps(fn)
    def _log_to_logger(*args, **kwargs):
        actual_response = fn(*args, **kwargs)
        # modify this to log exactly what you need:
        logger.info('%s %s %s %s' % (bottle.request.remote_addr,
                                     bottle.request.method,
                                     bottle.request.url,
                                     bottle.response.status))
        return actual_response
    return _log_to_logger

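# Usage sketch (not from the source): Bottle accepts plain decorators as
# plugins, so this could wrap every route (assumes `app` and `logger`
# exist in the surrounding module):
#
#   app = bottle.Bottle()
#   app.install(log_to_logger)
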
def lipd_read(path):
    """Loads a LiPD file from a local path. Unzip, read, and process data.

    Steps: create tmp, unzip lipd, read files into memory, manipulate data,
    move to original dir, delete tmp.

    :param str path: Source path
    :return dict _j: Metadata
    """
    _j = {}
    dir_original = os.getcwd()

    # Import metadata into object
    try:
        print("reading: {}".format(print_filename(path)))
        # bigger than 2mb file? This could take a while
        if os.stat(path).st_size > 1000000:
            _size = os.stat(path).st_size
            print("{} :That's a big file! This may take a while to load..."
                  .format("{} MB".format(round(_size / 1000000, 2))))
        dir_tmp = create_tmp_dir()
        unzipper(path, dir_tmp)
        os.chdir(dir_tmp)
        _dir_data = find_files()
        os.chdir(_dir_data)
        _j = read_jsonld()
        _j = rm_empty_fields(_j)
        _j = check_dsn(path, _j)
        _j = update_lipd_version(_j)
        _j = idx_num_to_name(_j)
        _j = rm_empty_doi(_j)
        _j = rm_empty_fields(_j)
        _j = put_tsids(_j)
        _csvs = read_csvs()
        _j = merge_csv_metadata(_j, _csvs)
        # Why? Because we need to align the csv filenames with the table
        # filenames. We don't need the csv output here.
        _j, _csv = get_csv_from_metadata(_j["dataSetName"], _j)
        os.chdir(dir_original)
        shutil.rmtree(dir_tmp)
    except FileNotFoundError:
        print("Error: lipd_read: LiPD file not found. Please make sure the "
              "filename includes the .lpd extension")
    except Exception as e:
        logger_lipd.error("lipd_read: {}".format(e))
        print("Error: lipd_read: unable to read LiPD: {}".format(e))

    os.chdir(dir_original)
    logger_lipd.info("lipd_read: record loaded: {}".format(path))
    return _j

def validate_items(self):
    """Validates the items in the backing array, including performing type
    validation.

    Sets the _typed property and clears the dirty flag as a side effect.

    Returns:
        The typed array
    """
    logger.debug(fmt("Validating {}", self))
    from python_jsonschema_objects import classbuilder

    if self.__itemtype__ is None:
        return

    type_checks = self.__itemtype__
    if not isinstance(type_checks, (tuple, list)):
        # we were given items = {'type': 'blah'}; thus ensure the type for
        # all data.
        type_checks = [type_checks] * len(self.data)
    elif len(type_checks) > len(self.data):
        raise ValidationError(
            "{1} does not have sufficient elements to validate against {0}"
            .format(self.__itemtype__, self.data))

    typed_elems = []
    for elem, typ in zip(self.data, type_checks):
        if isinstance(typ, dict):
            for param, paramval in six.iteritems(typ):
                validator = registry(param)
                if validator is not None:
                    validator(paramval, elem, typ)
            typed_elems.append(elem)
        elif util.safe_issubclass(typ, classbuilder.LiteralValue):
            val = typ(elem)
            val.validate()
            typed_elems.append(val)
        elif util.safe_issubclass(typ, classbuilder.ProtocolBase):
            if not isinstance(elem, typ):
                try:
                    if isinstance(elem, (six.string_types,
                                         six.integer_types, float)):
                        val = typ(elem)
                    else:
                        val = typ(**util.coerce_for_expansion(elem))
                except TypeError as e:
                    raise ValidationError(
                        "'{0}' is not a valid value for '{1}': {2}"
                        .format(elem, typ, e))
            else:
                val = elem
            val.validate()
            typed_elems.append(val)
        elif util.safe_issubclass(typ, ArrayWrapper):
            val = typ(elem)
            val.validate()
            typed_elems.append(val)
        elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):
            try:
                if isinstance(elem, (six.string_types,
                                     six.integer_types, float)):
                    val = typ(elem)
                else:
                    val = typ(**util.coerce_for_expansion(elem))
            except TypeError as e:
                raise ValidationError(
                    "'{0}' is not a valid value for '{1}': {2}"
                    .format(elem, typ, e))
            else:
                val.validate()
                typed_elems.append(val)

    self._dirty = False
    self._typed = typed_elems
    return typed_elems

def register_column(self,
                    column,
                    expr,
                    deltas=None,
                    checkpoints=None,
                    odo_kwargs=None):
    """Explicitly map a single bound column to a collection of blaze
    expressions. The expressions need to have ``timestamp`` and ``as_of``
    columns.

    Parameters
    ----------
    column : BoundColumn
        The pipeline dataset to map to the given expressions.
    expr : Expr
        The baseline values.
    deltas : Expr, optional
        The deltas for the data.
    checkpoints : Expr, optional
        The forward fill checkpoints for the data.
    odo_kwargs : dict, optional
        The keyword arguments to forward to the odo calls internally.

    See Also
    --------
    :func:`zipline.pipeline.loaders.blaze.from_blaze`
    """
    self._table_expressions[column] = ExprData(
        expr,
        deltas,
        checkpoints,
        odo_kwargs,
    )

def can_float(item, max_weight):
    """A function that determines if an object 'item' can float. The object
    'item' can float if the list is a palindrome and the total of its
    elements is smaller than or equal to 'max_weight'.

    Args:
        item (List[int]): List of integer weights
        max_weight (int): Maximum possible weight for it to float

    Returns:
        bool: True if it can float, False otherwise.

    Examples:
        >>> can_float([1, 2], 5)
        False  # 1 + 2 is less than the max_weight, but it's not a palindrome.
        >>> can_float([3, 2, 3], 1)
        False  # The object weight is more than max_weight although it's a palindrome.
        >>> can_float([3, 2, 3], 9)
        True  # The object weight is less than max_weight and it's a palindrome.
        >>> can_float([3], 5)
        True  # The object weight is less than max_weight and it's a palindrome.
    """
    if sum(item) > max_weight:
        return False
    return item == item[::-1]

def get_share_info(self, grantee_type=None, grantee_id=None,
                   grantee_name=None, owner=None, owner_type='name'):
    """
    :returns: list of dicts representing share information
    """
    params = {}
    if grantee_type:
        if 'grantee' not in params.keys():
            params['grantee'] = {}
        params['grantee'].update({'type': grantee_type})
    if grantee_id:
        if 'grantee' not in params.keys():
            params['grantee'] = {}
        params['grantee'].update({'id': grantee_id})
    if grantee_name:
        if 'grantee' not in params.keys():
            params['grantee'] = {}
        params['grantee'].update({'name': grantee_name})
    if owner:
        params['owner'] = {'by': owner_type, '_content': owner}

    try:
        resp = self.request('GetShareInfo', params)
    # if the user never logged in, no mailbox was created
    except ZimbraSoapServerError as e:
        if 'mailbox not found for account' in str(e):
            return []
        else:
            raise e

    if resp and isinstance(resp['share'], list):
        return resp['share']
    elif resp and isinstance(resp['share'], dict):
        return [resp['share']]
    else:
        return []

def sendCommandAPDU(self, command):
    """Send an APDU command to the connected smartcard.

    @param command: list of APDU bytes, e.g. [0xA0, 0xA4, 0x00, 0x00, 0x02]

    @return: a tuple (response, sw1, sw2) where
        response is the APDU response
        sw1, sw2 are the two status words
    """
    response, sw1, sw2 = self.cs.connection.transmit(command)

    if len(response) > 2:
        response.append(sw1)
        response.append(sw2)

    return response, sw1, sw2

def simulated_binary_crossover(random, mom, dad, args):
    """Return the offspring of simulated binary crossover on the candidates.

    This function performs simulated binary crossover (SBX), following the
    implementation in NSGA-II
    `(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *sbx_distribution_index* -- the non-negative distribution index
      (default 10)

    A small value of the `sbx_distribution_index` optional argument allows
    solutions far away from parents to be created as child solutions, while
    a large value restricts only near-parent solutions to be created as
    child solutions.
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        di = args.setdefault('sbx_distribution_index', 10)
        bounder = args['_ec'].bounder
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound,
                                               bounder.upper_bound)):
            try:
                if m > d:
                    m, d = d, m
                beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
                alpha = 2.0 - 1.0 / beta**(di + 1.0)
                u = random.random()
                if u <= (1.0 / alpha):
                    beta_q = (u * alpha)**(1.0 / float(di + 1.0))
                else:
                    beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
                bro_val = 0.5 * ((m + d) - beta_q * (d - m))
                bro_val = max(min(bro_val, ub), lb)
                sis_val = 0.5 * ((m + d) + beta_q * (d - m))
                sis_val = max(min(sis_val, ub), lb)
                if random.random() > 0.5:
                    bro_val, sis_val = sis_val, bro_val
                bro[i] = bro_val
                sis[i] = sis_val
            except ZeroDivisionError:
                # The offspring already have legitimate values for every
                # element, so no need to take any special action here.
                pass
        return [bro, sis]
    else:
        return [mom, dad]

def _create_buffers(self, view_size, buffer_size):
    """Create the buffers, taking into account pixel alpha or colorkey

    :param view_size: pixel size of the view
    :param buffer_size: pixel size of the buffer
    """
    requires_zoom_buffer = not view_size == buffer_size
    self._zoom_buffer = None

    if self._clear_color is None:
        if requires_zoom_buffer:
            self._zoom_buffer = Surface(view_size)
        self._buffer = Surface(buffer_size)
    elif self._clear_color == self._rgba_clear_color:
        if requires_zoom_buffer:
            self._zoom_buffer = Surface(view_size, flags=pygame.SRCALPHA)
        self._buffer = Surface(buffer_size, flags=pygame.SRCALPHA)
        self.data.convert_surfaces(self._buffer, True)
    elif self._clear_color is not self._rgb_clear_color:
        if requires_zoom_buffer:
            self._zoom_buffer = Surface(view_size, flags=pygame.RLEACCEL)
            self._zoom_buffer.set_colorkey(self._clear_color)
        self._buffer = Surface(buffer_size, flags=pygame.RLEACCEL)
        self._buffer.set_colorkey(self._clear_color)
        self._buffer.fill(self._clear_color)

def run(self):
    '''Run the LaTeX compilation.'''
    # store files
    self.old_dir = []
    if self.opt.clean:
        self.old_dir = os.listdir('.')

    cite_counter, toc_file, gloss_files = self._read_latex_files()

    self.latex_run()
    self.read_glossaries()

    gloss_changed = self.makeindex_runs(gloss_files)
    if gloss_changed or self._is_toc_changed(toc_file):
        self.latex_run()

    if self._need_bib_run(cite_counter):
        self.bibtex_run()
        self.latex_run()

    while self.latex_run_counter < MAX_RUNS:
        if not self.need_latex_rerun():
            break
        self.latex_run()

    if self.opt.check_cite:
        cites = set()
        with open('%s.aux' % self.project_name) as fobj:
            aux_content = fobj.read()
        for match in BIBCITE_PATTERN.finditer(aux_content):
            name = match.groups()[0]
            cites.add(name)
        with open('%s.bib' % self.bib_file) as fobj:
            bib_content = fobj.read()
        for match in BIBENTRY_PATTERN.finditer(bib_content):
            name = match.groups()[0]
            if name not in cites:
                self.log.info('Bib entry not cited: "%s"' % name)

    if self.opt.clean:
        ending = '.dvi'
        if self.opt.pdf:
            ending = '.pdf'
        for fname in os.listdir('.'):
            if not (fname in self.old_dir or fname.endswith(ending)):
                try:
                    os.remove(fname)
                except IOError:
                    pass

    if self.opt.preview:
        self.open_preview()

def release(self, device_info):
    """This function is called by the segmentation state machine when it has
    finished with the device information."""
    if _debug:
        DeviceInfoCache._debug("release %r", device_info)

    # this information record might be used by more than one SSM
    if device_info._ref_count == 0:
        raise RuntimeError("reference count")

    # decrement the reference count
    device_info._ref_count -= 1

def update_table(self, tablename, throughput=None, global_indexes=None,
                 index_updates=None):
    """
    Update the throughput of a table and/or its global indexes

    Parameters
    ----------
    tablename : str
        Name of the table to update
    throughput : :class:`~dynamo3.fields.Throughput`, optional
        The new throughput of the table
    global_indexes : dict, optional
        DEPRECATED. Use index_updates now.
        Map of index name to :class:`~dynamo3.fields.Throughput`
    index_updates : list of :class:`~dynamo3.fields.IndexUpdate`, optional
        List of IndexUpdates to perform
    """
    kwargs = {'TableName': tablename}
    all_attrs = set()
    if throughput is not None:
        kwargs['ProvisionedThroughput'] = throughput.schema()
    if index_updates is not None:
        updates = []
        for update in index_updates:
            all_attrs.update(update.get_attrs())
            updates.append(update.serialize())
        kwargs['GlobalSecondaryIndexUpdates'] = updates
    elif global_indexes is not None:
        kwargs['GlobalSecondaryIndexUpdates'] = [
            {
                'Update': {
                    'IndexName': key,
                    'ProvisionedThroughput': value.schema(),
                }
            }
            for key, value in six.iteritems(global_indexes)
        ]
    if all_attrs:
        attr_definitions = [attr.definition() for attr in all_attrs]
        kwargs['AttributeDefinitions'] = attr_definitions
    return self.call('update_table', **kwargs)

def require_version(namespace, version):
    """Set a version for the namespace to be loaded.

    This needs to be called before importing the namespace or any namespace
    that depends on it.
    """
    global _versions

    repo = GIRepository()
    namespaces = repo.get_loaded_namespaces()

    if namespace in namespaces:
        loaded_version = repo.get_version(namespace)
        if loaded_version != version:
            raise ValueError('Namespace %s is already loaded with version %s' %
                             (namespace, loaded_version))

    if namespace in _versions and _versions[namespace] != version:
        raise ValueError('Namespace %s already requires version %s' %
                         (namespace, _versions[namespace]))

    available_versions = repo.enumerate_versions(namespace)
    if not available_versions:
        raise ValueError('Namespace %s not available' % namespace)

    if version not in available_versions:
        raise ValueError('Namespace %s not available for version %s' %
                         (namespace, version))

    _versions[namespace] = version

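# Usage sketch (not from the source): this mirrors the gi.require_version
# pattern, pinning a namespace version before the first import.
#
#   require_version('Gtk', '3.0')
#   from gi.repository import Gtk
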
async def on_raw_433(self, message):
    """Nickname in use."""
    if not self.registered:
        self._registration_attempts += 1
        # Attempt to set a new nickname.
        if self._attempt_nicknames:
            await self.set_nickname(self._attempt_nicknames.pop(0))
        else:
            await self.set_nickname(
                self._nicknames[0] +
                '_' * (self._registration_attempts - len(self._nicknames)))

def make_data():
    """
    1..T: set of periods
    K: set of resources
    P: set of items
    f[t,p]: set-up costs
    g[t,p]: set-up times
    c[t,p]: variable costs
    d[t,p]: demand values
    h[t,p]: holding costs
    a[t,k,p]: amount of resource k for producing product p in period t
    M[t,k]: resource upper bounds
    UB[t,p]: upper bound of production time of product p in period t
    phi[(i,j)]: units of i required to produce a unit of j (j parent of i)
    """
    T = 5
    K = [1]
    P = [1, 2, 3, 4, 5]
    _, f, g, c, d, h, UB = multidict({
        (1, 1): [10, 1, 2, 0, 0.5, 24], (1, 2): [10, 1, 2, 0, 0.5, 24],
        (1, 3): [10, 1, 2, 0, 0.5, 24], (1, 4): [10, 1, 2, 0, 0.5, 24],
        (1, 5): [10, 1, 2, 0, 0.5, 24], (2, 1): [10, 1, 2, 0, 0.5, 24],
        (2, 2): [10, 1, 2, 0, 0.5, 24], (2, 3): [10, 1, 2, 0, 0.5, 24],
        (2, 4): [10, 1, 2, 0, 0.5, 24], (2, 5): [10, 1, 2, 0, 0.5, 24],
        (3, 1): [10, 1, 2, 0, 0.5, 24], (3, 2): [10, 1, 2, 0, 0.5, 24],
        (3, 3): [10, 1, 2, 0, 0.5, 24], (3, 4): [10, 1, 2, 0, 0.5, 24],
        (3, 5): [10, 1, 2, 0, 0.5, 24], (4, 1): [10, 1, 2, 0, 0.5, 24],
        (4, 2): [10, 1, 2, 0, 0.5, 24], (4, 3): [10, 1, 2, 0, 0.5, 24],
        (4, 4): [10, 1, 2, 0, 0.5, 24], (4, 5): [10, 1, 2, 0, 0.5, 24],
        (5, 1): [10, 1, 2, 0, 0.5, 24], (5, 2): [10, 1, 2, 0, 0.5, 24],
        (5, 3): [10, 1, 2, 0, 0.5, 24], (5, 4): [10, 1, 2, 0, 0.5, 24],
        (5, 5): [10, 1, 2, 5, 0.5, 24],
    })
    a = {
        (1, 1, 1): 1, (1, 1, 2): 1, (1, 1, 3): 1, (1, 1, 4): 1, (1, 1, 5): 1,
        (2, 1, 1): 1, (2, 1, 2): 1, (2, 1, 3): 1, (2, 1, 4): 1, (2, 1, 5): 1,
        (3, 1, 1): 1, (3, 1, 2): 1, (3, 1, 3): 1, (3, 1, 4): 1, (3, 1, 5): 1,
        (4, 1, 1): 1, (4, 1, 2): 1, (4, 1, 3): 1, (4, 1, 4): 1, (4, 1, 5): 1,
        (5, 1, 1): 1, (5, 1, 2): 1, (5, 1, 3): 1, (5, 1, 4): 1, (5, 1, 5): 1,
    }
    M = {
        (1, 1): 15, (2, 1): 15, (3, 1): 15, (4, 1): 15, (5, 1): 15,
    }
    phi = {  # phi[(i,j)]: units of i required to produce a unit of j (j parent of i)
        (1, 3): 2,
        (2, 3): 3,
        (2, 4): 3 / 2.,
        (3, 5): 1 / 2.,
        (4, 5): 3,
    }
    return T, K, P, f, g, c, d, h, a, M, UB, phi

def trim ( self ) : """Discard the ancestry of this state ."""
new_hist = self . copy ( { } ) new_hist . parent = None self . state . register_plugin ( 'history' , new_hist )
def retrieve_prop ( name ) : """retrieve a property handler"""
handler_get , handler_set = None , None if name in props_get : handler_get = props_get [ name ] if name in props_set : handler_set = props_set [ name ] return ( name , handler_get , handler_set )
def get_transitions_forward_steps ( self , indexes , forward_steps , discount_factor ) : """Get dictionary of transition data"""
return self . deque . get_transitions_forward_steps ( indexes , forward_steps , discount_factor )
def create ( self , title , description = None , document_ids = None ) : """Creates a new project and returns its unique identifier in documentcloud . Example usage : > > > documentcloud . projects . create ( " The Ruben Salazar Files " )"""
params = { 'title' : title , } if description : params [ 'description' ] = description params = urllib . parse . urlencode ( params , doseq = True ) if document_ids : # These need to be specially formatted in the style documentcloud # expects arrays . The example they provide is : # ? document _ ids [ ] = 28 - boumediene & document _ ids [ ] = 207 - academy \ # & document _ ids [ ] = 30 - insider - trading params += "" . join ( [ '&document_ids[]=%s' % id for id in document_ids ] ) response = self . _make_request ( self . BASE_URI + "projects.json" , params . encode ( "utf-8" ) ) new_id = json . loads ( response . decode ( "utf-8" ) ) [ 'project' ] [ 'id' ] # If no id comes back , the project title already exists if not new_id : raise DuplicateObjectError ( "The Project title you tried to create \ already exists" ) # Fetch the actual project object from the API and return that . return self . get ( new_id )
def split_sequence ( seq , n ) : """Splits a sequence into tokens of length n and returns them as a list . The last token may be of smaller length ."""
tokens = [ ] while seq : tokens . append ( seq [ : n ] ) seq = seq [ n : ] return tokens
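A quick usage sketch; strings and lists slice the same way here:

>>> split_sequence('abcdefgh', 3)
['abc', 'def', 'gh']
>>> split_sequence([1, 2, 3, 4, 5], 2)
[[1, 2], [3, 4], [5]]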
def get_build_report ( self , project , build_id , type = None ) : """GetBuildReport . [ Preview API ] Gets a build report . : param str project : Project ID or project name : param int build _ id : The ID of the build . : param str type : : rtype : : class : ` < BuildReportMetadata > < azure . devops . v5_0 . build . models . BuildReportMetadata > `"""
route_values = { } if project is not None : route_values [ 'project' ] = self . _serialize . url ( 'project' , project , 'str' ) if build_id is not None : route_values [ 'buildId' ] = self . _serialize . url ( 'build_id' , build_id , 'int' ) query_parameters = { } if type is not None : query_parameters [ 'type' ] = self . _serialize . query ( 'type' , type , 'str' ) response = self . _send ( http_method = 'GET' , location_id = '45bcaa88-67e1-4042-a035-56d3b4a7d44c' , version = '5.0-preview.2' , route_values = route_values , query_parameters = query_parameters ) return self . _deserialize ( 'BuildReportMetadata' , response )
def head ( self , n ) : """Return a DataAccessObject containing the first n rows : param n : number of rows : return : DataAccessObject"""
return DataAccessObject ( { k : self . dict [ k ] [ : n ] for k in self . dict } )
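A hedged usage sketch, assuming the dict-of-lists layout the method itself relies on:

dao = DataAccessObject({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
dao.head(2)  # -> DataAccessObject({'x': [1, 2], 'y': [10, 20]})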
def get_n_config_to_keep ( self , n_suggestions , bracket_iteration ) : """Return the number of configs to keep and resume ."""
n_configs = n_suggestions * ( self . eta ** - bracket_iteration ) return int ( n_configs / self . eta )
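A worked example of the successive-halving arithmetic, assuming eta = 3:

# n_suggestions = 27, bracket_iteration = 0:
#   n_configs = 27 * 3**-0 = 27, so int(27 / 3) = 9 configs are kept
# n_suggestions = 27, bracket_iteration = 1:
#   n_configs = 27 * 3**-1 = 9, so int(9 / 3) = 3 configs are kept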
def sepBy1 ( p , sep ) : '''` sepBy1 ( p , sep ) ` parses one or more occurrences of ` p ` , separated by ` sep ` . Returns a list of values returned by ` p ` .'''
return separated ( p , sep , 1 , maxt = float ( 'inf' ) , end = False )
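A hedged usage sketch; `digit` and `string` stand in for parsec-style primitives assumed to exist in the same combinator library, not verified here:

# number_list = sepBy1(digit, string(','))
# number_list.parse('1,2,3')  ->  ['1', '2', '3']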
def frombed ( args ) : """% prog frombed bedfile Generate AGP file based on bed file . The bed file must have at least 6 columns . With the 4 - th column indicating the new object ."""
p = OptionParser ( frombed . __doc__ ) p . add_option ( "--gapsize" , default = 100 , type = "int" , help = "Insert gaps of size [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args gapsize = opts . gapsize agpfile = bedfile . replace ( ".bed" , ".agp" ) fw = open ( agpfile , "w" ) bed = Bed ( bedfile , sorted = False ) for object , beds in groupby ( bed , key = lambda x : x . accn ) : beds = list ( beds ) for i , b in enumerate ( beds ) : if gapsize and i != 0 : print ( "\t" . join ( str ( x ) for x in ( object , 0 , 0 , 0 , "U" , gapsize , "scaffold" , "yes" , "map" ) ) , file = fw ) print ( "\t" . join ( str ( x ) for x in ( object , 0 , 0 , 0 , "W" , b . seqid , b . start , b . end , b . strand ) ) , file = fw ) fw . close ( ) # Reindex return reindex ( [ agpfile , "--inplace" ] )
def get_zernike_indexes ( limit = 10 ) : """Return a list of all Zernike indexes up to the given limit limit - return all Zernike indexes with N less than this limit returns an array of 2 - tuples . Each tuple is organized as ( N , M ) . The Zernikes are stored as complex numbers with the real part being ( N , M ) and the imaginary being ( N , - M )"""
def zernike_indexes_iter ( n_max ) : for n in range ( 0 , n_max ) : for m in range ( n % 2 , n + 1 , 2 ) : yield n yield m z_ind = np . fromiter ( zernike_indexes_iter ( limit ) , np . intc ) z_ind = z_ind . reshape ( ( len ( z_ind ) // 2 , 2 ) ) return z_ind
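A quick usage sketch (the dtype is the platform C int, typically int32):

>>> get_zernike_indexes(3)
array([[0, 0],
       [1, 1],
       [2, 0],
       [2, 2]], dtype=int32)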
def add_empty_magic_table ( self , dtype , col_names = None , groups = None ) : """Add a blank MagicDataFrame to the contribution . You can provide either a list of column names , or a list of column group names . If provided , col _ names takes precedence ."""
if dtype not in self . table_names : print ( "-W- {} is not a valid MagIC table name" . format ( dtype ) ) print ( "-I- Valid table names are: {}" . format ( ", " . join ( self . table_names ) ) ) return data_container = MagicDataFrame ( dtype = dtype , columns = col_names , groups = groups ) self . tables [ dtype ] = data_container
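A hedged usage sketch, assuming 'sites' appears in self.table_names:

# contribution.add_empty_magic_table('sites', col_names=['site', 'lat', 'lon'])
# contribution.tables['sites']  ->  empty MagicDataFrame with those columns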
def connect ( self ) : """Returns an open connection ( socket ) to the Splunk instance . This method is used for writing bulk events to an index or similar tasks where the overhead of opening a connection multiple times would be prohibitive . : returns : A socket . * * Example * * : : import splunklib . binding as binding c = binding . connect ( . . . ) socket = c . connect ( ) socket . write ( " POST % s HTTP / 1.1 \\ r \\ n " % " some / path / to / post / to " ) socket . write ( " Host : % s : % s \\ r \\ n " % ( c . host , c . port ) ) socket . write ( " Accept - Encoding : identity \\ r \\ n " ) socket . write ( " Authorization : % s \\ r \\ n " % c . token ) socket . write ( " X - Splunk - Input - Mode : Streaming \\ r \\ n " ) socket . write ( " \\ r \\ n " )"""
sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) if self . scheme == "https" : sock = ssl . wrap_socket ( sock ) sock . connect ( ( socket . gethostbyname ( self . host ) , self . port ) ) return sock
def send_photo ( self , * args , ** kwargs ) : """See : func : ` send _ photo `"""
return send_photo ( * args , ** self . _merge_overrides ( ** kwargs ) ) . run ( )
def _initialize_generator ( self , gen , obj = None ) : """Add ' last time ' and ' last value ' attributes to the generator ."""
# CEBALERT : use a dictionary to hold these things . if hasattr ( obj , "_Dynamic_time_fn" ) : gen . _Dynamic_time_fn = obj . _Dynamic_time_fn gen . _Dynamic_last = None # CEB : I ' d use None for this , except can ' t compare a fixedpoint # number with None ( e . g . 1 > None but FixedPoint ( 1 ) > None can ' t be done ) gen . _Dynamic_time = - 1 gen . _saved_Dynamic_last = [ ] gen . _saved_Dynamic_time = [ ]
def availability_rtf ( ) -> bool : """Is an RTF processor available ?"""
unrtf = tools [ 'unrtf' ] if unrtf : return True elif pyth : log . warning ( "RTF conversion: unrtf missing; " "using pyth (less efficient)" ) return True else : return False
def get_incomings_per_page ( self , per_page = 1000 , page = 1 , params = None ) : """Get incomings per page : param per _ page : How many objects per page . Default : 1000 : param page : Which page . Default : 1 : param params : Search parameters . Default : { } : return : list"""
return self . _get_resource_per_page ( resource = INCOMINGS , per_page = per_page , page = page , params = params )
def get_categories ( self , languages = None ) : """GetCategories . [ Preview API ] : param str languages : : rtype : [ str ]"""
query_parameters = { } if languages is not None : query_parameters [ 'languages' ] = self . _serialize . query ( 'languages' , languages , 'str' ) response = self . _send ( http_method = 'GET' , location_id = 'e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a' , version = '5.1-preview.1' , query_parameters = query_parameters ) return self . _deserialize ( '[str]' , self . _unwrap_collection ( response ) )
def run_board ( args ) : """Run main entry for AutoMLBoard . Args : args : args parsed from command line"""
init_config ( args ) # backend service , should import after django settings initialized from backend . collector import CollectorService service = CollectorService ( args . logdir , args . reload_interval , standalone = False , log_level = args . log_level ) service . run ( ) # frontend service logger . info ( "Try to start automlboard on port %s\n" % args . port ) command = [ os . path . join ( root_path , "manage.py" ) , "runserver" , "0.0.0.0:%s" % args . port , "--noreload" ] execute_from_command_line ( command )
def timestamp_from_dt ( dt , epoch = datetime ( 1970 , 1 , 1 ) ) : """Convert a datetime to a timestamp . https : / / stackoverflow . com / a / 8778548/141395"""
delta = dt - epoch # return delta . total _ seconds ( ) return delta . seconds + delta . days * 86400
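A quick check of the arithmetic; note the result is a whole number of seconds, since `delta.seconds` discards microseconds:

>>> timestamp_from_dt(datetime(1970, 1, 2))
86400
>>> timestamp_from_dt(datetime(1970, 1, 1, 0, 1))
60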
def Reset ( self ) : """Resets the internal state of the analyzer ."""
hasher_names = hashers_manager . HashersManager . GetHasherNamesFromString ( self . _hasher_names_string ) self . _hashers = hashers_manager . HashersManager . GetHashers ( hasher_names )
def start_r_creation_task_with_single_url ( self , body , ** kwargs ) : """Start Repository Creation task with url autodetect ( internal vs . external ) . This method makes a synchronous HTTP request by default . To make an asynchronous HTTP request , please define a ` callback ` function to be invoked when receiving the response . > > > def callback _ function ( response ) : > > > pprint ( response ) > > > thread = api . start _ r _ creation _ task _ with _ single _ url ( body , callback = callback _ function ) : param callback function : The callback function for asynchronous request . ( optional ) : param RepositoryCreationUrlAutoRest body : Task parameters . ( required ) : return : int If the method is called asynchronously , returns the request thread ."""
kwargs [ '_return_http_data_only' ] = True if kwargs . get ( 'callback' ) : return self . start_r_creation_task_with_single_url_with_http_info ( body , ** kwargs ) else : ( data ) = self . start_r_creation_task_with_single_url_with_http_info ( body , ** kwargs ) return data
def get_focus_widget ( self ) : """Returns the Widget with focus , or None when no suitable widget has focus . : return : Widget with focus . : rtype : QWidget"""
current_widget = QApplication . focusWidget ( ) if current_widget is None : return None if current_widget . objectName ( ) == "Script_Editor_Output_plainTextEdit" or isinstance ( current_widget , Editor ) : return current_widget return None
def cmdline_split ( s : str , platform : Union [ int , str ] = 'this' ) -> List [ str ] : """As per https : / / stackoverflow . com / questions / 33560364 / python - windows - parsing - command - lines - with - shlex . Multi - platform variant of ` ` shlex . split ( ) ` ` for command - line splitting . For use with ` ` subprocess ` ` , for ` ` argv ` ` injection etc . Using fast REGEX . Args : s : string to split platform : - ` ` ' this ' ` ` = auto from current platform ; - ` ` 1 ` ` = POSIX ; - ` ` 0 ` ` = Windows / CMD - ( other values reserved )"""
# noqa if platform == 'this' : platform = ( sys . platform != 'win32' ) # RNC : includes 64 - bit Windows if platform == 1 : # POSIX re_cmd_lex = r'''"((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'"\\&|<>]+)|(\s+)|(.)''' # noqa elif platform == 0 : # Windows / CMD re_cmd_lex = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' # noqa else : raise AssertionError ( 'unknown platform %r' % platform ) args = [ ] accu = None # collects pieces of one arg for qs , qss , esc , pipe , word , white , fail in re . findall ( re_cmd_lex , s ) : if word : pass # most frequent elif esc : word = esc [ 1 ] elif white or pipe : if accu is not None : args . append ( accu ) if pipe : args . append ( pipe ) accu = None continue elif fail : raise ValueError ( "invalid or incomplete shell string" ) elif qs : word = qs . replace ( '\\"' , '"' ) . replace ( '\\\\' , '\\' ) if platform == 0 : word = word . replace ( '""' , '"' ) else : word = qss # may be even empty ; must be last accu = ( accu or '' ) + word if accu is not None : args . append ( accu ) return args
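A quick usage sketch under POSIX rules; note that pipe and redirection operators come back as tokens of their own:

>>> cmdline_split('ls -l "my file" | wc -l', platform=1)
['ls', '-l', 'my file', '|', 'wc', '-l']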
async def stream_as_text ( stream ) : """Given a stream of bytes or text , if any of the items in the stream are bytes convert them to text . This function can be removed once we return text streams instead of byte streams ."""
async for data in stream : if not isinstance ( data , six . text_type ) : data = data . decode ( 'utf-8' , 'replace' ) yield data
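A minimal runnable sketch with a mixed bytes/str source:

import asyncio

async def demo():
    async def mixed():
        yield b'hello '
        yield 'world'
    # Every chunk comes out as text, decoded as UTF-8 where needed.
    return [chunk async for chunk in stream_as_text(mixed())]

# asyncio.get_event_loop().run_until_complete(demo())  ->  ['hello ', 'world']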
def validate_metadata ( train_config ) : """Perform some checks that the training config is correct . Args : train _ config : train config as produced by merge _ metadata ( ) Raises : ValueError : if columns look wrong ."""
# Make sure we have a default for every column if len ( train_config [ 'csv_header' ] ) != len ( train_config [ 'csv_defaults' ] ) : raise ValueError ( 'Unequal number of columns in input features file and ' 'schema file.' ) # Check there are no missing columns . sorted _ columns has two copies of the # target column because the target column is also listed in # categorical _ columns or numerical _ columns . sorted_columns = sorted ( train_config [ 'csv_header' ] + [ train_config [ 'target_column' ] ] ) sorted_columns2 = sorted ( train_config [ 'categorical_columns' ] + train_config [ 'numerical_columns' ] + [ train_config [ 'key_column' ] ] + [ train_config [ 'target_column' ] ] ) if sorted_columns2 != sorted_columns : raise ValueError ( 'Each csv header must be a numerical/categorical type, a ' 'key, or a target.' )
def _IsMariaDB ( cursor ) : """Checks if we are running against MariaDB ."""
for variable in [ "version" , "version_comment" ] : cursor . execute ( "SHOW VARIABLES LIKE %s;" , ( variable , ) ) version = cursor . fetchone ( ) if version and "MariaDB" in version [ 1 ] : return True return False
def save ( self , basename ) : """Save a set of V1 images for flashing . Parameter is a base filename ."""
# IROM data goes in its own plain binary file irom_segment = self . get_irom_segment ( ) if irom_segment is not None : with open ( "%s0x%05x.bin" % ( basename , irom_segment . addr - ESP8266ROM . IROM_MAP_START ) , "wb" ) as f : f . write ( irom_segment . data ) # everything but IROM goes at 0x00000 in an image file normal_segments = self . get_non_irom_segments ( ) with open ( "%s0x00000.bin" % basename , 'wb' ) as f : self . write_common_header ( f , normal_segments ) checksum = ESPLoader . ESP_CHECKSUM_MAGIC for segment in normal_segments : checksum = self . save_segment ( f , segment , checksum ) self . append_checksum ( f , checksum )
def locate_primers ( sequences , forward_primer , reverse_primer , reverse_complement , max_hamming_distance ) : """Find forward and reverse primers in a set of sequences , return two tuples : ( forward _ start , forward _ end ) , ( reverse _ start , reverse _ end )"""
forward_loc = None reverse_loc = None seq_length = None # Reverse complement the reverse primer , if appropriate if reverse_complement : reverse_primer = reverse_primer . reverse_complement ( ) forward_aligner = PrimerAligner ( forward_primer ) reverse_aligner = PrimerAligner ( reverse_primer ) for i , sequence in enumerate ( sequences ) : if seq_length is None : seq_length = len ( sequence ) elif len ( sequence ) != seq_length : raise ValueError ( ( "Sequence Length Heterogeneity: {0} != {1}. " "Is this an alignment?" ) . format ( len ( sequence ) , seq_length ) ) index_map = ungap_index_map ( sequence . seq ) if forward_loc is None : ham_dist , start , end = forward_aligner . align ( sequence . seq . ungap ( ) ) if ham_dist <= max_hamming_distance : forward_loc = index_map [ start ] , index_map [ end ] logging . info ( "Forward in sequence %d: indexes %d to %d" , i + 1 , * forward_loc ) if reverse_loc is None : ham_dist , start , end = reverse_aligner . align ( sequence . seq . ungap ( ) ) if ham_dist <= max_hamming_distance : reverse_loc = index_map [ start ] , index_map [ end ] logging . info ( "Reverse in sequence %d: indexes %d to %d" , i + 1 , * reverse_loc ) if forward_loc and reverse_loc : # Both found # Check order if forward_loc [ 0 ] > reverse_loc [ 0 ] : raise PrimerOrderError ( forward_loc [ 0 ] , reverse_loc [ 0 ] ) return forward_loc , reverse_loc else : logging . debug ( "Sequence %d: %d/2 primers found" , i + 1 , sum ( j is not None for j in ( forward_loc , reverse_loc ) ) ) # Did not find either the forward or reverse primer : if not forward_loc : raise PrimerNotFound ( forward_primer ) else : raise PrimerNotFound ( reverse_primer )
def qt4_menu_nib_dir ( ) : """Return path to Qt resource dir qt _ menu . nib ."""
menu_dir = '' # Detect MacPorts prefix ( usually / opt / local ) . # Suppose that PyInstaller is using python from macports . macports_prefix = sys . executable . split ( '/Library' ) [ 0 ] # list of directories where to look for qt _ menu . nib dirs = [ # Qt4 from MacPorts not compiled as framework . os . path . join ( macports_prefix , 'lib' , 'Resources' ) , # Qt4 from MacPorts compiled as framework . os . path . join ( macports_prefix , 'libexec' , 'qt4-mac' , 'lib' , 'QtGui.framework' , 'Versions' , '4' , 'Resources' ) , # Qt4 installed into default location . '/Library/Frameworks/QtGui.framework/Resources' , '/Library/Frameworks/QtGui.framework/Versions/4/Resources' , '/Library/Frameworks/QtGui.Framework/Versions/Current/Resources' , ] # Check directory existence for d in dirs : d = os . path . join ( d , 'qt_menu.nib' ) if os . path . exists ( d ) : menu_dir = d break if not menu_dir : logger . error ( 'Cannot find qt_menu.nib directory' ) return menu_dir
def make_single_array ( ds , batch_size = 8 * 1024 ) : """Create a single numpy array from a dataset . The dataset must have only one dimension , that is , the length of its ` output _ shapes ` and ` output _ types ` is 1 , and its output shape must be ` [ ] ` , that is , every tensor in the dataset must be a scalar . Args : ds : a TF Dataset . batch _ size : how many elements to read per pass Returns : a single numpy array ."""
if isinstance ( ds . output_types , tuple ) or isinstance ( ds . output_shapes , tuple ) : raise ValueError ( 'Dataset must have a single type and shape' ) nshapes = len ( ds . output_shapes ) if nshapes > 0 : raise ValueError ( 'Dataset must be comprised of scalars (TensorShape=[])' ) batches = [ ] with tf . Session ( ) as sess : ds = ds . batch ( batch_size ) iterator = ds . make_initializable_iterator ( ) sess . run ( iterator . initializer ) get_next = iterator . get_next ( ) with tqdm ( desc = 'Elements' , unit_scale = 1 ) as pbar : try : while True : batches . append ( sess . run ( get_next ) ) pbar . update ( len ( batches [ - 1 ] ) ) except tf . errors . OutOfRangeError : pass if batches : return np . concatenate ( batches ) return np . array ( [ ] , dtype = ds . output_types . as_numpy_dtype )
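A hedged usage sketch against the TF 1.x graph-mode API assumed above:

# ds = tf.data.Dataset.range(5)   # scalar int64 elements, output_shapes == TensorShape([])
# make_single_array(ds)           # -> array([0, 1, 2, 3, 4])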