signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _perform_binds(self, binds):
    """Bind queues to exchanges.

    Parameters
    ----------
    binds : list of dicts
        Each dict has the keys:
        queue : string - name of the queue to bind
        exchange : string - name of the exchange to bind
        routing_key : string - routing key to use for this bind
    """
    for spec in binds:
        self.logger.debug(
            "Binding queue {0} to exchange {1} with key {2}".format(
                spec['queue'], spec['exchange'], spec['routing_key']))
        # The dict keys line up with queue_bind's keyword arguments.
        self.channel.queue_bind(**spec)
def commandline_to_list(self, cmdline_str, trigger_string):
    '''Split a command line string into a list of arguments.

    cmdline_str is the string of the command line; trigger_string is the
    trigger prefix, which is removed before splitting.
    '''
    args = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):])
    # Undo Slack URL markup:
    # target=<http://host.domain.net|host.domain.net> -> target=host.domain.net
    url_pattern = r'(?P<begin>.*)(<.*\|)(?P<url>.*)(>)(?P<remainder>.*)'
    result = []
    for item in args:
        match = re.match(url_pattern, item)
        if match:
            result.append(match.group('begin') + match.group('url')
                          + match.group('remainder'))
        else:
            result.append(item)
    return result
def parse(self, value):
    """Parse a raw option value into a date (or datetime)."""
    parsed = super(DateOpt, self).parse(value)
    if parsed is None:
        return None
    # Strings are parsed into datetime objects first.
    if isinstance(parsed, str):
        parsed = self.parse_date(parsed)
    # Truncate to a date when only the date part is wanted.
    if isinstance(parsed, datetime) and self.date_only:
        parsed = parsed.date()
    return parsed
def switch_service(self, new_service=None):
    """Start a new service in a subprocess.

    :param new_service: Either a service name or a service class. If not
        set, start up a new instance of the previous class.
    :return: True on success, False on failure.
    """
    if new_service:
        self._service_factory = new_service
    with self.__lock:
        # Terminate existing service if necessary
        if self._service is not None:
            self._terminate_service()
        # Find service class if necessary
        # NOTE(review): `basestring` is Python-2-only — confirm this module
        # still targets Python 2 or provides a compatibility alias.
        if isinstance(self._service_factory, basestring):
            self._service_factory = workflows.services.lookup(self._service_factory)
        if not self._service_factory:
            return False
        # Set up new service object
        service_instance = self._service_factory(environment=self._service_environment)
        # Set up pipes and connect service object.  Pipe(False) creates
        # one-directional pipes: commands flow to the service, replies back.
        svc_commands, self._pipe_commands = multiprocessing.Pipe(False)
        self._pipe_service, svc_tofrontend = multiprocessing.Pipe(False)
        service_instance.connect(commands=svc_commands, frontend=svc_tofrontend)
        # Set up transport layer for new service
        service_instance.transport = self._transport_factory()
        # Start new service in a separate process
        self._service = multiprocessing.Process(
            target=service_instance.start,
            args=(),
            kwargs={"verbose_log": self._verbose_service},
        )
        self._service_name = service_instance.get_name()
        self._service_class_name = service_instance.__class__.__name__
        self._service.daemon = True
        self._service.name = "workflows-service"
        self._service.start()
        self._service_starttime = time.time()
        # Starting the process copies all file descriptors.
        # At this point (and no sooner!) the passed pipe objects must be closed
        # in this process here.
        svc_commands.close()
        svc_tofrontend.close()
        self.log.info("Started service: %s", self._service_name)
    return True
def create_dockwidget ( self ) : """Add to parent QMainWindow as a dock widget"""
# Creating dock widget dock = SpyderDockWidget ( self . get_plugin_title ( ) , self . main ) # Set properties dock . setObjectName ( self . __class__ . __name__ + "_dw" ) dock . setAllowedAreas ( self . ALLOWED_AREAS ) dock . setFeatures ( self . FEATURES ) dock . setWidget ( self ) self . update_margins ( ) dock . visibilityChanged . connect ( self . visibility_changed ) dock . topLevelChanged . connect ( self . on_top_level_changed ) dock . sig_plugin_closed . connect ( self . plugin_closed ) self . dockwidget = dock if self . shortcut is not None : sc = QShortcut ( QKeySequence ( self . shortcut ) , self . main , self . switch_to_plugin ) self . register_shortcut ( sc , "_" , "Switch to %s" % self . CONF_SECTION ) return ( dock , self . LOCATION )
def make_jobs():
    """Create a list of Job objects, which carry all information needed
    for a function to be executed on SGE: the function object, its
    arguments and settings."""
    # Arguments for each job, one argument list per job.
    argument_sets = [[3], [5], [10], [20]]
    # The default queue used by the Job class is all.q. You must specify
    # the `queue` keyword argument if that is not the name of your queue.
    return [Job(compute_factorial, arg_set, queue='all.q')
            for arg_set in argument_sets]
def get(self, request, bot_id, handler_id, id, format=None):
    """Get url parameter by id.

    serializer: AbsParamSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Delegate to the generic detail implementation in the parent class.
    return super(UrlParameterDetail, self).get(request, bot_id, handler_id, id, format)
def dropNodesByCount(grph, minCount=-float('inf'), maxCount=float('inf'), parameterName='count', ignoreMissing=False):
    """Modify _grph_ in place by dropping nodes whose count is not within
    the inclusive bounds _minCount_ <= count <= _maxCount_.

    The count is determined by the node attribute _parameterName_; a
    missing attribute raises a `KeyError` unless _ignoreMissing_ is
    `True`. minCount and maxCount default to negative and positive
    infinity respectively, so without specifying either the output
    equals the input.

    # Parameters

    _grph_ : `networkx Graph` — the graph to be modified.
    _minCount_ : `optional [int or double]` — default `-inf`, minimum count for a node to be kept.
    _maxCount_ : `optional [int or double]` — default `inf`, maximum count for a node to be kept.
    _parameterName_ : `optional [str]` — default `'count'`, key to the count field in the node's attribute dict.
    _ignoreMissing_ : `optional [bool]` — default `False`; if `True`, nodes missing a count are kept instead of raising.
    """
    count = 0
    total = len(grph.nodes())
    if metaknowledge.VERBOSE_MODE:
        progArgs = (0, "Dropping nodes by count")
        progKwargs = {}
    else:
        progArgs = (0, "Dropping nodes by count")
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        badNodes = []
        for n in grph.nodes(data=True):
            if PBar:
                count += 1
                if count % 10000 == 0:
                    # BUG FIX: original message read "10000nodes" (missing space).
                    PBar.updateVal(count / total, "{} nodes analysed and {} nodes dropped".format(count, len(badNodes)))
            try:
                val = n[1][parameterName]
            except KeyError:
                if not ignoreMissing:
                    # BUG FIX: the original passed a second positional argument
                    # to KeyError (a stray comma) instead of concatenating it
                    # into the message.
                    raise KeyError("One or more nodes do not have counts or " + str(parameterName) + " is not the name of the count parameter")
            else:
                if val < minCount or val > maxCount:
                    badNodes.append(n[0])
        if PBar:
            PBar.updateVal(1, "Cleaning up graph")
        grph.remove_nodes_from(badNodes)
        if PBar:
            PBar.finish("{} nodes out of {} dropped, {} returned".format(len(badNodes), total, total - len(badNodes)))
def fail(self):
    """Fail a vector."""
    if self.failed is True:
        raise AttributeError(
            "Cannot fail {} - it has already failed.".format(self))
    self.failed = True
    self.time_of_death = timenow()
    # Cascade the failure to every transmission of this vector.
    for transmission in self.transmissions():
        transmission.fail()
def send_and_match_output(self, send, matches, retry=3, strip=True, note=None, echo=False, loglevel=logging.DEBUG):
    """Return True if the output of the command matches any of the
    strings in the matches list of regexp strings. Handles matching on a
    per-line basis and does not cross lines.

    @param send:     See send()
    @param matches:  String - or list of strings - of regexp(s) to check
    @param retry:    Number of times to retry command (default 3)
    @param strip:    Whether to strip output (defaults to True)
    @param note:     See send()

    @type send:      string
    @type matches:   list
    @type retry:     integer
    @type strip:     boolean
    """
    shutit = self.shutit
    shutit.handle_note(note)
    shutit.log('Matching output from: "' + send + '" to one of these regexps:' + str(matches), level=logging.INFO)
    echo = shutit.get_echo_override(echo)
    output = self.send_and_get_output(send, retry=retry, strip=strip, echo=echo, loglevel=loglevel)
    # Accept a single regexp string as a one-element list.
    if isinstance(matches, str):
        matches = [matches]
    shutit.handle_note_after(note=note)
    for match in matches:
        # Idiom fix: compare to None with `is not None`, not `!= None`.
        if shutit.match_string(output, match) is not None:
            shutit.log('Matched output, return True', level=logging.DEBUG)
            return True
    shutit.log('Failed to match output, return False', level=logging.DEBUG)
    return False
def fingerprint_from_keybase(fingerprint, kb_obj):
    """Extract a key matching a specific fingerprint from a Keybase API
    response.

    Returns a dict with 'fingerprint' and 'bundle' keys on a match,
    otherwise None.
    """
    if 'public_keys' not in kb_obj:
        return None
    public_keys = kb_obj['public_keys']
    if 'pgp_public_keys' not in public_keys:
        return None
    wanted = fingerprint.lower()
    for key in public_keys['pgp_public_keys']:
        keyprint = fingerprint_from_var(key).lower()
        # Accept exact match as well as prefix/suffix matches of the
        # requested fingerprint.
        if keyprint == wanted or keyprint.startswith(wanted) or keyprint.endswith(wanted):
            return {'fingerprint': keyprint, 'bundle': key}
    return None
def pgrrec(body, lon, lat, alt, re, f):
    """Convert planetographic coordinates to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pgrrec_c.html

    :param body: Body with which coordinate system is associated.
    :type body: str
    :param lon: Planetographic longitude of a point (radians).
    :type lon: float
    :param lat: Planetographic latitude of a point (radians).
    :type lat: float
    :param alt: Altitude of a point above reference spheroid.
    :type alt: float
    :param re: Equatorial radius of the reference spheroid.
    :type re: float
    :param f: Flattening coefficient.
    :type f: float
    :return: Rectangular coordinates of the point.
    :rtype: 3-Element Array of floats
    """
    # Marshal the Python arguments into ctypes for the CSPICE call.
    body = stypes.stringToCharP(body)
    lon = ctypes.c_double(lon)
    lat = ctypes.c_double(lat)
    alt = ctypes.c_double(alt)
    re = ctypes.c_double(re)
    f = ctypes.c_double(f)
    # Output buffer filled in-place by pgrrec_c.
    rectan = stypes.emptyDoubleVector(3)
    libspice.pgrrec_c(body, lon, lat, alt, re, f, rectan)
    return stypes.cVectorToPython(rectan)
def is_isolated_list_abundance(graph: BELGraph, node: BaseEntity, cls: Type[ListAbundance] = ListAbundance) -> bool:
    """Return True if the node is a list abundance with no incoming edges
    and only HAS_COMPONENT outgoing edges."""
    if not isinstance(node, cls):
        return False
    if graph.in_degree(node) != 0:
        return False
    # Every outgoing edge must be an unqualified HAS_COMPONENT edge.
    return all(
        data[RELATION] == HAS_COMPONENT
        for _, _, data in graph.out_edges(node, data=True)
    )
def quit(self):
    """Quit socket server."""
    logging.info("quiting sock server")
    if self.__quit is not None:
        # Signal the server loop to stop, then wait for the thread to exit.
        self.__quit.set()
        self.join()
    return
def d_deta_from_phalf(arr, pfull_coord):
    """Compute pressure level thickness from half level pressures."""
    # First difference along the half-level dimension, then relabel the
    # resulting coordinate as full levels.
    thickness = arr.diff(dim=internal_names.PHALF_STR, n=1)
    return replace_coord(thickness, internal_names.PHALF_STR,
                         internal_names.PFULL_STR, pfull_coord)
def get_index(binstr, end_index=160):
    """Return the 1-based position of the first '1' bit from the left in
    the word, or end_index when no '1' occurs.

    :param binstr: string of bit characters to scan
    :param end_index: value returned when no '1' is found
    :return: 1-based index of the first '1', or end_index
    """
    # str.find returns -1 instead of raising, so no try/except is needed.
    first_one = binstr.find('1')
    return first_one + 1 if first_one >= 0 else end_index
def copyWorkitem(self, copied_from, title=None, description=None, prefix=None):
    """Create a workitem by copying from an existing one.

    :param copied_from: the to-be-copied workitem id
    :param title: the new workitem title/summary. If `None`, will copy
        that from the to-be-copied workitem
    :param description: the new workitem description. If `None`, will
        copy that from the to-be-copied workitem
    :param prefix: used to add a prefix to the copied title and description
    :return: the :class:`rtcclient.workitem.Workitem` object
    :rtype: rtcclient.workitem.Workitem
    """
    copied_wi = self.getWorkitem(copied_from)
    if title is None:
        title = copied_wi.title
        if prefix is not None:
            title = prefix + title
    if description is None:
        description = copied_wi.description
        if prefix is not None:
            description = prefix + description
    # BUG FIX: the original split the log format string across two
    # positional arguments ("...copied from ", "<Workitem %s>", id),
    # which makes logging raise a formatting error; use one format string.
    self.log.info("Start to create a new <Workitem>, copied from <Workitem %s>", copied_from)
    wi_url_post = "/".join([self.url,
                            "oslc/contexts/%s" % copied_wi.contextId,
                            "workitems",
                            "%s" % copied_wi.type.split("/")[-1]])
    wi_raw = self.templater.renderFromWorkitem(copied_from,
                                               keep=True,
                                               encoding="UTF-8",
                                               title=title,
                                               description=description)
    return self._createWorkitem(wi_url_post, wi_raw)
def basic_auth_string(username, password):
    """Encode a username and password for use in an HTTP Basic
    Authentication header.

    :param username: user name (str)
    :param password: password (str)
    :return: 'Basic <base64-encoded credentials>' string
    """
    # BUG FIX: base64.encodestring was deprecated and removed in
    # Python 3.9, and only accepted bytes. b64encode is the portable
    # replacement and never inserts newlines, so no strip() is needed.
    credentials = ('%s:%s' % (username, password)).encode('utf-8')
    b64 = base64.b64encode(credentials).decode('ascii')
    return 'Basic %s' % b64
def main ( ) : """Install entry - point"""
from os import path as op from inspect import getfile , currentframe from setuptools import setup , find_packages from niworkflows . __about__ import ( __packagename__ , __author__ , __email__ , __maintainer__ , __license__ , __description__ , __longdesc__ , __url__ , DOWNLOAD_URL , CLASSIFIERS , REQUIRES , SETUP_REQUIRES , LINKS_REQUIRES , TESTS_REQUIRES , EXTRA_REQUIRES , ) pkg_data = { 'niworkflows' : [ 'data/t1-mni_registration*.json' , 'data/bold-mni_registration*.json' , 'reports/figures.json' , 'reports/fmriprep.yml' , 'reports/report.tpl' , ] } root_dir = op . dirname ( op . abspath ( getfile ( currentframe ( ) ) ) ) version = None cmdclass = { } if op . isfile ( op . join ( root_dir , __packagename__ , 'VERSION' ) ) : with open ( op . join ( root_dir , __packagename__ , 'VERSION' ) ) as vfile : version = vfile . readline ( ) . strip ( ) pkg_data [ __packagename__ ] . insert ( 0 , 'VERSION' ) if version is None : import versioneer version = versioneer . get_version ( ) cmdclass = versioneer . get_cmdclass ( ) setup ( name = __packagename__ , version = version , description = __description__ , long_description = __longdesc__ , author = __author__ , author_email = __email__ , maintainer = __maintainer__ , maintainer_email = __email__ , license = __license__ , url = __url__ , download_url = DOWNLOAD_URL , classifiers = CLASSIFIERS , packages = find_packages ( exclude = [ '*.tests' ] ) , zip_safe = False , # Dependencies handling setup_requires = SETUP_REQUIRES , install_requires = list ( set ( REQUIRES ) ) , dependency_links = LINKS_REQUIRES , tests_require = TESTS_REQUIRES , extras_require = EXTRA_REQUIRES , # Data package_data = pkg_data , include_package_data = True , cmdclass = cmdclass , )
def copy_file(host, file_path, remote_path='.', username=None, key_path=None, action='put'):
    """Copy a file via SCP, proxied through the mesos master.

    :param host: host or IP of the machine to execute the command on
    :type host: str
    :param file_path: the local path to the file to be copied
    :type file_path: str
    :param remote_path: the remote path to copy the file to
    :type remote_path: str
    :param username: SSH username
    :type username: str
    :param key_path: path to the SSH private key to use for SSH authentication
    :type key_path: str
    :param action: 'put' to upload file_path to host, 'get' to download
        remote_path from host
    :type action: str

    :return: True if successful, False otherwise
    :rtype: bool
    """
    # Fall back to the CLI-configured SSH credentials when none are given.
    if not username:
        username = shakedown.cli.ssh_user
    if not key_path:
        key_path = shakedown.cli.ssh_key_file
    key = validate_key(key_path)
    transport = get_transport(host, username, key)
    transport = start_transport(transport, username, key)
    if transport.is_authenticated():
        start = time.time()
        channel = scp.SCPClient(transport)
        if action == 'get':
            print("\n{}scp {}:{} {}\n".format(shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
            channel.get(remote_path, file_path)
        else:
            print("\n{}scp {} {}:{}\n".format(shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
            channel.put(file_path, remote_path)
        # Report the local file size and the elapsed transfer time.
        print("{} bytes copied in {} seconds.".format(str(os.path.getsize(file_path)), str(round(time.time() - start, 2))))
        try_close(channel)
        try_close(transport)
        return True
    else:
        print("error: unable to authenticate {}@{} with key {}".format(username, host, key_path))
        return False
def data ( self , value ) : """Saves a new image to disk"""
self . loader . save_image ( self . category , self . image , value )
def human_or_01(X, y, model_generator, method_name):
    """OR (false/true)

    This tests how well a feature attribution method agrees with human
    intuition for an OR operation combined with linear effects. This
    metric deals specifically with the question of credit allocation for
    the following function when all three inputs are true:
        if fever: +2 points
        if cough: +2 points
        if fever or cough: +6 points

    transform = "identity"
    sort_order = 1
    """
    # Delegate to the shared OR helper with fever=False, cough=True.
    return _human_or(X, model_generator, method_name, False, True)
def filter(args):
    """
    %prog filter <deltafile|coordsfile>

    Produce a new delta/coords file and filter based on id% or cov%.
    Use `delta-filter` for .delta file.
    """
    p = OptionParser(filter.__doc__)
    p.set_align(pctid=0, hitlen=0)
    p.add_option("--overlap", default=False, action="store_true",
                 help="Print overlap status (e.g. terminal, contained)")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    pctid = opts.pctid
    hitlen = opts.hitlen
    filename, = args
    # No thresholds set: nothing to filter, return the input unchanged.
    if pctid == 0 and hitlen == 0:
        return filename

    pf, suffix = filename.rsplit(".", 1)
    outfile = "".join((pf, ".P{0}L{1}.".format(int(pctid), int(hitlen)), suffix))
    # Skip the work when an up-to-date output already exists.
    if not need_update(filename, outfile):
        return outfile

    # Delegate .delta filtering to the external MUMmer delta-filter tool.
    if suffix == "delta":
        cmd = "delta-filter -i {0} -l {1} {2}".format(pctid, hitlen, filename)
        sh(cmd, outfile=outfile)
        return outfile

    fp = open(filename)
    fw = must_open(outfile, "w")
    for row in fp:
        try:
            c = CoordsLine(row)
        except AssertionError:
            # Not a parsable coords data line (e.g. header); skip it.
            continue
        if c.identity < pctid:
            continue
        if c.len2 < hitlen:
            continue
        if opts.overlap and not c.overlap:
            continue
        outrow = row.rstrip()
        if opts.overlap:
            ov = Overlap_types[c.overlap]
            outrow += "\t" + ov
        print(outrow, file=fw)
    return outfile
def gap_to_sorl(time_gap):
    """Convert an ISO8601 duration (e.g. P1D) into Solr's duration
    format (e.g. +1DAY).

    :param time_gap: ISO8601 duration string
    :return: solr's format duration.
    """
    quantity, unit = parse_ISO8601(time_gap)
    # Solr has no week unit, so express weeks as days.
    if unit[0] == "WEEKS":
        return "+{0}DAYS".format(quantity * 7)
    return "+{0}{1}".format(quantity, unit[0])
def right_click_specimen_equalarea(self, event):
    """Toggle between zoom and pan effects for the specimen equal area
    plot on right click.

    Parameters
    ----------
    event : the wx.MouseEvent that triggered the call of this function

    Alters
    ------
    specimen_EA_setting, toolbar2 setting
    """
    # Only act on a plain right click; ignore left clicks and double clicks.
    if event.LeftIsDown() or event.ButtonDClick():
        return
    current = self.specimen_EA_setting
    if current == "Zoom":
        self.specimen_EA_setting = "Pan"
        toggle = lambda: self.toolbar2.pan('off')
    elif current == "Pan":
        self.specimen_EA_setting = "Zoom"
        toggle = lambda: self.toolbar2.zoom()
    else:
        return
    try:
        toggle()
    except TypeError:
        # Some toolbar backends raise TypeError here; treat as a no-op.
        pass
def equal_to_be(self, be_record):
    # type: (PathTableRecord) -> bool
    '''A method to compare a little-endian path table record to its
    big-endian counterpart. This is used to ensure that the ISO is sane.

    Parameters:
     be_record - The big-endian object to compare with the little-endian
                 object.
    Returns:
     True if this record is equal to the big-endian record passed in,
     False otherwise.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')
    # The big-endian multi-byte fields must be byte-swapped before
    # comparing against this little-endian record.
    return (be_record.len_di == self.len_di and
            be_record.xattr_length == self.xattr_length and
            utils.swab_32bit(be_record.extent_location) == self.extent_location and
            utils.swab_16bit(be_record.parent_directory_num) == self.parent_directory_num and
            be_record.directory_identifier == self.directory_identifier)
def get_items(self, jid, node, *, max_items=None):
    """Request the most recent items from a node.

    :param jid: Address of the PubSub service.
    :type jid: :class:`aioxmpp.JID`
    :param node: Name of the PubSub node to query.
    :type node: :class:`str`
    :param max_items: Number of items to return at most.
    :type max_items: :class:`int` or :data:`None`
    :raises aioxmpp.errors.XMPPError: as returned by the service
    :return: The response from the server.
    :rtype: :class:`.xso.Request`

    By default, as many as possible items are requested. If `max_items`
    is given, it must be a positive integer specifying the maximum
    number of items which is to be returned by the server.

    Return the :class:`.xso.Request` object, which has a
    :class:`~.xso.Items` :attr:`~.xso.Request.payload`.
    """
    # Build a GET IQ stanza carrying the PubSub items request.
    iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.GET)
    iq.payload = pubsub_xso.Request(pubsub_xso.Items(node, max_items=max_items))
    # Generator-based coroutine: forward the server's reply to the caller.
    return (yield from self.client.send(iq))
def process_data(self, **kwargs):
    """Get the data from the cache.

    :param kwargs: contain keyword args: trigger_id at least
    :type kwargs: dict
    """
    trigger_id = kwargs.get('trigger_id')
    stack = kwargs.get('cache_stack')
    cache = caches['django_th']
    # Cache entries are keyed by "<stack>_<trigger_id>".
    cached = cache.get(stack + '_' + trigger_id)
    return PublishingLimit.get_data(stack, cached, int(trigger_id))
def check_user_can_vote(cmt_id, client_ip_address, uid=-1):
    """Check that a user hasn't already voted.

    :param cmt_id: comment id
    :param client_ip_address: IP => use: str(req.remote_ip)
    :param uid: user id, as given by invenio.legacy.webuser.getUid(req)
    """
    cmt_id = wash_url_argument(cmt_id, 'int')
    client_ip_address = wash_url_argument(client_ip_address, 'str')
    uid = wash_url_argument(uid, 'int')
    query = """SELECT "id_cmtRECORDCOMMENT" FROM "cmtACTIONHISTORY" WHERE "id_cmtRECORDCOMMENT"=%s"""
    if uid < 0:
        # Anonymous user: identify the voter by client host instead.
        query += " AND client_host=inet_aton(%s)"
        params = (cmt_id, client_ip_address)
    else:
        query += " AND id_user=%s"
        params = (cmt_id, uid)
    rows = run_sql(query, params)
    # No prior vote recorded means the user may vote.
    return len(rows) == 0
def _create_badges(self):
    """Creates badges."""
    nicknames = RandomNicknames()
    for nick in nicknames.random_nicks(count=20):
        badge = Badge.objects.create(
            name=nick,
            slug=slugify(nick),
            description='Lorem ipsum dolor sit amet, consectetur adipisicing elit')
        logger.info('Created badge: %s', badge.name)
def crud_fields(obj, fields=None):
    """Display object fields in table rows::

        <table>
        {% crud_fields object 'id, %}
        </table>

    * ``fields`` fields to include

      If fields is ``None`` all fields will be displayed.
      If fields is ``string`` comma separated field names will be
      displayed. If field is dictionary, key should be field name and
      value field verbose name.
    """
    if fields is None:
        fields = utils.get_fields(type(obj))
    elif isinstance(fields, six.string_types):
        # Comma-separated names: strip whitespace and filter the model fields.
        wanted = [name.strip() for name in fields.split(',')]
        fields = utils.get_fields(type(obj), include=wanted)
    return {'object': obj, 'fields': fields}
def find_transitionid_by_name(self, issue, transition_name):
    """Get a transition id available on the specified issue to the
    current user.

    See https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074
    for the JSON reference.

    :param issue: ID or key of the issue to get the transitions from
    :param transition_name: name of the transition we are looking for
    :return: the id of the matching transition, or None if no transition
        with that name exists
    """
    wanted = transition_name.lower()
    # Idiom fix: the original bound the result to a local named `id`,
    # shadowing the builtin; return directly instead.
    for transition in self.transitions(issue):
        if transition["name"].lower() == wanted:
            return transition["id"]
    return None
def cielab_to_xyz(cielab, refwhite):
    """Convert CIE L*a*b* color values to CIE XYZ.

    *cielab* should be of shape (*, 3). *refwhite* is the reference
    white value in the L*a*b* color space, of shape (3,). The return
    value has the same shape as *cielab*.
    """
    def finv(t):
        # Idiom fix: the original named the cubic branch `pow`,
        # shadowing the builtin. Cubic branch above the threshold,
        # linear branch below.
        cubed = t ** 3
        linear = 0.128419 * t - 0.0177129
        return np.where(t > 0.206897, cubed, linear)

    xyz = np.empty_like(cielab)
    lscale = 1. / 116 * (cielab[..., L] + 16)
    xyz[..., X] = finv(lscale + 0.002 * cielab[..., A])
    xyz[..., Y] = finv(lscale)
    xyz[..., Z] = finv(lscale - 0.005 * cielab[..., B])
    # Scale by the reference white to get absolute XYZ.
    xyz *= refwhite
    return xyz
def remove_badge(self, kind):
    '''Perform an atomic removal for a given badge.'''
    # Atomically pull all badge entries of this kind from the document.
    self.update(__raw__={'$pull': {'badges': {'kind': kind}}})
    # Refresh local state so it reflects the database update.
    self.reload()
    # Notify listeners of the removal and of the document save.
    on_badge_removed.send(self, kind=kind)
    post_save.send(self.__class__, document=self)
def stratify(self):
    """Stratify the sample based on propensity score.

    By default the sample is divided into five equal-sized bins. The
    number of bins can be set by modifying the object attribute named
    blocks. Alternatively, custom-sized bins can be created by setting
    blocks equal to a sorted list of numbers between 0 and 1 indicating
    the bin boundaries.

    This method should only be executed after the propensity score has
    been estimated.
    """
    Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
    pscore = self.raw_data['pscore']
    if isinstance(self.blocks, int):
        boundaries = split_equal_bins(pscore, self.blocks)
    else:
        boundaries = self.blocks[:]  # make a copy; should be sorted
        boundaries[0] = 0  # avoids always dropping 1st unit

    def in_bin(p_low, p_high):
        return (p_low < pscore) & (pscore <= p_high)

    masks = [in_bin(lo, hi) for lo, hi in zip(boundaries, boundaries[1:])]
    strata = [CausalModel(Y[m], D[m], X[m]) for m in masks]
    self.strata = Strata(strata, masks, pscore)
def get_fisher_scores_vs_background(self):
    '''Return a pd.DataFrame of Fisher scores vs the background corpus,
    sorted by Bonferroni-corrected p-value (ascending) and odds ratio
    (descending).'''
    df = self.get_term_and_background_counts()
    odds_ratio, p_values = self._get_fisher_scores_from_counts(
        df['corpus'], df['background'])
    df['Odds ratio'] = odds_ratio
    df['Bonferroni-corrected p-values'] = p_values * len(df)
    # BUG FIX: sort_values returns a new DataFrame; the original
    # discarded the result, so the returned frame was never sorted.
    df = df.sort_values(by=['Bonferroni-corrected p-values', 'Odds ratio'],
                        ascending=[True, False])
    return df
def from_json_dict(dct, validate=True):
    # type: (Dict[str, Any], bool) -> Schema
    """Create a Schema for v1 or v2 according to dct.

    :param dct: This dictionary must have a `'features'` key specifying
        the columns of the dataset. It must have a `'version'` key
        containing the master schema version that this schema conforms
        to. It must have a `'hash'` key with all the globals.
    :param validate: (default True) Raise an exception if the schema
        does not conform to the master schema.
    :return: the Schema
    """
    if validate:
        # This raises iff the schema is invalid.
        validate_schema_dict(dct)

    version = dct['version']
    if version == 1:
        # Upgrade v1 schemas to v2 and re-validate the converted form.
        dct = convert_v1_to_v2(dct)
        if validate:
            validate_schema_dict(dct)
    elif version != 2:
        msg = ('Schema version {} is not supported. '
               'Consider updating clkhash.').format(version)
        raise SchemaError(msg)

    clk_config = dct['clkConfig']
    l = clk_config['l']
    xor_folds = clk_config.get('xor_folds', 0)

    kdf = clk_config['kdf']
    kdf_type = kdf['type']
    kdf_hash = kdf.get('hash', 'SHA256')
    # info and salt are base64-encoded when present, otherwise None.
    kdf_info_string = kdf.get('info')
    kdf_info = (base64.b64decode(kdf_info_string)
                if kdf_info_string is not None
                else None)
    kdf_salt_string = kdf.get('salt')
    kdf_salt = (base64.b64decode(kdf_salt_string)
                if kdf_salt_string is not None
                else None)
    kdf_key_size = kdf.get('keySize', DEFAULT_KDF_KEY_SIZE)

    fields = list(map(spec_from_json_dict, dct['features']))
    return Schema(fields, l, xor_folds, kdf_type, kdf_hash, kdf_info, kdf_salt, kdf_key_size)
def tdbr2EOL(td):
    """Convert the <br/> in a <td> block into line endings (EOL = \\n)."""
    for br in td.find_all("br"):
        br.replace_with("\n")
    # Serialize back to markup text (unicode(td) in python2).
    markup = six.text_type(td)
    # Re-parse and extract just the <td> block; BeautifulSoup adds a lot
    # of other html wrapper junk that we want to discard.
    return BeautifulSoup(markup, 'lxml').find('td')
def forward_until(self, condition):
    """Forward until one of the provided matches is found.

    The returned string contains all characters found before the
    condition was met. In other words, the condition will be true for
    the remainder of the buffer.

    :param condition: set of valid strings
    """
    collected = TokenWithPosition('', self.peek().position)
    # Consume one token at a time until the condition holds (or EOF).
    while self.hasNext() and not condition(self.peek()):
        collected += self.forward(1)
    return collected
def remove_request(self, uuid):
    """Remove any RPC request(s) using this uuid.

    :param str uuid: Rpc Identifier.
    :return:
    """
    # Snapshot the matching keys first so the dict can be mutated safely.
    stale = [key for key, value in self._request.items() if value == uuid]
    for key in stale:
        del self._request[key]
def get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path=None, pdb_id=None, extra_flags=''):
    '''Take a string containing a PDB file, the RosettaScripts
    executable, and the Rosetta database, and use the features database
    to map PDB residue IDs to pose residue IDs.

    On success, (True, the residue mapping) is returned. On failure,
    (False, a list of errors) is returned.

    Note: extra_flags should typically include
    '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.
    '''
    temp_path = write_temp_file("/tmp", pdb_file_contents)
    try:
        return get_pdb_to_pose_residue_map(
            temp_path, rosetta_scripts_path,
            rosetta_database_path=rosetta_database_path,
            pdb_id=pdb_id, extra_flags=extra_flags)
    finally:
        # BUG FIX: the original leaked the temp file if the mapping call
        # raised; always clean it up.
        os.remove(temp_path)
def _updateInternals(self):
    """Update internal attributes related to likelihood.

    Should be called any time branch lengths or model parameters are
    changed.
    """
    rootnode = self.nnodes - 1
    if self._distributionmodel:
        catweights = self.model.catweights
    else:
        catweights = scipy.ones(1, dtype='float')
    # When there are multiple categories, it is acceptable
    # for some (but not all) of them to have underflow at
    # any given site. Note that we still include a check for
    # underflow by ensuring that none of the site likelihoods is zero.
    undererrstate = 'ignore' if len(catweights) > 1 else 'raise'
    with scipy.errstate(over='raise', under=undererrstate, divide='raise', invalid='raise'):
        self.underflowlogscale.fill(0.0)
        self._computePartialLikelihoods()
        sitelik = scipy.zeros(self.nsites, dtype='float')
        assert (self.L[rootnode] >= 0).all(), str(self.L[rootnode])
        # Sum per-category site likelihoods weighted by category weights.
        for k in self._catindices:
            sitelik += scipy.sum(self._stationarystate(k) * self.L[rootnode][k], axis=1) * catweights[k]
        assert (sitelik > 0).all(), "Underflow:\n{0}\n{1}".format(sitelik, self.underflowlogscale)
        self.siteloglik = scipy.log(sitelik) + self.underflowlogscale
        self.loglik = scipy.sum(self.siteloglik) + self.model.logprior
        if self.dparamscurrent:
            # Derivatives of the log likelihood with respect to each
            # free model parameter.
            self._dloglik = {}
            for param in self.model.freeparams:
                if self._distributionmodel and (param in self.model.distributionparams):
                    name = self.model.distributedparam
                    weighted_dk = (self.model.d_distributionparams[param] * catweights)
                else:
                    name = param
                    weighted_dk = catweights
                dsiteloglik = 0
                for k in self._catindices:
                    dsiteloglik += (scipy.sum(self._dstationarystate(k, name) * self.L[rootnode][k] + self.dL[name][rootnode][k] * self._stationarystate(k), axis=-1) * weighted_dk[k])
                dsiteloglik /= sitelik
                self._dloglik[param] = (scipy.sum(dsiteloglik, axis=-1) + self.model.dlogprior(param))
        if self.dtcurrent:
            # Derivative of the log likelihood with respect to branch lengths.
            self._dloglik_dt = 0
            dLnroot_dt = scipy.array([self.dL_dt[n2][rootnode] for n2 in sorted(self.dL_dt.keys())])
            for k in self._catindices:
                if isinstance(k, int):
                    dLnrootk_dt = dLnroot_dt.swapaxes(0, 1)[k]
                else:
                    # Single-category case indexes with a full slice.
                    assert k == slice(None)
                    dLnrootk_dt = dLnroot_dt
                self._dloglik_dt += catweights[k] * scipy.sum(self._stationarystate(k) * dLnrootk_dt, axis=-1)
            self._dloglik_dt /= sitelik
            self._dloglik_dt = scipy.sum(self._dloglik_dt, axis=-1)
            assert self._dloglik_dt.shape == self.t.shape
def remove_framework_search_paths(self, paths, target_name=None, configuration_name=None):
    """Remove the given paths from the FRAMEWORK_SEARCH_PATHS build setting.

    :param paths: A string or array of strings with the search paths to remove.
    :param target_name: Target name or list of target names to remove the flag
        from, or None for every target.
    :param configuration_name: Configuration name to modify, or None for every
        configuration.
    :return: void
    """
    # Delegate to the generic search-path remover with the framework flag.
    flag = XCBuildConfigurationFlags.FRAMEWORK_SEARCH_PATHS
    self.remove_search_paths(flag, paths, target_name, configuration_name)
def poll(self, device_code, expires_in, interval, **kwargs):
    """Construct the device authentication poller.

    :param device_code: Device authentication code
    :type device_code: str
    :param expires_in: Device authentication code expiry (in seconds)
    :type expires_in: int
    :param interval: Device authentication poll interval
    :type interval: int
    :rtype: DeviceOAuthPoller
    """
    # Extra keyword arguments are accepted for interface compatibility but
    # are not forwarded to the poller.
    poller = DeviceOAuthPoller(self.client, device_code, expires_in, interval)
    return poller
def _write_section ( self , fp , section_name , section_items , delimiter ) : """Write a single section to the specified ` fp ' ."""
fp . write ( "[{0}]\n" . format ( section_name ) ) for key , value in section_items : value = self . _interpolation . before_write ( self , section_name , key , value ) if value is not None or not self . _allow_no_value : value = delimiter + str ( value ) . replace ( '\n' , '\n\t' ) else : value = "" fp . write ( "{0}{1}\n" . format ( key , value ) ) fp . write ( "\n" )
def list_groups(name):
    '''
    Return a list of groups the named user belongs to.

    name

        The name of the user for which to list groups. Starting in Salt
        2016.11.0, all groups for the user, including groups beginning with
        an underscore will be listed.

        .. versionchanged:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt '*' user.list_groups foo
    '''
    # get_group_list already yields the complete group list; a plain list()
    # copy replaces the previous pointless identity comprehension.
    return list(salt.utils.user.get_group_list(name))
def output_scores(self, name=None):
    """
    Returns:
        N x #class scores, summed to one for each box.
    """
    # Softmax over the class dimension turns the logits into per-box
    # probabilities; the optional name labels the resulting op.
    probs = tf.nn.softmax(self.label_logits, name=name)
    return probs
def uniquelogins(sessions):
    """Unique logins per days/weeks/months.

    :return: daily, weekly, monthly
        3 lists of dictionaries of the following format
        [{'x': epoch, 'y': value}, ]
    """
    # sessions = LoginSession.query.order_by(LoginSession.started_at.asc()).all()
    if not sessions:
        return [], [], []

    # Map "YYYY/MM/DD" -> set of unique users seen that day
    # (time of day is discarded to aggregate on days only).
    dates = {}
    for session in sessions:
        date = session.started_at.strftime("%Y/%m/%d")
        # setdefault replaces the previous duplicated if/else add() branches.
        dates.setdefault(date, set()).add(session.user)

    daily = []
    weekly = []
    monthly = []
    for date in sorted(dates.keys()):
        date_epoch = unix_time_millis(datetime.strptime(date, "%Y/%m/%d"))
        daily.append({"x": date_epoch, "y": len(dates[date])})

    daily_serie = pd.Series(dates)
    # convert the index to Datetime type
    daily_serie.index = pd.DatetimeIndex(daily_serie.index)
    # calculate the values instead of users lists
    daily_serie = daily_serie.apply(lambda x: len(x))

    # GroupBy Week/month, Thanks Panda
    weekly_serie = daily_serie.groupby(pd.Grouper(freq="W")).aggregate(numpysum)
    monthly_serie = daily_serie.groupby(pd.Grouper(freq="M")).aggregate(numpysum)

    # The weekly and monthly loops were identical copy-paste; do both in one
    # pass over (series, output-list) pairs.
    for serie, out in ((weekly_serie, weekly), (monthly_serie, monthly)):
        for date, value in serie.items():
            try:
                value = int(value)
            except ValueError:
                # NaN / empty buckets cannot be converted; skip them.
                continue
            out.append({"x": unix_time_millis(date), "y": value})

    return daily, weekly, monthly
def query(cls, automation=None, offset=None, limit=None, api=None):
    """Query (List) apps.
    :param automation: Automation id.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: collection object
    """
    automation_id = Transform.to_automation(automation)
    # Fall back to the class-level API client when none is supplied.
    api = api or cls._API
    url = cls._URL['query'].format(automation_id=automation_id)
    return super(AutomationMember, cls)._query(
        url=url,
        automation_id=automation_id,
        offset=offset,
        limit=limit,
        api=api,
    )
def include(code, persist=True, compilerflags=None):
    """This function replaces the *calling module* with a dynamic module
    that generates code on demand. The code is generated from type
    descriptions that are created by gccxml compiling the C code 'code'.

    If <persist> is True, generated code is appended to the module's source
    code, otherwise the generated code is executed and then thrown away.

    The calling module must load all the shared libraries that it uses
    *BEFORE* this function is called.

    NOTE: - the calling module MUST contain 'from ctypes import *',
    and, on windows, also 'from ctypes.wintypes import *'.
    """
    compilerflags = compilerflags or ["-c"]
    # create a hash for the code, and use that as basename for the
    # files we have to create
    fullcode = "/* compilerflags: %r */\n%s" % (compilerflags, code)
    hashval = md5(fullcode).hexdigest()
    fnm = os.path.abspath(os.path.join(gen_dir, hashval))
    h_file = fnm + ".h"
    xml_file = fnm + ".xml"
    tdesc_file = fnm + ".typedesc.bz2"
    if not os.path.exists(h_file):
        # FIX: use a context manager so the header is flushed and closed
        # before gccxml reads it (the original relied on refcount GC).
        with open(h_file, "w") as header:
            header.write(fullcode)
    if is_newer(h_file, tdesc_file):
        if is_newer(h_file, xml_file):
            print("# Compiling into...", xml_file, file=sys.stderr)
            from ctypeslib import h2xml
            h2xml.compile_to_xml(["h2xml",
                                  "-I", os.path.dirname(fnm), "-q",
                                  h_file,
                                  "-o", xml_file]
                                 + list(compilerflags))
        if is_newer(xml_file, tdesc_file):
            print("# Parsing XML file and compressing type descriptions...",
                  file=sys.stderr)
            decls = gccxmlparser.parse(xml_file)
            # FIX: close the compressed pickle explicitly (it was never
            # closed before); cPickle.dump returns None, so the previous
            # 'data =' assignment was dead code and has been dropped.
            ofi = bz2.BZ2File(tdesc_file, "w")
            try:
                cPickle.dump(decls, ofi, -1)
            finally:
                ofi.close()
            os.remove(xml_file)  # not needed any longer.

    # Replace the *caller's* module object with a DynamicModule wrapper.
    frame = sys._getframe(1)
    glob = frame.f_globals
    name = glob["__name__"]
    mod = sys.modules[name]
    sys.modules[name] = DynamicModule(mod, tdesc_file, persist=persist)
def _create_attach_record ( self , id , timed ) : """Create a new pivot attachement record ."""
record = { } record [ self . _foreign_key ] = self . _parent . get_key ( ) record [ self . _other_key ] = id if timed : record = self . _set_timestamps_on_attach ( record ) return record
def get_prev_step(self, step=None):
    """Returns the previous step before the given `step`. If there are no
    steps available, None will be returned. If the `step` argument is None,
    the current step will be determined automatically.
    """
    if step is None:
        step = self.steps.current
    order = self.get_form_list().keyOrder
    prev_index = order.index(step) - 1
    # index 0 has no predecessor (-1 would wrap around to the last step).
    return order[prev_index] if prev_index >= 0 else None
def get_config_dir(program='', system_wide=False):
    '''Get the configuration directory.

    Get the configuration directories, optionally for a specific program.

    Args:
        program (str): The name of the program whose configuration
            directories have to be found.
        system_wide (bool): Gets the system-wide configuration directories.

    Returns:
        list: A list of all matching configuration directories found.
    '''
    config_homes = []
    if system_wide:
        if os.name == 'nt':
            # BUG FIX: winreg was only imported in the per-user branch, so
            # this branch previously raised NameError on Windows.
            import winreg
            config_homes.append(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
        else:
            config_homes.append('/etc')
            config_homes.append('/etc/xdg')
            if os.name == 'darwin':
                config_homes.append('/Library')
    else:
        if os.name == 'nt':
            import winreg
            config_homes.append(winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))
            config_homes.append(os.path.join(
                winreg.ExpandEnvironmentStrings('%APPDATA%'), 'Roaming'))
        else:
            # Prefer the XDG spec locations; fall back to ~/.config.
            if os.getenv('XDG_CONFIG_HOME'):
                config_homes.append(os.getenv('XDG_CONFIG_HOME'))
            else:
                try:
                    from xdg import BaseDirectory
                    config_homes.append(BaseDirectory.xdg_config_home)
                except ImportError:
                    config_homes.append(os.path.expanduser('~/.config'))
            config_homes.append(os.path.expanduser('~'))
            if os.name == 'darwin':
                config_homes.append(os.path.expanduser('~/Library'))

    if program:
        def __find_homes(app, dirs):
            # Return existing <dir>/<app>, <dir>/.<app> and <dir>/<app>.d dirs.
            homes = []
            for home in dirs:
                if os.path.isdir(os.path.join(home, app)):
                    homes.append(os.path.join(home, app))
                if os.path.isdir(os.path.join(home, '.' + app)):
                    homes.append(os.path.join(home, '.' + app))
                if os.path.isdir(os.path.join(home, app + '.d')):
                    homes.append(os.path.join(home, app + '.d'))
            return homes

        app_homes = __find_homes(program, config_homes)

        # Special Cases
        if program == 'vim':
            app_homes.extend(__find_homes('vimfiles', config_homes))
        elif program == 'chrome':
            app_homes.extend(__find_homes('google-chrome', config_homes))
        elif program in ['firefox', 'thunderbird']:
            app_homes.extend(__find_homes(program,
                                          [os.path.expanduser('~/.mozilla')]))

        return app_homes

    return config_homes
def request_status(r, detailed=False):
    """Returns a formatted string about the status, useful for logging.

    args:
        r - takes requests.models.Response
        detailed - when True, append the JSON body to successful responses
    """
    base_string = "HTTP {r.request.method} {r.request.url}: {r.status_code}"
    # BUG FIX: the original tested `r.status_code in range(200, 99)`, which
    # is an *empty* range, so the success branch was unreachable.
    if r.status_code in range(200, 300):
        string = base_string.format(r=r)
        if detailed is True:
            # BUG FIX: "{r.json()}" is not a valid str.format field (format
            # field names cannot contain calls); call json() explicitly.
            string += " - {0}".format(r.json())
        else:
            string += " - 👍"
        return string
    return base_string.format(r=r)
def _is_kpoint ( line ) : """Is this line the start of a new k - point block"""
# Try to parse the k - point ; false otherwise toks = line . split ( ) # k - point header lines have 4 tokens if len ( toks ) != 4 : return False try : # K - points are centered at the origin xs = [ float ( x ) for x in toks [ : 3 ] ] # Weights are in [ 0,1] w = float ( toks [ 3 ] ) return all ( abs ( x ) <= 0.5 for x in xs ) and w >= 0.0 and w <= 1.0 except ValueError : return False
def find_common_elements(list1: list, list2: list) -> list:
    """This function returns a sorted list of unique common elements between two lists.

    Args:
        list1 (list): The first list.
        list2 (list): The second list.

    Returns:
        list: A sorted list of common elements.

    Example:
        >>> find_common_elements([1, 4, 3, 34, 653, 2, 5], [5, 7, 1, 5, 9, 653, 121])
        [1, 5, 653]
        >>> find_common_elements([5, 3, 2, 8], [3, 2])
        [2, 3]
    """
    # Set intersection deduplicates; sorted() produces the ordered list.
    return sorted(set(list1).intersection(list2))
def transform_vector_coorb_to_inertial(vec_coorb, orbPhase, quat_copr):
    """Given a vector (of size 3) in coorbital frame, orbital phase in
    coprecessing frame and a minimal rotation frame quat, transforms
    the vector from the coorbital to the inertial frame."""
    # Undo the orbital-phase rotation to land in the coprecessing frame.
    vec_copr = rotate_in_plane(vec_coorb, -orbPhase)
    # Apply the coprecessing-frame quaternion to reach the inertial frame.
    vec_inertial = transformTimeDependentVector(
        np.array([quat_copr]).T,
        np.array([vec_copr]).T).T[0]
    return np.array(vec_inertial)
def _sample_variant_file_in_population(x):
    """Check if a sample file is the same as the population file.

    This is true for batches where we don't extract into samples and do not
    run decomposition for gemini.
    """
    if "population" not in x:
        return False
    pop_vcfs = _get_project_vcf(x)
    sample_vcfs = _get_variant_file(x, ("vrn_file",))
    decomposed = tz.get_in(("population", "decomposed"), x)
    # Guard clauses: need both file lists present and no decomposition run.
    if decomposed or not pop_vcfs or not sample_vcfs:
        return False
    # Same sample set in both VCFs means the sample file *is* the population file.
    return (vcfutils.get_samples(pop_vcfs[0]["path"])
            == vcfutils.get_samples(sample_vcfs[0]["path"]))
def QA_indicator_MACD(DataFrame, short=12, long=26, mid=9):
    """MACD CALC"""
    close = DataFrame['close']
    # DIF: spread between the fast and slow exponential moving averages.
    dif = EMA(close, short) - EMA(close, long)
    # DEA: signal line, an EMA of DIF over the mid period.
    dea = EMA(dif, mid)
    # Histogram: twice the DIF/DEA spread, by convention.
    macd = (dif - dea) * 2
    return pd.DataFrame({'DIF': dif, 'DEA': dea, 'MACD': macd})
def cached_getter(func):
    """Decorate a getter method so its result is computed on first call and
    cached on the instance (under ``_<name>``) for subsequent calls.

    FIX: a private sentinel (rather than ``None``) marks "not yet computed",
    so getters that legitimately return ``None`` are cached too instead of
    being re-executed on every access.
    """
    attribute = f'_{func.__name__}'
    missing = object()  # sentinel: distinguishes "not cached" from a cached None

    @wraps(func)
    def wrapper(self):
        value = getattr(self, attribute, missing)
        if value is missing:
            value = func(self)
            setattr(self, attribute, value)
        return value
    return wrapper
def solver(A, config):
    """Generate an SA solver given matrix A and a configuration.

    Parameters
    ----------
    A : array, matrix, csr_matrix, bsr_matrix
        Matrix to invert, CSR or BSR format preferred for efficiency
    config : dict
        A dictionary of solver configuration parameters that is used to
        generate a smoothed aggregation solver

    Returns
    -------
    ml : smoothed_aggregation_solver
        smoothed aggregation hierarchy

    Notes
    -----
    config must contain the following parameter entries for
    smoothed_aggregation_solver: symmetry, smooth, presmoother,
    postsmoother, B, strength, max_levels, max_coarse, coarse_solver,
    aggregate, keep

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration, solver
    >>> A = poisson((40,40), format='csr')
    >>> config = solver_configuration(A, verb=False)
    >>> ml = solver(A, config)
    """
    # Convert A to acceptable format
    A = make_csr(A)

    # Generate smoothed aggregation solver
    try:
        return smoothed_aggregation_solver(
            A,
            B=config['B'],
            BH=config['BH'],
            smooth=config['smooth'],
            strength=config['strength'],
            max_levels=config['max_levels'],
            max_coarse=config['max_coarse'],
            coarse_solver=config['coarse_solver'],
            symmetry=config['symmetry'],
            aggregate=config['aggregate'],
            presmoother=config['presmoother'],
            postsmoother=config['postsmoother'],
            keep=config['keep'])
    except Exception as exc:
        # FIX: catch Exception (not BaseException, which swallowed
        # KeyboardInterrupt/SystemExit) and chain the cause so the real
        # configuration error is not hidden from the caller.
        raise TypeError('Failed generating smoothed_aggregation_solver') from exc
def pixels(self, value: int) -> 'Gap':
    """Set the margin in pixels and return self to allow chaining."""
    # Validate before mutating state.
    raise_not_number(value)
    formatted = '{}px'.format(value)
    self.gap = formatted
    return self
def load_settings_sizes():
    """Load sizes from settings or fallback to the module constants"""
    def from_settings(setting_name, fallback):
        # A missing or falsy (None/0) setting falls back to the module constant.
        return getattr(settings, setting_name, None) or fallback

    # Page sizes
    page_size = from_settings('AGNOCOMPLETE_DEFAULT_PAGESIZE',
                              AGNOCOMPLETE_DEFAULT_PAGESIZE)
    page_size_min = from_settings('AGNOCOMPLETE_MIN_PAGESIZE',
                                  AGNOCOMPLETE_MIN_PAGESIZE)
    page_size_max = from_settings('AGNOCOMPLETE_MAX_PAGESIZE',
                                  AGNOCOMPLETE_MAX_PAGESIZE)
    # Query sizes
    query_size = from_settings('AGNOCOMPLETE_DEFAULT_QUERYSIZE',
                               AGNOCOMPLETE_DEFAULT_QUERYSIZE)
    query_size_min = from_settings('AGNOCOMPLETE_MIN_QUERYSIZE',
                                   AGNOCOMPLETE_MIN_QUERYSIZE)

    return (
        page_size,
        page_size_min,
        page_size_max,
        query_size,
        query_size_min,
    )
def dump_value(key, value, f, indent=0):
    '''Save a value of any libconfig type

    This function serializes takes ``key`` and ``value`` and serializes them
    into ``f``. If ``key`` is ``None``, a list-style output is produced.
    Otherwise, output has ``key = value`` format.
    '''
    # Indentation prefix for this nesting level.
    spaces = ' ' * indent
    if key is None:
        # List-element style: no "key =" prefix at all.
        key_prefix = ''
        key_prefix_nl = ''
    else:
        # key_prefix is used for scalars ("key = value"); key_prefix_nl puts
        # the opening bracket of a container on its own indented line.
        key_prefix = key + ' = '
        key_prefix_nl = key + ' =\n' + spaces
    # Dispatch on the libconfig type code returned by get_dump_type:
    # 'd' dict/group, 'l' list, 'a' array, 's' string, 'i'/'i64' int,
    # 'f' float, 'b' bool.
    dtype = get_dump_type(value)
    if dtype == 'd':
        # Group: "{ ... }" with members indented one level deeper.
        f.write(u'{}{}{{\n'.format(spaces, key_prefix_nl))
        dump_dict(value, f, indent + 4)
        f.write(u'{}}}'.format(spaces))
    elif dtype == 'l':
        # List: "( ... )".
        f.write(u'{}{}(\n'.format(spaces, key_prefix_nl))
        dump_collection(value, f, indent + 4)
        f.write(u'\n{})'.format(spaces))
    elif dtype == 'a':
        # Array: "[ ... ]" of homogeneous scalars.
        f.write(u'{}{}[\n'.format(spaces, key_prefix_nl))
        value_dtype = get_array_value_dtype(value)
        # If int array contains one or more Int64, promote all values to i64.
        if value_dtype == 'i64':
            value = [LibconfInt64(v) for v in value]
        dump_collection(value, f, indent + 4)
        f.write(u'\n{}]'.format(spaces))
    elif dtype == 's':
        f.write(u'{}{}{}'.format(spaces, key_prefix, dump_string(value)))
    elif dtype == 'i' or dtype == 'i64':
        f.write(u'{}{}{}'.format(spaces, key_prefix, dump_int(value)))
    elif dtype == 'f' or dtype == 'b':
        # Floats and booleans rely on their str() representation.
        f.write(u'{}{}{}'.format(spaces, key_prefix, value))
    else:
        raise ConfigSerializeError("Can not serialize object %r of type %s"
                                   % (value, type(value)))
def easy_time_extrator(self, string):
    """简单时间抽取，即年月日同时出现
    (Simple date extraction: year, month and day appearing together.)

    Keyword arguments:
    string -- 含有时间的文本, str类型
    """
    def _commit(str_year, str_month, str_day):
        # Accept only plausible dates: year 1970..current, month 1-12, day 1-31.
        check_year = datetime.datetime.now().year
        if (str_year in range(1970, check_year + 1)
                and str_month in range(1, 13)
                and str_day in range(1, 32)):
            self.year = str_year
            self.month = str_month
            self.day = str_day
            self.year_check = True
            self.month_check = True
            self.day_check = True

    # Chinese-numeral form, e.g. 二〇一九年五月三日
    try:
        if not self.year_check and not self.month_check and not self.day_check:
            str_all = re.search('([\u4e00-\u9fa5〇○]{4})年([\u4e00-\u9fa5]{1,3})月([\u4e00-\u9fa5]{1,3})日', string)
            _commit(self.str_to_num(str_all.group(1)),
                    self.str_to_num(str_all.group(2)),
                    self.str_to_num(str_all.group(3)))
    except Exception:
        # Deliberate best-effort: no match (str_all is None) or unparsable
        # numerals just leave the extractor state untouched.
        # FIX: narrowed from a bare `except:` which also trapped
        # KeyboardInterrupt/SystemExit.
        pass

    # Arabic-digit form, e.g. 2019-05-03 or 2019年5月3日
    try:
        if not self.year_check and not self.month_check and not self.day_check:
            str_all = re.search('(\d{4})[-._年](\d{1,2})[-._月](\d{1,2})', string)
            _commit(int(str_all.group(1)),
                    int(str_all.group(2)),
                    int(str_all.group(3)))
    except Exception:
        pass
def describe_thing_type(thingTypeName, region=None, key=None, keyid=None, profile=None):
    '''
    Given a thing type name describe its properties.

    Returns a dictionary of interesting properties.

    .. versionadded:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.describe_thing_type mythingtype
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        res = conn.describe_thing_type(thingTypeName=thingTypeName)
        if not res:
            return {'thing_type': None}
        res.pop('ResponseMetadata', None)
        meta = res.get('thingTypeMetadata')
        if meta:
            # Datetime values are not serializable; render them as strings.
            for field in ('creationDate', 'deprecationDate'):
                stamp = meta.get(field)
                if stamp and isinstance(stamp, datetime.date):
                    meta[field] = '{0}'.format(stamp)
        return {'thing_type': res}
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        # A missing thing type is reported as "not found", not as an error.
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'thing_type': None}
        return {'error': err}
def setup_logging(namespace):
    """setup global logging"""
    # Map -v count to a level; anything above 3 is DEBUG.
    levels = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    loglevel = levels.get(namespace.verbosity, logging.DEBUG)
    # More verbose runs get level and line-number detail in each record.
    if namespace.verbosity > 1:
        logformat = '%(levelname)s csvpandas %(lineno)s %(message)s'
    else:
        logformat = 'csvpandas %(message)s'
    logging.basicConfig(stream=namespace.log, format=logformat, level=loglevel)
def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):
    """Concatenate images whose sizes are different.

    @param imgs: image list which should be concatenated
                 (NOTE(review): this list is mutated in place — each entry is
                 replaced by its resized/centerized version)
    @param tile_shape: shape for which images should be concatenated
    @param result_img: numpy array to put result image
    """
    def resize(*args, **kwargs):
        # anti_aliasing arg cannot be passed to skimage<0.14
        # use LooseVersion to allow 0.14dev.
        if LooseVersion(skimage.__version__) < LooseVersion('0.14'):
            kwargs.pop('anti_aliasing', None)
        return skimage.transform.resize(*args, **kwargs)

    def get_tile_shape(img_num):
        # Near-square grid: rows = floor(sqrt(n)), columns grown until it fits.
        x_num = 0
        y_num = int(math.sqrt(img_num))
        while x_num * y_num < img_num:
            x_num += 1
        return y_num, x_num

    if tile_shape is None:
        tile_shape = get_tile_shape(len(imgs))

    # get max tile size to which each image should be resized
    # NOTE(review): despite the comment, min() is used, so this computes the
    # *smallest* height/width over all images — every image is shrunk to fit
    # the smallest tile. Confirm this is intended before renaming.
    max_height, max_width = np.inf, np.inf
    for img in imgs:
        max_height = min([max_height, img.shape[0]])
        max_width = min([max_width, img.shape[1]])

    # resize and concatenate images
    for i, img in enumerate(imgs):
        h, w = img.shape[:2]
        dtype = img.dtype
        # Uniform scale preserving aspect ratio, bounded by the tile size.
        h_scale, w_scale = max_height / h, max_width / w
        scale = min([h_scale, w_scale])
        h, w = int(scale * h), int(scale * w)
        img = resize(
            image=img,
            output_shape=(h, w),
            mode='reflect',
            preserve_range=True,
            anti_aliasing=True,
        ).astype(dtype)
        # Pad to the exact tile size; 3-channel images keep their channels.
        if len(img.shape) == 3:
            img = centerize(img, (max_height, max_width, 3), margin_color)
        else:
            img = centerize(img, (max_height, max_width), margin_color)
        imgs[i] = img
    return _tile_images(imgs, tile_shape, result_img)
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """Given an event_source dictionary, create the object and add the event source.

    Returns 'exists' for a dry run, otherwise 'successful' or 'failed'
    depending on whether the event source reports as attached afterwards.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False)
    # TODO: Detect changes in config and refine exists algorithm
    if dry:
        return 'exists'
    if not event_source_obj.status(funk):
        event_source_obj.add(funk)
    # FIX: the original had an unreachable trailing `return 'dryrun'` after
    # an if/else in which both branches returned; it has been removed.
    return 'successful' if event_source_obj.status(funk) else 'failed'
def _handle_one_message(self):
    """Handle one single incoming message on any socket.

    This is the inner loop of the main application loop. Returns True if
    further messages should be received, False otherwise (it should quit,
    that is).

    It is crucial that this class function always respond with a
    query_socket.sent() for every query_socket.recv() call. Otherwise,
    clients and/or server might be stuck in limbo.
    """
    requesttype = self.query_socket.recv()
    if requesttype == b"PUBLISH":
        self._handle_incoming_event()
        return True
    if requesttype == b"QUERY":
        self._handle_event_query()
        return True
    if self.exit_message is not None and requesttype == self.exit_message:
        _logger.warn("Asked to quit through an exit message."
                     "I'm quitting...")
        # Acknowledge the exit request, then signal the loop to stop.
        self.query_socket.send(b'QUIT')
        return False
    _logger.warn("Could not identify request type: %s", requesttype)
    self._handle_unknown_command()
    return True
def prune_graph(graph_str, package_name):
    """Prune a package graph so it only contains nodes accessible from the
    given package.

    Args:
        graph_str (str): Dot-language graph string.
        package_name (str): Name of package of interest.

    Returns:
        Pruned graph, as a string.
    """
    g = read_dot(graph_str)

    # find nodes of interest: those whose label parses to a request on the package
    matching_nodes = set()
    for node, attrs in g.node_attr.iteritems():
        labels = [x for x in attrs if x[0] == "label"]
        if not labels:
            continue
        try:
            request = PackageRequest(_request_from_label(labels[0][1]))
        except PackageRequestError:
            continue
        if request.name == package_name:
            matching_nodes.add(node)

    if not matching_nodes:
        raise ValueError("The package %r does not appear in the graph."
                         % package_name)

    # find nodes upstream from these nodes via the reversed graph
    access = accessibility(g.reverse())
    accessible_nodes = set()
    for node in matching_nodes:
        accessible_nodes |= set(access.get(node, []))

    # remove inaccessible nodes
    for node in set(g.nodes()) - accessible_nodes:
        g.del_node(node)

    return write_dot(g)
def add_group_coordinator(self, group, response):
    """Update with metadata for a group coordinator

    Arguments:
        group (str): name of group from GroupCoordinatorRequest
        response (GroupCoordinatorResponse): broker response

    Returns:
        bool: True if metadata is updated, False on error
    """
    log.debug("Updating coordinator for %s: %s", group, response)
    error_type = Errors.for_code(response.error_code)
    if error_type is not Errors.NoError:
        log.error("GroupCoordinatorResponse error: %s", error_type)
        self._groups[group] = -1
        return False

    node_id = response.coordinator_id
    coordinator = BrokerMetadata(
        response.coordinator_id,
        response.host,
        response.port,
        None)

    # Assume that group coordinators are just brokers
    # (this is true now, but could diverge in future)
    known = self._brokers.get(node_id)
    if known is None:
        self._brokers[node_id] = coordinator
    elif coordinator.host != known.host or coordinator.port != known.port:
        # If this happens, either brokers have moved without
        # changing IDs, or our assumption above is wrong
        log.error("GroupCoordinator metadata conflicts with existing"
                  " broker metadata. Coordinator: %s, Broker: %s",
                  coordinator, known)
        self._groups[group] = node_id
        return False

    log.info("Group coordinator for %s is %s", group, coordinator)
    self._groups[group] = node_id
    return True
def cli(env):
    """Print environment variables."""
    # Dict comprehension replaces the old dict([list-comp]) construction;
    # names starting with '_' are treated as private and hidden.
    filtered_vars = {k: v for k, v in env.vars.items() if not k.startswith('_')}
    env.fout(formatting.iter_to_table(filtered_vars))
def sg_leaky_relu(x, opt):
    r"""See [Xu, et al. 2015](https://arxiv.org/pdf/1505.00853v2.pdf)

    Args:
      x: A tensor
      opt:
        name: A name for the operation (optional).

    Returns:
      A `Tensor` with the same type and shape as `x`.
    """
    # Pass positives through unchanged; scale negatives by the 0.01 leak.
    is_positive = tf.greater(x, 0)
    return tf.where(is_positive, x, 0.01 * x, name=opt.name)
def resfinderreporter(self):
    """
    Custom reports for ResFinder analyses. These reports link the gene(s)
    found to their resistance phenotypes.

    Side effects: writes <analysistype>.xlsx into self.reportpath, and for
    each sample populates sample[self.analysistype].pipelineresults and
    .sampledata.
    """
    # Initialise resistance dictionaries from the notes.txt file
    resistance_classes = ResistanceNotes.classes(self.targetpath)
    # Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be
    # able to have appropriately sized, multi-line cells
    workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, '{}.xlsx'.format(self.analysistype)))
    # New worksheet to store the data
    worksheet = workbook.add_worksheet()
    # Add a bold format for header cells. Using a monotype font size 10
    # NOTE(review): the comments say size 10 but the formats use font_size 8.
    bold = workbook.add_format({'bold': True, 'font_name': 'Courier New', 'font_size': 8})
    # Format for data cells. Monotype, size 10, top vertically justified
    courier = workbook.add_format({'font_name': 'Courier New', 'font_size': 8})
    courier.set_align('top')
    # Initialise the position within the worksheet to be (0,0)
    row = 0
    col = 0
    # A dictionary to store the column widths for every header
    columnwidth = dict()
    # Tracks whether the extra alignment headers have been appended yet.
    extended = False
    headers = ['Strain', 'Gene', 'Allele', 'Resistance', 'PercentIdentity',
               'PercentCovered', 'Contig', 'Location', 'nt_sequence']
    # First pass: collect per-sample result rows (no spreadsheet writes yet).
    for sample in self.metadata:
        # Create an attribute to store the string for the eventual pipeline report
        sample[self.analysistype].pipelineresults = list()
        sample[self.analysistype].sampledata = list()
        try:
            blastresults = sample[self.analysistype].blastresults
        except AttributeError:
            # Samples without BLAST output are flagged with the 'NA' sentinel.
            blastresults = 'NA'
        # Process the sample only if the script could find targets
        if blastresults != 'NA':
            for result in sample[self.analysistype].blastresults:
                # Set the name to avoid writing out the dictionary[key] multiple times
                name = result['subject_id']
                # Use the ResistanceNotes gene name extraction method to get the necessary variables
                gname, genename, accession, allele = ResistanceNotes.gene_name(name)
                # Initialise a list to store all the data for each strain
                data = list()
                # Determine resistance phenotype of the gene
                resistance = ResistanceNotes.resistance(name, resistance_classes)
                # Append the necessary values to the data list
                data.append(genename)
                data.append(allele)
                data.append(resistance)
                percentid = result['percentidentity']
                data.append(percentid)
                data.append(result['alignment_fraction'])
                data.append(result['query_id'])
                # Location rendered as "low...high".
                data.append('...'.join([str(result['low']), str(result['high'])]))
                try:
                    # Populate the attribute storing the resfinder results
                    sample[self.analysistype].pipelineresults.append(
                        '{rgene} ({pid}%) {rclass}'.format(rgene=genename,
                                                           pid=percentid,
                                                           rclass=resistance))
                    # Only if the alignment option is selected, for inexact results, add alignments
                    if self.align and percentid != 100.00:
                        # Align the protein (and nucleotide) sequences to the reference
                        self.alignprotein(sample, name)
                        if not extended:
                            # Add the appropriate headers
                            headers.extend(['aa_Identity',
                                            'aa_Alignment',
                                            'aa_SNP_location',
                                            'nt_Alignment',
                                            'nt_SNP_location'])
                            extended = True
                        # Create a FASTA-formatted sequence output of the query sequence
                        record = SeqRecord(sample[self.analysistype].dnaseq[name],
                                           id='{}_{}'.format(sample.name, name),
                                           description='')
                        # Add the alignment, and the location of mismatches for both nucleotide and amino
                        # acid sequences
                        data.extend([record.format('fasta'),
                                     sample[self.analysistype].aaidentity[name],
                                     sample[self.analysistype].aaalign[name],
                                     sample[self.analysistype].aaindex[name],
                                     sample[self.analysistype].ntalign[name],
                                     sample[self.analysistype].ntindex[name]])
                    else:
                        # Exact matches: emit the subject sequence as FASTA.
                        record = SeqRecord(Seq(result['subject_sequence'],
                                               IUPAC.unambiguous_dna),
                                           id='{}_{}'.format(sample.name, name),
                                           description='')
                        data.append(record.format('fasta'))
                        if self.align:
                            # Add '-'s for the empty results, as there are no alignments for exact matches
                            data.extend(['100', '-', '-', '-', '-'])
                # If there are no blast results for the target, add a '-'
                except (KeyError, TypeError):
                    data.append('-')
                sample[self.analysistype].sampledata.append(data)
    # Guard: keep 'nt_sequence' in the header set even if it was never added.
    if 'nt_sequence' not in headers:
        headers.append('nt_sequence')
    # Write the header to the spreadsheet
    for header in headers:
        worksheet.write(row, col, header, bold)
        # Set the column width based on the longest header
        try:
            columnwidth[col] = len(header) if len(header) > columnwidth[col] else columnwidth[col]
        except KeyError:
            # First time this column is sized.
            columnwidth[col] = len(header)
        worksheet.set_column(col, col, columnwidth[col])
        col += 1
    # Increment the row and reset the column to zero in preparation of writing results
    row += 1
    col = 0
    # Second pass: write out the data to the spreadsheet
    for sample in self.metadata:
        if not sample[self.analysistype].sampledata:
            # Increment the row and reset the column to zero in preparation of writing results
            row += 1
            col = 0
            # Set the width of the row to be the number of lines (number of newline characters) * 12
            # NOTE(review): set_row is called here without a height argument,
            # so it applies the default height despite the comment above —
            # confirm whether a computed height was intended.
            worksheet.set_row(row)
            worksheet.set_column(col, col, columnwidth[col])
        for data in sample[self.analysistype].sampledata:
            columnwidth[col] = len(sample.name) + 2
            worksheet.set_column(col, col, columnwidth[col])
            worksheet.write(row, col, sample.name, courier)
            col += 1
            # List of the number of lines for each result
            totallines = list()
            for results in data:
                worksheet.write(row, col, results, courier)
                try:
                    # Counting the length of multi-line strings yields columns that are far too wide, only count
                    # the length of the string up to the first line break
                    alignmentcorrect = len(str(results).split('\n')[1])
                    # Count the number of lines for the data
                    lines = results.count('\n') if results.count('\n') >= 1 else 1
                    # Add the number of lines to the list
                    totallines.append(lines)
                except IndexError:
                    # Single-line value: fall back to the first (only) segment.
                    try:
                        # Counting the length of multi-line strings yields columns that are far too wide, only count
                        # the length of the string up to the first line break
                        alignmentcorrect = len(str(results).split('\n')[0])
                        # Count the number of lines for the data
                        lines = results.count('\n') if results.count('\n') >= 1 else 1
                        # Add the number of lines to the list
                        totallines.append(lines)
                    # If there are no newline characters, set the width to the length of the string
                    except AttributeError:
                        # Non-string value (e.g. a float): no count() method.
                        alignmentcorrect = len(str(results))
                        lines = 1
                        # Add the number of lines to the list
                        totallines.append(lines)
                # Increase the width of the current column, if necessary
                try:
                    columnwidth[col] = alignmentcorrect if alignmentcorrect > columnwidth[col] else columnwidth[col]
                except KeyError:
                    columnwidth[col] = alignmentcorrect
                worksheet.set_column(col, col, columnwidth[col])
                col += 1
            # Set the width of the row to be the number of lines (number of newline characters) * 12
            # NOTE(review): the multiplier used is 11, not the 12 the comment claims.
            worksheet.set_row(row, max(totallines) * 11)
            # Increase the row counter for the next strain's data
            row += 1
            col = 0
    # Close the workbook
    workbook.close()
def read_query(self, sql, index_col=None, coerce_float=True, parse_dates=None,
               params=None, chunksize=None):
    """Execute a SQL query and return the result as a DataFrame.

    Parameters
    ----------
    sql : str
        SQL query to be executed.
    index_col : str, optional
        Column name to use as index for the returned DataFrame.
    coerce_float : bool, default True
        Attempt to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point.
    parse_dates : list or dict, optional
        Columns to parse as dates; see :func:`pandas.to_datetime` for
        the accepted forms. Useful for databases without native
        Datetime support, such as SQLite.
    params : list, tuple or dict, optional
        Parameters to pass to the execute method. The syntax is
        database-driver dependent (one of the PEP 249 paramstyles).
    chunksize : int, optional
        If specified, return an iterator yielding DataFrames of at most
        ``chunksize`` rows instead of a single DataFrame.

    Returns
    -------
    DataFrame or iterator of DataFrame

    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql
    """
    cursor_args = _convert_params(sql, params)
    result = self.execute(*cursor_args)
    columns = result.keys()

    # Streaming variant: hand back a lazy iterator over row chunks.
    if chunksize is not None:
        return self._query_iterator(result, chunksize, columns,
                                    index_col=index_col,
                                    coerce_float=coerce_float,
                                    parse_dates=parse_dates)

    # Eager variant: fetch everything and wrap it in one frame.
    rows = result.fetchall()
    return _wrap_result(rows, columns, index_col=index_col,
                        coerce_float=coerce_float,
                        parse_dates=parse_dates)
def initialize(
    plugins,
    exclude_files_regex=None,
    exclude_lines_regex=None,
    path='.',
    scan_all_files=False,
):
    """Scan the codebase under *path* for secrets.

    :type plugins: tuple of detect_secrets.plugins.base.BasePlugin
    :param plugins: rules to initialize the SecretsCollection with.

    :type exclude_files_regex: str|None
    :type exclude_lines_regex: str|None
    :type path: str
    :type scan_all_files: bool

    :rtype: SecretsCollection
    """
    collection = SecretsCollection(
        plugins,
        exclude_files=exclude_files_regex,
        exclude_lines=exclude_lines_regex,
    )

    if os.path.isfile(path):
        # Ad-hoc usage: scan exactly the file that was pointed at.
        candidates = [path]
    elif scan_all_files:
        candidates = _get_files_recursively(path)
    else:
        # Default: only files tracked by git.
        candidates = _get_git_tracked_files(path)

    if not candidates:
        return collection

    if exclude_files_regex:
        pattern = re.compile(exclude_files_regex, re.IGNORECASE)
        candidates = [name for name in candidates if not pattern.search(name)]

    for name in candidates:
        collection.scan_file(name)

    return collection
def emit(*args):
    """Pack *args* into a Quad (3-address code) instruction and append
    it to the global MEMORY instruction stream."""
    instruction = Quad(*args)
    __DEBUG__('EMIT ' + str(instruction))
    MEMORY.append(instruction)
def reject(self):
    """Reject this message so that the server discards it.

    :raises MessageStateError: if the message has already been
        acknowledged/requeued/rejected.
    """
    if not self.acknowledged:
        self.backend.reject(self.delivery_tag)
        self._state = "REJECTED"
        return
    raise self.MessageStateError(
        "Message already acknowledged with state: %s" % self._state)
def project_decrease_permissions(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /project-xxxx/decreasePermissions API method.

    :param object_id: ID of the project whose permissions are reduced
    :param input_params: JSON-serializable request body; defaults to an
        empty mapping
    :param always_retry: whether the request may safely be retried
    :returns: the decoded response from :func:`DXHTTPRequest`

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2FdecreasePermissions
    """
    # Use a None sentinel instead of a mutable `{}` default: a default
    # dict is created once at definition time and would be shared (and
    # potentially mutated) across all calls.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/decreasePermissions' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def get_state(self):
    """Return the sampler's current state so sampling can be restarted
    at a later time.

    Returns a dict with two keys:
      - 'sampler': attribute name -> value for each attribute named in
        ``self._state`` (the sampler's own state).
      - 'stochastics': parameter name -> current value for each entry
        in ``self.stochastics``.
    """
    sampler_state = {name: getattr(self, name) for name in self._state}
    stochastic_state = {s.__name__: s.value for s in self.stochastics}
    return {'sampler': sampler_state, 'stochastics': stochastic_state}
def acquire(self, blocking=True, timeout=None, shared=False):
    """Acquire the lock in shared or exclusive mode.

    :param blocking: whether to block waiting for the lock
    :param timeout: how long to wait, or None to wait indefinitely
    :param shared: acquire in shared (read) mode instead of exclusive
    """
    with self._lock:
        acquire_fn = self._acquire_shared if shared else self._acquire_exclusive
        acquire_fn(blocking, timeout)
        # Sanity check: the lock must never be held in both modes.
        assert not (self.is_shared and self.is_exclusive)
def matchers() -> List[SimAlgorithm]:
    """Return the similarity algorithms usable as matchers in owlsim2."""
    names = ('PHENODIGM', 'JACCARD', 'SIM_GIC', 'RESNIK', 'SYMMETRIC_RESNIK')
    return [getattr(SimAlgorithm, name) for name in names]
def validate(self, path):
    """Return True if *path* passes every configured filter set.

    Checks, in order: hidden-name exclusion, path depth limits,
    per-level filters, substring exclusions, substring inclusions.
    """
    # Hidden files and folders ('.'-prefixed basenames) never pass.
    if os.path.basename(path).startswith('.'):
        return False

    # Current path depth must lie between the min and max levels.
    if not self.check_level(path):
        return False

    if self.filters:
        if not self._level_filters(path):
            return False

    # Substring matching is case-insensitive; terms are coerced to str
    # so integer filter values also work.
    haystack = path.lower()
    if self.to_exclude:
        if any(str(term).lower() in haystack for term in self.to_exclude):
            return False
    if self.to_include:
        if not any(str(term).lower() in haystack for term in self.to_include):
            return False
    return True
def inverse(self, encoded, duration=None):
    """Inverse transformation: decode a frame-level encoding back into
    a jams Annotation of labelled intervals.

    :param encoded: 2d array of per-frame activations, indexed as
        ``encoded[frame, class]``
    :param duration: total duration of the annotation, in seconds
    :returns: a ``jams.Annotation`` in ``self.namespace``
    """
    annotation = jams.Annotation(namespace=self.namespace, duration=duration)

    intervals = self.decode_intervals(encoded,
                                      duration=duration,
                                      transition=self.transition,
                                      p_init=self.p_init,
                                      p_state=self.p_state)
    for start, end, value in intervals:
        # Convert the interval endpoints from seconds to frame indices.
        f_start, f_end = time_to_frames([start, end],
                                        sr=self.sr,
                                        hop_length=self.hop_length)

        # Confidence is the mean activation over the spanned frames.
        confidence = np.mean(encoded[f_start:f_end + 1, value])

        decoded = self.encoder.inverse_transform(np.atleast_2d(value))[0]
        for label in decoded:
            annotation.append(time=start,
                              duration=end - start,
                              value=label,
                              confidence=confidence)

    return annotation
def get_selected_uuidtab(self):  # TODO DBUS ONLY
    """Return the uuid (as a string) of the currently selected terminal."""
    current = self.get_notebook().get_current_page()
    terms = self.get_notebook().get_terminals_for_page(current)
    # The first terminal on the page is the selected one.
    return str(terms[0].get_uuid())
def number_occurences(self, proc):
    """Return the number of commands whose text contains *proc*.

    :param proc: substring matched anywhere in the command path, name
        or arguments
    :returns: int, count of matching rows in ``self.data``
    """
    # Sum a generator instead of materializing a throwaway list of
    # True values just to take its length.
    return sum(1 for row in self.data if proc in row[self.command_name])
def word_count(ctx, text, by_spaces=False):
    """Return the number of words in the given text string.

    :param by_spaces: when truthy, split on spaces only rather than on
        all word boundaries
    """
    words = __get_words(conversions.to_string(text, ctx),
                        conversions.to_boolean(by_spaces, ctx))
    return len(words)
def error(request, message, extra_tags='', fail_silently=False):
    """Add a message with the ``ERROR`` level."""
    add_message(
        request,
        constants.ERROR,
        message,
        extra_tags=extra_tags,
        fail_silently=fail_silently,
    )
def get_time_to_merge_request_response(self, item):
    """Return the earliest date at which a review was made on the PR by
    someone other than the PR author, or None if there is no such
    review."""
    author = item['user']['login']
    earliest = None
    for review in item['review_comments_data']:
        # Skip the author's own review comments.
        if review['user']['login'] == author:
            continue
        when = str_to_datetime(review['created_at'])
        if earliest is None or when < earliest:
            earliest = when
    return earliest
def query(url, **kwargs):
    '''
    Query a resource, and decode the return data

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    .. autofunction:: salt.utils.http.query

    CLI Example:

    .. code-block:: bash

        salt '*' http.query http://somelink.com/
        salt '*' http.query http://somelink.com/ method=POST params='key1=val1&key2=val2'
        salt '*' http.query http://somelink.com/ method=POST data='<xml>somecontent</xml>'

    For more information about the ``http.query`` module, refer to the
    :ref:`HTTP Tutorial <tutorial-http>`.
    '''
    opts = __opts__.copy()
    # A caller-supplied 'opts' overlays the minion opts for this call
    # only; it must not be forwarded to salt.utils.http.query.
    if 'opts' in kwargs:
        opts.update(kwargs.pop('opts'))
    return salt.utils.http.query(url=url, opts=opts, **kwargs)
def lt(self, other, axis="columns", level=None):
    """Element-wise "less than" comparison against *other*.

    Args:
        other: A DataFrame, Series or scalar to compare to.
        axis: The axis over which to perform the comparison.
        level: The MultiIndex level to apply the comparison over.

    Returns:
        A new DataFrame filled with booleans.
    """
    return self._binary_op("lt", other, axis=axis, level=level)
def _parse_outgoing_mail(sender, to, msgstring):
    """Parse an outgoing mail and append it to the module-level OUTBOX.

    Arguments:
    - `sender`: str (unused here; kept for the callback signature)
    - `to`: str (unused here; kept for the callback signature)
    - `msgstring`: str, the raw message text

    Return: None
    Exceptions: None
    """
    # Appending mutates the existing module-level list, so no `global`
    # declaration is required.
    OUTBOX.append(email.message_from_string(msgstring))
def get_http_connection(self, host, is_secure):
    """Return an async connection object for the named host.

    NOTE(review): despite the name, nothing is pooled or reused here —
    a fresh connection object is constructed on every call.
    """
    conn_cls = AsyncHTTPSConnection if is_secure else AsyncHTTPConnection
    return conn_cls(host, http_client=self._httpclient)
def generate(directory, outfilename=DEFAULT_BUILDFILE):
    """Generate a build-file for quilt build from a directory of source
    files.

    :raises CommandException: if the build-file could not be generated
    """
    try:
        buildfilepath = generate_build_file(directory, outfilename=outfilename)
    except BuildException as err:
        # Re-surface build problems as a command-level error.
        raise CommandException(str(err))
    print("Generated build-file %s." % (buildfilepath))
def parse_mixed_delim_str(line):
    """Turn an .obj face index line (e.g. ``'1/2/3 4/5/6'``) into three
    numeric tuples: (verts, texcoords, normals).

    Empty fields (as in ``'1//3'``) are skipped, so the three tuples
    may have different lengths.
    """
    verts, texcoords, normals = [], [], []
    columns = (verts, texcoords, normals)
    for group in line.split(' '):
        for col, field in enumerate(group.split('/')):
            if field:
                columns[col].append(int(field))
    return [tuple(column) for column in columns]
def check_spam(self, ip=None, email=None, name=None, login=None, realname=None,
               subject=None, body=None, subject_type='plain', body_type='plain'):
    """Check a comment for spam via the Yandex CleanWeb API.

    http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml

    :param subject_type: one of plain|html|bbcode
    :param body_type: one of plain|html|bbcode
    :returns: dict with 'id', 'spam_flag' (bool) and 'links', a list of
        (href, spam_flag) pairs
    """
    payload = {
        'ip': ip,
        'email': email,
        'name': name,
        'login': login,
        'realname': realname,
    }
    # The body/subject field names encode their markup type.
    payload['body-%s' % body_type] = body
    payload['subject-%s' % subject_type] = subject

    response = self.request('post',
                            'http://cleanweb-api.yandex.ru/1.0/check-spam',
                            data=payload)
    root = ET.fromstring(response.content)

    links = [(link.attrib['href'], yesnobool(link.attrib['spam-flag']))
             for link in root.findall('./links/link')]
    return {
        'id': root.findtext('id'),
        'spam_flag': yesnobool(root.find('text').attrib['spam-flag']),
        'links': links,
    }
def version_at_least(version_string, major, minor, micro, patch):
    """Return True if *version_string* represents a Tor version of at
    least ``major.minor.micro.patch``, ignoring any trailing
    specifiers (e.g. '-alpha')."""
    match = re.match(
        r'^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+).*$',
        version_string,
    )
    # Tuples compare lexicographically, which is exactly the desired
    # component-by-component ordering.
    actual = tuple(int(part) for part in match.group(1, 2, 3, 4))
    return actual >= (major, minor, micro, patch)
def _create_tag_highlevel(self, tag_name, message=None):
    """Create a tag at the top level.

    With no patch repo, tag the toplevel repo directly.  With a patch
    repo, tag the patch repo and set a bookmark on the top repo (the
    top repo itself cannot be changed in that configuration).

    Returns a list with one dict per bookmark or tag created, each of
    the form ``{'type': ('bookmark' or 'tag'), 'patch': bool}``.
    """
    created = []

    if not self.patch_path:
        if self._create_tag_lowlevel(tag_name, message=message, patch=False):
            created.append({'type': 'tag', 'patch': False})
        return created

    # Tag the patch queue...
    if self._create_tag_lowlevel(tag_name, message=message, patch=True):
        created.append({'type': 'tag', 'patch': True})
    # ...and bookmark the main repo, since we can't change it.
    self.hg('bookmark', '-f', tag_name)
    created.append({'type': 'bookmark', 'patch': False})
    return created
def flux_units(self, units):
    """A setter for the flux units of the zeropoint flux density.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density

    Raises
    ------
    ValueError
        If *units* is not an astropy unit-like object
    """
    # Only astropy unit-like objects are accepted.
    accepted = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, accepted):
        raise ValueError(units, "units not understood.")

    # Only act when the units actually change.
    if units != self.flux_units:
        # Convert the zeropoint, using spectral-density equivalencies
        # at the effective wavelength.
        equivalencies = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=equivalencies)

        # Record the new units.
        self._flux_units = units