signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def remove_security_group(self, name):
    """Delete every attached security group whose name matches.

    :param name: the ``isc_name`` of the group(s) to remove.
    """
    for grp in self.security_groups:
        if grp.isc_name != name:
            continue
        grp.delete()
def HandleVersion(self, payload):
    """Process the response of `self.RequestVersion`.

    Deserializes the peer's version payload and completes the handshake:
    incoming connections get a verack reply, outgoing connections record
    the peer nonce and send our own version.
    """
    self.Version = IOHelper.AsSerializableWithType(payload, "neo.Network.Payloads.VersionPayload.VersionPayload")
    if not self.Version:
        # Payload failed to deserialize; abort silently.
        return
    if self.incoming_client:
        # A nonce equal to our own node id means we connected to ourselves.
        if self.Version.Nonce == self.nodeid:
            self.Disconnect()
        self.SendVerack()
    else:
        # Outgoing connection: remember the peer's nonce, then identify ourselves.
        self.nodeid = self.Version.Nonce
        self.SendVersion()
def form_group_wrapped(f):
    """Wrap a rendered field within a bootstrap ``form-group`` div.

    The ``has-error`` class is added to the wrapper whenever the field
    carries validation errors.
    """
    @wraps(f)
    def wrapped(self, field, *args, **kwargs):
        css_classes = ['form-group'] + (['has-error'] if field.errors else [])
        rendered = f(self, field, *args, **kwargs)
        html = """<div class="{classes}">{rendered_field}</div>""".format(
            classes=' '.join(css_classes), rendered_field=rendered)
        return HTMLString(html)
    return wrapped
def toArray(self):
    """Returns a copy of this SparseVector as a 1-dimensional NumPy array."""
    # Start from an all-zero dense vector, then scatter the stored values
    # into their index positions.
    dense = np.zeros((self.size,), dtype=np.float64)
    dense[self.indices] = self.values
    return dense
def request(self, api_call, data=None, api_version=None, http_method=None,
            concurrent_scans_retries=0, concurrent_scans_retry_delay=0):
    """Return QualysGuard API response.

    :param api_call: endpoint to call (preformatted internally).
    :param data: payload to send; formatted per API version when not None.
    :param api_version: explicit API version; auto-detected when falsy.
    :param http_method: 'get' or POST (default chosen per call when falsy).
    :param concurrent_scans_retries: extra attempts when the concurrent-scan
        limit response is detected.
    :param concurrent_scans_retry_delay: seconds to sleep between such retries.
    :returns: response body text, or False on known failure conditions.
    """
    logger.debug('api_call =\n%s' % api_call)
    logger.debug('api_version =\n%s' % api_version)
    logger.debug('data %s =\n %s' % (type(data), str(data)))
    logger.debug('http_method =\n%s' % http_method)
    logger.debug('concurrent_scans_retries =\n%s' % str(concurrent_scans_retries))
    logger.debug('concurrent_scans_retry_delay =\n%s' % str(concurrent_scans_retry_delay))
    concurrent_scans_retries = int(concurrent_scans_retries)
    concurrent_scans_retry_delay = int(concurrent_scans_retry_delay)
    # Determine API version.
    # Preformat call.
    api_call = self.preformat_call(api_call)
    if api_version:
        # API version specified, format API version inputted.
        api_version = self.format_api_version(api_version)
    else:
        # API version not specified, determine automatically.
        api_version = self.which_api_version(api_call)
    # Set up base url.
    url = self.url_api_version(api_version)
    # Set up headers.
    headers = {"X-Requested-With": "Parag Baxi QualysAPI (python) v%s" % (qualysapi.version.__version__,)}
    logger.debug('headers =\n%s' % (str(headers)))
    # Portal API takes in XML text, requiring custom header.
    if api_version in ('am', 'was', 'am2'):
        headers['Content-type'] = 'text/xml'
    # Set up http request method, if not specified.
    if not http_method:
        http_method = self.format_http_method(api_version, api_call, data)
    logger.debug('http_method =\n%s' % http_method)
    # Format API call.
    api_call = self.format_call(api_version, api_call)
    logger.debug('api_call =\n%s' % (api_call))
    # Append api_call to url.
    url += api_call
    # Format data, if applicable.
    if data is not None:
        data = self.format_payload(api_version, data)
    # Make request at least once (more if concurrent_retry is enabled).
    retries = 0
    # set a warning threshold for the rate limit
    rate_warn_threshold = 10
    while retries <= concurrent_scans_retries:
        # Make request.
        logger.debug('url =\n%s' % (str(url)))
        logger.debug('data =\n%s' % (str(data)))
        logger.debug('headers =\n%s' % (str(headers)))
        if http_method == 'get':
            # GET
            logger.debug('GET request.')
            request = self.session.get(url, params=data, auth=self.auth,
                                       headers=headers, proxies=self.proxies)
        else:
            # POST
            logger.debug('POST request.')
            # Make POST request.
            request = self.session.post(url, data=data, auth=self.auth,
                                        headers=headers, proxies=self.proxies)
        logger.debug('response headers =\n%s' % (str(request.headers)))
        # Remember how many times left user can make against api_call.
        # NOTE: the local name `request` holds the *response* object here.
        try:
            self.rate_limit_remaining[api_call] = int(request.headers['x-ratelimit-remaining'])
            logger.debug('rate limit for api_call, %s = %s' % (api_call, self.rate_limit_remaining[api_call]))
            if (self.rate_limit_remaining[api_call] > rate_warn_threshold):
                logger.debug('rate limit for api_call, %s = %s' % (api_call, self.rate_limit_remaining[api_call]))
            elif (self.rate_limit_remaining[api_call] <= rate_warn_threshold) and (self.rate_limit_remaining[api_call] > 0):
                logger.warning('Rate limit is about to being reached (remaining api calls = %s)' % self.rate_limit_remaining[api_call])
            elif self.rate_limit_remaining[api_call] <= 0:
                logger.critical('ATTENTION! RATE LIMIT HAS BEEN REACHED (remaining api calls = %s)!' % self.rate_limit_remaining[api_call])
        except KeyError as e:
            # Likely a bad api_call.
            logger.debug(e)
            pass
        except TypeError as e:
            # Likely an asset search api_call.
            logger.debug(e)
            pass
        # Response received.
        response = request.text
        logger.debug('response text =\n%s' % (response))
        # Keep track of how many retries.
        retries += 1
        # Check for concurrent scans limit.
        if not ('<responseCode>INVALID_REQUEST</responseCode>' in response and
                '<errorMessage>You have reached the maximum number of concurrent running scans' in response and
                '<errorResolution>Please wait until your previous scans have completed</errorResolution>' in response):
            # Did not hit concurrent scan limit.
            break
        else:
            # Hit concurrent scan limit.
            logger.critical(response)
            # If trying again, delay next try by concurrent_scans_retry_delay.
            if retries <= concurrent_scans_retries:
                logger.warning('Waiting %d seconds until next try.' % concurrent_scans_retry_delay)
                time.sleep(concurrent_scans_retry_delay)
                # Inform user of how many retries.
                logger.critical('Retry #%d' % retries)
            else:
                # Ran out of retries. Let user know.
                print('Alert! Ran out of concurrent_scans_retries!')
                logger.critical('Alert! Ran out of concurrent_scans_retries!')
                return False
    # Check to see if there was an error.
    try:
        request.raise_for_status()
    except requests.HTTPError as e:
        # Error
        print('Error! Received a 4XX client error or 5XX server error response.')
        print('Content = \n', response)
        logger.error('Content = \n%s' % response)
        print('Headers = \n', request.headers)
        logger.error('Headers = \n%s' % str(request.headers))
        request.raise_for_status()
    if '<RETURN status="FAILED" number="2007">' in response:
        print('Error! Your IP address is not in the list of secure IPs. Manager must include this IP (QualysGuard VM > Users > Security).')
        print('Content = \n', response)
        logger.error('Content = \n%s' % response)
        print('Headers = \n', request.headers)
        logger.error('Headers = \n%s' % str(request.headers))
        return False
    return response
def clean_dateobject_to_string(x):
    """Convert a Pandas Timestamp object or datetime object to a
    'YYYY-MM-DD' string.

    Parameters
    ----------
    x : object, list, tuple, numpy.ndarray, pandas.DataFrame
        A Timestamp/datetime object (anything exposing ``strftime``), or
        an array-like of such objects.

    Returns
    -------
    y : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A 'YYYY-MM-DD' string or an array of date strings.  Elements that
        fail to format yield None, and unsupported inputs return None.
    """
    import numpy as np
    import pandas as pd

    def _fmt(obj):
        # Any object with a working strftime is accepted.
        try:
            return obj.strftime("%Y-%m-%d")
        except Exception as err:
            print(err)
            return None

    def _fmt_seq(seq):
        return [_fmt(item) for item in seq]

    def _fmt_array(arr):
        flat = _fmt_seq(list(arr.reshape((arr.size,))))
        return np.array(flat).reshape(arr.shape)

    # Dispatch on the input container type.
    if "strftime" in dir(x):
        return _fmt(x)
    if isinstance(x, (list, tuple)):
        return _fmt_seq(x)
    if isinstance(x, np.ndarray):
        return _fmt_array(x)
    if isinstance(x, pd.DataFrame):
        return pd.DataFrame(_fmt_array(x.values), columns=x.columns, index=x.index)
    return None
async def getNodeByBuid(self, buid):
    '''Retrieve a node tuple by binary id.

    Args:
        buid (bytes): The binary ID for the node.

    Returns:
        Optional[s_node.Node]: The node object or None.
    '''
    # Fast path: node already resident in the live-node cache.
    node = self.livenodes.get(buid)
    if node is not None:
        return node
    props = {}
    proplayr = {}
    # Merge property dicts from every layer (later layers override earlier
    # ones); proplayr records which layer supplied each property key.
    for layr in self.layers:
        layerprops = await layr.getBuidProps(buid)
        props.update(layerprops)
        proplayr.update({k: layr for k in layerprops})
    node = s_node.Node(self, buid, props.items(), proplayr=proplayr)
    # Give other tasks a chance to run
    await asyncio.sleep(0)
    # No ndef means the buid resolved to nothing meaningful.
    if node.ndef is None:
        return None
    # Add node to my buidcache
    self.buidcache.append(node)
    self.livenodes[buid] = node
    return node
def printPolicy(policy):
    """Print out a policy vector as a table to console.

    Let ``S`` = number of states.  The output table has the population
    class as rows and the years since a fire as columns; each cell is the
    optimal action for that (population class, fire class) pair.

    Parameters
    ----------
    policy : array
        a numpy array of length ``S``.
    """
    table = np.array(policy).reshape(POPULATION_CLASSES, FIRE_CLASSES)
    cols = range(FIRE_CLASSES)
    # Header row of fire-class indices, then a separator line.
    print(" " + " ".join("%2d" % f for f in cols))
    print(" " + "---" * FIRE_CLASSES)
    for row in range(POPULATION_CLASSES):
        print(" %2d|" % row + " ".join("%2d" % table[row, f] for f in cols))
def helical_laminar_fd_Schmidt(Re, Di, Dc):
    r'''Calculates Darcy friction factor for a fluid flowing inside a curved
    pipe such as a helical coil under laminar conditions, using the method
    of Schmidt [1]_.

    .. math::
        f_{curved} = f_{\text{straight,laminar}} \left[1 + 0.14\left(
        \frac{D_i}{D_c}\right)^{0.97} Re^{\left[1 - 0.644\left(
        \frac{D_i}{D_c}\right)^{0.312}\right]}\right]

    Parameters
    ----------
    Re : float
        Reynolds number with `D=Di`, [-]
    Di : float
        Inner diameter of the coil, [m]
    Dc : float
        Diameter of the helix/coil, center-to-center of the tube, [m]

    Returns
    -------
    fd : float
        Darcy friction factor for a curved pipe [-]

    Notes
    -----
    Stated valid for :math:`100 < Re < Re_{critical}`; as the curvature
    becomes negligible the straight-tube result is recovered.

    Examples
    --------
    >>> helical_laminar_fd_Schmidt(250, .02, .1)
    0.47460725672835236

    References
    ----------
    .. [1] Schmidt, Eckehard F. "Wärmeübergang Und Druckverlust in
       Rohrschlangen." Chemie Ingenieur Technik 39, no. 13 (1967): 781-89.
       doi:10.1002/cite.330391302.
    '''
    ratio = Di/Dc
    exponent = 1. - 0.644*ratio**0.312
    curvature_correction = 1. + 0.14*ratio**0.97*Re**exponent
    return friction_laminar(Re)*curvature_correction
def blockhash_from_blocknumber(self, block_number: BlockSpecification) -> BlockHash:
    """Given a block number, query the chain to get its corresponding block hash"""
    block = self.get_block(block_number)
    # bytes(...) normalizes/copies the raw hash value before wrapping it
    # in the BlockHash type.
    return BlockHash(bytes(block['hash']))
def make_job(job_name, **kwargs):
    """Decorator to create a Job from a function.

    Give a job name and add extra fields to the job::

        @make_job("ExecuteDecJob",
                  command=mongoengine.StringField(required=True),
                  output=mongoengine.StringField(default=None))
        def execute(job: Job):
            ...
    """
    # Inner decorator renamed from the original's `wraps` to avoid
    # confusion with functools.wraps (pure rename, same behavior).
    def decorator(func):
        kwargs['process'] = func
        job_cls = type(job_name, (Job,), kwargs)
        # Expose the generated class at module level under job_name.
        globals()[job_name] = job_cls
        return job_cls
    return decorator
def intern_atom(self, name, only_if_exists=0):
    """Intern the string name, returning its atom number.

    If only_if_exists is true and the atom does not already exist, it
    will not be created and X.NONE is returned.
    """
    reply = request.InternAtom(display=self.display,
                               name=name,
                               only_if_exists=only_if_exists)
    return reply.atom
def _get_rsa_key(self):
    '''get steam RSA key, build and return cipher

    Fetches the login RSA public key for self._username from the Steam
    mobile endpoint and stores a PKCS1 v1.5 cipher plus the server
    timestamp on self.

    Raises:
        SteamWebError: when the endpoint reports success == False.
    '''
    url = 'https://steamcommunity.com/mobilelogin/getrsakey/'
    values = {
        'username': self._username,
        # donotcache acts as a cache-buster parameter.
        'donotcache': self._get_donotcachetime(),
    }
    req = self.post(url, data=values)
    data = req.json()
    if not data['success']:
        raise SteamWebError('Failed to get RSA key', data)
    # Construct RSA and cipher
    # Modulus and exponent arrive as hex strings.
    mod = int(str(data['publickey_mod']), 16)
    exp = int(str(data['publickey_exp']), 16)
    rsa = RSA.construct((mod, exp))
    self.rsa_cipher = PKCS1_v1_5.new(rsa)
    # Timestamp must be echoed back when submitting the encrypted password.
    self.rsa_timestamp = data['timestamp']
def flexifunction_directory_ack_send(self, target_system, target_component, directory_type, start_index, count, result, force_mavlink1=False):
    '''Acknowldge sucess or failure of a flexifunction command

    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    directory_type    : 0=inputs, 1=outputs (uint8_t)
    start_index       : index of first directory entry to write (uint8_t)
    count             : count of directory entries to write (uint8_t)
    result            : result of acknowledge, 0=fail, 1=good (uint16_t)
    '''
    msg = self.flexifunction_directory_ack_encode(
        target_system, target_component, directory_type,
        start_index, count, result)
    return self.send(msg, force_mavlink1=force_mavlink1)
def create_string(self, value: str) -> String:
    """Creates a new :class:`ConstantString`, adding it to the pool and
    returning it.

    :param value: The value of the new string as a UTF8 string.
    """
    # Append a (tag, utf8_index) entry; tag 8 marks a string constant
    # referencing the UTF8 entry created for `value`.
    self.append((8, self.create_utf8(value).index))
    # The new entry is the last one in the pool.
    return self.get(self.raw_count - 1)
def create(self, name, plugin_name, plugin_version, cluster_template_id=None,
           default_image_id=None, is_transient=None, description=None,
           cluster_configs=None, node_groups=None, user_keypair_id=None,
           anti_affinity=None, net_id=None, count=None, use_autoconfig=None,
           shares=None, is_public=None, is_protected=None):
    """Launch a Cluster."""
    # Only the mandatory identity fields go into the body dict; all
    # optional settings are forwarded to the shared creation helper.
    body = {
        'name': name,
        'plugin_name': plugin_name,
        'plugin_version': plugin_version,
    }
    return self._do_create(body, cluster_template_id, default_image_id,
                           is_transient, description, cluster_configs,
                           node_groups, user_keypair_id, anti_affinity,
                           net_id, count, use_autoconfig, shares,
                           is_public, is_protected, api_ver=2)
def open(self, path, delimiter=None, mode='r', buffering=-1, encoding=None,
         errors=None, newline=None):
    """Reads and parses input files as defined.

    If delimiter is not None, the file is read in bulk then split on it.
    If it is None (the default), the file is parsed as a sequence of
    lines.  The remaining options are passed directly to builtins.open,
    except that write/append modes are not allowed.

    :param path: path to file
    :param delimiter: delimiter to split joined text on; if None, per-line split
    :param mode: file open mode (only r, b, t allowed)
    :param buffering: passed to builtins.open
    :param encoding: passed to builtins.open
    :param errors: passed to builtins.open
    :param newline: passed to builtins.open
    :return: output of file depending on options wrapped in a Sequence via seq
    """
    # Reject any mode containing write/append flags.
    if not re.match('^[rbt]{1,3}$', mode):
        raise ValueError('mode argument must be only have r, b, and t')
    opener = get_read_function(path, self.disable_compression)
    handle = opener(path, mode=mode, buffering=buffering,
                    encoding=encoding, errors=errors, newline=newline)
    if delimiter is None:
        return self(handle)
    return self(''.join(list(handle)).split(delimiter))
def add_page(self, page, process_id, wit_ref_name):
    """AddPage.

    [Preview API] Adds a page to the work item form.

    :param :class:`<Page> <azure.devops.v5_0.work_item_tracking_process.models.Page>` page: The page.
    :param str process_id: The ID of the process.
    :param str wit_ref_name: The reference name of the work item type.
    :rtype: :class:`<Page> <azure.devops.v5_0.work_item_tracking_process.models.Page>`
    """
    # Only include route values that were actually supplied.
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    if wit_ref_name is not None:
        route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
    body = self._serialize.body(page, 'Page')
    response = self._send(http_method='POST',
                          location_id='1cc7b29f-6697-4d9d-b0a1-2650d3e1d584',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=body)
    return self._deserialize('Page', response)
def delete_item_list(self, item_list_url):
    """Delete an Item List on the server.

    :type item_list_url: String or ItemList
    :param item_list_url: the URL of the list to delete, or an ItemList object
    :rtype: Boolean
    :returns: True if the item list was deleted
    :raises: APIError if the request was not successful
    """
    try:
        resp = self.api_request(str(item_list_url), method="DELETE")
        # all good if it says success
        if 'success' in resp:
            return True
        raise APIError('200', 'Operation Failed', 'Delete operation failed')
    except APIError as e:
        # A 302 redirect after DELETE is treated as success.
        if e.http_status_code == 302:
            return True
        raise e
def change_wavelength(self, wavelength):
    '''Changes the wavelength of the structure.

    This will affect the mode solver and potentially the refractive
    indices used (provided functions were provided as refractive
    indices).

    Args:
        wavelength (float): The new wavelength.
    '''
    for axis in self.axes:
        # isinstance() is the idiomatic (and equivalent) form of the
        # original issubclass(type(axis), Slabs) check.
        if isinstance(axis, Slabs):
            axis.change_wavelength(wavelength)
    # Re-bind the per-axis views after the axes were updated in place.
    self.xx, self.xy, self.yx, self.yy, self.zz = self.axes
    self._wl = wavelength
def ngettext_lazy(singular, plural, n, domain=DEFAULT_DOMAIN):
    """Mark a message with plural forms translateable, and delay the
    translation until the message is used.

    Works the same way as `ngettext`, with a delaying functionality
    similiar to `gettext_lazy`.

    Args:
        singular (unicode): The singular form of the message.
        plural (unicode): The plural form of the message.
        n (int): The number that is used to decide which form should be used.
        domain (basestring): The domain of the message. Defaults to
            'messages', which is the domain where all application messages
            should be located.

    Returns:
        unicode: The correct pluralization, with the translation being
        delayed until the message is used.
    """
    # enable_cache=False so each evaluation re-resolves the active locale.
    return LazyProxy(ngettext, singular, plural, n, domain=domain, enable_cache=False)
def Colebrook(Re, eD, tol=None):
    r'''Calculates Darcy friction factor using the Colebrook equation
    originally published in [1]_. Normally, this function uses an exact
    solution to the Colebrook equation, derived with a CAS. A numerical
    solution can also be used.

    .. math::
        \frac{1}{\sqrt{f}} = -2\log_{10}\left(\frac{\epsilon/D}{3.7}
        + \frac{2.51}{\text{Re}\sqrt{f}}\right)

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]
    tol : float, optional
        None for analytical solution (default); user specified value to use
        the numerical solution; 0 to use `mpmath` and provide a bit-correct
        exact solution to the maximum fidelity of the system's `float`;
        -1 to apply the Clamond solution where appropriate for greater
        speed (Re > 10), [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    The analytical solution is expressed via the Lambert W function. For
    high relative roughness and Reynolds numbers an OverflowError can be
    encountered, in which case the numerical solution is used. The
    numerical solution is generally within rtol 1E-12 of the analytical
    one but may differ by up to ~1E-5 due to rounding order; it attempts
    scipy's `newton` (secant) and falls back to `fsolve`, seeded by the
    `Clamond` approximation.

    Examples
    --------
    >>> Colebrook(1E5, 1E-4)
    0.018513866077471648

    References
    ----------
    .. [1] Colebrook, C F. "Turbulent Flow in Pipes, with Particular
       Reference to the Transition Region Between the Smooth and Rough
       Pipe Laws." Journal of the ICE 11, no. 4 (February 1, 1939):
       133-156. doi:10.1680/ijoti.1939.13150.
    '''
    if tol == -1:
        # Fast approximate path: use Clamond's solution when Re is large
        # enough; otherwise fall through to the analytical solution.
        if Re > 10.0:
            return Clamond(Re, eD)
        else:
            tol = None
    elif tol == 0:
        # Bit-exact path using arbitrary-precision arithmetic.
        # Reference sympy derivation of the closed form:
        # from sympy import LambertW, Rational, log, sqrt
        # Re = Rational(Re)
        # eD_Re = Rational(eD)*Re
        # sub = 1/Rational('6.3001')*10**(1/Rational('9.287')*eD_Re)*Re*Re
        # lambert_term = LambertW(log(sqrt(10))*sqrt(sub))
        # den = log(10)*eD_Re - 18.574*lambert_term
        # return float(log(10)**2*Rational('3.7')**2*Rational('2.51')**2/(den*den))
        try:
            from mpmath import mpf, log, sqrt, mp
            from mpmath import lambertw as mp_lambertw
        except:
            raise ImportError('For exact solutions, the `mpmath` library is '
                              'required')
        mp.dps = 50
        Re = mpf(Re)
        eD_Re = mpf(eD)*Re
        sub = 1/mpf('6.3001')*10**(1/mpf('9.287')*eD_Re)*Re*Re
        lambert_term = mp_lambertw(log(sqrt(10))*sqrt(sub))
        den = log(10)*eD_Re - 18.574*lambert_term
        return float(log(10)**2*mpf('3.7')**2*mpf('2.51')**2/(den*den))
    if tol is None:
        # Analytical float path via scipy's lambertw.
        try:
            eD_Re = eD*Re
            # 9.287 = 2.51*3.7; 6.3001 = 2.51**2
            # xn = 1/6.3001 = 0.15872763924382155
            # 1/9.287 = 0.10767739851405189
            sub = 0.15872763924382155*10.0**(0.10767739851405189*eD_Re)*Re*Re
            if isinf(sub):
                # Can't continue, need numerical approach
                raise OverflowError
            # 1.15129... = log(sqrt(10))
            lambert_term = float(lambertw(1.151292546497022950546806896454654633998870849609375*sub**0.5).real)
            # log(10) = 2.302585...; 2*2.51*3.7 = 18.574
            # 457.28... = log(10)**2*3.7**2*2.51**2
            den = 2.30258509299404590109361379290930926799774169921875*eD_Re - 18.574*lambert_term
            return 457.28006463294371997108100913465023040771484375/(den*den)
        except OverflowError:
            pass
    # Either user-specified tolerance, or an error in the analytical solution
    if tol is None:
        tol = 1e-12
    try:
        fd_guess = Clamond(Re, eD)
    except ValueError:
        fd_guess = Blasius(Re)
    def err(x):
        # Convert the newton search domain to always positive
        f_12_inv = abs(x)**-0.5
        # 0.27027027027027023 = 1/3.7
        return f_12_inv + 2.0*log10(eD*0.27027027027027023 + 2.51/Re*f_12_inv)
    try:
        fd = abs(newton(err, fd_guess, tol=tol))
        if fd > 1E10:
            raise ValueError
        return fd
    except:
        from scipy.optimize import fsolve
        return abs(float(fsolve(err, fd_guess, xtol=tol)))
def _file_dict(self, fn_):
    '''Take a path and return the contents of the file as a string'''
    # Bail out of the whole process if the file is missing.
    if not os.path.isfile(fn_):
        sys.stderr.write(
            'The referenced file, {0} is not available.'.format(fn_) + '\n')
        sys.exit(42)
    with salt.utils.files.fopen(fn_, 'r') as handle:
        contents = handle.read()
    return {fn_: contents}
def _2ndDerivInt(x, y, z, dens, densDeriv, b2, c2, i, j, glx=None, glw=None):
    """Integral that gives the 2nd derivative of the potential in x,y,z

    i, j select which second derivative (0=x, 1=y, 2=z); b2, c2 are the
    squared axis ratios.  When glx/glw are given they are used as
    Gauss-Legendre abscissae/weights instead of adaptive quadrature.
    """
    def integrand(s):
        # Substitution t = 1/s^2 - 1 maps s in (0, 1] onto t in [0, inf).
        t = 1/s**2. - 1.
        # Ellipsoidal radius at this t.
        m = numpy.sqrt(x**2./(1. + t) + y**2./(b2 + t) + z**2./(c2 + t))
        # The (i == k) factors act as selectors picking the coordinate
        # matching each requested derivative index.
        return (densDeriv(m)
                * (x/(1. + t)*(i == 0) + y/(b2 + t)*(i == 1) + z/(c2 + t)*(i == 2))
                * (x/(1. + t)*(j == 0) + y/(b2 + t)*(j == 1) + z/(c2 + t)*(j == 2))/m
                + dens(m)*(i == j)*((1./(1. + t)*(i == 0) + 1./(b2 + t)*(i == 1) + 1./(c2 + t)*(i == 2)))) \
            / numpy.sqrt((1. + (b2 - 1.)*s**2.)*(1. + (c2 - 1.)*s**2.))
    if glx is None:
        # Adaptive quadrature over the full substituted range.
        return integrate.quad(integrand, 0., 1.)[0]
    else:
        # Fixed Gauss-Legendre quadrature.
        return numpy.sum(glw*integrand(glx))
def replay_config ( self , switch_ip ) : """Sends pending config data in OpenStack to Nexus ."""
LOG . debug ( "Replaying config for switch ip %(switch_ip)s" , { 'switch_ip' : switch_ip } ) # Before replaying all config , initialize trunk interfaces # to none as required . If this fails , the switch may not # be up all the way . Quit and retry later . try : self . _initialize_trunk_interfaces_to_none ( switch_ip ) except Exception : return nve_bindings = nxos_db . get_nve_switch_bindings ( switch_ip ) # If configured to set global VXLAN values and # there exists VXLAN data base entries , then configure # the " interface nve " entry on the switch . if ( len ( nve_bindings ) > 0 and cfg . CONF . ml2_cisco . vxlan_global_config ) : LOG . debug ( "Nexus: Replay NVE Interface" ) loopback = self . _mdriver . get_nve_loopback ( switch_ip ) self . _driver . enable_vxlan_feature ( switch_ip , const . NVE_INT_NUM , loopback ) for x in nve_bindings : try : self . _driver . create_nve_member ( switch_ip , const . NVE_INT_NUM , x . vni , x . mcast_group ) except Exception as e : LOG . error ( "Failed to configure nve_member for " "switch %(switch_ip)s, vni %(vni)s" "Reason:%(reason)s " , { 'switch_ip' : switch_ip , 'vni' : x . vni , 'reason' : e } ) self . _mdriver . register_switch_as_inactive ( switch_ip , 'replay create_nve_member' ) return try : port_bindings = nxos_db . get_nexusport_switch_bindings ( switch_ip ) except excep . NexusPortBindingNotFound : LOG . warning ( "No port entries found for switch ip " "%(switch_ip)s during replay." , { 'switch_ip' : switch_ip } ) return try : self . _mdriver . configure_switch_entries ( switch_ip , port_bindings ) except Exception as e : LOG . error ( "Unexpected exception while replaying " "entries for switch %(switch_ip)s, Reason:%(reason)s " , { 'switch_ip' : switch_ip , 'reason' : e } ) self . _mdriver . register_switch_as_inactive ( switch_ip , 'replay switch_entries' )
def _writeToTransport ( self , data ) : '''Frame the array - like thing and write it .'''
self . transport . writeData ( data ) self . heartbeater . schedule ( )
def uniform_random_global_points(n=100):
    """Returns an array of `n` uniformally distributed
    `shapely.geometry.Point` objects.

    Longitudes are drawn uniformly from [-180, 180] and latitudes from
    [-90, 90].
    """
    lons = np.random.uniform(-180, 180, n)
    lats = np.random.uniform(-90, 90, n)
    return [shapely.geometry.Point(lon, lat) for lon, lat in zip(lons, lats)]
def main(*args):
    """Enter point.

    Parses CLI arguments, configures logging, then either lists the
    available templates (when none were requested or --list was given)
    or runs the copy operation; exits with status 1 on failure.
    """
    args = args or sys.argv[1:]
    params = PARSER.parse_args(args)
    # Imports deferred until after argument parsing.
    from .log import setup_logging
    setup_logging(params.level.upper())
    from .core import Starter
    starter = Starter(params)
    if not starter.params.TEMPLATES or starter.params.list:
        # List mode: quiet the logger, then print name/description pairs.
        setup_logging('WARN')
        for t in sorted(starter.iterate_templates()):
            logging.warn("%s -- %s", t.name, t.params.get('description', 'no description'))
        return True
    try:
        starter.copy()
    except Exception as e:  # noqa
        logging.error(e)
        sys.exit(1)
def codebox(msg="", title=" ", text=""):
    """Display some text in a monospaced font, with no line wrapping.

    Suitable for displaying code and text formatted using spaces.  The
    text parameter should be a string, or a list or tuple of lines to be
    displayed in the textbox.

    :param str msg: the msg to be displayed
    :param str title: the window title
    :param str text: what to display in the textbox
    """
    # Delegate to the generic textbox with code mode switched on.
    return tb.textbox(msg, title, text, codebox=1)
def _detect_loop ( self ) : """detect loops in flow table , raise error if being present"""
for source , dests in self . flowtable . items ( ) : if source in dests : raise conferr ( 'Loops detected: %s --> %s' % ( source , source ) )
def _set_gre_ttl(self, v, load=False):
    """Setter method for gre_ttl, mapped from YANG variable
    /interface/tunnel/gre_ttl (uint32)

    If this variable is read-only (config: false) in the source YANG
    file, then _set_gre_ttl is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_gre_ttl() directly.
    """
    # Unwrap typed values produced by user-facing setters.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Generated pyangbind wrapper: validates the value against the
        # restricted uint32 range (1..255) declared in the YANG model.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="gre-ttl", rest_name="ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel ttl range 1 to 255', u'alt-name': u'ttl', u'cli-full-no': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """gre_ttl must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="gre-ttl", rest_name="ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel ttl range 1 to 255', u'alt-name': u'ttl', u'cli-full-no': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='uint32', is_config=True)""",
        })
    self.__gre_ttl = t
    if hasattr(self, '_set'):
        self._set()
def make_worker_pool(processes=None, initializer=None, initializer_kwargs_per_process=None, max_tasks_per_worker=None):
    """Create a multiprocessing.Pool, optionally with per-worker init args.

    Per-worker initializer arguments (not natively supported by the
    multiprocessing module) are delivered through a queue: each worker
    pops its kwargs on startup and pushes them back on exit so a future
    worker can reuse them.  A backup queue always holds the full set of
    kwargs, guarding against a crashed worker that never re-populates
    the primary queue.

    Parameters
    ----------
    processes : int
        Number of workers.  Default: number of CPUs.
    initializer : function, optional
        Init function to call in each worker.
    initializer_kwargs_per_process : list of dict, optional
        Per-worker kwargs for the initializer; length must equal the
        number of workers.
    max_tasks_per_worker : int, optional
        Restart workers after this many tasks (Python >= 3.2).

    Returns
    -------
    multiprocessing.Pool
    """
    num_workers = processes if processes else cpu_count()
    pool_kwargs = {'processes': num_workers}
    if max_tasks_per_worker:
        pool_kwargs["maxtasksperchild"] = max_tasks_per_worker

    if initializer and initializer_kwargs_per_process:
        assert len(initializer_kwargs_per_process) == num_workers
        kwargs_queue = Queue()
        kwargs_queue_backup = Queue()
        for kwargs in initializer_kwargs_per_process:
            kwargs_queue.put(kwargs)
            kwargs_queue_backup.put(kwargs)
        pool_kwargs["initializer"] = worker_init_entry_point
        pool_kwargs["initargs"] = (initializer, kwargs_queue, kwargs_queue_backup)
    elif initializer:
        pool_kwargs["initializer"] = initializer

    worker_pool = Pool(**pool_kwargs)
    print("Started pool: %s" % str(worker_pool))
    pprint(pool_kwargs)
    return worker_pool
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header

    Notes
    -----
    BCI2000 does not store channel labels, so dummy names are generated
    starting at ch001 (1-based, consistent with Matlab indexing).
    """
    orig = _read_header(self.filename)

    nchan = int(orig['SourceCh'])
    chan_name = ['ch{:03d}'.format(i + 1) for i in range(nchan)]
    chan_dtype = dtype(orig['DataFormat'])
    self.statevector_len = int(orig['StatevectorLen'])

    s_freq = orig['Parameter']['SamplingRate']
    if s_freq.endswith('Hz'):
        s_freq = s_freq.replace('Hz', '')
    s_freq = int(s_freq.strip())
    self.s_freq = s_freq

    # StorageTime is URL-encoded; spaces arrive as '%20'.
    storagetime = orig['Parameter']['StorageTime'].replace('%20', ' ')
    try:  # newer files use ctime-style timestamps
        start_time = datetime.strptime(storagetime, '%a %b %d %H:%M:%S %Y')
    except ValueError:  # older files use ISO 8601 (was a bare `except:`)
        start_time = datetime.strptime(storagetime, '%Y-%m-%dT%H:%M:%S')

    subj_id = orig['Parameter']['SubjectName']

    # One record = all channels + the trailing statevector bytes.
    self.dtype = dtype([(chan, chan_dtype) for chan in chan_name]
                       + [('statevector', 'S', self.statevector_len)])

    # compute n_samples based on file size - header
    with open(self.filename, 'rb') as f:
        f.seek(0, SEEK_END)
        EOData = f.tell()
    n_samples = int((EOData - int(orig['HeaderLen'])) / self.dtype.itemsize)

    self.header_len = int(orig['HeaderLen'])
    self.n_samples = n_samples
    self.statevectors = _prepare_statevectors(orig['StateVector'])

    # TODO: a better way to parse header
    self.gain = array([float(x) for x in
                       orig['Parameter']['SourceChGain'].split(' ')[1:]])

    return subj_id, start_time, s_freq, chan_name, n_samples, orig
def add_arguments(self, parser):
    """Entry point for subclassed commands to add custom arguments."""
    subparsers = parser.add_subparsers(help='sub-command help', dest='command')
    # Register the three trigger sub-commands.
    for command, help_text in (('list', "list concurrency triggers"),
                               ('drop', "drop concurrency triggers"),
                               ('create', "create concurrency triggers")):
        _add_subparser(subparsers, parser, command, help=help_text)
    parser.add_argument('-d', '--database', action='store', dest='database',
                        default=None, help='limit to this database')
    parser.add_argument('-t', '--trigger', action='store', dest='trigger',
                        default=None, help='limit to this trigger name')
def distances(self, points):
    """Compute the signed distance from the plane to each of the points.

    :param points: Points for which distances are computed
    :return: List of distances from the plane to the points (positive
        values on the side of the plane's normal, negative on the other
        side)
    """
    result = []
    for point in points:
        result.append(np.dot(self.normal_vector, point) + self.d)
    return result
def import_lv_load_areas(self, session, mv_grid_district, lv_grid_districts, lv_stations):
    """Import load_areas (load areas) from database for a single MV grid_district.

    Parameters
    ----------
    session : sqlalchemy.orm.session.Session
        Database session
    mv_grid_district : MVGridDistrictDing0
        MV grid_district/station for which the import of load areas is
        performed
    lv_grid_districts : :pandas:`pandas.DataFrame<dataframe>`
        LV grid districts within this mv_grid_district
    lv_stations : :pandas:`pandas.DataFrame<dataframe>`
        LV stations within this mv_grid_district
    """
    # get ding0s' standard CRS (SRID)
    srid = str(int(cfg_ding0.get('geo', 'srid')))
    # SET SRID 3035 to achieve correct area calculation of lv_grid_district
    # srid = '3035'

    # threshold: load area peak load, if peak load < threshold => disregard
    # load area
    lv_loads_threshold = cfg_ding0.get('mv_routing', 'load_area_threshold')

    gw2kw = 10 ** 6  # load in database is in GW -> scale to kW

    # build SQL query; per-sector peak loads are scaled to kW and the
    # total 'peak_load' is their sum
    lv_load_areas_sqla = session.query(
        self.orm['orm_lv_load_areas'].id.label('id_db'),
        self.orm['orm_lv_load_areas'].zensus_sum,
        self.orm['orm_lv_load_areas'].zensus_count.label('zensus_cnt'),
        self.orm['orm_lv_load_areas'].ioer_sum,
        self.orm['orm_lv_load_areas'].ioer_count.label('ioer_cnt'),
        self.orm['orm_lv_load_areas'].area_ha.label('area'),
        self.orm['orm_lv_load_areas'].sector_area_residential,
        self.orm['orm_lv_load_areas'].sector_area_retail,
        self.orm['orm_lv_load_areas'].sector_area_industrial,
        self.orm['orm_lv_load_areas'].sector_area_agricultural,
        self.orm['orm_lv_load_areas'].sector_share_residential,
        self.orm['orm_lv_load_areas'].sector_share_retail,
        self.orm['orm_lv_load_areas'].sector_share_industrial,
        self.orm['orm_lv_load_areas'].sector_share_agricultural,
        self.orm['orm_lv_load_areas'].sector_count_residential,
        self.orm['orm_lv_load_areas'].sector_count_retail,
        self.orm['orm_lv_load_areas'].sector_count_industrial,
        self.orm['orm_lv_load_areas'].sector_count_agricultural,
        self.orm['orm_lv_load_areas'].nuts.label('nuts_code'),
        # geometries are transformed into ding0's SRID as WKT text
        func.ST_AsText(func.ST_Transform(
            self.orm['orm_lv_load_areas'].geom, srid)).label('geo_area'),
        func.ST_AsText(func.ST_Transform(
            self.orm['orm_lv_load_areas'].geom_centre, srid)).label('geo_centre'),
        (self.orm['orm_lv_load_areas'].sector_peakload_residential * gw2kw).
        label('peak_load_residential'),
        (self.orm['orm_lv_load_areas'].sector_peakload_retail * gw2kw).
        label('peak_load_retail'),
        (self.orm['orm_lv_load_areas'].sector_peakload_industrial * gw2kw).
        label('peak_load_industrial'),
        (self.orm['orm_lv_load_areas'].sector_peakload_agricultural * gw2kw).
        label('peak_load_agricultural'),
        ((self.orm['orm_lv_load_areas'].sector_peakload_residential
          + self.orm['orm_lv_load_areas'].sector_peakload_retail
          + self.orm['orm_lv_load_areas'].sector_peakload_industrial
          + self.orm['orm_lv_load_areas'].sector_peakload_agricultural)
         * gw2kw).label('peak_load')). \
        filter(self.orm['orm_lv_load_areas'].subst_id ==
               mv_grid_district.mv_grid._station.id_db). \
        filter(((self.orm['orm_lv_load_areas'].sector_peakload_residential  # only pick load areas with peak load > lv_loads_threshold
                 + self.orm['orm_lv_load_areas'].sector_peakload_retail
                 + self.orm['orm_lv_load_areas'].sector_peakload_industrial
                 + self.orm['orm_lv_load_areas'].sector_peakload_agricultural)
                * gw2kw) > lv_loads_threshold). \
        filter(self.orm['version_condition_la'])

    # read data from db
    lv_load_areas = pd.read_sql_query(lv_load_areas_sqla.statement,
                                      session.bind,
                                      index_col='id_db')

    # create load_area objects from rows and add them to graph
    for id_db, row in lv_load_areas.iterrows():
        # create LV load_area object
        lv_load_area = LVLoadAreaDing0(id_db=id_db,
                                       db_data=row,
                                       mv_grid_district=mv_grid_district,
                                       peak_load=row['peak_load'])

        # sub-selection of lv_grid_districts/lv_stations within one
        # specific load area
        lv_grid_districts_per_load_area = lv_grid_districts. \
            loc[lv_grid_districts['la_id'] == id_db]
        lv_stations_per_load_area = lv_stations. \
            loc[lv_stations['la_id'] == id_db]

        self.build_lv_grid_district(lv_load_area,
                                    lv_grid_districts_per_load_area,
                                    lv_stations_per_load_area)

        # create new centre object for Load Area
        lv_load_area_centre = LVLoadAreaCentreDing0(id_db=id_db,
                                                    geo_data=wkt_loads(row['geo_centre']),
                                                    lv_load_area=lv_load_area,
                                                    grid=mv_grid_district.mv_grid)
        # links the centre object to Load Area
        lv_load_area.lv_load_area_centre = lv_load_area_centre

        # add Load Area to MV grid district (and add centre object to MV
        # grid district's graph)
        mv_grid_district.add_lv_load_area(lv_load_area)
def add_clients(session, verbose):
    """Add clients to the ATVS Keystroke database.

    One Client row is created per (type, user id) combination, for both
    the 'Genuine' and the 'Impostor' types.
    """
    for client_type in ('Genuine', 'Impostor'):
        for user_id in userid_clients:
            client_name = '%s_%d' % (client_type, user_id)
            if verbose > 1:
                print("  Adding user '%s' of type '%s'..." % (client_name, client_type))
            session.add(Client(client_name, client_type, user_id))
def remove_stage_from_deployed_values(key, filename):
    # type: (str, str) -> None
    """Delete a top level key from the deployed JSON file.

    Both a missing file and a missing key are treated as no-ops; the
    file is rewritten only when the key was actually removed.
    """
    try:
        with open(filename, 'r') as f:
            final_values = json.load(f)  # type: Dict[str, Any]
    except IOError:
        # If there is no file to delete from, then this function is a noop.
        return
    try:
        del final_values[key]
    except KeyError:
        # If the key didn't exist then there is nothing to remove.
        return
    # Serialize/write outside the try so a genuine KeyError raised while
    # serializing or writing is not silently swallowed (previous code
    # wrapped the whole write in the `except KeyError` handler).
    data = serialize_to_json(final_values)
    with open(filename, 'wb') as f:
        f.write(data.encode('utf-8'))
def atomic(connection_name: Optional[str] = None) -> Callable:
    """Transaction decorator.

    Wrap a coroutine with this decorator to run it inside a single
    transaction; if an error occurs, the transaction is rolled back.

    :param connection_name: name of connection to run with, optional if
        you have only one db connection
    """
    def decorator(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            connection = _get_connection(connection_name)
            async with connection._in_transaction():
                return await func(*args, **kwargs)
        return inner
    return decorator
def _pipeline_needs_fastq(config, data):
    """Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.

    Returns True when an aligner is configured and it does not support
    BAM input directly, i.e. the input must first be converted to fastq.
    """
    aligner = config["algorithm"].get("aligner")
    supports_bam = aligner in alignment.metadata.get("support_bam", [])
    # Return an explicit bool instead of leaking the aligner string (or
    # None) through the `aligner and ...` boolean expression.
    return bool(aligner) and not supports_bam
def v_reference_choice(ctx, stmt):
    """Make sure that the default case exists"""
    d = stmt.search_one('default')
    if d is not None:
        # A choice cannot be both mandatory and have a default case.
        m = stmt.search_one('mandatory')
        if m is not None and m.arg == 'true':
            err_add(ctx.errors, stmt.pos, 'DEFAULT_AND_MANDATORY', ())
        # Locate the case statement named by the default.
        ptr = attrsearch(d.arg, 'arg', stmt.i_children)
        if ptr is None:
            err_add(ctx.errors, d.pos, 'DEFAULT_CASE_NOT_FOUND', d.arg)
        else:
            # make sure there are no mandatory nodes in the default case
            def chk_no_defaults(s):
                for c in s.i_children:
                    if c.keyword in ('leaf', 'choice'):
                        m = c.search_one('mandatory')
                        if m is not None and m.arg == 'true':
                            err_add(ctx.errors, c.pos,
                                    'MANDATORY_NODE_IN_DEFAULT_CASE', ())
                    elif c.keyword in ('list', 'leaf-list'):
                        # min-elements > 0 makes the node effectively mandatory
                        m = c.search_one('min-elements')
                        if m is not None and int(m.arg) > 0:
                            err_add(ctx.errors, c.pos,
                                    'MANDATORY_NODE_IN_DEFAULT_CASE', ())
                    elif c.keyword == 'container':
                        # non-presence containers are transparent: recurse
                        p = c.search_one('presence')
                        if p == None or p.arg == 'false':
                            chk_no_defaults(c)
            chk_no_defaults(ptr)
def _updateTargetFromNode ( self ) : """Applies the configuration to the target axis it monitors . The axis label will be set to the configValue . If the configValue equals PgAxisLabelCti . NO _ LABEL , the label will be hidden ."""
rtiInfo = self . collector . rtiInfo self . plotItem . setLabel ( self . axisPosition , self . configValue . format ( ** rtiInfo ) ) self . plotItem . showLabel ( self . axisPosition , self . configValue != self . NO_LABEL )
def set_computer_name(name):
    '''
    Set the computer name

    :param str name: The new computer name

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_computer_name "Mike's Mac"
    '''
    # Run systemsetup, then confirm the name actually changed.
    __utils__['mac_utils.execute_return_success'](
        'systemsetup -setcomputername "{0}"'.format(name))
    return __utils__['mac_utils.confirm_updated'](name, get_computer_name)
def write(self, basename=None, write_separate_manifests=True):
    """Write one or more dump files to complete this dump.

    Returns the number of dump/archive files written.
    """
    self.check_files()
    count = 0
    for manifest in self.partition_dumps():
        part_base = "%s%05d" % (basename, count)
        part_file = "%s.%s" % (part_base, self.format)
        if write_separate_manifests:
            manifest.write(basename=part_base + '.xml')
        if self.format == 'zip':
            self.write_zip(manifest.resources, part_file)
        elif self.format == 'warc':
            self.write_warc(manifest.resources, part_file)
        else:
            raise DumpError("Unknown dump format requested (%s)" % (self.format))
        count += 1
    self.logger.info("Wrote %d dump files" % (count))
    return count
def evaluate_js(script, uid='master'):
    """Evaluate given JavaScript code and return the result.

    :param script: The JavaScript code to be evaluated
    :param uid: uid of the target instance
    :return: Return value of the evaluated code
    """
    # Wrap the code so the result is JSON-serialized before crossing the
    # JS/Python boundary.
    wrapped = 'JSON.stringify(eval("{0}"))'.format(escape_string(script))
    return gui.evaluate_js(wrapped, uid)
def rsdl_sn(self, U):
    """Compute the dual residual normalisation term.

    Overriding this method is required if methods :meth:`cnst_A`,
    :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
    overridden.
    """
    norm_ATU = np.linalg.norm(self.cnst_AT(U))
    return norm_ATU * self.rho
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
    """Returns a key derived using the scrypt key-derivation function.

    N must be a power of two larger than 1 but no larger than 2 ** 63
    (insane).  r and p must be positive numbers such that r * p < 2 ** 30.

    The default values are:
    N -- 2**14 (~16k)
    r -- 8
    p -- 1

    Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms of a recent x86.

    The last one differs from libscrypt defaults, but matches the
    'interactive' work factor from the original paper.  For long term
    storage where runtime of key derivation is not a problem, you could
    use 16 as in libscrypt or better yet increase N if memory is
    plentiful.
    """
    check_args(password, salt, N, r, p, olen)
    # Preferred path: the low-level scrypt entry point that accepts all
    # parameters directly.
    if _scrypt_ll:
        out = ctypes.create_string_buffer(olen)
        if _scrypt_ll(password, len(password), salt, len(salt), N, r, p, out, olen):
            raise ValueError
        return out.raw
    # The fallback C binding below only supports r == 8, a power-of-two p,
    # a fixed salt length and N * p > 512; delegate anything else to the
    # pure-Python/module implementation.
    if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N * p <= 512):
        return scr_mod.scrypt(password, salt, N, r, p, olen)
    # Recover the exponents s, t with N == 2**s and p == 2**t, then encode
    # them into the (opbudget, membudget) arguments the binding expects.
    s = next(i for i in range(1, 64) if 2 ** i == N)
    t = next(i for i in range(0, 30) if 2 ** i == p)
    m = 2 ** (10 + s)
    o = 2 ** (5 + t + s)
    if s > 53 or t + s > 58:
        raise ValueError
    out = ctypes.create_string_buffer(olen)
    if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
        raise ValueError
    return out.raw
def on_view_not_found(self, environ: Dict[str, Any], start_response: Callable) -> Iterable[bytes]:  # pragma: nocover
    """Hook invoked when no view matches the request.

    Subclasses must override this to produce a response (e.g. a 404
    page); the base implementation refuses to handle the request.
    """
    raise NotImplementedError()
def read_candidates ( candsfile , snrmin = 0 , snrmax = 999 , returnstate = False ) : """Reads candidate file and returns data as python object . candsfile is pkl file ( for now ) with ( 1 ) state dict and ( 2 ) cands object . cands object can either be a dictionary or tuple of two numpy arrays . Return tuple of two numpy arrays ( location , properties ) . returned values can be filtered by snrmin and snrmax ( on absolute value ) . returnstate will instead return ( loc , prop , state ) ."""
# read in pickle file of candidates assert os . path . exists ( candsfile ) try : with open ( candsfile , 'rb' ) as pkl : d = pickle . load ( pkl ) cands = pickle . load ( pkl ) except IOError : logger . error ( 'Trouble parsing candsfile' ) loc = np . array ( [ ] ) prop = np . array ( [ ] ) if returnstate : return ( loc , prop , d ) else : return ( loc , prop ) if 'snr2' in d [ 'features' ] : snrcol = d [ 'features' ] . index ( 'snr2' ) elif 'snr1' in d [ 'features' ] : snrcol = d [ 'features' ] . index ( 'snr1' ) # old style . here for backwards compatibility if isinstance ( cands , dict ) : loc = [ ] ; prop = [ ] for kk in sorted ( cands . keys ( ) ) : if ( ( np . abs ( cands [ kk ] [ snrcol ] ) > snrmin ) and ( np . abs ( cands [ kk ] [ snrcol ] ) < snrmax ) ) : loc . append ( list ( kk ) ) prop . append ( list ( cands [ kk ] ) ) loc = np . array ( loc ) prop = np . array ( prop ) # new style elif isinstance ( cands , tuple ) : loc , prop = cands assert isinstance ( loc , np . ndarray ) and isinstance ( prop , np . ndarray ) , 'if cands object is tuple, contents must be two ndarrays' if not len ( loc ) : if returnstate : return ( loc , prop , d ) else : return ( loc , prop ) snrsel = np . where ( ( np . abs ( prop [ : , snrcol ] ) > snrmin ) & ( np . abs ( prop [ : , snrcol ] ) < snrmax ) ) loc = loc [ snrsel ] prop = prop [ snrsel ] else : logger . error ( 'Cands object (in cands file) must be dict or tuple(np.array, np.array).' ) # if segment or scan pkl , insert scan number as first col and modify d if 'scan' not in d [ 'featureind' ] : scanarr = d [ 'scan' ] * np . ones ( len ( loc ) , dtype = int ) loc = np . concatenate ( ( scanarr [ : , None ] , loc ) , axis = 1 ) d [ 'featureind' ] . insert ( 0 , 'scan' ) logger . info ( 'Read %d candidates from %s.' % ( len ( loc ) , candsfile ) ) if returnstate : return loc , prop , d else : return loc , prop
def start_task(self, func):
    """Start up a task.

    The task is tracked in _started_tasks until it completes, at which
    point it removes itself via a done-callback.
    """
    new_task = self.loop.create_task(func(self))
    self._started_tasks.append(new_task)

    def _on_done(finished_task):
        self._started_tasks.remove(finished_task)

    new_task.add_done_callback(_on_done)
    return new_task
def horizontals(self):
    """All horizontal squares from the piece's point of view.

    Returns the set of relative (dx, 0) movements up to the board's
    bound, excluding the null move.
    """
    shifts = set()
    for column in range(self.board.length):
        shifts.add((column - self.x, 0))
    shifts.discard((0, 0))
    return shifts
def prior_GP_var_inv_gamma(y_invK_y, n_y, tau_range):
    """MAP estimate of GP variance under an inverse-Gamma prior.

    An inverse-Gamma prior tau^2 ~ invgamma(shape=alpha, scale=beta) is
    imposed on the variance (tau^2) of a Gaussian Process prior over an
    unknown function y = f(x), with alpha = 2 and beta = tau_range**2.
    tau_range expresses the reasonable range of tau: the smaller it is,
    the more large variation of y is penalized.  The data are assumed to
    follow f(x, x') ~ N(0, K(x, x') / 2 tau^2).

    Written primarily for BRSA (y corresponds to log SNR per voxel) but
    usable elsewhere.  An alternative is a half-Cauchy prior on tau; see
    :func:`prior_GP_var_half_cauchy`.

    Parameters
    ----------
    y_invK_y : float
        y * inv(K) * y^T for observations y and GP correlation matrix K.
    n_y : int
        Number of observations.
    tau_range : float
        Reasonable range of tau; parameter of the inverse-Gamma prior.

    Returns
    -------
    tau2 : float
        MAP estimate of tau^2 given the prior and y_invK_y.
    log_ptau : float
        log(p(tau^2)) of the returned tau^2 under the prior.
    """
    alpha = 2
    beta = tau_range ** 2
    tau2 = (y_invK_y + 2 * beta) / (2 * alpha + 2 + n_y)
    log_ptau = scipy.stats.invgamma.logpdf(tau2, scale=beta, a=alpha)
    return tau2, log_ptau
def handle(self, *args, **options):  # NoQA
    """Execute the command.

    Runs the full Zappa deployment pipeline: package, upload to S3,
    create the Lambda function and API Gateway, then clean up local and
    S3 artifacts and optionally (un)schedule events.
    """
    # Load the settings
    self.require_settings(args, options)

    # Load your AWS credentials from ~/.aws/credentials
    self.load_credentials()

    # Get the Django settings file
    self.get_django_settings_file()

    # Make sure the necessary IAM execution roles are available
    self.zappa.create_iam_roles()

    # Create the Lambda Zip
    self.create_package()

    # Upload it to S3
    try:
        zip_arn = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name)
    except (KeyboardInterrupt, SystemExit):
        raise
    # NOTE(review): zip_arn is never used below — verify whether the
    # returned ARN is needed or the assignment can be dropped.

    # Register the Lambda function with that zip as the source
    # You'll also need to define the path to your lambda_handler code.
    lambda_arn = self.zappa.create_lambda_function(bucket=self.s3_bucket_name,
                                                   s3_key=self.zip_path,
                                                   function_name=self.lambda_name,
                                                   handler='handler.lambda_handler',
                                                   vpc_config=self.vpc_config,
                                                   memory_size=self.memory_size,
                                                   timeout=self.timeout)

    # Create and configure the API Gateway
    api_id = self.zappa.create_api_gateway_routes(lambda_arn, self.lambda_name)

    # Deploy the API!
    endpoint_url = self.zappa.deploy_api_gateway(api_id, self.api_stage)

    # Finally, delete the local copy our zip package
    if self.zappa_settings[self.api_stage].get('delete_zip', True):
        os.remove(self.zip_path)

    # Remove the local settings
    self.remove_s3_local_settings()

    # Remove the uploaded zip from S3, because it is now registered..
    self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)

    # Optionally warm up the deployment with a first request.
    if self.zappa_settings[self.api_stage].get('touch', True):
        requests.get(endpoint_url)

    print("Your Zappa deployment is live!: " + endpoint_url)

    # Schedule/unschedule configured events against the new function.
    events = self.zappa_settings[self.api_stage].get('events')
    if options['unschedule'] and events:
        self.zappa.unschedule_events(lambda_arn, self.lambda_name, events)
    elif options['unschedule'] and not events:
        print("No Events to Unschedule")
    if options['schedule'] and events:
        self.zappa.schedule_events(lambda_arn, self.lambda_name, events)
    elif options['schedule'] and not events:
        print("No Events to Schedule")
def create_output(stdout=None, true_color=False, ansi_colors_only=None):
    """Return an :class:`~prompt_toolkit.output.Output` instance for the
    command line.

    :param true_color: When True, use 24bit colors instead of 256 colors.
        (`bool` or :class:`~prompt_toolkit.filters.SimpleFilter`.)
    :param ansi_colors_only: When True, restrict to 16 ANSI colors only.
        (`bool` or :class:`~prompt_toolkit.filters.SimpleFilter`.)
    """
    stdout = stdout or sys.__stdout__
    true_color = to_simple_filter(true_color)

    if not is_windows():
        # Posix: a VT100-capable terminal attached to a pty.
        term = os.environ.get('TERM', '')
        if PY2:
            term = term.decode('utf-8')
        return Vt100_Output.from_pty(stdout, true_color=true_color,
                                     ansi_colors_only=ansi_colors_only,
                                     term=term)

    # Windows: prefer ConEmu's ANSI support when available.
    if is_conemu_ansi():
        return ConEmuOutput(stdout)
    return Win32Output(stdout)
def create(example):
    """Create a copy of the given example.

    The named example directory is copied from the package's bundled
    ``examples`` folder into the current working directory.
    """
    try:
        here = os.path.dirname(os.path.realpath(__file__))
        source = os.path.join(here, os.pardir, "examples", example)
        destination = os.path.join(os.getcwd(), example)
        shutil.copytree(source, destination)
        log("Example created.", delay=0)
    except TypeError:
        # os.path.join raises TypeError when `example` is None.
        click.echo("Example '{}' does not exist.".format(example))
    except OSError:
        click.echo("Example '{}' already exists here.".format(example))
def change_default_radii(def_map):
    """Change the default radii.

    `def_map` maps each atom type to its new default radius; the current
    representation is updated and reset to apply the change.
    """
    system = current_system()
    representation = current_representation()
    new_defaults = [def_map[atom_type] for atom_type in system.type_array]
    representation.radii_state.default = new_defaults
    representation.radii_state.reset()
def add(self, *destinations):
    """Adds new destinations.

    A destination should never ever throw an exception. Seriously.
    A destination should not mutate the dictionary it is given.

    @param destinations: A list of callables that takes message
        dictionaries.
    """
    pending = None
    if not self._any_added:
        # First destinations ever added: drain and drop the initial
        # BufferingDestination so its messages can be re-delivered.
        self._any_added = True
        pending = self._destinations[0].messages
        self._destinations = []
    self._destinations.extend(destinations)
    if pending:
        # Re-deliver buffered messages:
        for message in pending:
            self.send(message)
def types(**typefuncs):
    """Decorate a function that takes strings to one that takes typed values.

    The decorator's arguments are functions to perform type conversion.
    The positional and keyword arguments will be mapped to the
    positional and keyword arguments of the decorated function.  This
    allows web-based service functions, which by design always are
    passed string arguments, to be declared as functions taking typed
    arguments instead, eliminating the overhead of having to perform
    type conversions manually.

    If type conversion fails for any argument, the wrapped function will
    return a dict describing the exception that was raised.
    """
    def wrap(f):
        @functools.wraps(f)
        def typed_func(*pargs, **kwargs):
            # Analyze the incoming arguments so we know how to apply the
            # type-conversion functions in `typefuncs`.
            # NOTE(review): `inspect.getargspec` and `dict.iteritems`
            # below are Python 2 APIs — this module appears to target
            # Python 2.
            argspec = inspect.getargspec(f)

            # The `args` property contains the list of named arguments
            # passed to f. Construct a dict mapping from these names to
            # the values that were passed.
            #
            # It is possible that `args` contains names that are not
            # represented in `pargs`, if some of the arguments are passed
            # as keyword arguments. In this case, the relative shortness
            # of `pargs` will cause the call to zip() to truncate the
            # `args` list, and the keyword-style passed arguments will
            # simply be present in `kwargs`.
            pargs_dict = {name: value for (name, value) in zip(argspec.args, pargs)}

            # Begin converting arguments according to the functions given
            # in `typefuncs`. If a given name does not appear in
            # `typefuncs`, simply leave it unchanged. If a name appears in
            # `typefuncs` that does not appear in the argument list, this
            # is considered an error.
            try:
                for name, func in typefuncs.iteritems():
                    if name in pargs_dict:
                        pargs_dict[name] = func(pargs_dict[name])
                    elif name in kwargs:
                        kwargs[name] = func(kwargs[name])
                    else:
                        http_status(400, "Unknown Argument Name")
                        content_type("application/json")
                        return {"error": "'%s' was registered for type conversion but did not appear in the arguments list" % (name)}
            except ValueError as e:
                http_status(400, "Input Value Conversion Failed")
                content_type("application/json")
                return {"error": str(e)}

            # Unroll `pargs` into a list of arguments that are in the
            # correct order.
            pargs = []
            for name in argspec.args:
                try:
                    pargs.append(pargs_dict[name])
                except KeyError:
                    break

            # Call the wrapped function using the converted arguments.
            return f(*pargs, **kwargs)

        typed_func.typefuncs = typefuncs
        return typed_func
    return wrap
def main():
    """Start main part of the wait script.

    Blocks until the configured MySQL database accepts connections.
    """
    logger.info('Waiting for database: `%s`', MYSQL_DB)
    params = dict(host=MYSQL_HOST,
                  port=MYSQL_PORT,
                  user=MYSQL_USER,
                  password=MYSQL_PASSWORD,
                  database=MYSQL_DB)
    connect_mysql(**params)
    logger.info('Database `%s` found', MYSQL_DB)
def index(environment, start_response, headers):
    """Return the status of this Kronos instance + its backends.

    > Doesn't expect any URL parameters.
    """
    status = {
        'service': 'kronosd',
        'version': kronos.__version__,
        'id': settings.node['id'],
        'storage': {},
        SUCCESS_FIELD: True,
    }
    # Check if each backend is alive
    for name, backend in router.get_backends():
        status['storage'][name] = {
            'alive': backend.is_alive(),
            'backend': settings.storage[name]['backend'],
        }
    start_response('200 OK', headers)
    return status
def unblock_signals(self):
    """Let the combos listen for event changes again."""
    combos = (self.aggregation_layer_combo,
              self.exposure_layer_combo,
              self.hazard_layer_combo)
    for combo in combos:
        combo.blockSignals(False)
def connect(self) -> "google.cloud.storage.Bucket":
    """Connect to the assigned bucket.

    A fresh client is created and the configured bucket is looked up
    (``lookup_bucket`` returns None when the bucket does not exist).
    """
    self._log.info("Connecting to the bucket...")
    client = self.create_client()
    return client.lookup_bucket(self.bucket_name)
def lcs_logs():
    """Pull Retrosheet LCS Game Logs"""
    archive = get_zip_file(lcs_url)
    frame = pd.read_csv(archive.open('GLLC.TXT'),
                        header=None, sep=',', quotechar='"')
    frame.columns = gamelog_columns
    return frame
def find_matches(self, content, file_to_handle):
    """Find all matches of an expression in a file"""
    found = []
    for match in self.match_expression.finditer(content):
        # Only the named 'matchgroup' portion of each hit is of interest;
        # skip matches where that group is empty or absent.
        candidate = match.groupdict().get('matchgroup')
        if candidate:
            found.append(candidate)
    logger.info('Found %s matches in %s', len(found), file_to_handle)
    # We only need the unique strings found as we'll be replacing each
    # of them. No need to replace the ones already replaced.
    return list(set(found))
def make_directory(path):
    """Create *path* and any missing intermediate directories.

    Handles the case where two threads (or processes) try to create the
    same directory at once: the loser's ``EEXIST`` error is ignored.

    :param path: directory path to create
    :raises OSError: for any failure other than the path already existing
    """
    # EAFP: attempt the creation directly instead of checking existence
    # first -- the old check-then-create pattern was still racy between
    # the exists() test and makedirs().
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare `raise` preserves the original traceback.
            raise
def __list_updates(update_type, update_list):
    """Print package updates of one update type to the console.

    :param update_type: string heading for this group of updates
    :param update_list: list of dicts with 'version' and 'upload_time' keys
    """
    # Truthiness check instead of the redundant len() call; prints
    # nothing at all when there are no updates of this type.
    if update_list:
        print(" %s:" % update_type)
        for update_item in update_list:
            print(" -- %(version)s on %(upload_time)s" % update_item)
async def delete_lease_async(self, lease):
    """Delete the lease info for the given partition from the store.

    If there is no stored lease for the given partition, that is treated
    as success.

    :param lease: The stored lease to be deleted.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    """
    # The blob client is synchronous, so run the delete on the executor.
    delete_call = functools.partial(
        self.storage_client.delete_blob,
        self.lease_container_name,
        lease.partition_id,
        lease_id=lease.token)
    await self.host.loop.run_in_executor(self.executor, delete_call)
def invoke_shell(self, locs, banner):
    """Invokes the appropriate flavor of the python shell.

    Falls back on the native python shell if the requested flavor
    (ipython, bpython, etc) is not installed.
    """
    shell_cls = self.SHELLS[self.args.shell]
    try:
        shell_cls().invoke(locs, banner)
    except ImportError as e:
        warn(("%s is not installed, `%s`, "
              "falling back to native shell") % (self.args.shell, e),
             RuntimeWarning)
        # If the native shell itself failed to import there is nothing
        # left to fall back to -- propagate the error.
        if shell_cls == NativePythonShell:
            raise
        NativePythonShell().invoke(locs, banner)
def parse_db_url(db_url):
    """provided a db url, return a dict with connection properties"""
    parsed = urlparse(db_url)
    # The path component carries a leading '/' before the database name.
    return {
        "database": parsed.path[1:],
        "user": parsed.username,
        "password": parsed.password,
        "host": parsed.hostname,
        "port": parsed.port,
    }
def remove_handler(self, handler: Handler, group: int = 0):
    """Removes a previously-added update handler.

    Make sure to provide the right group that the handler was added in.
    You can use the return value of the :meth:`add_handler` method, a
    tuple of (handler, group), and pass it directly.

    Args:
        handler (``Handler``):
            The handler to be removed.
        group (``int``, *optional*):
            The group identifier, defaults to 0.
    """
    if not isinstance(handler, DisconnectHandler):
        self.dispatcher.remove_handler(handler, group)
        return
    # Disconnect handlers live outside the dispatcher's groups.
    self.disconnect_handler = None
def send_scp(self, *args, **kwargs):
    """Transmit an SCP Packet and return the response.

    This function is a thin wrapper around
    :py:meth:`rig.machine_control.scp_connection.SCPConnection.send_scp`.

    The contextual keyword arguments ``x``, ``y`` and ``p`` are popped
    from ``kwargs``; the context system ensures that they are present.
    """
    coords = tuple(kwargs.pop(key) for key in ("x", "y", "p"))
    return self._send_scp(*coords, *args, **kwargs)
def lookup_object(model, object_id, slug, slug_field):
    """Return the ``model`` object with the passed ``object_id``.

    If ``object_id`` is None, then return the object whose
    ``slug_field`` equals the passed ``slug``. If ``slug`` and
    ``slug_field`` are not passed, then raise Http404 exception.
    """
    if object_id:
        lookup_kwargs = {'%s__exact' % model._meta.pk.name: object_id}
    elif slug and slug_field:
        lookup_kwargs = {'%s__exact' % slug_field: slug}
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for %s" %
                      (model._meta.verbose_name, lookup_kwargs))
def split_window(self, attach=False, vertical=True, start_directory=None):
    """Split window at pane and return newly created :class:`Pane`.

    Parameters
    ----------
    attach : bool, optional
        Attach / select pane after creation.
    vertical : bool, optional
        split vertically
    start_directory : str, optional
        specifies the working directory in which the new pane is created.

    Returns
    -------
    :class:`Pane`
    """
    pane_id = self.get('pane_id')
    return self.window.split_window(
        target=pane_id,
        start_directory=start_directory,
        attach=attach,
        vertical=vertical,
    )
def replace(doc, pointer, value):
    """Replace element from sequence, member from mapping.

    :param doc: the document base
    :param pointer: the path to search in
    :param value: the new value
    :return: the new object

    .. note::

        This operation is functionally identical to a "remove" operation
        for a value, followed immediately by an "add" operation at the
        same location with the replacement value.
    """
    target = Target(doc)
    return target.replace(pointer, value).document
def duration(self):
    """Queries the duration of the call.

    If the call has not ended then the current duration will be returned.

    Returns
    -------
    datetime.timedelta
        The timedelta object representing the duration.
    """
    end = self.ended_timestamp
    if end is None:
        # Call still in progress: measure up to "now" (naive UTC, matching
        # the timestamps stored on the message).
        end = datetime.datetime.utcnow()
    return end - self.message.created_at
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Computes weighted accuracy for all comparison functions for the given
    reference and estimated annotations.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels,
    ...                                  est_intervals, est_labels)

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference chord intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    ref_labels : list, shape=(n,)
        reference chord labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_intervals : np.ndarray, shape=(m, 2)
        estimated chord intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_labels : list, shape=(m,)
        estimated chord labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Append or crop estimated intervals so their span is the same as reference
    est_intervals, est_labels = util.adjust_intervals(
        est_intervals, est_labels,
        ref_intervals.min(), ref_intervals.max(),
        NO_CHORD, NO_CHORD)
    # use merged intervals for segmentation evaluation
    merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
    merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
    # Adjust the labels so that they span the same intervals
    intervals, ref_labels, est_labels = util.merge_labeled_intervals(
        ref_intervals, ref_labels, est_intervals, est_labels)
    # Convert intervals to durations (used as weights)
    durations = util.intervals_to_durations(intervals)
    # Store scores for each comparison function
    scores = collections.OrderedDict()
    scores['thirds'] = weighted_accuracy(
        thirds(ref_labels, est_labels), durations)
    scores['thirds_inv'] = weighted_accuracy(
        thirds_inv(ref_labels, est_labels), durations)
    scores['triads'] = weighted_accuracy(
        triads(ref_labels, est_labels), durations)
    scores['triads_inv'] = weighted_accuracy(
        triads_inv(ref_labels, est_labels), durations)
    scores['tetrads'] = weighted_accuracy(
        tetrads(ref_labels, est_labels), durations)
    scores['tetrads_inv'] = weighted_accuracy(
        tetrads_inv(ref_labels, est_labels), durations)
    scores['root'] = weighted_accuracy(
        root(ref_labels, est_labels), durations)
    scores['mirex'] = weighted_accuracy(
        mirex(ref_labels, est_labels), durations)
    scores['majmin'] = weighted_accuracy(
        majmin(ref_labels, est_labels), durations)
    scores['majmin_inv'] = weighted_accuracy(
        majmin_inv(ref_labels, est_labels), durations)
    scores['sevenths'] = weighted_accuracy(
        sevenths(ref_labels, est_labels), durations)
    scores['sevenths_inv'] = weighted_accuracy(
        sevenths_inv(ref_labels, est_labels), durations)
    # Segmentation scores are computed on the merged (label-run) intervals.
    scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
    scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
    scores['seg'] = min(scores['overseg'], scores['underseg'])
    return scores
def startup_config_content(self):
    """Returns the content of the current startup-config file."""
    config_file = self.startup_config_file
    if config_file is None:
        return None
    try:
        with open(config_file, "rb") as f:
            raw = f.read()
    except OSError as e:
        raise IOUError("Can't read startup-config file '{}': {}".format(config_file, e))
    # Undecodable bytes are replaced rather than raising.
    return raw.decode("utf-8", errors="replace")
def consecutive_frame(self):
    """Return a DataFrame with columns cnt, pids, pl.

    cnt is the number of pids in the sequence. pl is the pl sum.
    Each row summarizes one consecutive run of winning (or losing) trades.
    """
    if self._frame.empty:
        # No trades: return an empty frame with the expected columns.
        return pd.DataFrame(columns=['pids', 'pl', 'cnt', 'is_win'])
    else:
        # 1 for winning rows (non-negative return), 0 for losing rows.
        vals = (self._frame[PC.RET] >= 0).astype(int)
        # Group id increments whenever the win/loss flag changes from the
        # previous row, so each run of equal flags shares one id.
        seq = (vals.shift(1) != vals).astype(int).cumsum()

        def _do_apply(sub):
            # Summarize a single streak of consecutive wins or losses.
            return pd.Series({'pids': sub.index.values,
                              'pl': sub[PC.PL].sum(),
                              'cnt': len(sub.index),
                              'is_win': sub[PC.RET].iloc[0] >= 0,
                              })
        return self._frame.groupby(seq).apply(_do_apply)
def AddFiles ( self , hash_id_metadatas ) : """Adds multiple files to the file store . Args : hash _ id _ metadatas : A dictionary mapping hash ids to file metadata ( a tuple of hash client path and blob references ) ."""
for hash_id , metadata in iteritems ( hash_id_metadatas ) : self . AddFile ( hash_id , metadata )
def parse_events(cls, ev_args, parent_ctx):
    """Capture the events sent to :meth:`.XSO.parse_events`, including the
    initial `ev_args`, to a list and call :meth:`_set_captured_events` on
    the result of :meth:`.XSO.parse_events`.

    Like the method it overrides, :meth:`parse_events` is suspendable.
    """
    # Seed the capture list with the synthesized "start" event that the
    # superclass never sees via the generator protocol.
    dest = [("start", ) + tuple(ev_args)]
    # Delegate to the parent parser while mirroring every event it
    # receives into `dest`; suspension points are forwarded transparently.
    result = yield from capture_events(
        super().parse_events(ev_args, parent_ctx), dest)
    # Attach the captured event list to the freshly parsed object.
    result._set_captured_events(dest)
    return result
def cli(env, package_keyname, location, preset, verify, billing,
        complex_type, quantity, extras, order_items):
    """Place or verify an order.

    This CLI command is used for placing/verifying an order of the
    specified package in the given location (denoted by a datacenter's
    long name). Orders made via the CLI can then be converted to be made
    programmatically by calling SoftLayer.OrderingManager.place_order()
    with the same keynames.

    Packages for ordering can be retrieved from `slcli order package-list`.
    Presets for ordering can be retrieved from `slcli order preset-list`
    (not all packages have presets).

    Items can be retrieved from `slcli order item-list`. In order to find
    required items for the order, use `slcli order category-list`, and
    then provide the --category option for each category code in
    `slcli order item-list`.

    Example::

        # Order an hourly VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk,
        # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13
        slcli order place --billing hourly CLOUD_SERVER DALLAS13 \\
            GUEST_CORES_4 RAM_16_GB ... \\
            --extras '{"virtualGuests": [{"hostname": "test",
                                          "domain": "softlayer.com"}]}' \\
            --complex-type SoftLayer_Container_Product_Order_Virtual_Guest
    """
    manager = ordering.OrderingManager(env.client)
    # --extras arrives as a JSON string; decode it early so a malformed
    # value aborts before any API call is made.
    if extras:
        try:
            extras = json.loads(extras)
        except ValueError as err:
            raise exceptions.CLIAbort(
                "There was an error when parsing the --extras value: {}".format(err))
    args = (package_keyname, location, order_items)
    kwargs = {'preset_keyname': preset,
              'extras': extras,
              'quantity': quantity,
              'complex_type': complex_type,
              'hourly': bool(billing == 'hourly')}
    if verify:
        # Dry run: price the order without placing it.
        table = formatting.Table(COLUMNS)
        order_to_place = manager.verify_order(*args, **kwargs)
        for price in order_to_place['orderContainers'][0]['prices']:
            # Hourly and monthly orders expose their cost under
            # different keys on the price entry.
            cost_key = 'hourlyRecurringFee' if billing == 'hourly' else 'recurringFee'
            table.add_row([
                price['item']['keyName'],
                price['item']['description'],
                price[cost_key] if cost_key in price else formatting.blank()
            ])
    else:
        # Placing a real order incurs charges -- require confirmation.
        if not (env.skip_confirmations or formatting.confirm(
                "This action will incur charges on your account. Continue?")):
            raise exceptions.CLIAbort("Aborting order.")
        order = manager.place_order(*args, **kwargs)
        table = formatting.KeyValueTable(['name', 'value'])
        table.align['name'] = 'r'
        table.align['value'] = 'l'
        table.add_row(['id', order['orderId']])
        table.add_row(['created', order['orderDate']])
        table.add_row(['status', order['placedOrder']['status']])
    env.fout(table)
def orientation(self, value):
    '''setter of orientation property.'''
    for pair in self.__orientation:
        if value in pair:
            # can not set upside-down until api level 18.
            self.server.jsonrpc.setOrientation(pair[1])
            break
    else:
        raise ValueError("Invalid orientation.")
def determine_encoding(buf):
    """Return the appropriate encoding for the given CSS source, according
    to the CSS charset rules.

    `buf` may be either a string or bytes.
    """
    # The ultimate default is utf8; bravo, W3C
    bom_encoding = 'UTF-8'
    if not buf:
        # Empty input: nothing to sniff, fall back to the default.
        return bom_encoding
    if isinstance(buf, str):
        # We got a file that, for whatever reason, produces already-decoded
        # text.  Check for the BOM (which is useless now) and believe
        # whatever's in the @charset.
        if buf[0] == '\ufeff':
            # BUGFIX: actually strip the BOM -- the old `buf[0:]` slice
            # was a no-op, leaving the BOM in place and defeating the
            # @charset detection below.
            buf = buf[1:]
        # This is pretty similar to the code below, but without any encoding
        # double-checking.
        charset_start = '@charset "'
        charset_end = '";'
        if buf.startswith(charset_start):
            start = len(charset_start)
            end = buf.index(charset_end, start)
            return buf[start:end]
        else:
            return bom_encoding
    # BOMs
    if buf[:3] == b'\xef\xbb\xbf':
        bom_encoding = 'UTF-8'
        buf = buf[3:]
    if buf[:4] == b'\x00\x00\xfe\xff':
        bom_encoding = 'UTF-32BE'
        buf = buf[4:]
    elif buf[:4] == b'\xff\xfe\x00\x00':
        bom_encoding = 'UTF-32LE'
        buf = buf[4:]
    if buf[:4] == b'\x00\x00\xff\xfe':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:4] == b'\xfe\xff\x00\x00':
        # BUGFIX: this BOM is the 3412 byte order, not 2143.
        raise UnicodeError("UTF-32-3412 is not supported")
    elif buf[:2] == b'\xfe\xff':
        bom_encoding = 'UTF-16BE'
        buf = buf[2:]
    elif buf[:2] == b'\xff\xfe':
        bom_encoding = 'UTF-16LE'
        buf = buf[2:]
    # The spec requires exactly this syntax; no escapes or extra spaces or
    # other shenanigans, thank goodness.
    charset_start = '@charset "'.encode(bom_encoding)
    charset_end = '";'.encode(bom_encoding)
    if buf.startswith(charset_start):
        start = len(charset_start)
        end = buf.index(charset_end, start)
        encoded_encoding = buf[start:end]
        encoding = encoded_encoding.decode(bom_encoding)
        # Ensure that decoding with the specified encoding actually produces
        # the same @charset rule
        encoded_charset = buf[:end + len(charset_end)]
        if (encoded_charset.decode(encoding) !=
                encoded_charset.decode(bom_encoding)):
            # BUGFIX: format arguments were swapped -- {0} is the declared
            # @charset, {1} the BOM-detected encoding.
            raise UnicodeError(
                "@charset {0} is incompatible with detected encoding {1}"
                .format(encoding, bom_encoding))
    else:
        # With no @charset, believe the BOM
        encoding = bom_encoding
    return encoding
def create_token(key, payload):
    """Auth token generator.

    payload should be a json encodable data structure

    :param key: HMAC secret; str keys are encoded as UTF-8.
    :param payload: JSON-encodable data to sign.
    :return: hex digest of the HMAC-MD5 of the serialized payload.
    """
    # BUGFIX: on Python 3, hmac.new() requires an explicit digestmod
    # (MD5 was only an implicit default on Python 2) and update() only
    # accepts bytes, so the JSON text must be encoded first.
    if isinstance(key, str):
        key = key.encode("utf-8")
    message = json.dumps(payload).encode("utf-8")
    token = hmac.new(key, message, digestmod="md5")
    return token.hexdigest()
def register(name):
    """Return a decorator that registers the decorated class as a resolver
    with the given *name*.

    :param name: unique resolver name to register under
    :raises ValueError: if *name* is already registered
    """
    def decorator(class_):
        if name in known_resolvers:
            raise ValueError('duplicate resolver name "%s"' % name)
        known_resolvers[name] = class_
        # BUGFIX: a class decorator must return the class, otherwise the
        # decorated name is rebound to None.
        return class_
    return decorator
def transformer_ae_small_noatt():
    """Set of hyperparameters."""
    hparams = transformer_ae_small()
    # Quantization / bottleneck settings.
    hparams.bottleneck_kind = "dvq"
    hparams.z_size = 12
    hparams.num_blocks = 1
    hparams.num_decode_blocks = 1
    # Model layout.
    hparams.reshape_method = "slice"
    hparams.hidden_size = 512
    # Disable attention during decompression.
    hparams.do_attend_decompress = False
    return hparams
def __insert_data(postid, userid, rating):
    '''Insert a new rating record and return its uid.'''
    new_uid = tools.get_uuid()
    TabRating.create(
        uid=new_uid,
        post_id=postid,
        user_id=userid,
        rating=rating,
        timestamp=tools.timestamp(),
    )
    return new_uid
def _login(self, failed=False):
    """Login prompt"""
    # Single format call; the message is empty unless the previous
    # attempt failed.
    failed_message = "Login failed" if failed else ""
    content = self.LOGIN_TEMPLATE.format(failed_message=failed_message)
    return "200 OK", content, {"Content-Type": "text/html"}
def _get_archive_listing(self, archive_name):
    '''Return full document for ``{_id:'archive_name'}``

    :param archive_name: document ``_id`` to look up
    :raises KeyError: if no document with that ``_id`` exists

    .. note::

        MongoDB specific results - do not expose to user
    '''
    res = self.collection.find_one({'_id': archive_name})
    if res is None:
        # Include the missing key in the exception for easier debugging.
        raise KeyError(archive_name)
    return res
def feincms_render_region(context, feincms_object, region, request=None,
                          classes='', wrapper=True):
    """{% feincms_render_region feincms_page "main" request %}

    Support for rendering Page without some regions, especially for
    modals; this feature is driven by the 'standalone' context variable.
    """
    if not feincms_object:
        return ''
    # In standalone mode only whitelisted regions are rendered.
    standalone = context.get('standalone', False)
    if standalone and region not in STANDALONE_REGIONS:
        region_content = ''
    else:
        rendered = [_render_content(content, request=request, context=context)
                    for content in getattr(feincms_object.content, region)]
        region_content = ''.join(rendered)
    if not wrapper:
        return region_content
    css_classes = "leonardo-region leonardo-region-%(region)s %(classes)s" % {
        'region': region, 'classes': classes}
    dom_id = "%(region)s-%(id)s" % {
        'id': feincms_object.id,
        'region': region,
    }
    return '<div class="%(classes)s" id=%(id)s>%(content)s</div>' % {
        'id': dom_id,
        'classes': css_classes,
        'content': region_content}
def getattr(self, path, fh):
    """Called by FUSE when the attributes for a file or directory are
    required.

    Returns a dictionary with keys identical to the stat C structure of
    stat(2). st_atime, st_mtime and st_ctime should be floats. On OSX,
    st_nlink should count all files inside the directory. On Linux, only
    the subdirectories are counted. This method gets very heavy traffic.
    """
    # Reject OS special files (e.g. resource forks) up front.
    self._raise_error_if_os_special_file(path)
    cached_attributes = self._get_attributes_through_cache(path)
    return self._stat_from_attributes(cached_attributes)
def check_port(helper, port):
    """check if the port parameter is really a port or "scan" """
    # NOTE(review): despite the docstring, ANY non-integer value --
    # including the literal string "scan" -- triggers the error exit
    # below. Confirm whether "scan" is meant to be filtered out by a
    # caller before this check runs.
    try:
        int(port)
    except ValueError:
        # `unknown` is the module-level plugin exit-code constant.
        helper.exit(summary="Port (-p) must be a integer value.",
                    exit_code=unknown,
                    perfdata='')
def get_for_control_var_and_eval_expr(comm_type, kwargs):
    """Returns tuple that consists of control variable name and iterable
    that is result of evaluated expression of given for loop.

    For example:
    - given 'for $i in $(echo "foo bar")' it returns (['i'], ['foo', 'bar'])
    - given 'for $i, $j in $foo' it returns (['i', 'j'], [('foo', 'bar')])
    """
    # let possible exceptions bubble up
    control_vars, iter_type, expression = parse_for(comm_type)
    evaluated = evaluate_expression(expression, kwargs)[1]
    if len(control_vars) == 2:
        # Two control variables require a mapping to unpack into pairs.
        if not isinstance(evaluated, dict):
            raise exceptions.YamlSyntaxError(
                'Can\'t expand {t} to two control variables.'.
                format(t=type(evaluated)))
        iterval = list(evaluated.items())
    elif isinstance(evaluated, six.string_types) and iter_type == 'word_in':
        # "word_in" iterates over whitespace-separated words of a string.
        iterval = evaluated.split()
    else:
        iterval = evaluated
    return control_vars, iterval
def set_will(self, topic, payload, qos=0, retain=False):
    """Sets up the will message.

    :param topic: Topic of the will message
    :param payload: Content of the message
    :param qos: Quality of Service
    :param retain: The message will be retained
    :raise ValueError: Invalid topic
    :raise TypeError: Invalid payload
    """
    # Delegate straight to the underlying MQTT client.
    self.__mqtt.will_set(topic, payload, qos=qos, retain=retain)
def replace(self, key, initial_value, new_value):
    """Atomically replace the value of a key with a new value.

    This compares the current value of a key, then replaces it with a
    new value if it is equal to a specified value. This operation takes
    place in a transaction.

    :param key: key in etcd to replace
    :param initial_value: old value to replace
    :type initial_value: bytes
    :param new_value: new value of the key
    :type new_value: bytes
    :returns: status of transaction, ``True`` if the replace was
              successful, ``False`` otherwise
    :rtype: bool
    """
    compare_ops = [self.transactions.value(key) == initial_value]
    success_ops = [self.transactions.put(key, new_value)]
    # No failure actions: a mismatch simply reports False.
    status, _ = self.transaction(compare=compare_ops,
                                 success=success_ops,
                                 failure=[])
    return status
def set_title(self, s, panel='top'):
    "set plot title"
    target = self.get_panel(panel)
    target.set_title(s)
def continue_object(self, workflow_object, restart_point='restart_task',
                    task_offset=1, stop_on_halt=False):
    """Continue workflow for one given object from "restart_point".

    :param workflow_object: the object to continue processing for
    :param stop_on_halt: stop processing the workflow if HaltProcessing
        is raised
    :param restart_point: can be one of:

        * restart_prev: will restart from the previous task
        * continue_next: will continue to the next task
        * restart_task: will restart the current task
    """
    task_map = {
        'restart_task': 'current',
        'continue_next': 'next',
        'restart_prev': 'prev',
    }
    # Resume from the object's recorded position (or the beginning).
    self.state.callback_pos = workflow_object.callback_pos or [0]
    self.restart(task=task_map[restart_point], obj='first',
                 objects=[workflow_object], stop_on_halt=stop_on_halt)
def logical_or(f1, f2):  # function factory
    '''Logical or from functions.

    Parameters
    ----------
    f1, f2 : function
        Function that takes array and returns true or false for each
        item in array.

    Returns
    -------
    Function.
    '''
    def combined(value):
        return np.logical_or(f1(value), f2(value))
    # Give the composite a descriptive name for debugging/plots.
    combined.__name__ = "(" + f1.__name__ + "_or_" + f2.__name__ + ")"
    return combined
def sequence_length(fasta):
    """return a dict of the lengths of sequences in a fasta file"""
    return {record.id: len(record)
            for record in SeqIO.parse(fasta, "fasta")}
def plot_sigma(corpus, sigma, nodes=None, **kwargs):
    """Plot sigma values for the most influential nodes over time.

    Parameters
    ----------
    corpus : :class:`.Corpus`
        Must be sliced/indexed by 'date'.
    sigma : dict
        Maps node -> (years, values) histories.
    nodes : list or 'all' (default: None)
        Nodes to plot. If None, the ``topn`` most influential nodes are
        selected automatically; if 'all', every node in ``sigma``.
    kwargs
        color (str, default 'red'), sort_by (str, default 'max'),
        perslice (bool, default False), topn (int, default 20).

    Notes
    -----
    Red bands indicate periods in which each node was influential;
    opacity indicates the intensity of sigma (normalized by the highest
    value in the plot). The period prior to the first instance of each
    node is grayed out.

    NOTE(review): uses ``dict.iteritems``/``xrange`` and indexes
    ``dict.keys()``/``values()`` -- Python 2 only as written.
    """
    # matplotlib is an optional dependency; fail with a clear message.
    try:
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
    except ImportError:
        raise RuntimeError('This method requires the package matplotlib.')
    if nodes == 'all':
        nodes = sigma.keys()
    # Display parameters.
    color = kwargs.get('color', 'red')
    years = sorted(corpus.indices['date'].keys())
    width = years[1] - years[0]  # Get width based on slices.
    height = 1.0
    sort_by = kwargs.get('sort_by', 'max')
    perslice = kwargs.get('perslice', False)
    topn = kwargs.get('topn', 20)
    if not nodes:
        # Get only the topn most significant papers.
        include = []
        if sort_by == 'max':
            if perslice:
                # Get topn per slice.
                vals = {}
                norm_by = 0.
                # Organize values in a way that makes selection easier.
                for node, history in sigma.iteritems():
                    years, values = history
                    if max(values) == 0.:
                        continue
                    for year, val in zip(years, values):
                        if year not in vals:
                            vals[year] = {}
                        vals[year][node] = val
                # Get the maximum values for each slice.
                for year, node_values in vals.iteritems():
                    indices = argsort(node_values.values())[-topn:][::-1]
                    include += [node_values.keys()[i] for i in indices]
                    max_value = max(node_values.values())
                    if max_value > norm_by:
                        norm_by = max_value
            else:
                # Get topn overall.
                maxes = [max(v[1]) for v in sigma.values()]
                indices = argsort(maxes)[-topn:][::-1]
                include = [sigma.keys()[i] for i in indices]
                norm_by = max(maxes)
        # Nodes to include.
        nodes = [node for node, values in sigma.iteritems()
                 if max(values[1]) > 0 and node in include]
    fig = plt.figure(figsize=(10, len(nodes) / 4.))
    # Plot!
    f = 1  # Current subplot.
    axes = {}
    # Earliest year for which we have values.
    x_min = min([min(years) for years, values in sigma.values()])
    for node in nodes:
        x_order = argsort(sigma[node][0])
        x = sorted(sigma[node][0])
        # Normalize sigma values by the plot-wide maximum.
        y = [sigma[node][1][i] / norm_by for i in x_order]
        # One stacked subplot per node.
        ax = fig.add_subplot(len(nodes), 1, f)
        f += 1
        ax.set_yticks([])
        ax.set_xbound(x_min, max(years) + 1)
        # Only show xticks on the bottom subplot.
        if not f == len(nodes) + 1:
            ax.set_xticklabels([])
        # Block out years until first occurrence of feature.
        rect = mpatches.Rectangle((x_min, 0), x[0] - x_min, height,
                                  fill=True, linewidth=0.0)
        rect.set_facecolor('black')
        rect.set_alpha(0.1)
        ax.add_patch(rect)
        # Add a rectangle for each year, shaded according to burstness state.
        for d in xrange(min(x), max(x)):
            try:    # May not have values for all years.
                i = x.index(d)
            except ValueError:
                continue
            xy = (d, 0.)
            state = y[i]
            rect = mpatches.Rectangle(xy, width, height,
                                      fill=True, linewidth=0.0)
            rect.set_facecolor(color)
            rect.set_alpha(state + 0.1)
            ax.add_patch(rect)
        ax.set_ylabel(node, rotation=0,
                      horizontalalignment='right',
                      verticalalignment='center')
    plt.subplots_adjust(left=0.5)
    fig.tight_layout(h_pad=0.25)
    plt.show()