signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      bool: True if the file entry exists.
    """
    inode_number = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)

    # Opening a file by inode number is faster than opening a file by
    # location, so the inode is preferred when present.
    tsk_file = None
    try:
        if inode_number is not None:
            tsk_file = self._tsk_file_system.open_meta(inode=inode_number)
        elif location is not None:
            tsk_file = self._tsk_file_system.open(location)
    except IOError:
        # A failed open means the entry does not exist.
        tsk_file = None

    return tsk_file is not None
def make_qadapter(**kwargs):
    """Return the concrete :class:`QueueAdapter` instance selected by the
    ``qtype`` entry of ``kwargs["queue"]``.

    One can register a customized subclass with::

        from qadapters import SlurmAdapter

        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"
            # Add your customized code here

        # Register your class.
        SlurmAdapter.register(MyAdapter)

        make_qadapter(qtype="myslurm", **kwargs)

    .. warning::

        MyAdapter should be pickleable, hence one should declare it at
        the module level so that pickle can import it at run-time.
    """
    # Map every registered QTYPE string onto its QueueAdapter subclass.
    qtype_to_class = {cls.QTYPE: cls for cls in all_subclasses(QueueAdapter)}

    # Deep-copy first so that popping "qtype" does not mutate the
    # caller's dictionary.
    kwargs = copy.deepcopy(kwargs)
    qtype = kwargs["queue"].pop("qtype")

    return qtype_to_class[qtype](**kwargs)
def pipeline_filepath(pm, filename=None, suffix=None):
    """Derive the path to a file for a managed pipeline.

    :param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a
        particular pipeline instance.
    :param str filename: name of the file for which to build a full path,
        resolved against the pipeline's output folder.
    :param str suffix: suffix for the file; appended to the filename when
        one is given, otherwise to the pipeline name.
    :raises TypeError: if neither filename nor suffix is provided, since
        then there is no substance from which to create a filepath.
    :return str: path to the file within the managed pipeline's output
        folder, unless an absolute filename was given.
    """
    if filename is None and suffix is None:
        raise TypeError("Provide filename and/or suffix to create "
                        "path to a pipeline file.")

    # Fall back on the pipeline's name when no filename was supplied.
    fname = (filename or pm.name) + (suffix or "")

    # Pipeline simply delegates `outfolder` to its manager, so either
    # argument type exposes the attribute we need.
    if os.path.isabs(fname):
        return fname
    return os.path.join(pm.outfolder, fname)
def add_reaction_constraints(model, reactions, Constraint):
    """Add the stoichiometric coefficients as constraints.

    Parameters
    ----------
    model : optlang.Model
        The transposed stoichiometric matrix representation.
    reactions : iterable
        Container of `cobra.Reaction` instances.
    Constraint : optlang.Constraint
        The constraint class for the specific interface.
    """
    new_constraints = []
    for reaction in reactions:
        # One term per participating metabolite: coefficient times the
        # corresponding model variable.
        terms = [coefficient * model.variables[metabolite.id]
                 for metabolite, coefficient in reaction.metabolites.items()]
        # lb == ub == 0 pins the combined expression to zero.
        new_constraints.append(
            Constraint(add(terms), lb=0, ub=0, name=reaction.id))
    model.add(new_constraints)
def call_api(self, request_data, service, action, idempotency=False,
             **kwargs):
    """Call the Adyen API.

    username, password, merchant_account, and platform are pulled from the
    root module level and/or the self object. An AdyenResult is returned on
    a 200 response; otherwise an exception is raised.

    Args:
        request_data (dict): The dictionary of the request to place. This
            should be in the structure of the Adyen API.
            https://docs.adyen.com/manuals/api-manual
        service (str): This is the API service to be called.
        action (str): The specific action of the API service to be called.
        idempotency (bool, optional): Whether the transaction should be
            processed idempotently.
            https://docs.adyen.com/manuals/api-manual#apiidempotency

    Returns:
        AdyenResult: The AdyenResult is returned when a request was
            successful.
    """
    # Lazily construct the HTTP client on first use.
    if not self.http_init:
        self.http_client = HTTPClient(self.app_name,
                                      self.USER_AGENT_SUFFIX,
                                      self.LIB_VERSION,
                                      self.http_force)
        self.http_init = True

    # xapikey at self object has highest priority; fall back to kwargs.
    # NOTE(review): if neither source provides it, `xapikey` stays
    # unbound — presumably later code only reads it when set; confirm.
    if self.xapikey:
        xapikey = self.xapikey
    elif 'xapikey' in kwargs:
        xapikey = kwargs.pop("xapikey")

    # username at self object has highest priority. fallback to kwargs,
    # then to the Payout-specific credential helpers.
    # NOTE(review): if no branch matches, the `if not username` check
    # below raises NameError instead of AdyenInvalidRequestError — confirm
    # whether callers always provide one of the sources.
    if self.username:
        username = self.username
    elif 'username' in kwargs:
        username = kwargs.pop("username")
    elif service == "Payout":
        # Payout uses separate store/submit vs. review credentials.
        if any(substring in action for substring in ["store", "submit"]):
            username = self._store_payout_username(**kwargs)
        else:
            username = self._review_payout_username(**kwargs)
    if not username:
        errorstring = """Please set your webservice username. You can do this by running 'Adyen.username = 'Your username'"""
        raise AdyenInvalidRequestError(errorstring)

    # password at self object has highest priority, then kwargs, then the
    # Payout-specific helpers — mirroring the username resolution above.
    if self.password:
        password = self.password
    elif 'password' in kwargs:
        password = kwargs.pop("password")
    elif service == "Payout":
        if any(substring in action for substring in ["store", "submit"]):
            password = self._store_payout_pass(**kwargs)
        else:
            password = self._review_payout_pass(**kwargs)
    if not password:
        errorstring = """Please set your webservice password. You can do this by running 'Adyen.password = 'Your password'"""
        raise AdyenInvalidRequestError(errorstring)

    # platform at self object has highest priority; must be 'live' or
    # 'test' (case-insensitive).
    if self.platform:
        platform = self.platform
    elif 'platform' in kwargs:
        platform = kwargs.pop('platform')
    if not isinstance(platform, str):
        errorstring = "'platform' value must be type of string"
        raise TypeError(errorstring)
    elif platform.lower() not in ['live', 'test']:
        errorstring = "'platform' must be the value of 'live' or 'test'"
        raise ValueError(errorstring)

    # Fill in the merchant account when the caller did not set one.
    message = request_data
    if not message.get('merchantAccount'):
        message['merchantAccount'] = self.merchant_account

    # Add application info so Adyen can identify the client library.
    request_data['applicationInfo'] = {
        "adyenLibrary": {
            "name": settings.LIB_NAME,
            "version": settings.LIB_VERSION
        }
    }

    # Adyen requires this header to be set and uses the combination of
    # merchant account and merchant reference to determine uniqueness.
    headers = {}
    if idempotency:
        headers['Pragma'] = 'process-retry'

    url = self._determine_api_url(platform, service, action)

    raw_response, raw_request, status_code, headers = self.http_client.request(
        url, json=message, username=username, password=password,
        headers=headers, **kwargs)

    # Creates AdyenResponse if request was successful, raises error if not.
    adyen_result = self._handle_response(url, raw_response, raw_request,
                                         status_code, headers, message)

    return adyen_result
def find_value_type(global_ns, value_type_str):
    """Resolve a C++ type name string to a pygccxml type object.

    Looks the (possibly unqualified) name up in the global namespace,
    falling back to fundamental types, std::string/std::wstring, and
    finally a recursive retry with ``const``/``*`` decorations stripped.
    Returns None when nothing matches unambiguously.
    """
    # Normalise to a fully qualified lookup name.
    if not value_type_str.startswith('::'):
        value_type_str = '::' + value_type_str
    # Any non-callable declaration with that name qualifies.
    found = global_ns.decls(
        name=value_type_str,
        function=lambda decl: not isinstance(decl, calldef.calldef_t),
        allow_empty=True)
    if not found:
        no_global_ns_value_type_str = value_type_str[2:]
        if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
            # Fundamental C++ types (int, double, ...) come from a table.
            return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
        elif type_traits.is_std_string(value_type_str):
            string_ = global_ns.typedef('::std::string')
            return type_traits.remove_declarated(string_)
        elif type_traits.is_std_wstring(value_type_str):
            string_ = global_ns.typedef('::std::wstring')
            return type_traits.remove_declarated(string_)
        else:
            # Strip 'const ' prefix and '*' suffix, then retry on the
            # undecorated name and re-apply the decorations to the result.
            value_type_str = no_global_ns_value_type_str
            has_const = value_type_str.startswith('const ')
            if has_const:
                value_type_str = value_type_str[len('const '):]
            has_pointer = value_type_str.endswith('*')
            if has_pointer:
                value_type_str = value_type_str[:-1]
            found = None
            if has_const or has_pointer:
                found = impl_details.find_value_type(global_ns, value_type_str)
            if not found:
                return None
            else:
                if isinstance(found, class_declaration.class_types):
                    return cpptypes.declarated_t(found)
                if has_const:
                    return cpptypes.const_t(found)
                if has_pointer:
                    return cpptypes.pointer_t(found)
    # Only an unambiguous (single) declaration match is usable.
    if len(found) == 1:
        return found[0]
    return None
def lambda_handler(event, context):
    """Main handler: authenticate the caller, normalise indicators to MD5
    hashes and batch-write them to DynamoDB.

    :param event: Lambda event dict; reads 'email', 'api_key' and
        'indicators'.
    :param context: Lambda context object (unused).
    :return: dict with 'success', 'message' and, on success, 'writeCount'.
    """
    email = event.get('email', None)
    api_key = event.get('api_key', None)
    if not (api_key or email):
        msg = "Missing authentication parameters in your request"
        return {'success': False, 'message': msg}
    # De-duplicate the submitted indicators.
    indicators = list(set(event.get('indicators', list())))
    if len(indicators) == 0:
        return {'success': False, 'message': "No indicators sent in"}
    user = check_api_key(email, api_key)
    if not user:
        return {'success': False, 'message': "Email or API key was invalid."}
    role = check_role(user)
    if not role:
        return {'success': False, 'message': "Account not approved to contribute."}
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    table = boto3.resource("dynamodb").Table(os.environ['database'])
    with table.batch_writer(overwrite_by_pkeys=['indicator']) as batch:
        for item in indicators:
            if item == "":
                continue
            if len(item) != 32:
                # Anything that is not already a 32-char digest gets
                # hashed. FIX: hashlib.md5 requires bytes on Python 3 —
                # passing a str raised TypeError; encode it first.
                item = hashlib.md5(item.encode('utf-8')).hexdigest()
            try:
                batch.put_item(Item={'indicator': item,
                                     'creator': user.get('email'),
                                     'datetime': current_time})
            except Exception as e:
                # Best-effort: log the failed write and keep going.
                logger.error(str(e))
    msg = "Wrote {} indicators".format(len(indicators))
    return {'success': True, 'message': msg, 'writeCount': len(indicators)}
def extract_rows(self, result=None, selector='', table_headers=None,
                 attr='', connector='', default='', verbosity=0,
                 *args, **kwargs):
    """Row data extraction for extract_tabular.

    :param result: dict the extracted head/content pairs are written into;
        a fresh dict per call when omitted.
    :param selector: selector expression locating the row values.
    :param table_headers: column headers paired with the selected values.
    :param attr: attribute to extract; "text" joins the node's text.
    :param connector: separator used when joining text content.
    :param default: fill value when there are more headers than values.
    :param verbosity: > 1 prints progress per extracted attribute.
    :return: list of result dicts.
    """
    # FIX: the original used mutable defaults (result={}, table_headers=[]),
    # which are shared across calls; normalise None instead.
    if result is None:
        result = {}
    if table_headers is None:
        table_headers = []
    result_list = []
    try:
        values = self.get_tree_tag(selector)
        # FIX: itertools.izip/izip_longest are Python 2 only; use the
        # Python 3 names (zip is the builtin).
        from itertools import zip_longest
        if len(table_headers) >= len(values):
            # Pad missing values with the default so every header is kept.
            pairs = zip_longest(table_headers, values, fillvalue=default)
        else:
            pairs = zip(table_headers, values)
        for head, val in pairs:
            if verbosity > 1:
                print("\nExtracting", head, "attribute", sep=' ', end='')
            if attr.lower() == "text":
                try:
                    content = connector.join(
                        [make_ascii(x).strip() for x in val.itertext()])
                except Exception:
                    content = default
                content = content.replace("\n", " ").strip()
            else:
                content = val.get(attr)
                # Resolve relative links against the page URL.
                if attr in ["href", "src"]:
                    content = urljoin(self.url, content)
            result[head] = content
        result_list.append(result)
    except XPathError:
        raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
    except TypeError:
        raise Exception("Selector expression string to be provided. Got " + selector)
    return result_list
def fetch(self):
    """Fetch a ReservationInstance.

    :returns: Fetched ReservationInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
    """
    # No query parameters are needed for a plain fetch.
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=values.of({}),
    )

    solution = self._solution
    return ReservationInstance(
        self._version,
        payload,
        workspace_sid=solution['workspace_sid'],
        task_sid=solution['task_sid'],
        sid=solution['sid'],
    )
def kill(self):
    """Remove the socket file as it is no longer needed."""
    path = self.server_address
    try:
        os.unlink(path)
    except OSError:
        # Ignore the failure when the file is already gone;
        # re-raise for any other unlink error.
        if os.path.exists(path):
            raise
def _Function(self, t):
    """Handle function definitions.

    Emits any decorators, then the ``def name(args)`` header with
    defaults, then the (possibly un-indented) body.
    """
    if t.decorators is not None:
        self._fill("@")
        self._dispatch(t.decorators)
    self._fill("def " + t.name + "(")
    # Left-pad the defaults with None so they line up positionally with
    # argnames (defaults apply to the trailing arguments only).
    defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
    for i, arg in enumerate(zip(t.argnames, defaults)):
        self._write(arg[0])
        if arg[1] is not None:
            self._write('=')
            self._dispatch(arg[1])
        # Comma between arguments, but not after the last one.
        if i < len(t.argnames) - 1:
            self._write(', ')
    self._write(")")
    # In single-function mode the body is emitted without extra indent.
    if self._single_func:
        self._do_indent = False
    self._enter()
    self._dispatch(t.code)
    self._leave()
    self._do_indent = True
def error(self, msg='Program error: {err}', exit=None):
    """Error handler factory.

    Takes a message with an optional ``{err}`` placeholder and returns a
    function that accepts an exception object, prints the formatted
    message to STDERR, and optionally quits.

    Passing a falsy ``msg`` (``None``, ``False``, empty string) suppresses
    all output. A non-None ``exit`` makes the program terminate after
    printing, using that value as the process return code.

    The returned function can be used with the ``progress()`` context
    manager as error handler.
    """
    def on_error(exc):
        # Only emit output when a message template was supplied.
        if msg:
            self.perr(msg.format(err=exc))
        # exit=None means "report but keep running".
        if exit is not None:
            self.quit(exit)
    return on_error
def recordData(self, measurementId, deviceId, data):
    """Passes the data to the handler.

    :param measurementId: the measurement id.
    :param deviceId: the device the data comes from.
    :param data: the data.
    :return: True if the data was handled.
    """
    am, handler = self.getDataHandler(measurementId, deviceId)
    if handler is None:
        # No registered handler for this device/measurement combination.
        logger.error('Received data for unknown handler ' + deviceId + '/' + measurementId)
        return False
    # Keep the accelerometer's liveness bookkeeping up to date, then
    # forward the payload.
    am.stillRecording(deviceId, len(data))
    handler.handle(data)
    return True
def __authenticate(self):
    """Send a JSON payload with the email and password in order to get the
    authentication api_token to be used with the rest of the requests.

    An existing token is verified first via the /check_token endpoint and
    only replaced when the server rejects it.
    """
    if self.api_token:
        # verify current API token
        check_auth_uri = self.uri.split('/api/v1')[0] + '/check_token'
        req = self.request(check_auth_uri)
        try:
            ping = req.get().json()
        except Exception as exc:
            # Server rejected the stored token: drop it and re-auth below.
            if str(exc).startswith('User not authenticated'):
                self.api_token = None
        else:
            if self.email == ping['hello']:
                # Token is valid for this account; nothing more to do.
                return
            # FIX: Python 2 `print` statement is a SyntaxError on
            # Python 3; use the print() function.
            print("Authentication failed")
            sys.exit(1)
    auth_uri = self.uri.split('/api/v1')[0] + '/auth'
    payload = {'email': self.email, 'password': self.password,
               'org_id': self.org_name}
    data = json.dumps(payload)
    req = self.request(auth_uri, data=data)
    response = req.post().json()
    token = response.get('mist_api_token', None)
    if token:
        # backwards compatibility with old Authentication system
        self.api_token = "mist_1 %s:%s" % (self.email, token)
    else:
        self.api_token = response.get('token', None)
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
    """Takes a chamber and Congress, OR state and district,
    returning a list of members.
    """
    check_chamber(chamber)

    kwargs.update(chamber=chamber, congress=congress)

    # Pick the endpoint based on which location filters were supplied:
    # state+district, state only, or the plain congress/chamber roster.
    if 'state' in kwargs and 'district' in kwargs:
        path = "members/{chamber}/{state}/{district}/current.json".format(**kwargs)
    elif 'state' in kwargs:
        path = "members/{chamber}/{state}/current.json".format(**kwargs)
    else:
        path = "{congress}/{chamber}/members.json".format(**kwargs)

    return self.fetch(path, parse=lambda r: r['results'])
def troll ( roll , dec , v2 , v3 ) : """Computes the roll angle at the target position based on : : the roll angle at the V1 axis ( roll ) , the dec of the target ( dec ) , and the V2 / V3 position of the aperture ( v2 , v3 ) in arcseconds . Based on the algorithm provided by Colin Cox that is used in Generic Conversion at STScI ."""
# Convert all angles to radians _roll = DEGTORAD ( roll ) _dec = DEGTORAD ( dec ) _v2 = DEGTORAD ( v2 / 3600. ) _v3 = DEGTORAD ( v3 / 3600. ) # compute components sin_rho = sqrt ( ( pow ( sin ( _v2 ) , 2 ) + pow ( sin ( _v3 ) , 2 ) ) - ( pow ( sin ( _v2 ) , 2 ) * pow ( sin ( _v3 ) , 2 ) ) ) rho = asin ( sin_rho ) beta = asin ( sin ( _v3 ) / sin_rho ) if _v2 < 0 : beta = pi - beta gamma = asin ( sin ( _v2 ) / sin_rho ) if _v3 < 0 : gamma = pi - gamma A = pi / 2. + _roll - beta B = atan2 ( sin ( A ) * cos ( _dec ) , ( sin ( _dec ) * sin_rho - cos ( _dec ) * cos ( rho ) * cos ( A ) ) ) # compute final value troll = RADTODEG ( pi - ( gamma + B ) ) return troll
def mean(self):
    """Mean of the distribution."""
    # Work on a copy with the None "missing value" bucket removed.
    counts = self._discard_value(None)
    total = counts.total()
    # An empty distribution has no mean.
    if not total:
        return None
    weighted = 0
    for key, value in iteritems(counts):
        weighted += key * value
    return weighted / float(total)
def switch_positions(elems):
    """Switch the position of every nth value with the (n+1)th value in a
    given list.

    Examples:
        switch_positions([0, 1, 2, 3, 4, 5]) -> [1, 0, 3, 2, 5, 4]
        switch_positions([5, 6, 7, 8, 9, 10]) -> [6, 5, 8, 7, 10, 9]
        switch_positions([25, 35, 45, 55, 75, 95]) -> [35, 25, 55, 45, 95, 75]

    Args:
        elems: A list.

    Returns:
        A new list where each nth element has been swapped with the
        (n+1)th element. FIX: with an odd number of elements the last
        element now stays in place (the original zip_longest-based
        version injected a spurious None); a dead itertools.tee call was
        also removed.
    """
    swapped = list(elems)
    # Walk the odd indices, swapping each element with its predecessor.
    for i in range(1, len(swapped), 2):
        swapped[i - 1], swapped[i] = swapped[i], swapped[i - 1]
    return swapped
def _dim_attribute ( self , attr , * args , ** kwargs ) : """Returns a list of dimension attribute attr , for the dimensions specified as strings in args . . . code - block : : python ntime , nbl , nchan = cube . _ dim _ attribute ( ' global _ size ' , ' ntime ' , ' nbl ' , ' nchan ' ) or . . code - block : : python ntime , nbl , nchan , nsrc = cube . _ dim _ attribute ( ' global _ size ' , ' ntime , nbl : nchan nsrc ' )"""
import re # If we got a single string argument , try splitting it by separators if len ( args ) == 1 and isinstance ( args [ 0 ] , str ) : args = ( s . strip ( ) for s in re . split ( ',|:|;| ' , args [ 0 ] ) ) # Now get the specific attribute for each string dimension # Integers are returned as is result = [ d if isinstance ( d , ( int , np . integer ) ) else getattr ( self . _dims [ d ] , attr ) for d in args ] # Return single element if length one and single else entire list return ( result [ 0 ] if kwargs . get ( 'single' , True ) and len ( result ) == 1 else result )
def _init(self, trcback):
    """format a traceback from sys.exc_info() into 7-item tuples,
    containing the regular four traceback tuple items, plus the original
    template filename, the line number adjusted relative to the template
    source, and code line from that line number of the template."""
    import mako.template
    # Cache of (line_map, template_lines) per module filename, so each
    # template module is parsed only once.
    mods = {}
    rawrecords = traceback.extract_tb(trcback)
    new_trcback = []
    for filename, lineno, function, line in rawrecords:
        if not line:
            line = ''
        try:
            (line_map, template_lines) = mods[filename]
        except KeyError:
            try:
                # Ask mako whether this module was generated from a
                # template; raises KeyError for ordinary modules.
                info = mako.template._get_module_info(filename)
                module_source = info.code
                template_source = info.source
                template_filename = info.template_filename or filename
            except KeyError:
                # A normal .py file (not a Template)
                if not compat.py3k:
                    # Python 2: decode the raw source line using the
                    # file's declared encoding when detectable.
                    try:
                        fp = open(filename, 'rb')
                        encoding = util.parse_encoding(fp)
                        fp.close()
                    except IOError:
                        encoding = None
                    if encoding:
                        line = line.decode(encoding)
                    else:
                        line = line.decode('ascii', 'replace')
                # Template-specific fields stay None for plain modules.
                new_trcback.append((filename, lineno, function, line,
                                    None, None, None, None))
                continue
            template_ln = 1
            # Map generated-module line numbers back to template lines.
            source_map = mako.template.ModuleInfo.get_module_source_metadata(
                module_source,
                full_line_map=True)
            line_map = source_map['full_line_map']
            template_lines = [line for line in template_source.split("\n")]
            mods[filename] = (line_map, template_lines)
        # Translate the module line number into the template line number
        # and pull the corresponding template source line when in range.
        template_ln = line_map[lineno - 1]
        if template_ln <= len(template_lines):
            template_line = template_lines[template_ln - 1]
        else:
            template_line = None
        new_trcback.append((filename, lineno, function, line,
                            template_filename, template_ln,
                            template_line, template_source))
    if not self.source:
        # Prefer the innermost template frame as the error source ...
        for l in range(len(new_trcback) - 1, 0, -1):
            if new_trcback[l][5]:
                self.source = new_trcback[l][7]
                self.lineno = new_trcback[l][5]
                break
        else:
            # ... otherwise fall back to reading the last frame's file.
            if new_trcback:
                try:
                    # A normal .py file (not a Template)
                    fp = open(new_trcback[-1][0], 'rb')
                    encoding = util.parse_encoding(fp)
                    fp.seek(0)
                    self.source = fp.read()
                    fp.close()
                    if encoding:
                        self.source = self.source.decode(encoding)
                except IOError:
                    self.source = ''
                self.lineno = new_trcback[-1][1]
    return new_trcback
def convert_reshape(node, **kwargs):
    """Map MXNet's Reshape operator attributes to onnx's Reshape operator.

    Converts the output shape attribute to an output shape tensor and
    returns the multiple created nodes.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    target_shape = convert_string_to_list(attrs["shape"])

    initializer = kwargs["initializer"]
    shape_arr = np.array(target_shape, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[shape_arr.dtype]
    dims = np.shape(shape_arr)

    # ONNX Reshape takes the target shape as a second (tensor) input, so
    # materialise the attribute as a named initializer tensor.
    output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(
        output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=target_shape,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)

    # MXNet's special shape codes have no ONNX counterpart.
    unsupported_codes = [-2, -3, -4]
    for val in target_shape:
        if val in unsupported_codes:
            raise AttributeError("Reshape: Shape value not supported in ONNX", val)

    reshape_node = onnx.helper.make_node(
        "Reshape",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, reshape_node]
def _DownloadTS05Data(Overwrite=False):
    '''This function will try to download all existing TS05 archives and
    extract them in $GEOPACK_PATH/tab.

    :param Overwrite: when True, re-download years whose .tab file
        already exists locally.
    '''
    # Archives are published per year, starting at 1995.
    Year = 1995
    Cont = True
    OutPath = Globals.DataPath + 'tab/'
    # Shell command templates; {:04d} placeholders are filled with the year.
    cmd0 = 'wget -nv --show-progress '
    cmd0 += 'http://geo.phys.spbu.ru/~tsyganenko/TS05_data_and_stuff/{:4d}_OMNI_5m_with_TS05_variables.zip'
    cmd0 += ' -O ' + OutPath + '{:04d}.zip'
    cmd1 = 'unzip ' + OutPath + '{:04d}.zip -d ' + OutPath
    cmd2 = 'rm -v ' + OutPath + '{:04d}.zip'
    cmd3 = 'mv -v ' + OutPath + '{:04d}_OMNI_5m_with_TS05_variables.dat '
    cmd3 += OutPath + '{:04d}.tab'
    # Collected .tab paths; NOTE(review): built but never returned or
    # read afterwards — confirm whether a return was intended.
    files = []
    while Cont:
        if Overwrite or (not os.path.isfile(Globals.DataPath + 'tab/{:04d}.tab'.format(Year))):
            ret = os.system(cmd0.format(Year, Year))
            if ret == 0:
                # extract file
                os.system(cmd1.format(Year))
                # delete archive
                os.system(cmd2.format(Year))
                # rename tab
                os.system(cmd3.format(Year, Year))
                files.append(OutPath + '{:04}.tab'.format(Year))
            else:
                # wget failed: assume no archive exists for this year,
                # remove the empty download target and stop the loop.
                os.system(cmd2.format(Year))
                Cont = False
        Year += 1
def add_distances(self, indices, periodic=True, indices2=None):
    r"""Adds the distances between atoms to the feature list.

    Parameters
    ----------
    indices : can be of two types:

        ndarray((n, 2), dtype=int):
            n x 2 array with the pairs of atoms between which the
            distances shall be computed

        iterable of integers (either list or ndarray(n, dtype=int)):
            indices (not pairs of indices) of the atoms between which
            the distances shall be computed.

    periodic : optional, boolean, default is True
        If periodic is True and the trajectory contains unitcell
        information, distances will be computed under the minimum image
        convention.

    indices2 : iterable of integers, optional
        Only has effect if :py:obj:`indices` is an iterable of integers.
        Instead of the above behaviour, only the distances between the
        atoms in :py:obj:`indices` and :py:obj:`indices2` will be
        computed.

    .. note::
        When using the iterable of integers input, :py:obj:`indices` and
        :py:obj:`indices2` will be sorted numerically and made unique
        before converting them to a pairlist. Please look carefully at
        the output of :py:func:`describe()` to see what features exactly
        have been added.
    """
    from .distances import DistanceFeature

    # Normalise the flexible index input into an explicit pair list,
    # then validate the pairs against the topology.
    atom_pairs = _parse_pairwise_input(
        indices, indices2, self.logger, fname='add_distances()')
    atom_pairs = self._check_indices(atom_pairs)

    feature = DistanceFeature(self.topology, atom_pairs, periodic=periodic)
    self.__add_feature(feature)
def Ctrl_V(self, delay=0):
    """Ctrl + V shortcut."""
    self._delay(delay)
    # Hold Ctrl, tap V, release Ctrl — queued as three board commands.
    for kind, key in (("KeyDown", BoardKey.Ctrl),
                      ("KeyPress", BoardKey.V),
                      ("KeyUp", BoardKey.Ctrl)):
        self.add(Command(kind, '%s "%s", %s' % (kind, key, 1)))
def image_single_point_source(self, image_model_class, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps):
    """return model without including the point source contributions as a
    list (for each point source individually)

    :param image_model_class: ImageModel class instance
    :param kwargs_lens: lens model kwargs list
    :param kwargs_source: source model kwargs list
    :param kwargs_lens_light: lens light model kwargs list
    :param kwargs_ps: point source model kwargs list
    :return: list of images with point source isolated
    """
    # reconstructed model with given psf
    model, error_map, cov_param, param = image_model_class.image_linear_solve(
        kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
    data = image_model_class.Data.data
    mask = image_model_class.ImageNumerics.mask

    point_source_list = image_model_class.point_sources_list(kwargs_ps, kwargs_lens)
    # Residual (data - full model) plus one point source at a time,
    # restricted to the masked region.
    return [(data - model + point_source) * mask
            for point_source in point_source_list]
def version_is_valid(version_str):
    """Check to see if the version specified is valid as far as
    pkg_resources is concerned

    >>> version_is_valid('blah')
    False
    >>> version_is_valid('1.2.3')
    True
    """
    # Parsing succeeds only for PEP 440 compliant version strings.
    try:
        packaging.version.Version(version_str)
        return True
    except packaging.version.InvalidVersion:
        return False
def start_session(self):
    """Start the session. Invoke in your @gen.coroutine wrapped prepare
    method like::

        result = yield gen.Task(self.start_session)

    :rtype: bool
    """
    # Create the session object, then asynchronously load its persisted
    # state.
    self.session = self._session_start()
    result = yield gen.Task(self.session.fetch)
    # Make sure the client gets the session cookie.
    self._set_session_cookie()
    # Record the originating IP the first time this session is seen.
    if not self.session.get('ip_address'):
        self.session.ip_address = self.request.remote_ip
    self._last_values()
    # Tornado generator coroutine: the return value is delivered by
    # raising gen.Return.
    raise gen.Return(result)
def get_find_all_query(self, table_name, constraints=None, *,
                       columns=None, order_by=None, limiting=(None, None)):
    """Builds a find query.

    :limiting: if present must be a 2-tuple of (limit, offset)
        either of which can be None.
    """
    where, params = self.parse_constraints(constraints)

    # Normalise the column selection into a single SQL fragment.
    if not columns:
        select_cols = "*"
    elif isinstance(columns, str):
        select_cols = columns
    else:
        select_cols = ", ".join(columns)

    order = " order by {0}".format(order_by) if order_by else ""

    paging = ""
    if limiting is not None:
        limit, offset = limiting
        if limit is not None:
            paging += " limit {0}".format(limit)
        if offset is not None:
            paging += " offset {0}".format(offset)

    # A missing WHERE clause becomes the always-true "1 = 1".
    query = "select {0} from {1} where {2}{3}{4}".format(
        select_cols, table_name, where or "1 = 1", order, paging)
    return (query, params)
def one_point_crossover(parents):
    """Perform one point crossover on two parent chromosomes.

    Select a random position in the chromosome. Take genes to the left
    from one parent and the rest from the other parent.
    Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0),
    child = xxyyy
    """
    first, second = parents[0], parents[1]
    # The point that the chromosomes will be crossed at (see Ex. above);
    # strictly inside the chromosome so both parents contribute.
    crossover_point = random.randint(1, len(first) - 1)
    return (_one_parent_crossover(first, second, crossover_point),
            _one_parent_crossover(second, first, crossover_point))
def build_tree_from_alignment ( aln , moltype = DNA , best_tree = False , params = { } , working_dir = '/tmp' ) : """Returns a tree from Alignment object aln . aln : an cogent . core . alignment . Alignment object , or data that can be used to build one . - Clearcut only accepts aligned sequences . Alignment object used to handle unaligned sequences . moltype : a cogent . core . moltype object . - NOTE : If moltype = RNA , we must convert to DNA since Clearcut v1.0.8 gives incorrect results if RNA is passed in . ' U ' is treated as an incorrect character and is excluded from distance calculations . best _ tree : if True ( default : False ) , uses a slower but more accurate algorithm to build the tree . params : dict of parameters to pass in to the Clearcut app controller . The result will be an cogent . core . tree . PhyloNode object , or None if tree fails ."""
params [ '--out' ] = get_tmp_filename ( working_dir ) # Create instance of app controller , enable tree , disable alignment app = Clearcut ( InputHandler = '_input_as_multiline_string' , params = params , WorkingDir = working_dir , SuppressStdout = True , SuppressStderr = True ) # Input is an alignment app . Parameters [ '-a' ] . on ( ) # Turn off input as distance matrix app . Parameters [ '-d' ] . off ( ) # If moltype = RNA , we must convert to DNA . if moltype == RNA : moltype = DNA if best_tree : app . Parameters [ '-N' ] . on ( ) # Turn on correct moltype moltype_string = moltype . label . upper ( ) app . Parameters [ MOLTYPE_MAP [ moltype_string ] ] . on ( ) # Setup mapping . Clearcut clips identifiers . We will need to remap them . # Clearcut only accepts aligned sequences . Let Alignment object handle # unaligned sequences . seq_aln = Alignment ( aln , MolType = moltype ) # get int mapping int_map , int_keys = seq_aln . getIntMap ( ) # create new Alignment object with int _ map int_map = Alignment ( int_map ) # Collect result result = app ( int_map . toFasta ( ) ) # Build tree tree = DndParser ( result [ 'Tree' ] . read ( ) , constructor = PhyloNode ) for node in tree . tips ( ) : node . Name = int_keys [ node . Name ] # Clean up result . cleanUp ( ) del ( seq_aln , app , result , int_map , int_keys , params ) return tree
def platedir(self, filetype, **kwargs):
    """Returns plate subdirectory in :envvar:`PLATELIST_DIR` of the form:
    ``NNNNXX/NNNNN``.

    Parameters
    ----------
    filetype : str
        File type parameter.
    plateid : int or str
        Plate ID number. Will be converted to int internally.

    Returns
    -------
    platedir : str
        Plate directory in the format ``NNNNXX/NNNNN``.
    """
    plate = int(kwargs['plateid'])
    # Plates are grouped in buckets of 100: e.g. plate 7339 -> 0073XX.
    prefix = "{:0>4d}XX".format(plate // 100)
    return os.path.join(prefix, "{:0>6d}".format(plate))
def complete_contexts(self):
    '''Return a list of interfaces that have satisfied contexts.'''
    # Serve the cached result when available; otherwise context()
    # recomputes and populates self._complete_contexts as a side effect.
    if not self._complete_contexts:
        self.context()
    return self._complete_contexts
def get_gene_names(self):
    """Gather gene names of all nodes and node members.

    Stores the collected names on ``self._gene_names``. Nodes with a
    ``members`` mapping contribute their member keys; group placeholder
    nodes (names starting with ``'Group'``) are skipped.
    """
    names = []
    for node in self._nodes:
        data = node['data']
        members = data.get('members')
        if members:
            names.extend(members.keys())
        elif not data['name'].startswith('Group'):
            names.append(data['name'])
    self._gene_names = names
def pexpire(self, name, time):
    """Set an expire flag on key ``name`` for ``time`` milliseconds.

    ``time`` can be represented by an integer or a Python timedelta
    object.
    """
    ms = time
    if isinstance(ms, datetime.timedelta):
        # Convert the timedelta to whole milliseconds.
        ms = int(ms.total_seconds() * 1000)
    return self.execute_command('PEXPIRE', name, ms)
def df2plotshape(dlen, xlabel_unit, ylabel_unit, suptitle='', fix='h',
                 xlabel_skip=None, test=False, fig=None):
    """Draw proportional rectangle ("plot shape") summaries of a dataframe.

    Each row of ``dlen`` becomes a rectangle sized from its ``_xlen`` /
    ``_ylen`` columns; extra ``_ylen <name>`` columns become stacked
    sub-rectangles.

    :param dlen: dataframe with ``_xlen``, ``_ylen`` and ``title`` columns
        (plus optional ``_ylen <subset>`` columns) — assumed schema; TODO
        confirm against callers.
    :param xlabel_unit: unit label drawn under the x axis.
    :param ylabel_unit: unit label for the total-height annotation.
    :param suptitle: overall title text.
    :param fix: 'h' fixes heights and scales widths; 'w' the reverse.
    :param xlabel_skip: titles whose x labels should be left blank.
    :param test: if True, print intermediate state for debugging.
    :param fig: matplotlib figure to draw on; created if None.
        (BUG FIX: original referenced ``fig`` without ever defining it,
        raising NameError; it is now an optional parameter.)
    :return: ``(fig, ax)``.
    """
    # BUG FIX: mutable default argument replaced with None sentinel.
    xlabel_skip = [] if xlabel_skip is None else xlabel_skip
    dlen['xlabel'] = dlen.apply(
        lambda x: f"{x['_xlen']}" if not x['title'] in xlabel_skip else '',
        axis=1)
    dlen['ylabel'] = dlen.apply(lambda x: "", axis=1)
    ylen = dlen['_ylen'].unique()[0]
    if test:
        print(dlen.columns)
    # BUG FIX: subsets/subsets2cols were undefined when fix != 'h' but are
    # used unconditionally below; initialize them up front.
    subsets = []
    subsets2cols = {}
    if fix == 'h':
        dlen['xlen'] = dlen['_xlen'] / dlen['_xlen'].max() / len(dlen) * 0.8
        dlen['ylen'] = 0.8
        for subset in [c for c in dlen if c.startswith('_ylen ')]:
            subsetcol = subset.replace('_ylen', 'ylen')
            dlen[subsetcol] = 0.25
            subsets.append(subsetcol)
        # BUG FIX: original mapped the loop-invariant `subsetcol` for every
        # element ("for s in subsets" but using subsetcol); map each subset
        # column to its own stripped name instead.
        subsets2cols = dict(
            zip([s.replace('ylen ', '') for s in subsets], subsets))
        if test:
            print(dlen.columns)
            print(subsets2cols)
    elif fix == 'w':
        dlen['xlen'] = 0.8
        dlen['ylen'] = dlen['_ylen'] / dlen['_ylen'].max() / len(dlen) * 0.85
    dlen = dlen.drop([c for c in dlen if c.startswith('_')], axis=1)
    if test:
        print(dlen.columns)
    if fig is None:
        fig = plt.figure(figsize=[4, 4])
    # BUG FIX: x_ was only initialized when idx == 0; hoist the default so a
    # non-zero-based index does not raise NameError.
    x_ = 0
    ax = None
    for idx in dlen.index:
        if idx == 0:
            x_ = 0
        kws_plot_rect = makekws_plot_rect(dlen, fig, idx, x_)
        if test:
            print(kws_plot_rect)
        # The "big" background rectangle ignores per-subset ylen columns.
        kws_plot_rect_big = {k: kws_plot_rect[k]
                             for k in kws_plot_rect if not 'ylen ' in k}
        kws_plot_rect_big['color'] = 'gray'
        ax = plot_rect(**kws_plot_rect_big)
        for subset in subsets2cols:
            kws_plot_rect = makekws_plot_rect(
                dlen.drop('ylen', axis=1)
                    .rename(columns={subsets2cols[subset]: 'ylen'}),
                fig, idx, x_)
            kws_plot_rect['title'] = ''
            kws_plot_rect['xlabel'] = ''
            # Only the first column carries the subset label.
            kws_plot_rect['ylabel'] = subset if idx == 0 else ''
            if test:
                print(kws_plot_rect)
            ax = plot_rect(**kws_plot_rect)
        x_ = kws_plot_rect['x'] + dlen.loc[idx, 'xlen'] + 0.1
    ax.text(x_ / 2.3, -0.1, xlabel_unit, ha='center')
    ax.text(x_ / 2.3, 0.9, suptitle, ha='center')
    ax.text(-0.1, 0.4, f"total ~{ylen}{ylabel_unit}", va='center', rotation=90)
    return fig, ax
async def get_home_data(self):
    """Get Tautulli home stats.

    Fetches the ``get_home_stats`` endpoint and caches the top movie
    title, top TV show title and top user name on
    ``self.tautulli_home_data``. Network/parse failures are logged and
    leave the cache untouched.
    """
    cmd = 'get_home_stats'
    url = self.base_url + cmd
    data = {}
    try:
        # NOTE(review): the `loop` argument to async_timeout.timeout is
        # deprecated in newer async_timeout releases -- confirm pinned version.
        async with async_timeout.timeout(8, loop=self._loop):
            request = await self._session.get(url)
            response = await request.json()
            # Each stat entry carries a 'stat_id' and a 'rows' list; an
            # empty/missing rows list maps the value to None.
            for stat in response.get('response', {}).get('data', {}):
                if stat.get('stat_id') == 'top_movies':
                    try:
                        row = stat.get('rows', {})[0]
                        data['movie'] = row.get('title')
                    except (IndexError, KeyError):
                        data['movie'] = None
                if stat.get('stat_id') == 'top_tv':
                    try:
                        row = stat.get('rows', {})[0]
                        data['tv'] = row.get('title')
                    except (IndexError, KeyError):
                        data['tv'] = None
                if stat.get('stat_id') == 'top_users':
                    try:
                        row = stat.get('rows', {})[0]
                        data['user'] = row.get('user')
                    except (IndexError, KeyError):
                        data['user'] = None
            logger("Status from Tautulli: " + str(request.status))
        self.tautulli_home_data = data
        logger(self.tautulli_home_data)
    except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror,
            AttributeError) as error:
        msg = "Can not load data from Tautulli: {} - {}".format(url, error)
        # 40 is the logging.ERROR level passed to the project logger helper.
        logger(msg, 40)
def escape(self, text, quote=True):
    """Escape special characters in HTML.

    Dispatches to the bytes escaper for ``bytes`` input and the text
    escaper otherwise; ``quote`` is forwarded unchanged.
    """
    escaper = escape_b if isinstance(text, bytes) else escape
    return escaper(text, quote)
def returnList(self, limit=False):
    '''Return a list of dictionaries (and *not* a PLOD class).

    The list returned maintains the 'types' of the original entries
    unless another operation has explicitly changed them (for example,
    an 'upsert' replacement of an entry).

    Example of use:

    >>> test = [
    ...     {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12]},
    ...     {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
    ... ]
    >>> PLOD(test).returnList()[0]["name"]  # doctest: +SKIP
    'Jim'

    :param limit: maximum number of entries to return. Defaults to
        False, which means the full list is returned. (BUG FIX: the
        original compared with ``== False``, so ``limit=0`` also
        returned the *entire* table; 0 now returns an empty list.)
    :return: the list of dictionaries
    '''
    if limit is False:
        return self.table
    # Slicing replaces the original manual index-copy loop and naturally
    # caps at the table length.
    return self.table[:limit]
def polygon_to_mask(coords, dims, z=None):
    """Rasterize a polygon into a binary mask.

    Given a list of (x, y) point pairs defining a polygon, return an
    integer mask of shape ``dims[0:2]`` that is 1 inside the polygon.
    If ``z`` is given, the 2-D mask is embedded at index ``z`` of the
    third axis of a zero array of shape ``dims``.

    :param coords: sequence of (x, y) polygon vertices.
    :param dims: output dimensions; must have length >= 3 when ``z`` is used.
    :param z: optional z-index at which to embed the 2-D mask.
    :raises Exception: if ``z`` is given but ``dims`` has no third axis,
        or ``z`` exceeds it.
    """
    bounds = array(coords).astype('int')
    path = Path(bounds)
    grid = meshgrid(range(dims[1]), range(dims[0]))
    # BUG FIX: zip() is a one-shot iterator in Python 3; materialize it so
    # Path.contains_points receives a proper (N, 2) point sequence.
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
    mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
    if z is not None:
        if len(dims) < 3:
            raise Exception('Dims must have three-dimensions for embedding z-index')
        if z >= dims[2]:
            raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
        tmp = zeros(dims)
        tmp[:, :, z] = mask
        mask = tmp
    return mask
def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
    """Multi-purpose expand method from original STC class.

    Recursively shows/hides the child lines of fold header ``line`` in a
    Scintilla text control, descending into nested fold headers.

    :param line: line number of the fold header to expand below.
    :param do_expand: whether child lines should be shown.
    :param force: force show/hide based on ``vislevels`` instead of the
        stored fold-expanded state.
    :param vislevels: remaining depth of levels to keep visible when forcing.
    :param level: fold level of ``line``; -1 means query the control.
    :return: the first line number after the processed region.
    """
    lastchild = self.GetLastChild(line, level)
    line += 1
    while line <= lastchild:
        if force:
            # Forced mode: visibility is dictated purely by vislevels.
            if vislevels > 0:
                self.ShowLines(line, line)
            else:
                self.HideLines(line, line)
        elif do_expand:
            self.ShowLines(line, line)
        if level == -1:
            level = self.GetFoldLevel(line)
        if level & stc.STC_FOLDLEVELHEADERFLAG:
            # Nested fold header: recurse one visibility level deeper.
            if force:
                self.SetFoldExpanded(line, vislevels - 1)
                line = self.expand(line, do_expand, force, vislevels - 1)
            else:
                # Only expand the sub-fold if it was already expanded.
                expandsub = do_expand and self.GetFoldExpanded(line)
                line = self.expand(line, expandsub, force, vislevels - 1)
        else:
            line += 1
    return line
def _device_expiry_callback ( self ) : """Periodic callback to remove expired devices from visible _ devices ."""
expired = 0 for adapters in self . _devices . values ( ) : to_remove = [ ] now = monotonic ( ) for adapter_id , dev in adapters . items ( ) : if 'expires' not in dev : continue if now > dev [ 'expires' ] : to_remove . append ( adapter_id ) local_conn = "adapter/%d/%s" % ( adapter_id , dev [ 'connection_string' ] ) if local_conn in self . _conn_strings : del self . _conn_strings [ local_conn ] for entry in to_remove : del adapters [ entry ] expired += 1 if expired > 0 : self . _logger . info ( 'Expired %d devices' , expired )
def pi(nsteps):
    # Approximate pi by rectangle-rule integration of 4/(1+x^2) over [0, 1].
    # NOTE: `sum` deliberately shadows the builtin -- the name is referenced
    # by the OpenMP reduction clause below and must not be renamed.
    sum, step = 0., 1. / nsteps
    # Pythran/OpenMP directive: a string statement in this position is parsed
    # as a parallelization annotation, not documentation. Do not move it or
    # place a docstring before it.
    "omp parallel for reduction(+:sum) private(x)"
    for i in range(nsteps):
        # NOTE(review): (i - 0.5) puts the first sample below the interval;
        # the midpoint rule is usually (i + 0.5) * step -- confirm intent.
        x = (i - 0.5) * step
        sum += 4. / (1. + x ** 2)
    return step * sum
def order_stop(backend, order_id):
    """Stop an order.

    Turns off the serving-generation ability of an order and stops any
    running jobs; all state is kept around.

    :param backend: CLI backend holding the DK client (``backend.dki``).
    :param order_id: identifier of the order to stop.
    :raises click.ClickException: if ``order_id`` is None.
    """
    if order_id is None:
        raise click.ClickException('invalid order id %s' % order_id)
    message = '%s - Stop order id %s' % (get_datetime(), order_id)
    click.secho(message, fg='green')
    result = DKCloudCommandRunner.stop_order(backend.dki, order_id)
    check_and_print(result)
def enable_cpu(self, rg):
    '''Enable cpus.

    :param rg: range, list of CPU ids, or a single int id to enable.
    '''
    # Accept a bare int for convenience.
    if isinstance(rg, int):
        rg = [rg]
    # Only touch CPUs that are currently offline.
    # BUG FIX: the local was misleadingly named `to_disable` although this
    # routine enables CPUs (writes "1" to the online file).
    to_enable = set(rg) & set(self.__get_ranges("offline"))
    for cpu in to_enable:
        fpath = path.join("cpu%i" % cpu, "online")
        self.__write_cpu_file(fpath, b"1")
def idle_task(self):
    '''called rapidly by mavproxy'''
    now = time.time()
    # Roll the current counters into a new bucket at most once per second.
    if now - self.last_calc <= 1:
        return
    self.last_calc = now
    self.buckets.append(self.counts)
    self.counts = {}
    if len(self.buckets) > self.max_buckets:
        # Drop the oldest buckets, keeping only the most recent ones.
        self.buckets = self.buckets[-self.max_buckets:]
def from_string(string, output_path, options=None, toc=None, cover=None,
                css=None, config=None, cover_first=None):
    """Convert given string/strings to IMG file.

    :param string: HTML content to render.
    :param output_path: path to output file(s); False returns the image
        as a byte string instead of writing a file.
    :param options: (optional) dict with wkhtmltopdf global and page
        options, with or w/o '--'.
    :param toc: (optional) dict with toc-specific wkhtmltopdf options.
    :param cover: (optional) url/filename of a cover html page.
    :param css: style of input.
    :param config: (optional) instance of imgkit.config.Config().
    :param cover_first: (optional) if True, cover always precedes TOC.
    :return: True on success.
    """
    kit = IMGKit(string, 'string', options=options, toc=toc, cover=cover,
                 css=css, config=config, cover_first=cover_first)
    return kit.to_img(output_path)
def log_status(plugin, filename, status):
    '''Properly display a migration status line.'''
    # Build the "plugin:filename " label and dot-pad it to 70 characters so
    # statuses line up in the log output.
    label = ':'.join((plugin, filename)) + ' '
    padded = '{:.<70}'.format(label)
    log.info('%s [%s]', padded, status)
def add_size_info(self):
    """Warn when the URL content exceeds the configured download limit.

    Should be overridden in subclasses.
    """
    maxbytes = self.aggregate.config["maxfilesizedownload"]
    if self.size <= maxbytes:
        return
    self.add_warning(
        _("Content size %(size)s is larger than %(maxbytes)s.") % dict(
            size=strformat.strsize(self.size),
            maxbytes=strformat.strsize(maxbytes),
        ),
        tag=WARN_URL_CONTENT_SIZE_TOO_LARGE)
def delete_request(self, container, resource=None, query_items=None,
                   accept=None):
    """Send a DELETE request.

    :param container: container/collection part of the URL.
    :param resource: optional resource path appended to the container.
    :param query_items: query parameters; a list/tuple/set is encoded
        straight into the URL, anything else is passed to requests as
        ``params``.
    :param accept: optional Accept header value.
    :return: the parsed response from ``_handle_response``.
    """
    url = self.make_url(container, resource)
    headers = self._make_headers(accept)
    if query_items and isinstance(query_items, (list, tuple, set)):
        # Sequence-style query items are pre-encoded into the URL; clear them
        # so requests does not try to encode them a second time.
        url += RestHttp._list_query_str(query_items)
        query_items = None
    try:
        rsp = requests.delete(url, params=query_items, headers=headers,
                              verify=self._verify, timeout=self._timeout)
    except requests.exceptions.ConnectionError as e:
        # Translates the connection error into this client's exception type.
        RestHttp._raise_conn_error(e)
    if self._dbg_print:
        self.__print_req('DELETE', rsp.url, headers, None)
    return self._handle_response(rsp)
def currency(s=''):
    'dirty float (strip non-numeric characters)'
    if isinstance(s, str):
        # Drop currency symbols, commas, etc., keeping only float characters.
        s = ''.join(ch for ch in s if ch in floatchars)
    if not s:
        # Empty/zero input yields the typed null wrapper instead of a float.
        return TypedWrapper(float, None)
    return float(s)
def umount(name, device=None, user=None, util='mount'):
    '''Attempt to unmount a device by specifying the directory it is
    mounted on.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.umount /mnt/foo

    .. versionadded:: 2015.5.0

    .. code-block:: bash

        salt '*' mount.umount /mnt/foo /dev/xvdc1
    '''
    if util != 'mount':
        # This functionality used to live in img.umount_image
        if 'qemu_nbd.clear' in __salt__:
            mnt_key = 'img.mnt_{0}'.format(name)
            if mnt_key in __context__:
                __salt__['qemu_nbd.clear'](__context__[mnt_key])
        return
    mnts = active()
    if name not in mnts:
        return '{0} does not have anything mounted'.format(name)
    cmd = 'umount {0}'.format(device if device else name)
    out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
    if out['retcode']:
        return out['stderr']
    return True
def sign(allocate_quota_request):
    """Obtains a signature for an operation in a ``AllocateQuotaRequest``.

    Args:
        allocate_quota_request
            (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaRequest`):
            the request whose operation is hashed.

    Returns:
        string: a secure hash generated from the operation.

    Raises:
        ValueError: if the request is not an ``AllocateQuotaRequest`` or
            its operation lacks ``methodName``/``consumerId``.
    """
    if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):
        raise ValueError(u'Invalid request')
    op = allocate_quota_request.allocateOperation
    if op is None or op.methodName is None or op.consumerId is None:
        logging.error(u'Bad %s: not initialized => not signed',
                      allocate_quota_request)
        raise ValueError(
            u'allocate_quota request must be initialized with an operation')
    # The hash covers method name, consumer id, labels and quota metrics,
    # separated by NUL bytes. The update order below IS the signature
    # contract -- do not reorder.
    md5 = hashlib.md5()
    md5.update(op.methodName.encode('utf-8'))
    md5.update(b'\x00')
    md5.update(op.consumerId.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    for value_set in op.quotaMetrics:
        md5.update(b'\x00')
        md5.update(value_set.metricName.encode('utf-8'))
        for mv in value_set.metricValues:
            metric_value.update_hash(md5, mv)
    md5.update(b'\x00')
    return md5.digest()
def volume_show(self, name):
    '''Show one volume.

    :param name: display name of the volume to look up.
    :return: the matching volume record.
    :raises SaltCloudSystemExit: when no cinder endpoint is available.
    :raises KeyError: when no volume with ``name`` exists in the listing.
    '''
    if self.volume_conn is None:
        raise SaltCloudSystemExit('No cinder endpoint available')
    # Removed the unused `nt_ks = self.volume_conn` local and the dead
    # commented-out exception handler from the original.
    volumes = self.volume_list(search_opts={'display_name': name})
    return volumes[name]
def rolling_window(arr, window_size, stride=1, return_idx=False):
    """Rolling-window iterator over a numpy array (or any sliceable
    sequence) with window size and stride control.

    Parameters
    ----------
    arr : array_like
        Input array/sequence.
    window_size : int
        Width of the window.
    stride : int (optional)
        Number of indices to advance the window each iteration step.
    return_idx : bool (optional)
        Whether to yield the slice indices along with the segment.

    Raises
    ------
    ValueError
        If ``window_size`` is negative or ``stride`` is less than 1.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 4, 5, 6])
    >>> for x in rolling_window(a, 3):
    ...     print(x)
    [1 2 3]
    [2 3 4]
    [3 4 5]
    [4 5 6]
    >>> for x in rolling_window(a, 2, stride=2):
    ...     print(x)
    [1 2]
    [3 4]
    [5 6]
    """
    window_size = int(window_size)
    stride = int(stride)
    if window_size < 0 or stride < 1:
        raise ValueError
    arr_len = len(arr)
    if arr_len < window_size:
        # Input shorter than one window: emit the whole thing once.
        if return_idx:
            yield (0, arr_len), arr
        else:
            yield arr
        # BUG FIX: the original fell through into the main loop here and
        # yielded the short array a second time.
        return
    ix1 = 0
    while ix1 < arr_len:
        ix2 = ix1 + window_size
        result = arr[ix1:ix2]
        if return_idx:
            yield (ix1, ix2), result
        else:
            yield result
        # Stop after a partial window or once the window reached the end.
        if len(result) < window_size or ix2 >= arr_len:
            break
        ix1 += stride
def after_batch(self, stream_name: str, batch_data: Batch) -> None:
    """Count training iterations and terminate when the budget is spent.

    If ``stream_name`` is the configured train stream, increase the
    iterations counter and possibly stop the training; additionally,
    call ``_check_train_time``.

    :param stream_name: stream name
    :param batch_data: ignored
    :raise TrainingTerminated: if the number of iterations reaches
        ``self._iters``
    """
    self._check_train_time()
    # Only batches from the training stream count toward the budget.
    if self._iters is None or stream_name != self._train_stream_name:
        return
    self._iters_done += 1
    if self._iters_done >= self._iters:
        raise TrainingTerminated(
            'Training terminated after iteration {}'.format(self._iters_done))
def to_grey(self, on: bool = False):
    """Change the LED to grey.

    :param on: Unused, here for API consistency with the other states
    :return: None
    """
    # Grey represents the inactive state, so the logical flag is cleared
    # before swapping in the grey image.
    self._on = False
    self._load_new(led_grey)
def update(self, ptime):
    """Update tween with the time since the last frame.

    :param ptime: elapsed time since the previous update.
    :return: True once the tween has completed.
    """
    delta = self.delta + ptime
    total = self.delay + self.duration
    if delta > total:
        delta = total
    if delta < self.delay:
        # Still inside the start delay: nothing to animate yet.
        pass
    elif delta == total:
        # Animation finished: snap every property to its final value.
        for key, tweenable in self.tweenables:
            setattr(self.target, key, tweenable.target_value)
    else:
        progress = self.ease((delta - self.delay) / (total - self.delay))
        for key, tweenable in self.tweenables:
            value = tweenable.update(progress)
            # NOTE(review): "round" actually truncates via int() -- confirm.
            if isinstance(value, float) and self.round:
                value = int(value)
            setattr(self.target, key, value)
    if delta == total or not self.tweenables:
        self.complete = True
    self.delta = delta
    if self.on_update:
        self.on_update(self.target)
    return self.complete
def local(self):
    """Access the local.

    :returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalList
    :rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalList
    """
    # Lazily build the LocalList once and reuse it on later accesses.
    if self._local is None:
        solution = self._solution
        self._local = LocalList(
            self._version,
            account_sid=solution['account_sid'],
            country_code=solution['country_code'],
        )
    return self._local
def mv_grid_topology(pypsa_network, configs, timestep=None, line_color=None,
                     node_color=None, line_load=None,
                     grid_expansion_costs=None, filename=None, arrows=False,
                     grid_district_geom=True, background_map=True,
                     voltage=None, limits_cb_lines=None, limits_cb_nodes=None,
                     xlim=None, ylim=None, lines_cmap='inferno_r', title='',
                     scaling_factor_line_width=None):
    """Plot the MV grid topology with line loading / costs as line colors.

    Parameters
    ----------
    pypsa_network : :pypsa:`pypsa.Network<network>`
    configs : dict
        Configurations from config files (see ``Config``).
    timestep : pandas.Timestamp or None
        Time step to plot results for; None uses maxima over all time
        steps (arrows cannot be drawn in that case).
    line_color : str or None
        'loading' (needs `line_load`), 'expansion_costs' (needs
        `grid_expansion_costs`; also switches node coloring), or None
        for black lines (fallback for invalid input).
    node_color : str or None
        'technology', 'voltage' (needs `voltage`),
        'storage_integration', 'expansion_costs', or None (no nodes;
        fallback for invalid input).
    line_load : pandas.DataFrame
        Power-flow line loading in A (index: DatetimeIndex, columns:
        line representatives).
    grid_expansion_costs : pandas.DataFrame
        Grid expansion costs in kEUR.
    filename : str or None
        Save path; None shows the figure instead.
    arrows : bool
        Draw power-flow direction arrows (only with
        ``line_color='loading'`` and a given `timestep`).
    grid_district_geom, background_map : bool
        Overlay grid-district polygon / contextily basemap if available.
    voltage : pandas.DataFrame
        Node voltages (used for ``node_color='voltage'``).
    limits_cb_lines, limits_cb_nodes : tuple or None
        (min, max) colorbar limits.
    xlim, ylim : tuple or None
        Axes limits.
    lines_cmap : str
        Colormap for line colors. Default 'inferno_r'.
    title : str
        Plot title.
    scaling_factor_line_width : float or None
        If given, line width is scaled by nominal apparent power;
        otherwise a fixed width of 2 is used.
    """
    def get_color_and_size(name, colors_dict, sizes_dict):
        # Map a bus name to its (color, size) by substring matching; order
        # matters ('GeneratorFluctuating' must be tested before 'Generator').
        if 'BranchTee' in name:
            return colors_dict['BranchTee'], sizes_dict['BranchTee']
        elif 'LVStation' in name:
            return colors_dict['LVStation'], sizes_dict['LVStation']
        elif 'GeneratorFluctuating' in name:
            return (colors_dict['GeneratorFluctuating'],
                    sizes_dict['GeneratorFluctuating'])
        elif 'Generator' in name:
            return colors_dict['Generator'], sizes_dict['Generator']
        elif 'DisconnectingPoint' in name:
            return (colors_dict['DisconnectingPoint'],
                    sizes_dict['DisconnectingPoint'])
        elif 'MVStation' in name:
            return colors_dict['MVStation'], sizes_dict['MVStation']
        elif 'Storage' in name:
            return colors_dict['Storage'], sizes_dict['Storage']
        else:
            return colors_dict['else'], sizes_dict['else']

    def nodes_by_technology(buses):
        # Color/size every bus by its component type.
        bus_sizes = {}
        bus_colors = {}
        colors_dict = {'BranchTee': 'b', 'GeneratorFluctuating': 'g',
                       'Generator': 'k', 'LVStation': 'c', 'MVStation': 'r',
                       'Storage': 'y', 'DisconnectingPoint': '0.75',
                       'else': 'orange'}
        sizes_dict = {'BranchTee': 10, 'GeneratorFluctuating': 100,
                      'Generator': 100, 'LVStation': 50, 'MVStation': 120,
                      'Storage': 100, 'DisconnectingPoint': 50, 'else': 200}
        for bus in buses:
            bus_colors[bus], bus_sizes[bus] = get_color_and_size(
                bus, colors_dict, sizes_dict)
        return bus_sizes, bus_colors

    def nodes_by_voltage(buses, voltage):
        # Color each bus by (max) voltage deviation from 1 p.u. in percent.
        bus_colors = {}
        bus_sizes = {}
        for bus in buses:
            # Strip the pypsa bus-name prefix to recover the component name
            # ("Bus_..." vs "Bus_primary_..."); prefix lengths assumed fixed.
            if 'primary' in bus:
                bus_tmp = bus[12:]
            else:
                bus_tmp = bus[4:]
            if timestep is not None:
                bus_colors[bus] = 100 * abs(
                    1 - voltage.loc[timestep, ('mv', bus_tmp)])
            else:
                bus_colors[bus] = 100 * max(
                    abs(1 - voltage.loc[:, ('mv', bus_tmp)]))
            bus_sizes[bus] = 50
        return bus_sizes, bus_colors

    def nodes_storage_integration(buses):
        # Size storage buses proportionally to their nominal power.
        bus_sizes = {}
        for bus in buses:
            if not 'storage' in bus:
                bus_sizes[bus] = 0
            else:
                tmp = bus.split('_')
                storage_repr = '_'.join(tmp[1:])
                bus_sizes[bus] = pypsa_network.storage_units.loc[
                    storage_repr, 'p_nom'] * 1000 / 3
        return bus_sizes

    def nodes_by_costs(buses, grid_expansion_costs):
        # sum costs for each station
        costs_lv_stations = grid_expansion_costs[
            grid_expansion_costs.index.str.contains("LVStation")]
        costs_lv_stations['station'] = costs_lv_stations.reset_index()[
            'index'].apply(lambda _: '_'.join(_.split('_')[0:2])).values
        costs_lv_stations = costs_lv_stations.groupby('station').sum()
        costs_mv_station = grid_expansion_costs[
            grid_expansion_costs.index.str.contains("MVStation")]
        costs_mv_station['station'] = costs_mv_station.reset_index()[
            'index'].apply(lambda _: '_'.join(_.split('_')[0:2])).values
        costs_mv_station = costs_mv_station.groupby('station').sum()
        bus_sizes = {}
        bus_colors = {}
        for bus in buses:
            if 'LVStation' in bus:
                try:
                    tmp = bus.split('_')
                    lv_st = '_'.join(tmp[2:])
                    bus_colors[bus] = costs_lv_stations.loc[
                        lv_st, 'total_costs']
                    bus_sizes[bus] = 100
                except:
                    # Station without expansion costs: draw nothing.
                    bus_colors[bus] = 0
                    bus_sizes[bus] = 0
            elif 'MVStation' in bus:
                try:
                    tmp = bus.split('_')
                    mv_st = '_'.join(tmp[2:])
                    bus_colors[bus] = costs_mv_station.loc[
                        mv_st, 'total_costs']
                    bus_sizes[bus] = 100
                except:
                    bus_colors[bus] = 0
                    bus_sizes[bus] = 0
            else:
                bus_colors[bus] = 0
                bus_sizes[bus] = 0
        return bus_sizes, bus_colors

    # set font and font size
    font = {'family': 'serif', 'size': 15}
    matplotlib.rc('font', **font)

    # create pypsa network only containing MV buses and lines
    pypsa_plot = PyPSANetwork()
    pypsa_plot.buses = pypsa_network.buses.loc[pypsa_network.buses.v_nom >= 10]
    # filter buses of aggregated loads and generators
    pypsa_plot.buses = pypsa_plot.buses[
        ~pypsa_plot.buses.index.str.contains("agg")]
    pypsa_plot.lines = pypsa_network.lines[
        pypsa_network.lines.bus0.isin(pypsa_plot.buses.index)][
        pypsa_network.lines.bus1.isin(pypsa_plot.buses.index)]

    # line colors
    if line_color == 'loading':
        # calculate relative line loading
        # get load factor
        residual_load = tools.get_residual_load_from_pypsa_network(
            pypsa_network)
        case = residual_load.apply(
            lambda _: 'feedin_case' if _ < 0 else 'load_case')
        if timestep is not None:
            timeindex = [timestep]
        else:
            timeindex = line_load.index
        load_factor = pd.DataFrame(
            data={'s_nom': [float(configs['grid_expansion_load_factors'][
                'mv_{}_line'.format(case.loc[_])]) for _ in timeindex]},
            index=timeindex)
        # get allowed line load
        s_allowed = load_factor.dot(pypsa_plot.lines.s_nom.to_frame().T * 1e3)
        # get line load from pf
        line_colors = line_load.loc[:, pypsa_plot.lines.index].divide(
            s_allowed).max()
    elif line_color == 'expansion_costs':
        node_color = 'expansion_costs'
        line_costs = pypsa_plot.lines.join(
            grid_expansion_costs, rsuffix='costs', how='left')
        line_colors = line_costs.total_costs.fillna(0)
    else:
        line_colors = pd.Series('black', index=pypsa_plot.lines.index)

    # bus colors and sizes
    if node_color == 'technology':
        bus_sizes, bus_colors = nodes_by_technology(pypsa_plot.buses.index)
        bus_cmap = None
    elif node_color == 'voltage':
        bus_sizes, bus_colors = nodes_by_voltage(
            pypsa_plot.buses.index, voltage)
        bus_cmap = plt.cm.Blues
    elif node_color == 'storage_integration':
        bus_sizes = nodes_storage_integration(pypsa_plot.buses.index)
        bus_colors = 'orangered'
        bus_cmap = None
    elif node_color == 'expansion_costs':
        bus_sizes, bus_colors = nodes_by_costs(
            pypsa_plot.buses.index, grid_expansion_costs)
        bus_cmap = None
    elif node_color is None:
        bus_sizes = 0
        bus_colors = 'r'
        bus_cmap = None
    else:
        logging.warning('Choice for `node_color` is not valid. Default is '
                        'used instead.')
        bus_sizes = 0
        bus_colors = 'r'
        bus_cmap = None

    # convert bus coordinates to Mercator
    if contextily and background_map:
        inProj = Proj(init='epsg:4326')
        outProj = Proj(init='epsg:3857')
        x2, y2 = transform(inProj, outProj,
                           list(pypsa_plot.buses.loc[:, 'x']),
                           list(pypsa_plot.buses.loc[:, 'y']))
        pypsa_plot.buses.loc[:, 'x'] = x2
        pypsa_plot.buses.loc[:, 'y'] = y2

    # plot
    plt.figure(figsize=(12, 8))
    ax = plt.gca()

    # plot grid district
    if grid_district_geom and geopandas:
        try:
            subst = pypsa_network.buses[
                pypsa_network.buses.index.str.contains("MVStation")].index[0]
            subst_id = subst.split('_')[-1]
            projection = 3857 if contextily and background_map else 4326
            region = get_grid_district_polygon(
                configs, subst_id=subst_id, projection=projection)
            region.plot(ax=ax, color='white', alpha=0.2, edgecolor='red',
                        linewidth=2)
        except Exception as e:
            logging.warning("Grid district geometry could not be plotted due "
                            "to the following error: {}".format(e))

    # if scaling factor is given s_nom is plotted as line width
    if scaling_factor_line_width is not None:
        line_width = pypsa_plot.lines.s_nom * scaling_factor_line_width
    else:
        line_width = 2
    cmap = plt.cm.get_cmap(lines_cmap)
    ll = pypsa_plot.plot(line_colors=line_colors, line_cmap=cmap, ax=ax,
                         title=title, line_widths=line_width,
                         branch_components=['Line'], basemap=True,
                         bus_sizes=bus_sizes, bus_colors=bus_colors,
                         bus_cmap=bus_cmap)

    # color bar line loading
    if line_color == 'loading':
        if limits_cb_lines is None:
            limits_cb_lines = (min(line_colors), max(line_colors))
        v = np.linspace(limits_cb_lines[0], limits_cb_lines[1], 101)
        cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10])
        cb.set_clim(vmin=limits_cb_lines[0], vmax=limits_cb_lines[1])
        cb.set_label('Line loading in p.u.')
    # color bar grid expansion costs
    elif line_color == 'expansion_costs':
        if limits_cb_lines is None:
            limits_cb_lines = (
                min(min(line_colors), min(bus_colors.values())),
                max(max(line_colors), max(bus_colors.values())))
        v = np.linspace(limits_cb_lines[0], limits_cb_lines[1], 101)
        cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10])
        cb.set_clim(vmin=limits_cb_lines[0], vmax=limits_cb_lines[1])
        cb.set_label('Grid expansion costs in kEUR')

    # color bar voltage
    if node_color == 'voltage':
        if limits_cb_nodes is None:
            limits_cb_nodes = (min(bus_colors.values()),
                               max(bus_colors.values()))
        v_voltage = np.linspace(limits_cb_nodes[0], limits_cb_nodes[1], 101)
        cb_voltage = plt.colorbar(ll[0], boundaries=v_voltage,
                                  ticks=v_voltage[0:101:10])
        cb_voltage.set_clim(vmin=limits_cb_nodes[0], vmax=limits_cb_nodes[1])
        cb_voltage.set_label('Voltage deviation in %')

    # storages
    if node_color == 'expansion_costs':
        ax.scatter(
            pypsa_plot.buses.loc[
                pypsa_network.storage_units.loc[:, 'bus'], 'x'],
            pypsa_plot.buses.loc[
                pypsa_network.storage_units.loc[:, 'bus'], 'y'],
            c='orangered',
            s=pypsa_network.storage_units.loc[:, 'p_nom'] * 1000 / 3)

    # add legend for storage size and line capacity
    if (node_color == 'storage_integration'
            or node_color == 'expansion_costs') and \
            pypsa_network.storage_units.loc[:, 'p_nom'].any() > 0:
        scatter_handle = plt.scatter(
            [], [], c='orangered', s=100, label='= 300 kW battery storage')
    else:
        scatter_handle = None
    if scaling_factor_line_width is not None:
        line_handle = plt.plot(
            [], [], c='black', linewidth=scaling_factor_line_width * 10,
            label='= 10 MVA')
    else:
        line_handle = None
    if scatter_handle and line_handle:
        plt.legend(handles=[scatter_handle, line_handle[0]], labelspacing=1,
                   title='Storage size and line capacity', borderpad=0.5,
                   loc=2, framealpha=0.5, fontsize='medium')
    elif scatter_handle:
        plt.legend(handles=[scatter_handle], labelspacing=1,
                   title='Storage size', borderpad=0.5, loc=2,
                   framealpha=0.5, fontsize='medium')
    elif line_handle:
        plt.legend(handles=[line_handle[0]], labelspacing=1,
                   title='Line capacity', borderpad=0.5, loc=2,
                   framealpha=0.5, fontsize='medium')

    # axes limits
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])

    # hide axes labels
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # draw arrows on lines
    if arrows and timestep and line_color == 'loading':
        path = ll[1].get_segments()
        colors = cmap(ll[1].get_array() / 100)
        for i in range(len(path)):
            if pypsa_network.lines_t.p0.loc[timestep,
                                            line_colors.index[i]] > 0:
                arrowprops = dict(arrowstyle="->", color='b')  # colors[i])
            else:
                arrowprops = dict(arrowstyle="<-", color='b')  # colors[i])
            ax.annotate(
                "",
                xy=abs((path[i][0] - path[i][1]) * 0.51 - path[i][0]),
                xytext=abs((path[i][0] - path[i][1]) * 0.49 - path[i][0]),
                arrowprops=arrowprops, size=10)

    # plot map data in background
    if contextily and background_map:
        try:
            add_basemap(ax, zoom=12)
        except Exception as e:
            logging.warning("Background map could not be plotted due to the "
                            "following error: {}".format(e))

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
def read_creds_from_aws_credentials_file(profile_name, credentials_file=aws_credentials_file):
    """Scan an AWS credentials file and return the credentials for a profile.

    :param profile_name: name of the ``[profile]`` section to read
    :param credentials_file: path of the credentials file (defaults to the
        module-level ``aws_credentials_file``)
    :return: credentials dict (as initialised by ``init_creds``) with any of
        AccessKeyId / SecretAccessKey / SerialNumber / SessionToken /
        Expiration found in the matching profile section filled in
    """
    credentials = init_creds()
    profile_found = False
    try:
        # Make sure the ~/.aws folder exists
        if not os.path.exists(aws_config_dir):
            os.makedirs(aws_config_dir)
        with open(credentials_file, 'rt') as cf:
            for line in cf:
                profile_line = re_profile_name.match(line)
                if profile_line:
                    # A new [profile] header: track whether it is the one we want.
                    if profile_line.groups()[0] == profile_name:
                        profile_found = True
                    else:
                        profile_found = False
                if profile_found:
                    if re_access_key.match(line):
                        credentials['AccessKeyId'] = line.split("=")[1].strip()
                    elif re_secret_key.match(line):
                        credentials['SecretAccessKey'] = line.split("=")[1].strip()
                    elif re_mfa_serial.match(line):
                        credentials['SerialNumber'] = (line.split('=')[1]).strip()
                    elif re_session_token.match(line) or re_security_token.match(line):
                        # Token values may themselves contain '='; re-join the tail.
                        credentials['SessionToken'] = ('='.join(x for x in line.split('=')[1:])).strip()
                    elif re_expiration.match(line):
                        credentials['Expiration'] = ('='.join(x for x in line.split('=')[1:])).strip()
    except Exception as e:
        # Silent if the error is only a missing ~/.aws/credentials file (errno 2).
        if not hasattr(e, 'errno') or e.errno != 2:
            printException(e)
    return credentials
def post_build(self, pkt, pay):
    """Pad the frame to the Ethernet minimum size when required.

    Padding is only added when *pay* is None/empty; this allows any
    payload to be appended after the MACControl* PDU if needed
    (piggybacking).
    """
    if pay:
        # A payload follows the PDU -> never pad.
        return pkt + pay
    total_size = len(pkt) + self._get_underlayers_size()
    missing = 64 - total_size
    if missing > 0:
        return pkt + b'\x00' * missing
    return pkt + pay
def set_time(self, time):
    """Move the time marker to a specific time.

    :param time: Time to set for the time marker on the TimeLine
    :type time: float
    """
    # Horizontal pixel position corresponding to the requested time.
    x = self.get_time_position(time)
    # Keep the marker image's current vertical position; only move it horizontally.
    _, y = self._canvas_ticks.coords(self._time_marker_image)
    self._canvas_ticks.coords(self._time_marker_image, x, y)
    # The marker line spans the full height of the timeline canvas.
    self._timeline.coords(self._time_marker_line, x, 0, x, self._timeline.winfo_height())
async def send_all_reactions(self):
    """Send all reactions for this paginator, if any are missing.

    This method is generally for internal use only.
    """
    # filter(None, ...) skips emoji slots that are unset/None.
    for emoji in filter(None, self.emojis):
        await self.message.add_reaction(emoji)
    self.sent_page_reactions = True
def layers(self, annotationtype=None, set=None):
    """Return the annotation layers found *directly* under this element.

    Alternative layers are not included.

    :param annotationtype: either an annotation-type constant or a class
        carrying an ``ANNOTATIONTYPE`` attribute; ``None`` matches all
    :param set: restrict the selection to this set (note: shadows the
        builtin, kept for interface compatibility)
    """
    # Accept a class as well as a raw annotation-type constant.
    wanted = annotationtype.ANNOTATIONTYPE if inspect.isclass(annotationtype) else annotationtype
    found = self.select(AbstractAnnotationLayer, set, False, True)
    if wanted is None:
        return list(found)
    return [layer for layer in found if layer.ANNOTATIONTYPE == wanted]
def get_file_history(self, path, limit=None):
    """Return the history of the file at ``path`` as a reversed list of
    ``Changeset`` objects in which the file has been modified.

    :param path: repository path of the file
    :param limit: optional maximum number of changesets to collect
    """
    fctx = self._get_filectx(path)
    hist = []
    cnt = 0
    # Walk the filelog newest-first, collecting changeset node hashes.
    for cs in reversed([x for x in fctx.filelog()]):
        cnt += 1
        # NOTE(review): `hex` is presumably mercurial's node.hex helper
        # imported at module level, not the builtin -- confirm.
        hist.append(hex(fctx.filectx(cs).node()))
        if limit and cnt == limit:
            break
    return [self.repository.get_changeset(node) for node in hist]
def serial_packet(self, event):
    """Handle an incoming raw sensor data packet from a serial bus.

    While a bus scan is in progress, packets are ignored entirely;
    otherwise the raw data is sanitized and, if parsing succeeded,
    broadcast on the originating bus.

    :param event: serial event carrying ``bus`` and raw ``data``
    """
    self.log('Incoming serial packet:', event.__dict__, lvl=verbose)
    # Original used `if self.scanning: pass / else: ...` -- inverted to a
    # guard clause; behavior is unchanged.
    if self.scanning:
        return
    sanitized_data = self._parse(event.bus, event.data)
    self.log('Sanitized data:', sanitized_data, lvl=verbose)
    if sanitized_data is not None:
        self._broadcast(event.bus, sanitized_data)
def gtd7(Input, flags, output):
    '''The standard model subroutine (GTD7).

    Always computes the "thermospheric" mass density by explicitly summing
    the masses of the species in equilibrium at the thermospheric
    temperature T(z).

    Results are written in place into ``output.d`` (densities) and
    ``output.t`` (temperatures); nothing is returned.  This is a direct
    translation of the NRLMSISE-00 C reference implementation -- hence the
    C-style parentheses/semicolons and the module-level state it reads and
    mutates (pdm, pma, pavgm, meso_tn*/meso_tgn*, dm28, gsurf, re, dd).
    ``Input.alt`` is temporarily overwritten and restored around the
    ``gts7`` call.
    '''
    # Node counts and node altitudes (km) for the two lower spline regions.
    mn3 = 5
    zn3 = [32.5, 20.0, 15.0, 10.0, 0.0]
    mn2 = 4
    zn2 = [72.5, 55.0, 45.0, 32.5]
    # Altitude (km) below which mixing is complete.
    zmix = 62.5
    soutput = nrlmsise_output()
    tselec(flags);

    # /* Latitude variation of gravity (none for sw[2]=0) */
    xlat = Input.g_lat;
    if (flags.sw[2] == 0):  # pragma: no cover
        xlat = 45.0;
    glatf(xlat, gsurf, re);

    xmm = pdm[2][4];

    # /* THERMOSPHERE / MESOSPHERE (above zn2[0]) */
    if (Input.alt > zn2[0]):
        altt = Input.alt;
    else:
        altt = zn2[0];

    # Evaluate the thermospheric portion at max(alt, zn2[0]), temporarily
    # swapping the requested altitude in and out of Input.
    tmp = Input.alt;
    Input.alt = altt;
    gts7(Input, flags, soutput);
    altt = Input.alt;
    Input.alt = tmp;
    if (flags.sw[0]):  # pragma: no cover
        # /* metric adjustment */
        dm28m = dm28 * 1.0E6;
    else:
        dm28m = dm28;
    output.t[0] = soutput.t[0];
    output.t[1] = soutput.t[1];
    if (Input.alt >= zn2[0]):
        # Entirely in the thermosphere/mesosphere: copy gts7's result and stop.
        for i in range(9):
            output.d[i] = soutput.d[i];
        return

    # /* LOWER MESOSPHERE / UPPER STRATOSPHERE (between zn3[0] and zn2[0])
    #  * Temperature at nodes and gradients at end nodes
    #  * Inverse temperature a linear function of spherical harmonics
    #  */
    meso_tgn2[0] = meso_tgn1[1];
    meso_tn2[0] = meso_tn1[4];
    meso_tn2[1] = pma[0][0] * pavgm[0] / (1.0 - flags.sw[20] * glob7s(pma[0], Input, flags));
    meso_tn2[2] = pma[1][0] * pavgm[1] / (1.0 - flags.sw[20] * glob7s(pma[1], Input, flags));
    meso_tn2[3] = pma[2][0] * pavgm[2] / (1.0 - flags.sw[20] * flags.sw[22] * glob7s(pma[2], Input, flags));
    meso_tgn2[1] = pavgm[8] * pma[9][0] * (1.0 + flags.sw[20] * flags.sw[22] * glob7s(pma[9], Input, flags)) * meso_tn2[3] * meso_tn2[3] / (pow((pma[2][0] * pavgm[2]), 2.0));
    meso_tn3[0] = meso_tn2[3];

    if (Input.alt < zn3[0]):
        # /* LOWER STRATOSPHERE AND TROPOSPHERE (below zn3[0])
        #  * Temperature at nodes and gradients at end nodes
        #  * Inverse temperature a linear function of spherical harmonics
        #  */
        meso_tgn3[0] = meso_tgn2[1];
        meso_tn3[1] = pma[3][0] * pavgm[3] / (1.0 - flags.sw[22] * glob7s(pma[3], Input, flags));
        meso_tn3[2] = pma[4][0] * pavgm[4] / (1.0 - flags.sw[22] * glob7s(pma[4], Input, flags));
        meso_tn3[3] = pma[5][0] * pavgm[5] / (1.0 - flags.sw[22] * glob7s(pma[5], Input, flags));
        meso_tn3[4] = pma[6][0] * pavgm[6] / (1.0 - flags.sw[22] * glob7s(pma[6], Input, flags));
        meso_tgn3[1] = pma[7][0] * pavgm[7] * (1.0 + flags.sw[22] * glob7s(pma[7], Input, flags)) * meso_tn3[4] * meso_tn3[4] / (pow((pma[6][0] * pavgm[6]), 2.0));

    # /* LINEAR TRANSITION TO FULL MIXING BELOW zn2[0] */
    dmc = 0;
    if (Input.alt > zmix):
        dmc = 1.0 - (zn2[0] - Input.alt) / (zn2[0] - zmix);
    dz28 = soutput.d[2];

    # /* **** N2 density **** */
    dmr = soutput.d[2] / dm28m - 1.0;
    # densm reports the temperature at altitude through this one-element list.
    tz = [0.0]
    output.d[2] = densm(Input.alt, dm28m, xmm, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.d[2] = output.d[2] * (1.0 + dmr * dmc);

    # /* **** HE density **** */
    dmr = soutput.d[0] / (dz28 * pdm[0][1]) - 1.0;
    output.d[0] = output.d[2] * pdm[0][1] * (1.0 + dmr * dmc);

    # /* **** O density **** */
    output.d[1] = 0;
    output.d[8] = 0;

    # /* **** O2 density **** */
    dmr = soutput.d[3] / (dz28 * pdm[3][1]) - 1.0;
    output.d[3] = output.d[2] * pdm[3][1] * (1.0 + dmr * dmc);

    # /* **** AR density *** */
    dmr = soutput.d[4] / (dz28 * pdm[4][1]) - 1.0;
    output.d[4] = output.d[2] * pdm[4][1] * (1.0 + dmr * dmc);

    # /* **** Hydrogen density **** */
    output.d[6] = 0;

    # /* **** Atomic nitrogen density **** */
    output.d[7] = 0;

    # /* **** Total mass density */
    output.d[5] = 1.66E-24 * (4.0 * output.d[0] + 16.0 * output.d[1] + 28.0 * output.d[2] + 32.0 * output.d[3] + 40.0 * output.d[4] + output.d[6] + 14.0 * output.d[7]);
    if (flags.sw[0]):  # pragma: no cover
        output.d[5] = output.d[5] / 1000;

    # /* **** temperature at altitude **** */
    global dd
    dd = densm(Input.alt, 1.0, 0, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.t[1] = tz[0];
    return
def configure(self):
    """Configure the device.

    Send the device configuration saved inside the MCP342x object to the
    target device.
    """
    logger.debug('Configuring ' + hex(self.get_address()) +
                 ' ch: ' + str(self.get_channel()) +
                 ' res: ' + str(self.get_resolution()) +
                 ' gain: ' + str(self.get_gain()))
    # Writing the config byte to the device address applies the settings.
    self.bus.write_byte(self.address, self.config)
def _retrieve(self, *criterion):
    """Retrieve exactly one model matching the given criteria.

    :raises ModelNotFoundError: if no matching row can be found
        (the original docstring said "deleted"; the code raises on
        ``NoResultFound`` from the query)
    """
    try:
        return self._query(*criterion).one()
    except NoResultFound as error:
        # Wrap the ORM error in the domain-level not-found exception.
        raise ModelNotFoundError(
            "{} not found".format(
                self.model_class.__name__,
            ),
            error,
        )
def get_tunnel_info_output_tunnel_admin_state(self, **kwargs):
    """Build the request tree for ``get-tunnel-info/output/tunnel/admin-state``.

    Auto-generated code, cleaned up: the original created an unused
    ``ET.Element("config")`` that was immediately overwritten (dead store).

    :param kwargs: must contain ``admin_state`` (the element text); may
        contain ``callback`` to override the default ``self._callback``.
    :return: whatever the callback returns for the assembled element tree.
    """
    get_tunnel_info = ET.Element("get_tunnel_info")
    output = ET.SubElement(get_tunnel_info, "output")
    tunnel = ET.SubElement(output, "tunnel")
    admin_state = ET.SubElement(tunnel, "admin-state")
    admin_state.text = kwargs.pop('admin_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(get_tunnel_info)
def updateComponentStartVals(self):
    """Push each auto-parameter's start value into its selected components.

    For 'filename' parameters the first entry of the name list is used
    instead of a numeric start value.
    """
    for auto_param in self._parameters:
        attr = auto_param['parameter']
        for target in auto_param['selection']:
            value = auto_param['names'][0] if attr == 'filename' else auto_param['start']
            target.set(attr, value)
def solve(self):
    """Run the Dancing Links / Algorithm X solver on the Sudoku.

    Adapted from http://www.cs.mcgill.ca/~aassaf9/python/algorithm_x.html

    Note: despite the original docstring claiming a list return, this is a
    generator -- it yields one solved grid (list of lists with the same
    shape as the input Sudoku) per solution found.

    :return: generator of solved grids
    :rtype: generator
    """
    # R x C is the box shape; N the side length of the whole grid.
    R, C = int(math.sqrt(len(self.sudoku))), int(math.sqrt(len(self.sudoku[0])))
    N = R * C
    # Exact-cover constraint columns: cell filled, row/number, column/number,
    # box/number.
    X = ([("rc", rc) for rc in product(range(N), range(N))] +
         [("rn", rn) for rn in product(range(N), range(1, N + 1))] +
         [("cn", cn) for cn in product(range(N), range(1, N + 1))] +
         [("bn", bn) for bn in product(range(N), range(1, N + 1))])
    Y = dict()
    for r, c, n in product(range(N), range(N), range(1, N + 1)):
        b = (r // R) * R + (c // C)  # Box number
        Y[(r, c, n)] = [("rc", (r, c)), ("rn", (r, n)), ("cn", (c, n)), ("bn", (b, n))]
    X, Y = self._exact_cover(X, Y)
    # Pre-select the clues already present in the puzzle.
    for i, row in enumerate(self.sudoku):
        for j, n in enumerate(row):
            if n:
                self._select(X, Y, (i, j, n))
    for solution in self._solve(X, Y, []):
        grid = copy.deepcopy(self.sudoku)
        for (r, c, n) in solution:
            grid[r][c] = n
        yield grid
def GetAllPackages(classification):
    """Get a list of all Blueprint Packages with a given classification.

    https://t3n.zendesk.com/entries/20411357-Get-Packages

    :param classification: package type filter (System, Script, Software)
    :return: list of package dicts, each annotated with its 'Visibility'
        (empty list if nothing was retrievable)
    """
    packages = []
    for visibility in Blueprint.visibility_stoi.keys():
        try:
            for r in Blueprint.GetPackages(classification, visibility):
                # BUGFIX: dict(r.items() + {...}.items()) was Python-2 only --
                # on Python 3 it raised TypeError, which the bare except
                # swallowed, so the function always returned None.
                package = dict(r)
                package['Visibility'] = visibility
                packages.append(package)
        except Exception:
            # Some visibilities may be inaccessible; best effort, skip them.
            pass
    # Always return the list (the original implicitly returned None when
    # empty, which surprised iterating callers).
    return packages
def _pre_train ( self , stop_param_updates , num_epochs , updates_epoch ) : """Set parameters and constants before training ."""
# Calculate the total number of updates given early stopping . updates = { k : stop_param_updates . get ( k , num_epochs ) * updates_epoch for k , v in self . params . items ( ) } # Calculate the value of a single step given the number of allowed # updates . single_steps = { k : np . exp ( - ( ( 1.0 - ( 1.0 / v ) ) ) * self . params [ k ] [ 'factor' ] ) for k , v in updates . items ( ) } # Calculate the factor given the true factor and the value of a # single step . constants = { k : np . exp ( - self . params [ k ] [ 'factor' ] ) / v for k , v in single_steps . items ( ) } return constants
def parse_denovo_params(user_params=None):
    """Return default GimmeMotifs parameters, updated with user overrides.

    Defaults are replaced with parameters defined in *user_params*.
    ``background`` is split on commas and the optional ``max_time`` is
    converted from hours to seconds (-1 meaning "no limit").

    Parameters
    ----------
    user_params : dict, optional
        User-defined parameters.

    Returns
    -------
    params : dict
    """
    config = MotifConfig()
    if user_params is None:
        user_params = {}
    params = config.get_default_params()
    params.update(user_params)

    if params.get("torque"):
        logger.debug("Using torque")
    else:
        logger.debug("Using multiprocessing")

    params["background"] = [x.strip() for x in params["background"].split(",")]

    logger.debug("Parameters:")
    for param, value in params.items():
        logger.debug("  %s: %s", param, value)

    # Maximum time? (given in hours; stored in seconds)
    if params["max_time"]:
        try:
            max_time = params["max_time"] = float(params["max_time"])
        except Exception:
            logger.debug("Could not parse max_time value, setting to no limit")
            params["max_time"] = -1
        if params["max_time"] > 0:
            logger.debug("Time limit for motif prediction: %0.2f hours", max_time)
            params["max_time"] = 3600 * params["max_time"]
            # BUGFIX: log the converted seconds value; the original logged the
            # hour count under a "seconds" label.
            logger.debug("Max_time in seconds %0.0f", params["max_time"])
        else:
            logger.debug("No time limit for motif prediction")
    return params
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3):
    """Sample 1-D data ``y = f(x)`` from a black/white curve image.

    The image is read as greyscale, binarised at a threshold of 128 and
    flipped vertically (image rows run top-down while plot axes run
    bottom-up).  For every pixel column a sliding window of neighbouring
    columns is inspected; when black pixels are found, the mean of their
    row indices is mapped into ``[min_y, max_y]`` and one ``(x, y)``
    sample is emitted.

    :param path: the image file path
    :param min_x: minimum value of the x axis
    :param max_x: maximum value of the x axis
    :param min_y: minimum value of the y axis
    :param max_y: maximum value of the y axis
    :param window_size: width of the sliding column window
    :return: tuple of two numpy arrays ``(x, y)``
    """
    greyscale = Image.open(path).convert("L")
    # [::-1] flips the rows so row 0 is the bottom of the plot.
    pixels = np.array(greyscale)[::-1]
    # Binarise; customise this step to support colour images.
    pixels[np.where(pixels >= 128)] = 255
    pixels[np.where(pixels < 128)] = 0

    n_rows, n_cols = pixels.shape
    step_x = (max_x - min_x) / n_cols
    step_y = (max_y - min_y) / n_rows

    xs = []
    ys = []
    for col in range(n_cols):
        # Slide the margin window around the current column.
        window = expand_window(col, window_size, n_cols)
        black_rows = np.where(pixels[:, window] == 0)[0]
        # Emit a sample only when at least one dot lies in the window.
        if len(black_rows) > 0:
            xs.append(min_x + (col + 1) * step_x)
            ys.append(min_y + black_rows.mean() * step_y)
    return np.array(xs), np.array(ys)
def deregister_image(self, ami_id, region='us-east-1'):
    """Deregister an AMI by id.

    Shells out to the AWS CLI; the command result is printed but not
    monitored further.

    :param ami_id: id of the AMI to deregister
    :param region: region to deregister from
    :return: None
    """
    deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}".format(
        self.aws_project, region, ami_id)
    # BUGFIX: the original used Python-2 `print` statements, a SyntaxError
    # under Python 3; converted to print() calls.
    print("De-registering old image, now that the new one exists.")
    print("De-registering cmd: {}".format(deregister_cmd))
    res = subprocess.check_output(shlex.split(deregister_cmd))
    print("Response: {}".format(res))
    print("Not monitoring de-register command")
def get_my_learning_path_session(self, proxy):
    """Return the ``OsidSession`` for the my learning path service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: a ``MyLearningPathSession``
    :rtype: ``osid.learning.MyLearningPathSession``
    :raise: ``NullArgument`` -- ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_my_learning_path()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_my_learning_path()`` is ``true``.*
    """
    if not self.supports_my_learning_path():
        raise Unimplemented()
    # The sessions module is imported lazily; a missing module means the
    # provider cannot service this request at all.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    converted_proxy = self._convert_proxy(proxy)
    try:
        return sessions.MyLearningPathSession(proxy=converted_proxy,
                                              runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def is_insertion(self):
    """Does this variant insert nucleotides into the reference genome?

    An insertion appears in a VCF like ``C > CT``: the alternate allele
    starts with (and extends) the reference allele.  Since the nucleotide
    strings may be normalized in the constructor, the normalized form can
    be ``'' > 'T'`` -- and ``'T'.startswith('')`` still holds.
    """
    if len(self.ref) >= len(self.alt):
        return False
    return self.alt.startswith(self.ref)
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
    """Bind the service instance (see the openbrokerapi documentation).

    :param instance_id: id of the provisioned service instance
    :param binding_id: id of the binding to find or create
    :param details: binding request details; ``details.parameters`` is
        forwarded to the backend
    :return: the resulting ``Binding``
    """
    backend = self._backend
    # Resolve the instance first, then the binding scoped to that instance.
    service_instance = backend.find(instance_id)
    existing_binding = backend.find(binding_id, service_instance)
    # Create/confirm the binding with the requested parameters.
    return backend.bind(existing_binding, details.parameters)
def check_workers(self):
    """Reap dead workers and verify the pool is still fully populated.

    Runs at most once every 5 seconds.  Workers that are no longer alive
    are ``join``ed (harmless for already-exited processes) and removed
    from the pool; if the pool has shrunk below ``self._num_workers`` a
    ``ProcessKilled`` is raised.
    """
    if time.time() - self._worker_alive_time <= 5:
        return
    self._worker_alive_time = time.time()
    # BUGFIX(idiom): the original used a list comprehension purely for its
    # join() side effects; use an explicit loop instead.
    for worker in self._workers:
        if not worker.is_alive():
            worker.join()
    self._workers = [worker for worker in self._workers if worker.is_alive()]
    if len(self._workers) < self._num_workers:
        raise ProcessKilled('One of the workers has been killed.')
def to_frame(self, slot=SLOT.DEVICE_CONFIG):
    """Return the current configuration as a YubiKeyFrame object.

    :param slot: frame command byte; defaults to ``SLOT.DEVICE_CONFIG``
    """
    # Frames carry a fixed 64-byte payload; right-pad with NUL bytes.
    raw = self.to_string()
    return yubikey_frame.YubiKeyFrame(command=slot, payload=raw.ljust(64, b'\0'))
def from_config(cls, cp, data=None, delta_f=None, delta_t=None, gates=None,
                recalibration=None, **kwargs):
    """Initialize an instance of this class from the given config file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.
    data : dict
        A dictionary of data, in which the keys are the detector names and
        the values are the data.  This is not retrieved from the config
        file, and so must be provided.
    delta_f : float
        The frequency spacing of the data; needed for waveform generation.
    delta_t : float
        The time spacing of the data; needed for time-domain waveform
        generators.
    gates : dict of tuples, optional
        Dictionary of detectors -> tuples specifying gate times (as
        returned by ``pycbc.gate.gates_from_cli``).
    recalibration : dict of pycbc.calibration.Recalibrate, optional
        Dictionary of detectors -> recalibration class instances for
        recalibrating data.
    \**kwargs :
        All additional keyword arguments are passed to the class.  Any
        provided keyword will override what is in the config file.
    """
    prior_section = "marginalized_prior"
    args = cls._init_args_from_config(cp)
    marg_prior = read_distributions_from_config(cp, prior_section)
    if len(marg_prior) == 0:
        raise AttributeError("No priors are specified for the "
                             "marginalization. Please specify this in a "
                             "section in the config file with heading "
                             "{}-variable".format(prior_section))
    params = [i.params[0] for i in marg_prior]
    marg_args = [k for k, v in args.items() if "_marginalization" in k]
    if len(marg_args) != len(params):
        raise ValueError("There is not a prior for each keyword argument")
    kwargs['marg_prior'] = marg_prior
    for i in params:
        kwargs[i + "_marginalization"] = True
    args.update(kwargs)
    variable_params = args['variable_params']
    args["data"] = data
    try:
        static_params = args['static_params']
    except KeyError:
        static_params = {}
    # set up waveform generator
    try:
        approximant = static_params['approximant']
    except KeyError:
        raise ValueError("no approximant provided in the static args")
    generator_function = generator.select_waveform_generator(approximant)
    # BUGFIX: dict views are not indexable on Python 3; materialize them
    # before taking the first value / passing the detector list.
    waveform_generator = generator.FDomainDetFrameGenerator(
        generator_function, epoch=list(data.values())[0].start_time,
        variable_args=variable_params, detectors=list(data.keys()),
        delta_f=delta_f, delta_t=delta_t,
        recalib=recalibration, gates=gates,
        **static_params)
    args['waveform_generator'] = waveform_generator
    args["f_lower"] = static_params["f_lower"]
    return cls(**args)
def get_plaintext(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Get a config as plaintext.

    :param id: Config ID as an int.
    :rtype: string
    """
    # Request the text rendering of the config and return the response body.
    response = self.service.get_id(self.base, id, params={'format': 'text'})
    return response.text
async def list(self, *, filters: Mapping = None) -> List[Mapping]:
    """Return a list of services.

    Args:
        filters: a dict with a list of filters

    Available filters:
        id=<service id>
        label=<service label>
        mode=["replicated"|"global"]
        name=<service name>
    """
    # The Docker API expects filters serialized into the query string.
    params = {"filters": clean_filters(filters)}
    response = await self.docker._query_json("services", method="GET", params=params)
    return response
def _construct_rest_api(self):
    """Constructs and returns the ApiGateway RestApi.

    :returns: the RestApi to which this SAM Api corresponds
    :rtype: model.apigateway.ApiGatewayRestApi
    :raises InvalidResourceException: if both 'DefinitionUri' and
        'DefinitionBody' are specified
    """
    rest_api = ApiGatewayRestApi(self.logical_id,
                                 depends_on=self.depends_on,
                                 attributes=self.resource_attributes)
    rest_api.BinaryMediaTypes = self.binary_media
    rest_api.MinimumCompressionSize = self.minimum_compression_size

    if self.endpoint_configuration:
        self._set_endpoint_configuration(rest_api, self.endpoint_configuration)
    elif not RegionConfiguration.is_apigw_edge_configuration_supported():
        # Since this region does not support EDGE configuration, we explicitly set the endpoint type
        # to Regional which is the only supported config.
        self._set_endpoint_configuration(rest_api, "REGIONAL")

    # The two definition sources are mutually exclusive.
    if self.definition_uri and self.definition_body:
        raise InvalidResourceException(
            self.logical_id,
            "Specify either 'DefinitionUri' or 'DefinitionBody' property and not both")

    # CORS/auth/gateway-responses mutate the definition body in place, so
    # they must run before the body is assigned below.
    self._add_cors()
    self._add_auth()
    self._add_gateway_responses()

    if self.definition_uri:
        rest_api.BodyS3Location = self._construct_body_s3_dict()
    elif self.definition_body:
        rest_api.Body = self.definition_body

    if self.name:
        rest_api.Name = self.name

    return rest_api
def _expandDH(self, sampling, lmax, lmax_calc):
    """Evaluate the coefficients on a Driscoll and Healy (1994) grid.

    :param sampling: grid sampling, passed through to ``MakeGridDHC``
    :param lmax: maximum spherical harmonic degree of the output grid
    :param lmax_calc: maximum degree used in the evaluation
    :return: an ``SHGrid`` instance wrapping the evaluated grid
    :raises ValueError: if ``self.normalization`` is not recognized
    """
    # Map the normalization name onto the integer code MakeGridDHC expects
    # (replaces the original repetitive if/elif chain).
    norm_codes = {'4pi': 1, 'schmidt': 2, 'unnorm': 3, 'ortho': 4}
    if self.normalization not in norm_codes:
        raise ValueError(
            "Normalization must be '4pi', 'ortho', 'schmidt', or " +
            "'unnorm'. Input value was {:s}".format(repr(self.normalization)))
    norm = norm_codes[self.normalization]

    data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling, norm=norm,
                                csphase=self.csphase, lmax=lmax,
                                lmax_calc=lmax_calc)
    gridout = SHGrid.from_array(data, grid='DH', copy=False)
    return gridout
def wrap_line(line, limit=None, chars=80):
    """Wrap *line* on whitespace so that no output line exceeds *chars*
    characters.

    A single word longer than *chars* still gets its own (overlong) line,
    since words are never broken.

    :param line: the text to wrap
    :param limit: if given, only the first *limit* characters of *line*
        are considered
    :param chars: maximum length of each wrapped line
    :return: list of wrapped lines

    BUGFIX: the original reset its length counter to 0 after flushing a
    line (dropping the carried word's length) and tested the limit before
    adding the next word, so output lines routinely exceeded *chars*.
    """
    source = line if limit is None else line[0:limit]
    result = []
    builder = []
    length = 0
    for word in source.split():
        # Room needed: the word plus a separating space when the current
        # line already holds something.
        extra = len(word) + (1 if builder else 0)
        if builder and length + extra > chars:
            # Flush the current line and start a new one with this word.
            result.append(' '.join(builder))
            builder = [word]
            length = len(word)
        else:
            builder.append(word)
            length += extra
    result.append(' '.join(builder))
    return result
def wiki_versions_list(self, page_id, updater_id):
    """Return a list of wiki page versions.

    :param page_id: id of the wiki page whose versions are listed
    :param updater_id: restrict versions to this updater
    """
    # BUGFIX: the updater filter key was misspelled 'earch[updater_id]',
    # so the updater_id filter was silently ignored by the API.
    params = {'search[updater_id]': updater_id,
              'search[wiki_page_id]': page_id}
    return self._get('wiki_page_versions.json', params)
def bmp_server_add(self, address, port):
    """Register a new BMP (BGP Monitoring Protocol) server.

    The BGP speaker starts to send BMP messages to the server.  Currently
    only one BMP server can be registered.

    :param address: IP address of the BMP server
    :param port: listen port number of the BMP server
    """
    call('bmp.start', host=address, port=port)
def add_exception_handler(self, exception_handler):
    # type: (AbstractExceptionHandler) -> None
    """Register input to the exception handlers list.

    :param exception_handler: Exception Handler instance to be registered.
    :type exception_handler: AbstractExceptionHandler
    :return: None
    :raises RuntimeConfigException: if the input is ``None`` or is not an
        ``AbstractExceptionHandler`` instance
    """
    # The None check must precede the isinstance check so each failure
    # mode produces its dedicated message.
    if exception_handler is None:
        raise RuntimeConfigException(
            "Valid Exception Handler instance to be provided")
    if not isinstance(exception_handler, AbstractExceptionHandler):
        raise RuntimeConfigException(
            "Input should be an ExceptionHandler instance")
    self.exception_handlers.append(exception_handler)
def _set_cfm_state(self, v, load=False):
    """Setter method for cfm_state, mapped from YANG variable /cfm_state
    (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_cfm_state is considered as a private method.  Backends
    looking to populate this variable should do so via calling
    thisObj._set_cfm_state() directly.

    YANG Description: CFM Operational Information

    NOTE: auto-generated (pyangbind); the embedded strings below must stay
    byte-identical to the generator's output.
    """
    # Unwrap a typed value back to its underlying representation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=cfm_state.cfm_state, is_container='container', presence=False, yang_name="cfm-state", rest_name="cfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cfm_state must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=cfm_state.cfm_state, is_container='container', presence=False, yang_name="cfm-state", rest_name="cfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=True)""",
        })
    self.__cfm_state = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def on(self, event, handler=None):
    """Register an event handler.

    :param event: The event name.  Can be ``'connect'``, ``'message'`` or
                  ``'disconnect'``.
    :param handler: The function that should be invoked to handle the
                    event.  When this parameter is not given, the method
                    acts as a decorator for the handler function.

    Example usage::

        # as a decorator:
        @eio.on('connect')
        def connect_handler():
            print('Connection request')

        # as a method:
        def message_handler(msg):
            print('Received message:', msg)
            eio.send('response')
        eio.on('message', message_handler)
    """
    if event not in self.event_names:
        raise ValueError('Invalid event')

    def register(fn):
        self.handlers[event] = fn
        return fn

    # Decorator form: hand back the registering closure.
    if handler is None:
        return register
    register(handler)
def _merge_two(res1, res2, compute_aux=False):
    """Internal method used to merge two runs with differing (possibly
    variable) numbers of live points into one run.

    Parameters
    ----------
    res1 : :class:`~dynesty.results.Results` instance
        The "base" nested sampling run.

    res2 : :class:`~dynesty.results.Results` instance
        The "new" nested sampling run.

    compute_aux : bool, optional
        Whether to compute auxiliary quantities (evidences, etc.) associated
        with a given run. **WARNING: these are only valid if `res1` or `res2`
        was initialized from the prior *and* their sampling bounds overlap.**
        Default is `False`.

    Returns
    -------
    res : :class:`~dynesty.results.Results` instance
        :class:`~dynesty.results.Results` instance from the newly combined
        nested sampling run.

    Raises
    ------
    ValueError
        If the number of saved samples in either run is inconsistent with
        its recorded number of iterations and live points.
    """

    # Initialize the first ("base") run.
    base_id = res1.samples_id
    base_u = res1.samples_u
    base_v = res1.samples
    base_logl = res1.logl
    base_nc = res1.ncall
    base_it = res1.samples_it
    nbase = len(base_id)

    # Number of live points throughout the run. If not stored explicitly,
    # reconstruct it from `niter`/`nlive`, accounting for the final set of
    # live points added at the end of a standard run.
    # NOTE: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed; Results attribute access may raise either
    # AttributeError or KeyError depending on its implementation.
    try:
        base_n = res1.samples_n
    except (AttributeError, KeyError):
        niter, nlive = res1.niter, res1.nlive
        if nbase == niter:
            base_n = np.ones(niter, dtype='int') * nlive
        elif nbase == (niter + nlive):
            base_n = np.append(np.ones(niter, dtype='int') * nlive,
                               np.arange(1, nlive + 1)[::-1])
        else:
            raise ValueError("Final number of samples differs from number of "
                             "iterations and number of live points in `res1`.")

    # Proposal information (if available).
    try:
        base_prop = res1.prop
        base_propidx = res1.samples_prop
        base_piter = res1.prop_iter
        base_scale = res1.scale
        base_proposals = True
    except (AttributeError, KeyError):
        base_proposals = False

    # Batch information (if available). Default: one batch spanning
    # the full log-likelihood range.
    try:
        base_batch = res1.samples_batch
        base_bounds = res1.batch_bounds
    except (AttributeError, KeyError):
        base_batch = np.zeros(nbase, dtype='int')
        base_bounds = np.array([(-np.inf, np.inf)])

    # Initialize the second ("new") run.
    new_id = res2.samples_id
    new_u = res2.samples_u
    new_v = res2.samples
    new_logl = res2.logl
    new_nc = res2.ncall
    new_it = res2.samples_it
    nnew = len(new_id)

    # Number of live points throughout the run.
    try:
        new_n = res2.samples_n
    except (AttributeError, KeyError):
        niter, nlive = res2.niter, res2.nlive
        if nnew == niter:
            new_n = np.ones(niter, dtype='int') * nlive
        elif nnew == (niter + nlive):
            new_n = np.append(np.ones(niter, dtype='int') * nlive,
                              np.arange(1, nlive + 1)[::-1])
        else:
            raise ValueError("Final number of samples differs from number of "
                             "iterations and number of live points in `res2`.")

    # Proposal information (if available).
    try:
        new_prop = res2.prop
        new_propidx = res2.samples_prop
        new_piter = res2.prop_iter
        new_scale = res2.scale
        new_proposals = True
    except (AttributeError, KeyError):
        new_proposals = False

    # Batch information (if available).
    try:
        new_batch = res2.samples_batch
        new_bounds = res2.batch_bounds
    except (AttributeError, KeyError):
        new_batch = np.zeros(nnew, dtype='int')
        new_bounds = np.array([(-np.inf, np.inf)])

    # Initialize our new combined run.
    combined_id = []
    combined_u = []
    combined_v = []
    combined_logl = []
    combined_logvol = []
    combined_logwt = []
    combined_logz = []
    combined_logzvar = []
    combined_h = []
    combined_nc = []
    combined_propidx = []
    combined_it = []
    combined_n = []
    combined_piter = []
    combined_scale = []
    combined_batch = []

    # Check if proposal info is the same and modify counters accordingly.
    if base_proposals and new_proposals:
        if base_prop == new_prop:
            prop = base_prop
            poffset = 0
        else:
            prop = np.concatenate((base_prop, new_prop))
            poffset = len(base_prop)

    # Check if batch info is the same and modify counters accordingly.
    if np.all(base_bounds == new_bounds):
        bounds = base_bounds
        boffset = 0
    else:
        bounds = np.concatenate((base_bounds, new_bounds))
        boffset = len(base_bounds)

    # Start our counters at the beginning of each set of dead points.
    idx_base, idx_new = 0, 0
    logl_b, logl_n = base_logl[idx_base], new_logl[idx_new]
    nlive_b, nlive_n = base_n[idx_base], new_n[idx_new]

    # Iteratively walk through both sets of samples to simulate
    # a combined run.
    ntot = nbase + nnew
    llmin_b = np.min(base_bounds[base_batch])
    llmin_n = np.min(new_bounds[new_batch])
    logvol = 0.
    for i in range(ntot):
        if logl_b > llmin_n and logl_n > llmin_b:
            # If our samples from both runs are past each other's
            # lower log-likelihood bound, both runs are now "active".
            nlive = nlive_b + nlive_n
        elif logl_b <= llmin_n:
            # If instead our collection of dead points from the "base" run
            # are below the bound, just use those.
            nlive = nlive_b
        else:
            # Our collection of dead points from the "new" run
            # are below the bound, so just use those.
            nlive = nlive_n

        # Increment our position along depending on
        # which dead point (saved or new) is worse.
        if logl_b <= logl_n:
            combined_id.append(base_id[idx_base])
            combined_u.append(base_u[idx_base])
            combined_v.append(base_v[idx_base])
            combined_logl.append(base_logl[idx_base])
            combined_nc.append(base_nc[idx_base])
            combined_it.append(base_it[idx_base])
            combined_batch.append(base_batch[idx_base])
            if base_proposals and new_proposals:
                combined_propidx.append(base_propidx[idx_base])
                combined_piter.append(base_piter[idx_base])
                combined_scale.append(base_scale[idx_base])
            idx_base += 1
        else:
            combined_id.append(new_id[idx_new])
            combined_u.append(new_u[idx_new])
            combined_v.append(new_v[idx_new])
            combined_logl.append(new_logl[idx_new])
            combined_nc.append(new_nc[idx_new])
            combined_it.append(new_it[idx_new])
            combined_batch.append(new_batch[idx_new] + boffset)
            if base_proposals and new_proposals:
                combined_propidx.append(new_propidx[idx_new] + poffset)
                combined_piter.append(new_piter[idx_new] + poffset)
                combined_scale.append(new_scale[idx_new])
            idx_new += 1

        # Save the number of live points and expected ln(volume).
        logvol -= math.log((nlive + 1.) / nlive)
        combined_n.append(nlive)
        combined_logvol.append(logvol)

        # Attempt to step along our samples. If we're out of samples,
        # set values to defaults (infinite likelihood, zero live points).
        try:
            logl_b = base_logl[idx_base]
            nlive_b = base_n[idx_base]
        except IndexError:
            logl_b = np.inf
            nlive_b = 0
        try:
            logl_n = new_logl[idx_new]
            nlive_n = new_n[idx_new]
        except IndexError:
            logl_n = np.inf
            nlive_n = 0

    # Compute sampling efficiency.
    eff = 100. * ntot / sum(combined_nc)

    # Save results.
    r = [('niter', ntot),
         ('ncall', np.array(combined_nc)),
         ('eff', eff),
         ('samples', np.array(combined_v)),
         ('samples_id', np.array(combined_id)),
         ('samples_it', np.array(combined_it)),
         ('samples_n', np.array(combined_n)),
         ('samples_u', np.array(combined_u)),
         ('samples_batch', np.array(combined_batch)),
         ('logl', np.array(combined_logl)),
         ('logvol', np.array(combined_logvol)),
         ('batch_bounds', np.array(bounds))]

    # Add proposal information (if available).
    if base_proposals and new_proposals:
        r.append(('prop', prop))
        r.append(('prop_iter', np.array(combined_piter)))
        r.append(('samples_prop', np.array(combined_propidx)))
        r.append(('scale', np.array(combined_scale)))

    # Compute the posterior quantities of interest if desired.
    if compute_aux:
        h = 0.
        logz = -1.e300
        loglstar = -1.e300
        logzvar = 0.
        logvols_pad = np.concatenate(([0.], combined_logvol))
        logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                                  axis=1,
                                  b=np.c_[np.ones(ntot), -np.ones(ntot)])
        logdvols += math.log(0.5)
        dlvs = logvols_pad[:-1] - logvols_pad[1:]
        for i in range(ntot):
            loglstar_new = combined_logl[i]
            logdvol, dlv = logdvols[i], dlvs[i]
            logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
            logz_new = np.logaddexp(logz, logwt)
            lzterm = (math.exp(loglstar - logz_new) * loglstar +
                      math.exp(loglstar_new - logz_new) * loglstar_new)
            h_new = (math.exp(logdvol) * lzterm +
                     math.exp(logz - logz_new) * (h + logz) -
                     logz_new)
            dh = h_new - h
            h = h_new
            logz = logz_new
            logzvar += dh * dlv
            loglstar = loglstar_new
            combined_logwt.append(logwt)
            combined_logz.append(logz)
            combined_logzvar.append(logzvar)
            combined_h.append(h)

        # Compute batch information. `i` here is a NumPy scalar from
        # `np.unique`, so `combined_batch == i` broadcasts element-wise.
        combined_id = np.array(combined_id)
        batch_nlive = [len(np.unique(combined_id[combined_batch == i]))
                       for i in np.unique(combined_batch)]

        # Add to our results.
        r.append(('logwt', np.array(combined_logwt)))
        r.append(('logz', np.array(combined_logz)))
        r.append(('logzerr', np.sqrt(np.array(combined_logzvar))))
        r.append(('h', np.array(combined_h)))
        r.append(('batch_nlive', np.array(batch_nlive, dtype='int')))

    # Combine to form final results object.
    res = Results(r)

    return res
def override_ssh_auth_env():
    """Override the `$SSH_AUTH_SOCK` env variable to mock the absence of an
    SSH agent.

    Removes ``SSH_AUTH_SOCK`` from ``os.environ``, yields control, and then
    restores the original value (if one was set).
    """
    ssh_auth_sock = "SSH_AUTH_SOCK"
    # pop() with a default instead of `del`: do not raise KeyError when the
    # variable is not set in the first place.
    old_ssh_auth_sock = os.environ.pop(ssh_auth_sock, None)
    try:
        yield
    finally:
        # Restore in a `finally` so the environment is repaired even if the
        # code using this fixture raises. Compare against None (not truthiness)
        # so an original empty-string value is also restored.
        if old_ssh_auth_sock is not None:
            os.environ[ssh_auth_sock] = old_ssh_auth_sock
def prepare_model_data(self, packages, linked, pip=None, private_packages=None):
    """Prepare downloaded package info along with pip packages info.

    Public wrapper that emits a debug-log marker and delegates the actual
    work to ``_prepare_model_data``.
    """
    logger.debug('')
    return self._prepare_model_data(
        packages,
        linked,
        pip=pip,
        private_packages=private_packages,
    )
def fileRefDiscovery(self):
    '''
    Finds the missing components for file nodes by parsing the Doxygen xml
    (which is just the ``doxygen_output_dir/node.refid``). Additional items
    parsed include adding items whose ``refid`` tag are used in this file,
    the <programlisting> for the file, what it includes and what includes it,
    as well as the location of the file (with respect to the *Doxygen* root).

    Care must be taken to only include a refid found with specific tags. The
    parsing of the xml file was done by just looking at some example outputs.
    It seems to be working correctly, but there may be some subtle use cases
    that break it.

    .. warning::

        Some enums, classes, variables, etc declared in the file will not
        have their associated refid in the declaration of the file, but will
        be present in the <programlisting>. These are added to the files'
        list of children when they are found, but this parental relationship
        cannot be formed if you set ``XML_PROGRAMLISTING = NO`` with Doxygen.
        An example of such an enum would be an enum declared inside of a
        namespace within this file.
    '''
    # The per-file XML lives under the configured Doxygen output directory;
    # bail out early with a fatal error if that directory is missing.
    if not os.path.isdir(configs._doxygen_xml_output_directory):
        utils.fancyError("The doxygen xml output directory [{0}] is not valid!".format(
            configs._doxygen_xml_output_directory
        ))

    # parse the doxygen xml file and extract all refid's put in it
    # keys: file object, values: list of refid's
    doxygen_xml_file_ownerships = {}
    # innerclass, innernamespace, etc
    ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*')
    # what files this file includes
    inc_regex = re.compile(r'.*<includes.*>(.+)</includes>')
    # what files include this file
    inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>')
    # the actual location of the file
    loc_regex = re.compile(r'.*<location file="(.*)"/>')

    for f in self.files:
        doxygen_xml_file_ownerships[f] = []
        # NOTE(review): bare `except` below also swallows KeyboardInterrupt /
        # SystemExit — consider narrowing to Exception.
        try:
            # Each file node has a companion `<refid>.xml` document.
            doxy_xml_path = os.path.join(
                configs._doxygen_xml_output_directory, "{0}.xml".format(f.refid)
            )
            with codecs.open(doxy_xml_path, "r", "utf-8") as doxy_file:
                processing_code_listing = False  # shows up at bottom of xml
                for line in doxy_file:
                    # see if this line represents the location tag
                    match = loc_regex.match(line)
                    if match is not None:
                        f.location = os.path.normpath(match.groups()[0])
                        continue

                    if not processing_code_listing:
                        # gather included by references
                        match = inc_by_regex.match(line)
                        if match is not None:
                            ref, name = match.groups()
                            f.included_by.append((ref, name))
                            continue
                        # gather includes lines
                        match = inc_regex.match(line)
                        if match is not None:
                            inc = match.groups()[0]
                            f.includes.append(inc)
                            continue
                        # gather any classes, namespaces, etc declared in the file
                        match = ref_regex.match(line)
                        if match is not None:
                            match_refid = match.groups()[0]
                            # only record refids we actually know about
                            if match_refid in self.node_by_refid:
                                doxygen_xml_file_ownerships[f].append(match_refid)
                            continue
                        # lastly, see if we are starting the code listing
                        if "<programlisting>" in line:
                            processing_code_listing = True
                    elif processing_code_listing:
                        # Accumulate raw <programlisting> lines until the
                        # closing tag; used later for orphan detection.
                        if "</programlisting>" in line:
                            processing_code_listing = False
                        else:
                            f.program_listing.append(line)
        except:
            utils.fancyError(
                "Unable to process doxygen xml for file [{0}].\n".format(f.name)
            )

    # IMPORTANT: do not set the parent field of anything being added as a child to the file
    # hack to make things work right on RTD
    # TODO: do this at construction rather than as a post process!
    if configs.doxygenStripFromPath is not None:
        for node in itertools.chain(self.files, self.dirs):
            if node.kind == "file":
                manip = node.location
            else:  # node.kind == "dir"
                manip = node.name
            abs_strip_path = os.path.normpath(os.path.abspath(
                configs.doxygenStripFromPath
            ))
            if manip.startswith(abs_strip_path):
                manip = os.path.relpath(manip, abs_strip_path)
            manip = os.path.normpath(manip)
            if node.kind == "file":
                node.location = manip
            else:  # node.kind == "dir"
                node.name = manip

    # now that we have parsed all the listed refid's in the doxygen xml, reparent
    # the nodes that we care about
    allowable_child_kinds = ["struct", "class", "function", "typedef",
                             "define", "enum", "union"]
    for f in self.files:
        for match_refid in doxygen_xml_file_ownerships[f]:
            child = self.node_by_refid[match_refid]
            if child.kind in allowable_child_kinds:
                # guard against duplicate children
                if child not in f.children:
                    f.children.append(child)
            elif child.kind == "namespace":
                # namespaces are tracked separately from direct children
                if child not in f.namespaces_used:
                    f.namespaces_used.append(child)

    # last but not least, some different kinds declared in the file that are scoped
    # in a namespace they will show up in the programlisting, but not at the toplevel.
    for f in self.files:
        potential_orphans = []
        for n in f.namespaces_used:
            for child in n.children:
                if child.kind == "enum" or child.kind == "variable" or \
                        child.kind == "function" or child.kind == "typedef" or \
                        child.kind == "union":
                    potential_orphans.append(child)

        # now that we have a list of potential orphans, see if this doxygen xml had
        # the refid of a given child present.
        for orphan in potential_orphans:
            # strip any namespace qualification before searching the listing
            unresolved_name = orphan.name.split("::")[-1]
            if f.refid in orphan.refid and any(
                    unresolved_name in line for line in f.program_listing):
                if orphan not in f.children:
                    f.children.append(orphan)

    # Last but not least, make sure all children know where they were defined.
    for f in self.files:
        for child in f.children:
            if child.def_in_file is None:
                child.def_in_file = f
            elif child.def_in_file != f:
                # << verboseBuild
                utils.verbose_log(
                    "Conflicting file definition for [{0}]: both [{1}] and [{2}] found.".format(
                        child.name, child.def_in_file.name, f.name),
                    utils.AnsiColors.BOLD_RED
                )
def shards(self):
    """Retrieves information about the shards in the current remote
    database.

    :returns: Shard information retrieval status in JSON format
    """
    shards_url = '/'.join((self.database_url, '_shards'))
    response = self.r_session.get(shards_url)
    # Surface HTTP errors (4xx/5xx) before attempting to decode the body.
    response.raise_for_status()
    return response_to_json_dict(response)
def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
    """Attempt to cancel a job from a job ID.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel

    Arguments:
        job_id (str): Unique job identifier.

    Keyword Arguments:
        project (str):
            (Optional) ID of the project which owns the job (defaults to
            the client's project).
        location (str): Location where the job was run.
        retry (google.api_core.retry.Retry):
            (Optional) How to retry the RPC.

    Returns:
        Union[google.cloud.bigquery.job.LoadJob, \
              google.cloud.bigquery.job.CopyJob, \
              google.cloud.bigquery.job.ExtractJob, \
              google.cloud.bigquery.job.QueryJob]:
            Job instance, based on the resource returned by the API.
    """
    # Fall back to the client-level defaults when not given explicitly.
    if project is None:
        project = self.project
    if location is None:
        location = self.location

    extra_params = {"projection": "full"}
    # The client-level location may itself be None, so re-check.
    if location is not None:
        extra_params["location"] = location

    path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
    resource = self._call_api(
        retry, method="POST", path=path, query_params=extra_params
    )
    return self.job_from_resource(resource["job"])
def iter(self, offs):
    '''
    Iterate over items in a sequence from a given offset.

    Args:
        offs (int): The offset to begin iterating from.

    Yields:
        (indx, valu): The index and valu of the item.
    '''
    # Encode the offset as a big-endian 64-bit key to seek into the slab.
    first_key = s_common.int64en(offs)
    for lkey, lval in self.slab.scanByRange(first_key, db=self.db):
        # Decode the index from the key and unpack the msgpack'd value.
        yield s_common.int64un(lkey), s_msgpack.un(lval)