signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _prune(self):
    """Drop any dict keys that are no longer present in the ring buffer.

    Stale keys are collected first because the dict cannot be mutated
    while it is being iterated.
    """
    stale = [key for key in self.keys() if key not in self.__ringbuffer]
    for key in stale:
        super(KRingbuffer, self).__delitem__(key)
def IsDirectory(self):
    """Determines if the file entry is a directory.

    Returns:
      bool: True if the file entry is a directory.
    """
    stat_object = self._stat_object
    if stat_object is None:
        # Lazily fetch and cache the stat information.
        stat_object = self._GetStat()
        self._stat_object = stat_object
    if stat_object is not None:
        self.entry_type = stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY
def _get_vispy_font_filename(face, bold, italic):
    """Fetch a remote vispy font.

    The filename is ``<face>-<suffix>.ttf`` where the suffix encodes the
    weight/slant ('Regular', 'Bold', 'Italic' or 'BoldItalic').
    """
    if bold or italic:
        suffix = ('Bold' if bold else '') + ('Italic' if italic else '')
    else:
        suffix = 'Regular'
    return load_data_file('fonts/%s-%s.ttf' % (face, suffix))
def _do_perform_delete_on_model(self):
    """Perform the actual delete query on this model instance.

    Hard-deletes when force deleting was requested, otherwise falls back
    to the soft-delete path.
    """
    if not self._force_deleting:
        return self._run_soft_delete()
    query = self.with_trashed().where(self.get_key_name(), self.get_key())
    return query.force_delete()
def remove_from_group(self, group, user):
    """Remove a user from a group.

    :type user: str
    :param user: User's email
    :type group: str
    :param group: Group name
    :rtype: dict
    :return: an empty dictionary
    """
    payload = {'group': group, 'user': user}
    return self.post('removeUserFromGroup', payload)
def get_source_name(self, src_id):
    """Returns the name of the given source.

    Returns ``None`` when the native call yields a null pointer.
    """
    len_out = _ffi.new('unsigned int *')
    raw = rustcall(_lib.lsm_view_get_source_name,
                   self._get_ptr(), src_id, len_out)
    if not raw:
        return None
    return decode_rust_str(raw, len_out[0])
def overlap(listA, listB):
    """Return list of objects shared by listA, listB.

    Either argument being None yields an empty list.  Order of the
    result is unspecified (set semantics).
    """
    if listA is None or listB is None:
        return []
    return list(set(listA).intersection(listB))
def debounce(self, wait, immediate=None):
    """Returns a function, that, as long as it continues to be invoked,
    will not be triggered.  The function will be called after it stops
    being called for N milliseconds.  If `immediate` is passed, trigger
    the function on the leading edge, instead of the trailing.

    NOTE(review): `immediate` is accepted but not acted on in this
    implementation -- confirm whether leading-edge mode is expected.
    """
    delay = float(wait) / float(1000)

    def debounced(*args, **kwargs):
        def fire():
            self.obj(*args, **kwargs)

        # Cancel any pending timer from a previous invocation.
        pending = getattr(debounced, 't', None)
        if pending is not None:
            pending.cancel()
        debounced.t = Timer(delay, fire)
        debounced.t.start()

    return self._wrap(debounced)
def _t_run_reactive(self, x):
    """Repeatedly updates transient 'A', 'b', and the solution guess within
    each time step according to the applied source term then calls '_solve'
    to solve the resulting system of linear equations.  Stops when the
    residual falls below 'r_tolerance'.

    Parameters
    ----------
    x : ND-array
        Initial guess of unknown variable

    Returns
    -------
    x_new : ND-array
        Solution array.

    Notes
    -----
    Description of 'relaxation_quantity' and 'max_iter' settings can be
    found in the parent class 'ReactiveTransport' documentation.
    """
    if x is None:
        x = np.zeros(shape=[self.Np, ], dtype=float)
    self[self.settings['quantity']] = x
    relax = self.settings['relaxation_quantity']
    # Seed the residual large so the first iteration always runs.
    res = 1e+06
    for itr in range(int(self.settings['max_iter'])):
        if res >= self.settings['r_tolerance']:
            logger.info('Tolerance not met: ' + str(res))
            self[self.settings['quantity']] = x
            # Restore A and b from the stored transient copies before
            # re-applying the (state-dependent) source terms.
            self._A = (self._A_t).copy()
            self._b = (self._b_t).copy()
            self._apply_sources()
            x_new = self._solve()
            # Relaxation
            x_new = relax * x_new + (1 - relax) * self[self.settings['quantity']]
            self[self.settings['quantity']] = x_new
            # NOTE(review): residual is sum |x**2 - x_new**2|, not a norm of
            # (x - x_new) -- confirm this measure is intentional.
            res = np.sum(np.absolute(x ** 2 - x_new ** 2))
            x = x_new
        if (res < self.settings['r_tolerance']
                or self.settings['sources'] == []):
            logger.info('Solution converged: ' + str(res))
            break
    return x_new
def plot_cumulative_returns_by_quantile(quantile_returns,
                                        period,
                                        freq,
                                        ax=None):
    """Plots the cumulative returns of various factor quantiles.

    Parameters
    ----------
    quantile_returns : pd.DataFrame
        Returns by factor quantile
    period : pandas.Timedelta or string
        Length of period for which the returns are computed (e.g. 1 day)
        if 'period' is a string it must follow pandas.Timedelta constructor
        format (e.g. '1 days', '1D', '30m', '3h', '1D1h', etc)
    freq : pandas DateOffset
        Used to specify a particular trading calendar e.g. BusinessDay or Day
        Usually this is inferred from utils.infer_trading_calendar, which is
        called by either get_clean_factor_and_forward_returns or
        compute_forward_returns
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    # One column per quantile, indexed by date.
    ret_wide = quantile_returns.unstack('factor_quantile')
    cum_ret = ret_wide.apply(perf.cumulative_returns,
                             period=period, freq=freq)
    cum_ret = cum_ret.loc[:, ::-1]  # we want negative quantiles as 'red'
    cum_ret.plot(lw=2, ax=ax, cmap=cm.coolwarm)
    ax.legend()
    ymin, ymax = cum_ret.min().min(), cum_ret.max().max()
    # symlog keeps the 1.0 baseline visible for both gains and losses.
    ax.set(ylabel='Log Cumulative Returns',
           title='''Cumulative Return by Quantile ({} Period Forward Return)'''.format(period),
           xlabel='',
           yscale='symlog',
           yticks=np.linspace(ymin, ymax, 5),
           ylim=(ymin, ymax))
    ax.yaxis.set_major_formatter(ScalarFormatter())
    # Reference line at 1.0 (break-even cumulative return).
    ax.axhline(1.0, linestyle='-', color='black', lw=1)
    return ax
def is_valid_codon(codon, type='start'):
    """Given a codon sequence, check if it is a valid start/stop codon.

    Exits the process with an error when `type` is neither 'start' nor
    'stop'.
    """
    if len(codon) != 3:
        return False
    if type == 'start':
        return codon == 'ATG'
    if type == 'stop':
        return codon in ('TGA', 'TAG', 'TAA')
    logging.error("`{0}` is not a valid codon type. ".format(type) +
                  "Should be one of (`start` or `stop`)")
    sys.exit()
def deploy_templates():
    """Deploy any templates from your shortest TEMPLATE_DIRS setting.

    Picks the shortest non-empty path in env.TEMPLATE_DIRS as the project
    template directory (unless env.project_template_dir is already set),
    then pushes it to the remote deployment root.

    Returns the result of deploy_files, or None if nothing was deployed.
    (Python 2 module: note the print statement below.)
    """
    deployed = None
    if not hasattr(env, 'project_template_dir'):
        # the normal pattern would mean the shortest path is the main one.
        # its probably the last listed
        length = 1000
        for dir in env.TEMPLATE_DIRS:
            if dir:
                len_dir = len(dir)
                if len_dir < length:
                    length = len_dir
                    env.project_template_dir = dir
    if hasattr(env, 'project_template_dir'):
        remote_dir = '/'.join([deployment_root(), 'env',
                               env.project_fullname, 'templates'])
        if env.verbosity:
            print env.host, "DEPLOYING templates", remote_dir
        deployed = deploy_files(env.project_template_dir, remote_dir)
    return deployed
def attrs(self, attribute_name):
    """Retrieve HTML attribute values from the elements matched by the query.

    Example usage:

    .. code:: python

        # Assume that the query matches html elements:
        # <div class="foo"> and <div class="bar">
        >>> q.attrs('class')
        ['foo', 'bar']

    Args:
        attribute_name (str): The name of the attribute values to retrieve.

    Returns:
        A list of attribute values for `attribute_name`.
    """
    description = u'attrs({!r})'.format(attribute_name)
    getter = lambda el: el.get_attribute(attribute_name)
    return self.map(getter, description).results
def parse_lines(self, lines: Iterable[str]) -> List[ParseResults]:
    """Parse multiple lines in succession.

    Each line is parsed with its zero-based position as the line number.
    """
    return [self.parseString(text, number)
            for number, text in enumerate(lines)]
def concatenate_fastas(output_fna_clustered,
                       output_fna_failures,
                       output_concat_filepath):
    """Concatenates two input fastas, writes to output_concat_filepath.

    output_fna_clustered: fasta of successful ref clusters
    output_fna_failures: de novo fasta of cluster failures
    output_concat_filepath: path to write combined fastas to

    Returns output_concat_filepath.
    """
    # Context managers close the handles even on error -- the original
    # version leaked all three file handles.
    with open(output_concat_filepath, "w") as output_fp:
        for input_path in (output_fna_clustered, output_fna_failures):
            # "U" (universal newlines) kept for backward compatibility with
            # the original; it is deprecated/removed in modern Python 3 --
            # TODO confirm target interpreter before switching to "r".
            with open(input_path, "U") as input_fp:
                for label, seq in parse_fasta(input_fp):
                    output_fp.write(">%s\n%s\n" % (label, seq))
    return output_concat_filepath
def cmdline(argv, flags):
    """A cmdopts wrapper that takes a list of flags and builds the
    corresponding cmdopts rules to match those flags.
    """
    rules = {flag: {'flags': ["--%s" % flag]} for flag in flags}
    return parse(argv, rules)
def update_m(data, old_M, old_W, selected_genes, disp=False,
             inner_max_iters=100, parallel=True, threads=4,
             write_progress_file=None, tol=0.0, regularization=0.0,
             **kwargs):
    """This returns a new M matrix that contains all genes, given an M that
    was created from running state estimation with a subset of genes.

    Args:
        data (sparse matrix or dense array): data matrix of shape
            (genes, cells), containing all genes
        old_M (array): shape is (selected_genes, k)
        old_W (array): shape is (k, cells)
        selected_genes (list): list of selected gene indices

    Rest of the args are as in poisson_estimate_state

    Returns:
        new_M: array of shape (all_genes, k)
    """
    genes, cells = data.shape
    k = old_M.shape[1]
    non_selected_genes = [x for x in range(genes)
                          if x not in set(selected_genes)]
    # 1. initialize new M
    new_M = np.zeros((genes, k))
    new_M[selected_genes, :] = old_M
    # TODO: how to initialize rest of genes?
    # data*w?
    if disp:
        print('computing initial guess for M by data*W.T')
    # Initial guess for the unselected rows: project the data onto W.
    new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T)
    new_M[non_selected_genes, :] = new_M_non_selected.toarray()
    X = data.astype(float)
    XT = X.T
    is_sparse = False
    if sparse.issparse(X):
        is_sparse = True
        update_fn = sparse_nolips_update_w
        # convert to csc
        X = sparse.csc_matrix(X)
        XT = sparse.csc_matrix(XT)
        if parallel:
            update_fn = parallel_sparse_nolips_update_w
        Xsum = np.asarray(X.sum(0)).flatten()
        Xsum_m = np.asarray(X.sum(1)).flatten()
        # L-BFGS-B won't work right now for sparse matrices
        method = 'NoLips'
        objective_fn = _call_sparse_obj
    else:
        objective_fn = objective
        update_fn = nolips_update_w
        Xsum = X.sum(0)
        Xsum_m = X.sum(1)
        # If method is NoLips, converting to a sparse matrix
        # will always improve the performance (?) and never lower accuracy...
        # will almost always improve performance?
        # if sparsity is below 40%?
        # NOTE(review): `method` appears to be assigned only in the sparse
        # branch above; the dense path reading it here looks like it would
        # raise NameError -- confirm where `method` is defined upstream.
        if method == 'NoLips':
            is_sparse = True
            X = sparse.csc_matrix(X)
            XT = sparse.csc_matrix(XT)
            update_fn = sparse_nolips_update_w
            if parallel:
                update_fn = parallel_sparse_nolips_update_w
            objective_fn = _call_sparse_obj
    if disp:
        print('starting estimating M')
    # M is estimated by solving the transposed W-update problem.
    new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn,
                        objective_fn, is_sparse, parallel, threads, method,
                        tol, disp, inner_max_iters, 'M', regularization)
    if write_progress_file is not None:
        progress = open(write_progress_file, 'w')
        progress.write('0')
        progress.close()
    return new_M.T
def get_sdk_version(self):
    """Get the version of Windows SDK from VCVarsQueryRegistry.bat.

    Returns the version string, or '' when the batch file contains no
    recognizable SDK path.  Raises RuntimeError when the batch file
    cannot be read.
    """
    batch_path = os.path.join(self.tool_dir, 'VCVarsQueryRegistry.bat')
    batch = read_file(batch_path)
    if not batch:
        raise RuntimeError(_('failed to find the SDK version'))
    pattern = r'(?<=\\Microsoft SDKs\\Windows\\).+?(?=")'
    match = re.search(pattern, batch)
    if match is None:
        return ''
    version = match.group()
    logging.debug(_('SDK version: %s'), version)
    return version
def extract_options(name):
    """Extracts comparison option from filename.

    As example, ``Binarizer-SkipDim1`` means option *SkipDim1* is enabled.
    ``(1, 2)`` and ``(2,)`` are considered equal.

    Available options:

    * ``'SkipDim1'``: reshape arrays by skipping 1-dimension:
      ``(1, 2)`` --> ``(2,)``
    * ``'OneOff'``: inputs comes in a list for the predictions are computed
      with a call for each of them, not with one call

    See function *dump_data_and_model* to get the full list.
    """
    # Basename without extension, split on '-': the first token is the
    # model name, the remaining tokens are option flags.
    opts = name.replace("\\", "/").split("/")[-1].split('.')[0].split('-')
    if len(opts) == 1:
        return {}
    res = {}
    for opt in opts[1:]:
        if opt in ("SkipDim1", "OneOff", "NoProb", "Dec4", "Dec3",
                   'Out0', 'Dec2', 'Reshape', 'Opp'):
            res[opt] = True
        else:
            # Bugfix: report the single unrecognised option; the original
            # formatted the whole list (opts[1:]) into the message.
            raise NameError("Unable to parse option '{}'".format(opt))
    return res
def import_submodules(package: Union[str, ModuleType],
                      base_package_for_relative_import: str = None,
                      recursive: bool = True) -> Dict[str, ModuleType]:
    """Import all submodules of a module, recursively, including subpackages.

    Args:
        package: package (name or actual module)
        base_package_for_relative_import: path to prepend?
        recursive: import submodules too?

    Returns:
        dict: mapping from full module name to module
    """
    # http://stackoverflow.com/questions/3365740/how-to-import-all-submodules
    if isinstance(package, str):
        package = importlib.import_module(
            package, base_package_for_relative_import)
    imported = {}
    for _loader, module_name, is_package in pkgutil.walk_packages(
            package.__path__):
        full_name = package.__name__ + '.' + module_name
        log.debug("importing: {}", full_name)
        imported[full_name] = importlib.import_module(full_name)
        if recursive and is_package:
            imported.update(import_submodules(full_name))
    return imported
def start_ssh_server(port, username, password, namespace):
    """Start an SSH server on the given port, exposing a Python prompt with
    the given namespace.  Listens on 127.0.0.1 only.
    """
    # This is a lot of boilerplate, see http://tm.tl/6429 for a ticket to
    # provide a utility function that simplifies this.
    from twisted.internet import reactor
    from twisted.conch.insults import insults
    from twisted.conch import manhole, manhole_ssh
    from twisted.cred.checkers import (
        InMemoryUsernamePasswordDatabaseDontUse as MemoryDB)
    from twisted.cred.portal import Portal

    realm = manhole_ssh.TerminalRealm()

    def make_protocol():
        return insults.ServerProtocol(manhole.Manhole, namespace)

    realm.chainedProtocolFactory = make_protocol
    portal = Portal(realm, [MemoryDB(**{username: password})])
    reactor.listenTCP(port, manhole_ssh.ConchFactory(portal),
                      interface="127.0.0.1")
def credential_add(self, name, cred_type, **options):
    '''Adds a new credential into SecurityCenter.

    **Global Options (Required)**

    :param name: Unique name to be associated to this credential
    :param cred_type: The type of credential.  Valid values are: 'ssh',
                      'windows', 'snmp', or 'kerberos'
    :type name: string
    :type cred_type: string

    **Windows Credential Options**

    :param username: Account Name
    :param password: Account Password
    :param domain: [Optional] Account Member Domain

    **Unix/SSH Credential Options**

    :param username: Account Name
    :param password: Account Password
    :param privilegeEscalation: [Optional] 'su', 'su+sudo', 'dzdo',
                                'pbrun', 'Cisco \'enable\'', or 'none'
    :param escalationUsername: [Optional] Username to escalate to
                               (su+sudo only)
    :param escalationPassword: [Optional] Password used for escalation
    :param publicKey: [Optional] SSH public RSA/DSA key
                      (filename or file object)
    :param privateKey: [Optional] SSH private RSA/DSA key
                       (filename or file object)
    :param passphrase: [Optional] Passphrase for the RSA/DSA keypair

    **Kerberos Credential Options**

    :param ip: Kerberos Host IP
    :param port: Kerberos Host Port
    :param realm: Kerberos Realm
    :param protocol: Kerberos Protocol

    **SNMP Community String**

    :param communityString: The community string to connect with.
    '''
    # Key material must be uploaded first so the API receives server-side
    # filenames.  Bugfix: the original tested for 'pirvateKey' (typo) and
    # therefore never uploaded private keys.
    if 'privateKey' in options:
        options['privateKey'] = self._upload(options['privateKey'])['filename']
    if 'publicKey' in options:
        options['publicKey'] = self._upload(options['publicKey'])['filename']
    # NOTE(review): `name` and `cred_type` are not folded into the request
    # payload here -- confirm whether raw_query adds them elsewhere.
    return self.raw_query("credential", "add", data=options)
def count_countries(publishingCountry, **kwargs):
    '''Lists occurrence counts for all countries covered by the data
    published by the given country

    :param publishingCountry: [str] A two letter country code

    :return: dict

    Usage::

        from pygbif import occurrences
        occurrences.count_countries(publishingCountry="DE")
    '''
    endpoint = gbif_baseurl + 'occurrence/counts/countries'
    query = {'publishingCountry': publishingCountry}
    return gbif_GET(endpoint, query, **kwargs)
async def dump_variant(self, elem, elem_type=None, params=None, obj=None):
    """Dumps variant type to the writer.
    Supports both wrapped and raw variant.

    :param elem: the variant value (wrapped VariantType or raw value)
    :param elem_type: declared variant type; consulted for raw variants
    :param params: unused here
    :param obj: passed through to _dump_field
    :return: single-entry dict mapping the active field name to its
        dumped value
    """
    fvalue = None
    if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:
        # Wrapped variant: the element itself records which field is active.
        try:
            self.tracker.push_variant(elem.variant_elem_type)
            fvalue = {
                elem.variant_elem:
                    await self._dump_field(getattr(elem, elem.variant_elem),
                                           elem.variant_elem_type, obj=obj)
            }
            self.tracker.pop()
        except Exception as e:
            # Wrap in ArchiveException so the tracker path is reported.
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
    else:
        # Raw variant: resolve the active field from the type's specs.
        try:
            fdef = elem_type.find_fdef(elem_type.f_specs(), elem)
            self.tracker.push_variant(fdef[1])
            fvalue = {
                fdef[0]: await self._dump_field(elem, fdef[1], obj=obj)
            }
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
    return fvalue
def IsTemplateParameterList(clean_lines, linenum, column):
    """Check if the token ending on (linenum, column) is the end of template<>.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: the number of the line to check.
      column: end column of the token to check.
    Returns:
      True if this token is end of a template parameter list, False otherwise.
    """
    (_, startline, startpos) = ReverseCloseExpression(
        clean_lines, linenum, column)
    if startpos <= -1:
        return False
    # A '<...>' that is directly preceded by 'template' is a parameter list.
    prefix = clean_lines.elided[startline][0:startpos]
    return bool(Search(r'\btemplate\s*$', prefix))
def expanddotpaths(env, console):
    """Move files with dots in them to sub-directories.

    'a.b.c.lua' becomes 'a/b/c.lua'; the move is performed with git mv.
    """
    for entry in os.listdir(path.join(env.dir)):
        base, ext = path.splitext(entry)
        if ext != '.lua' or '.' not in base:
            continue
        dir_part, stem = base.rsplit('.', 1)
        target_dir = dir_part.replace('.', '/')
        target = path.join(target_dir, stem) + ext
        console.quiet('Move %s to %s' % (entry, target))
        abs_dir = path.join(env.project_directory, target_dir)
        if not path.exists(abs_dir):
            os.makedirs(abs_dir)
        clom.git.mv(entry, target).shell.execute()
def get_routes(self, athlete_id=None, limit=None):
    """Gets the routes list for an authenticated user.

    http://strava.github.io/api/v3/routes/#list

    :param athlete_id: id for the athlete; defaults to the authenticated
        athlete when None.
    :param limit: Max rows to return (default unlimited).
    :type limit: int

    :return: An iterator of :class:`stravalib.model.Route` objects.
    :rtype: :class:`BatchedResultsIterator`
    """
    if athlete_id is None:
        athlete_id = self.get_athlete().id
    fetcher = functools.partial(
        self.protocol.get,
        '/athletes/{id}/routes'.format(id=athlete_id))
    return BatchedResultsIterator(entity=model.Route,
                                  bind_client=self,
                                  result_fetcher=fetcher,
                                  limit=limit)
def preprocess_value(self, value, default=tuple()):
    """Preprocess the value for set.

    Falsy values (and one-element sequences whose sole item is falsy)
    collapse to `default`; bare scalars are wrapped into a 1-tuple.
    """
    if not value:
        # empty value
        return default
    if isinstance(value, (list, tuple)):
        # list with one empty item counts as empty
        if len(value) == 1 and not value[0]:
            return default
        return value
    return (value,)
def generate_changelog(context):
    """Generates an automatic changelog from your commit messages.

    Builds a markdown section with a compare link and one bullet per
    commit since ``context.current_version``, writes it to CHANGELOG.md,
    and stores the content on ``context.changelog_content``.
    """
    changelog_content = [
        '\n## [%s](%s/compare/%s...%s)\n\n' % (
            context.new_version,
            context.repo_url,
            context.current_version,
            context.new_version,
        )
    ]
    git_log_content = None
    git_log = 'log --oneline --no-merges --no-color'.split(' ')
    try:
        git_log_tag = git_log + ['%s..master' % context.current_version]
        git_log_content = git(git_log_tag)
        log.debug('content: %s' % git_log_content)
    except Exception:
        # No previous tag to diff against: fall back to the full log.
        log.warn('Error diffing previous version, initial release')
        git_log_content = git(git_log)
    git_log_content = replace_sha_with_commit_link(context.repo_url,
                                                   git_log_content)
    # turn change log entries into markdown bullet points
    # (bugfix-in-kind: the original abused a list comprehension purely for
    # its append side effect; a plain loop expresses the intent)
    if git_log_content:
        for line in git_log_content[:-1]:
            if line:
                changelog_content.append('* %s\n' % line)
    write_new_changelog(
        context.repo_url, 'CHANGELOG.md', changelog_content,
        dry_run=context.dry_run)
    log.info('Added content to CHANGELOG.md')
    context.changelog_content = changelog_content
def md5_8_name(self, url):
    """Rename the downloaded file to the first 8 hex chars of the URL's MD5,
    keeping the original file extension.
    """
    digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    _, extension = os.path.splitext(url)
    return digest[:8] + extension
def fasta_format_check(fasta_path, logger):
    """Check that a file is valid FASTA format.

    - First non-blank line needs to begin with a '>' header character.
    - Sequence can only contain valid IUPAC nucleotide characters

    Args:
        fasta_path (str): path to the FASTA file to validate
        logger: logger used to report errors and the final summary

    Raises:
        Exception: If invalid FASTA format
    """
    header_count = 0
    nt_count = 0
    with open(fasta_path) as f:
        # Bugfix: use enumerate for true 1-based file line numbers; the
        # original only incremented its counter on sequence lines, so the
        # line numbers in error messages drifted past headers/blank lines.
        for line_count, l in enumerate(f, 1):
            l = l.strip()
            if l == '':
                continue
            if l[0] == '>':
                header_count += 1
                continue
            if header_count == 0:
                error_msg = ('First non-blank line (L:{line_count}) does not '
                             'contain FASTA header. Line beginning with ">" '
                             'expected.'.format(line_count=line_count))
                logger.error(error_msg)
                raise Exception(error_msg)
            non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
            if len(non_nucleotide_chars_in_line) > 0:
                error_msg = ('Line {line} contains the following '
                             'non-nucleotide characters: {non_nt_chars}'
                             .format(line=line_count,
                                     non_nt_chars=', '.join(
                                         [x for x in
                                          non_nucleotide_chars_in_line])))
                logger.error(error_msg)
                raise Exception(error_msg)
            nt_count += len(l)
    if nt_count == 0:
        error_msg = ('File "{}" does not contain any nucleotide sequence.'
                     .format(fasta_path))
        logger.error(error_msg)
        raise Exception(error_msg)
    logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
def random_get_instance() -> tcod.random.Random:
    """Return the default Random instance.

    Returns:
        Random: A Random instance using the default random number generator.
    """
    cdata = ffi.cast("mersenne_data_t*", lib.TCOD_random_get_instance())
    return tcod.random.Random._new_from_cdata(cdata)
def handleTickSize(self, msg):
    """Holds latest tick bid/ask/last size.

    Routes the size update into marketData (or optionsData for OPT/FOP
    contracts), creating the per-ticker holder on first sight, then fires
    the ibCallback.
    """
    # Negative sizes are invalid updates; ignore them.
    if msg.size < 0:
        return
    df2use = self.marketData
    if self.contracts[msg.tickerId].m_secType in ("OPT", "FOP"):
        df2use = self.optionsData
    # create tick holder for ticker
    # (idiom fix: membership test on the dict, not on .keys())
    if msg.tickerId not in df2use:
        df2use[msg.tickerId] = df2use[0].copy()
    # market data
    # bid size
    if msg.field == dataTypes["FIELD_BID_SIZE"]:
        df2use[msg.tickerId]['bidsize'] = int(msg.size)
    # ask size
    elif msg.field == dataTypes["FIELD_ASK_SIZE"]:
        df2use[msg.tickerId]['asksize'] = int(msg.size)
    # last size
    elif msg.field == dataTypes["FIELD_LAST_SIZE"]:
        df2use[msg.tickerId]['lastsize'] = int(msg.size)
    # options data
    # open interest
    elif msg.field == dataTypes["FIELD_OPEN_INTEREST"]:
        df2use[msg.tickerId]['oi'] = int(msg.size)
    elif msg.field == dataTypes["FIELD_OPTION_CALL_OPEN_INTEREST"] and \
            self.contracts[msg.tickerId].m_right == "CALL":
        df2use[msg.tickerId]['oi'] = int(msg.size)
    elif msg.field == dataTypes["FIELD_OPTION_PUT_OPEN_INTEREST"] and \
            self.contracts[msg.tickerId].m_right == "PUT":
        df2use[msg.tickerId]['oi'] = int(msg.size)
    # volume
    elif msg.field == dataTypes["FIELD_VOLUME"]:
        df2use[msg.tickerId]['volume'] = int(msg.size)
    elif msg.field == dataTypes["FIELD_OPTION_CALL_VOLUME"] and \
            self.contracts[msg.tickerId].m_right == "CALL":
        df2use[msg.tickerId]['volume'] = int(msg.size)
    elif msg.field == dataTypes["FIELD_OPTION_PUT_VOLUME"] and \
            self.contracts[msg.tickerId].m_right == "PUT":
        df2use[msg.tickerId]['volume'] = int(msg.size)
    # fire callback
    self.ibCallback(caller="handleTickSize", msg=msg)
def get_all_groups(self, **kwargs):  # noqa: E501
    """Get all group information.  # noqa: E501

    An endpoint for retrieving all group information.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/policy-groups -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_all_groups(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or
        DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently
        supported: total_count
    :param str name__eq: Filter for group name
    :return: GroupSummaryList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the asynchronous (thread) and synchronous paths delegate to the
    # *_with_http_info variant with identical arguments; only the returned
    # object differs (thread vs. data).
    kwargs['_return_http_data_only'] = True
    return self.get_all_groups_with_http_info(**kwargs)  # noqa: E501
def get_message_type(message):
    """Return message's type, or MessageType.UNKNOWN when no known type
    matches."""
    return next(
        (candidate for candidate in MessageType.FIELDS
         if Message.is_type(candidate, message)),
        MessageType.UNKNOWN)
def _string_to_rgb ( color ) : """Convert user string or hex color to color array ( length 3 or 4)"""
if not color . startswith ( '#' ) : if color . lower ( ) not in _color_dict : raise ValueError ( 'Color "%s" unknown' % color ) color = _color_dict [ color ] assert color [ 0 ] == '#' # hex color color = color [ 1 : ] lc = len ( color ) if lc in ( 3 , 4 ) : color = '' . join ( c + c for c in color ) lc = len ( color ) if lc not in ( 6 , 8 ) : raise ValueError ( 'Hex color must have exactly six or eight ' 'elements following the # sign' ) color = np . array ( [ int ( color [ i : i + 2 ] , 16 ) / 255. for i in range ( 0 , lc , 2 ) ] ) return color
def requestExec(self, commandLine):
    """Request execution of :commandLine: and return a deferred reply."""
    packet = common.NS(commandLine)
    return self.sendRequest('exec', packet, wantReply=True)
def setup_benchbuild():
    """Setup benchbuild inside a container.

    This will query a for an existing installation of benchbuild and try
    to upgrade it to the latest version, if possible.
    """
    LOG.debug("Setting up Benchbuild...")

    venv_dir = local.path("/benchbuild")
    prefixes = CFG["container"]["prefixes"].value
    prefixes.append(venv_dir)
    CFG["container"]["prefixes"] = prefixes

    src_dir = str(CFG["source_dir"])
    # NOTE(review): str(...) never yields None, so have_src looks always
    # True here -- confirm whether CFG["source_dir"] can encode "unset".
    have_src = src_dir is not None
    if have_src:
        __mount_source(src_dir)

    benchbuild = find_benchbuild()
    if benchbuild and not requires_update(benchbuild):
        # Installed and up to date: at most refresh from source.
        if have_src:
            __upgrade_from_source(venv_dir, with_deps=False)
        return

    setup_virtualenv(venv_dir)
    if have_src:
        __upgrade_from_source(venv_dir)
    else:
        __upgrade_from_pip(venv_dir)
def cli():
    """Command line utility for serving datasets in a directory over HTTP."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "dataset_directory",
        help="Directory with datasets to be served")
    parser.add_argument(
        "-p", "--port",
        type=int,
        default=8081,
        help="Port to serve datasets on (default 8081)")
    arguments = parser.parse_args()
    if not os.path.isdir(arguments.dataset_directory):
        parser.error("Not a directory: {}".format(arguments.dataset_directory))
    serve_dtool_directory(arguments.dataset_directory, arguments.port)
def _split_field_list(field_list):
    """Split the list of fields for which to extract values into lists by
    extraction methods.

    - Remove any duplicated field names.
    - Raises ValueError with list of any invalid field names in
      ``field_list``.
    """
    lookup_dict = {}
    generate_dict = {}
    # A falsy field_list means "all known fields".
    names = field_list or FIELD_NAME_TO_EXTRACT_DICT.keys()
    for field_name in names:
        if field_name not in FIELD_NAME_TO_EXTRACT_DICT:
            assert_invalid_field_list(field_list)
            continue
        extract_dict = FIELD_NAME_TO_EXTRACT_DICT[field_name]
        if "lookup_str" in extract_dict:
            lookup_dict[field_name] = extract_dict
        else:
            generate_dict[field_name] = extract_dict
    return lookup_dict, generate_dict
def get_list(self, input_string):
    """Return the list of user-supplied values that follow a flag.

    :param input_string: the flag to collect values for
        ('--ensemble_list' or '--fpf'); any other flag returns None
    :return: list of values following the flag, or None when an optional
        flag was not supplied.  Exits via sys.exit(1) on usage errors
        (missing required flag, or flag given without a value).
    """
    if input_string not in ('--ensemble_list', '--fpf'):
        return None
    # was the flag set?
    try:
        index_low = self.args.index(input_string) + 1
    except ValueError:
        if input_string in self.required:
            print("\n {flag} is required".format(flag=input_string))
            print_short_help()
            sys.exit(1)
        return None
    # the flag was set, so check if a value was set, otherwise exit
    try:
        if self.args[index_low] in self.flags:
            print("\n {flag} was set but a value was not specified"
                  .format(flag=input_string))
            print_short_help()
            sys.exit(1)
    except IndexError:
        # Bugfix: the original called .format(input_string) positionally
        # against the named {flag} field, which raises KeyError here.
        print("\n {flag} was set but a value was not specified"
              .format(flag=input_string))
        print_short_help()
        sys.exit(1)
    # scan forward until the next flag or the end of the arguments
    index_high = index_low
    try:
        while self.args[index_high] not in self.flags:
            index_high += 1
    except IndexError:
        # Values run to the end of the argument list.
        # Bugfix: the original used self.args.index(self.args[-1]), which
        # finds the FIRST occurrence of a duplicated final value.
        index_high = len(self.args) - 1
        return self.args[index_low:index_high + 1]
    # return a list of input files (index_high points at the next flag)
    if index_low == index_high:
        return [self.args[index_low]]
    return self.args[index_low:index_high]
def _parse_option(cls, token):
    """Parse an option expression into an InputOption.

    Recognizes ``name : description`` separators, a trailing ``(validator)``
    group, a ``short|long`` shortcut form, and value-mode suffixes
    (``=``, ``=?``, ``=*``, ``=?*``) with optional inline defaults.

    :param token: The option expression
    :type token: str

    :rtype: InputOption
    """
    description = ""
    validator = None
    if " : " in token:
        # NOTE(review): maxsplit=2 can produce three parts; unpacking into
        # two names would then raise ValueError — confirm tokens never
        # contain " : " more than once.
        token, description = tuple(token.split(" : ", 2))
        token = token.strip()
        description = description.strip()
    # Checking validator: a trailing "(...)" group names the validator.
    matches = re.match(r"(.*)\((.*?)\)", token)
    if matches:
        token = matches.group(1).strip()
        # NOTE(review): `validator` is captured but never used in the
        # return value below — confirm whether it should be passed on.
        validator = matches.group(2).strip()
    shortcut = None
    matches = re.split(r"\s*\|\s*", token, 2)
    if len(matches) > 1:
        # "short|long" form: first part is the shortcut, second the name.
        shortcut = matches[0].lstrip("-")
        token = matches[1]
    else:
        token = token.lstrip("-")
    default = None
    mode = Option.NO_VALUE
    # Value-mode suffixes. NOTE(review): rstrip strips a character CLASS,
    # not a suffix — safe only while option names never end in '=', '?', '*'.
    if token.endswith("=*"):
        mode = Option.MULTI_VALUED
        token = token.rstrip("=*")
    elif token.endswith("=?*"):
        mode = Option.MULTI_VALUED
        token = token.rstrip("=?*")
    elif token.endswith("=?"):
        mode = Option.OPTIONAL_VALUE
        token = token.rstrip("=?")
    elif token.endswith("="):
        mode = Option.REQUIRED_VALUE
        token = token.rstrip("=")
    # Inline default: "name=<op>default" overrides the mode chosen above.
    matches = re.match(r"(.+)(=[?*]*)(.+)", token)
    if matches:
        token = matches.group(1)
        operator = matches.group(2)
        default = matches.group(3)
        if operator == "=*":
            mode = Option.REQUIRED_VALUE | Option.MULTI_VALUED
        elif operator == "=?*":
            mode = Option.MULTI_VALUED
        elif operator == "=?":
            mode = Option.OPTIONAL_VALUE
        elif operator == "=":
            mode = Option.REQUIRED_VALUE
    return _option(token, shortcut, mode, description, default)
def get_currencys(self, site='Pro', _async=False):
    """Return every currency supported by the exchange.

    :param site: 'Pro' or 'HADAX' — selects which API family to query.
    :param _async: forwarded to the request helper for async execution.
    :return: JSON response of the /common/currencys endpoint.
    """
    assert site in ('Pro', 'HADAX')
    segment = '/' if site == 'Pro' else '/hadax/'
    path = f'/v1{segment}common/currencys'
    return api_key_get({}, path, _async=_async)
def delete_thing_shadow(self, thing_name):
    """Delete a thing's shadow.

    After deletion, get_thing_shadow raises ResourceNotFound, but the
    shadow version keeps increasing: a fresh version with a None payload
    replaces the old shadow rather than removing it.
    """
    thing = iot_backends[self.region_name].describe_thing(thing_name)
    if thing.thing_shadow is None:
        raise ResourceNotFoundException()
    thing.thing_shadow = FakeShadow.create_from_previous_version(
        thing.thing_shadow, None)
    return thing.thing_shadow
def _set_ovsdb_server(self, v, load=False):
    """Setter method for ovsdb_server, mapped from YANG variable
    /ovsdb_server (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ovsdb_server is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_ovsdb_server() directly.
    """
    # Generated pyangbind setter: coerce, validate, then store the value.
    if hasattr(v, "_utype"):
        # Union values: convert the raw value through its YANG union type.
        v = v._utype(v)
    try:
        # Wrap in the generated list type; raises on an incompatible value.
        t = YANGDynClass(v, base=YANGListType("name", ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated type description for diagnostics.
        raise ValueError({
            'error-string': """ovsdb_server must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
        })
    # Store under the name-mangled attribute read by the generated getter.
    self.__ovsdb_server = t
    if hasattr(self, '_set'):
        # Notify the parent container that this node has been set.
        self._set()
def remove_tag(tag_id):
    '''Delete every post-to-tag record referencing the given tag.'''
    TabPost2Tag.delete().where(TabPost2Tag.tag_id == tag_id).execute()
def print_help(self, script_name: str):
    '''print a help message from the script'''
    # Wrap width follows the terminal, but never narrower than 60 columns.
    textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
    if len(script_name) > 20:
        # Long script names push the option summary onto its own line.
        print(f'usage: sos run {script_name}')
        print(' [workflow_name | -t targets] [options] [workflow_options]')
    else:
        print(f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]')
    print(' workflow_name: Single or combined workflows defined in this script')
    print(' targets: One or more targets to generate')
    print(' options: Single-hyphen sos parameters (see "sos run -h" for details)')
    print(' workflow_options: Double-hyphen workflow-specific parameters')
    # Script-level description: strip comment markers, then dedent.
    description = [x.lstrip('# ').strip() for x in self.description]
    description = textwrap.dedent('\n'.join(description)).strip()
    if description:
        print('\n' + description)
    print('\nWorkflows:')
    print(' ' + '\n '.join(self.workflows))
    # Merge the global (workflow-level) parameters from every section.
    global_parameters = {}
    for section in self.sections:
        global_parameters.update(section.global_parameters)
    if global_parameters:
        print('\nGlobal Workflow Options:')
        for name, (value, comment) in global_parameters.items():
            par_str = f' {format_par(name, value)}'
            print(par_str)
            if comment:
                print('\n'.join(
                    textwrap.wrap(comment, width=textWidth,
                                  initial_indent=' ' * 24,
                                  subsequent_indent=' ' * 24)))
    # NOTE(review): other headers end with ':' ("Workflows:") — "Sections"
    # may be missing a colon; confirm before changing the output string.
    print('\nSections')
    for section in self.sections:
        section.show()
def plucks(obj, selector, default=None):
    """Safe itemgetter for structured objects.

    Happily operates on all (nested) objects that implement the item
    getter, i.e. the ``[]`` operator.

    The ``selector`` is ``(<key>|<index>|<slice>|\\*)(.(<key>|<index>|<slice>|\\*))*``.
    Parts (keys) in the selector path are separated with a dot. If the key
    looks like a number it's interpreted as such, i.e. as an index (so
    beware of numeric string keys in ``dict``s). Python slice syntax is
    supported with keys like: ``2:7``, ``:5``, ``::-1``. A special key is
    ``*``, equivalent to the slice-all op ``:``; its usage is purely
    annotational and it may be left out.

    Examples::

        obj = {'users': [{'uid': 1234, 'name': {'first': 'John', 'last': 'Smith'}},
                         {'uid': 2345, 'name': {'last': 'Bono'}}]}
        plucks(obj, 'users.1.name')       -> {'last': 'Bono'}
        plucks(obj, 'users.*.name.last')  -> ['Smith', 'Bono']
        plucks(obj, 'users.name.first')   -> ['John']

    Note: since the dot ``.`` is used as a separator, keys can not contain dots.
    """
    # Py2/py3 compatibility: ``basestring`` only exists on Python 2.
    try:
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str

    def _filter(iterable, index):
        # Collect item[index] for every item that supports it; skip the rest.
        res = []
        for item in iterable:
            try:
                res.append(item[index])
            except (TypeError, KeyError, IndexError):
                pass
        return res

    def _int(val):
        # int(val), or None for absent/invalid slice bounds.
        try:
            return int(val)
        except (TypeError, ValueError):
            return None

    def _parsekey(key):
        # Map a selector part to an int index, a slice, or a plain key.
        m = re.match(r"^(?P<index>-?\d+)$", key)
        if m:
            return int(m.group('index'))
        m = re.match(r"^(?P<start>-?\d+)?"
                     r"(:(?P<stop>-?\d+)?(:(?P<step>-?\d+)?)?)?$", key)
        if m:
            return slice(_int(m.group('start')),
                         _int(m.group('stop')),
                         _int(m.group('step')))
        if key == '*':
            return slice(None)
        return key

    miss = False
    for key in selector.split('.'):
        index = _parsekey(key)
        if miss:
            # The previous step failed: restart from an empty container of
            # the matching kind so the rest of the path stays "missing".
            if isinstance(index, string_types):
                obj = {}
            else:
                obj = []
        try:
            if isinstance(index, string_types) and isinstance(obj, list):
                # Mapping a key over a list: collect it from each element.
                obj = _filter(obj, index)
            else:
                obj = obj[index]
            miss = False
        except (TypeError, KeyError, IndexError):
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; these are the lookup failures.
            miss = True
    return default if miss else obj
def add_component(self, symlink_comp):
    # type: (bytes) -> None
    '''
    Add a new component to this symlink record.

    Parameters:
     symlink_comp - The bytestring to add to this symlink record.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
    # Rock Ridge SL records are capped at 255 bytes total.
    new_length = self.current_length() + RRSLRecord.Component.length(symlink_comp)
    if new_length > 255:
        raise pycdlibexception.PyCdlibInvalidInput('Symlink would be longer than 255')
    self.symlink_components.append(self.Component.factory(symlink_comp))
def url(self):
    """The URL present in this inline result, or None for other result types.

    If you want to "click" this URL to open it in your browser,
    you should use Python's ``webbrowser.open(url)`` for such task.
    """
    result = self.result
    if isinstance(result, types.BotInlineResult):
        return result.url
    return None
def merge_graphs(main_graph, addition_graph):
    """Merge ``addition_graph`` into ``main_graph``.

    Returns a tuple ``(node_mapping, edge_mapping)`` of dicts translating
    the addition graph's node ids and edge ids to the ids newly created
    in the main graph.
    """
    node_mapping = {
        node['id']: main_graph.new_node()
        for node in addition_graph.get_all_node_objects()
    }
    edge_mapping = {}
    for edge in addition_graph.get_all_edge_objects():
        old_a, old_b = edge['vertices']
        edge_mapping[edge['id']] = main_graph.new_edge(
            node_mapping[old_a], node_mapping[old_b])
    return node_mapping, edge_mapping
def askRasterBounds(self):
    """Prompt the user for raster bounds via a dialog.

    When the dialog is accepted, the chosen bounds are applied to the plot.
    """
    dialog = RasterBoundsDialog(bounds=(self.rasterBottom, self.rasterTop))
    if dialog.exec_():
        self.setRasterBounds(dialog.values())
def covar(self, bessel=True):
    """Return covariance matrix.

    Parameters
    ----------
    bessel : bool, optional, default=True
        Apply Bessel's correction (divide by w - 1 instead of w) to obtain
        an unbiased estimator of the sample covariances.
    """
    denominator = self.w - 1 if bessel else self.w
    return self.Mxy / denominator
def create_variable_is_list(self):
    """Emit code defining a ``{variable}_is_list`` boolean helper.

    The generated line caches ``isinstance({variable}, list)``. Generation
    is skipped when the helper was already created (similar to
    ``create_variable_with_length``).
    """
    variable_name = '{}_is_list'.format(self._variable)
    if variable_name not in self._variables:
        self._variables.add(variable_name)
        self.l('{variable}_is_list = isinstance({variable}, list)')
def draw(self, figure):
    """Draw the watermark image onto a figure.

    Parameters
    ----------
    figure : matplotlib.figure.Figure
        Matplotlib figure on which to draw.
    """
    image_data = mimage.imread(self.filename)
    figure.figimage(image_data, **self.kwargs)
def execute(self, query, args=None):
    """Execute a query.

    query -- string, query to execute on server
    args -- optional sequence or mapping, parameters to use with query.

    Note: If args is a sequence, then %s must be used as the
    parameter placeholder in the query. If a mapping is used,
    %(key)s must be used as the placeholder.

    Returns integer represents rows affected, if any
    """
    # Drain any remaining result sets from a previous multi-statement call.
    while self.nextset():
        pass
    db = self._get_db()
    # Python 2 code path: encode unicode queries with the connection charset.
    if isinstance(query, unicode):
        query = query.encode(db.encoding)
    if args is not None:
        if isinstance(args, dict):
            # Escape every mapping value; keys are encoded so the
            # %-format lookup works against the byte-string query.
            nargs = {}
            for key, item in args.items():
                if isinstance(key, unicode):
                    key = key.encode(db.encoding)
                nargs[key] = db.literal(item)
            args = nargs
        else:
            # Sequence form: escape each positional parameter.
            args = tuple(map(db.literal, args))
        try:
            # Interpolate the escaped parameters into the query.
            query = query % args
        except TypeError as m:
            # Placeholder/argument mismatch surfaces as a DB-API error.
            raise ProgrammingError(str(m))
    assert isinstance(query, (bytes, bytearray))
    res = self._query(query)
    return res
def clear_if_finalized(
        iteration: TransitionResult,
) -> TransitionResult[InitiatorPaymentState]:
    """Clear the initiator payment task once every transfer is finalized.

    Returns a TransitionResult with a ``None`` state (preserving the
    iteration's events) when no initiator transfers remain; otherwise the
    iteration is returned unchanged.
    """
    state = cast(InitiatorPaymentState, iteration.new_state)
    if state is not None and not state.initiator_transfers:
        return TransitionResult(None, iteration.events)
    return iteration
def calculate_sv_coverage(data):
    """Calculate coverage within bins for downstream CNV calling.

    Creates corrected cnr files with log2 ratios and depths, stored under
    ``data["depth"]["bins"]`` with keys target/antitarget/seq2c.
    """
    calcfns = {"cnvkit": _calculate_sv_coverage_cnvkit, "gatk-cnv": _calculate_sv_coverage_gatk}
    # Imported here to avoid a circular import with bcbio.structural.
    from bcbio.structural import cnvkit
    data = utils.to_single_data(data)
    if not cnvkit.use_general_sv_bins(data):
        out_target_file, out_anti_file = (None, None)
    else:
        work_dir = utils.safe_makedir(
            os.path.join(dd.get_work_dir(data), "structural",
                         dd.get_sample_name(data), "bins"))
        # Dispatch to the binning backend configured for this sample.
        out_target_file, out_anti_file = calcfns[cnvkit.bin_approach(data)](data, work_dir)
        if not os.path.exists(out_target_file):
            # Backend produced nothing usable; treat as no coverage output.
            out_target_file, out_anti_file = (None, None)
    # seq2c requires a separate pre-call step producing its own file.
    if "seq2c" in dd.get_svcaller(data):
        from bcbio.structural import seq2c
        seq2c_target = seq2c.precall(data)
    else:
        seq2c_target = None
    # Ensure the depth/bins slot exists before storing results.
    if not tz.get_in(["depth", "bins"], data):
        data = tz.update_in(data, ["depth", "bins"], lambda x: {})
    data["depth"]["bins"] = {"target": out_target_file,
                             "antitarget": out_anti_file,
                             "seq2c": seq2c_target}
    return [[data]]
def is_credit_card(string, card_type=None):
    """
    Checks if a string is a valid credit card number.
    If card type is provided then it checks that specific type,
    otherwise any known credit card number will be accepted.

    :param string: String to check.
    :type string: str
    :param card_type: Card type.
    :type card_type: str

    Can be one of these:

    * VISA
    * MASTERCARD
    * AMERICAN_EXPRESS
    * DINERS_CLUB
    * DISCOVER
    * JCB

    or None. Default to None (any card).

    :return: True if credit card, false otherwise.
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    if card_type:
        if card_type not in CREDIT_CARDS:
            valid = ', '.join(CREDIT_CARDS.keys())
            raise KeyError('Invalid card type "{}". Valid types are: {}'.format(card_type, valid))
        return bool(CREDIT_CARDS[card_type].search(string))
    return any(regex.search(string) for regex in CREDIT_CARDS.values())
def partition(N, k):
    '''
    Distribute ``N`` into ``k`` parts such that each part takes the value
    ``N//k`` or ``N//k + 1`` where ``//`` denotes integer division; i.e.,
    perform the minimal lexicographic integer partition.

    Example: N = 5, k = 2 --> return [3, 2]
    '''
    quotient, remainder = divmod(N, k)
    # The first `remainder` parts absorb one extra unit each.
    return [quotient + 1] * remainder + [quotient] * (k - remainder)
def download_torrent(self):
    """Download torrent.

    Rated implies download the unique best rated torrent found.
    Otherwise: get the magnet and download it.
    """
    try:
        # User backed out of the selection menu: nothing to do.
        if self.back_to_menu is True:
            return
        if self.found_torrents is False:
            print('Nothing found.')
            return
        if self.mode_search == 'best_rated':
            print('Downloading..')
            self.open_magnet()
        elif self.mode_search == 'list':
            if self.selected is not None:
                # t_p, pirate and 1337x got magnet inside, else direct.
                if self.page in ['eztv', 'limetorrents']:
                    # These sites expose the magnet link directly.
                    self.magnet = self.hrefs[int(self.selected)]
                    print('Downloading..')
                    self.open_magnet()
                elif self.page in ['the_pirate_bay', 'torrent_project', '1337x', 'isohunt']:
                    # These need a second request to scrape the magnet.
                    url = self.hrefs[int(self.selected)]
                    self.get_magnet(url)
                    print('Downloading..')
                    self.open_magnet()
                else:
                    print('Bad selected page.')
            else:
                print('Nothing selected.')
                sys.exit(1)
    except Exception:
        # NOTE(review): prints the traceback but exits with status 0 even
        # on error — confirm the zero exit code is intentional.
        print(traceback.format_exc())
        sys.exit(0)
def check_power_raw(self):
    """Returns the power state of the smart power strip in raw format."""
    # Build the 16-byte status-query command (device protocol header bytes).
    packet = bytearray(16)
    packet[0x00] = 0x0a
    packet[0x02] = 0xa5
    packet[0x03] = 0xa5
    packet[0x04] = 0x5a
    packet[0x05] = 0x5a
    packet[0x06] = 0xae
    packet[0x07] = 0xc0
    packet[0x08] = 0x01
    response = self.send_packet(0x6a, packet)
    # Little-endian error code at response offsets 0x22-0x23; 0 = success.
    err = response[0x22] | (response[0x23] << 8)
    if err == 0:
        # Encrypted payload starts at offset 0x38.
        payload = self.decrypt(bytes(response[0x38:]))
        # Python 3 indexing yields ints; Python 2 yields 1-char strings.
        if type(payload[0x4]) == int:
            state = payload[0x0e]
        else:
            state = ord(payload[0x0e])
        return state
    # NOTE(review): implicitly returns None on a device error — confirm
    # that callers handle the None case.
def ordered_by_replica(self, request_key):
    """Record that a replica ordered the request (or was removed).

    Decrements the count of replicas that have not yet ordered
    ``request_key``; a missing state entry is silently ignored.
    """
    state = self.get(request_key)
    if state:
        state.unordered_by_replicas_num -= 1
def list_available_tools(self):
    """List all the Benchmarks configuration files found in the
    configuration folders.

    The alternative configuration directory (when configured) is searched
    before the default one.

    :return: list of BenchmarkToolConfiguration objects.
    """
    search_dirs = []
    if self.alternative_config_dir:
        search_dirs.append(self.alternative_config_dir)
    search_dirs.append(self.default_config_dir)
    benchmarks = []
    for directory in search_dirs:
        pattern = os.path.join(directory, self.BENCHMARKS_DIR, '*.conf')
        benchmarks.extend(
            BenchmarkToolConfiguration(path) for path in glob.glob(pattern))
    return benchmarks
def wr_xlsx_nts ( self , fout_xlsx , desc2nts , ** kws_usr ) : """Print grouped and sorted GO IDs ."""
# KWS _ XLSX : top _ n section _ prt section _ sortby # Adjust xlsx keyword args kws_xlsx = self . _get_xlsx_kws ( ** kws_usr ) # KWS _ SHADE : shade _ hdrgos hdrgo _ prt section _ sortby top _ n shade_hdrgos = self . _get_shade_hdrgos ( ** kws_usr ) self . _adjust_prt_flds ( kws_xlsx , desc2nts , shade_hdrgos ) # 1 - D : data to print is a flat list of namedtuples if 'flat' in desc2nts : nts = desc2nts . get ( 'flat' ) # sys . stdout . write ( " FLAT NTS : { FLDS } \ n " . format ( FLDS = " " . join ( next ( iter ( nts ) ) . _ fields ) ) ) wr_xlsx ( fout_xlsx , nts , ** kws_xlsx ) # 2 - D : data to print is a list of [ ( section , nts ) , . . . else : sections_hdrgos = desc2nts . get ( 'sections' ) wr_xlsx_sections ( fout_xlsx , sections_hdrgos , ** kws_xlsx )
def apply_motion_tracks(self, tracks, accuracy=0.004):
    """Press the screen along the given motion tracks and then release.

    Args:
        tracks (:py:obj:`list`): list of
            :py:class:`poco.utils.track.MotionTrack` objects to replay.
        accuracy (:py:obj:`float`): motion accuracy for each motion step,
            in normalized coordinate metrics.
    """
    if not tracks:
        raise ValueError('Please provide at least one track. Got {}'.format(repr(tracks)))
    batch = MotionTrackBatch(tracks)
    events = batch.discretize(accuracy)
    return self.agent.input.applyMotionEvents(events)
def transpose(self, method):
    """Transpose bounding box (flip or rotate in 90 degree steps)

    :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
      :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
      :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
      :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
    """
    # Only the two mirror flips are supported, despite the docstring above.
    if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
        raise NotImplementedError(
            "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented")
    image_width, image_height = self.size
    xmin, ymin, xmax, ymax = self._split_into_xyxy()
    if method == FLIP_LEFT_RIGHT:
        # Offset by 1 to account for pixel-inclusive box coordinates.
        TO_REMOVE = 1
        transposed_xmin = image_width - xmax - TO_REMOVE
        transposed_xmax = image_width - xmin - TO_REMOVE
        transposed_ymin = ymin
        transposed_ymax = ymax
    elif method == FLIP_TOP_BOTTOM:
        # NOTE(review): no TO_REMOVE offset is applied here, unlike the
        # horizontal flip — confirm whether that asymmetry is intentional.
        transposed_xmin = xmin
        transposed_xmax = xmax
        transposed_ymin = image_height - ymax
        transposed_ymax = image_height - ymin
    transposed_boxes = torch.cat(
        (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
    )
    bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
    # bbox._copy_extra_fields(self)
    # Non-tensor extra fields (masks, keypoints, ...) know how to transpose
    # themselves; raw tensors are carried over unchanged.
    for k, v in self.extra_fields.items():
        if not isinstance(v, torch.Tensor):
            v = v.transpose(method)
        bbox.add_field(k, v)
    return bbox.convert(self.mode)
def options(self, *args, **kwargs):
    """Default OPTIONS response

    If the 'cors' option is True, will respond with an empty response
    and set the 'Access-Control-Allow-Headers' and
    'Access-Control-Allow-Methods' headers
    """
    # NOTE(review): inside this method the bare name `options` resolves to
    # a module-level object (e.g. a config/options instance), NOT this
    # method — confirm which object is expected to carry the 'cors' flag.
    if getattr(options, 'cors', False):
        self.set_header('Access-Control-Allow-Headers',
                        'Content-Type, Authorization, '
                        'Accept, X-Requested-With')
        self.set_header('Access-Control-Allow-Methods',
                        'OPTIONS, TRACE, GET, HEAD, POST, '
                        'PUT, PATCH, DELETE')
    self.finish()
def range(self, dim, data_range=True, dimension_range=True):
    """Return the lower and upper bounds of values along dimension.

    Args:
        dim: The dimension to compute the range on.
        data_range (bool): Compute range from data values
        dimension_range (bool): Include Dimension ranges
            Whether to include Dimension range and soft_range
            in range calculation

    Returns:
        Tuple containing the lower and upper bound
    """
    dimension = self.get_dimension(dim)
    # Data ranges are only computed for key dimensions; value dimensions
    # delegate with the data-range flag disabled.
    return super(StatisticsElement, self).range(
        dim, dimension not in self.vdims, dimension_range)
def _parse_bands(self, band_input):
    """Parse class input and verify band names.

    :param band_input: input parameter `bands`
    :type band_input: str or list(str) or None
    :return: verified list of bands
    :rtype: list(str)
    :raises ValueError: on a wrong input type or unknown band names.
    """
    if self.data_source is DataSource.SENTINEL2_L1C:
        all_bands = AwsConstants.S2_L1C_BANDS
    else:
        all_bands = AwsConstants.S2_L2A_BANDS
    if band_input is None:
        return all_bands
    if isinstance(band_input, str):
        raw_bands = band_input.split(',')
    elif isinstance(band_input, list):
        raw_bands = list(band_input)
    else:
        raise ValueError('bands parameter must be a list or a string')
    # Strip whitespace and file extensions, then drop empty entries.
    cleaned = [band.strip().split('.')[0] for band in raw_bands]
    cleaned = [band for band in cleaned if band != '']
    if not set(cleaned) <= set(all_bands):
        raise ValueError('bands {} must be a subset of {}'.format(cleaned, all_bands))
    return cleaned
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    incremental_update
        /content_view_versions/incremental_update
    promote
        /content_view_versions/<id>/promote

    ``super`` is called otherwise.
    """
    if which == 'incremental_update':
        base = super(ContentViewVersion, self).path('base')
    elif which == 'promote':
        base = super(ContentViewVersion, self).path('self')
    else:
        return super(ContentViewVersion, self).path(which)
    return '{0}/{1}'.format(base, which)
def getAnalysisRequestTemplates(self):
    """Build (title, UID) tuples for this object's AnalysisRequestTemplates.

    :returns: A list of tuples where the first value of the tuple is the
        AnalysisRequestTemplate name and the second one is the
        AnalysisRequestTemplate UID: [(ART.title, ART.UID), ...]
    """
    # The uid_catalog must be fetched from the portal, not from 'self'.
    catalog = getToolByName(api.portal.get(), 'uid_catalog')
    templates = []
    for art_uid in self.ar_templates:
        brains = catalog(UID=art_uid)
        if len(brains) != 0:
            templates.append((brains[0].Title, art_uid))
    return templates
def add_export(exports='/etc/exports', path=None, hosts=None, options=None):
    '''
    Add an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw']
    '''
    if options is None:
        options = []
    if not isinstance(hosts, six.string_types):
        # Lists, etc would silently mangle /etc/exports
        raise TypeError('hosts argument must be a string')
    edict = list_exports(exports)
    entry = {'hosts': hosts, 'options': options}
    edict.setdefault(path, []).append(entry)
    _write_exports(exports, edict)
    return entry
def cmd(send, msg, args):
    """Shows or clears the abuse list

    Syntax: {command} <--clear|--show>
    """
    parser = arguments.ArgParser(args['config'])
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--clear', action='store_true')
    group.add_argument('--show', action='store_true')
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.clear:
        args['handler'].abuselist.clear()
        send("Abuse list cleared.")
    elif cmdargs.show:
        # Only report abusers that also appear in the ignore table.
        abusers = [
            nick for nick in args['handler'].abuselist.keys()
            if args['db'].query(Ignore).filter(Ignore.nick == nick).count()
        ]
        send(", ".join(abusers) if abusers else "No abusers.")
    else:
        send("Please specify an option.")
async def get_box_ids_issued(self) -> str:
    """Return json object on lists of all unique box identifiers (schema
    identifiers, credential definition identifiers, and revocation registry
    identifiers) for all credential definitions and credentials issued; e.g.,

    ::

        {
            "schema_id": ["R17v42T4pk...:2:tombstone:1.2", ...],
            "cred_def_id": ["R17v42T4pk...:3:CL:19:tag", ...],
            "rev_reg_id": [
                "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
                "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
                ...
            ]
        }

    An issuer must issue a credential definition to include its schema
    identifier in the returned values; the schema identifier in isolation
    belongs properly to an Origin, not necessarily to an Issuer.

    The operation may be useful for a Verifier anchor going off-line to
    seed its cache before doing so.

    :return: json string with lists of schema ids, cred def ids, rev reg ids
    """
    LOGGER.debug('Issuer.get_box_ids_issued >>>')
    # Cred def ids: tails subdirectories named like cred def ids on our DID.
    cd_ids = [
        d for d in listdir(self.dir_tails)
        if isdir(join(self.dir_tails, d)) and ok_cred_def_id(d, self.did)
    ]
    s_ids = []
    for cd_id in cd_ids:
        try:
            # Resolve each cred def back to its schema via its sequence no.
            s_ids.append(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])
        except AbsentSchema:
            LOGGER.error(
                'Issuer %s has issued cred def %s but no corresponding schema on ledger',
                self.name,
                cd_id)
    # Rev reg ids: symlinks maintained under the tails directory.
    rr_ids = [basename(link) for link in Tails.links(self.dir_tails, self.did)]
    rv = json.dumps({'schema_id': s_ids, 'cred_def_id': cd_ids, 'rev_reg_id': rr_ids})
    LOGGER.debug('Issuer.get_box_ids_issued <<< %s', rv)
    return rv
def get_records(self, start_time, end_time, msgid=1, number=10000):
    """Fetch customer-service chat records.

    :param start_time: query start time, UNIX timestamp (or datetime)
    :param end_time: query end time, UNIX timestamp (or datetime);
        a single query may not span more than one day
    :param msgid: message id, ascending order, starting from 1
    :param number: records per fetch, at most 10000
    :return: parsed JSON response
    """
    def _to_timestamp(value):
        # Accept datetime objects by converting them to UNIX timestamps.
        if isinstance(value, datetime.datetime):
            return time.mktime(value.timetuple())
        return value

    payload = {
        'starttime': int(_to_timestamp(start_time)),
        'endtime': int(_to_timestamp(end_time)),
        'msgid': msgid,
        'number': number,
    }
    return self._post(
        'https://api.weixin.qq.com/customservice/msgrecord/getmsglist',
        data=payload,
    )
def shellc(ndim, lenvals, array):  # This works! looks like this is a mutable 2d char array
    """Sort an array of character strings according to the ASCII collating
    sequence using the Shell Sort algorithm.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/shellc_c.html

    :param ndim: Dimension of the array.
    :type ndim: int
    :param lenvals: String length.
    :type lenvals: int
    :param array: The array to be sorted.
    :type array: list of str.
    :return: The sorted array.
    :rtype: list of str.
    """
    # Marshal the Python strings into a ctypes 2-D char buffer that the
    # CSPICE routine sorts in place.
    array = stypes.listToCharArray(array, xLen=lenvals, yLen=ndim)
    ndim = ctypes.c_int(ndim)
    lenvals = ctypes.c_int(lenvals)
    libspice.shellc_c(ndim, lenvals, ctypes.byref(array))
    # Convert the mutated buffer back into a Python list of strings.
    return stypes.cVectorToPython(array)
def tables(self):
    """Collect tables via a depth-first search over the node's attributes.

    Generic fallback implementation; child classes should override this
    method for better performance.
    """
    found = set()
    for value in six.itervalues(self.__dict__):
        if isinstance(value, list):
            for element in value:
                if isinstance(element, Node):
                    found |= element.tables()
        elif isinstance(value, Node):
            found |= value.tables()
    return found
def label_to_original_label_otu_by_id(otu_by_id):
    """Normalize labels in a v1.2 otuById dict, in place.

    For every otu lacking ``^ot:originalLabel``: when a non-empty
    ``@label`` exists, it is moved to ``^ot:originalLabel`` and the
    ``@label`` key is removed.
    """
    for otu in otu_by_id.values():
        if otu.get('^ot:originalLabel') is not None:
            continue
        label = otu.get('@label')
        if label:
            otu['^ot:originalLabel'] = otu.pop('@label')
def validate(self, instance):
    """Validate the given document against this schema.

    :raises ValidationException: if there are any validation failures.
    """
    errors = {}
    self._validate_instance(instance, errors)
    if errors:
        raise ValidationException(errors)
def key_gen(self, key_name, type, size=2048, **kwargs):
    """Adds a new public key that can be used for name_publish.

    .. code-block:: python

        >>> c.key_gen('example_key_name')
        {'Name': 'example_key_name',
         'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'}

    Parameters
    ----------
    key_name : str
        Name of the new Key to be generated. Used to reference the Keys.
    type : str
        Type of key to generate. The current possible keys types are:

         * ``"rsa"``
         * ``"ed25519"``
    size : int
        Bitsize of key to generate

    Returns
    -------
        dict : Key name and Key Id
    """
    kwargs.setdefault("opts", {"type": type, "size": size})
    return self._client.request('/key/gen', (key_name,), decoder='json', **kwargs)
async def songs(self):
    '''list of songs in the playlist

    |force| |coro|

    Returns
    -------
        list of type :class:`embypy.objects.Audio`
    '''
    collected = []
    for item in await self.items:
        if item.type == 'Audio':
            collected.append(item)
        elif hasattr(item, 'songs'):
            # Nested containers (albums, sub-playlists) contribute their
            # own songs recursively.
            collected.extend(await item.songs)
    return collected
def set_range(self, bounds, keep_aspect=False):
    """Zoom the view to fit a box.

    Solving ``a * (v0 + t) = -1`` and ``a * (v1 + t) = +1`` gives the pan
    ``t = -(v0 + v1) / 2`` and the zoom ``a = 2 / (v1 - v0)``.
    """
    box = np.asarray(bounds, dtype=np.float64)
    lower, upper = box[:2], box[2:]
    pan = -.5 * (lower + upper)
    zoom = 2. / (upper - lower)
    if keep_aspect:
        # Uniform zoom: use the smaller factor on both axes.
        zoom = zoom.min() * np.ones(2)
    self.set_pan_zoom(pan=pan, zoom=zoom)
def norm(self, coords: Vector3Like, frac_coords: bool = True) -> float:
    """Compute the norm of vector(s).

    Args:
        coords: Array-like object with the coordinates.
        frac_coords: Boolean stating whether the vector corresponds to
            fractional or cartesian coordinates.

    Returns:
        one-dimensional `numpy` array.
    """
    squared = self.dot(coords, coords, frac_coords=frac_coords)
    return np.sqrt(squared)
def start(name, call=None):
    '''
    start a machine by name

    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: true if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    datacenter_id = get_datacenter_id()
    conn = get_conn()
    node = get_node(conn, name)
    conn.start_server(
        datacenter_id=datacenter_id,
        server_id=node['id'],
    )
    return True
def _su_scripts_regex ( self ) : """: return : [ compiled regex , function ]"""
sups = re . escape ( '' . join ( [ k for k in self . superscripts . keys ( ) ] ) ) subs = re . escape ( '' . join ( [ k for k in self . subscripts . keys ( ) ] ) ) # language = PythonRegExp su_regex = ( r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)' ) . format ( su_ = subs + sups , sub = subs , sup = sups ) su_regex = re . compile ( su_regex ) def su_replace ( m ) : esc , sub , root_sup , sup = m . groups ( ) if esc is not None : return esc elif sub is not None : return '_{' + '' . join ( [ c if ( c in [ '‹' , '›' , '˹' , '˺' ] ) else self . subscripts [ c ] for c in sub ] ) + '}' elif root_sup is not None : return '' . join ( [ self . superscripts [ c ] for c in root_sup ] ) elif sup is not None : return '^{' + '' . join ( [ c if ( c in [ '‹' , '›' , '˹' , '˺' ] ) else self . superscripts [ c ] for c in sup ] ) + '}' else : raise TypeError ( "Regex bug: this should never be reached" ) return [ su_regex , su_replace ]
def fetch_extra_data(resource):
    """Return a dict with extra data retrieved from cern oauth."""
    def _first(key):
        # Each attribute arrives as a single-element list; missing keys
        # yield None.
        return resource.get(key, [None])[0]

    return dict(
        person_id=_first('PersonID'),
        identity_class=_first('IdentityClass'),
        department=_first('Department'),
    )
def validate_LESSTHAN ( in_value , restriction ) : """Test to ensure that a value is less than a prescribed value . Parameter : Two values , which will be compared for the difference . ."""
# Sometimes restriction values can accidentally be put in the template < item > 100 < / items > , # Making them a list , not a number . Rather than blowing up , just get value 1 from the list . if type ( restriction ) is list : restriction = restriction [ 0 ] value = _get_val ( in_value ) if type ( value ) is list : for subval in value : if type ( subval ) is tuple : subval = subval [ 1 ] validate_LESSTHAN ( subval , restriction ) else : try : if value >= restriction : raise ValidationError ( "LESSTHAN: %s" % ( restriction ) ) except TypeError : # Incompatible types for comparison . raise ValidationError ( "LESSTHAN: Incompatible types %s" % ( restriction ) )
def initialize_pop(self):
    """Generates initial population with random positions and speeds."""
    self.population = self.toolbox.swarm(n=self._params['popsize'])
    size = len(self.population)
    radius = self._params['neighbours']
    for i, part in enumerate(self.population):
        part.ident = i
        if radius:
            # Ring topology: each particle sees `radius` neighbours on
            # either side (and itself), with wraparound.
            offsets = range(1, radius + 1)
            part.neighbours = list(set(
                [(i - x) % size for x in offsets] +
                [i] +
                [(i + x) % size for x in offsets]))
        else:
            # Fully connected: everyone is a neighbour.
            part.neighbours = [x for x in range(size)]
    self.assign_fitnesses(self.population)
    for part in self.population:
        part.best = creator.Particle(part)
        part.best.fitness.values = part.fitness.values
def read_union(fo, writer_schema, reader_schema=None):
    """A union is encoded by first writing a long value indicating the
    zero-based position within the union of the schema of its value. The
    value is then encoded per the indicated schema within the union."""
    index = read_long(fo)
    chosen = writer_schema[index]
    if not reader_schema:
        return read_data(fo, chosen)
    # Schema resolution: find a reader branch compatible with the
    # writer's chosen branch.
    if not isinstance(reader_schema, list):
        # Reader schema is a single type, not a union.
        if match_types(chosen, reader_schema):
            return read_data(fo, chosen, reader_schema)
    else:
        for candidate in reader_schema:
            if match_types(chosen, candidate):
                return read_data(fo, chosen, candidate)
    msg = 'schema mismatch: %s not found in %s' % (writer_schema, reader_schema)
    raise SchemaResolutionError(msg)
def sym_exp_map(cls, q, eta):
    """Quaternion symmetrized exponential map.

    Find the symmetrized exponential map on the quaternion Riemannian
    manifold.

    Params:
        q: the base point as a Quaternion object
        eta: the tangent vector argument of the exponential map as a
            Quaternion object

    Returns:
        A quaternion p.

    Note:
        The symmetrized exponential formulation is akin to the exponential
        formulation for symmetric positive definite tensors
        [Source](http://www.academia.edu/7656761/On_the_Averaging_of_Symmetric_Positive-Definite_Tensors)
    """
    # p = q^(1/2) * exp(eta) * q^(1/2)
    half_q = q ** 0.5
    return half_q * Quaternion.exp(eta) * half_q
def process_selectors(self, index=0, flags=0):
    """Process selectors.

    We do our own selectors as BeautifulSoup4 has some annoying quirks,
    and we don't really need to do nth selectors or siblings or
    descendants etc.
    """
    selector_stream = self.selector_iter(self.pattern)
    return self.parse_selectors(selector_stream, index, flags)
def recompute_grad(fn):
    """Decorator that recomputes the function on the backwards pass.

    Args:
        fn: a function that takes Tensors (all as positional arguments)
            and returns a tuple of Tensors.

    Returns:
        A wrapped fn that is identical to fn when called, but its
        activations will be discarded and recomputed on the backwards pass
        (i.e. on a call to tf.gradients).
    """
    @functools.wraps(fn)
    def _forward(*args):
        # Defer to the helper that re-runs fn on the backwards pass.
        return _recompute_grad(fn, args)
    return _forward
def _get_port_for_acl ( self , port_id , switch ) : """Gets interface name for ACLs Finds the Port - Channel name if port _ id is in a Port - Channel , otherwise ACLs are applied to Ethernet interface . : param port _ id : Name of port from ironic db : param server : Server endpoint on the Arista switch to be configured"""
all_intf_info = self . _port_group_info . get ( switch , { } ) intf_info = all_intf_info . get ( port_id , { } ) member_info = intf_info . get ( 'interfaceMembership' , '' ) port_group_info = re . search ( 'Member of (?P<port_group>\S+)' , member_info ) if port_group_info : port_id = port_group_info . group ( 'port_group' ) return port_id
def start_runtime(self):
    '''
    Start the system!
    '''
    # Run forever; each iteration executes one Thorium runtime pass.
    while True:
        try:
            self.call_runtime()
        except Exception:
            # Keep the loop alive on any failure: log the traceback and
            # back off for the configured interval before retrying.
            # NOTE(review): the sleep only happens on the failure path —
            # presumably call_runtime() paces itself when healthy; confirm.
            log.error('Exception in Thorium: ', exc_info=True)
            time.sleep(self.opts['thorium_interval'])
def connect_patch_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_patch_namespaced_service_proxy_with_path  # noqa: E501

    connect PATCH requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the part of URLs that include service
        endpoints, suffixes, and parameters to use for the current proxy
        request to service. For example, the whole request URL is
        http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
        Path is _search?q=user:kimchy.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and the async_req=True paths simply forward the
    # result of the *_with_http_info variant (which returns a thread when
    # async_req is set), so a single call covers both.
    return self.connect_patch_namespaced_service_proxy_with_path_with_http_info(  # noqa: E501
        name, namespace, path, **kwargs)
def HexEscape(self, string, match, **unused_kwargs):
    """Converts a hex escaped string."""
    logging.debug('HexEscape matched {0:s}.'.format(string))
    hex_digits = match.group(1)
    try:
        # Hex digits -> raw bytes -> UTF-8 text, appended to the running
        # lexer output.
        self.string += codecs.decode(binascii.unhexlify(hex_digits), 'utf-8')
    except (TypeError, binascii.Error):
        raise errors.ParseError(
            'Invalid hex escape {0!s}.'.format(hex_digits))
def _netmiko_open(self, device_type, netmiko_optional_args=None):
    """Standardized method of creating a Netmiko connection using napalm
    attributes."""
    if netmiko_optional_args is None:
        netmiko_optional_args = {}
    try:
        self._netmiko_device = ConnectHandler(
            device_type=device_type,
            host=self.hostname,
            username=self.username,
            password=self.password,
            timeout=self.timeout,
            **netmiko_optional_args
        )
    except NetMikoTimeoutException:
        raise ConnectionException("Cannot connect to {}".format(self.hostname))
    # Ensure we are in enable (privileged) mode before handing back the
    # connection.
    self._netmiko_device.enable()
    return self._netmiko_device
def best_archiver(random, population, archive, args):
    """Archive only the best individual(s).

    This function archives the best solutions and removes inferior ones.
    If the comparison operators have been overloaded to define Pareto
    preference (as in the ``Pareto`` class), then this archiver will form
    a Pareto archive.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       archive -- the current archive of individuals
       args -- a dictionary of keyword arguments
    """
    # The incoming archive is updated in place and also returned.
    updated = archive
    for contender in population:
        if not updated:
            updated.append(contender)
            continue
        dominated = []
        keep = True
        for resident in updated:
            if contender.candidate == resident.candidate:
                # Identical solution already archived: skip it.
                keep = False
                break
            elif contender < resident:
                keep = False
            elif contender > resident:
                dominated.append(resident)
        for loser in dominated:
            updated.remove(loser)
        if keep:
            updated.append(contender)
    return updated
def make_docker_context(get_steps_fn, github_project, opts=None, default_context_dir=None):
    '''
    Returns a path to the Docker context directory. See parse_args.py.

    Helper for making a command-line utility that writes your project's
    Dockerfile and associated data into a (temporary) directory. Your main
    program might look something like this:

        print(make_docker_context(
            lambda builder: [builder.step(...), ...],
            'facebook/your_project',
        ))
    '''
    if opts is None:
        opts = {}

    # (os_image, gcc_version) pairs we are willing to build with.
    valid_versions = (('ubuntu:16.04', '5'), )

    def add_args(parser):
        # Registers the command-line flags whose values feed the builder
        # options below; any other option is set via --option.
        parser.add_argument(
            '--docker-context-dir', metavar='DIR',
            default=default_context_dir,
            help='Write the Dockerfile and its context into this directory. '
                 'If empty, make a temporary directory. Default: %(default)s.',
        )
        parser.add_argument(
            '--user', metavar='NAME', default=opts.get('user', 'nobody'),
            help='Build and install as this user. Default: %(default)s.',
        )
        parser.add_argument(
            '--prefix', metavar='DIR',
            default=opts.get('prefix', '/home/install'),
            help='Install all libraries in this prefix. Default: %(default)s.',
        )
        parser.add_argument(
            '--projects-dir', metavar='DIR',
            default=opts.get('projects_dir', '/home'),
            help='Place project code directories here. Default: %(default)s.',
        )
        # NOTE(review): zip(*valid_versions)[0] subscripts a zip object —
        # this only works on Python 2 (where zip returns a list); on
        # Python 3 it raises TypeError. Same below for [1].
        parser.add_argument(
            '--os-image', metavar='IMG', choices=zip(*valid_versions)[0],
            default=opts.get('os_image', valid_versions[0][0]),
            help='Docker OS image -- be sure to use only ones you trust (See '
                 'README.docker). Choices: %(choices)s. Default: %(default)s.',
        )
        parser.add_argument(
            '--gcc-version', metavar='VER',
            choices=set(zip(*valid_versions)[1]),
            default=opts.get('gcc_version', valid_versions[0][1]),
            help='Choices: %(choices)s. Default: %(default)s.',
        )
        parser.add_argument(
            '--make-parallelism', metavar='NUM', type=int,
            default=opts.get('make_parallelism', 1),
            help='Use `make -j` on multi-CPU systems with lots of RAM. '
                 'Default: %(default)s.',
        )
        parser.add_argument(
            '--local-repo-dir', metavar='DIR',
            help='If set, build {0} from a local directory instead of Github.'.format(github_project),
        )
        # NOTE(review): the help text says "--cache-tgz" but the flag is
        # "--ccache-tgz" — looks like a typo in the help string; confirm
        # before changing user-visible text.
        parser.add_argument(
            '--ccache-tgz', metavar='PATH',
            help='If set, enable ccache for the build. To initialize the '
                 'cache, first try to hardlink, then to copy --cache-tgz '
                 'as ccache.tgz into the --docker-context-dir.'
        )

    opts = parse_args_to_fbcode_builder_opts(
        add_args,
        # These have add_argument() calls, others are set via --option.
        (
            'docker_context_dir',
            'user',
            'prefix',
            'projects_dir',
            'os_image',
            'gcc_version',
            'make_parallelism',
            'local_repo_dir',
            'ccache_tgz',
        ),
        opts,
        help=textwrap.dedent('''

        Reads `fbcode_builder_config.py` from the current directory, and
        prepares a Docker context directory to build {github_project} and
        its dependencies.  Prints to stdout the path to the context
        directory.

        Pass --option {github_project}:git_hash SHA1 to build something
        other than the master branch from Github.

        Or, pass --option {github_project}:local_repo_dir LOCAL_PATH to
        build from a local repo instead of cloning from Github.

        Usage:

            (cd $(./make_docker_context.py) && docker build . 2>&1 | tee log)

        '''.format(github_project=github_project)),
    )

    # This allows travis_docker_build.sh not to know the main Github project.
    local_repo_dir = opts.pop('local_repo_dir', None)
    if local_repo_dir is not None:
        opts['{0}:local_repo_dir'.format(github_project)] = local_repo_dir

    # Refuse mismatched OS/GCC combinations up front.
    if (opts.get('os_image'), opts.get('gcc_version')) not in valid_versions:
        raise Exception('Due to 4/5 ABI changes (std::string), we can only '
                        'use {0}'.format(' / '.join(
                            'GCC {1} on {0}'.format(*p) for p in valid_versions
                        )))

    # Pick (or create) the context directory.
    if opts.get('docker_context_dir') is None:
        opts['docker_context_dir'] = tempfile.mkdtemp(prefix='docker-context-')
    elif not os.path.exists(opts.get('docker_context_dir')):
        os.makedirs(opts.get('docker_context_dir'))
    builder = DockerFBCodeBuilder(**opts)
    context_dir = builder.option('docker_context_dir')  # Mark option "in-use"
    # The renderer may also populate some files into the context_dir.
    dockerfile = builder.render(get_steps_fn(builder))

    # O_EXCL: refuse to clobber a pre-existing Dockerfile in the context.
    with os.fdopen(os.open(
        os.path.join(context_dir, 'Dockerfile'),
        os.O_RDWR | os.O_CREAT | os.O_EXCL,  # Do not overwrite existing files
        0o644,
    ), 'w') as f:
        f.write(dockerfile)

    return context_dir