signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def col(loc, strg):
    """Return the 1-based column number of position *loc* within *strg*.

    Newlines count as line separators; the character just after a newline
    is column 1.  A location that sits exactly on a newline reports
    column 1.  Note: the default parsing behavior is to expand tabs in
    the input string before parsing begins, so keep a consistent view of
    the string when mapping locations back to columns.
    """
    s = strg
    if loc < len(s) and s[loc] == '\n':
        return 1
    return loc - s.rfind("\n", 0, loc)
def call_external_subprocess(command_list, stdin_filename=None,
                             stdout_filename=None, stderr_filename=None,
                             env=None):
    """Run the command and arguments in ``command_list``.

    Searches the system PATH for the executable; no shell is started.
    Each selected stream is redirected to/from the given filename.
    Blocks until the command completes.

    :param command_list: argv-style list, e.g. ``['ls', '-l']``
    :param stdin_filename: file fed to the child's stdin, or None
    :param stdout_filename: file capturing the child's stdout, or None
    :param stderr_filename: file capturing the child's stderr, or None
    :param env: environment mapping for the child, or None to inherit
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    import contextlib  # local import: keeps this fix self-contained

    # BUGFIX: the original opened the files and closed them only after
    # check_call returned, leaking all three handles if it raised.
    # ExitStack guarantees closure on every path.
    with contextlib.ExitStack() as stack:
        stdin = (stack.enter_context(open(stdin_filename, "r"))
                 if stdin_filename else None)
        stdout = (stack.enter_context(open(stdout_filename, "w"))
                  if stdout_filename else None)
        stderr = (stack.enter_context(open(stderr_filename, "w"))
                  if stderr_filename else None)
        subprocess.check_call(command_list, stdin=stdin, stdout=stdout,
                              stderr=stderr, env=env)
def set_log_level(self):
    """Set the logger's level from command-line options.

    ``--debug`` wins over ``--quiet``; otherwise INFO is used.  A new
    StreamHandler is attached on every call.

    @returns: logger object
    """
    if self.options.debug:
        level = logging.DEBUG
    elif self.options.quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    self.logger.setLevel(level)
    self.logger.addHandler(logging.StreamHandler())
    return self.logger
def _create_lock_object(self, key):
    '''Build and return a redis lock object for *key* (split out for testing).'''
    expiration = self.settings['REDIS_LOCK_EXPIRATION']
    return redis_lock.Lock(
        self.redis_conn,
        key,
        expire=expiration,
        auto_renewal=True,
    )
def post_json(self, url, data, cls=None, **kwargs):
    """POST *data* (JSON-encoded) to the api-server and return the decoded reply.

    :param url: resource location (eg: "/type/uuid")
    :type url: str
    :param cls: JSONEncoder class
    :type cls: JSONEncoder
    """
    kwargs['data'] = to_json(data, cls=cls)
    kwargs['headers'] = self.default_headers
    response = self.post(url, **kwargs)
    return response.json()
def get_deck(self):
    """Return the parent :Deck: of a :Placeable:, or None if none is found."""
    # Seed with None so pop() below is safe when the trace holds no Deck.
    candidates = [None]
    for item in self.get_trace():
        if isinstance(item, Deck):
            candidates.append(item)
    # Pop the last (and hopefully only) Deck, or the None placeholder.
    return candidates.pop()
def snip_string(string, max_len=20, snip_string='...', snip_point=0.5):
    """Snip *string* so it is no longer than *max_len* characters.

    Deleted characters are replaced by *snip_string*.  The snip is made
    at *snip_point*, a fraction between 0 and 1 giving the relative
    position along the string (0.5 = the middle).

    >>> snip_string('this is long', 8)
    'th...ong'
    >>> snip_string('this is long', 12)
    'this is long'
    >>> snip_string('this is long', 8, '~', 1.0)
    'this is~'
    """
    if len(string) <= max_len:
        return string
    visible_len = max_len - len(snip_string)
    start_len = int(visible_len * snip_point)
    end_len = visible_len - start_len
    snipped = string[:start_len] + snip_string
    if end_len > 0:
        snipped += string[-end_len:]
    return snipped
def _print_context(  # pragma: no cover
    filename,
    secret,
    count,
    total,
    plugin_settings,
    additional_header_lines=None,
    force=False,
):
    """Print one secret with its surrounding file context for interactive audit.

    :type filename: str
    :param filename: the file currently scanned.
    :type secret: dict, in PotentialSecret.json() format
    :param secret: the secret, represented in the baseline file.
    :type count: int
    :param count: current count of secrets scanned so far
    :type total: int
    :param total: total number of secrets in baseline
    :type plugin_settings: list
    :param plugin_settings: plugins used to create baseline.
    :type additional_header_lines: str
    :param additional_header_lines: any additional lines to add to the
        header of the interactive audit display.
    :type force: bool
    :param force: if True, will print the lines of code even if it
        doesn't find the secret expected
    :raises: SecretNotFoundOnSpecifiedLineError
    """
    header = '{} {} {} {}\n{} {}\n{} {}'.format(
        colorize('Secret: ', AnsiColor.BOLD),
        colorize(str(count), AnsiColor.PURPLE),
        colorize('of', AnsiColor.BOLD),
        colorize(str(total), AnsiColor.PURPLE),
        colorize('Filename: ', AnsiColor.BOLD),
        colorize(filename, AnsiColor.PURPLE),
        colorize('Secret Type:', AnsiColor.BOLD),
        colorize(secret['type'], AnsiColor.PURPLE),
    )
    print(header)
    if additional_header_lines:
        print(additional_header_lines)
    print('-' * 10)

    error_obj = None
    try:
        print(_get_secret_with_context(
            filename,
            secret,
            plugin_settings,
            force=force,
        ))
    except SecretNotFoundOnSpecifiedLineError as e:
        error_obj = e
        print(e)
    print('-' * 10)

    # Re-raise only after the trailing divider so the display stays intact.
    if error_obj:
        raise error_obj
def remove_cache_tier(self, cache_pool):
    """Remove a cache tier from Ceph.

    For writeback pools, flushes all dirty objects and waits for the
    flush to complete before detaching the tier.

    :param cache_pool: six.string_types. The cache tier pool name to remove.
    :return: None
    """
    tier_cmd = ['ceph', '--id', self.service, 'osd', 'tier']
    # read-only is easy, writeback is much harder
    mode = get_cache_mode(self.service, cache_pool)
    if mode == 'readonly':
        check_call(tier_cmd + ['cache-mode', cache_pool, 'none'])
        check_call(tier_cmd + ['remove', self.name, cache_pool])
    elif mode == 'writeback':
        forward_cmd = tier_cmd + ['cache-mode', cache_pool, 'forward']
        if cmp_pkgrevno('ceph-common', '10.1') >= 0:
            # Jewel added a mandatory flag
            forward_cmd.append('--yes-i-really-mean-it')
        check_call(forward_cmd)
        # Flush the cache and wait for it to return
        check_call(['rados', '--id', self.service, '-p', cache_pool,
                    'cache-flush-evict-all'])
        check_call(tier_cmd + ['remove-overlay', self.name])
        check_call(tier_cmd + ['remove', self.name, cache_pool])
def get_transformation(name: str):
    """Look up a registered transformation function by name.

    :param name: The name of a function to look up
    :return: A transformation function
    :raises MissingPipelineFunctionError: If the given function name is
        not registered
    """
    func = mapped.get(name)
    if func is not None:
        return func
    raise MissingPipelineFunctionError(
        '{} is not registered as a pipeline function'.format(name)
    )
def anoteElements(ax, anotelist, showAccName=False, efilter=None,
                  textypos=None, **kwargs):
    """Annotate elements onto a matplotlib axes.

    :param ax: matplotlib axes object
    :param anotelist: element annotation object list
    :param showAccName: tag name for accelerator tubes? default is False,
        show acceleration band type, e.g. 'S', 'C', 'X', or '[S,C,X]D'
        for cavity
    :param efilter: element type filter; default None annotates all
        elements.  May be one type name or a list/tuple of type names,
        e.g. efilter='QUAD' or efilter=('QUAD', 'CSRCSBEN')
    :param textypos: y coordinate of annotated text string
    :param kwargs: style overrides, e.g. alpha=0.8,
        arrowprops=dict(arrowstyle='->'), rotation=-60, fontsize='small'
    :return: list of annotation objects
    """
    style = {'alpha': 0.8,
             'arrowprops': dict(arrowstyle='->'),
             'rotation': -60,
             'fontsize': 'small'}
    style.update(kwargs)

    def _annotate_one(anote):
        # One annotation; factored out of the two duplicated branches of
        # the original implementation.
        if textypos is None:
            textxypos = tuple(anote['textpos'])
        else:
            textxypos = (anote['textpos'][0], textypos)
        if not showAccName and anote['type'] in ('RFCW', 'RFDF'):
            # Accelerating structures get a plain text label without an arrow.
            kwstyle = dict(style)
            kwstyle.pop('arrowprops')
            return ax.text(anote['atext']['xypos'][0],
                           anote['atext']['xypos'][1],
                           anote['atext']['text'],
                           **kwstyle)
        return ax.annotate(s=anote['name'], xy=anote['xypos'],
                           xytext=textxypos, **style)

    if efilter is None:
        selected = anotelist
    else:
        # BUGFIX: the original assigned the normalized filter to an unused
        # variable named ``filter``, so a bare-string efilter fell through
        # to substring matching via ``in``.  Normalize a single type name
        # to a one-element tuple for exact matching.
        if isinstance(efilter, str):
            efilter = (efilter,)
        selected = [a for a in anotelist if a['type'] in efilter]

    return [_annotate_one(anote) for anote in selected]
def promote_artifacts(self, promote_stage='latest'):
    """Promote artifact version to dest.

    Args:
        promote_stage (string): Stage that is being promoted
    """
    stage = promote_stage.lower()
    if stage == 'alpha':
        destination = self.s3_canary_uri
    else:
        # 'canary' and every other stage promote to the latest URI.
        destination = self.s3_latest_uri
    self._sync_to_uri(destination)
def explain_template_loading_attempts(app, template, attempts):
    """Log a human-readable trace of how a template lookup was resolved.

    Mostly the same as :func:`flask.debughelpers.explain_template_loading_attempts`,
    extended to show what :class:`UnchainedJinjaLoader` is doing: the
    template name may encode a number of "prior" templates to skip, so
    the report marks each loader hit as skipping/using/ignoring.

    :param app: the Flask application whose logger receives the report
    :param template: the (possibly prior-annotated) template name
    :param attempts: iterable of ``(loader, source_object, triple)`` where
        ``triple`` is a Jinja loader result or None on no match
    """
    # Local imports keep flask internals out of module scope.
    from flask import Flask, Blueprint
    from flask.debughelpers import _dump_loader_info
    from flask.globals import _request_ctx_stack
    # parse_template splits the name into the bare template name and the
    # number of earlier matches that should be skipped.
    template, expected_priors = parse_template(template)
    info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":']
    total_found = 0
    blueprint = None
    # NOTE(review): relies on the flask request-context stack internals;
    # _request_ctx_stack is deprecated in newer Flask — confirm version.
    reqctx = _request_ctx_stack.top
    if reqctx is not None and reqctx.request.blueprint is not None:
        blueprint = reqctx.request.blueprint
    for idx, (loader, srcobj, triple) in enumerate(attempts):
        # Describe where this loader came from (app, blueprint, or other).
        if isinstance(srcobj, Flask):
            src_info = 'application "%s"' % srcobj.import_name
        elif isinstance(srcobj, Blueprint):
            src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name)
        else:
            src_info = repr(srcobj)
        info.append('% 5d: trying loader of %s' % (idx + 1, src_info))
        for line in _dump_loader_info(loader):
            info.append(' %s' % line)
        if triple is None:
            detail = 'no match'
        else:
            # Matches before the expected prior count are skipped; the
            # one at the count is used; later ones are shadowed.
            if total_found < expected_priors:
                action = 'skipping'
            elif total_found == expected_priors:
                action = 'using'
            else:
                action = 'ignoring'
            detail = '%s (%r)' % (action, triple[1] or '<string>')
            total_found += 1
        info.append(' -> %s' % detail)
    seems_fishy = False
    if total_found < expected_priors:
        info.append('Error: the template could not be found.')
        seems_fishy = True
    if blueprint is not None and seems_fishy:
        info.append(' The template was looked up from an endpoint that '
                    'belongs to the blueprint "%s".' % blueprint)
        info.append(' Maybe you did not place a template in the right folder?')
        info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
    app.logger.info('\n'.join(info))
def _pump(self):
    '''Process queued commands while the driver is idle.

    Pops and runs commands (callable, args, name triples) until the
    queue empties or the driver becomes busy; failures are reported via
    ``notify('error', ...)`` and optionally traced when debugging.
    '''
    while not self._busy and self._queue:
        cmd = self._queue.pop(0)
        self._name = cmd[2]
        try:
            cmd[0](*cmd[1])
        except Exception as e:
            self.notify('error', exception=e)
            if self._debug:
                traceback.print_exc()
def sortedby2(item_list, *args, **kwargs):
    """Sort ``item_list`` by one or more parallel key lists.

    Args:
        item_list (list): list to sort
        *args: one or more key lists, each the same length as item_list
        **kwargs:
            reverse (bool): sort order is descending if True else ascending

    Returns:
        list: ``item_list`` sorted by the values of the key lists
        (ascending by default)

    Example:
        >>> sortedby2([1, 2, 3, 4, 5], [1, 1, 2, 3, 4], [2, 1, 4, 1, 1])
        [2, 1, 3, 4, 5]
    """
    for key_list in args:
        assert len(key_list) == len(item_list)
    reverse = kwargs.get('reverse', False)
    keyfn = operator.itemgetter(*range(1, len(args) + 1))
    tuples = list(zip(item_list, *args))
    try:
        ordered = sorted(tuples, key=keyfn, reverse=reverse)
    except TypeError:
        # Python 3 refuses to order mixed types; fall back to comparing
        # the string representations of the keys.
        ordered = sorted(tuples,
                         key=lambda tup: tuple(map(str, tup[1:])),
                         reverse=reverse)
    return [tup[0] for tup in ordered]
def get_repl_lag(self, master_status):
    """Return the replication lag (in secs) between this member's optime
    and the master's, given the master's 'members' element from rs.status().

    :raises MongoctlException: if this member's status cannot be determined
    """
    member_status = self.get_member_rs_status()
    if member_status:
        return get_member_repl_lag(member_status, master_status)
    raise MongoctlException("Unable to determine replicaset status for"
                            " member '%s'" % self.id)
def get_channels(self, condensed=False):
    '''Grab all channels in the slack team.

    Args:
        condensed (bool): if true, return only each channel's id and name

    Returns:
        dict (or list when condensed): channels in the Slack team, or
        None if the API call did not succeed.
        See also: https://api.slack.com/methods/channels.list
    '''
    response = self.slack_client.api_call('channels.list')
    if not response.get('ok'):
        return None
    if not condensed:
        return response
    return [{'id': ch.get('id'), 'name': ch.get('name')}
            for ch in response.get('channels')]
def deploy_local(self, dotfiles, target_root=None):
    """Deploy dotfiles to a local path.

    For each (source, target) pair, any existing file/link/dir at the
    target is removed, parent directories are created as needed, and the
    source is either copied (``self.args.copy``) or symlinked into place.

    :param dotfiles: mapping of source path -> target path (relative)
    :param target_root: destination root; defaults to ``self.args.path``
    """
    if target_root is None:
        target_root = self.args.path
    for source_path, target_path in dotfiles.items():
        source_path = path.join(self.source, source_path)
        target_path = path.join(target_root, target_path)
        # Clear whatever currently occupies the target slot.
        if path.isfile(target_path) or path.islink(target_path):
            self.log.debug('Removing existing file at %s', target_path)
            os.unlink(target_path)
        elif path.isdir(target_path):
            self.log.debug('Removing existing dir at %s', target_path)
            shutil.rmtree(target_path)
        parent_dir = path.dirname(target_path)
        if not path.isdir(parent_dir):
            self.log.debug('Creating parent dir %s', parent_dir)
            os.makedirs(parent_dir)
        if self.args.copy:
            # BUGFIX: the original logged 'Copying file' on the directory
            # branch and 'Copying dir' on the file branch; the messages
            # are now matched to what each branch actually does.
            if path.isdir(source_path):
                self.log.debug('Copying dir %s to %s',
                               source_path, target_path)
                shutil.copytree(source_path, target_path)
            else:
                self.log.debug('Copying file %s to %s',
                               source_path, target_path)
                shutil.copy(source_path, target_path)
        else:
            self.log.debug('Symlinking %s -> %s', target_path, source_path)
            os.symlink(source_path, target_path)
def ls(ctx, name, list_formatted):
    """List EC2 instances, optionally filtered by their Name tag."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    if name == '*':
        # Wildcard: no filter, list everything.
        instances = ec2.instances.filter()
    else:
        name_filter = {'Name': 'tag:Name', 'Values': [name]}
        instances = ec2.instances.filter(Filters=[name_filter])
    lines = format_output(instances, list_formatted)
    click.echo('\n'.join(lines))
def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False):
    """Per-channel scaling of x.

    Scales each channel of *x* by exp(logs), where the "logs" variable is
    data-dependently initialized (when ``init`` is True) so that the
    scaled output has roughly unit variance.  Also returns the
    log-determinant contribution of the scaling for flow objectives.

    :param name: variable scope name for the per-channel "logs" variable
    :param x: 2-D tensor (batch, channels) or 4-D NHWC tensor — the
        reductions over [0] / [0, 1, 2] and var_shape (1, 1, 1, C)
        fix the channels-last layout
    :param logscale_factor: the stored variable is divided/multiplied by
        this factor (stabilizes the data-dependent init)
    :param reverse: if True, apply the inverse scaling exp(-logs) and
        negate the log-determinant
    :param init: if True, (re)initialize "logs" from the batch statistics
        of x (delegated to get_variable_ddi)
    :return: (scaled_x, dlogdet) where dlogdet = logdet_factor * sum(logs)
    """
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # Variance initialization logic.
        assert len(x_shape) == 2 or len(x_shape) == 4
        if len(x_shape) == 2:
            # (batch, channels): one scale per channel, logdet weight 1.
            x_var = tf.reduce_mean(x ** 2, [0], keepdims=True)
            logdet_factor = 1
            var_shape = (1, x_shape[1])
        elif len(x_shape) == 4:
            # NHWC: one scale per channel; each of the H*W positions
            # contributes sum(logs) to the log-determinant.
            x_var = tf.reduce_mean(x ** 2, [0, 1, 2], keepdims=True)
            logdet_factor = x_shape[1] * x_shape[2]
            var_shape = (1, 1, 1, x_shape[3])
        # Init value makes exp(logs) ~ 1/std(x); 1e-6 guards against
        # division by zero for all-zero channels.
        init_value = tf.log(1.0 / (tf.sqrt(x_var) + 1e-6)) / logscale_factor
        logs = get_variable_ddi("logs", var_shape, initial_value=init_value, init=init)
        logs = logs * logscale_factor
        # Function and reverse function.
        if not reverse:
            x = x * tf.exp(logs)
        else:
            x = x * tf.exp(-logs)
        # Objective calculation, h * w * sum(log|s|)
        dlogdet = tf.reduce_sum(logs) * logdet_factor
        if reverse:
            dlogdet *= -1
        return x, dlogdet
def getDignities(self):
    """Return the dignities belonging to this object."""
    info = self.getInfo()
    return [dign for (dign, objID) in info.items() if objID == self.obj.id]
def __write_edit_tmpl(tag_key, tag_list):
    '''Generate the HTML file for editing.

    :param tag_key: key of the tags.
    :param tag_list: list of the tags.
    :return: None
    '''
    edit_file = os.path.join(OUT_DIR, 'edit',
                             'edit_' + tag_key.split('_')[1] + '.html')
    widgets = []
    for sig in tag_list:
        var_html = HTML_DICS['_'.join(['html', sig])]
        widget_type = var_html['type']
        # Dispatch on the widget type; unknown types render nothing.
        if widget_type in INPUT_ARR:
            tmpl = func_gen_html.gen_input_edit(var_html)
        elif widget_type == 'select':
            tmpl = func_gen_html.gen_select_edit(var_html)
        elif widget_type == 'radio':
            tmpl = func_gen_html.gen_radio_edit(var_html)
        elif widget_type == 'checkbox':
            tmpl = func_gen_html.gen_checkbox_edit(var_html)
        elif widget_type == 'file':
            tmpl = func_gen_html.gen_file_edit(var_html)
        else:
            tmpl = ''
        widgets.append(tmpl)
    page = TPL_EDIT.replace('xxxxxx', ''.join(widgets)) \
                   .replace('yyyyyy', tag_key.split('_')[1][:2]) \
                   .replace('kkkk', KIND_DICS['kind_' + tag_key.split('_')[-1]])
    with open(edit_file, 'w') as fileout2:
        fileout2.write(minify(page))
def iter_services(self, service_group=None):
    """Iterate service records from the service registry.

    Args:
        service_group: optional name of service group

    Returns:
        if service_group is omitted or None, an iterator over all
        flattened service records in the service registry; otherwise an
        iterator over the service records in that group

    Raises:
        RuntimeError: if service_group is not listed in EFConfig
    """
    if service_group is None:
        return self.services().iteritems()
    if service_group not in EFConfig.SERVICE_GROUPS:
        raise RuntimeError(
            "service registry: {} doesn't have '{}' section listed in EFConfig".format(
                self._service_registry_file, service_group))
    # NOTE: .iteritems() is Python-2 only, matching the surrounding code.
    return self.service_registry_json[service_group].iteritems()
def team(self, team, simple=False):
    """Get data on a single specified team.

    :param team: Team to get data for.
    :param simple: Get only vital data.
    :return: Team object with data on specified team.
    """
    suffix = '/simple' if simple else ''
    endpoint = 'team/%s%s' % (self.team_key(team), suffix)
    return Team(self._get(endpoint))
def to_glyphs_font_attributes(self, source, master, is_initial):
    """Copy font attributes from the source UFO into the Glyphs font/master.

    Arguments:
    self -- The UFOBuilder
    source -- The current UFO being read
    master -- The current master being written
    is_initial -- True iff this is the first UFO that we process
    """
    # The first UFO seeds the attributes; later ones are merged/compared.
    handler = (_set_glyphs_font_attributes if is_initial
               else _compare_and_merge_glyphs_font_attributes)
    handler(self, source)
def _cdf(self, xloc, dist, cache):
    """Cumulative distribution function (forward-evaluated at e**xloc)."""
    exp_xloc = numpy.e ** xloc
    return evaluation.evaluate_forward(dist, exp_xloc, cache=cache)
def paddr(address):
    """Parse a string representation of an address.

    This function is the inverse of :func:`saddr`.  Supported forms:

    * ``'[ipv6]:port'`` -> ``(host, port)`` tuple
    * ``'host:port'``   -> ``(host, port)`` tuple
    * anything else     -> returned unchanged (e.g. a UNIX socket path)

    :raises TypeError: if *address* is not a string
    :raises ValueError: if a bracketed IPv6 address is malformed or the
        port is not an integer
    """
    if not isinstance(address, six.string_types):
        raise TypeError('expecting a string')
    if address.startswith('['):
        # IPv6 form: "[host]:port"
        end = address.find(']:')
        if end == -1:
            # BUGFIX: originally raised a bare ValueError with no message.
            raise ValueError('invalid address format: %r' % (address,))
        return (address[1:end], int(address[end + 2:]))
    elif ':' in address:
        sep = address.find(':')
        return (address[:sep], int(address[sep + 1:]))
    else:
        return address
def marvcli_restore(file):
    """Restore a previously dumped database from *file* (open JSON stream)."""
    dump = json.load(file)
    site = create_app().site
    site.restore_database(**dump)
def add_volume(self, colorchange=True, column=None, name='', str='{name}', **kwargs):
    """Add a 'volume' study to QuantFigure.studies.

    Parameters:
        colorchange : bool
            If True each volume bar is colored by whether 'base'
            (default 'close') rose or fell versus the previous value;
            if False the coloring follows the volume data itself.
        column : string
            Data column holding the volume data; defaults to the
            figure's configured volume column.
        name : string
            Name given to the study.
        str : string
            Label factory for studies.
            Wildcards: {name}, {study}, {period}.
        kwargs:
            base : string
                Column defining positive/negative changes when
                colorchange=True. Default 'close'.
            up_color : string
                Color for positive bars.
            down_color : string
                Color for negative bars.
    """
    column = column or self._d['volume']
    display = utils.merge_dict(
        {'up_color': kwargs.pop('up_color', self.theme['up_color']),
         'down_color': kwargs.pop('down_color', self.theme['down_color'])},
        kwargs)
    self._add_study({
        'kind': 'volume',
        'name': name,
        'params': {'colorchange': colorchange, 'base': 'close',
                   'column': column, 'str': None},
        'display': display,
    })
def _get_bufsize_linux(iface):
    '''Return network interface buffer information using ethtool.'''
    ret = {'result': False}
    out = __salt__['cmd.run']('/sbin/ethtool -g {0}'.format(iface))
    pat = re.compile(r'^(.+):\s+(\d+)$')
    # Section headers in the ethtool output switch the key suffix;
    # NOTE(review): the pre-header suffix 'max-' (appended, not prefixed)
    # mirrors the original code — confirm it is intentional.
    suffix = 'max-'
    for line in out.splitlines():
        match = pat.match(line)
        if match:
            key = match.group(1).lower().replace(' ', '-') + suffix
            ret[key] = int(match.group(2))
            ret['result'] = True
        elif line.endswith('maximums:'):
            suffix = '-max'
        elif line.endswith('settings:'):
            suffix = ''
    if not ret['result']:
        parts = out.split()
        # remove shell cmd prefix from msg
        if parts[0].endswith('sh:'):
            out = ' '.join(parts[1:])
        ret['comment'] = out
    return ret
def docfrom(base):
    """Decorator factory: prepend *base*'s docstring to the decorated
    function's own docstring."""
    def setdoc(func):
        base_doc = getattr(base, '__doc__') or ''
        own_doc = func.__doc__ or ''
        func.__doc__ = base_doc + own_doc
        return func
    return setdoc
def getCallSet(self, id_):
    """Return the CallSet with the specified id.

    :raises exceptions.CallSetNotFoundException: if no such CallSet exists
    """
    if id_ in self._callSetIdMap:
        return self._callSetIdMap[id_]
    raise exceptions.CallSetNotFoundException(id_)
def option(value, default='', omit_opts=False, omit_master=False, omit_pillar=False):
    '''Pass in a generic option and receive the value that will be assigned.

    Lookup order: minion opts, master config (via pillar), pillar,
    DEFAULTS, then *default*.

    CLI Example:

    .. code-block:: bash

        salt '*' config.option redis.host
    '''
    if not omit_opts and value in __opts__:
        return __opts__[value]
    if not omit_master and value in __pillar__.get('master', {}):
        return __pillar__['master'][value]
    if not omit_pillar and value in __pillar__:
        return __pillar__[value]
    if value in DEFAULTS:
        return DEFAULTS[value]
    return default
def _matches_billing(price, hourly):
    """Return True if the price object matches the requested billing mode
    (hourly when *hourly* is truthy, monthly otherwise)."""
    if hourly:
        return price.get('hourlyRecurringFee') is not None
    return price.get('recurringFee') is not None
def flatten_probas(probas, labels, ignore=None):
    """Flatten batch predictions to (P, C) and labels to (P,), optionally
    dropping positions whose label equals *ignore*."""
    B, C, H, W = probas.size()
    # (B, C, H, W) -> (B*H*W, C)
    flat_probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    flat_labels = labels.view(-1)
    if ignore is None:
        return flat_probas, flat_labels
    valid = flat_labels != ignore
    vprobas = flat_probas[valid.nonzero().squeeze()]
    vlabels = flat_labels[valid]
    return vprobas, vlabels
def new(self):
    # type: () -> None
    '''Create a new UDF Anchor Volume Structure.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'UDF Anchor Volume Structure already initialized')
    self.desc_tag = UDFTag()
    # FIXME: we should let the user set serial_number
    self.desc_tag.new(2)
    # Main and reserve volume descriptor sequences are both 32KB;
    # their extent locations are filled in later.
    self.main_vd_length = 32768
    self.main_vd_extent = 0
    self.reserve_vd_length = 32768
    self.reserve_vd_extent = 0
    self._initialized = True
def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers):
    """Fix files used as input when pre-processing MPL-containers in
    their numbered form.

    :param sourceDir: directory holding one sub-directory per container
    :param suffix: filename suffix of the input files
    :param timestamp: timestamp handed through to fix_header_comment
    :param containers: iterable of MPL-container type names
    """
    # Fix input files for each MPL-container type.
    for container in containers:
        pattern = os.path.join(sourceDir, container, container + '*' + suffix)
        for current_file in sorted(glob.glob(pattern)):
            fix_header_comment(current_file, timestamp)
def to_timedelta(arg, unit='ns', box=True, errors='raise'):
    """Convert argument to timedelta.

    Timedeltas are absolute differences in times, expressed in difference
    units (e.g. days, hours, minutes, seconds).  This method converts an
    argument from a recognized timedelta format / value into a Timedelta
    type.

    Parameters
    ----------
    arg : str, timedelta, list-like or Series
        The data to be converted to timedelta.
    unit : str, default 'ns'
        Denotes the unit of the arg, e.g. 'D', 'h', 'm', 's', 'ms', 'us',
        'ns' (see parse_timedelta_unit for the full alias list).
    box : bool, default True
        If True return a Timedelta/TimedeltaIndex of the results; if
        False return numpy.timedelta64 or an ndarray of timedelta64[ns].

        .. deprecated:: 0.25.0
            Use :meth:`.to_numpy` or :meth:`Timedelta.to_timedelta64`
            instead.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - 'raise': invalid parsing raises an exception.
        - 'coerce': invalid parsing is set as NaT.
        - 'ignore': invalid parsing returns the input.

    Returns
    -------
    timedelta64, TimedeltaIndex or Timedelta
        Output type depends on the input type and ``box``.

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_datetime : Convert argument to datetime.
    """
    unit = parse_timedelta_unit(unit)

    if errors not in ('ignore', 'raise', 'coerce'):
        # BUGFIX: the message previously ended with a stray '}' left over
        # from an old format string.
        raise ValueError("errors must be one of 'ignore', "
                         "'raise', or 'coerce'")

    if unit in {'Y', 'y', 'M'}:
        warnings.warn("M and Y units are deprecated and "
                      "will be removed in a future version.",
                      FutureWarning, stacklevel=2)

    if arg is None:
        return arg
    elif isinstance(arg, ABCSeries):
        values = _convert_listlike(arg._values, unit=unit,
                                   box=False, errors=errors)
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, unit=unit, box=box,
                                 errors=errors, name=arg.name)
    elif isinstance(arg, np.ndarray) and arg.ndim == 0:
        # extract array scalar and process below
        arg = arg.item()
    elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1:
        return _convert_listlike(arg, unit=unit, box=box, errors=errors)
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, timedelta, list, tuple, '
                        '1-d array, or Series')

    # ...so it must be a scalar value. Return scalar.
    return _coerce_scalar_to_timedelta_type(arg, unit=unit,
                                            box=box, errors=errors)
def max(self):
    """Maximum over all chunks, ignoring NaNs.

    The result is computed once via chunkwise nanmax and cached in
    ``self.attrs["max"]``.
    """
    if "max" not in self.attrs:
        chunk_maxima = self.chunkwise(lambda dataset, s: np.nanmax(dataset[s]))
        self.attrs["max"] = np.nanmax(list(chunk_maxima.values()))
    return self.attrs["max"]
async def _get_popular_people_page(self, page=1):
    """Fetch one page of popular-person data.

    Arguments:
        page (:py:class:`int`, optional): The page to get.

    Returns:
        :py:class:`dict`: The page data.
    """
    url = self.url_builder('person/popular',
                           url_params=OrderedDict(page=page))
    return await self.get_data(url)
def get_node_id ( nuc_or_sat , namespace = None ) :
    """Return the node ID of the given nucleus or satellite.

    Parameters
    ----------
    nuc_or_sat :
        a nucleus or satellite subtree (presumably an nltk Tree -- TODO
        confirm against callers)
    namespace : str or None
        optional prefix prepended to the generated ID

    Returns
    -------
    str
        ``<leaf_id>`` (optionally ``<namespace>:<leaf_id>``) for leaves,
        or ``span:<start>-<end>`` (optionally namespaced) for spans
    """
    node_type = get_node_type ( nuc_or_sat )
    if node_type == 'leaf' :
        leaf_id = nuc_or_sat [ 0 ] . leaves ( ) [ 0 ]
        if namespace is not None :
            return '{0}:{1}' . format ( namespace , leaf_id )
        # BUG FIX: the original called the undefined name ``string``;
        # ``str`` is the intended builtin conversion.
        return str ( leaf_id )
    # else: node_type == 'span'
    span_start = nuc_or_sat [ 0 ] . leaves ( ) [ 0 ]
    span_end = nuc_or_sat [ 0 ] . leaves ( ) [ 1 ]
    if namespace is not None :
        return '{0}:span:{1}-{2}' . format ( namespace , span_start , span_end )
    return 'span:{0}-{1}' . format ( span_start , span_end )
def arcball_map_to_sphere ( point , center , radius ) :
    """Map window coordinates onto the unit sphere (arcball rotation).

    Returns a length-3 numpy array: a point on the unit sphere when the
    window point falls inside the sphere's silhouette, otherwise the
    nearest point on the sphere's equator (z == 0).
    """
    vx = ( point [ 0 ] - center [ 0 ] ) / radius
    vy = ( center [ 1 ] - point [ 1 ] ) / radius
    length2 = vx * vx + vy * vy
    if length2 <= 1.0 :
        # inside the silhouette: lift onto the sphere surface
        return numpy . array ( [ vx , vy , math . sqrt ( 1.0 - length2 ) ] )
    # outside: project radially back onto the sphere's edge
    length = math . sqrt ( length2 )
    return numpy . array ( [ vx / length , vy / length , 0.0 ] )
def connect ( self , datas = None ) : """Connects ` ` Pipers ` ` in the order input - > output . See ` ` Piper . connect ` ` . According to the pipes ( topology ) . If " datas " is given will connect the input ` ` Pipers ` ` to the input data see : ` ` Dagger . connect _ inputs ` ` . Argumensts : - datas ( sequence ) [ default : ` ` None ` ` ] valid sequence of input data . see : ` ` Dagger . connect _ inputs ` ` ."""
# if data connect inputs if datas : self . connect_inputs ( datas ) # connect the remaining pipers postorder = self . postorder ( ) self . log . debug ( '%s trying to connect in the order %s' % ( repr ( self ) , repr ( postorder ) ) ) for piper in postorder : if not piper . connected and self [ piper ] . nodes ( ) : # 1 . sort inputs by index in postorder inputs = [ p for p in postorder if p in self [ piper ] . nodes ( ) ] # 2 . sort postorder so that all parents come before children # mind that the top of a pipeline is a child ! inputs . sort ( cmp = self . children_after_parents ) # 2 . branch age sorted inputs piper . connect ( inputs ) self . log . debug ( '%s succesfuly connected' % repr ( self ) )
def get_portal_cik ( self , portal_name ) :
    """Look up the portal named ``portal_name`` and return its CIK."""
    portal = self . get_portal_by_name ( portal_name )
    # the key lives deep inside the portal structure: [2][1]['info']['key']
    return portal [ 2 ] [ 1 ] [ 'info' ] [ 'key' ]
def get_client_address ( self ) :
    """Build the AMQPS connection URI for this Event Hub.

    The SAS policy name and key are URL-quoted (``quote_plus``) so they
    can be embedded in the URI's userinfo section.

    NOTE: the original docstring claimed this returns "an auth token
    dictionary"; it actually returns the connection URI string.

    :rtype: str
    """
    return "amqps://{}:{}@{}.{}:5671/{}" . format (
        urllib . parse . quote_plus ( self . policy ) ,
        urllib . parse . quote_plus ( self . sas_key ) ,
        self . sb_name ,
        self . namespace_suffix ,
        self . eh_name )
async def save ( self , db ) :
    """Persist this object to Redis as a hash and refresh its index.

    Auto-generated columns without real data are filled in first;
    ``datetime`` values are serialised using ``DATETIME_FORMAT``.

    Returns the result of the Redis ``HMSET`` call.
    """
    kwargs = { }
    for col in self . _auto_columns :
        if not self . has_real_data ( col . name ) :
            # fill in missing auto-generated values (ids, timestamps, ...)
            kwargs [ col . name ] = await col . auto_generate ( db , self )
    self . __dict__ . update ( kwargs )
    # we have to delete the old index key, so load the currently stored
    # version of this object before overwriting it
    stale_object = await self . __class__ . load ( db , identifier = self . identifier ( ) )
    d = { k : ( v . strftime ( DATETIME_FORMAT ) if isinstance ( v , datetime ) else v ) for k , v in self . __dict__ . items ( ) }
    success = await db . hmset_dict ( self . redis_key ( ) , d )
    await self . save_index ( db , stale_object = stale_object )
    return success
def check_orthogonal ( angle ) :
    """Match a D-inf flow angle against the eight orthogonal directions.

    Based on the D8 flow-direction encoding used by ArcGIS.

    Returns a ``(taudem_dir, arcgis_dir)`` tuple: the matching TauDEM
    angle constant and the ArcGIS D8 power-of-two code, or ``(-1, -1)``
    when ``angle`` matches none of the eight directions.
    """
    # (TauDEM angle constant, ArcGIS D8 code), tested in the same order
    # as the original if/elif cascade
    direction_codes = ( ( FlowModelConst . e , 1 ) ,
                        ( FlowModelConst . ne , 128 ) ,
                        ( FlowModelConst . n , 64 ) ,
                        ( FlowModelConst . nw , 32 ) ,
                        ( FlowModelConst . w , 16 ) ,
                        ( FlowModelConst . sw , 8 ) ,
                        ( FlowModelConst . s , 4 ) ,
                        ( FlowModelConst . se , 2 ) )
    for taudem_dir , arcgis_dir in direction_codes :
        if MathClass . floatequal ( angle , taudem_dir ) :
            return taudem_dir , arcgis_dir
    return - 1 , - 1
def _to_binpoly ( x ) : '''Convert a Galois Field ' s number into a nice polynomial'''
if x <= 0 : return "0" b = 1 # init to 2 ^ 0 = 1 c = [ ] # stores the degrees of each term of the polynomials i = 0 # counter for b = 2 ^ i while x > 0 : b = ( 1 << i ) # generate a number power of 2 : 2 ^ 0 , 2 ^ 1 , 2 ^ 2 , . . . , 2 ^ i . Equivalent to b = 2 ^ i if x & b : # then check if x is divisible by the power of 2 . Equivalent to x % 2 ^ i = = 0 # If yes , then . . . c . append ( i ) # append this power ( i , the exponent , gives us the coefficient ) x ^= b # and compute the remainder of x / b i = i + 1 # increment to compute the next power of 2 return " + " . join ( [ "x^%i" % y for y in c [ : : - 1 ] ] )
def _advance_params ( self ) : """Explicitly generate new values for these parameters only when appropriate ."""
for p in [ 'x' , 'y' , 'direction' ] : self . force_new_dynamic_value ( p ) self . last_time = self . time_fn ( )
def as_protein ( structure , filter_residues = True ) :
    """Wrap a structure in a ``Bio.Struct.Protein`` object.

    Parameters:
        structure: a Biopython structure object.
        filter_residues (bool): when True, remove non-amino-acid residues
            (via the ``Bio.PDB.Polypeptide`` ``is_aa`` function).
            [Default: True]

    Returns:
        A new ``Protein`` structure object.
    """
    # imported lazily so the dependency is only required when used
    from ssbio . biopython . Bio . Struct . Protein import Protein
    protein = Protein . from_structure ( structure , filter_residues )
    return protein
def visit_copy_command ( element , compiler , ** kw ) :
    """Build the Redshift ``COPY`` SQL statement for a ``CopyCommand``.

    Translates the options carried by ``element`` into the COPY query
    text plus its bound parameters and returns the compiled statement.
    """
    qs = """COPY {table}{columns} FROM :data_location
WITH CREDENTIALS AS :credentials
{format}
{parameters}"""
    parameters = [ ]
    bindparams = [
        sa . bindparam ( 'data_location' , value = element . data_location , type_ = sa . String , ) ,
        sa . bindparam ( 'credentials' , value = element . credentials , type_ = sa . String , ) ,
    ]
    # --- file format clause ------------------------------------------
    if element . format == Format . csv :
        format_ = 'FORMAT AS CSV'
        if element . quote is not None :
            format_ += ' QUOTE AS :quote_character'
            bindparams . append ( sa . bindparam ( 'quote_character' , value = element . quote , type_ = sa . String , ) )
    elif element . format == Format . json :
        format_ = 'FORMAT AS JSON AS :json_option'
        bindparams . append ( sa . bindparam ( 'json_option' , value = element . path_file , type_ = sa . String , ) )
    elif element . format == Format . avro :
        format_ = 'FORMAT AS AVRO AS :avro_option'
        bindparams . append ( sa . bindparam ( 'avro_option' , value = element . path_file , type_ = sa . String , ) )
    elif element . format == Format . orc :
        format_ = 'FORMAT AS ORC'
    elif element . format == Format . parquet :
        format_ = 'FORMAT AS PARQUET'
    elif element . format == Format . fixed_width and element . fixed_width is None :
        # FIXEDWIDTH with no spec is a hard error; a valid spec is
        # emitted below through the generic fixed_width parameter branch
        raise sa_exc . CompileError (
            "'fixed_width' argument required for format 'FIXEDWIDTH'." )
    else :
        format_ = ''
    # --- per-option parameter clauses --------------------------------
    if element . delimiter is not None :
        parameters . append ( 'DELIMITER AS :delimiter_char' )
        bindparams . append ( sa . bindparam ( 'delimiter_char' , value = element . delimiter , type_ = sa . String , ) )
    if element . fixed_width is not None :
        parameters . append ( 'FIXEDWIDTH AS :fixedwidth_spec' )
        bindparams . append ( sa . bindparam ( 'fixedwidth_spec' , value = _process_fixed_width ( element . fixed_width ) , type_ = sa . String , ) )
    if element . compression is not None :
        parameters . append ( Compression ( element . compression ) . value )
    if element . manifest :
        parameters . append ( 'MANIFEST' )
    if element . accept_any_date :
        parameters . append ( 'ACCEPTANYDATE' )
    if element . accept_inv_chars is not None :
        parameters . append ( 'ACCEPTINVCHARS AS :replacement_char' )
        bindparams . append ( sa . bindparam ( 'replacement_char' , value = element . accept_inv_chars , type_ = sa . String ) )
    if element . blanks_as_null :
        parameters . append ( 'BLANKSASNULL' )
    if element . date_format is not None :
        parameters . append ( 'DATEFORMAT AS :dateformat_string' )
        bindparams . append ( sa . bindparam ( 'dateformat_string' , value = element . date_format , type_ = sa . String , ) )
    if element . empty_as_null :
        parameters . append ( 'EMPTYASNULL' )
    if element . encoding is not None :
        parameters . append ( 'ENCODING AS ' + Encoding ( element . encoding ) . value )
    if element . escape :
        parameters . append ( 'ESCAPE' )
    if element . explicit_ids :
        parameters . append ( 'EXPLICIT_IDS' )
    if element . fill_record :
        parameters . append ( 'FILLRECORD' )
    if element . ignore_blank_lines :
        parameters . append ( 'IGNOREBLANKLINES' )
    if element . ignore_header is not None :
        parameters . append ( 'IGNOREHEADER AS :number_rows' )
        bindparams . append ( sa . bindparam ( 'number_rows' , value = element . ignore_header , type_ = sa . Integer , ) )
    if element . dangerous_null_delimiter is not None :
        # interpolated directly into the SQL text (hence "dangerous")
        parameters . append ( "NULL AS '%s'" % element . dangerous_null_delimiter )
    if element . remove_quotes :
        parameters . append ( 'REMOVEQUOTES' )
    if element . roundec :
        parameters . append ( 'ROUNDEC' )
    if element . time_format is not None :
        parameters . append ( 'TIMEFORMAT AS :timeformat_string' )
        bindparams . append ( sa . bindparam ( 'timeformat_string' , value = element . time_format , type_ = sa . String , ) )
    if element . trim_blanks :
        parameters . append ( 'TRIMBLANKS' )
    if element . truncate_columns :
        parameters . append ( 'TRUNCATECOLUMNS' )
    if element . comp_rows :
        parameters . append ( 'COMPROWS :numrows' )
        bindparams . append ( sa . bindparam ( 'numrows' , value = element . comp_rows , type_ = sa . Integer , ) )
    # three-state options: truthy -> ON, explicit falsy (not None) -> OFF
    if element . comp_update :
        parameters . append ( 'COMPUPDATE ON' )
    elif element . comp_update is not None :
        parameters . append ( 'COMPUPDATE OFF' )
    if element . max_error is not None :
        parameters . append ( 'MAXERROR AS :error_count' )
        bindparams . append ( sa . bindparam ( 'error_count' , value = element . max_error , type_ = sa . Integer , ) )
    if element . no_load :
        parameters . append ( 'NOLOAD' )
    if element . stat_update :
        parameters . append ( 'STATUPDATE ON' )
    elif element . stat_update is not None :
        parameters . append ( 'STATUPDATE OFF' )
    if element . region is not None :
        parameters . append ( 'REGION :region' )
        bindparams . append ( sa . bindparam ( 'region' , value = element . region , type_ = sa . String ) )
    columns = ' (%s)' % ', ' . join ( compiler . preparer . format_column ( column ) for column in element . columns ) if element . columns else ''
    qs = qs . format ( table = compiler . preparer . format_table ( element . table ) , columns = columns , format = format_ , parameters = '\n' . join ( parameters ) )
    return compiler . process ( sa . text ( qs ) . bindparams ( * bindparams ) , ** kw )
def virtual_machine_convert_to_managed_disks ( name , resource_group , ** kwargs ) :  # pylint: disable=invalid-name
    '''
    .. versionadded:: 2019.2.0

    Convert virtual machine disks from blob-based to managed disks.  The
    virtual machine must be stop-deallocated before invoking this
    operation.

    :param name: The name of the virtual machine to convert.

    :param resource_group: The resource group name assigned to the
        virtual machine.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_compute.virtual_machine_convert_to_managed_disks testvm testgroup
    '''
    compconn = __utils__ [ 'azurearm.get_client' ] ( 'compute' , ** kwargs )
    try :
        # long-running operation: block until Azure reports completion
        poller = compconn . virtual_machines . convert_to_managed_disks (
            resource_group_name = resource_group ,
            vm_name = name )
        poller . wait ( )
        result = poller . result ( ) . as_dict ( )
    except CloudError as exc :
        __utils__ [ 'azurearm.log_cloud_error' ] ( 'compute' , str ( exc ) , ** kwargs )
        result = { 'error' : str ( exc ) }
    return result
def memoize ( fn ) :
    """Reset-able memoization decorator for functions and methods.

    Assumes all arguments to the function can be hashed and compared.
    The wrapper exposes three attributes: ``cache`` (the underlying
    dict), ``clear_cache()`` (empties it) and ``make_cache_key`` (builds
    the key used internally, so callers can probe the cache).
    """
    cache = { }

    @ wraps ( fn )
    def wrapped_fn ( * args , ** kwargs ) :
        key = _memoize_cache_key ( args , kwargs )
        if key not in cache :
            cache [ key ] = fn ( * args , ** kwargs )
        return cache [ key ]

    # Needed to ensure that e.g. EnsemblRelease.clear_cache is able to
    # clear memoized values from each of its methods
    wrapped_fn . clear_cache = cache . clear
    # expose the cache so callers can check if an item was computed
    wrapped_fn . cache = cache
    # callers needing to probe the cache must build the same key
    wrapped_fn . make_cache_key = _memoize_cache_key
    return wrapped_fn
def _merge_intervals ( self , min_depth ) :
    """Merge overlapping intervals in ``self._intervals``.

    This method is called only once, in the constructor.  When
    ``min_depth`` is given, each merged interval is additionally chopped
    into pieces aligned on the cell size at that depth.
    """
    def add_interval ( ret , start , stop ) :
        if min_depth is not None :
            # cell size at min_depth: 4**(29 - min_depth) values
            shift = 2 * ( 29 - min_depth )
            mask = ( int ( 1 ) << shift ) - 1
            if stop - start < mask :
                # shorter than one cell: keep as-is
                ret . append ( ( start , stop ) )
            else :
                ofs = start & mask
                st = start
                if ofs > 0 :
                    # leading partial cell up to the next boundary
                    st = ( start - ofs ) + ( mask + 1 )
                    ret . append ( ( start , st ) )
                # whole aligned cells
                while st + mask + 1 < stop :
                    ret . append ( ( st , st + mask + 1 ) )
                    st = st + mask + 1
                # trailing remainder
                ret . append ( ( st , stop ) )
        else :
            ret . append ( ( start , stop ) )
    ret = [ ]
    start = stop = None
    # Use numpy sort method
    # NOTE(review): axis=0 sorts the start and stop columns
    # independently; presumably valid for the interval sets stored here
    # -- confirm against the constructor's invariants.
    self . _intervals . sort ( axis = 0 )
    for itv in self . _intervals :
        if start is None :
            start , stop = itv
            continue
        # gap between intervals
        if itv [ 0 ] > stop :
            add_interval ( ret , start , stop )
            start , stop = itv
        else :
            # merge intervals
            if itv [ 1 ] > stop :
                stop = itv [ 1 ]
    if start is not None and stop is not None :
        add_interval ( ret , start , stop )
    self . _intervals = np . asarray ( ret )
def user_stats ( request ) :
    """Get user statistics for selected groups of items.

    GET parameters:
        time: time in format '%Y-%m-%d_%H:%M:%S' used for practicing
        user: identifier of the user (only for staff users)
        username: username of user (only for users with public profile)
        filters: use this, or the request body JSON, as the filters
        mastered: use the model to compute the number of mastered items
            -- can be slow
        language: language of the items

    BODY: JSON in the following format::

        {"#identifier": []}    -- custom identifier (str) and filter
    """
    timer ( 'user_stats' )
    response = { }
    data = None
    # filters may arrive either in the POST body or as a GET parameter
    if request . method == "POST" :
        data = json . loads ( request . body . decode ( "utf-8" ) ) [ "filters" ]
    if "filters" in request . GET :
        data = load_query_json ( request . GET , "filters" )
    if data is None :
        # no filters given: render the self-describing help page
        return render_json ( request , { } , template = 'models_user_stats.html' , help_text = user_stats . __doc__ )
    environment = get_environment ( )
    if is_time_overridden ( request ) :
        environment . shift_time ( get_time ( request ) )
    user_id = get_user_id ( request )
    language = get_language ( request )
    filter_names , filter_filters = list ( zip ( * sorted ( data . items ( ) ) ) )
    reachable_leaves = Item . objects . filter_all_reachable_leaves_many ( filter_filters , language )
    all_leaves = sorted ( list ( set ( flatten ( reachable_leaves ) ) ) )
    answers = environment . number_of_answers_more_items ( all_leaves , user_id )
    correct_answers = environment . number_of_correct_answers_more_items ( all_leaves , user_id )
    if request . GET . get ( "mastered" ) :
        # optional, potentially slow: predict mastery for every leaf
        timer ( 'user_stats_mastered' )
        mastery_threshold = get_mastery_trashold ( )
        predictions = Item . objects . predict_for_overview ( environment , user_id , all_leaves )
        mastered = dict ( list ( zip ( all_leaves , [ p >= mastery_threshold for p in predictions ] ) ) )
        LOGGER . debug ( "user_stats - getting predictions for items took %s seconds" , ( timer ( 'user_stats_mastered' ) ) )
    # aggregate per-filter counts over the leaves reachable by each filter
    for identifier , items in zip ( filter_names , reachable_leaves ) :
        if len ( items ) == 0 :
            response [ identifier ] = {
                "filter" : data [ identifier ] ,
                "number_of_items" : 0 ,
            }
        else :
            response [ identifier ] = {
                "filter" : data [ identifier ] ,
                "number_of_items" : len ( items ) ,
                "number_of_practiced_items" : sum ( answers [ i ] > 0 for i in items ) ,
                "number_of_answers" : sum ( answers [ i ] for i in items ) ,
                "number_of_correct_answers" : sum ( correct_answers [ i ] for i in items ) ,
            }
            if request . GET . get ( "mastered" ) :
                response [ identifier ] [ "number_of_mastered_items" ] = sum ( mastered [ i ] for i in items )
    return render_json ( request , response , template = 'models_user_stats.html' , help_text = user_stats . __doc__ )
def import_key ( ctx , slot , management_key , pin , private_key , pin_policy , touch_policy , password ) :
    """Import a private key.

    Write a private key to one of the slots on the YubiKey.

    \b
    SLOT         PIV slot to import the private key to.
    PRIVATE-KEY  File containing the private key. Use '-' to use stdin.
    """
    dev = ctx . obj [ 'dev' ]
    controller = ctx . obj [ 'controller' ]
    _ensure_authenticated ( ctx , controller , pin , management_key )
    data = private_key . read ( )
    # retry loop: keep prompting until the key parses with the given
    # password (or with no password at all)
    while True :
        if password is not None :
            password = password . encode ( )
        try :
            private_key = parse_private_key ( data , password )
        except ( ValueError , TypeError ) :
            if password is None :
                # key appears encrypted and no password was supplied yet
                password = click . prompt ( 'Enter password to decrypt key' , default = '' , hide_input = True , show_default = False , err = True )
                continue
            else :
                # supplied password failed; reset and prompt again
                password = None
                click . echo ( 'Wrong password.' )
            continue
        break
    if pin_policy :
        pin_policy = PIN_POLICY . from_string ( pin_policy )
    if touch_policy :
        touch_policy = TOUCH_POLICY . from_string ( touch_policy )
    _check_pin_policy ( ctx , dev , controller , pin_policy )
    _check_touch_policy ( ctx , controller , touch_policy )
    _check_key_size ( ctx , controller , private_key )
    controller . import_key ( slot , private_key , pin_policy , touch_policy )
def parse_comment ( doc_comment , next_line ) :
    r"""Split the raw comment text into a dictionary of tags.

    The main comment body is included under the key ``'doc'``;
    ``'guessed_function'`` and ``'guessed_params'`` are inferred from
    ``next_line``.  If several tags share a name, their bodies are
    collected into a list.
    """
    # BUG FIX: use a raw string so ``\s`` is a regex escape rather than
    # an invalid (deprecated) string escape.
    sections = re . split ( r'\n\s*@' , doc_comment )
    tags = {
        'doc' : sections [ 0 ] . strip ( ) ,
        'guessed_function' : guess_function_name ( next_line ) ,
        'guessed_params' : guess_parameters ( next_line )
    }
    for section in sections [ 1 : ] :
        tag , body = split_tag ( section )
        if tag in tags :
            existing = tags [ tag ]
            try :
                # already a list: append the new body
                existing . append ( body )
            except AttributeError :
                # second occurrence: promote the value to a list
                tags [ tag ] = [ existing , body ]
        else :
            tags [ tag ] = body
    return tags
def find_elements_by_class_name ( self , name ) :
    """Find all elements with the given CSS class name.

    :Args:
     - name: The class name of the elements to find.

    :Returns:
     - list of WebElement - the matching elements; an empty list if none
       were found.

    :Usage:
        elements = driver.find_elements_by_class_name('foo')
    """
    locator = By . CLASS_NAME
    return self . find_elements ( by = locator , value = name )
def get_continent ( self , callsign , timestamp = timestamp_now ) :
    """Return the continent identifier of a callsign.

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        str: continent identifier

    Raises:
        KeyError: No continent found for callsign

    Note:
        The identifiers are: EU (Europe), NA (North America),
        SA (South America), AS (Asia), AF (Africa), OC (Oceania) and
        AN (Antarctica).
    """
    data = self . get_all ( callsign , timestamp )
    return data [ const . CONTINENT ]
def get_rates ( self , mmin , mmax = np . inf ) :
    """Accumulate cumulative rates greater than ``mmin`` over all sources.

    :param float mmin: Minimum magnitude
    :param float mmax: Maximum magnitude (default: no upper bound)
    """
    nsrcs = self . number_sources ( )
    for iloc , source in enumerate ( self . source_model ) :
        print ( "Source Number %s of %s, Name = %s, Typology = %s" % (
            iloc + 1 , nsrcs , source . name , source . __class__ . __name__ ) )
        # All three fault typologies share the same rate computation, so
        # the original's three identical branches are merged here.
        if isinstance ( source , ( CharacteristicFaultSource ,
                                  ComplexFaultSource ,
                                  SimpleFaultSource ) ) :
            self . _get_fault_rates ( source , mmin , mmax )
        elif isinstance ( source , AreaSource ) :
            self . _get_area_rates ( source , mmin , mmax )
        elif isinstance ( source , PointSource ) :
            self . _get_point_rates ( source , mmin , mmax )
        else :
            print ( "Source type %s not recognised - skipping!" % source )
            continue
def write_to_file ( data , path ) :
    """Export extracted invoice fields to an XML file.

    ``.xml`` is appended to ``path`` when missing.  Only the ``date``,
    ``desc``, ``amount`` and ``currency`` fields are exported.

    Parameters
    ----------
    data : iterable of dict
        Extracted fields, one dict per invoice line.
    path : str
        Target file name; the containing directory must exist.

    Examples
    --------
    >>> from invoice2data.output import to_xml
    >>> to_xml.write_to_file(data, "/exported_xml/invoice.xml")
    >>> to_xml.write_to_file(data, "invoice.xml")
    """
    filename = path if path . endswith ( '.xml' ) else path + '.xml'
    tag_data = ET . Element ( 'data' )
    for i , line in enumerate ( data , start = 1 ) :
        tag_item = ET . SubElement ( tag_data , 'item' )
        tag_item . set ( 'id' , str ( i ) )
        # child order matters for the serialised output
        ET . SubElement ( tag_item , 'date' ) . text = line [ 'date' ] . strftime ( '%d/%m/%Y' )
        ET . SubElement ( tag_item , 'desc' ) . text = line [ 'desc' ]
        ET . SubElement ( tag_item , 'currency' ) . text = line [ 'currency' ]
        ET . SubElement ( tag_item , 'amount' ) . text = str ( line [ 'amount' ] )
    # BUG FIX: the original left the file handle open if serialization
    # raised; a context manager guarantees it is closed.
    with open ( filename , "w" ) as xml_file :
        xml_file . write ( prettify ( tag_data ) )
def merge_list ( self , new_list ) :
    """Add new CM server addresses to the server list.

    :param new_list: a list of ``(ip, port)`` tuples
    :type new_list: :class:`list`
    """
    size_before = len ( self . list )
    for ip , port in new_list :
        address = ( ip , port )
        if address not in self . list :
            self . mark_good ( address )
    added = len ( self . list ) - size_before
    if added > 0 :
        self . _LOG . debug ( "Added %d new CM addresses." % added )
def get_md5 ( self , filename ) :
    """Return the hex MD5 checksum of the file at ``filename``.

    The file is read in fixed-size chunks so arbitrarily large files can
    be hashed without loading them fully into memory (the original read
    the whole file at once).
    """
    digest = hashlib . md5 ( )
    with open ( filename , 'rb' ) as f :
        for chunk in iter ( lambda : f . read ( 65536 ) , b'' ) :
            digest . update ( chunk )
    return digest . hexdigest ( )
def main ( ) : """main method"""
# initialize parser usage = "usage: %prog [-u USER] [-p PASSWORD] [-t TITLE] [-s selection] url" parser = OptionParser ( usage , version = "%prog " + instapaperlib . __version__ ) parser . add_option ( "-u" , "--user" , action = "store" , dest = "user" , metavar = "USER" , help = "instapaper username" ) parser . add_option ( "-p" , "--password" , action = "store" , dest = "password" , metavar = "USER" , help = "instapaper password" ) parser . add_option ( "-t" , "--title" , action = "store" , dest = "title" , metavar = "TITLE" , help = "title of the link to add" ) parser . add_option ( "-s" , "--selection" , action = "store" , dest = "selection" , metavar = "SELECTION" , help = "short text for description" ) ( options , args ) = parser . parse_args ( ) if not len ( args ) > 0 : parser . error ( "What do you want to read later?" ) if not options . user : # auth regex login = re . compile ( "(.+?):(.+)" ) try : config = open ( os . path . expanduser ( "~" ) + "/.instapaperrc" ) for line in config : matches = login . match ( line ) if matches : user = matches . group ( 1 ) . strip ( ) password = matches . group ( 2 ) . strip ( ) except IOError : parser . error ( "No login information present." ) sys . exit ( - 1 ) else : user = options . user # make sure all parameters are present if not options . password : password = getpass ( ) else : password = options . password ( status , text ) = instapaperlib . add_item ( user , password , args [ 0 ] , options . title , options . selection ) print text
def hook ( self , function , dependencies = None ) :
    """Register *function* as an event hook, honouring dependencies.

    Args:
        function (callable): called when the event fires.
    Kwargs:
        dependencies (str or iterable of str): modules whose hooks must
            already be loaded before this one is attached.
    Raises:
        TypeError: if ``dependencies`` is not a string, iterable or None.

    Note that dependencies are module-wide: if ``parent.foo`` and
    ``parent.bar`` are both subscribed to the ``example`` event and
    ``child`` lists ``parent`` as a dependency, **both** ``foo`` and
    ``bar`` must be called for the dependency to be resolved.
    """
    if not isinstance ( dependencies , ( Iterable , type ( None ) , str ) ) :
        raise TypeError ( "Invalid list of dependencies provided!" )
    # Tag the function with its dependencies (only on first sight)
    if not hasattr ( function , "__deps__" ) :
        function . __deps__ = dependencies
    # If a module is loaded before its dependencies, park it in _later
    if self . isloaded ( function . __deps__ ) :
        self . append ( function )
    else :
        self . _later . append ( function )
    # After each load, retry the deferred hooks.
    # BUG FIX: the original removed items from self._later while
    # iterating over it, which skips elements.  Iterate over a snapshot
    # and re-check membership, since a recursive hook() call may already
    # have resolved an entry.
    for deferred in list ( self . _later ) :
        if deferred in self . _later and self . isloaded ( deferred . __deps__ ) :
            self . _later . remove ( deferred )
            self . hook ( deferred )
def get_neuroglancer_link ( self , resource , resolution , x_range , y_range , z_range , ** kwargs ) :
    """Build a neuroglancer link for the specified cutout, using the host
    configured in the remote configuration step.

    Args:
        resource (intern.resource.Resource): Resource compatible with
            cutout operations.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): e.g. [10, 20] means 10 <= x < 20.
        y_range (list[int]): e.g. [10, 20] means 10 <= y < 20.
        z_range (list[int]): e.g. [10, 20] means 10 <= z < 20.

    Returns:
        (string): The neuroglancer link.

    Raises:
        RuntimeError when given an invalid resource.  Other exceptions
        may be raised depending on the volume service's implementation.
    """
    volume_service = self . _volume
    return volume_service . get_neuroglancer_link (
        resource , resolution , x_range , y_range , z_range , ** kwargs )
def get_distribution_names ( self ) :
    """Return the set of all distribution names known to this locator.

    Raises ``DistlibException`` when the index page cannot be fetched.
    """
    page = self . get_page ( self . base_url )
    if not page :
        raise DistlibException ( 'Unable to get %s' % self . base_url )
    # one name per regex match on the index page
    return { m . group ( 1 ) for m in self . _distname_re . finditer ( page . data ) }
def _parent_changed ( self , parent ) : """From Parentable : Called when the parent changed update the constraints and priors view , so that constraining is automized for the parent ."""
from . index_operations import ParameterIndexOperationsView # if getattr ( self , " _ in _ init _ " ) : # import ipdb ; ipdb . set _ trace ( ) # self . constraints . update ( param . constraints , start ) # self . priors . update ( param . priors , start ) offset = parent . _offset_for ( self ) for name , iop in list ( self . _index_operations . items ( ) ) : self . remove_index_operation ( name ) self . add_index_operation ( name , ParameterIndexOperationsView ( parent . _index_operations [ name ] , offset , self . size ) ) self . _fixes_ = None for p in self . parameters : p . _parent_changed ( parent )
def disconnect ( self , close = True ) :
    """Log off the SMB session.

    :param close: when True, also close every open file and disconnect
        every tree connect belonging to this session before logging off.
    """
    if not self . _connected :
        # never connected, or already logged off
        return
    if close :
        for open_file in list ( self . open_table . values ( ) ) :
            open_file . close ( False )
        for tree in list ( self . tree_connect_table . values ( ) ) :
            tree . disconnect ( )
    log . info ( "Session: %s - Logging off of SMB Session" % self . username )
    logoff = SMB2Logoff ( )
    log . info ( "Session: %s - Sending Logoff message" % self . username )
    log . debug ( str ( logoff ) )
    request = self . connection . send ( logoff , sid = self . session_id )
    log . info ( "Session: %s - Receiving Logoff response" % self . username )
    response = self . connection . receive ( request )
    logoff_resp = SMB2Logoff ( )
    logoff_resp . unpack ( response [ 'data' ] . get_value ( ) )
    log . debug ( str ( logoff_resp ) )
    self . _connected = False
    del self . connection . session_table [ self . session_id ]
def lookupEncoding ( encoding ) :
    """Return the python codec name corresponding to an encoding, or
    None if the input doesn't correspond to a valid encoding.

    ``None`` is returned when the input is None, is undecodable bytes,
    or is not a known encoding name.
    """
    if encoding is None :
        return None
    if isinstance ( encoding , binary_type ) :
        try :
            encoding = encoding . decode ( "ascii" )
        except UnicodeDecodeError :
            return None
    try :
        return webencodings . lookup ( encoding )
    except AttributeError :
        return None
def _read_openjp2_common ( self ) :
    """Read a JPEG 2000 image using libopenjp2.

    Returns
    -------
    ndarray or list
        Either the image as an ndarray or a list of ndarrays, each item
        corresponding to one band.
    """
    with ExitStack ( ) as stack :
        filename = self . filename
        stream = opj2 . stream_create_default_file_stream ( filename , True )
        # register teardown callbacks immediately after each native
        # allocation; ExitStack runs them in reverse order on exit
        stack . callback ( opj2 . stream_destroy , stream )
        codec = opj2 . create_decompress ( self . _codec_format )
        stack . callback ( opj2 . destroy_codec , codec )
        opj2 . set_error_handler ( codec , _ERROR_CALLBACK )
        opj2 . set_warning_handler ( codec , _WARNING_CALLBACK )
        if self . _verbose :
            opj2 . set_info_handler ( codec , _INFO_CALLBACK )
        else :
            opj2 . set_info_handler ( codec , None )
        opj2 . setup_decoder ( codec , self . _dparams )
        raw_image = opj2 . read_header ( stream , codec )
        stack . callback ( opj2 . image_destroy , raw_image )
        if self . _dparams . nb_tile_to_decode :
            # decode only the requested tile
            opj2 . get_decoded_tile ( codec , stream , raw_image , self . _dparams . tile_index )
        else :
            # decode the configured sub-area
            opj2 . set_decode_area ( codec , raw_image , self . _dparams . DA_x0 , self . _dparams . DA_y0 , self . _dparams . DA_x1 , self . _dparams . DA_y1 )
            opj2 . decode ( codec , stream , raw_image )
        opj2 . end_decompress ( codec , stream )
        # extract before the callbacks destroy raw_image
        image = self . _extract_image ( raw_image )
    return image
def set_level ( self , level , realms ) :
    """Set this realm's level in the realms hierarchy and recurse.

    Recursively assigns ``level + 1`` to every realm listed in
    ``realm_members``, collecting all transitive sub-member uuids and
    names into ``all_sub_members`` / ``all_sub_members_names``.

    :return: list of all sub-member realm names (grandchildren included)
    """
    self . level = level
    if not self . level :
        logger . info ( "- %s" , self . get_name ( ) )
    else :
        # indent the log line proportionally to the depth
        logger . info ( " %s %s" , '+' * self . level , self . get_name ( ) )
    self . all_sub_members = [ ]
    self . all_sub_members_names = [ ]
    for child in sorted ( self . realm_members ) :
        child = realms . find_by_name ( child )
        if not child :
            # unknown member name: skip silently
            continue
        self . all_sub_members . append ( child . uuid )
        self . all_sub_members_names . append ( child . get_name ( ) )
        # recurse; the child returns its own transitive sub-member names
        grand_children = child . set_level ( self . level + 1 , realms )
        for grand_child in grand_children :
            if grand_child in self . all_sub_members_names :
                # already collected via another branch
                continue
            grand_child = realms . find_by_name ( grand_child )
            if grand_child :
                self . all_sub_members_names . append ( grand_child . get_name ( ) )
                self . all_sub_members . append ( grand_child . uuid )
    return self . all_sub_members_names
def best_detection(detections, predictions, minimum_overlap=0.2, relative_prediction_threshold=0.25):
    """Compute the best detection by merging overlapping detections.

    Only detections with a positive prediction are considered.  Those are
    restricted to the ones overlapping the strongest detection by at least
    ``minimum_overlap`` (Jaccard similarity), and the weighted average box
    is returned together with its weighted prediction value.

    :param detections: the detected ``BoundingBox`` objects
    :param predictions: the prediction value for each detection
    :param minimum_overlap: minimum Jaccard similarity with the best
        detection for a box to take part in the average (0..1)
    :param relative_prediction_threshold: limits the boxes to those with a
        prediction above ``relative_prediction_threshold * max(predictions)``
    :return: the merged bounding box and its weighted prediction value
    :raises ValueError: if no detection has a prediction value > 0
    """
    # Drop non-positive predictions; they would corrupt the weighting.
    kept = [(box, score) for box, score in zip(detections, predictions) if score > 0]
    if not kept:
        raise ValueError("No detections with a prediction value > 0 have been found")
    boxes = [box for box, _ in kept]
    scores = numpy.array([score for _, score in kept])
    # Keep only the boxes sufficiently overlapping with the best detection.
    boxes, scores = overlapping_detections(boxes, scores, minimum_overlap)
    return average_detections(boxes, scores, relative_prediction_threshold)
def add_child_to_subtree(self, parent_word_id, tree):
    """Attach *tree* below the node whose ``word_id`` equals *parent_word_id*.

    Searches this node and, recursively, all of its children.  If no node
    with *parent_word_id* is found, the tree is left unchanged.
    """
    if self.word_id == parent_word_id:
        # This node is the requested parent: attach directly and stop.
        self.add_child_to_self(tree)
        return
    # Otherwise delegate the search to each child subtree (if any).
    for child in (self.children or []):
        child.add_child_to_subtree(parent_word_id, tree)
def new_signing_keys(self):
    """Access the new_signing_keys.

    Lazily builds the list the first time it is requested and caches it
    on the instance for subsequent accesses.

    :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyList
    :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyList
    """
    cached = self._new_signing_keys
    if cached is None:
        cached = NewSigningKeyList(self._version, account_sid=self._solution['sid'], )
        self._new_signing_keys = cached
    return cached
def _cimdatetime_constructor(loader, node):
    """PyYAML constructor function for CIMDateTime objects.

    This is needed for yaml.safe_load() to support CIMDateTime: the scalar
    node is read back as a string and parsed into a CIMDateTime instance.
    """
    scalar = loader.construct_scalar(node)
    return CIMDateTime(scalar)
def subscribe(self, transport, data):
    """Add *transport* as a listener on the channel named by
    ``data['hx_subscribe']`` and announce the subscription on that channel.
    """
    channel = data.get('hx_subscribe')
    self.add(transport, address=channel.encode())
    # Broadcast a notice on the channel so peers see the new listener.
    self.send(data['hx_subscribe'], {'message': "%r is listening" % transport})
def trace_plot(precisions, path, n_edges=20, ground_truth=None, edges=None):
    """Plot the change in precision (or covariance) coefficients as a
    function of changing lambda and l1-norm.  Always ignores diagonals.

    Parameters
    ----------
    precisions : array of len(path) 2D ndarray, shape (n_features, n_features)
        Either precision_ or covariance_ from an InverseCovariance estimator
        in path mode, or a list of results for individual GraphLasso runs.

    path : array of floats (descending)
        The path of lambdas explored.

    n_edges : int (default=20)
        Max number of edges to plot for each precision matrix along the path.
        Only the maximum magnitude values (from the last precision matrix)
        are plotted.

    ground_truth : 2D ndarray, shape (n_features, n_features) (default=None)
        If not None, plot the top n_edges // 2 false positive and top
        n_edges // 2 false negative indices when compared to ground_truth.

    edges : list (default=None)
        If non-empty, use these flat indices into precisions[0] to decide
        which coefficients to track; n_edges and ground_truth are then
        ignored.  (Default changed from a mutable ``[]`` to ``None``;
        passing ``[]`` behaves exactly as before.)
    """
    _check_path(path)
    assert len(path) == len(precisions)
    assert len(precisions) > 0
    path = np.array(path)
    dim, _ = precisions[0].shape

    # determine which indices to track
    if not edges:
        base_precision = np.copy(precisions[-1])
        # zero the diagonal and upper triangle so they are never selected
        base_precision[np.triu_indices(base_precision.shape[0])] = 0

        if ground_truth is None:
            # top n_edges strongest coefficients
            edges = np.argsort(np.abs(base_precision.flat))[::-1][:n_edges]
        else:
            # top n_edges // 2 false positives and negatives vs. the truth
            assert ground_truth.shape == precisions[0].shape
            masked_gt = np.copy(ground_truth)
            masked_gt[np.triu_indices(ground_truth.shape[0])] = 0

            intersection = np.intersect1d(
                np.nonzero(base_precision.flat)[0],
                np.nonzero(masked_gt.flat)[0])

            # BUGFIX: use integer division; ``n_edges / 2`` is a float in
            # Python 3 and raises TypeError when used as a slice bound.
            half = n_edges // 2

            # false positives: estimated edges absent from the ground truth
            fp_precision = np.copy(base_precision)
            fp_precision.flat[intersection] = 0
            fp_edges = np.argsort(np.abs(fp_precision.flat))[::-1][:half]

            # false negatives: true edges missing from the estimate
            fn_precision = np.copy(masked_gt)
            fn_precision.flat[intersection] = 0
            fn_edges = np.argsort(np.abs(fn_precision.flat))[::-1][:half]

            edges = list(fp_edges) + list(fn_edges)

    assert len(edges) < len(precisions[0].flat)
    assert np.max(edges) < len(precisions[0].flat)
    assert np.min(edges) >= 0

    # reshape data a bit:
    # flatten each matrix into a column (so that coeffs are examples)
    # compute l1-norm of each column
    l1_norms = []
    coeffs = np.zeros((dim ** 2, len(precisions)))
    for ridx, result in enumerate(precisions):
        coeffs[edges, ridx] = result.flat[edges]
        l1_norms.append(np.linalg.norm(coeffs[:, ridx]))

    # remove any zero rows
    coeffs = coeffs[np.linalg.norm(coeffs, axis=1) > 1e-10, :]

    plt.figure()

    # show coefficients as a function of the l1 norm
    plt.subplot(1, 2, 1)
    # BUGFIX: the original wrapped these plt.plot calls in
    # ``for result in precisions:`` which re-plotted the identical full
    # coefficient matrix once per precision matrix; one call suffices.
    plt.plot(l1_norms, coeffs.T, lw=1)
    plt.xlim([np.min(l1_norms), np.max(l1_norms)])
    plt.ylabel("Coefficients")
    plt.xlabel("l1 Norm")

    # show coefficients as a function of lambda
    log_path = np.log(path)
    plt.subplot(1, 2, 2)
    plt.plot(log_path, coeffs.T, lw=1)
    plt.xlim([np.min(log_path), np.max(log_path)])
    plt.ylabel("Coefficients")
    plt.xlabel("log-Lambda")

    plt.show()
    r_input("Press any key to continue.")
def cloned_workspace(clone_config, chdir=True):
    """Create a cloned workspace and yield it.

    Creates a workspace for a with-block and cleans it up on exit.  By
    default this also changes into the workspace's ``clone_dir`` for the
    duration of the with-block.

    Args:
        clone_config: The execution engine configuration to use for the
            workspace.
        chdir: Whether to change to the workspace's ``clone_dir`` before
            entering the with-block.

    Yields:
        The ``ClonedWorkspace`` instance created for the context.
    """
    ws = ClonedWorkspace(clone_config)
    # Remember where we were so the caller's cwd is always restored.
    saved_cwd = os.getcwd()
    if chdir:
        os.chdir(ws.clone_dir)
    try:
        yield ws
    finally:
        os.chdir(saved_cwd)
        ws.cleanup()
def parse_template_config(template_config_data):
    """Parse an e-mail template configuration dict into an EmailTemplateConfig.

    Recognised keys:

    * ``subject`` (required) -- subject template name.
    * ``html_body`` / ``text_body`` -- explicit per-part body templates.
    * ``body`` -- single body template; treated as HTML when ``is_html``
      is truthy, as plain text otherwise.

    Body resolution precedence: ``html_body`` + ``text_body`` together,
    then ``html_body`` alone (text derived by HTML-to-text conversion),
    then ``text_body`` alone, then ``body``.

    :raises ImproperlyConfigured: when ``subject`` is missing or no body
        template is given.
    :return: the parsed ``EmailTemplateConfig``
    """
    try:
        subject_template_name = template_config_data['subject']
    except KeyError:
        raise ImproperlyConfigured("No 'subject' key found")
    body_template_name = template_config_data.get('body')
    text_body_template_name = template_config_data.get('text_body')
    html_body_template_name = template_config_data.get('html_body')
    is_html_body = template_config_data.get('is_html')
    convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER  # noqa: E501
    if html_body_template_name and text_body_template_name:
        # Both parts supplied explicitly; no conversion needed.
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=text_body_template_name,
            html_body_template_name=html_body_template_name,
            text_body_processor=identity,
        )
    elif html_body_template_name:
        # Only HTML supplied: reuse it for the text part and convert.
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=html_body_template_name,
            html_body_template_name=html_body_template_name,
            text_body_processor=convert_html_to_text,
        )
    elif text_body_template_name:
        # Plain-text only e-mail.
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=text_body_template_name,
            html_body_template_name=None,
            text_body_processor=identity,
        )
    elif body_template_name:
        # Single 'body' template; is_html decides how it is treated.
        if is_html_body:
            config = EmailTemplateConfig(
                subject_template_name=subject_template_name,
                text_body_template_name=body_template_name,
                html_body_template_name=body_template_name,
                text_body_processor=convert_html_to_text,
            )
        else:
            config = EmailTemplateConfig(
                subject_template_name=subject_template_name,
                text_body_template_name=body_template_name,
                html_body_template_name=None,
                text_body_processor=identity,
            )
    else:
        raise ImproperlyConfigured(
            'Could not parse template config data: {template_config_data}'.format(  # noqa: E501
                template_config_data=template_config_data))
    # Fail fast if any referenced template does not actually exist.
    _validate_template_name_existence(config.subject_template_name)
    _validate_template_name_existence(config.text_body_template_name)
    if config.html_body_template_name:
        _validate_template_name_existence(config.html_body_template_name)
    assert callable(config.text_body_processor)
    return config
def description_of(lines, name='stdin'):
    """Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for line in lines:
        detector.feed(line)
    detector.close()
    result = detector.result
    if not result['encoding']:
        return '{0}: no result'.format(name)
    return '{0}: {1} with confidence {2}'.format(name, result['encoding'], result['confidence'])
def unshare_project(project_id, usernames, **kwargs):
    """Un-share a project with a list of users, identified by their usernames.

    :param project_id: the project to un-share
    :param usernames: iterable of usernames whose access is revoked
    :param kwargs: must carry ``user_id`` -- the acting user, who must
        have share permission on the project
    """
    user_id = kwargs.get('user_id')
    proj_i = _get_project(project_id)
    # The acting user needs share permission to revoke others' access.
    proj_i.check_share_permission(user_id)
    for username in usernames:
        user_i = _get_user(username)
        # Remove the ownership entry on the project itself.
        # BUGFIX: the original passed ``write=write, share=share`` where
        # ``write`` and ``share`` were undefined names, so every call
        # raised NameError before revoking anything.
        proj_i.unset_owner(user_i.id)
    db.DBSession.flush()
def retry_failure_fab_dev_delete(self, tenant_id, fw_data, fw_dict):
    """Retry the failure cases for delete.

    This module calls a routine in fabric to retry the failure cases for
    delete.  If the device is not successfully cfg/uncfg, it calls the
    device manager routine to cfg/uncfg the device.

    :param tenant_id: tenant whose firewall delete is being retried
    :param fw_data: DB record holding the current 'result' state
    :param fw_dict: firewall attribute dict; 'tenant_name' is filled here
    """
    # The stored result may carry a suffix in parentheses; keep the base
    # token only.
    result = fw_data.get('result').split('(')[0]
    name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
    fw_dict['tenant_name'] = name
    is_fw_virt = self.is_device_virtual()
    if result == fw_constants.RESULT_FW_DELETE_INIT:
        # Delete never progressed past init: the device config (if any)
        # still has to be removed first.
        if self.fwid_attr[tenant_id].is_fw_drvr_created():
            ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
            if ret:
                # Device portion
                self.update_fw_db_dev_status(fw_dict.get('fw_id'), '')
                self.fwid_attr[tenant_id].fw_drvr_created(False)
                LOG.info("Retry failue dev return success for delete"
                         " tenant %s", tenant_id)
            else:
                # Device cleanup still failing; retry again later.
                return
    # NOTE(review): project name is recomputed here exactly as in the
    # original; presumably redundant with the lookup above -- confirm.
    name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
    ret = self.fabric.retry_failure(tenant_id, name, fw_dict, is_fw_virt, result)
    if not ret:
        LOG.error("Retry failure returned fail for tenant %s", tenant_id)
        return
    # Fabric retry succeeded: mark the delete as done and purge all
    # bookkeeping for this firewall.
    result = fw_constants.RESULT_FW_DELETE_DONE
    self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
    self.delete_fw(fw_dict.get('fw_id'))
    self.fwid_attr[tenant_id].delete_fw(fw_dict.get('fw_id'))
    self.tenant_db.del_fw_tenant(fw_dict.get('fw_id'))
def delete_row(self, ind):
    """Remove the row at positional index *ind* from ``self.df`` in place.

    The frame is rebuilt from the rows before and after *ind*; note that
    ``sort=True`` keeps the columns in lexicographic order, matching the
    original behaviour.

    :param ind: positional (not label) index of the row to drop
    :return: the updated ``self.df``
    """
    head = self.df.iloc[:ind]
    tail = self.df.iloc[ind + 1:]
    self.df = pd.concat([head, tail], sort=True)
    return self.df
def ExtractCredentialsFromPathSpec(self, path_spec):
    """Extracts credentials from a path specification.

    For every credential identifier supported by the path specification,
    any value present on the path spec itself is registered via
    SetCredential.

    Args:
      path_spec (PathSpec): path specification to extract credentials from.
    """
    credentials = manager.CredentialsManager.GetCredentials(path_spec)
    for identifier in credentials.CREDENTIALS:
        value = getattr(path_spec, identifier, None)
        if value is not None:
            self.SetCredential(path_spec, identifier, value)
def _get_stream(self, multiprocess=False):
    """Get the stream used to store the flaky report.

    If this nose run is going to use the multiprocess plugin, use a
    multiprocess-list backed StringIO proxy; otherwise, use the default
    stream.

    :param multiprocess: Whether or not this test run is configured for
        multiprocessing.
    :type multiprocess: `bool`
    :return: The stream to use for storing the flaky report.
    :rtype: :class:`StringIO` or :class:`MultiprocessingStringIO`
    """
    if not multiprocess:
        return self._stream
    # Imported lazily so the multiprocessing backend is only required
    # when the multiprocess plugin is actually in use.
    from flaky.multiprocess_string_io import MultiprocessingStringIO
    return MultiprocessingStringIO()
def clone_with_new_elements(self, new_elements):
    """Create another VariantCollection of the same class and with the same
    state (including metadata) but possibly different entries.

    Warning: metadata is a dictionary keyed by variants.  This method
    leaves that dictionary as-is, which may result in extraneous entries
    or missing entries.
    """
    # Serialize current state, swap in the new variants, and rebuild.
    return self.from_dict({**self.to_dict(), "variants": new_elements})
def _on_status_message(self, sequence, topic, message):
    """Process a status message received.

    Args:
        sequence (int): The sequence number of the packet received
        topic (string): The topic this message was received on
        message (dict): The message itself
    """
    self._logger.debug("Received message on (topic=%s): %s" % (topic, message))
    # Map the topic back to the connection it belongs to; unknown topics
    # are dropped.
    try:
        conn_key = self._find_connection(topic)
    except ArgumentError:
        self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message)
        return
    if not messages.ConnectionResponse.matches(message):
        self._logger.warn("Dropping message that did not correspond with a known schema, message=%s", message)
        return
    if self.name != message['client']:
        # The response is addressed to some other client; ignore it.
        self._logger.debug("Connection response received for a different client, client=%s, name=%s", message['client'], self.name)
        return
    self.conns.finish_connection(conn_key, message['success'], message.get('failure_reason', None))
def aspects(self, obj):
    """Returns true if this star aspects another object.

    Fixed stars only aspect by conjunction, so the test is simply whether
    the angular separation to the object falls within this star's orb.
    """
    separation = angle.closestdistance(self.lon, obj.lon)
    return abs(separation) < self.orb()
def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10, cluster_selection_method='eom', allow_single_cluster=False, match_reference_implementation=False):
    """Convert a pretrained single-linkage tree and a minimum cluster size
    into cluster labels and membership probabilities.

    Returns a tuple of (labels, probabilities, stabilities, condensed tree,
    single linkage tree).
    """
    # Condense the dendrogram, score candidate clusters, then extract the
    # final flat clustering.
    condensed = condense_tree(single_linkage_tree, min_cluster_size)
    stability = compute_stability(condensed)
    labels, probabilities, stabilities = get_clusters(
        condensed,
        stability,
        cluster_selection_method,
        allow_single_cluster,
        match_reference_implementation,
    )
    return (labels, probabilities, stabilities, condensed, single_linkage_tree)
def create_object(self, alias, *args, **kwargs):
    """Constructs the type with the given alias using the given args and
    kwargs.

    NB: aliases may be the alias' object type itself if that type is known.

    :API: public
    :param alias: Either the type alias or the type itself.
    :type alias: string|type
    :param *args: These pass through to the underlying callable object.
    :param **kwargs: These pass through to the underlying callable object.
    :returns: The created object.
    :raises KeyError: if no type is registered for ``alias``.
    """
    factory = self._type_aliases.get(alias)
    if factory is None:
        raise KeyError('There is no type registered for alias {0}'.format(alias))
    return factory(*args, **kwargs)
def named(self, name, predicate=None, index=None):
    """Retrieves a set of Match objects that have the given name.

    :param name: name of the matches to retrieve
    :type name: str
    :param predicate: optional filter applied to each match
    :param index: optional index into the filtered matches
    :type index: int
    :return: set of matches
    :rtype: set[Match]
    """
    base = _BaseMatches._base(self._name_dict[name])
    return filter_index(base, predicate, index)
def url_fix_common_typos(url):
    """Fix common typos in given URL like forgotten colon."""
    # Known broken prefixes mapped to their corrected form.
    for bad, good in (("http//", "http://"), ("https//", "https://")):
        if url.startswith(bad):
            return good + url[len(bad):]
    return url
def gdalwarp(src, dst, options):
    """a simple wrapper for :osgeo:func:`gdal.Warp`

    Parameters
    ----------
    src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Warp;
        see :osgeo:func:`gdal.WarpOptions`

    Returns
    -------
    None
    """
    try:
        out = gdal.Warp(dst, src, options=gdal.WarpOptions(**options))
    except RuntimeError as e:
        # Re-raise with src/dst/options attached so failures are diagnosable.
        raise RuntimeError('{}:\n src: {}\n dst: {}\n options: {}'.format(str(e), src, dst, options))
    # Dereference the dataset handle so GDAL flushes and closes the output.
    out = None
def _write_html_pages(root, tlobjects, methods, layer, input_res):
    """Generates the documentation HTML files from ``scheme.tl``
    to ``/methods`` and ``/constructors``, etc.

    :param root: output directory (a Path) for the generated site
    :param tlobjects: all parsed TLObjects (functions and constructors)
    :param methods: known method metadata (names, RPC errors)
    :param layer: scheme layer number, substituted into the core index
    :param input_res: directory with the static resource templates
    """
    # Save 'Type: [Constructors]' for use in both:
    # * Seeing the return type or constructors belonging to the same type.
    # * Generating the types documentation, showing available constructors.
    paths = {k: root / v for k, v in (
        ('css', 'css'),
        ('arrow', 'img/arrow.svg'),
        ('search.js', 'js/search.js'),
        ('404', '404.html'),
        ('index_all', 'index.html'),
        ('bot_index', 'botindex.html'),
        ('index_types', 'types/index.html'),
        ('index_methods', 'methods/index.html'),
        ('index_constructors', 'constructors/index.html')
    )}
    paths['default_css'] = 'light'  # docs.<name>.css, local path

    # Group every TLObject by its result type, functions and constructors
    # kept apart.
    type_to_constructors = defaultdict(list)
    type_to_functions = defaultdict(list)
    for tlobject in tlobjects:
        d = type_to_functions if tlobject.is_function else type_to_constructors
        d[tlobject.result].append(tlobject)

    for t, cs in type_to_constructors.items():
        type_to_constructors[t] = list(sorted(cs, key=lambda c: c.name))

    methods = {m.name: m for m in methods}

    # Since the output directory is needed everywhere partially apply it now
    create_path_for = functools.partial(_get_path_for, root)
    path_for_type = lambda t: root / _get_path_for_type(t)

    # Pages usable by bots, collected for the bot-only index.
    bot_docs_paths = []

    # One page per TLObject (method or constructor).
    for tlobject in tlobjects:
        filename = create_path_for(tlobject)
        with DocsWriter(root, filename, path_for_type) as docs:
            docs.write_head(title=tlobject.class_name,
                            css_path=paths['css'],
                            default_css=paths['default_css'])

            # Create the menu (path to the current TLObject)
            docs.set_menu_separator(paths['arrow'])
            _build_menu(docs)

            # Create the page title
            docs.write_title(tlobject.class_name)

            if tlobject.is_function:
                # Note who (users / bots / both) may invoke the method.
                if tlobject.usability == Usability.USER:
                    start = '<strong>Only users</strong> can'
                elif tlobject.usability == Usability.BOT:
                    bot_docs_paths.append(filename)
                    start = '<strong>Only bots</strong> can'
                elif tlobject.usability == Usability.BOTH:
                    bot_docs_paths.append(filename)
                    start = '<strong>Both users and bots</strong> can'
                else:
                    bot_docs_paths.append(filename)
                    start = 'Both users and bots <strong>may</strong> be able to'
                docs.write_text('{} use this method. <a href="#examples">'
                                'See code examples.</a>'.format(start))

            # Write the code definition for this TLObject
            docs.write_code(tlobject)
            docs.write_copy_button('Copy import to the clipboard',
                                   get_import_code(tlobject))

            # Write the return type (or constructors belonging to the same type)
            docs.write_title('Returns' if tlobject.is_function
                             else 'Belongs to', level=3)

            generic_arg = next((arg.name for arg in tlobject.args
                                if arg.generic_definition), None)

            if tlobject.result == generic_arg:
                # We assume it's a function returning a generic type
                generic_arg = next(
                    (arg.name for arg in tlobject.args if arg.is_generic))
                docs.write_text('This function returns the result of whatever '
                                'the result from invoking the request passed '
                                'through <i>{}</i> is.'.format(generic_arg))
            else:
                if re.search('^vector<', tlobject.result, re.IGNORECASE):
                    docs.write_text('A list of the following type is returned.')
                    _, inner = tlobject.result.split('<')
                    inner = inner.strip('>')
                else:
                    inner = tlobject.result

                docs.begin_table(column_count=1)
                docs.add_row(inner, link=path_for_type(inner))
                docs.end_table()

                cs = type_to_constructors.get(inner, [])
                if not cs:
                    docs.write_text('This type has no instances available.')
                elif len(cs) == 1:
                    docs.write_text('This type can only be an instance of:')
                else:
                    docs.write_text('This type can be an instance of either:')

                docs.begin_table(column_count=2)
                for constructor in cs:
                    link = create_path_for(constructor)
                    docs.add_row(constructor.class_name, link=link)
                docs.end_table()

            # Return (or similar types) written. Now parameters/members
            docs.write_title('Parameters' if tlobject.is_function
                             else 'Members', level=3)

            # Sort the arguments in the same way they're sorted
            # on the generated code (flags go last)
            args = [a for a in tlobject.sorted_args()
                    if not a.flag_indicator and not a.generic_definition]

            if args:
                # Writing parameters
                docs.begin_table(column_count=3)
                for arg in args:
                    # Name row
                    docs.add_row(arg.name, bold=True)

                    # Type row
                    friendly_type = 'flag' if arg.type == 'true' else arg.type
                    if arg.is_generic:
                        docs.add_row('!' + friendly_type, align='center')
                    else:
                        docs.add_row(friendly_type, align='center',
                                     link=path_for_type(arg.type))

                    # Add a description for this argument
                    docs.add_row(_get_description(arg))
                docs.end_table()
            else:
                if tlobject.is_function:
                    docs.write_text('This request takes no input parameters.')
                else:
                    docs.write_text('This type has no members.')

            if tlobject.is_function:
                docs.write_title('Known RPC errors')
                method_info = methods.get(tlobject.fullname)
                errors = method_info and method_info.errors
                if not errors:
                    docs.write_text("This request can't cause any RPC error "
                                    "as far as we know.")
                else:
                    docs.write_text(
                        'This request can cause {} known error{}:'.format(
                            len(errors), '' if len(errors) == 1 else 's'))
                    docs.begin_table(column_count=2)
                    for error in errors:
                        docs.add_row('<code>{}</code>'.format(error.name))
                        docs.add_row('{}.'.format(error.description))
                    docs.end_table()
                    docs.write_text('You can import these from '
                                    '<code>telethon.errors</code>.')

                # NOTE(review): the exact line breaks inside the example
                # snippets below were lost in extraction and have been
                # reconstructed -- confirm against the rendered output.
                docs.write_title('Example', id='examples')
                docs.write('''<pre>\
<strong>from</strong> telethon.sync <strong>import</strong> TelegramClient
<strong>from</strong> telethon <strong>import</strong> functions, types

<strong>with</strong> TelegramClient(name, api_id, api_hash) <strong>as</strong> client:
    result = client(''')
                tlobject.as_example(docs, indent=1)
                docs.write(')\n')
                if tlobject.result.startswith('Vector'):
                    docs.write('''\
    <strong>for</strong> x <strong>in</strong> result:
        print(x''')
                else:
                    docs.write('    print(result')
                if tlobject.result != 'Bool' \
                        and not tlobject.result.startswith('Vector'):
                    docs.write('.stringify()')
                docs.write(')</pre>')

            depth = '../' * (2 if tlobject.namespace else 1)
            docs.add_script(src='prependPath = "{}";'.format(depth))
            docs.add_script(path=paths['search.js'])
            docs.end_body()

    # Find all the available types (which are not the same as the constructors)
    # Each type has a list of constructors associated to it, hence is a map
    for t, cs in type_to_constructors.items():
        filename = path_for_type(t)
        out_dir = filename.parent
        if out_dir:
            out_dir.mkdir(parents=True, exist_ok=True)

        # Since we don't have access to the full TLObject, split the type
        if '.' in t:
            namespace, name = t.split('.')
        else:
            namespace, name = None, t

        with DocsWriter(root, filename, path_for_type) as docs:
            docs.write_head(title=snake_to_camel_case(name),
                            css_path=paths['css'],
                            default_css=paths['default_css'])

            docs.set_menu_separator(paths['arrow'])
            _build_menu(docs)

            # Main file title
            docs.write_title(snake_to_camel_case(name))

            # List available constructors for this type
            docs.write_title('Available constructors', level=3)
            if not cs:
                docs.write_text('This type has no constructors available.')
            elif len(cs) == 1:
                docs.write_text('This type has one constructor available.')
            else:
                docs.write_text(
                    'This type has %d constructors available.' % len(cs))

            docs.begin_table(2)
            for constructor in cs:
                # Constructor full name
                link = create_path_for(constructor)
                docs.add_row(constructor.class_name, link=link)
            docs.end_table()

            # List all the methods which return this type
            docs.write_title('Methods returning this type', level=3)
            functions = type_to_functions.get(t, [])
            if not functions:
                docs.write_text('No method returns this type.')
            elif len(functions) == 1:
                docs.write_text('Only the following method returns this type.')
            else:
                docs.write_text(
                    'The following %d methods return this type as a result.'
                    % len(functions))

            docs.begin_table(2)
            for func in functions:
                link = create_path_for(func)
                docs.add_row(func.class_name, link=link)
            docs.end_table()

            # List all the methods which take this type as input
            docs.write_title('Methods accepting this type as input', level=3)
            other_methods = sorted(
                (u for u in tlobjects
                 if any(a.type == t for a in u.args) and u.is_function),
                key=lambda u: u.name)
            if not other_methods:
                docs.write_text(
                    'No methods accept this type as an input parameter.')
            elif len(other_methods) == 1:
                docs.write_text(
                    'Only this method has a parameter with this type.')
            else:
                docs.write_text(
                    'The following %d methods accept this type as an input '
                    'parameter.' % len(other_methods))

            docs.begin_table(2)
            for ot in other_methods:
                link = create_path_for(ot)
                docs.add_row(ot.class_name, link=link)
            docs.end_table()

            # List every other type which has this type as a member
            docs.write_title('Other types containing this type', level=3)
            other_types = sorted(
                (u for u in tlobjects
                 if any(a.type == t for a in u.args) and not u.is_function),
                key=lambda u: u.name)
            if not other_types:
                docs.write_text(
                    'No other types have a member of this type.')
            elif len(other_types) == 1:
                docs.write_text(
                    'You can find this type as a member of this other type.')
            else:
                docs.write_text(
                    'You can find this type as a member of any of '
                    'the following %d types.' % len(other_types))

            docs.begin_table(2)
            for ot in other_types:
                link = create_path_for(ot)
                docs.add_row(ot.class_name, link=link)
            docs.end_table()
            docs.end_body()

    # After everything's been written, generate an index.html per folder.
    # This will be done automatically and not taking into account any extra
    # information that we have available, simply a file listing all the others
    # accessible by clicking on their title
    for folder in ['types', 'methods', 'constructors']:
        _generate_index(root, root / folder, paths)

    _generate_index(root, root / 'methods', paths, True, bot_docs_paths)

    # Write the final core index, the main index for the rest of files
    types = set()
    methods = []
    cs = []
    for tlobject in tlobjects:
        if tlobject.is_function:
            methods.append(tlobject)
        else:
            cs.append(tlobject)

        if not tlobject.result.lower() in CORE_TYPES:
            if re.search('^vector<', tlobject.result, re.IGNORECASE):
                types.add(tlobject.result.split('<')[1].strip('>'))
            else:
                types.add(tlobject.result)

    types = sorted(types)
    methods = sorted(methods, key=lambda m: m.name)
    cs = sorted(cs, key=lambda c: c.name)

    shutil.copy(str(input_res / '404.html'), str(paths['404']))
    _copy_replace(input_res / 'core.html', paths['index_all'], {
        '{type_count}': len(types),
        '{method_count}': len(methods),
        '{constructor_count}': len(tlobjects) - len(methods),
        '{layer}': layer,
    })

    def fmt(xs):
        zs = {}  # create a dict to hold those which have duplicated keys
        for x in xs:
            zs[x.class_name] = x.class_name in zs
        return ', '.join(
            '"{}.{}"'.format(x.namespace, x.class_name)
            if zs[x.class_name] and x.namespace
            else '"{}"'.format(x.class_name) for x in xs)

    request_names = fmt(methods)
    constructor_names = fmt(cs)

    def fmt(xs, formatter):
        return ', '.join(
            '"{}"'.format(formatter(x)).replace(os.path.sep, '/')
            for x in xs)

    type_names = fmt(types, formatter=lambda x: x)

    # Local URLs shouldn't rely on the output's root, so set empty root
    get_path_for = functools.partial(_get_path_for, Path())
    request_urls = fmt(methods, get_path_for)
    type_urls = fmt(types, _get_path_for_type)
    constructor_urls = fmt(cs, get_path_for)

    paths['search.js'].parent.mkdir(parents=True, exist_ok=True)
    _copy_replace(input_res / 'js/search.js', paths['search.js'], {
        '{request_names}': request_names,
        '{type_names}': type_names,
        '{constructor_names}': constructor_names,
        '{request_urls}': request_urls,
        '{type_urls}': type_urls,
        '{constructor_urls}': constructor_urls
    })
def transform_pil_image(self, pil_image):
    """Scale *pil_image* down in place so that it fits within ``self.dimensions``.

    Uses :py:meth:`PIL.Image.Image.thumbnail`, which preserves aspect ratio and
    never upscales. A falsy entry (``None``/``0``) in ``self.dimensions`` means
    that axis is unconstrained.

    :param pil_image: the :class:`PIL.Image.Image` to resize (mutated in place).
    :return: the same image object, for call chaining.
    """
    # Falsy dimension -> no limit on that axis; also cap at the original size
    # so thumbnail() never needs to enlarge.
    max_width = min(self.dimensions[0] or float('inf'), pil_image.size[0])
    max_height = min(self.dimensions[1] or float('inf'), pil_image.size[1])
    max_dimensions = (max_width, max_height)
    # Image.LANCZOS is the same filter ANTIALIAS aliased; ANTIALIAS was
    # deprecated in Pillow 9.1 and removed in Pillow 10.
    pil_image.thumbnail(max_dimensions, Image.LANCZOS)
    return pil_image
def perform_permissions_check(self, user, obj, perms):
    """Performs the permissions check.

    Note: ``obj`` and ``perms`` are deliberately ignored here; access is
    decided solely on whether *user* may access the moderation queue.
    """
    handler = self.request.forum_permission_handler
    return handler.can_access_moderation_queue(user)
def disable_paging(self, delay_factor=1):
    """Disable paging is only available with specific roles so it may fail.

    Probes whether virtual domains (VDOMs) are enabled, and if so tries to
    enter ``config global`` scope (required to change console settings on a
    multi-VDOM unit). If that scope is denied, paging cannot be disabled and
    the SSH session is re-established with a tall terminal window as a
    work-around. Returns the accumulated device output from the probe and
    any configuration commands sent.
    """
    check_command = "get system status | grep Virtual"
    output = self.send_command_timing(check_command)
    # Optimistic defaults; revised below if the device denies global scope.
    self.allow_disable_global = True
    self.vdoms = False
    self._output_mode = "more"

    if "Virtual domain configuration: enable" in output:
        self.vdoms = True
        vdom_additional_command = "config global"
        output = self.send_command_timing(vdom_additional_command, delay_factor=2)
        if "Command fail" in output:
            # Role lacks global scope: cannot disable paging. Reconnect with a
            # large terminal height so pagination rarely triggers instead.
            self.allow_disable_global = False
            self.remote_conn.close()
            self.establish_connection(width=100, height=1000)

    new_output = ""
    if self.allow_disable_global:
        # Remember the current output mode so it can be restored on cleanup.
        self._retrieve_output_mode()
        disable_paging_commands = [
            "config system console",
            "set output standard",
            "end",
        ]
        # There is an extra 'end' required if in multi-vdoms are enabled
        if self.vdoms:
            disable_paging_commands.append("end")
        outputlist = [
            self.send_command_timing(command, delay_factor=2)
            for command in disable_paging_commands
        ]
        # Should test output is valid
        new_output = self.RETURN.join(outputlist)

    return output + new_output
def small_integer(self, column, auto_increment=False, unsigned=False):
    """Create a new small integer column on the table.

    :param column: The column
    :type column: str

    :type auto_increment: bool

    :type unsigned: bool

    :rtype: Fluent
    """
    options = {"auto_increment": auto_increment, "unsigned": unsigned}
    return self._add_column("small_integer", column, **options)
def sizes(args):
    """
    %prog sizes gaps.bed a.fasta b.fasta

    Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the
    results to the gap size estimates in b. The output is detailed below:

    Columns are:
    1.  A scaffold
    2.  Start position
    3.  End position
    4.  Gap identifier
    5.  Gap size in A (= End - Start)
    6.  Gap size in B (based on BLAST, see below)

    For each gap, I extracted the left and right sequence (mostly 2Kb, but can
    be shorter if it runs into another gap) flanking the gap. The flanker names
    look like gap.00003L and gap.00003R means the left and right flanker of
    this particular gap, respectively.

    The BLAST output is used to calculate the gap size. For each flanker
    sequence, I took the best hit, and calculate the inner distance between
    the L match range and R range. The two flankers must map with at least 98%
    identity, and in the same orientation.

    NOTE the sixth column in the list file is not always a valid number. Other
    values are:
    - na: both flankers are missing in B
    - Singleton: one flanker is missing
    - Different chr: flankers map to different scaffolds
    - Strand +|-: flankers map in different orientations
    - Negative value: the R flanker map before L flanker
    """
    from jcvi.formats.base import DictFile
    from jcvi.apps.align import blast

    parser = OptionParser(sizes.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 3:
        sys.exit(not parser.print_help())

    gapsbed, afasta, bfasta = args
    prefix = gapsbed.rsplit(".", 1)[0]
    extbed = prefix + ".ext.bed"
    extfasta = prefix + ".ext.fasta"
    # Regenerate the gap-flank sequences only when they are out of date.
    if need_update(gapsbed, extfasta):
        extbed, extfasta = flanks([gapsbed, afasta])

    qbase = op.basename(extfasta).split(".")[0]
    rbase = op.basename(bfasta).split(".")[0]
    blastfile = "{0}.{1}.blast".format(qbase, rbase)
    # Map the flanks onto b.fasta at high stringency (>= 98% identity).
    if need_update([extfasta, bfasta], blastfile):
        blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"])

    labelsfile = blast_to_twobeds(blastfile)
    labels = DictFile(labelsfile, delimiter='\t')

    # Emit one BED-like line per gap; column 6 falls back to "na" when the
    # gap has no size estimate in B.
    for b in Bed(gapsbed):
        b.score = b.span
        accn = b.accn
        row = (b.seqid, b.start - 1, b.end, accn, b.score, labels.get(accn, "na"))
        print("\t".join(str(x) for x in row))