signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def asof(self, label):
    """Return `label` if it is in the index, else the closest previous label.

    Assuming a sorted index, this is a thin wrapper around
    ``get_loc(..., method='pad')``: the passed label is returned when present,
    the previous label when absent, and ``NaN`` (``self._na_value``) when every
    index label lies after `label`.

    Parameters
    ----------
    label : object
        The label up to which the method returns the latest index label.

    Returns
    -------
    object
        The passed label, the previous label, or ``NaN``.
    """
    try:
        location = self.get_loc(label, method='pad')
    except KeyError:
        # Nothing at or before `label` exists in the index.
        return self._na_value
    # With duplicate labels `get_loc` may return a slice; use the last match.
    if isinstance(location, slice):
        location = location.indices(len(self))[-1]
    return self[location]
def save_load(jid, load, minions=None):
    '''Save the load to the specified jid id.'''
    insert_sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)'''
    with _get_serv(commit=True) as cur:
        try:
            cur.execute(insert_sql, (jid, salt.utils.json.dumps(load)))
        except MySQLdb.IntegrityError:
            # https://github.com/saltstack/salt/issues/22171
            # A duplicate jid row is harmless; without this guard the
            # duplicate-entry errors would stop job returns being stored.
            pass
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """Format the token stream ``tokens`` with ``formatter``.

    If ``outfile`` is given and is a valid file object (an object with a
    ``write`` method), the result is written to it and ``None`` is returned;
    otherwise the formatted output is returned as a string (or bytes when the
    formatter declares an ``encoding``).
    """
    try:
        if outfile:
            formatter.format(tokens, outfile)
            return
        # Choose a buffer type matching what the formatter emits.
        buffer = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
        formatter.format(tokens, buffer)
        return buffer.getvalue()
    except TypeError as err:
        first_arg = err.args[0]
        if isinstance(first_arg, str) and (
                'unbound method format' in first_arg
                or 'missing 1 required positional argument' in first_arg):
            # Most likely cause: the caller passed the formatter *class*
            # instead of an instance of it.
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
def get_templates(path: Path) -> List[str]:
    '''List all files in the ``templates`` directory, recursively.

    The resulting list contains UNIX-like relative paths starting with
    ``templates``.  Files whose name starts with ``_`` are skipped.
    '''
    return [
        entry.relative_to(path.parent).as_posix()
        for entry in path.glob('**/*')
        if entry.is_file() and not entry.name.startswith('_')
    ]
def get_contributors(gh, repo_id):
    """Get list of contributors to a repository.

    Fetches the repository's contributor list over HTTP, resolves each
    contributor's display name and company, and returns them sorted by
    contribution count (descending).  Returns ``None`` on any failure.

    NOTE(review): on Python 3 the returned value is a ``filter`` iterator,
    not a list — confirm whether callers expect a list.
    """
    try:
        # FIXME: Use `github3.Repository.contributors` to get this information
        contrib_url = gh.repository_with_id(repo_id).contributors_url
        r = requests.get(contrib_url)
        if r.status_code == 200:
            contributors = r.json()

            def get_author(contributor):
                # Resolve a contributor entry to a name/affiliation dict;
                # returns None implicitly when the profile fetch fails.
                r = requests.get(contributor['url'])
                if r.status_code == 200:
                    data = r.json()
                    return dict(
                        name=(data['name']
                              if 'name' in data and data['name']
                              else data['login']),
                        affiliation=data.get('company') or '',
                    )

            # Sort according to number of contributions
            contributors.sort(key=itemgetter('contributions'))
            # Reverse for descending order; keep only real user accounts.
            contributors = [get_author(x) for x in reversed(contributors)
                            if x['type'] == 'User']
            # Drop entries whose profile fetch failed.
            contributors = filter(lambda x: x is not None, contributors)
            return contributors
    except Exception:
        # Best-effort helper: any failure (network, schema) yields None.
        return None
def is_period_alias(period):
    """Check if a given period is possibly an alias.

    Parameters
    ----------
    period : float
        A period to test if it is a possible alias or not.

    Returns
    -------
    is_alias : boolean
        True if the given period is in a range of period alias.
    """
    # Based on the period vs period-SN plot of the EROS-2 dataset (Kim+ 2014):
    # aliases occur mostly at ~1 day and ~30 days, plus empirically determined
    # one-day/sidereal windows from the two fields 01, 08 and the entire LMC
    # fields (some windows overlap).  Endpoints are (low, high) in days.
    alias_windows = (
        (.99, 1.004), (1.03, 1.04), (29.2, 29.9),
        (0.96465, 0.96485), (0.96725, 0.96745), (0.98190, 0.98230),
        (1.01034, 1.01076), (1.01568, 1.01604), (1.01718, 1.01742),
        (0.50776, 0.50861), (0.96434, 0.9652), (0.96688, 0.96731),
        (1.0722, 1.0729), (27.1, 27.5),
    )
    # Check harmonics 1..5 of every window.
    for factor in range(1, 6):
        for low, high in alias_windows:
            if low / float(factor) < period < high / float(factor):
                return True
    # Not in the range of any alias.
    return False
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False):
    """Generate a model from the global ``SPECIES_ATTRIBUTES`` and
    ``REACTION_RULES``.

    Parameters
    ----------
    is_netfree : bool, optional
        Return a ``NetfreeModel`` if True, a ``NetworkModel`` otherwise.
        Default is False.
    without_reset : bool, optional
        Do not reset the global variables after generation. Default is False.
    seeds : list, optional
        Seed ``Species`` for expanding the model.  When given, a
        ``NetfreeModel`` is built first and its expansion (a ``NetworkModel``)
        is returned.  Default is None.
    effective : bool, optional
        See ``NetfreeModel.effective``; only meaningful with
        ``is_netfree=True``.  Default is False.

    Returns
    -------
    model : NetworkModel or NetfreeModel
    """
    try:
        # seeds imply expansion, which requires a netfree model to start from.
        if seeds is not None or is_netfree:
            m = ecell4_base.core.NetfreeModel()
        else:
            m = ecell4_base.core.NetworkModel()
        for sp in SPECIES_ATTRIBUTES:
            m.add_species_attribute(sp)
        for rr in REACTION_RULES:
            m.add_reaction_rule(rr)
        # Clear the global registries unless the caller opted out.
        if not without_reset:
            reset_model()
        if seeds is not None:
            return m.expand(seeds)
        if isinstance(m, ecell4_base.core.NetfreeModel):
            m.set_effective(effective)
    except Exception as e:
        # Keep the globals consistent even when model construction fails.
        reset_model()
        raise e
    return m
def write_primers(primer_list, path, names=None, notes=None):
    '''Write a list of primers out to a csv file.

    The first three columns are compatible with the current IDT order form
    (name, sequence, notes).  Notes and names are optional.

    :param primer_list: A list of primers.
    :type primer_list: coral.Primer list
    :param path: A path to the csv you want to write.
    :type path: str
    :param names: Names for each oligo; must match primer_list in length.
    :type names: str list
    :param notes: Notes for each oligo; must match primer_list in length.
    :type notes: str list
    '''
    def _annotate(attr, values):
        # Apply one value per primer, enforcing a strict 1:1 pairing.
        if len(values) != len(primer_list):
            raise PrimerAnnotationError('Mismatch in number of notes and primers.')
        for primer, value in zip(primer_list, values):
            setattr(primer, attr, value)

    if names is not None:
        _annotate('name', names)
    if notes is not None:
        _annotate('note', notes)

    with open(path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['name', 'sequence', 'notes'])
        for primer in primer_list:
            # Overhang is written lowercase, annealing region as-is.
            sequence = str(primer.overhang).lower() + str(primer.anneal)
            writer.writerow([primer.name, sequence, primer.note])
def parse_requires(__fname: str) -> List[str]:
    """Parse ``pip``-style requirements files.

    This is a *very* naïve parser: only ``-r`` includes and
    ``python_version`` environment markers are understood.  Support for other
    features will be added only when packages in the wild depend on them.

    Args:
        __fname: Base file to parse.

    Returns:
        Parsed dependencies.
    """
    marker_re = re.compile(
        r"""
        (?:python_version)                          # Supported markers
        \s* (?:<=?|==|>=?)                          # Supported comparisons
        \s* (?P<quote>(?:'|"))(?:[\d\.]+)(?P=quote) # Test
        """,
        re.VERBOSE)
    deps = []
    with open(__fname) as req_file:
        # Strip comments and surrounding whitespace up front.
        entries = [raw.split('#')[0].strip() for raw in req_file.readlines()]
    for entry in entries:
        if not entry:
            continue
        if entry.startswith('-r '):
            # Recursive include of another requirements file.
            included = entry.split()[1]
            if '/' not in included:
                included = path.join(path.dirname(__fname), included)
            deps.extend(parse_requires(included))
            continue
        if ';' in entry:
            entry, marker = [part.strip() for part in entry.split(';')]
            # Support for other markers will be added when they're actually
            # found in the wild.
            if not marker_re.fullmatch(marker):
                raise ValueError('Invalid marker {!r}'.format(marker))
            env = {
                '__builtins__': {},
                'python_version': '{}.{}'.format(*version_info[:2]),
            }
            if not eval(marker, env):  # pylint: disable=eval-used
                continue
        deps.append(entry)
    return deps
def transcribe(self, text, punctuation=True):
    """Return an approximate IPA pronunciation of *text*, in brackets.

    With ``punctuation=False``, punctuation marks are stripped before
    transcription.
    """
    if not punctuation:
        text = re.sub(r"[\.\";\,\:\[\]\(\)!&?‘]", "", text)
    # Context-sensitive regex rules first: 'sch', 'h' adjacent to vowels,
    # and intervocalic / word-initial prevocalic 's'.
    regex_rules = (
        (r'sch', 'ʃ'),
        (r'(?<=[aeiouäëöüâæœêîôû])h', 'χ'),
        (r'h(?=[aeiouäëöüâæœêîôû])', 'χ'),
        (r'(?<=[aeiouäëöüâæœêîôû])s(?=[aeiouäëöüâæœêîôû])', 'z̥'),
        (r'^s(?=[aeiouäëöüâæœêîôû])', 'z̥'),
    )
    for pattern, replacement in regex_rules:
        text = re.sub(pattern, replacement, text)
    # Plain substitution tables: diphthongs before single letters.
    for source, ipa in Dipthongs_IPA.items():
        text = text.replace(source, ipa)
    for source, ipa in IPA.items():
        text = text.replace(source, ipa)
    return "[" + text + "]"
def handle_channel_settled(raiden: 'RaidenService', event: Event):
    """Handle an on-chain ChannelSettled event.

    Looks up the local channel state, fetches both participants' on-chain
    locksroots, and dispatches a ``ContractReceiveChannelSettled`` state
    change into the node's state machine.
    """
    data = event.event_data
    token_network_identifier = event.originating_contract
    channel_identifier = data['args']['channel_identifier']
    block_number = data['block_number']
    block_hash = data['block_hash']
    transaction_hash = data['transaction_hash']

    chain_state = views.state_from_raiden(raiden)
    channel_state = views.get_channelstate_by_canonical_identifier(
        chain_state=chain_state,
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_state.chain_id,
            token_network_address=token_network_identifier,
            channel_identifier=channel_identifier,
        ),
    )

    # This may happen for two reasons:
    # - This node is not a participant for the given channel (normal operation,
    #   the event should be ignored).
    # - Something went wrong in our code and the channel state was cleared
    #   before settle (a bug, this should raise an exception on development
    #   mode).
    # Because we cannot distinguish the two cases, assume the channel is not of
    # interest and ignore the event.
    if not channel_state:
        return

    """This is resolving a corner case where the current node view of the
    channel state does not reflect what the blockchain contains.

    The corner case goes as follows in a setup of nodes A -> B:
    - A sends out a LockedTransfer to B
    - B sends a refund to A
    - B goes offline
    - A sends LockExpired to B. Here: (1) the lock is removed from A's state
      (2) B never received the message
    - A closes the channel with B's refund
    - B comes back online and calls updateNonClosingBalanceProof with A's
      LockedTransfer (LockExpired was never processed).
    - When the channel is settled, B unlocks its refund transfer lock provided
      that it gains from doing so.
    - A does NOT try to unlock its lock because its side of the channel state
      is empty (lock expired and was removed).

    The above is resolved by providing the state machine with the onchain
    locksroots for both participants in the channel so that the channel state
    is updated to store these locksroots. In
    `raiden_event_handler:handle_contract_send_channelunlock`, those values
    are used to restore the channel state back to where the locksroots values
    existed and this channel state is used to calculate the gain and
    potentially perform unlocks in case there is value to be gained.
    """
    our_locksroot, partner_locksroot = get_onchain_locksroots(
        chain=raiden.chain,
        canonical_identifier=channel_state.canonical_identifier,
        participant1=channel_state.our_state.address,
        participant2=channel_state.partner_state.address,
        block_identifier=block_hash,
    )
    channel_settled = ContractReceiveChannelSettled(
        transaction_hash=transaction_hash,
        canonical_identifier=channel_state.canonical_identifier,
        our_onchain_locksroot=our_locksroot,
        partner_onchain_locksroot=partner_locksroot,
        block_number=block_number,
        block_hash=block_hash,
    )
    raiden.handle_and_track_state_change(channel_settled)
def pick_flat_z(data):
    """Generate a 2D array of the quasiparticle weight, keeping only the
    first particle's column from each entry of ``data['zeta']``."""
    return np.asarray([zeta[:, 0] for zeta in data['zeta']])
def _get_photos(session, user_or_group_id):
    """Fetch all photos of a user or group.

    See https://vk.com/dev/photos.getAll
    """
    # Paginated fetch, 200 items per request (the API maximum).
    return session.fetch_items(
        "photos.getAll", Photo.from_json, count=200, owner_id=user_or_group_id)
def get_mate_center(self, angle=0):
    """Mate at ring's center rotated ``angle`` degrees.

    :param angle: rotation around z-axis (unit: deg)
    :type angle: :class:`float`
    :return: mate in ring's center rotated about z-axis
    :rtype: :class:`Mate <cqparts.constraint.Mate>`
    """
    return Mate(self, CoordSystem.from_plane(
        cadquery.Plane(
            # Center of the ring: half-way along its width, on the z-axis.
            origin=(0, 0, self.width / 2),
            xDir=(1, 0, 0),
            normal=(0, 0, 1),
        ).rotated((0, 0, angle))  # rotate about z-axis
    ))
def helper_parallel_lines(start0, end0, start1, end1, filename):
    """Image for :func:`.parallel_lines_parameters` docstring.

    Plots the two line segments (start0-end0 and start1-end1) and saves the
    figure to ``filename``.  No-op when image generation is disabled.
    """
    if NO_IMAGES:
        return
    figure = plt.figure()
    ax = figure.gca()
    # stack1d packs the four points column-wise: columns 0-1 are the first
    # segment, columns 2-3 the second.
    points = stack1d(start0, end0, start1, end1)
    ax.plot(points[0, :2], points[1, :2], marker="o")
    ax.plot(points[0, 2:], points[1, 2:], marker="o")
    ax.axis("scaled")
    _plot_helpers.add_plot_boundary(ax)
    save_image(figure, filename)
def ui_main(fmt_table, node_dict):
    """Create the base UI in command mode.

    Prints the node table, then loops reading user commands until one of
    them requests a list refresh (True) or program exit (False).
    """
    # Map command name -> handler.  Callables are invoked with the command
    # name and node dict; plain values are returned as the loop result.
    cmd_funct = {"quit": False,
                 "run": node_cmd,
                 "stop": node_cmd,
                 "connect": node_cmd,
                 "details": node_cmd,
                 "update": True}
    ui_print("\033[?25l")  # cursor off
    print("{}\n".format(fmt_table))
    sys.stdout.flush()
    # refresh_main values:
    #   None = loop main-cmd, True = refresh-list, False = exit-program
    refresh_main = None
    while refresh_main is None:
        cmd_name = get_user_cmd(node_dict)
        if callable(cmd_funct[cmd_name]):
            refresh_main = cmd_funct[cmd_name](cmd_name, node_dict)
        else:
            refresh_main = cmd_funct[cmd_name]
        # Clear the table area before a refresh (connect leaves the screen).
        if cmd_name != "connect" and refresh_main:
            ui_clear(len(node_dict) + 2)
    return refresh_main
def chdir ( self , path ) : """Changes the current directory to the given path"""
self . cwd = self . _join_chunks ( self . _normalize_path ( path ) )
def gp_xfac ( ) : """example using QM12 enhancement factors - uses ` gpcalls ` kwarg to reset xtics - numpy . loadtxt needs reshaping for input files w / only one datapoint - according poster presentations see QM12 _ & NSD _ review . . _ QM12 : http : / / indico . cern . ch / getFile . py / access ? contribId = 268 & sessionId = 10 & resId = 0 & materialId = slides & confId = 181055 . . _ NSD : http : / / rnc . lbl . gov / ~ xdong / RNC / DirectorReview2012 / posters / Huck . pdf . . image : : pics / xfac . png : width : 450 px : ivar key : translates filename into legend / key label : ivar shift : slightly shift selected data points"""
# prepare data inDir , outDir = getWorkDirs ( ) data = OrderedDict ( ) # TODO : " really " reproduce plot using spectral data for file in os . listdir ( inDir ) : info = os . path . splitext ( file ) [ 0 ] . split ( '_' ) key = ' ' . join ( info [ : 2 ] + [ ':' , ' - ' . join ( [ str ( float ( s ) / 1e3 ) for s in info [ - 1 ] [ : 7 ] . split ( '-' ) ] ) + ' GeV' ] ) file_url = os . path . join ( inDir , file ) data [ key ] = np . loadtxt ( open ( file_url , 'rb' ) ) . reshape ( ( - 1 , 5 ) ) data [ key ] [ : , 0 ] *= shift . get ( key , 1 ) logging . debug ( data ) # shown if - - log flag given on command line # generate plot nSets = len ( data ) make_plot ( data = data . values ( ) , properties = [ getOpts ( i ) for i in xrange ( nSets ) ] , titles = data . keys ( ) , # use data keys as legend titles name = os . path . join ( outDir , 'xfac' ) , key = [ 'top center' , 'maxcols 2' , 'width -7' , 'font ",20"' ] , ylabel = 'LMR Enhancement Factor' , xlabel = '{/Symbol \326}s_{NN} (GeV)' , yr = [ 0.5 , 6.5 ] , size = '8.5in,8in' , rmargin = 0.99 , tmargin = 0.98 , bmargin = 0.14 , xlog = True , gpcalls = [ 'format x "%g"' , 'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)' , 'boxwidth 0.015 absolute' ] , labels = { 'STAR Preliminary' : [ 0.5 , 0.5 , False ] } , lines = { 'x=1' : 'lc 0 lw 4 lt 2' } ) return 'done'
def turn_on(host, did, token=None):
    """Turn on bulb or fixture.

    :param host: hostname or IP of the gateway
    :param did: device id of the bulb/fixture
    :param token: auth token; when absent, falls back to plain HTTP with the
        gateway's well-known default token
    :return: True when the gateway answered with HTTP 200, False otherwise
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "1234567890"  # default token used by the local HTTP API
    url = (scheme + '://' + host
           + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>'
           + token + '</token><did>' + did + '</did><value>1</value></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    # BUG FIX: ``status_code`` is an int; the previous comparison against the
    # string '200' was always False, so the function always returned False.
    return response.status_code == 200
def find(self, oid):
    """Return list of extensions with given Oid.

    :param oid: the Oid to search for
    :raises TypeError: if ``oid`` is not an ``Oid`` instance
    """
    if not isinstance(oid, Oid):
        # FIX: corrected module-name typo ("crytypescrypto") in the message.
        raise TypeError("Need ctypescrypto.oid.Oid as argument")
    found = []
    index = -1
    end = len(self)
    # X509_get_ext_by_NID returns the index of the next matching extension
    # after ``index``, or a negative value when there are no more matches.
    while True:
        index = libcrypto.X509_get_ext_by_NID(self.cert.cert, oid.nid, index)
        if index >= end or index < 0:
            break
        found.append(self[index])
    return found
def adjust_widths(self, max_width, colstats):
    """Adjust column widths based on the least negative affect it will have
    on the viewing experience.

    We take note of the total character mass that will be clipped when each
    column should be narrowed.  The actual score for clipping is based on
    percentage of total character mass, which is the total number of
    characters in the column.
    """
    adj_colstats = []
    for x in colstats:
        if not x['preformatted']:
            adj_colstats.append(x)
        else:
            # Preformatted columns keep their width; deduct it from budget.
            max_width -= x['offt']
    # Score of clipping one more character column: fraction of the column's
    # total character mass that would be lost (including already-chopped mass).
    next_score = lambda x: (x['counts'][x['offt']] + x['chop_mass']
                            + x['chop_count']) / x['total_mass']
    cur_width = lambda: sum(x['offt'] for x in adj_colstats)
    min_width = lambda x: self.width_normalize(
        self.colspec[x['column']]['minwidth'])
    # Greedily narrow the column with the lowest clipping score until the
    # total width fits (or no column can shrink further).
    while cur_width() > max_width:
        nextaffects = [(next_score(x), i) for i, x in enumerate(adj_colstats)
                       if x['offt'] > min_width(x)]
        if not nextaffects:
            break  # All columns are as small as they can get.
        nextaffects.sort()
        chop = adj_colstats[nextaffects[0][1]]
        # Track the clipped character mass so future scores reflect it.
        chop['chop_count'] += chop['counts'][chop['offt']]
        chop['chop_mass'] += chop['chop_count']
        chop['offt'] -= 1
def timeout(seconds=0, minutes=0, hours=0):
    """Add a signal-based timeout to any block of code.

    If multiple time units are specified, they will be added together to
    determine the time limit.

    Usage:
        with timeout(seconds=5):
            my_slow_function(...)

    Args:
        - seconds: The time limit, in seconds.
        - minutes: The time limit, in minutes.
        - hours: The time limit, in hours.

    NOTE(review): this is a generator intended for use as a context manager;
    presumably decorated with ``@contextlib.contextmanager`` at the
    definition site (not visible here) — confirm.
    """
    limit = seconds + 60 * minutes + 3600 * hours

    def handler(signum, frame):  # pylint: disable=W0613
        raise TimeoutError('timed out after {} seconds'.format(limit))

    try:
        signal.signal(signal.SIGALRM, handler)
        # setitimer supports fractional seconds, unlike signal.alarm.
        signal.setitimer(signal.ITIMER_REAL, limit)
        yield
    finally:
        # alarm(0) cancels any pending real-time timer.
        signal.alarm(0)
def add_missing_xml_attributes(dom, volume_counter=0):
    """Add ``xmlns`` and ``ID`` attributes to the ``<mods:mods>`` tag.

    Args:
        dom (HTMLElement): DOM containing the whole document.
        volume_counter (int, default 0): ID of volume.
    """
    mods_tag = get_mods_tag(dom)
    if mods_tag:
        params = mods_tag.params
        # add missing attributes; the ID is 1-based and zero-padded.
        params["ID"] = "MODS_VOLUME_%04d" % (volume_counter + 1)
        params["xmlns:mods"] = "http://www.loc.gov/mods/v3"
        params["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
        params["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        # schemaLocation is a whitespace-separated namespace/location list.
        params["xsi:schemaLocation"] = " ".join((
            "http://www.w3.org/2001/XMLSchema-instance",
            "http://www.w3.org/2001/XMLSchema.xsd",
            "http://www.loc.gov/mods/v3",
            "http://www.loc.gov/standards/mods/v3/mods-3-4.xsd",
            "http://www.w3.org/1999/xlink http://www.w3.org/1999/xlink.xsd",
        ))
def pagerank_weighted(graph, initial_value=None, damping=0.85):
    """Calculates PageRank for an undirected graph.

    :param graph: graph object exposing ``nodes()``, ``neighbors()`` and
        ``edge_weight()``
    :param initial_value: starting score per node; defaults to ``1/len(nodes)``
    :param damping: PageRank damping factor
    :return: dict mapping node -> score
    """
    nodes = graph.nodes()
    # FIX: identity comparison with None instead of ``== None``.
    if initial_value is None:
        initial_value = 1.0 / len(nodes)
    scores = dict.fromkeys(nodes, initial_value)

    # Iterate until every node's score change is within the convergence
    # threshold, with a hard cap of 100 sweeps.
    for _ in range(100):
        convergence_achieved = 0
        for i in nodes:
            rank = 1 - damping
            for j in graph.neighbors(i):
                neighbors_sum = sum(
                    graph.edge_weight((j, k)) for k in graph.neighbors(j))
                rank += damping * scores[j] * graph.edge_weight((j, i)) / neighbors_sum
            if abs(scores[i] - rank) <= CONVERGENCE_THRESHOLD:
                convergence_achieved += 1
            scores[i] = rank
        if convergence_achieved == len(nodes):
            break
    return scores
def blueprint(self, blueprint, **options):
    """Register a blueprint on the application.

    :param blueprint: Blueprint object or (list, tuple) thereof
    :param options: option dictionary with blueprint defaults
    :return: Nothing
    """
    # A list/tuple/group is registered by recursing into each member.
    if isinstance(blueprint, (list, tuple, BlueprintGroup)):
        for item in blueprint:
            self.blueprint(item, **options)
        return
    if blueprint.name in self.blueprints:
        # Re-registering the exact same object is tolerated; a different
        # blueprint under the same name is a programming error.
        assert self.blueprints[blueprint.name] is blueprint, (
            'A blueprint with the name "%s" is already registered. '
            "Blueprint names must be unique." % (blueprint.name,))
    else:
        self.blueprints[blueprint.name] = blueprint
        self._blueprint_order.append(blueprint)
    blueprint.register(self, options)
def user_getinfo(self, fields, access_token=None):
    """Request multiple fields of information about the user.

    :param fields: The names of the fields requested.
    :type fields: list
    :returns: The values of the current user for the fields requested.
        The keys are the field names, values are the values of the fields as
        indicated by the OpenID Provider.  Note that fields that were not
        provided by the Provider are absent.
    :rtype: dict
    :raises Exception: If the user was not authenticated.  Check this with
        user_loggedin.

    .. versionadded:: 1.0
    """
    if g.oidc_id_token is None and access_token is None:
        raise Exception('User was not authenticated')
    info = {}
    all_info = None
    for field in fields:
        # Prefer values already present in the id_token when no explicit
        # access token was supplied.
        if access_token is None and field in g.oidc_id_token:
            info[field] = g.oidc_id_token[field]
        elif current_app.config['OIDC_USER_INFO_ENABLED']:
            # This was not in the id_token. Let's get user information
            if all_info is None:
                all_info = self._retrieve_userinfo(access_token)
                if all_info is None:
                    # To make sure we don't retry for every field
                    all_info = {}
            if field in all_info:
                info[field] = all_info[field]
            else:
                # We didn't get this information
                pass
    return info
def asarray(self, key=None, series=None):
    """Return image data of multiple TIFF pages as numpy array.

    By default the first image series is returned.

    Parameters
    ----------
    key : int, slice, or sequence of page indices
        Defines which pages to return as array.
    series : int
        Defines which series of pages to return as array.
    """
    if key is None and series is None:
        series = 0
    if series is not None:
        pages = self.series[series].pages
    else:
        pages = self.pages
    # Narrow the page list according to ``key``.
    if key is None:
        pass
    elif isinstance(key, int):
        pages = [pages[key]]
    elif isinstance(key, slice):
        pages = pages[key]
    elif isinstance(key, collections.Iterable):
        # NOTE(review): ``collections.Iterable`` was removed in Python 3.10
        # (moved to ``collections.abc``) — confirm the supported Python range.
        pages = [pages[k] for k in key]
    else:
        raise TypeError('key must be an int, slice, or sequence')
    if len(pages) == 1:
        return pages[0].asarray()
    elif self.is_nih:
        # NIH Image stacks: stack raw pages, then apply the shared palette.
        result = numpy.vstack(
            p.asarray(colormapped=False, squeeze=False) for p in pages)
        if pages[0].is_palette:
            result = numpy.take(pages[0].color_map, result, axis=1)
            result = numpy.swapaxes(result, 0, 1)
    else:
        # OME files may reference missing pages; substitute zero frames so
        # the stack keeps its expected shape.
        if self.is_ome and any(p is None for p in pages):
            firstpage = next(p for p in pages if p)
            nopage = numpy.zeros_like(firstpage.asarray())
        result = numpy.vstack((p.asarray() if p else nopage) for p in pages)
        if key is None:
            try:
                result.shape = self.series[series].shape
            except ValueError:
                # Fall back to a flat page stack when the series shape
                # does not match the data actually read.
                warnings.warn("failed to reshape %s to %s" % (
                    result.shape, self.series[series].shape))
                result.shape = (-1,) + pages[0].shape
        else:
            result.shape = (-1,) + pages[0].shape
    return result
def realpath(self, filename):
    """Return the canonical path of the specified filename, eliminating
    any symbolic links encountered in the path."""
    if self.filesystem.is_windows_fs:
        # No symlink resolution on the fake Windows filesystem.
        return self.abspath(filename)
    filename = make_string_path(filename)
    # Resolve links component-by-component, starting from an empty prefix
    # of the same (str/bytes) type as ``filename``.
    resolved, _ok = self._joinrealpath(filename[:0], filename, {})
    return self.abspath(resolved)
def dispatch(self, context, consumed, handler, is_endpoint):
    """Called as dispatch descends into a tier.

    The base extension uses this to maintain the "current url".
    """
    request = context.request
    if __debug__:
        log.debug("Handling dispatch event.", extra=dict(
            request=id(context),
            consumed=consumed,
            handler=safe_name(handler),
            endpoint=is_endpoint,
        ))
    # The leading path element (leading slash) requires special treatment.
    if not consumed and context.request.path_info_peek() == '':
        consumed = ['']
    nConsumed = 0
    if consumed:
        # Migrate path elements consumed from the `PATH_INFO` to `SCRIPT_NAME`
        # WSGI environment variables.
        if not isinstance(consumed, (list, tuple)):
            consumed = consumed.split('/')
        for element in consumed:
            if element == context.request.path_info_peek():
                context.request.path_info_pop()
                nConsumed += 1
            else:
                # Stop at the first element that no longer matches.
                break
    # Update the breadcrumb list.
    context.path.append(Crumb(handler, Path(request.script_name)))
    if consumed:
        # Lastly, update the remaining path element list.
        request.remainder = request.remainder[nConsumed:]
def cleanup_full(self, trial_runner):
    """Cleans up bracket after bracket is completely finished.

    Lets the last trial continue to run until termination condition
    kicks in."""
    # Only paused trials are stopped; running trials finish on their own.
    paused = [t for t in self.current_trials() if t.status == Trial.PAUSED]
    for trial in paused:
        trial_runner.stop_trial(trial)
async def Check(self, stream):
    """Implements synchronous periodic checks."""
    request = await stream.recv_message()
    checks = self._checks.get(request.service)
    if checks is None:
        # Unknown service name.
        await stream.send_trailing_metadata(status=Status.NOT_FOUND)
    elif len(checks) == 0:
        # No checks registered: unconditionally report SERVING.
        await stream.send_message(HealthCheckResponse(
            status=HealthCheckResponse.SERVING,
        ))
    else:
        # Run every check, then report the aggregate status.
        for check in checks:
            await check.__check__()
        await stream.send_message(HealthCheckResponse(
            status=_status(checks),
        ))
def add_unchecked ( self , binsha , mode , name ) : """Add the given item to the tree , its correctness is assumed , which puts the caller into responsibility to assure the input is correct . For more information on the parameters , see ` ` add ` ` : param binsha : 20 byte binary sha"""
self . _cache . append ( ( binsha , mode , name ) )
def _create_at(self, timestamp=None, id=None, forced_identity=None, **kwargs):
    """WARNING: Only for internal use and testing.

    Create a Versionable having version_start_date and version_birth_date
    set to some pre-defined timestamp.

    :param timestamp: point in time at which the instance has to be created
    :param id: version 4 UUID unicode object; usually auto-created
    :param forced_identity: version 4 UUID unicode object, internal use only
    :param kwargs: arguments needed for initializing the instance
    :return: an instance of the class
    """
    id = Versionable.uuid(id)
    # The identity defaults to the object's own id unless explicitly forced.
    ident = Versionable.uuid(forced_identity) if forced_identity else id
    if timestamp is None:
        timestamp = get_utc_now()
    kwargs.update(
        id=id,
        identity=ident,
        version_start_date=timestamp,
        version_birth_date=timestamp,
    )
    return super(VersionManager, self).create(**kwargs)
def lifecycle_lock(self):
    """An identity-keyed inter-process lock for safeguarding lifecycle and
    other operations."""
    safe_mkdir(self._metadata_base_dir)
    return OwnerPrintingInterProcessFileLock(
        # N.B. This lock can't key into the actual named metadata dir (e.g.
        # `.pids/pantsd/lock` via
        # `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a
        # need to purge the named metadata dir on startup to avoid stale
        # metadata reads.
        os.path.join(self._metadata_base_dir, '.lock.{}'.format(self._name))
    )
def get_schema_version_from_xml(xml):
    """Get the ``schemaVersion`` attribute from an OpenMalaria scenario file.

    xml - open file, or the content of the XML document, to be processed.
    Returns None when the input is not parseable XML or the attribute is
    absent.
    """
    # Accept raw document content as well as file-like objects.
    if isinstance(xml, six.string_types):
        xml = StringIO(xml)
    try:
        root = ElementTree.parse(xml).getroot()
    except ParseError:
        # Not an XML file
        return None
    return root.attrib.get('schemaVersion', None)
def operations():
    """Class decorator stores all calls into list.

    Can be used until .invalidate() is called.

    NOTE(review): ``defaults_dict().items() + kwargs.items()`` concatenates
    the results of ``dict.items()``, which only works on Python 2 where
    ``items()`` returns a list — confirm this module is py2-only.

    :return: decorated class
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            self = args[0]
            assert self.__can_use, "User operation queue only in 'with' block"

            def defaults_dict():
                # Map the parameters NOT supplied positionally to their
                # declared default values.
                f_args, varargs, keywords, defaults = inspect.getargspec(func)
                defaults = defaults or []
                return dict(zip(f_args[-len(defaults) + len(args[1:]):],
                                defaults[len(args[1:]):]))

            # Effective keyword arguments: declared defaults overridden by
            # the explicitly passed kwargs.
            route_args = dict(defaults_dict().items() + kwargs.items())
            func(*args, **kwargs)
            # Record the call: (method name, positional args, keyword args).
            self.operations.append((func.__name__, args[1:], route_args,))
        return wrapped_func

    def decorate(clazz):
        # Wrap every callable attribute of the class with the recorder.
        for attr in clazz.__dict__:
            if callable(getattr(clazz, attr)):
                setattr(clazz, attr, decorator(getattr(clazz, attr)))

        def __init__(self):  # simple parameter-less constructor
            self.operations = []
            self.__can_use = True

        def invalidate(self):
            self.__can_use = False

        clazz.__init__ = __init__
        clazz.invalidate = invalidate
        return clazz
    return decorate
def to_array(self):
    """Serializes this InputVenueMessageContent to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(InputVenueMessageContent, self).to_array()
    array['latitude'] = float(self.latitude)    # type float
    array['longitude'] = float(self.longitude)  # type float
    array['title'] = u(self.title)      # py2: type unicode, py3: type str
    array['address'] = u(self.address)  # py2: type unicode, py3: type str
    # Optional fields are only emitted when present.
    for key in ('foursquare_id', 'foursquare_type'):
        value = getattr(self, key)
        if value is not None:
            array[key] = u(value)  # py2: type unicode, py3: type str
    return array
def send_email(self, source, subject, body, to_addresses,
               cc_addresses=None, bcc_addresses=None,
               format='text', reply_addresses=None,
               return_path=None, text_body=None, html_body=None):
    """Composes an email message based on input data, and then
    immediately queues the message for sending.

    :type source: string
    :param source: The sender's email address.

    :type subject: string
    :param subject: The subject of the message: a short summary of the
        content, which will appear in the recipient's inbox.

    :type body: string
    :param body: The message body.

    :type to_addresses: list of strings or string
    :param to_addresses: The To: field(s) of the message.

    :type cc_addresses: list of strings or string
    :param cc_addresses: The CC: field(s) of the message.

    :type bcc_addresses: list of strings or string
    :param bcc_addresses: The BCC: field(s) of the message.

    :type format: string
    :param format: The format of the message's body, must be either
        "text" or "html".

    :type reply_addresses: list of strings or string
    :param reply_addresses: The reply-to email address(es) for the
        message.  If the recipient replies to the message, each
        reply-to address will receive the reply.

    :type return_path: string
    :param return_path: The email address to which bounce notifications
        are to be forwarded.

    :type text_body: string
    :param text_body: The text body to send with this email.

    :type html_body: string
    :param html_body: The html body to send with this email.

    :raises ValueError: if ``format`` is invalid or no body was supplied.
    :raises Warning: if both ``body`` and the matching ``*_body`` are given.
    """
    format = format.lower().strip()
    # Validate ``format`` BEFORE routing ``body`` into text/html and
    # building the request (the original validated only afterwards, so
    # a bad format still mutated text_body/html_body first).
    if format not in ("text", "html"):
        raise ValueError("'format' argument must be 'text' or 'html'")
    if body is not None:
        if format == "text":
            if text_body is not None:
                raise Warning("You've passed in both a body and a text_body; please choose one or the other.")
            text_body = body
        else:
            if html_body is not None:
                raise Warning("You've passed in both a body and an html_body; please choose one or the other.")
            html_body = body
    # At least one body variant must be present before hitting the API.
    if not (html_body or text_body):
        raise ValueError("No text or html body found for mail")

    params = {
        'Source': source,
        'Message.Subject.Data': subject,
    }
    if return_path:
        params['ReturnPath'] = return_path
    if html_body is not None:
        params['Message.Body.Html.Data'] = html_body
    if text_body is not None:
        params['Message.Body.Text.Data'] = text_body

    self._build_list_params(params, to_addresses,
                            'Destination.ToAddresses.member')
    if cc_addresses:
        self._build_list_params(params, cc_addresses,
                                'Destination.CcAddresses.member')
    if bcc_addresses:
        self._build_list_params(params, bcc_addresses,
                                'Destination.BccAddresses.member')
    if reply_addresses:
        self._build_list_params(params, reply_addresses,
                                'ReplyToAddresses.member')
    return self._make_request('SendEmail', params)
def findall(data):
    """Worker that finds all occurrences of a given string (or regex)
    in a given text.

    :param data: Request data dict:
        'string': string to search in text
        'sub': input text
        'regex': True to consider string as a regular expression
        'whole_word': True to match whole words only.
        'case_sensitive': True to match case, False to ignore case
    :return: list of occurrence positions in text
    """
    occurrences = findalliter(
        data['string'],
        data['sub'],
        regex=data['regex'],
        whole_word=data['whole_word'],
        case_sensitive=data['case_sensitive'],
    )
    return list(occurrences)
def extentSearch(self, xmin, ymin, xmax, ymax):
    """Filters the data by a geographical bounding box.

    The bounding box is given as lower-left point coordinates and
    upper-right point coordinates.

    Note: the dataframe needs `lat` and `lng` columns for the filter to
    apply (could possibly be handled via freeSearch in the future).

    Returns
        list: boolean mask over all indexes — matches are `True`, the
        rest `False`.  An empty list is returned when the dataframe is
        empty or lacks the required columns.
    """
    if self._dataFrame.empty:
        return []
    try:
        lower_ok = (self._dataFrame.lat >= xmin) & (self._dataFrame.lng >= ymin)
        upper_ok = (self._dataFrame.lat <= xmax) & (self._dataFrame.lng <= ymax)
    except AttributeError:
        # Missing lat/lng columns — nothing to filter on.
        return []
    return np.logical_and(lower_ok, upper_ok)
def bisect(func, a, b, xtol=1e-12, maxiter=100):
    """Finds the root of `func` using the bisection method.

    Requirements
    ------------
    - func must be a continuous function that accepts a single number
      input and returns a single number
    - `func(a)` and `func(b)` must have opposite sign

    Parameters
    ----------
    func : function
        the function that we want to find the root of
    a : number
        one of the bounds on the input
    b : number
        the other bound on the input
    xtol : number, optional
        the solution tolerance of the input value.  The algorithm is
        considered converged if `abs(b - a) / 2 < xtol`
    maxiter : number, optional
        the maximum number of iterations allowed for convergence

    Raises
    ------
    ValueError
        if `func(a)` and `func(b)` do not have opposite signs
    RuntimeError
        if no convergence within `maxiter` iterations
    """
    fa = func(a)
    if fa == 0.:
        return a
    fb = func(b)
    if fb == 0.:
        return b
    # A sign change across [a, b] is required for bisection.  The
    # original used a bare ``assert`` (stripped under ``python -O``)
    # with an external ``sign`` helper.
    if (fa > 0) == (fb > 0):
        raise ValueError('func(a) and func(b) must have opposite signs')
    for _ in range(maxiter):  # ``xrange`` was Python-2-only
        c = (a + b) / 2.
        fc = func(c)
        if fc == 0. or abs(b - a) / 2. < xtol:
            return c
        # Keep the endpoint value cached; the original re-evaluated
        # ``func(a)`` on every iteration.
        if (fc > 0) == (fa > 0):
            a, fa = c, fc
        else:
            b = c
    raise RuntimeError('Failed to converge after %d iterations.' % maxiter)
def verify_log(opts):
    '''If an insecure logging configuration is found, show a warning.'''
    configured = str(opts.get('log_level')).lower()
    # Anything more verbose than INFO (e.g. DEBUG) may leak secrets.
    if LOG_LEVELS.get(configured, logging.NOTSET) < logging.INFO:
        log.warning('Insecure logging configuration detected! Sensitive data may be logged.')
def rm(*components, **kwargs):
    """Remove a file or directory.

    If path is a directory, this recursively removes the directory and
    any contents.  Non-existent paths are silently ignored.

    Supports Unix style globbing by default (disable using glob=False).
    For details on globbing pattern expansion, see:
        https://docs.python.org/2/library/glob.html

    Arguments:
        *components (string[]): path to the file or directory to
            remove.  May be absolute or relative.  May contain unix glob
        **kwargs: if "glob" is True, perform Unix style pattern
            expansion of paths (default: True).
    """
    target = path(*components)
    expand = kwargs.get("glob", True)
    candidates = iglob(target) if expand else [target]
    for candidate in candidates:
        if isfile(candidate):
            os.remove(candidate)
        elif exists(candidate):
            shutil.rmtree(candidate, ignore_errors=False)
def _divide_and_round ( a , b ) : """divide a by b and round result to the nearest integer When the ratio is exactly half - way between two integers , the even integer is returned ."""
# Based on the reference implementation for divmod _ near # in Objects / longobject . c . q , r = divmod ( a , b ) # round up if either r / b > 0.5 , or r / b = = 0.5 and q is odd . # The expression r / b > 0.5 is equivalent to 2 * r > b if b is # positive , 2 * r < b if b negative . r *= 2 greater_than_half = r > b if b > 0 else r < b if greater_than_half or r == b and q % 2 == 1 : q += 1 return q
def bezier2polynomial(p, numpy_ordering=True, return_poly1d=False):
    """Converts a tuple of Bezier control points to a tuple of
    coefficients of the expanded polynomial.

    return_poly1d : returns a numpy.poly1d object.  This makes
        computations of derivatives/anti-derivatives and many other
        operations quite quick.
    numpy_ordering : By default (to accommodate numpy) the coefficients
        will be output in reverse standard order.
    """
    degree = len(p) - 1
    # Hard-coded expansions for the common low orders.
    if degree == 3:
        coeffs = (-p[0] + 3 * (p[1] - p[2]) + p[3],
                  3 * (p[0] - 2 * p[1] + p[2]),
                  3 * (p[1] - p[0]),
                  p[0])
    elif degree == 2:
        coeffs = (p[0] - 2 * p[1] + p[2],
                  2 * (p[1] - p[0]),
                  p[0])
    elif degree == 1:
        coeffs = (p[1] - p[0], p[0])
    elif degree == 0:
        coeffs = p
    else:
        # https://en.wikipedia.org/wiki/Bezier_curve#Polynomial_form
        coeffs = []
        for j in range(degree + 1):
            inner = sum((-1) ** (i + j) * p[i] / (fac(i) * fac(j - i))
                        for i in range(j + 1))
            coeffs.append(fac(degree) // fac(degree - j) * inner)
        coeffs.reverse()
    if not numpy_ordering:
        coeffs = coeffs[::-1]  # can't use .reverse() as might be tuple
    if return_poly1d:
        return poly1d(coeffs)
    return coeffs
def shelld(ndim, array):  # Works!, use this as example for "I/O" parameters
    """Sort a double precision array using the Shell Sort algorithm.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/shelld_c.html

    :param ndim: Dimension of the array.
    :type ndim: int
    :param array: The array to be sorted.
    :type array: Array of floats
    :return: The sorted array.
    :rtype: Array of floats
    """
    # Marshal into C types; shelld_c sorts the buffer in place.
    c_array = stypes.toDoubleVector(array)
    c_ndim = ctypes.c_int(ndim)
    libspice.shelld_c(c_ndim,
                      ctypes.cast(c_array, ctypes.POINTER(ctypes.c_double)))
    return stypes.cVectorToPython(c_array)
def hide_stevedore_logs():
    """Hides the logs of stevedore.

    This function was added in order to support older versions of
    stevedore.  The NullHandler is attached to get rid of the
    'No handlers could be found for logger...' message.

    Returns:
        None
    """
    ext_logger = logging.getLogger('stevedore.extension')
    # Keep records from bubbling up to the root logger.
    ext_logger.propagate = False
    ext_logger.addHandler(logging.NullHandler())
    ext_logger.setLevel(logging.ERROR)
def get_brightness(self):
    """Get current brightness.

    Refreshes device state via ``get_status`` first; brightness is the
    last ';'-separated field of the 'color' value.  Falls back to 0
    when the status data is unusable.
    """
    self.get_status()
    try:
        value = self.data['color'].split(';')[-1]
    except TypeError:
        # e.g. ``data`` is None — report zero brightness.
        value = 0
    self.brightness = value
    return self.brightness
def route_not_found(*args):
    """Constructs a Flask Response for when an API Route (path + method)
    is not found.  This is usually HTTP 404 but with API Gateway this is
    a HTTP 403
    (https://forums.aws.amazon.com/thread.jspa?threadID=2166840)

    :return: a Flask Response
    """
    payload = jsonify(ServiceErrorResponses._MISSING_AUTHENTICATION)
    status_code = ServiceErrorResponses.HTTP_STATUS_CODE_403
    return make_response(payload, status_code)
def load_model(self, model):
    '''load_model

    High-level api: Load schema information by compiling the model
    using the pyang package.

    Parameters
    ----------
    model : `str`
        Model name, or path to a compiled model xml file.

    Returns
    -------
    Model
        An instance of Model.

    Code Example::

        >>> device.nc.scan_models()
        >>> m = device.nc.load_model('openconfig-system')
        >>> print(m)
    '''
    if os.path.isfile(model):
        file_name, file_ext = os.path.splitext(model)
        if file_ext.lower() == '.xml':
            logger.debug('Read model file {}'.format(model))
            with open(model, 'r') as f:
                xml = f.read()
            parser = etree.XMLParser(remove_blank_text=True)
            tree = etree.XML(xml, parser)
            # NOTE(review): this branch parses ``tree`` but never
            # assigns ``m``, so reaching the code below raises
            # NameError — presumably a ``m = Model(tree)``-style call
            # is missing here.  TODO confirm against the Model class.
        else:
            raise ValueError("'{}' is not a file with extension 'xml'"
                             .format(model))
    elif model in self.models_loadable:
        if self.compiler is None:
            raise ValueError('please first call scan_models() to build '
                             'up supported namespaces of a device')
        else:
            m = self.compiler.compile(model)
    else:
        raise ValueError("argument 'model' {} needs to be either a model "
                         "name or a compiled model xml file".format(model))
    if m.name in self.models:
        # Reload: drop cached nodes that belong to this model.
        self.nodes = {k: v for k, v in self.nodes.items()
                      if self.roots[k.split(' ')[0]] != m.name}
        logger.info('Model {} is reloaded'.format(m.name))
    else:
        logger.info('Model {} is loaded'.format(m.name))
    self.models[m.name] = m
    return m
def _transform_snapshot(raw_snapshot: Dict[Any, Any]) -> str:
    """This migration upgrades the object:

    - `MediatorTransferState` such that a list of routes is added to
      the state to be able to route a waiting transfer in case the
      receiving node comes back online.

    Takes the raw JSON snapshot string and returns the migrated JSON
    string.
    """
    snapshot = json.loads(raw_snapshot)
    secrethash_to_task = snapshot['payment_mapping']['secrethashes_to_task']
    for task in secrethash_to_task.values():
        # Only mediator tasks carry the state being migrated.
        if task['_type'] != 'raiden.transfer.state.MediatorTask':
            continue

        mediator_state = task.get('mediator_state')
        # Make sure the old meditor_state was not migrated already.
        assert 'routes' not in mediator_state
        mediator_state['routes'] = []

        waiting_transfer = mediator_state.get('waiting_transfer')
        if waiting_transfer is None:
            continue

        transfer = waiting_transfer.get('transfer')
        token_network_identifier = transfer['balance_proof']['token_network_identifier']
        token_network = get_token_network_by_identifier(
            snapshot,
            token_network_identifier,
        )
        channel_identifier = transfer['balance_proof']['channel_identifier']
        channel = token_network.get('channelidentifiers_to_channels').get(
            channel_identifier)
        if not channel:
            raise ChannelNotFound(
                f'Upgrading to v18 failed. '
                f'Could not find channel with identifier {channel_identifier} '
                f'in the current chain state.',
            )

        # Only add the route for which the waiting transfer was intended.
        # At the time of migration, we cannot re-calculate the list of routes
        # that were originally calculated when the transfer was being
        # mediated so this step should be sufficient for now.
        mediator_state['routes'] = [
            RouteState.from_dict({
                'node_address': channel['partner_state']['address'],
                'channel_identifier': channel_identifier,
            }).to_dict(),
        ]
    return json.dumps(snapshot)
def update_folder_name(self, name):
    """Change this folder name

    :param str name: new name to change to
    :return: Updated or Not
    :rtype: bool
    """
    # The root folder cannot be renamed; an empty name is rejected.
    if self.root or not name:
        return False

    url = self.build_url(
        self._endpoints.get('get_folder').format(id=self.folder_id))
    response = self.con.patch(url, data={self._cc('displayName'): name})
    if not response:
        return False

    payload = response.json()
    self.name = payload.get(self._cc('displayName'), '')
    self.parent_id = payload.get(self._cc('parentFolderId'), None)
    return True
def cumulative_detections(dates=None, template_names=None, detections=None,
                          plot_grouped=False, group_name=None, rate=False,
                          plot_legend=True, ax=None, **kwargs):
    """Plot cumulative detections or detection rate in time.

    Simple plotting function to take a list of either datetime objects
    or :class:`eqcorrscan.core.match_filter.Detection` objects and plot
    a cumulative detections list.  Can take dates as a list of lists and
    will plot each list separately, e.g. if you have dates from more
    than one template it will overlay them in different colours.

    :type dates: list
    :param dates: Must be a list of lists of datetime.datetime objects
    :type template_names: list
    :param template_names: List of the template names in order of the dates
    :type detections: list
    :param detections:
        List of :class:`eqcorrscan.core.match_filter.Detection`
    :type plot_grouped: bool
    :param plot_grouped:
        Plot detections for each template individually, or group them
        all together - set to False (plot template detections
        individually) by default.
    :type group_name: str
    :param group_name: Label used for the single grouped series.
    :type rate: bool
    :param rate:
        Whether or not to plot the rate of detection per day.  Only
        works for plot_grouped=True
    :type plot_legend: bool
    :param plot_legend:
        Specify whether to plot legend of template names.  Defaults to
        True.
    :type ax: matplotlib.axes.Axes
    :param ax: Existing axes to draw into; a new figure is made if None.

    :returns: :class:`matplotlib.figure.Figure`

    .. note::
        Can either take lists of
        :class:`eqcorrscan.core.match_filter.Detection` objects
        directly, or two lists of dates and template names -
        either/or, not both.
    """
    import matplotlib.pyplot as plt
    from eqcorrscan.core.match_filter import Detection
    # Set up a default series of parameters for lines
    colors = cycle(['red', 'green', 'blue', 'cyan', 'magenta', 'yellow',
                    'black', 'firebrick', 'purple', 'darkgoldenrod',
                    'gray'])
    linestyles = cycle(['-', '-.', '--', ':'])
    # Check that dates is a list of lists
    if not detections:
        if type(dates[0]) != list:
            dates = [dates]
    else:
        # Derive dates/template_names from the Detection objects.
        dates = []
        template_names = []
        for detection in detections:
            if not type(detection) == Detection:
                raise IOError('detection not of type: '
                              'eqcorrscan.core.match_filter.Detection')
            dates.append(detection.detect_time.datetime)
            template_names.append(detection.template_name)
        _dates = []
        _template_names = []
        # Group detection times per template.
        for template_name in list(set(template_names)):
            _template_names.append(template_name)
            _dates.append([date for i, date in enumerate(dates)
                           if template_names[i] == template_name])
        dates = _dates
        template_names = _template_names
    if plot_grouped:
        # Flatten all templates into a single series.
        _dates = []
        for template_dates in dates:
            _dates += template_dates
        dates = [_dates]
        if group_name:
            template_names = group_name
        else:
            template_names = ['All templates']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
    else:
        # NOTE(review): ``Axes.figure`` is an attribute (the parent
        # Figure), so calling it here likely raises TypeError when an
        # existing ``ax`` is passed — presumably ``fig = ax.figure``
        # was intended.  TODO confirm.
        fig = ax.figure()
    # Make sure not to pad at edges
    ax.margins(0, 0)
    min_date = min([min(_d) for _d in dates])
    max_date = max([max(_d) for _d in dates])
    for k, template_dates in enumerate(dates):
        template_dates.sort()
        plot_dates = deepcopy(template_dates)
        plot_dates.insert(0, min_date)
        plot_dates.insert(-1, template_dates[-1])
        color = next(colors)
        # Cycle linestyle each time the colour cycle wraps back to red.
        if color == 'red':
            linestyle = next(linestyles)
        counts = np.arange(-1, len(template_dates) + 1)
        if rate:
            if not plot_grouped:
                msg = 'Plotting rate only implemented for plot_grouped=True'
                raise NotImplementedError(msg)
            # Bin width depends on the total time span.
            if 31 < (max_date - min_date).days < 365:
                bins = (max_date - min_date).days
                ax.set_ylabel('Detections per day')
            elif (max_date - min_date).days <= 31:
                bins = (max_date - min_date).days * 4
                ax.set_ylabel('Detections per 6 hour bin')
            else:
                bins = (max_date - min_date).days // 7
                ax.set_ylabel('Detections per week')
            if len(plot_dates) <= 10:
                bins = 1
            ax.hist(mdates.date2num(plot_dates), bins=bins,
                    label='Rate of detections', color='darkgrey',
                    alpha=0.5)
        else:
            ax.plot(plot_dates, counts, linestyle, color=color,
                    label=template_names[k], linewidth=2.0,
                    drawstyle='steps')
            ax.set_ylabel('Cumulative detections')
    ax.set_xlabel('Date')
    # Set formatters for x-labels
    mins = mdates.MinuteLocator()
    max_date = dates[0][0]
    min_date = max_date
    for date_list in dates:
        if max(date_list) > max_date:
            max_date = max(date_list)
        if min(date_list) < min_date:
            min_date = min(date_list)
    timedif = max_date - min_date
    # Choose tick locators according to the plotted time span.
    if 10800 <= timedif.total_seconds() <= 25200:
        hours = mdates.MinuteLocator(byminute=[0, 30])
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 10))
    elif 7200 <= timedif.total_seconds() < 10800:
        hours = mdates.MinuteLocator(byminute=[0, 15, 30, 45])
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
    elif timedif.total_seconds() <= 1200:
        hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 2))
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 0.5))
    elif 25200 < timedif.total_seconds() <= 86400:
        hours = mdates.HourLocator(byhour=np.arange(0, 24, 3))
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
    elif 86400 < timedif.total_seconds() <= 172800:
        hours = mdates.HourLocator(byhour=np.arange(0, 24, 6))
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
    elif timedif.total_seconds() > 172800:
        hours = mdates.AutoDateLocator()
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 3))
    else:
        hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
    # Minor locator overruns maxticks for ~year-long datasets
    if timedif.total_seconds() < 172800:
        ax.xaxis.set_minor_locator(mins)
        hrFMT = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
    else:
        hrFMT = mdates.DateFormatter('%Y/%m/%d')
    ax.xaxis.set_major_locator(hours)
    ax.xaxis.set_major_formatter(hrFMT)
    fig.autofmt_xdate()
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=15)
    if not rate:
        ax.set_ylim([0, max([len(_d) for _d in dates])])
    if plot_legend:
        if ax.legend() is not None:
            leg = ax.legend(loc=2, prop={'size': 8}, ncol=2)
            leg.get_frame().set_alpha(0.5)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def print_todo(self, p_todo):
    """Given a todo item, pretty print it."""
    # Run the item's source text through each configured pretty-printer
    # filter in order.
    text = p_todo.source()
    for pretty_filter in self.filters:
        text = pretty_filter.filter(text, p_todo)
    return TopydoString(text)
def configure_basic_auth_decorator(graph):
    """Configure a basic auth decorator."""
    # use the metadata name if no realm is defined
    graph.config.setdefault("BASIC_AUTH_REALM", graph.metadata.name)
    # wrap in dict to allow lists of items as well as dictionaries
    credentials = dict(graph.config.basic_auth.credentials)
    return ConfigBasicAuth(app=graph.flask, credentials=credentials)
def get_process_curses_data(self, p, first, args):
    """Get curses data to display for a process.

    - p is the process to display (a dict of per-process stats)
    - first is a tag=True if the process is the first on the list
    - args holds the parsed command-line options (e.g. disable_irix,
      process_short_name, enable_process_extended)

    Returns the list of curses "lines" (dicts produced by
    curse_new_line/curse_add_line) describing one table row, plus
    optional extended-stats rows for the first process.
    """
    ret = [self.curse_new_line()]
    # CPU
    if 'cpu_percent' in p and p['cpu_percent'] is not None and p['cpu_percent'] != '':
        if args.disable_irix and self.nb_log_core != 0:
            # Scale by core count (Solaris-style) when Irix mode is off.
            msg = self.layout_stat['cpu'].format(
                p['cpu_percent'] / float(self.nb_log_core))
        else:
            msg = self.layout_stat['cpu'].format(p['cpu_percent'])
        alert = self.get_alert(p['cpu_percent'],
                               highlight_zero=False,
                               is_max=(p['cpu_percent'] == self.max_values['cpu_percent']),
                               header="cpu")
        ret.append(self.curse_add_line(msg, alert))
    else:
        msg = self.layout_header['cpu'].format('?')
        ret.append(self.curse_add_line(msg))
    # MEM
    if 'memory_percent' in p and p['memory_percent'] is not None and p['memory_percent'] != '':
        msg = self.layout_stat['mem'].format(p['memory_percent'])
        alert = self.get_alert(p['memory_percent'],
                               highlight_zero=False,
                               is_max=(p['memory_percent'] == self.max_values['memory_percent']),
                               header="mem")
        ret.append(self.curse_add_line(msg, alert))
    else:
        msg = self.layout_header['mem'].format('?')
        ret.append(self.curse_add_line(msg))
    # VMS/RSS
    if 'memory_info' in p and p['memory_info'] is not None and p['memory_info'] != '':
        # VMS
        msg = self.layout_stat['virt'].format(
            self.auto_unit(p['memory_info'][1], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
        # RSS
        msg = self.layout_stat['res'].format(
            self.auto_unit(p['memory_info'][0], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        msg = self.layout_header['virt'].format('?')
        ret.append(self.curse_add_line(msg))
        msg = self.layout_header['res'].format('?')
        ret.append(self.curse_add_line(msg))
    # PID
    msg = self.layout_stat['pid'].format(p['pid'], width=self.__max_pid_size())
    ret.append(self.curse_add_line(msg))
    # USER
    if 'username' in p:
        # docker internal users are displayed as ints only, therefore str()
        # Correct issue #886 on Windows OS
        msg = self.layout_stat['user'].format(str(p['username'])[:9])
        ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['user'].format('?')
        ret.append(self.curse_add_line(msg))
    # TIME+
    try:
        # Sum user and system time
        user_system_time = p['cpu_times'][0] + p['cpu_times'][1]
    except (OverflowError, TypeError) as e:
        # Catch OverflowError on some Amazon EC2 server
        # See https://github.com/nicolargo/glances/issues/87
        # Also catch TypeError on macOS
        # See: https://github.com/nicolargo/glances/issues/622
        # logger.debug("Cannot get TIME+ ({})".format(e))
        msg = self.layout_header['time'].format('?')
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        hours, minutes, seconds = seconds_to_hms(user_system_time)
        # Compact time formats depending on magnitude.
        if hours > 99:
            msg = '{:<7}h'.format(hours)
        elif 0 < hours < 100:
            msg = '{}h{}:{}'.format(hours, minutes, seconds)
        else:
            msg = '{}:{}'.format(minutes, seconds)
        msg = self.layout_stat['time'].format(msg)
        if hours > 0:
            ret.append(self.curse_add_line(msg,
                                           decoration='CPU_TIME',
                                           optional=True))
        else:
            ret.append(self.curse_add_line(msg, optional=True))
    # THREAD
    if 'num_threads' in p:
        num_threads = p['num_threads']
        if num_threads is None:
            num_threads = '?'
        msg = self.layout_stat['thread'].format(num_threads)
        ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['thread'].format('?')
        ret.append(self.curse_add_line(msg))
    # NICE
    if 'nice' in p:
        nice = p['nice']
        if nice is None:
            nice = '?'
        msg = self.layout_stat['nice'].format(nice)
        ret.append(self.curse_add_line(msg,
                                       decoration=self.get_nice_alert(nice)))
    else:
        msg = self.layout_header['nice'].format('?')
        ret.append(self.curse_add_line(msg))
    # STATUS
    if 'status' in p:
        status = p['status']
        msg = self.layout_stat['status'].format(status)
        if status == 'R':
            # Highlight running processes.
            ret.append(self.curse_add_line(msg, decoration='STATUS'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = self.layout_header['status'].format('?')
        ret.append(self.curse_add_line(msg))
    # IO read/write
    if 'io_counters' in p and p['io_counters'][4] == 1 and p['time_since_update'] != 0:
        # Display rate if stats is available and io_tag ([4]) == 1
        # IO read
        io_rs = int((p['io_counters'][0] - p['io_counters'][2]) /
                    p['time_since_update'])
        if io_rs == 0:
            msg = self.layout_stat['ior'].format("0")
        else:
            msg = self.layout_stat['ior'].format(
                self.auto_unit(io_rs, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        # IO write
        io_ws = int((p['io_counters'][1] - p['io_counters'][3]) /
                    p['time_since_update'])
        if io_ws == 0:
            msg = self.layout_stat['iow'].format("0")
        else:
            msg = self.layout_stat['iow'].format(
                self.auto_unit(io_ws, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    else:
        msg = self.layout_header['ior'].format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        msg = self.layout_header['iow'].format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    # Command line
    # If no command line for the process is available, fallback to
    # the bare process name instead
    if 'cmdline' in p:
        cmdline = p['cmdline']
    else:
        cmdline = '?'
    try:
        if cmdline:
            path, cmd, arguments = split_cmdline(cmdline)
            if os.path.isdir(path) and not args.process_short_name:
                # Full path: dim directory part, highlight the command.
                msg = self.layout_stat['command'].format(path) + os.sep
                ret.append(self.curse_add_line(msg, splittable=True))
                ret.append(self.curse_add_line(cmd,
                                               decoration='PROCESS',
                                               splittable=True))
            else:
                msg = self.layout_stat['command'].format(cmd)
                ret.append(self.curse_add_line(msg,
                                               decoration='PROCESS',
                                               splittable=True))
            if arguments:
                msg = ' ' + self.layout_stat['command'].format(arguments)
                ret.append(self.curse_add_line(msg, splittable=True))
        else:
            msg = self.layout_stat['name'].format(p['name'])
            ret.append(self.curse_add_line(msg, splittable=True))
    except (TypeError, UnicodeEncodeError) as e:
        # Avoid crach after running fine for several hours #1335
        logger.debug("Can not decode command line '{}' ({})".format(cmdline, e))
        ret.append(self.curse_add_line('', splittable=True))
    # Add extended stats but only for the top processes
    if first and 'extended_stats' in p and args.enable_process_extended:
        # Left padding
        xpad = ' ' * 13
        # First line is CPU affinity
        if 'cpu_affinity' in p and p['cpu_affinity'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'CPU affinity: ' + str(len(p['cpu_affinity'])) + ' cores'
            ret.append(self.curse_add_line(msg, splittable=True))
        # Second line is memory info
        if 'memory_info' in p and p['memory_info'] is not None:
            ret.append(self.curse_new_line())
            msg = '{}Memory info: {}'.format(xpad, p['memory_info'])
            if 'memory_swap' in p and p['memory_swap'] is not None:
                msg += ' swap ' + self.auto_unit(p['memory_swap'],
                                                 low_precision=False)
            ret.append(self.curse_add_line(msg, splittable=True))
        # Third line is for open files/network sessions
        msg = ''
        if 'num_threads' in p and p['num_threads'] is not None:
            msg += str(p['num_threads']) + ' threads '
        if 'num_fds' in p and p['num_fds'] is not None:
            msg += str(p['num_fds']) + ' files '
        if 'num_handles' in p and p['num_handles'] is not None:
            msg += str(p['num_handles']) + ' handles '
        if 'tcp' in p and p['tcp'] is not None:
            msg += str(p['tcp']) + ' TCP '
        if 'udp' in p and p['udp'] is not None:
            msg += str(p['udp']) + ' UDP'
        if msg != '':
            ret.append(self.curse_new_line())
            msg = xpad + 'Open: ' + msg
            ret.append(self.curse_add_line(msg, splittable=True))
        # Fouth line is IO nice level (only Linux and Windows OS)
        if 'ionice' in p and p['ionice'] is not None and hasattr(p['ionice'], 'ioclass'):
            ret.append(self.curse_new_line())
            msg = xpad + 'IO nice: '
            k = 'Class is '
            v = p['ionice'].ioclass
            # Linux: The scheduling class. 0 for none, 1 for real time,
            # 2 for best-effort, 3 for idle.
            # Windows: On Windows only ioclass is used and it can be set
            # to 2 (normal), 1 (low) or 0 (very low).
            if WINDOWS:
                if v == 0:
                    msg += k + 'Very Low'
                elif v == 1:
                    msg += k + 'Low'
                elif v == 2:
                    msg += 'No specific I/O priority'
                else:
                    msg += k + str(v)
            else:
                if v == 0:
                    msg += 'No specific I/O priority'
                elif v == 1:
                    msg += k + 'Real Time'
                elif v == 2:
                    msg += k + 'Best Effort'
                elif v == 3:
                    msg += k + 'IDLE'
                else:
                    msg += k + str(v)
            # value is a number which goes from 0 to 7.
            # The higher the value, the lower the I/O priority of the process.
            if hasattr(p['ionice'], 'value') and p['ionice'].value != 0:
                msg += ' (value %s/7)' % str(p['ionice'].value)
            ret.append(self.curse_add_line(msg, splittable=True))
    return ret
def add_child_gradebook(self, gradebook_id, child_id):
    """Adds a child to a gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of a gradebook
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  AlreadyExists - ``gradebook_id`` is already a parent of
            ``child_id``
    raise:  NotFound - ``gradebook_id`` or ``child_id`` not found
    raise:  NullArgument - ``gradebook_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.add_child_bin_template
    catalog_session = self._catalog_session
    if catalog_session is None:
        # No catalog session configured: operate on the hierarchy directly.
        return self._hierarchy_session.add_child(id_=gradebook_id, child_id=child_id)
    return catalog_session.add_child_catalog(catalog_id=gradebook_id, child_id=child_id)
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0):
    """Define class object from provided values.

    Parameters
    ----------
    xdtu : float
        XDTU fits keyword value.
    ydtu : float
        YDTU fits keyword value.
    zdtu : float
        ZDTU fits keyword value.
    xdtu_0 : float
        XDTU_0 fits keyword value.
    ydtu_0 : float
        YDTU_0 fits keyword value.
    zdtu_0 : float
        ZDTU_0 fits keyword value.

    Returns
    -------
    DtuConfiguration
        A new instance populated with the given DTU values.

    """
    # Instantiate via ``cls`` rather than the hard-coded class name so
    # that subclasses calling this alternate constructor receive
    # instances of their own type (original always built a
    # DtuConfiguration, defeating the purpose of a classmethod).
    self = cls()
    # define DTU variables
    self.xdtu = xdtu
    self.ydtu = ydtu
    self.zdtu = zdtu
    self.xdtu_0 = xdtu_0
    self.ydtu_0 = ydtu_0
    self.zdtu_0 = zdtu_0
    return self
def connect(self, host: str = '192.168.0.3', port: Union[int, str] = 5555) -> None:
    '''Connect to a device via TCP/IP directly.'''
    # Record the serial as "host:port" before probing reachability.
    self.device_sn = f'{host}:{port}'
    if is_connectable(host, port):
        self._execute('connect', self.device_sn)
    else:
        raise ConnectionError(f'Cannot connect to {self.device_sn}.')
def get(key, default=-1):
    """Backport support for original codes."""
    # Integer keys map straight to an enum value lookup.
    if isinstance(key, int):
        return Packet(key)
    # Unknown names are registered on the fly before the final lookup.
    known = key in Packet._member_map_
    if not known:
        extend_enum(Packet, key, default)
    return Packet[key]
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every tracked field is still unset.
    tracked_fields = (
        self._status,
        self._balance_preferred,
        self._balance_threshold_high,
        self._savings_account_alias,
    )
    return all(field is None for field in tracked_fields)
def build_function(name, args=None, defaults=None, doc=None):
    """create and initialize an astroid FunctionDef node"""
    args = args or []
    defaults = defaults or []
    func = nodes.FunctionDef(name, doc)
    argsnode = nodes.Arguments()
    func.args = argsnode
    # Build one Name node per positional argument.
    argsnode.args = []
    for arg_name in args:
        arg_node = nodes.Name()
        arg_node.name = arg_name
        arg_node.parent = argsnode
        argsnode.args.append(arg_node)
    # Wrap default values in const nodes.
    argsnode.defaults = []
    for default in defaults:
        default_node = nodes.const_factory(default)
        default_node.parent = argsnode
        argsnode.defaults.append(default_node)
    argsnode.kwarg = None
    argsnode.vararg = None
    argsnode.parent = func
    if args:
        register_arguments(func)
    return func
def set_sync_info(self, name, mtime, size):
    """Store mtime/size when this resource was last synchronized with remote."""
    # Remote resources delegate to the peer; local resources record the
    # info in the current directory's metadata.
    target = self.cur_dir_meta if self.is_local() else self.peer
    return target.set_sync_info(name, mtime, size)
def index_objects(self, objects, index="default"):
    """Bulk index a list of objects."""
    if not objects:
        return
    target_index = self.app_state.indexes[index]
    seen_keys = set()
    with target_index.writer() as writer:
        for obj in objects:
            document = self.get_document(obj)
            if document is None:
                continue
            object_key = document["object_key"]
            # Skip duplicates within this batch.
            if object_key in seen_keys:
                continue
            # Remove any stale version before re-adding.
            writer.delete_by_term("object_key", object_key)
            try:
                writer.add_document(**document)
            except ValueError:
                # logger is here to give us more infos in order to catch a weird bug
                # that happens regularly on CI but is not reliably
                # reproductible.
                logger.error("writer.add_document(%r)", document, exc_info=True)
                raise
            seen_keys.add(object_key)
def upgrade():
    """Upgrade database."""
    # ``access_actionsroles``: grants (or, with ``exclude`` set, denies)
    # an action to a role, optionally restricted to a specific argument.
    op.create_table(
        'access_actionsroles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(length=80), nullable=True),
        # server_default '0' keeps existing rows as "grant" rules.
        sa.Column('exclude', sa.Boolean(name='exclude'), server_default='0',
                  nullable=False),
        sa.Column('argument', sa.String(length=255), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['role_id'], [u'accounts_role.id'], ),
        sa.PrimaryKeyConstraint('id'),
        # One row per (action, exclude, argument, role) combination.
        sa.UniqueConstraint('action', 'exclude', 'argument', 'role_id',
                            name='access_actionsroles_unique')
    )
    op.create_index(op.f('ix_access_actionsroles_action'),
                    'access_actionsroles', ['action'], unique=False)
    op.create_index(op.f('ix_access_actionsroles_argument'),
                    'access_actionsroles', ['argument'], unique=False)
    # ``access_actionsusers``: same structure, but grants per user
    # (note: user_id is nullable here, unlike role_id above).
    op.create_table(
        'access_actionsusers',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(length=80), nullable=True),
        sa.Column('exclude', sa.Boolean(name='exclude'), server_default='0',
                  nullable=False),
        sa.Column('argument', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('action', 'exclude', 'argument', 'user_id',
                            name='access_actionsusers_unique')
    )
    op.create_index(op.f('ix_access_actionsusers_action'),
                    'access_actionsusers', ['action'], unique=False)
    op.create_index(op.f('ix_access_actionsusers_argument'),
                    'access_actionsusers', ['argument'], unique=False)
def shuffle(self):
    """Shuffle the deque.

    Deques do not support in-place shuffling, so copy the items into a
    list, shuffle that, empty the deque, and re-initialize it from the
    shuffled list.
    """
    shuffled_items = list(self)
    random.shuffle(shuffled_items)
    self.clear()
    super(DogeDeque, self).__init__(shuffled_items)
def prepare_attached(self, action, a_name, **kwargs):
    """Prepares an attached volume for a container configuration.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param a_name: The full name or id of the container sharing the volume.
    :type a_name: unicode | str
    """
    client = action.client
    config_id = action.config_id
    policy = self._policy
    # The volume alias is qualified with the parent config name when the
    # container map is configured to do so.
    if action.container_map.use_attached_parent_name:
        v_alias = '{0.config_name}.{0.instance_name}'.format(config_id)
    else:
        v_alias = config_id.instance_name
    user = policy.volume_users[config_id.map_name][v_alias]
    permissions = policy.volume_permissions[config_id.map_name][v_alias]
    # Without local command support on the client, fall back to preparing
    # through a helper container instead of running commands directly.
    if not (self.prepare_local and hasattr(client, 'run_cmd')):
        return self._prepare_container(client, action, a_name, v_alias)
    if action.client_config.features['volumes']:
        # Native volume support: resolve the host mountpoint directly.
        volume_detail = client.inspect_volume(a_name)
        local_path = volume_detail['Mountpoint']
    else:
        # Legacy attached container: look up the bound host path from the
        # container's volume list.
        instance_detail = client.inspect_container(a_name)
        volumes = get_instance_volumes(instance_detail, False)
        path = resolve_value(policy.default_volume_paths[config_id.map_name][v_alias])
        local_path = volumes.get(path)
        if not local_path:
            raise ValueError("Could not locate local path of volume alias '{0}' / "
                             "path '{1}' in container {2}.".format(
                                 action.config_id.instance_name, path, a_name))
    # Run the ownership/permission preparation commands on the resolved path.
    return [client.run_cmd(cmd)
            for cmd in get_preparation_cmd(user, permissions, local_path)]
def _insertFont(self, fontname, bfname, fontfile, fontbuffer, set_simple, idx,
                wmode, serif, encoding, ordering):
    """_insertFont(self, fontname, bfname, fontfile, fontbuffer, set_simple,
    idx, wmode, serif, encoding, ordering) -> PyObject *
    """
    # Thin wrapper: all work happens in the SWIG-generated C extension.
    return _fitz.Page__insertFont(self, fontname, bfname, fontfile, fontbuffer,
                                  set_simple, idx, wmode, serif, encoding,
                                  ordering)
def readconf(conffile, section_name=None, log_name=None, defaults=None,
             raw=False):
    """Read config file and return config items as a dict

    :param conffile: path to config file, or a file-like object
                     (hasattr readline)
    :param section_name: config section to read (will return all sections
                         if not defined)
    :param log_name: name to be used with logging (will use section_name
                     if not defined)
    :param defaults: dict of default values to pre-populate the config with
    :param raw: use RawConfigParser (no interpolation) instead of
                ConfigParser
    :returns: dict of config items
    """
    if defaults is None:
        defaults = {}
    if raw:
        c = RawConfigParser(defaults)
    else:
        c = ConfigParser(defaults)
    if hasattr(conffile, 'readline'):
        # File-like object: parse it directly.  read_file() replaces the
        # readfp() alias that was removed in Python 3.12.
        c.read_file(conffile)
    else:
        if not c.read(conffile):
            # BUG FIX: the original wrote ``print("... %s") % conffile``,
            # applying ``%`` to print()'s None return value and raising
            # TypeError instead of showing the message.
            print("Unable to read config file %s" % conffile)
            sys.exit(1)
    if section_name:
        if c.has_section(section_name):
            conf = dict(c.items(section_name))
        else:
            # Same misplaced-% bug fixed here.
            print("Unable to find %s config section in %s"
                  % (section_name, conffile))
            sys.exit(1)
        if "log_name" not in conf:
            # Fall back to the section name when no log name was given.
            conf['log_name'] = log_name if log_name is not None else section_name
    else:
        # No section requested: return every section as a nested dict.
        conf = {}
        for s in c.sections():
            conf.update({s: dict(c.items(s))})
        if 'log_name' not in conf:
            conf['log_name'] = log_name
    conf['__file__'] = conffile
    return conf
def updatePollVote(self, poll_id, option_ids=None, new_options=None):
    """Updates a poll vote

    :param poll_id: ID of the poll to update vote
    :param option_ids: List of the option IDs to vote for
    :param new_options: List of the new option names to add and vote for
    :raises: FBchatException if request failed
    """
    # Avoid mutable default arguments ([]) which are shared across calls;
    # None is accepted for backward compatibility and means "no options".
    if option_ids is None:
        option_ids = []
    if new_options is None:
        new_options = []
    data = {"question_id": poll_id}
    # Existing options are referenced by id...
    for i, option_id in enumerate(option_ids):
        data["selected_options[{}]".format(i)] = option_id
    # ...while new options are created from their display text.
    for i, option_text in enumerate(new_options):
        data["new_options[{}]".format(i)] = option_text
    # The response is unused; the call is made for its side effect and
    # raises on failure (the original bound it to an unused local ``j``).
    self._post(self.req_url.UPDATE_VOTE, data, fix_request=True, as_json=True)
def flip(f):
    """Calls the function f by flipping the first two positional arguments"""
    def wrapped(*args, **kwargs):
        # Delegate with the first two positional arguments swapped;
        # keyword arguments pass through untouched.
        return f(*flip_first_two(args), **kwargs)
    # Re-curry the wrapper with f's original curry spec so the flipped
    # function stays curryable like f itself.
    f_spec = make_func_curry_spec(f)
    return curry_by_spec(f_spec, wrapped)
def create_json(self, create_missing=None):
    """Create an entity and return the decoded JSON response.

    Calls :meth:`create_raw`, checks the response status code, and
    decodes the JSON body.

    :return: A dict. The server's response, with all JSON decoded.
    :raises: ``requests.exceptions.HTTPError`` if the response has an
        HTTP 4XX or 5XX status code.
    :raises: ``ValueError`` If the response JSON can not be decoded.
    """
    raw_response = self.create_raw(create_missing)
    # Surface HTTP errors before attempting to decode the body.
    raw_response.raise_for_status()
    return raw_response.json()
def bind(self, func: Callable[[Any], Maybe]) -> Maybe:
    """Just x >>= f = f x."""
    # Apply the bound function directly to the wrapped value.
    return func(self._value)
def parse(self, data, extent, parent, desc_tag):
    # type: (bytes, int, Optional[UDFFileEntry], UDFTag) -> None
    '''
    Parse the passed in data into a UDF File Entry.

    Parameters:
     data - The data to parse.
     extent - The extent that this descriptor currently lives at.
     parent - The parent File Entry for this file (may be None).
     desc_tag - A UDFTag object that represents the Descriptor Tag.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF File Entry already initialized')
    # Unpack the fixed-size header; fields that are parsed further below
    # (icb_tag, timestamps, ...) land in locals first.
    (tag_unused, icb_tag, self.uid, self.gid, self.perms, self.file_link_count,
     record_format, record_display_attrs, record_len, self.info_len,
     self.log_block_recorded, access_time, mod_time, attr_time, checkpoint,
     extended_attr_icb, impl_ident, self.unique_id, self.len_extended_attrs,
     len_alloc_descs) = struct.unpack_from(self.FMT, data, 0)
    self.desc_tag = desc_tag
    self.icb_tag = UDFICBTag()
    self.icb_tag.parse(icb_tag)
    # Only the plain (value 0) record format/display/length variants are
    # supported; anything else means a disc layout we cannot handle.
    if record_format != 0:
        raise pycdlibexception.PyCdlibInvalidISO('File Entry record format is not 0')
    if record_display_attrs != 0:
        raise pycdlibexception.PyCdlibInvalidISO('File Entry record display attributes is not 0')
    if record_len != 0:
        raise pycdlibexception.PyCdlibInvalidISO('File Entry record length is not 0')
    self.access_time = UDFTimestamp()
    self.access_time.parse(access_time)
    self.mod_time = UDFTimestamp()
    self.mod_time.parse(mod_time)
    self.attr_time = UDFTimestamp()
    self.attr_time.parse(attr_time)
    # Checkpoint must be 1 on the read-only media this library supports.
    if checkpoint != 1:
        raise pycdlibexception.PyCdlibInvalidISO('Only DVD Read-only disks supported')
    self.extended_attr_icb = UDFLongAD()
    self.extended_attr_icb.parse(extended_attr_icb)
    self.impl_ident = UDFEntityID()
    self.impl_ident.parse(impl_ident)
    offset = struct.calcsize(self.FMT)
    # Extended attributes are kept as an opaque byte blob.
    self.extended_attrs = data[offset:offset + self.len_extended_attrs]
    offset += self.len_extended_attrs
    num_alloc_descs = len_alloc_descs // 8  # a short_ad is 8 bytes
    # Each allocation descriptor is a (length, position) pair.
    for i_unused in range(0, num_alloc_descs):
        (length, pos) = struct.unpack('=LL', data[offset:offset + 8])
        self.alloc_descs.append([length, pos])
        offset += 8
    self.orig_extent_loc = extent
    self.parent = parent
    self._initialized = True
def load_json_file(file, decoder=None):
    """Load data from json file

    :param file: Readable object or path to file
    :type file: FileIO | str
    :param decoder: Use custom json decoder
    :type decoder: T <= DateTimeDecoder
    :return: Json data
    :rtype: None | int | float | str | list | dict
    """
    if decoder is None:
        decoder = DateTimeDecoder
    hook = decoder.decode
    # Readable objects are parsed directly; anything else is treated as
    # a filesystem path.
    if hasattr(file, "read"):
        return json.load(file, object_hook=hook)
    with io.open(file, "r", encoding="utf-8") as handle:
        return json.load(handle, object_hook=hook)
def name(self):
    """Return the name of the sensor."""
    with self._bt_interface.connect(self._mac) as connection:
        raw_name = connection.read_handle(_HANDLE_READ_NAME)  # pylint: disable=no-member
        if not raw_name:
            raise BluetoothBackendException(
                "Could not read NAME using handle %s"
                " from Mi Temp sensor %s" % (hex(_HANDLE_READ_NAME), self._mac))
        # The handle returns raw byte values; decode them to characters.
        return ''.join(chr(byte_value) for byte_value in raw_name)
def write_frame(self):
    """Writes a single frame to the movie file"""
    # A movie writer only exists after open_movie()/open_gif() was called.
    if hasattr(self, 'mwriter'):
        self.mwriter.append_data(self.image)
    else:
        raise AssertionError('This plotter has not opened a movie or GIF file.')
def preferred_height(self, cli, width, max_available_height, wrap_lines):
    """Preferred height: as much as needed in order to display all the completions."""
    state = cli.current_buffer.complete_state
    column_width = self._get_column_width(state)
    # How many columns fit in the usable width (always at least one).
    usable_width = width - self._required_margin
    column_count = max(1, usable_width // column_width)
    # Rows needed to show every completion at that column count.
    return int(math.ceil(len(state.current_completions) / float(column_count)))
def parse_cmdline(argv=None):
    """Parse command line options.

    @param argv: List of command line arguments. If None, get list from system.
    @return: Tuple of Option List and Argument List.
    """
    parser = optparse.OptionParser()
    # Defaults of None let the caller distinguish "not given" from the
    # values in the module-level ``conf`` dict (shown in the help text).
    parser.add_option('-c', '--conf',
                      help='Configuration file path.',
                      dest='confpath', default=None)
    parser.add_option('-p', '--bindport',
                      help='Bind to TCP Port. (Default: %d)' % conf['bindport'],
                      dest='bindport', type='int', default=None, action='store')
    parser.add_option('-b', '--bindaddr',
                      help='Bind to IP Address. (Default: %s)' % conf['bindaddr'],
                      dest='bindaddr', default=None, action='store')
    parser.add_option('-u', '--baseurl',
                      help='Base URL. (Default: %s)' % conf['baseurl'],
                      dest='baseurl', default=None, action='store')
    parser.add_option('-D', '--devel',
                      help='Enable development mode.',
                      dest='devel', default=False, action='store_true')
    # An explicit argv includes the program name at index 0; strip it.
    if argv is not None:
        return parser.parse_args(argv[1:])
    return parser.parse_args()
def get_output_column_widths(table, spans):
    """Gets the widths of the columns of the output table

    Parameters
    ----------
    table : list of lists of str
        The table of rows of text
    spans : list of lists of int
        The [row, column] pairs of combined cells

    Returns
    -------
    widths : list of int
        The widths of each column in the output table
    """
    # Every column starts at a minimum width of 3 characters.
    widths = []
    for column in table[0]:
        widths.append(3)
    # First pass: single-column cells set the base width of their column
    # from their longest text line.
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count == 1:
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                length = get_longest_line_length(text)
                if length > widths[column]:
                    widths[column] = length
    # Second pass: widen the columns under multi-column spans, one
    # character at a time, until the spanned text fits.  The
    # ``column_count - 1`` term accounts for the separators between the
    # merged columns.
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count > 1:
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                end_column = text_column + column_count
                available_space = sum(widths[text_column:end_column])
                available_space += column_count - 1
                length = get_longest_line_length(text)
                while length > available_space:
                    for i in range(text_column, end_column):
                        widths[i] += 1
                        available_space = sum(widths[text_column:end_column])
                        available_space += column_count - 1
                        # Stop growing as soon as the text fits.
                        if length <= available_space:
                            break
    return widths
def unlock_kinetis_swd(jlink):
    """Unlocks a Kinetis device over SWD.

    Steps Involved in Unlocking:
      1. Verify the device is configured to read/write the CoreSight
         registers by reading the Identification Code Register (always at
         0x0 on reads) and checking its validity.
      2. Check for errors in the status register; clear any by writing to
         the Abort Register (always 0x0 on writes).
      3. Turn on device and debug power (the J-Link supplies extra power
         needed during an unlock).
      4. Assert the ``RESET`` pin to hold the target in reset, avoiding
         interrupts and other potentially breaking behaviour.
      5. Send an SWD request to clear any currently set errors.
      6. Select the MDM-AP register (``SELECT[31:24] = 0x01``).
      7. Poll the MDM-AP Status Register (AP[1] bank 0, register 0) until
         the flash-ready bit is set.
      8. Write the MDM-AP Control Register (AP[1] bank 0, register 1) to
         request a flash mass erase.
      9. Poll until the mass erase is acknowledged in the status register.
      10. Poll the control register until its mass-erase bit clears,
          indicating the erase finished and the system is unsecured.

    Args:
      jlink (JLink): the connected J-Link

    Returns:
      ``True`` if the device was unlocked successfully, otherwise
      ``False``.

    Raises:
      KinetisException: when the device cannot be unlocked or fails to
        unlock.

    See Also:
      `NXP Forum <https://community.nxp.com/thread/317167>`_.
      `Kinetis Docs <nxp.com/files/32bit/doc/ref_manual/K12P48M50SF4RM.pdf>`_.
    """
    SWDIdentity = Identity(0x2, 0xBA01)
    jlink.power_on()
    jlink.coresight_configure()
    # 1. Verify that the device is configured properly.
    flags = registers.IDCodeRegisterFlags()
    flags.value = jlink.coresight_read(0x0, False)
    if not unlock_kinetis_identified(SWDIdentity, flags):
        return False
    # 2. Check for errors.
    flags = registers.ControlStatusRegisterFlags()
    flags.value = jlink.coresight_read(0x01, False)
    if flags.STICKYORUN or flags.STICKYCMP or flags.STICKYERR or flags.WDATAERR:
        jlink.coresight_write(0x0, unlock_kinetis_abort_clear(), False)
    # 3. Turn on device power and debug.
    flags = registers.ControlStatusRegisterFlags()
    flags.value = 0
    flags.CSYSPWRUPREQ = 1  # System power-up request
    flags.CDBGPWRUPREQ = 1  # Debug power-up request
    jlink.coresight_write(0x01, flags.value, False)
    # 4. Assert the reset pin.
    jlink.set_reset_pin_low()
    time.sleep(1)
    # 5. Send a SWD Request to clear any errors.
    request = swd.WriteRequest(0x0, False, unlock_kinetis_abort_clear())
    request.send(jlink)
    # 6. Send a SWD Request to select the MDM-AP register, SELECT[31:24] = 0x01
    request = swd.WriteRequest(0x2, False, (1 << 24))
    request.send(jlink)
    try:
        # 7. Poll until the Flash-ready bit is set in the status register
        #    flags.  Have to read first to ensure the data is valid.
        unlock_kinetis_read_until_ack(jlink, 0x0)
        flags = registers.MDMAPStatusRegisterFlags()
        flags.flash_ready = 0
        while not flags.flash_ready:
            flags.value = unlock_kinetis_read_until_ack(jlink, 0x0).data
        # 8. System may still be secure at this point, so request a mass
        #    erase.  AP[1] bank 0, register 1 is the MDM-AP Control Register.
        flags = registers.MDMAPControlRegisterFlags()
        flags.flash_mass_erase = 1
        request = swd.WriteRequest(0x1, True, flags.value)
        request.send(jlink)
        # 9. Poll the status register until the mass erase command has been
        #    accepted.
        unlock_kinetis_read_until_ack(jlink, 0x0)
        flags = registers.MDMAPStatusRegisterFlags()
        flags.flash_mass_erase_ack = 0
        while not flags.flash_mass_erase_ack:
            flags.value = unlock_kinetis_read_until_ack(jlink, 0x0).data
        # 10. Poll the control register until the ``flash_mass_erase`` bit is
        #     cleared, which is done automatically when the mass erase
        #     finishes.
        unlock_kinetis_read_until_ack(jlink, 0x1)
        flags = registers.MDMAPControlRegisterFlags()
        flags.flash_mass_erase = 1
        while flags.flash_mass_erase:
            flags.value = unlock_kinetis_read_until_ack(jlink, 0x1).data
    except KinetisException as e:
        # On failure, release reset before reporting the unlock failed.
        jlink.set_reset_pin_high()
        return False
    jlink.set_reset_pin_high()
    time.sleep(1)
    jlink.reset()
    return True
def _fix_install_dir_for_user_site(self):
    """Fix the install_dir if "--user" was used."""
    # Only relevant when --user was requested and user site-packages are
    # enabled for this interpreter.
    if not (self.user and site.ENABLE_USER_SITE):
        return
    self.create_home_path()
    if self.install_userbase is None:
        raise DistutilsPlatformError("User base directory is not specified")
    self.install_base = self.install_platbase = self.install_userbase
    # os.name is 'posix' on Unix; the scheme names use 'unix_user' etc.
    scheme_name = os.name.replace('posix', 'unix') + '_user'
    self.select_scheme(scheme_name)
def Guo_Sun(dp, voidage, vs, rho, mu, Dt, L=1):
    r'''Calculates pressure drop across a packed bed of spheres using the
    correlation of Guo, Sun et al. [1]_, developed for highly-packed
    particles at particle/tube diameter ratios between 2 and 3, where a
    ring packing structure occurs.  At such low packing ratios this model
    matters: its predictions can be as low as half those of other models.

    .. math::
        f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)
        Re_{Erg}^{0.97}

        f_v = \frac{\Delta P d_p^2}{\mu v_s L}
        \frac{\epsilon^3}{(1-\epsilon)^2}

        Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)}

    Parameters
    ----------
    dp : float
        Particle diameter of spheres [m]
    voidage : float
        Void fraction of bed packing [-]
    vs : float
        Superficial velocity of the fluid [m/s]
    rho : float
        Density of the fluid [kg/m^3]
    mu : float
        Viscosity of the fluid [Pa*s]
    Dt : float
        Diameter of the tube [m]
    L : float, optional
        Length the fluid flows in the packed bed [m]

    Returns
    -------
    dP : float
        Pressure drop across the bed [Pa]

    Examples
    --------
    >>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3)
    42019.529911473706

    References
    ----------
    .. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing
       Liu. "Pressure Drop in Slender Packed Beds with Novel Packing
       Arrangement." Powder Technology 321 (November 2017): 286-92.
       doi:10.1016/j.powtec.2017.08.024.
    '''
    # Ergun-style Reynolds number based on superficial velocity.
    Re_Erg = rho * vs * dp / (mu * (1.0 - voidage))
    # Friction factor for the 2 < Dt/dp < 3 ring-structure regime.
    f_v = 180.0 + (9.5374 * dp / Dt - 2.8054) * Re_Erg**0.97
    # Convert the friction factor into a pressure drop.
    porosity_term = (1.0 - voidage)**2 / voidage**3
    return f_v * porosity_term * mu * vs * L / dp**2
def setActiveWindow ( self , win ) : """Set the given window active ( property _ NET _ ACTIVE _ WINDOW ) : param win : the window object"""
self . _setProperty ( '_NET_ACTIVE_WINDOW' , [ 1 , X . CurrentTime , win . id ] , win )
def roundedSpecClass(self):
    """Spectral class with rounded class number, rounding halves up --
    i.e. A8.5V is A9.
    """
    try:
        # BUG FIX: np.around rounds half-to-even, so 8.5 -> 8, which
        # contradicts the documented behaviour (A8.5 -> A9).  Use
        # floor(x + 0.5) to round halves up (class numbers are
        # non-negative, so this is safe).
        classnumber = str(int(np.floor(self.classNumber + 0.5)))
    except TypeError:
        # Non-numeric class numbers are kept verbatim.
        classnumber = str(self.classNumber)
    return self.classLetter + classnumber
def _set_cursor_position ( self , value ) : """Set cursor position . Return whether it changed ."""
original_position = self . __cursor_position self . __cursor_position = max ( 0 , value ) return value != original_position
def _get_cells_headers_ids ( self , hed , index ) : """Returns a list with ids of rows of same column . : param hed : The list that represents the table header . : type hed : list ( list ( hatemile . util . html . htmldomelement . HTMLDOMElement ) ) : param index : The index of columns . : type index : int : return : The list with ids of rows of same column . : rtype : list ( str )"""
# pylint : disable = no - self - use ids = [ ] for row in hed : if row [ index ] . get_tag_name ( ) == 'TH' : ids . append ( row [ index ] . get_attribute ( 'id' ) ) return ids
def assignOrderNames(self):
    """Assigns the order names for this tree based on the name of the columns."""
    # Without a table type (or its schema), there is nothing to assign.
    try:
        schema = self.tableType().schema()
    except AttributeError:
        return
    for column_name in self.columns():
        schema_column = schema.column(column_name)
        if schema_column:
            self.setColumnOrderName(column_name, schema_column.name())
def makeGlu ( segID , N , CA , C , O , geo ) : '''Creates a Glutamic Acid residue'''
# # R - Group CA_CB_length = geo . CA_CB_length C_CA_CB_angle = geo . C_CA_CB_angle N_C_CA_CB_diangle = geo . N_C_CA_CB_diangle CB_CG_length = geo . CB_CG_length CA_CB_CG_angle = geo . CA_CB_CG_angle N_CA_CB_CG_diangle = geo . N_CA_CB_CG_diangle CG_CD_length = geo . CG_CD_length CB_CG_CD_angle = geo . CB_CG_CD_angle CA_CB_CG_CD_diangle = geo . CA_CB_CG_CD_diangle CD_OE1_length = geo . CD_OE1_length CG_CD_OE1_angle = geo . CG_CD_OE1_angle CB_CG_CD_OE1_diangle = geo . CB_CG_CD_OE1_diangle CD_OE2_length = geo . CD_OE2_length CG_CD_OE2_angle = geo . CG_CD_OE2_angle CB_CG_CD_OE2_diangle = geo . CB_CG_CD_OE2_diangle carbon_b = calculateCoordinates ( N , C , CA , CA_CB_length , C_CA_CB_angle , N_C_CA_CB_diangle ) CB = Atom ( "CB" , carbon_b , 0.0 , 1.0 , " " , " CB" , 0 , "C" ) carbon_g = calculateCoordinates ( N , CA , CB , CB_CG_length , CA_CB_CG_angle , N_CA_CB_CG_diangle ) CG = Atom ( "CG" , carbon_g , 0.0 , 1.0 , " " , " CG" , 0 , "C" ) carbon_d = calculateCoordinates ( CA , CB , CG , CG_CD_length , CB_CG_CD_angle , CA_CB_CG_CD_diangle ) CD = Atom ( "CD" , carbon_d , 0.0 , 1.0 , " " , " CD" , 0 , "C" ) oxygen_e1 = calculateCoordinates ( CB , CG , CD , CD_OE1_length , CG_CD_OE1_angle , CB_CG_CD_OE1_diangle ) OE1 = Atom ( "OE1" , oxygen_e1 , 0.0 , 1.0 , " " , " OE1" , 0 , "O" ) oxygen_e2 = calculateCoordinates ( CB , CG , CD , CD_OE2_length , CG_CD_OE2_angle , CB_CG_CD_OE2_diangle ) OE2 = Atom ( "OE2" , oxygen_e2 , 0.0 , 1.0 , " " , " OE2" , 0 , "O" ) # # Create Residue Data Structure res = Residue ( ( ' ' , segID , ' ' ) , "GLU" , ' ' ) res . add ( N ) res . add ( CA ) res . add ( C ) res . add ( O ) res . add ( CB ) res . add ( CG ) res . add ( CD ) res . add ( OE1 ) res . add ( OE2 ) return res
def _select_batched ( self , table , cols , num_rows , limit , queries_per_batch = 3 , execute = True ) : """Run select queries in small batches and return joined resutls ."""
# Execute select queries in small batches to avoid connection timeout commands , offset = [ ] , 0 while num_rows > 0 : # Use number of rows as limit if num _ rows < limit _limit = min ( limit , num_rows ) # Execute select _ limit query commands . append ( self . _select_limit_statement ( table , cols = cols , offset = offset , limit = limit ) ) offset += _limit num_rows += - _limit # Execute commands if execute : rows = [ ] til_reconnect = queries_per_batch for c in commands : if til_reconnect == 0 : self . disconnect ( ) self . reconnect ( ) til_reconnect = queries_per_batch rows . extend ( self . fetch ( c , False ) ) til_reconnect += - 1 del commands return rows # Return commands else : return commands
def _build_date_header_string(self, date_value):
    """Gets the date_value (may be None, basestring, float or
    datetime.datetime instance) and returns a valid date string as per
    RFC 2822."""
    # NOTE(review): ``basestring`` implies Python 2 (or a compat shim
    # imported elsewhere in this module) -- verify before porting.
    # datetime -> POSIX timestamp in local time.
    if isinstance(date_value, datetime):
        date_value = time.mktime(date_value.timetuple())
    # None or a numeric timestamp -> RFC 2822 string via formatdate();
    # strings are assumed to already be formatted and pass through.
    if not isinstance(date_value, basestring):
        date_value = formatdate(date_value, localtime=True)
    # Encode it here to avoid this:
    # Date: =?utf-8?q?Sat=2C_01_Sep_2012_13=3A08=3A29_-0300?=
    return native(date_value)
def _get_step ( self , name , make_copy = True ) : """Return step from steps library . Optionally , the step returned is a deep copy from the step in the steps library , so additional information ( e . g . , about whether the step was scattered ) can be stored in the copy . Args : name ( str ) : name of the step in the steps library . make _ copy ( bool ) : whether a deep copy of the step should be returned or not ( default : True ) . Returns : Step from steps library . Raises : ValueError : The requested step cannot be found in the steps library ."""
self . _closed ( ) s = self . steps_library . get_step ( name ) if s is None : msg = '"{}" not found in steps library. Please check your ' 'spelling or load additional steps' raise ValueError ( msg . format ( name ) ) if make_copy : s = copy . deepcopy ( s ) return s
def scopeMatch(assumedScopes, requiredScopeSets):
    """Take a list of assumed scopes and a list of required scope sets in
    disjunctive normal form, and check whether any required scope set is
    satisfied.

    Example:
        requiredScopeSets = [["scopeA", "scopeB"], ["scopeC"]]

    Here assumedScopes must contain either "scopeA" AND "scopeB", OR
    just "scopeC".
    """
    def granted(required):
        # A required scope is granted by an exact match, or by an assumed
        # scope ending in '*' whose prefix matches.
        return any(
            scope == required
            or (scope.endswith("*") and required.startswith(scope[:-1]))
            for scope in assumedScopes
        )
    # DNF: succeed if every scope of any one required set is granted.
    return any(
        all(granted(required) for required in scopeSet)
        for scopeSet in requiredScopeSets
    )
def send(self, request):
    """Queue a request to be sent to the RPC."""
    if not self._UNARY_REQUESTS:
        # Streaming mode: hand the request straight to the bidi RPC.
        self._rpc.send(request)
        return
    # Unary mode: failures are swallowed (logged at debug level) since
    # stream requests are best-effort.
    try:
        self._send_unary_request(request)
    except exceptions.GoogleAPICallError:
        _LOGGER.debug(
            "Exception while sending unary RPC. This is typically "
            "non-fatal as stream requests are best-effort.",
            exc_info=True,
        )
def _await_socket(self, timeout):
    """Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
    with safe_open(self._ng_stdout, 'r') as ng_stdout:
        deadline = time.time() + timeout
        accumulated_stdout = ''
        while True:
            # Shrink the select() wait each pass so the overall deadline
            # is honored regardless of how many lines we read.
            # TODO: share the decreasing timeout logic here with
            # NailgunProtocol.iter_chunks() by adding a method to
            # pants.util.contextutil!
            time_left = deadline - time.time()
            if time_left < 0:
                stderr = read_file(self._ng_stderr, binary_mode=True)
                raise self.InitialNailgunConnectTimedOut(
                    timeout=timeout,
                    stdout=accumulated_stdout,
                    stderr=stderr,
                )
            readable, _, _ = select.select([ng_stdout], [], [], time_left)
            if readable:
                line = ng_stdout.readline()  # TODO: address deadlock risk here.
                match = self._NG_PORT_REGEX.match(line)
                if match:
                    return match.group(1)
                accumulated_stdout += line
def job_exists(self, prov):
    """Check if a job exists in the database."""
    with self.lock:
        # Any row with a matching "prov" value means the job is known.
        self.cur.execute('select * from "jobs" where "prov" = ?;', (prov,))
        return self.cur.fetchone() is not None
def _greater_warnings_context ( context_lines_string ) : """Provide the ` line ` argument to warnings . showwarning ( ) . warnings . warn _ explicit ( ) doesn ' t use the ` line ` argument to showwarning ( ) , but we want to make use of the warning filtering provided by warn _ explicit ( ) . This contextmanager overwrites the showwarning ( ) method to pipe in the desired amount of context lines when using warn _ explicit ( ) ."""
prev_showwarning = warnings . showwarning def wrapped ( message , category , filename , lineno , file = None , line = None ) : return prev_showwarning ( message = message , category = category , filename = filename , lineno = lineno , file = file , line = ( line or context_lines_string ) ) warnings . showwarning = wrapped yield warnings . showwarning = prev_showwarning
def patch_dataset(self, owner, id, body, **kwargs):
    """Update a dataset.

    Updates an existing dataset. Only elements or files included in the
    request are updated; everything omitted remains untouched.

    Synchronous by default; pass a ``callback`` function in ``kwargs``
    to make the HTTP request asynchronously, in which case the request
    thread is returned instead of the response data.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str owner: User name and unique identifier of the creator of
        a dataset or project. (required)
    :param str id: Dataset unique identifier. (required)
    :param DatasetPatchRequest body: (required)
    :return: SuccessMessage
        If the method is called asynchronously, returns the request
        thread.
    """
    # Always unwrap the HTTP layer and hand back just the payload (or,
    # for async calls, the request thread started by the helper).
    kwargs['_return_http_data_only'] = True
    return self.patch_dataset_with_http_info(owner, id, body, **kwargs)
def persist(name, value, config=None):
    '''
    Assign and persist a simple sysctl parameter for this minion. If
    ``config`` is not specified, a sensible default will be chosen using
    :mod:`sysctl.default_config <salt.modules.linux_sysctl.default_config>`.

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.persist net.ipv4.ip_forward 1
    '''
    if config is None:
        config = default_config()
    edited = False
    # If the sysctl.conf is not present, add it
    if not os.path.isfile(config):
        sysctl_dir = os.path.dirname(config)
        if not os.path.exists(sysctl_dir):
            os.makedirs(sysctl_dir)
        try:
            with salt.utils.files.fopen(config, 'w+') as _fh:
                _fh.write('#\n# Kernel sysctl configuration\n#\n')
        except (IOError, OSError):
            msg = 'Could not write to file: {0}'
            raise CommandExecutionError(msg.format(config))
    # Read the existing sysctl.conf
    nlines = []
    try:
        with salt.utils.files.fopen(config, 'r') as _fh:
            # Use readlines because this should be a small file
            # and it seems unnecessary to indent the below for
            # loop since it is a fairly large block of code.
            config_data = salt.utils.data.decode(_fh.readlines())
    except (IOError, OSError):
        msg = 'Could not read from file: {0}'
        raise CommandExecutionError(msg.format(config))
    # Rewrite the file line by line, preserving comments and unrelated
    # settings; only the line matching `name` is edited in place.
    for line in config_data:
        if line.startswith('#'):
            nlines.append(line)
            continue
        if '=' not in line:
            nlines.append(line)
            continue
        # Strip trailing whitespace and split the k,v
        comps = [i.strip() for i in line.split('=', 1)]
        # On Linux procfs, files such as /proc/sys/net/ipv4/tcp_rmem or any
        # other sysctl with whitespace in it consistently uses 1 tab. Lets
        # allow our users to put a space or tab between multi-value sysctls
        # and have salt not try to set it every single time.
        if isinstance(comps[1], string_types) and ' ' in comps[1]:
            comps[1] = re.sub(r'\s+', '\t', comps[1])
        # Do the same thing for the value 'just in case'
        if isinstance(value, string_types) and ' ' in value:
            value = re.sub(r'\s+', '\t', value)
        # NOTE(review): this guard looks unreachable — split('=', 1) on a
        # line containing '=' always yields two parts; confirm before
        # removing.
        if len(comps) < 2:
            nlines.append(line)
            continue
        if name == comps[0]:
            # This is the line to edit
            if six.text_type(comps[1]) == six.text_type(value):
                # It is correct in the config, check if it is correct in /proc
                if six.text_type(get(name)) != six.text_type(value):
                    assign(name, value)
                    return 'Updated'
                else:
                    return 'Already set'
            nlines.append('{0} = {1}\n'.format(name, value))
            edited = True
            continue
        else:
            nlines.append(line)
    # Key was never seen in the file: append it at the end.
    if not edited:
        nlines.append('{0} = {1}\n'.format(name, value))
    try:
        with salt.utils.files.fopen(config, 'wb') as _fh:
            _fh.writelines(salt.utils.data.encode(nlines))
    except (IOError, OSError):
        msg = 'Could not write to file: {0}'
        raise CommandExecutionError(msg.format(config))
    # Persisted to disk; now apply the value to the running kernel too.
    assign(name, value)
    return 'Updated'
async def setup(self):
    """Set up the connection with automatic retry."""
    while True:
        def protocol_factory():
            return SW16Protocol(
                self,
                disconnect_callback=self.handle_disconnect_callback,
                loop=self.loop,
                logger=self.logger)

        connection = self.loop.create_connection(
            protocol_factory, host=self.host, port=self.port)
        try:
            self.transport, self.protocol = await asyncio.wait_for(
                connection, timeout=self.timeout)
        except asyncio.TimeoutError:
            self.logger.warning("Could not connect due to timeout error.")
        except OSError as exc:
            self.logger.warning("Could not connect due to error: %s",
                                str(exc))
        else:
            # Connected: flag state, notify the reconnect hook, and stop
            # retrying.
            self.is_connected = True
            if self.reconnect_callback:
                self.reconnect_callback()
            return
        # Connection attempt failed; back off before the next try.
        await asyncio.sleep(self.reconnect_interval)