signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _delete(self, url):
    """Issue a DELETE request through the shared session, using the API prefix.

    :param url: path relative to the API prefix.
    :return: JSON response as produced by ``self._action``.
    """
    full_url = self._api_prefix + url
    response = self._session.delete(full_url)
    return self._action(response)
def get_additional_params(self, **params):
    """Filter the keyword arguments down to the parameters needed for polling.

    :param params: arbitrary keyword arguments supplied by the caller.
    :return: dict containing only the keys recognised by the polling endpoint.
    """
    # TODO: Move these params to their own vertical if needed.
    # frozenset gives O(1) membership tests instead of scanning a list
    # for every keyword argument.
    polling_params = frozenset((
        'locationschema', 'carrierschema', 'sorttype', 'sortorder',
        'originairports', 'destinationairports', 'stops',
        'outbounddeparttime', 'outbounddepartstarttime',
        'outbounddepartendtime', 'inbounddeparttime',
        'inbounddepartstarttime', 'inbounddepartendtime',
        'duration', 'includecarriers', 'excludecarriers',
    ))
    return {key: value for key, value in params.items() if key in polling_params}
def scrape(language, method, word, *args, **kwargs):
    '''Uses custom scrapers and calls the provided method.

    :param language: language passed to the ``Scrape`` constructor.
    :param method: name of the scraper method to invoke.
    :param word: word passed to the ``Scrape`` constructor.
    :raises NotImplementedError: if the scraper has no callable ``method``.
    '''
    scraper = Scrape(language, word)
    function = getattr(scraper, method, None)
    if callable(function):
        return function(*args, **kwargs)
    # Bug fix: a missing attribute previously fell off the end and returned
    # None silently; it is now reported the same way as a non-callable one.
    raise NotImplementedError('The method ' + method + '() is not implemented so far.')
def create_work_item(self, document, project, type, validate_only=None, bypass_rules=None, suppress_notifications=None, expand=None):
    """CreateWorkItem.
    [Preview API] Creates a single work item.
    :param document: The JSON Patch document representing the work item
    :param str project: Project ID or project name
    :param str type: The work item type of the work item to create
    :param bool validate_only: Indicate if you only want to validate the changes without saving the work item
    :param bool bypass_rules: Do not enforce the work item type rules on this update
    :param bool suppress_notifications: Do not fire any notifications for this change
    :param str expand: The expand parameters for work item attributes. Possible options are {None, Relations, Fields, Links, All}.
    :rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if type is not None:
        route_values['type'] = self._serialize.url('type', type, 'str')
    # Serialize each optional flag only when the caller supplied it.
    query_parameters = {}
    optional_queries = (
        ('validateOnly', 'validate_only', validate_only, 'bool'),
        ('bypassRules', 'bypass_rules', bypass_rules, 'bool'),
        ('suppressNotifications', 'suppress_notifications', suppress_notifications, 'bool'),
        ('$expand', 'expand', expand, 'str'),
    )
    for wire_name, attr_name, value, value_kind in optional_queries:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(attr_name, value, value_kind)
    content = self._serialize.body(document, '[JsonPatchOperation]')
    response = self._send(
        http_method='POST',
        location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
        version='5.1-preview.3',
        route_values=route_values,
        query_parameters=query_parameters,
        content=content,
        media_type='application/json-patch+json',
    )
    return self._deserialize('WorkItem', response)
def price_change(self):
    """Return the price-change text for this listing, or None.

    Reads the 'price-changes-sr' div from the search-result snippet when
    present, otherwise from the full ad page. Returns None when the element
    is missing or any parsing error occurs.
    """
    try:
        if self._data_from_search:
            source = self._data_from_search
        else:
            source = self._ad_page_content
        return source.find('div', {'class': 'price-changes-sr'}).text
    except Exception as e:
        if self._debug:
            # Bug fix: e.args[0] raised IndexError for exceptions created
            # without arguments; let logging format the exception instead.
            logging.error("Error getting price_change. Error message: %s", e)
        return
def _init_dut(self):
    """Initialize the DUT.

    DUT will be restarted and openthread will be started.
    """
    if self.auto_dut:
        # Automatic DUT mode: no explicit controller is attached.
        self.dut = None
        return
    dut_port = settings.DUT_DEVICE[0]
    self.dut = OpenThreadController(dut_port)
def _determine_current_dimension_size(self, dim_name, max_size):
    """Helper method to determine the current size of a dimension."""
    # Fixed-size dimension: the declared size wins, nothing to compute.
    if self.dimensions[dim_name] is not None:
        return max_size
    # Walk up the group hierarchy until the dimension scale is found
    # (iterative form of the original recursive lookup).
    group = self._h5group
    while dim_name not in group:
        group = group.parent
    dim_variable = group[dim_name]
    if "REFERENCE_LIST" not in dim_variable.attrs:
        return max_size
    # Inspect every variable attached to this dimension and keep the
    # largest extent seen along the matching axis.
    root = self._h5group["/"]
    for ref, _ in dim_variable.attrs["REFERENCE_LIST"]:
        var = root[ref]
        for axis, var_dim in enumerate(var.dims):
            if _name_from_dimension(var_dim) == dim_name:
                max_size = max(var.shape[axis], max_size)
    return max_size
def walk(self, dispatcher, node):
    """Walk through the node with a custom dispatcher for extraction of
    details that are required."""
    deferrable_handlers = {
        Declare: self.declare,
        Resolve: self.register_reference,
    }
    layout_handlers = {
        PushScope: self.push_scope,
        PopScope: self.pop_scope,
        PushCatch: self.push_catch,
        # should really be different, but given that the mechanism is
        # within the same tree, the only difference would be sanity check
        # which should have been tested in the first place in the
        # primitives anyway.
        PopCatch: self.pop_scope,
    }
    if not self.shadow_funcname:
        layout_handlers[ResolveFuncName] = self.shadow_reference
    scoped_dispatcher = Dispatcher(
        definitions=dict(dispatcher),
        token_handler=None,
        layout_handlers=layout_handlers,
        deferrable_handlers=deferrable_handlers,
    )
    # `walk` here resolves to the module-level walk function, not this method.
    return list(walk(scoped_dispatcher, node))
def resume_from(self):
    """Get a timestamp representing the position just after the last
    written gauge."""
    last_written = self.driver.get_writer_position(self.config.writer_name)
    if not last_written:
        # Nothing written yet (or a zero/empty position): start at zero.
        return 0
    return last_written + self.config.resolution
def handle_delete_user(self, req):
    """Handles the DELETE v2/<account>/<user> call for deleting a user from
    an account.

    Can only be called by an account .admin.

    :param req: The swob.Request to process.
    :returns: swob.Response, 2xx on success.
    """
    # Validate path info: exactly two non-empty segments, neither starting
    # with '.' (reserved names).
    account = req.path_info_pop()
    user = req.path_info_pop()
    if req.path_info or not account or account[0] == '.' or not user or user[0] == '.':
        return HTTPBadRequest(request=req)
    # if user to be deleted is reseller_admin, then requesting
    # user must be the super_admin
    is_reseller_admin = self.is_user_reseller_admin(req, account, user)
    if not is_reseller_admin and not req.credentials_valid:
        # if user to be deleted can't be found, return 404
        return HTTPNotFound(request=req)
    elif is_reseller_admin and not self.is_super_admin(req):
        return HTTPForbidden(request=req)
    if not self.is_account_admin(req, account):
        return self.denied_response(req)
    # Delete the user's existing token, if any: first HEAD the user object
    # to read its stored token metadata.
    path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
    resp = self.make_pre_authed_request(
        req.environ, 'HEAD', path).get_response(self.app)
    if resp.status_int == 404:
        return HTTPNotFound(request=req)
    elif resp.status_int // 100 != 2:
        raise Exception('Could not obtain user details: %s %s' % (path, resp.status))
    candidate_token = resp.headers.get('x-object-meta-auth-token')
    if candidate_token:
        # Tokens are stored under a concealed name, sharded into
        # .token_<last char> containers.
        object_name = self._get_concealed_token(candidate_token)
        path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, object_name[-1], object_name))
        resp = self.make_pre_authed_request(
            req.environ, 'DELETE', path).get_response(self.app)
        # 404 is fine here: the token may have already expired or been removed.
        if resp.status_int // 100 != 2 and resp.status_int != 404:
            raise Exception('Could not delete possibly existing token: '
                            '%s %s' % (path, resp.status))
    # Delete the user entry itself.
    path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
    resp = self.make_pre_authed_request(
        req.environ, 'DELETE', path).get_response(self.app)
    if resp.status_int // 100 != 2 and resp.status_int != 404:
        raise Exception('Could not delete the user object: %s %s' % (path, resp.status))
    return HTTPNoContent(request=req)
def get_search_form(self):
    """Return list of forms based on the models described by
    ``get_dict_for_forms()``, pre-populated from the request's GET data."""
    magic_dico_form = self.get_dict_for_forms()
    forms = []
    # List of (key, values) pairs from the query string; each value is a list
    # because GET parameters may repeat.
    initial = list(self.request.GET.lists())
    for key, value in magic_dico_form.items():
        form = Form()
        model = value["model"]
        # Models with no searchable fields produce no form.
        if not value["fields"]:
            continue
        for field in value["fields"]:
            formfield = get_formfield(model, field)
            formfield.widget.attrs.update({'class': self.css_class})
            form.fields.update({field: formfield})
        # Extract initial data for this model: GET keys are prefixed
        # "<ModelName>-<field>", so a successful split yields 2 parts.
        initial_tmp = {}
        for k, vals in initial:
            tmp_list = k.split(model.__name__ + "-")
            if len(tmp_list) == 2:
                # Single value: unwrap it; multiple values: drop empties.
                list_val_tmp = vals[0] if len(vals) == 1 else [val for val in vals if val != '']
                initial_tmp[tmp_list[-1]] = list_val_tmp
        form.initial = initial_tmp
        # Prefix matches the GET-key prefix used above.
        form.prefix = model.__name__
        forms.append(form)
    # Stable ordering by model name so the rendered form list is deterministic.
    return sorted(forms, key=lambda form: form.prefix)
def astimezone(self, tz):
    """Convert this :py:class:`khayyam.JalaliDatetime` to time zone *tz*.

    *tz* must be an instance of a :py:class:`datetime.tzinfo` subclass whose
    :py:meth:`datetime.tzinfo.utcoffset` and :py:meth:`datetime.tzinfo.dst`
    methods do not return :py:obj:`None`, and *self* must be aware
    (``self.tzinfo`` is not ``None`` and ``self.utcoffset()`` is not
    ``None``).

    If ``self.tzinfo is tz``, *self* is returned unchanged; no date or time
    adjustment is performed. Otherwise the result is local time in zone
    *tz* representing the same UTC instant as *self*. To merely attach or
    remove a time zone without adjusting the data, use
    ``replace(tzinfo=...)`` instead.

    :param tz: :py:class:`datetime.tzinfo`
    :rtype: :py:class:`khayyam.JalaliDatetime`
    """
    if self.tzinfo is tz:
        return self
    # For an aware datetime, normalize to UTC first; a naive one is assumed
    # to already be in UTC (mirrors datetime.astimezone's classic recipe).
    base = self - self.utcoffset() if self.tzinfo else self
    return tz.fromutc(base.replace(tzinfo=tz))
def sequence_mutation_summary(self, alignment_ids=None, alignment_type=None):
    """Summarize all mutations found in the sequence_alignments attribute.

    Returns 2 dictionaries, single_counter and fingerprint_counter.

    single_counter: maps a point mutation ``(orig, pos, new)`` to the list of
    genes/strains carrying it. Example::

        ('A', 24, 'V'): ['Strain1', 'Strain2', 'Strain4'],
        ('R', 33, 'T'): ['Strain2']

    fingerprint_counter: maps a tuple of point mutations (a "fingerprint")
    to the list of genes/strains carrying that exact combination. Example::

        (('A', 24, 'V'), ('R', 33, 'T')): ['Strain2'],
        (('A', 24, 'V'),): ['Strain1', 'Strain4']

    Args:
        alignment_ids (str, list): Specified alignment ID or IDs to use
        alignment_type (str): Specified alignment type contained in the
            ``annotation`` field of an alignment object; ``seqalign`` or
            ``structalign`` are the current types.

    Returns:
        dict, dict: single_counter, fingerprint_counter
    """
    if alignment_ids:
        # Bug fix: the normalized list was previously computed and discarded,
        # so a single string ID fell through to substring membership tests.
        alignment_ids = ssbio.utils.force_list(alignment_ids)
    if len(self.sequence_alignments) == 0:
        log.error('{}: no sequence alignments'.format(self.id))
        return {}, {}
    fingerprint_counter = defaultdict(list)
    single_counter = defaultdict(list)
    for alignment in self.sequence_alignments:
        # Ignore alignments not in the provided list of identifiers.
        if alignment_ids and alignment.id not in alignment_ids:
            continue
        # Ignore alignments of the wrong type, when a type is specified.
        if alignment_type and alignment.annotations['ssbio_type'] != alignment_type:
            continue
        other_sequence = alignment.annotations['b_seq']
        mutations = alignment.annotations['mutations']
        if mutations:
            # Turn this list of mutations into a tuple so it can be a
            # dictionary key.
            mutations = tuple(tuple(x) for x in mutations)
            fingerprint_counter[mutations].append(other_sequence)
            for m in mutations:
                single_counter[m].append(other_sequence)
    return dict(single_counter), dict(fingerprint_counter)
def edit(self, entry, name, mark=False):
    """Edit an entry (file or directory)

    :param entry: :class:`.BaseFile` object
    :param str name: new name for the entry
    :param bool mark: whether to bookmark the entry
    """
    if isinstance(entry, File):
        fcid = entry.fid
    elif isinstance(entry, Directory):
        fcid = entry.cid
    else:
        raise APIError('Invalid BaseFile instance for an entry.')
    # The API expects the bookmark flag as 0/1; only a literal True marks.
    is_mark = 1 if mark is True else 0
    if not self._req_files_edit(fcid, name, is_mark):
        raise APIError('Error editing the entry.')
    entry.reload()
    return True
def temp_file_context(raw_dump_path, logger=None):
    """this contextmanager implements conditionally deleting a pathname at
    the end of a context if the pathname indicates that it is a temp file
    by having the word 'TEMPORARY' embedded in it."""
    try:
        yield raw_dump_path
    finally:
        # Only paths flagged as temporary are cleaned up.
        if 'TEMPORARY' in raw_dump_path:
            try:
                os.unlink(raw_dump_path)
            except OSError:
                active_logger = logger if logger is not None else FakeLogger()
                active_logger.warning(
                    'unable to delete %s. manual deletion is required.',
                    raw_dump_path,
                    exc_info=True,
                )
def commit(*args):
    """Commit changes to the fragments repository, limited to FILENAME(s) if specified."""
    parser = argparse.ArgumentParser(
        prog="%s %s" % (__package__, commit.__name__),
        description=commit.__doc__,
    )
    parser.add_argument('FILENAME', help="file(s) to commit", nargs="*", default=['.'])
    args = parser.parse_args(args)
    config = FragmentsConfig()
    for status, curr_path in _iterate_over_files(args.FILENAME, config, statuses='MAD'):
        display_path = os.path.relpath(curr_path)
        key = os.path.relpath(curr_path, config.root)
        if key not in config['files']:
            yield "Could not commit '%s' because it is not being followed" % display_path
            continue
        if status in 'MA':
            # Modified or added: copy the working file into the repository.
            repo_path = os.path.join(config.directory, config['files'][key])
            with _smart_open(repo_path, 'w') as repo_file:
                with _smart_open(curr_path, 'r') as curr_file:
                    repo_file.write(curr_file.read())
            # Mirror the working copy's access/modification times
            # (stat fields 7 and 8).
            os.utime(repo_path, os.stat(curr_path)[7:9])
            yield "'%s' committed" % display_path
        elif status == 'D':
            yield "Could not commit '%s' because it has been removed, instead revert or forget it" % display_path
        elif status == ' ':
            yield "Could not commit '%s' because it has not been changed" % display_path
def _read_pcm_information(self):
    """Parses information from PCM solvent calculations.

    Populates ``self.data`` with the hartree value of each PCM energy
    component, or the component's "missing" default when absent.
    """
    # Every component line has the shape:
    #   <label> = <hartree> hartree = <kcal/mol> kcal/mol
    # Each pattern captures (hartree, kcal/mol); only hartree is stored.
    line_template = r"\s*{}\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*"
    temp_dict = read_pattern(
        self.text,
        {
            "g_electrostatic": line_template.format("G_electrostatic"),
            "g_cavitation": line_template.format("G_cavitation"),
            "g_dispersion": line_template.format("G_dispersion"),
            "g_repulsion": line_template.format("G_repulsion"),
            "total_contribution_pcm": line_template.format("Total"),
        },
    )
    # "total_contribution_pcm" has historically defaulted to an empty list
    # rather than None; preserved for backward compatibility.
    missing_defaults = {
        "g_electrostatic": None,
        "g_cavitation": None,
        "g_dispersion": None,
        "g_repulsion": None,
        "total_contribution_pcm": [],
    }
    for key, missing_value in missing_defaults.items():
        matches = temp_dict.get(key)
        if matches is None:
            self.data[key] = missing_value
        else:
            # First match, first capture group: the hartree value.
            self.data[key] = float(matches[0][0])
def index_list(self):
    '''Lists indices'''
    request = self.session
    # Elasticsearch cluster-state endpoint for this node.
    url = 'http://%s:%s/_cluster/state/' % (self.host, self.port)
    response = request.get(url)
    # NOTE(review): the status code is read from the *session* object, not
    # from `response` -- confirm the session exposes the last response's
    # status; otherwise this likely should be `response.status_code`.
    if request.status_code == 200:
        # NOTE(review): `response` is used as a mapping here, so `.get()` is
        # assumed to return parsed JSON -- verify against the session class.
        return response.get('metadata', {}).get('indices', {}).keys()
    else:
        # Non-200: hand the raw response back to the caller.
        return response
def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """List all BuildConfigurations associated with the given Product."""
    raw_data = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
    if not raw_data:
        # No data: return None, matching the original implicit behaviour.
        return None
    return utils.format_json_list(raw_data)
def parseprint(code, filename="<string>", mode="exec", **kwargs):
    """Parse some code from a string and pretty-print it."""
    tree = parse(code, mode=mode)  # An ode to the code
    print(dump(tree, **kwargs))
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
    """Parse template format csv file and create elements dict.

    :param infile: path of the template CSV file
    :param str delimiter: CSV field delimiter
    :param str decimal_sep: decimal separator used for numeric fields
    """
    keys = ('name', 'type', 'x1', 'y1', 'x2', 'y2', 'font', 'size',
            'bold', 'italic', 'underline', 'foreground', 'background',
            'align', 'text', 'priority', 'multiline')
    self.elements = []
    self.pg_no = 0
    # csv expects a binary handle on Python 2, a text handle on Python 3.
    mode = 'r' if PY3K else 'rb'
    # Bug fix: the file handle was previously never closed; `with`
    # guarantees it is released even when a row fails to parse.
    with open(infile, mode) as f:
        for row in csv.reader(f, delimiter=delimiter):
            kargs = {}
            for i, v in enumerate(row):
                # Normalize the decimal separator, but not inside
                # quoted string fields (those start with a ').
                if not v.startswith("'") and decimal_sep != ".":
                    v = v.replace(decimal_sep, ".")
                if v == '':
                    v = None
                else:
                    # SECURITY: eval() executes arbitrary expressions from
                    # the CSV; only load template files from trusted sources.
                    v = eval(v.strip())
                kargs[keys[i]] = v
            self.elements.append(kargs)
    self.keys = [v['name'].lower() for v in self.elements]
def elife_references_rewrite_json():
    """Here is the DOI and references json replacements data for elife.

    Returns a dict mapping an article DOI to a per-bib-id dict of JSON
    fragments that override the corresponding reference fields.
    """
    # Per-DOI reference field corrections, keyed by bib id.
    references_rewrite_json = {}
    references_rewrite_json["10.7554/eLife.00051"] = {"bib25": {"date": "2012"}}
    references_rewrite_json["10.7554/eLife.00278"] = {"bib11": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.00444"] = {"bib2": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.00569"] = {"bib74": {"date": "1996"}}
    references_rewrite_json["10.7554/eLife.00592"] = {"bib8": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.00633"] = {"bib38": {"date": "2004"}}
    references_rewrite_json["10.7554/eLife.00646"] = {"bib1": {"date": "2012"}}
    references_rewrite_json["10.7554/eLife.00813"] = {"bib33": {"date": "2007"}}
    references_rewrite_json["10.7554/eLife.01355"] = {"bib9": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.01530"] = {"bib12": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.01681"] = {"bib5": {"date": "2000"}}
    references_rewrite_json["10.7554/eLife.01917"] = {"bib35": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.02030"] = {"bib53": {"date": "2013"}, "bib56": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.02076"] = {"bib93a": {"date": "1990"}}
    references_rewrite_json["10.7554/eLife.02217"] = {"bib27": {"date": "2009"}}
    references_rewrite_json["10.7554/eLife.02535"] = {"bib12": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.02862"] = {"bib8": {"date": "2010"}}
    references_rewrite_json["10.7554/eLife.03711"] = {"bib35": {"date": "2012"}}
    references_rewrite_json["10.7554/eLife.03819"] = {"bib37": {"date": "2008"}}
    references_rewrite_json["10.7554/eLife.04069"] = {"bib8": {"date": "2011"}}
    references_rewrite_json["10.7554/eLife.04247"] = {"bib19a": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.04333"] = {"bib3": {"date": "1859"}, "bib37": {"date": "1959"}}
    references_rewrite_json["10.7554/eLife.04478"] = {"bib49": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.04580"] = {"bib139": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.05042"] = {"bib78": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.05323"] = {"bib102": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.05423"] = {"bib102": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.05503"] = {"bib94": {"date": "2016"}}
    references_rewrite_json["10.7554/eLife.05849"] = {"bib82": {"date": "2005"}}
    references_rewrite_json["10.7554/eLife.06072"] = {"bib17": {"date": "2003"}}
    references_rewrite_json["10.7554/eLife.06315"] = {"bib19": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.06426"] = {"bib39": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.07361"] = {"bib76": {"date": "2011"}}
    references_rewrite_json["10.7554/eLife.07460"] = {"bib1": {"date": "2013"}, "bib2": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.08500"] = {"bib55": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.09066"] = {"bib46": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.09100"] = {"bib50": {"date": "2011"}}
    references_rewrite_json["10.7554/eLife.09148"] = {"bib47": {"articleTitle": "97–104"}, "bib59": {"articleTitle": "1913–1918"}}
    references_rewrite_json["10.7554/eLife.09186"] = {"bib31": {"date": "2015"}, "bib54": {"date": "2014"}, "bib56": {"date": "2014"}, "bib65": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.09215"] = {"bib5": {"date": "2012"}}
    # OrderedDict used where the output key order must be preserved.
    references_rewrite_json["10.7554/eLife.09520"] = {"bib35": OrderedDict([
        ("conference", OrderedDict([("name", ["WHO Expert Committee on Malaria"])])),
        ("articleTitle", "WHO Expert Committee on Malaria [meeting held in Geneva from 19 to 30 October 1970]: fifteenth report"),
        ("publisher", OrderedDict([
            ("name", ["World Health Organization"]),
            ("address", OrderedDict([
                ("formatted", ["Geneva"]),
                ("components", OrderedDict([("locality", ["Geneva"])])),
            ])),
        ])),
    ])}
    references_rewrite_json["10.7554/eLife.09579"] = {"bib19": {"date": "2007"}, "bib49": {"date": "2002"}}
    references_rewrite_json["10.7554/eLife.09600"] = {"bib13": {"date": "2009"}}
    references_rewrite_json["10.7554/eLife.09672"] = {"bib25": {"conference": {"name": ["Seventeenth Meeting of the RBM Partnership Monitoring and Evaluation Reference Group (MERG)"]}}}
    references_rewrite_json["10.7554/eLife.09771"] = {"bib22": {"date": "2012"}}
    references_rewrite_json["10.7554/eLife.09972"] = {"bib61": {"date": "2007", "discriminator": "a"}}
    references_rewrite_json["10.7554/eLife.09977"] = {"bib41": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.10032"] = {"bib45": {"date": "2016"}}
    references_rewrite_json["10.7554/eLife.10042"] = {"bib14": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.10070"] = {"bib15": {"date": "2015"}, "bib38": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.10222"] = {"bib30": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.10670"] = {"bib7": {"date": "2015"}, "bib8": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.10781"] = {"bib32": {"date": "2003"}}
    references_rewrite_json["10.7554/eLife.11273"] = {"bib43": {"date": "2004"}}
    references_rewrite_json["10.7554/eLife.11305"] = {"bib68": {"date": "2000"}}
    references_rewrite_json["10.7554/eLife.11416"] = {"bib22": {"date": "1997"}}
    references_rewrite_json["10.7554/eLife.11860"] = {"bib48": {"title": "Light-switchable gene expression system"}}
    references_rewrite_json["10.7554/eLife.12401"] = {"bib25": {"date": "2011"}}
    references_rewrite_json["10.7554/eLife.12366"] = {"bib10": {"date": "2008"}}
    references_rewrite_json["10.7554/eLife.12703"] = {"bib27": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.12735"] = {"bib35": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.12830"] = {"bib118": {"date": "1982"}}
    references_rewrite_json["10.7554/eLife.13133"] = {"bib11": {"date": "2011"}}
    references_rewrite_json["10.7554/eLife.13152"] = {"bib25": {"date": "2000"}}
    references_rewrite_json["10.7554/eLife.13195"] = {"bib6": {"date": "2013"}, "bib12": {"date": "2003"}}
    references_rewrite_json["10.7554/eLife.13479"] = {"bib5": {"date": "2016"}}
    references_rewrite_json["10.7554/eLife.13463"] = {"bib15": {"date": "2016"}}
    references_rewrite_json["10.7554/eLife.14119"] = {"bib40": {"date": "2007"}}
    references_rewrite_json["10.7554/eLife.14169"] = {"bib6": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.14523"] = {"bib7": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.15272"] = {"bib78": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.15504"] = {"bib67": {"isbn": "9780198524304"}}
    references_rewrite_json["10.7554/eLife.16105"] = {"bib2": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.16349"] = {"bib68": {"date": "2005"}}
    references_rewrite_json["10.7554/eLife.16394"] = {"bib6": {"type": "thesis", "author": {"type": "person", "name": {"preferred": "B Berret", "index": "Berret, B"}}, "publisher": {"name": ["Université de Bourgogne"]}}}
    references_rewrite_json["10.7554/eLife.16443"] = {"bib58": {"date": "1987"}}
    references_rewrite_json["10.7554/eLife.16764"] = {"bib4": {"date": "2013"}}
    references_rewrite_json["10.7554/eLife.17092"] = {"bib102": {"date": "1980"}}
    references_rewrite_json["10.7554/eLife.18044"] = {"bib25": {"date": "2005"}}
    references_rewrite_json["10.7554/eLife.18370"] = {"bib1": {"date": "2006"}}
    references_rewrite_json["10.7554/eLife.18425"] = {"bib54": {"date": "2014"}}
    references_rewrite_json["10.7554/eLife.18683"] = {"bib47": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.19532"] = {"bib27": {"date": "2015"}}
    references_rewrite_json["10.7554/eLife.19545"] = {"bib51": {"date": "1996"}}
    references_rewrite_json["10.7554/eLife.19571"] = {"bib56": {"date": "2016"}}
    references_rewrite_json["10.7554/eLife.20352"] = {"bib53": {"country": "United States"}}
    references_rewrite_json["10.7554/eLife.21864"] = {"bib2": {"date": "2016-10-24"}}
    references_rewrite_json["10.7554/eLife.20522"] = {"bib42": {"date": "2016"}, "bib110": {"date": "1996"}}
    references_rewrite_json["10.7554/eLife.22053"] = {"bib123": {"date": "2016"}}
    # Reference authors data to replace, processed further below into json.
    # Each row is (doi, bib id, author field name, list of author dicts);
    # an author dict holds surname/given-names, or collab for a group author.
    references_authors = []
    references_authors.append(("10.7554/eLife.00036", "bib8", "authors", [{"surname": "Butler", "given-names": "H"}, {"surname": "Juurlink", "given-names": "BHJ"}]))
    references_authors.append(("10.7554/eLife.00036", "bib30", "authors", [{"surname": "Joyner", "given-names": "AL"}]))
    references_authors.append(("10.7554/eLife.00048", "bib15", "authors", [{"surname": "Guthrie", "given-names": "C"}, {"surname": "Fink", "given-names": "GR"}]))
    references_authors.append(("10.7554/eLife.00051", "bib21", "authors", [{"surname": "Jamison", "given-names": "DT"}, {"surname": "Breman", "given-names": "JG"}, {"surname": "Measham", "given-names": "AR"}, {"surname": "Alleyne", "given-names": "G"}, {"surname": "Claeson", "given-names": "M"}, {"surname": "Evans", "given-names": "DB"}, {"surname": "Jha", "given-names": "P"}, {"surname": "Mills", "given-names": "A"}, {"surname": "Musgrove", "given-names": "P"}]))
    references_authors.append(("10.7554/eLife.00051", "bib36", "authors", [{"surname": "Rogers", "given-names": "RG"}, {"surname": "Crimmins", "given-names": "EM"}]))
    references_authors.append(("10.7554/eLife.00668", "bib39", "authors", [{"surname": "Rice", "given-names": "SA"}]))
    references_authors.append(("10.7554/eLife.01730", "bib75", "authors", [{"collab": "Look AHEAD Research Group"}]))
    references_authors.append(("10.7554/eLife.03714", "bib64", "authors", [{"surname": "Otwinowski", "given-names": "Z"}, {"surname": "Minor", "given-names": "W"}]))
    references_authors.append(("10.7554/eLife.04220", "bib31", "authors", [{"surname": "Tishby", "given-names": "N"}, {"surname": "Polani", "given-names": "D"}]))
    references_authors.append(("10.7554/eLife.04395", "bib67", "authors", [{"surname": "King", "given-names": "AMQ"}, {"surname": "Adams", "given-names": "MJ"}, {"surname": "Carstens", "given-names": "EB"}, {"surname": "Lefkowitz", "given-names": "E"}]))
    references_authors.append(("10.7554/eLife.04449", "bib62", "authors", [{"surname": "Shaham", "given-names": "S"}]))
    references_authors.append(("10.7554/eLife.04659", "bib57", "authors", [{"surname": "Sambrook", "given-names": "J"}, {"surname": "Russell", "given-names": "TW"}]))
    references_authors.append(("10.7554/eLife.05423", "bib4", "authors", [{"surname": "Birkhead", "given-names": "TR"}, {"surname": "Møller", "given-names": "AP"}]))
    references_authors.append(("10.7554/eLife.05423", "bib5", "authors", [{"surname": "Birkhead", "given-names": "TR"}, {"surname": "Møller", "given-names": "AP"}]))
    references_authors.append(("10.7554/eLife.05423", "bib90", "authors", [{"surname": "Smith", "given-names": "RL"}]))
    references_authors.append(("10.7554/eLife.05564", "bib39", "authors", [{"surname": "Pattyn", "given-names": "S"}]))
    references_authors.append(("10.7554/eLife.05959", "bib76", "authors", [{"surname": "Macholán", "given-names": "M"}, {"surname": "Baird", "given-names": "SJE"}, {"surname": "Munclinger", "given-names": "P"}, {"surname": "Piálek", "given-names": "J"}]))
    references_authors.append(("10.7554/eLife.06565", "bib1", "authors", [{"surname": "Ahringer", "given-names": "J"}]))
    references_authors.append(("10.7554/eLife.06576", "bib57", "authors", [{"surname": "Moller", "given-names": "AR"}]))
    references_authors.append(("10.7554/eLife.06813", "bib54", "authors", [{"surname": "King", "given-names": "JA"}]))
    references_authors.append(("10.7554/eLife.06813", "bib55", "authors", [{"surname": "Kirkland", "given-names": "Gl"}, {"surname": "Layne", "given-names": "JN"}]))
    references_authors.append(("10.7554/eLife.07460", "bib1", "authors", [{"surname": "Rallapalli", "given-names": "Ghanasyam"}]))
    references_authors.append(("10.7554/eLife.07460", "bib2", "authors", [{"surname": "Bazyl", "given-names": "Steven"}]))
    references_authors.append(("10.7554/eLife.07847", "bib40", "authors", [{"collab": "Nature Immunology"}]))
    references_authors.append(("10.7554/eLife.09666", "bib9", "authors", [{"surname": "Schüler", "given-names": "D"}]))
    references_authors.append(("10.7554/eLife.09868", "bib5", "authors", [{"surname": "Barlow", "given-names": "HB"}]))
    references_authors.append(("10.7554/eLife.10222", "bib30", "authors", [{"collab": "PharmaMar"}]))
    references_authors.append(("10.7554/eLife.11860", "bib48", "authors", [{"surname": "Yang", "given-names": "Y"}, {"surname": "Wang", "given-names": "X"}, {"surname": "Chen", "given-names": "X"}, ]))
    references_authors.append(("10.7554/eLife.11945", "bib23", "authors", [{"surname": "Glimcher", "given-names": "P"}, {"surname": "Fehr", "given-names": "E"}]))
    references_authors.append(("10.7554/eLife.13135", "bib26", "authors", [{"surname": "Ivanova", "given-names": "S"}, {"surname": "Herbreteau", "given-names": "B"}, {"surname": "Blasdell", "given-names": "K"}, {"surname": "Chaval", "given-names": "Y"}, {"surname": "Buchy", "given-names": "P"}, {"surname": "Guillard", "given-names": "B"}, {"surname": "Morand", "given-names": "S"}, ]))
    references_authors.append(("10.7554/eLife.13135", "bib27", "authors", [{"surname": "King", "given-names": "AMQ"}, {"surname": "Adams", "given-names": "J"}, {"surname": "Carstens", "given-names": "EB"}, {"surname": "Lefkowitz", "given-names": "EJ"}]))
    references_authors.append(("10.7554/eLife.14188", "bib1", "authors", [{"collab": "Avisoft Bioacoustics"}]))
    references_authors.append(("10.7554/eLife.17716", "bib7", "authors", [{"collab": "World Health Organization"}]))
    references_authors.append(("10.7554/eLife.17956", "bib4", "authors", [{"surname": "Barrett", "given-names": "SCH"}]))
    references_authors.append(("10.7554/eLife.18109", "bib39", "authors", [{"surname": "Weber", "given-names": "EH"}]))
    # Now turn the authors data into the json
    for author_row in references_authors:
        ref_json = OrderedDict()
        doi, id, author_type, authors = author_row
        # if id not in ref_json:
        ref_json[id] = OrderedDict()
        ref_json[id][author_type] = []
        for ref_author in authors:
            # Group authors ("collab") and persons serialize differently.
            if "collab" in ref_author:
                author_json = elifetools.utils_html.references_author_collab(ref_author)
            else:
                author_json = elifetools.utils.references_author_person(ref_author)
            if author_json:
                ref_json[id][author_type].append(author_json)
        # Add to json array, and do not overwrite an existing rule of a
        # specific bib id (if present): merge at the bib-id level instead.
        if doi not in references_rewrite_json:
            references_rewrite_json[doi] = ref_json
        else:
            for key, value in iteritems(ref_json):
                if key not in references_rewrite_json[doi]:
                    references_rewrite_json[doi][key] = value
                else:
                    # Append dict items
                    for k, v in iteritems(value):
                        references_rewrite_json[doi][key][k] = v
    return references_rewrite_json
def split_message(message, max_length):
    """Yield *message* in pieces no longer than *max_length*.

    A message that already fits is emitted once, with trailing
    ``STRIPPED_CHARS`` removed; longer messages are word-wrapped with
    :mod:`textwrap` and yielded chunk by chunk (unstripped).
    """
    if len(message) <= max_length:
        yield message.rstrip(STRIPPED_CHARS)
        return
    # textwrap handles splitting on word boundaries where possible
    for chunk in textwrap.wrap(message, max_length):
        yield chunk
def hot_unplug_cpu(self, cpu):
    """Remove (hot-unplug) a CPU from the machine.

    in cpu of type int
        The CPU id to remove.

    :raises TypeError: if *cpu* is not a ``baseinteger``
    """
    if isinstance(cpu, baseinteger):
        self._call("hotUnplugCPU", in_p=[cpu])
    else:
        raise TypeError("cpu can only be an instance of type baseinteger")
def decr(self, conn, key, decrement=1):
    """Decrement an item's value in place.

    The stored data is treated as the decimal representation of a 64-bit
    unsigned integer.

    :param key: ``bytes``, key of the item the client wishes to change
    :param decrement: ``int``, amount by which to decrease the item
    :return: ``int`` new value of the item's data, or ``None`` if the
        item was not found
    """
    # the key must pass validation before being sent over the wire
    assert self._validate_key(key)
    new_value = yield from self._incr_decr(conn, b'decr', key, decrement)
    return new_value
def _imp_semsim(self, c1, c2):
    """Implicit semantic similarity between two concepts.

    The paper's metric iteratively computes string overlaps; here it is
    approximated by the inverse Sift4 distance (a fast Levenshtein
    approximation) between the concept descriptions, log-scaled.
    Whether this is an adequate substitute is still an open question.
    """
    first = self._description(c1)
    second = self._description(c2)
    # +1 keeps the denominator non-zero when the descriptions are identical
    raw = 1 / (sift4(first, second) + 1)
    return math.log(raw + 1)
def predict_compound_pairs(reaction, compound_formula, pair_weights=None,
                           weight_func=element_weight):
    """Predict compound pairs for a single reaction.

    Performs greedy matching on reaction compounds using a scoring function
    based on generalized Jaccard similarity, corrected by the weights in the
    given dictionary. Returns a tuple of a transfer dictionary and a
    dictionary of unbalanced compounds; the latter is empty only if the
    reaction is balanced.

    Args:
        reaction: :class:`psamm.reaction.Reaction`.
        compound_formula: Dictionary mapping compound IDs to
            :class:`psamm.formula.Formula`. Formulas must be flattened.
        pair_weights: Optional dictionary mapping ``(c1, c2)`` compound-ID
            pairs (left-hand side, right-hand side) to correction values
            multiplied into the Jaccard similarity. Missing pairs default
            to 1. Defaults to no corrections.
        weight_func: Weight function for the generalized Jaccard
            similarity; given an :class:`psamm.formula.Atom` or
            :class:`psamm.formula.Radical`, returns the corresponding weight.
    """
    # Avoid a mutable default argument: the original used ``pair_weights={}``
    # which is shared between calls.
    if pair_weights is None:
        pair_weights = {}

    def score_func(inst1, inst2):
        # Base similarity of the two formulas; None means "no match possible"
        score = _jaccard_similarity(inst1.formula, inst2.formula, weight_func)
        if score is None:
            return None
        pair = inst1.compound.name, inst2.compound.name
        pair_weight = pair_weights.get(pair, 1.0)
        return pair_weight * score

    return _match_greedily(reaction, compound_formula, score_func)
def email_confirm(request, confirmation_key,
                  template_name='accounts/email_confirm_fail.html',
                  success_url=None, extra_context=None):
    """Confirm an email address with a confirmation key.

    Runs ``AccountsSignup.objects.confirm_email``. On success the user's new
    e-mail address is set and the user is redirected to ``success_url`` (or
    the default completion view); on failure the ``template_name`` template
    is rendered.

    :param confirmation_key: SHA1 confirmation key used to verify the new
        email address.
    :param template_name: Template rendered when confirmation fails.
    :param success_url: URL redirected to after a successful confirmation.
    :param extra_context: Dictionary of variables passed to the template.
    """
    user = AccountsSignup.objects.confirm_email(confirmation_key)
    if not user:
        # Confirmation failed: show the failure template.
        if not extra_context:
            extra_context = dict()
        view = ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)
        return view(request)

    if accounts_settings.ACCOUNTS_USE_MESSAGES:
        messages.success(request,
                         _('Your email address has been changed.'),
                         fail_silently=True)
    if success_url:
        redirect_to = success_url
    else:
        redirect_to = reverse('accounts_email_confirm_complete',
                              kwargs={'username': user.username})
    return redirect(redirect_to)
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
    """Writes the summaries to csv-files.

    Args:
        frames: list of ``cellpy`` summary DataFrames.
        keys: list of indexes (typically run-names) for the different runs.
        selected_summaries: list defining which summary data to save.
        batch_dir: directory to save to.
        batch_name: the batch name (used for making the file-name(s)).

    Returns:
        A pandas DataFrame with the selected summaries, or None when there
        is nothing to save.
    """
    # Guard clauses: nothing to concatenate (messages previously read
    # "Could save summaries", which inverted their meaning).
    if not frames:
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no frames - aborting")
        return None
    if not keys:
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no keys - aborting")
        return None

    selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
    # One column level per run; level 1 carries the summary-variable name.
    summary_df = pd.concat(frames, keys=keys, axis=1)

    # saving the selected summaries
    for key, value in selected_summaries_dict.items():
        _summary_file_name = os.path.join(
            batch_dir, "summary_%s_%s.csv" % (key, batch_name))
        _summary_df = summary_df.iloc[
            :, summary_df.columns.get_level_values(1) == value]
        # TODO: tweak headers here (needs MultiIndex handling)
        _summary_df.to_csv(_summary_file_name, sep=";")
        logger.info("saved summary (%s) to:\n %s" % (key, _summary_file_name))
    logger.info("finished saving summaries")
    return summary_df
def usage(self, subcommand):
    """Return the *how to use command* text for *subcommand*."""
    text = '%prog ' + subcommand + ' [options]'
    if self.args:
        # append the free-form arguments description, when defined
        text = '%s %s' % (text, str(self.args))
    return text
def read(self, file, *, fs):
    """Iterate over *file*, yielding each line with its trailing
    end-of-line characters (``self.eol``) stripped."""
    for row in file:
        yield row.rstrip(self.eol)
def fit(self, x, y, deg, w=None, y_vs_x=True, times_sigma_reject=None,
        title=None, debugplot=0):
    """Update the line from a least-squares polynomial fit to data.

    Parameters
    ----------
    x : 1d numpy array, float
        X coordinates of the data being fitted.
    y : 1d numpy array, float
        Y coordinates of the data being fitted.
    deg : int
        Degree of the fitting polynomial.
    w : 1d numpy array, float, optional
        Weights to be employed in the polynomial fit.
    y_vs_x : bool
        If True, the fit is Y vs X; otherwise X vs Y is computed.
    times_sigma_reject : float, optional
        If not None, deviant points are rejected.
    title : str, optional
        Plot title.
    debugplot : int
        Debug/plot control: units digit != 0 enables plots, tens digit
        enables debug output; plots pause when the units digit is 2.
    """
    # --- protections ---
    if not isinstance(x, np.ndarray):
        raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
    if not isinstance(y, np.ndarray):
        raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
    if x.size != y.size:
        raise ValueError("x.size != y.size")
    if w is not None:
        if not isinstance(w, np.ndarray):
            raise ValueError("w=" + str(w) +
                             " must be None or a numpy.ndarray")
        if w.size != x.size:
            raise ValueError("w.size != x.size")
    # np.int was removed from NumPy >= 1.24 (the original check raised
    # AttributeError there); accept any Python or NumPy integer instead
    if not isinstance(deg, (int, np.integer)):
        raise ValueError("deg=" + str(deg) + " is not a valid integer")

    # update bounding box of the CCD line
    self.bb_nc1_orig = min(x)
    self.bb_nc2_orig = max(x)
    self.bb_ns1_orig = min(y)
    self.bb_ns2_orig = max(y)

    # compute polynomial from fit to data
    if y_vs_x:
        if times_sigma_reject is None:
            # fit using the minimal domain that covers the x data
            poly_funct = Polynomial.fit(x=x, y=y, deg=deg, w=w,
                                        domain=None, window=None,
                                        full=False)
            # restore the class domain
            self.poly_funct = Polynomial.cast(poly_funct)
            # display resulting fit when requested
            if debugplot % 10 != 0:
                polfit_residuals(x=x, y=y, deg=deg, title=title,
                                 debugplot=debugplot)
        else:
            self.poly_funct, yres_dum, reject_dum = \
                polfit_residuals_with_sigma_rejection(
                    x=x, y=y, deg=deg, title=title,
                    times_sigma_reject=times_sigma_reject,
                    debugplot=debugplot)
        # evaluate the fitted polynomial at the bounding-box edges
        self.xlower_line = self.bb_nc1_orig
        self.ylower_line = self.poly_funct(self.xlower_line)
        self.xupper_line = self.bb_nc2_orig
        self.yupper_line = self.poly_funct(self.xupper_line)
    else:
        if times_sigma_reject is None:
            # fit using the minimal domain that covers the y data
            poly_funct = Polynomial.fit(x=y, y=x, deg=deg, w=w,
                                        domain=None, window=None,
                                        full=False)
            # restore the class domain
            self.poly_funct = Polynomial.cast(poly_funct)
            # display resulting fit when requested
            if debugplot % 10 != 0:
                polfit_residuals(x=y, y=x, deg=deg, title=title,
                                 debugplot=debugplot)
        else:
            self.poly_funct, yres_dum, reject_dum = \
                polfit_residuals_with_sigma_rejection(
                    x=y, y=x, deg=deg, title=title,
                    times_sigma_reject=times_sigma_reject,
                    debugplot=debugplot)
        self.ylower_line = self.bb_ns1_orig
        self.xlower_line = self.poly_funct(self.ylower_line)
        self.yupper_line = self.bb_ns2_orig
        self.xupper_line = self.poly_funct(self.yupper_line)

    # CCD line has been defined
    self.available = True
def rename_edges(self, old_node_name, new_node_name):
    """Change references to a node in existing edges.

    Args:
        old_node_name (str): The old name for the node.
        new_node_name (str): The new name for the node.
    """
    graph = self.graph
    # Iterate over a snapshot: the original iterated graph.items() while
    # inserting/deleting keys, which raises "dictionary changed size during
    # iteration" (RuntimeError) on Python 3.
    for node, edges in list(graph.items()):
        if node == old_node_name:
            # rename the node itself
            graph[new_node_name] = copy(edges)
            del graph[old_node_name]
        else:
            # rewrite any edge pointing at the old name
            if old_node_name in edges:
                edges.remove(old_node_name)
                edges.add(new_node_name)
def _get_mine(fun):
    '''
    Return the mine function from all the targeted minions.

    Just a small helper to avoid redundant pieces of code.
    '''
    # serve a cached (non-empty) result when available
    if _CACHE.get(fun):
        return _CACHE[fun]
    net_runner_opts = _get_net_runner_opts()
    _CACHE[fun] = __salt__['mine.get'](
        net_runner_opts.get('target'),
        fun,
        tgt_type=net_runner_opts.get('expr_form'))
    return _CACHE[fun]
def translations(context: Context, pull=False, push=False):
    """Synchronises translations with transifex.com"""
    if not pull and not push:
        raise TaskError('Specify whether to push or pull translations')
    if pull:
        context.shell('tx', 'pull')
        # regenerate both the gettext and the javascript catalogs
        for use_js in (False, True):
            make_messages(context, javascript=use_js)
    if push:
        context.shell('tx', 'push', '--source', '--no-interactive')
def is_expired(self, max_idle_seconds):
    """Determine whether this record has expired.

    :param max_idle_seconds: (long), maximum idle time allowed since the
        record's last access.
    :return: (bool), ``True`` if this record is expired, either because its
        absolute expiration time has passed or because it has been idle
        longer than *max_idle_seconds*.
    """
    now = current_time()
    # hard expiration takes precedence (short-circuits the idle check)
    if self.expiration_time is not None and self.expiration_time < now:
        return True
    return (max_idle_seconds is not None
            and self.last_access_time + max_idle_seconds < now)
def interp2d2d ( x , y , Z , xout , yout , split_factor = 1 , ** kwargs ) : """INTERP2D2D : Interpolate a 2D matrix into another 2D matrix @ param x : 1st dimension vector of size NX @ param y : 2nd dimension vector of size NY @ param Z : Array to interpolate ( NXxNY ) @ param xout : 1st dimension vector of size NXout @ param yout : 2nd dimension vector of size NYout @ keyword split _ factor : Nummber of times to split arrays . Nb of threads is equal to split _ factor * * 2. @ return : Interpolated array ( NXoutxNYout ) @ author : Renaud DUSSURGET , LER / PAC , Ifremer La Seyne"""
# Queued thread class ThreadClass ( threading . Thread ) : # We override the _ _ init _ _ method def __init__ ( self , input_q , indices_q , result_q ) : threading . Thread . __init__ ( self ) self . input = input_q self . indices = indices_q self . result = result_q def task ( self , NaN = True ) : # grabs host from queue inargs = self . input . get ( ) ind = inargs [ 0 ] # thread index x = inargs [ 1 ] y = inargs [ 2 ] Z = inargs [ 3 ] xout = inargs [ 4 ] yout = inargs [ 5 ] Zout = _interp2d2d ( x , y , Z , xout , yout , ** kwargs ) # Zout = sc . interpolate . griddata ( points , gz , xi , * * kwargs ) self . indices . put ( ind ) self . result . put ( Zout ) del Zout # if verbose > 0 : print " % s ended at time : % s " % ( self . getName ( ) , datetime . datetime . now ( ) ) def run ( self ) : # Starts the queue self . task ( ) # signals to queue job is done self . input . task_done ( ) # Setup input and output queues input_q = Queue . Queue ( ) indices_q = Queue . Queue ( ) result_q = Queue . Queue ( ) # Map the data along X axis N_threads = split_factor ** 2 # over = np . ceil ( cut / dx ) # Overlay between each time series processed in parallel # Get dimensions to split matrix nxin = x . size nyin = y . size nxout = xout . size nyout = yout . size gx = np . reshape ( np . repeat ( x , nyin ) , ( nxin , nyin ) ) gy = np . reshape ( np . repeat ( y , nxin ) , ( nyin , nxin ) ) . transpose ( ( 1 , 0 ) ) gxout = np . reshape ( np . repeat ( xout , nyout ) , ( nxout , nyout ) ) gyout = np . reshape ( np . repeat ( yout , nxout ) , ( nyout , nxout ) ) . transpose ( ( 1 , 0 ) ) # Map output coordinates ind = [ ] # Map output coordinates xsplit = [ ] ysplit = [ ] for i in np . arange ( split_factor ) : for j in np . arange ( split_factor ) : xsplit . append ( i * ( nxout / float ( split_factor ) ) ) xsplit . append ( ( i + 1 ) * ( nxout / float ( split_factor ) ) - 1 ) ysplit . append ( j * ( nyout / float ( split_factor ) ) ) ysplit . 
append ( ( j + 1 ) * ( nyout / float ( split_factor ) ) - 1 ) # Round xsplit = np . round ( xsplit ) . astype ( int ) ysplit = np . round ( ysplit ) . astype ( int ) N_threads = len ( xsplit ) / 2 # th _ xout = gxout [ 0 : nxout / split _ factor , 0:2] # th _ yout = # gz = Z . flatten ( ) # points = zip ( * ( gx . flatten ( ) , gy . flatten ( ) ) ) # spawn a pool of threads , and pass them queue instance for i in np . arange ( N_threads ) : t = ThreadClass ( input_q , indices_q , result_q ) t . setDaemon ( True ) t . start ( ) # Feed threads with data for i in range ( N_threads ) : xoind = xsplit [ [ i * 2 , ( i * 2 ) + 1 ] ] yoind = ysplit [ [ i * 2 , ( i * 2 ) + 1 ] ] xiind = x [ xout [ xoind ] . astype ( int ) ] . astype ( int ) yiind = y [ yout [ yoind ] . astype ( int ) ] . astype ( int ) th_x = x [ xiind [ 0 ] : xiind [ 1 ] + 1 ] th_y = y [ yiind [ 0 ] : yiind [ 1 ] + 1 ] th_xo = xout [ xoind [ 0 ] : xoind [ 1 ] + 1 ] th_yo = yout [ yoind [ 0 ] : yoind [ 1 ] + 1 ] th_z = Z [ xiind [ 0 ] : xiind [ 1 ] + 1 , yiind [ 0 ] : yiind [ 1 ] + 1 ] # input _ q . put ( ( i , np . copy ( points ) , gz . copy ( ) , xout [ iind [ i ] ] . copy ( ) , yout [ iind [ i ] ] . copy ( ) ) ) input_q . put ( ( i , th_x , th_y , th_z , th_xo , th_yo ) ) # outvar = Z . copy ( ) # outvar . data [ : ] = outvar . fill _ value # for i in range ( N _ threads ) : # print " % s launched time : % s " % ( i , datetime . datetime . now ( ) ) # input _ q . put ( ( i , np . copy ( points ) , gz . copy ( ) , xout [ iind [ i ] ] . copy ( ) , yout [ iind [ i ] ] . copy ( ) ) ) # wait on the queue until everything has been processed input_q . join ( ) # Sort threads tnb = [ ] for i in range ( N_threads ) : tnb . append ( indices_q . get ( i ) ) tsort = np . argsort ( tnb ) # Get back the results for each thread in a list of results for i in np . arange ( N_threads ) : r = result_q . get ( i ) if i == 0 : dum = [ r ] else : dum . 
append ( r ) # Reorder data from each thread into output matrix for i in tsort : if i == tsort [ 0 ] : outmat = dum [ i ] else : outmat = np . ma . concatenate ( ( outmat , dum [ i ] ) , 0 ) if isinstance ( outmat , np . ma . masked_array ) else np . concatenate ( ( outmat , dum [ i ] ) , 0 ) if len ( outmat ) != len ( outmat ) : raise '[ERROR]Output array is not coherent with input array - check array reconstruction' return ( outmat )
def to_representation(self, obj):
    """Represent data for the field."""
    # Local import: collections.Iterable was removed in Python 3.10; the
    # ABCs live in collections.abc.
    import collections.abc

    # NOTE(review): the original expression was
    #   isinstance(obj, Iterable) or isinstance(obj, Manager) and not isinstance(obj, dict)
    # where, by precedence, the dict exclusion only applied to the Manager
    # branch, so every dict (an Iterable) counted as "many". The trailing
    # dict check only makes sense applied to both, hence the parentheses.
    many = (
        (isinstance(obj, collections.abc.Iterable)
         or isinstance(obj, models.Manager))
        and not isinstance(obj, dict)
    )

    assert self.serializer is not None and issubclass(
        self.serializer, serializers.ModelSerializer), (
        "Bad serializer defined %s" % self.serializer)

    extra_params = {}
    if issubclass(self.serializer, ModelPermissionsSerializer):
        # propagate the permission cache down to the nested serializer
        extra_params['cached_allowed_fields'] = self.parent.cached_allowed_fields

    ser = self.serializer(obj, context=self.context, many=many, **extra_params)
    return ser.data
def specific_notes(hazard, exposure):
    """Return notes which are specific for a given hazard and exposure.

    :param hazard: The hazard definition.
    :type hazard: safe.definition.hazard

    :param exposure: The exposure definition.
    :type exposure: safe.definition.exposure

    :return: List of specific notes (empty when no entry matches).
    :rtype: list
    """
    match = next(
        (item for item in ITEMS
         if item['hazard'] == hazard and item['exposure'] == exposure),
        None)
    if match is None:
        return []
    return match.get('notes', [])
def setup(config):
    """Setup persistence to be used in cinderlib.

    By default memory persistence is used, but other mechanisms are
    available:

    - Plugin name (str): looked up under the entrypoint namespace
      ``cinderlib.persistence.storage`` ("memory", "dbms", "memory_dbms"
      ship with cinderlib); remaining config entries are passed as keyword
      arguments.
    - A class inheriting from PersistenceDriverBase: instantiated with the
      remaining config entries.
    - An instance inheriting from PersistenceDriverBase: used as-is.
    """
    config = {} if config is None else config.copy()

    # Prevent driver dynamic loading clearing configuration options
    volume_cmd.CONF._ConfigOpts__cache = MyDict()

    # Default configuration is using memory storage
    storage = config.pop('storage', None) or DEFAULT_STORAGE

    if isinstance(storage, base.PersistenceDriverBase):
        # already-instantiated driver: use it directly
        return storage
    if inspect.isclass(storage) and issubclass(storage,
                                               base.PersistenceDriverBase):
        # driver class: instantiate with the remaining options
        return storage(**config)
    if not isinstance(storage, six.string_types):
        raise exception.InvalidPersistence(storage)

    # plugin name: load through the stevedore entrypoint namespace
    manager = driver.DriverManager(
        namespace='cinderlib.persistence.storage',
        name=storage,
        invoke_on_load=True,
        invoke_kwds=config,
    )
    return manager.driver
def on_created(self, event):
    '''Fired when something's been created'''
    # only react when this action is configured for "create" events
    if self.trigger == "create":
        message = ActionInput(event, "", self.name)
        flows.Global.MESSAGE_DISPATCHER.send_message(message)
def getStrikes(self, contract_identifier, smin=None, smax=None):
    """Return the strikes of a contract (or a "multi" contract's contracts).

    :param contract_identifier: identifier passed to ``contractDetails``
    :param smin: optional lower strike bound (inclusive)
    :param smax: optional upper strike bound (inclusive)
    :return: sorted tuple of strike prices (floats); an empty list for
        non-option contracts (kept for backward compatibility)
    """
    contracts = self.contractDetails(contract_identifier)["contracts"]
    # strikes only make sense for options / futures-options
    if contracts[0].m_secType not in ("FOP", "OPT"):
        return []

    # collect strikes as floats
    strikes = [float(contract.m_strike) for contract in contracts]

    # Filter by bounds with direct comparisons. The original materialized
    # list(set(range(smin, smax))) (default 0..1e9 — gigabytes of ints)
    # and tested float membership in it, which was both a memory blow-up
    # and silently dropped fractional strikes such as 102.5.
    if smin is not None or smax is not None:
        lo = smin if smin is not None else float("-inf")
        hi = smax if smax is not None else float("inf")
        strikes = [strike for strike in strikes if lo <= strike <= hi]

    strikes.sort()
    return tuple(strikes)
def on_event(self, evt, is_final):
    """Invoked in response to COM PumpWaitingMessages — runs on a
    different thread."""
    for msg in XmlHelper.message_iter(evt):
        # historical requests carry a single security element per message
        node = msg.GetElement('securityData')
        if node.HasElement('securityError'):
            # record the error against the offending security id
            secid = XmlHelper.get_child_value(node, 'security')
            error = XmlHelper.as_security_error(
                node.GetElement('securityError'), secid)
            self.security_errors.append(error)
        else:
            self.on_security_data_node(node)
def _try_convert_value(conversion_finder, attr_name: str, attr_value: S,
                       desired_attr_type: Type[T], logger: Logger,
                       options: Dict[str, Dict[str, Any]]) -> T:
    """Try to convert *attr_value* into *desired_attr_type* using
    *conversion_finder*.

    When no conversion is required the conversion finder is not used at all
    (it may be None).

    :param conversion_finder:
    :param attr_name:
    :param attr_value:
    :param desired_attr_type:
    :param logger:
    :param options:
    :return:
    """
    # (a) typed collections declare their item type and need element-wise
    # conversion
    if is_typed_collection(desired_attr_type):
        return ConversionFinder.convert_collection_values_according_to_pep(
            coll_to_convert=attr_value,
            desired_type=desired_attr_type,
            conversion_finder=conversion_finder,
            logger=logger,
            **options)
    # (b) already the right type? typing constructs don't support plain
    # isinstance, hence the robust variant
    if robust_isinstance(attr_value, desired_attr_type):
        # the value can be used as-is
        return attr_value
    # (c) a conversion is required
    if conversion_finder is None:
        raise NoConverterFoundForObjectType.create(
            conversion_finder, attr_value, desired_attr_type)
    return conversion_finder.find_and_convert(
        attr_name, attr_value, desired_attr_type, logger, options)
async def store_cred(self, cred_json: str, cred_req_metadata_json: str) -> str:
    """Store cred in wallet as HolderProver and return the credential
    identifier created in the wallet.

    Raise AbsentTails if the tails file is not available for the
    credential's revocation registry. Raise WalletState if the wallet is
    closed.

    :param cred_json: credential json as HolderProver created
    :param cred_req_metadata_json: credential request metadata json as
        HolderProver created via create_cred_req()
    :return: credential identifier within wallet
    """
    LOGGER.debug(
        'HolderProver.store_cred >>> cred_json: %s, cred_req_metadata_json: %s',
        cred_json,
        cred_req_metadata_json)

    if not self.wallet.handle:
        LOGGER.debug('HolderProver.store_cred <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    cred = json.loads(cred_json)
    cred_def_json = await self.get_cred_def(cred['cred_def_id'])

    # revocable credential: make sure the registry definition is at hand
    rr_id = cred['rev_reg_id']
    rr_def_json = None
    if rr_id:
        await self._sync_revoc_for_proof(rr_id)
        rr_def_json = await self.get_rev_reg_def(rr_id)

    cred_id = await anoncreds.prover_store_credential(
        self.wallet.handle,
        None,  # cred_id: let indy-sdk generate a random uuid
        cred_req_metadata_json,
        cred_json,
        cred_def_json,
        rr_def_json)

    LOGGER.debug('HolderProver.store_cred <<< %s', cred_id)
    return cred_id
def _request(self, domain, type_name, search_command, db_method, body=None):
    """Make the API request for a Data Store CRUD operation.

    Args:
        domain (string): One of 'local', 'organization', or 'system'.
        type_name (string): Free-form index type name, used verbatim by
            the ThreatConnect API.
        search_command (string): Search command to pass to ES.
        db_method (string): The DB method 'DELETE', 'GET', 'POST', or
            'PUT' (always sent via HTTP POST with a DB-Method header).
        body (dict): JSON body.

    Returns:
        dict: with keys 'data' (parsed JSON), 'response' (the raw
        response object) and 'status' ('Success' or 'Failed').
    """
    headers = {'Content-Type': 'application/json', 'DB-Method': db_method}
    search_command = self._clean_datastore_path(search_command)
    url = '/v2/exchange/db/{}/{}/{}'.format(domain, type_name, search_command)
    r = self.tcex.session.post(url, data=body, headers=headers, params=self._params)
    # defaults returned only if execution gets past a non-raising error hook
    data = []
    status = 'Failed'
    if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
        # NOTE(review): handle_error is presumably expected to raise here;
        # otherwise the r.json() call below may fail on a non-JSON body —
        # TODO confirm against tcex.handle_error(350).
        self.tcex.handle_error(350, [r.status_code, r.text])
    data = r.json()
    status = 'Success'
    return {'data': data, 'response': r, 'status': status}
def remove_external_references_from_srl_layer(self):
    """Removes all external references present in the SRL layer
    (from every predicate and from each predicate's roles).

    Note: the original docstring said "term layer", but the code clearly
    operates on ``self.srl_layer``.
    """
    if self.srl_layer is not None:
        for pred in self.srl_layer.get_predicates():
            pred.remove_external_references()
            pred.remove_external_references_from_roles()
def xmoe2_v1_l4k_global_only():
    """xmoe2_v1_l4k variant (sequence length 4096) using only global
    attention: every local-attention layer is replaced by plain attention."""
    hparams = xmoe2_v1_l4k()
    rewritten = []
    for layer in hparams.decoder_layers:
        rewritten.append("att" if layer == "local_att" else layer)
    hparams.decoder_layers = rewritten
    return hparams
def update_check_point(self, project, logstore, consumer_group, shard,
                       check_point, consumer='', force_success=True):
    """Update check point.

    :type project: string
    :param project: project name
    :type logstore: string
    :param logstore: logstore name
    :type consumer_group: string
    :param consumer_group: consumer group name
    :type shard: int
    :param shard: shard id
    :type check_point: string
    :param check_point: checkpoint name
    :type consumer: string
    :param consumer: consumer name
    :type force_success: bool
    :param force_success: if force to succeed

    :return: ConsumerGroupUpdateCheckPointResponse
    """
    request = ConsumerGroupUpdateCheckPointRequest(
        project, logstore, consumer_group, consumer, shard,
        check_point, force_success)
    params = request.get_request_params()
    body_str = request.get_request_body()
    headers = {"Content-Type": "application/json"}
    resource = "/logstores/" + logstore + "/consumergroups/" + consumer_group
    # POST the checkpoint update and wrap the raw reply
    (resp, header) = self._send("POST", project, body_str, resource,
                                params, headers)
    return ConsumerGroupUpdateCheckPointResponse(header, resp)
def get_value(self, group, key=None):
    """Return the stored value for (group, key), or None when absent.

    When *key* is omitted it defaults to the group name itself.
    """
    lookup_key = group if key is None else key
    record = self.get_queryset_by_group_and_key(group=group,
                                                key=lookup_key).first()
    return None if record is None else record.value
def blip_rId(self):
    """Value of `p:blipFill/a:blip/@r:embed`.

    Returns |None| if not present.
    """
    blip = self.blipFill.blip
    if blip is None:
        return None
    # rEmbed may itself be None, which also means "not present"
    return blip.rEmbed
def add_format(self, format_id, number, entry_type, description):
    """Add a FORMAT line to the header.

    Arguments:
        format_id (str): The id of the format line
        number (str): Integer or any of [A, R, G, .]
        entry_type (str): Any of [Integer, Float, Flag, Character, String]
        description (str): A description of the format line
    """
    format_line = (
        '##FORMAT=<ID={0},Number={1},Type={2},Description="{3}">'.format(
            format_id, number, entry_type, description))
    logger.info("Adding format line to vcf: {0}".format(format_line))
    # route the new line through the regular meta-data parser
    self.parse_meta_data(format_line)
    return
def add_keyword(self, keyword, schema=None, source=None):
    """Add a keyword.

    Args:
        keyword (str): keyword to add.
        schema (str): schema to which the keyword belongs.
        source (str): source for the keyword.
    """
    entry = self._sourced_dict(source, value=keyword)
    if schema is not None:
        entry['schema'] = schema
    self._append_to('keywords', entry)
def is_thenable(cls, obj):
    # type: (Any) -> bool
    """A utility function to determine if the specified object is a
    promise using "duck typing"."""
    if obj is None:
        return False
    obj_type = obj.__class__
    if obj_type in BASE_TYPES:
        # plain values can never behave like promises
        return False
    if issubclass(obj_type, Promise):
        return True
    return iscoroutine(obj) or is_future_like(obj_type)  # type: ignore
def add_input(self, in_name, type_or_parse=None):
    """Declare a possible input.

    *type_or_parse* may be None (generic input), a GenericType instance
    (used as-is), or a callable (wrapped as a parse function).
    """
    if type_or_parse is None:
        declared_type = GenericType()
    elif isinstance(type_or_parse, GenericType):
        declared_type = type_or_parse
    elif callable(type_or_parse):
        declared_type = GenericType(parse=type_or_parse)
    else:
        raise ValueError("the given 'type_or_parse' is invalid")
    self._inputs[in_name] = declared_type
def is_bridge(self):
    """bool: Whether this zone is a bridge.

    The answer does not appear to change over the zone's lifetime, so a
    cached value is returned when available; otherwise it is derived from
    the zone group topology, which caches it for subsequent calls.
    """
    cached = self._is_bridge
    if cached is not None:
        return cached
    # Parsing the zone group state populates self._is_bridge as a side
    # effect, so the attribute is valid afterwards.
    self._parse_zone_group_state()
    return self._is_bridge
def resource(self, uri, methods=frozenset({'GET'}), **kwargs):
    """Decorates a function to be registered as a resource route.

    :param uri: path of the URL
    :param methods: list or tuple of methods allowed
    :param host:
    :param strict_slashes:
    :param stream:
    :param version:
    :param name: user defined route name for url_for
    :param filters: List of callable that will filter request and
                    response data
    :param validators: List of callable added to the filter list.
    :return: A decorated function
    """
    def decorator(f):
        # Mark streaming handlers before registration.
        if kwargs.get('stream'):
            f.is_stream = kwargs['stream']
        self.add_resource(f, uri=uri, methods=methods, **kwargs)
        # Return the handler so the decorated name keeps referring to the
        # original function.  Previously the decorator returned None,
        # which rebound the decorated name to None.
        return f
    return decorator
def revoke_auth(self, load):
    '''Allow a minion to request revocation of its own key.

    Returns ``False`` when the payload carries no minion id, ``True``
    once the key has been deleted.
    '''
    if 'id' not in load:
        return False
    key_store = salt.key.Key(self.opts)
    key_store.delete_key(
        load['id'],
        preserve_minions=load.get('preserve_minion_cache', False))
    return True
def _muaprocessnew ( self ) : """Moves all ' new ' files into cur , correctly flagging"""
foldername = self . _foldername ( "new" ) files = self . filesystem . listdir ( foldername ) for filename in files : if filename == "" : continue curfilename = self . _foldername ( joinpath ( "new" , filename ) ) newfilename = joinpath ( self . _cur , "%s:2,%s" % ( filename , "" ) ) self . filesystem . rename ( curfilename , newfilename )
def get_level_id(self):
    """Gets the ``Id`` of a ``Grade`` corresponding to the assessment difficulty.

    return: (osid.id.Id) - a grade ``Id``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.Resource.get_avatar_id_template
    level_id = self._my_map['levelId']
    if not level_id:
        raise errors.IllegalState('this Assessment has no level')
    return Id(level_id)
def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
        one_rr_per_rrset=False):
    """Return the response obtained after sending a query via TCP.

    @param q: the query
    @type q: dns.message.Message object
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which causes
    the address family to use to be inferred from the form of of where.  If
    the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the IPv4 wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    """
    wire = q.to_wire()
    # Infer the address family from the address form when not given,
    # falling back to IPv4 on any failure.
    if af is None:
        try:
            af = dns.inet.af_for_address(where)
        except Exception:
            af = dns.inet.AF_INET
    # Build the (address, port[, flowinfo, scopeid]) tuple shapes the
    # socket API expects for each family.
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None:
            source = (source, source_port)
    elif af == dns.inet.AF_INET6:
        destination = (where, port, 0, 0)
        if source is not None:
            source = (source, source_port, 0, 0)
    s = socket.socket(af, socket.SOCK_STREAM, 0)
    try:
        expiration = _compute_expiration(timeout)
        # Non-blocking socket; the _connect/_net_* helpers poll against
        # `expiration` to enforce the timeout.
        s.setblocking(0)
        if source is not None:
            s.bind(source)
        _connect(s, destination)
        l = len(wire)
        # copying the wire into tcpmsg is inefficient, but lets us
        # avoid writev() or doing a short write that would get pushed
        # onto the net
        # DNS over TCP prefixes the message with a 2-byte big-endian length.
        tcpmsg = struct.pack("!H", l) + wire
        _net_write(s, tcpmsg, expiration)
        # Read the 2-byte length of the reply, then the reply itself.
        ldata = _net_read(s, 2, expiration)
        (l,) = struct.unpack("!H", ldata)
        wire = _net_read(s, l, expiration)
    finally:
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                              one_rr_per_rrset=one_rr_per_rrset)
    # Reject replies whose id/flags do not match the query.
    if not q.is_response(r):
        raise BadResponse
    return r
def _parse_udf_vol_descs(self, extent, length, descs):
    # type: (int, int, PyCdlib._UDFDescriptors) -> None
    '''
    An internal method to parse a set of UDF Volume Descriptors.

    Parameters:
     extent - The extent at which to start parsing.
     length - The number of bytes to read from the incoming ISO.
     descs - The _UDFDescriptors object to store parsed objects into.
    Returns:
     Nothing.
    '''
    # Read in the Volume Descriptor Sequence
    self._seek_to_extent(extent)
    vd_data = self._cdfp.read(length)
    # And parse it.  Since the sequence doesn't have to be in any set order,
    # and since some of the entries may be missing, we parse the Descriptor
    # Tag (the first 16 bytes) to find out what kind of descriptor it is,
    # then construct the correct type based on that.  We keep going until we
    # see a Terminating Descriptor.
    block_size = self.pvd.logical_block_size()
    offset = 0
    current_extent = extent
    done = False
    while not done:
        desc_tag = udfmod.UDFTag()
        desc_tag.parse(vd_data[offset:], current_extent)
        # Tag identifiers used below: 1=Primary Volume Descriptor,
        # 4=Implementation Use, 5=Partition, 6=Logical Volume,
        # 7=Unallocated Space, 8=Terminating Descriptor.
        if desc_tag.tag_ident == 1:
            descs.pvd.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        elif desc_tag.tag_ident == 4:
            descs.impl_use.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        elif desc_tag.tag_ident == 5:
            descs.partition.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        elif desc_tag.tag_ident == 6:
            descs.logical_volume.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        elif desc_tag.tag_ident == 7:
            descs.unallocated_space.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        elif desc_tag.tag_ident == 8:
            # Terminating Descriptor: record it and stop the scan.
            descs.terminator.parse(current_extent, desc_tag)
            done = True
        else:
            raise pycdlibexception.PyCdlibInvalidISO('UDF Tag identifier not %d' % (desc_tag.tag_ident))
        # Each descriptor occupies one logical block; advance to the next.
        offset += block_size
        current_extent += 1
def get_connection_by_id(self, id):
    '''Search this port's connections for one with the given ID.

    Returns the first matching connection object, or ``None`` when no
    connection carries that ID.
    '''
    with self._mutex:
        return next(
            (candidate for candidate in self.connections if candidate.id == id),
            None)
def _request(self, op):
    """Implementations of :meth:`request` call this method to send the
    request and process the reply.

    In synchronous mode, blocks until the reply is received and returns
    :class:`RPCReply`.  Depending on the :attr:`raise_mode` a `rpc-error`
    element in the reply may lead to an :exc:`RPCError` exception.

    In asynchronous mode, returns immediately, returning `self`.  The
    :attr:`event` attribute will be set when the reply has been received
    (see :attr:`reply`) or an error occured (see :attr:`error`).

    *op* is the operation to be requested as an
    :class:`~xml.etree.ElementTree.Element`
    """
    self.logger.info('Requesting %r', self.__class__.__name__)
    req = self._wrap(op)
    self._session.send(req)
    if self._async:
        # Asynchronous mode: the caller waits on self._event itself.
        self.logger.debug('Async request, returning %r', self)
        return self
    else:
        self.logger.debug('Sync request, will wait for timeout=%r', self._timeout)
        # Block until the listener signals a reply or an error, or until
        # the timeout elapses (event stays unset in that case).
        self._event.wait(self._timeout)
        if self._event.isSet():  # NOTE(review): isSet() is the legacy spelling of Event.is_set()
            if self._error:
                # Error that prevented reply delivery
                raise self._error
            self._reply.parse()
            if self._reply.error is not None and not self._device_handler.is_rpc_error_exempt(self._reply.error.message):
                # <rpc-error>'s [RPCError]
                if self._raise_mode == RaiseMode.ALL or (self._raise_mode == RaiseMode.ERRORS and self._reply.error.severity == "error"):
                    errlist = []  # NOTE(review): never used -- candidate for removal
                    errors = self._reply.errors
                    if len(errors) > 1:
                        # Aggregate multiple <rpc-error> elements into one exception.
                        raise RPCError(to_ele(self._reply._raw), errs=errors)
                    else:
                        raise self._reply.error
            # Device handlers may supply a transform (e.g. XSLT) for replies.
            if self._device_handler.transform_reply():
                return NCElement(self._reply, self._device_handler.transform_reply())
            else:
                return self._reply
        else:
            # The event never fired within the timeout window.
            raise TimeoutExpiredError('ncclient timed out while waiting for an rpc reply.')
def _convert_nonstring_categoricals ( self , param_dict ) : """Apply the self . categorical _ mappings _ mappings where necessary ."""
return { name : ( self . categorical_mappings_ [ name ] [ val ] if name in self . categorical_mappings_ else val ) for ( name , val ) in param_dict . items ( ) }
def probability_lt(self, x):
    """Return the probability of a random variable being less than *x*.

    Returns ``None`` when no mean has been set for this distribution.
    """
    if self.mean is None:
        return None
    return normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
def maybeStartBuildsOn(self, new_builders):
    """Try to start any builds that can be started right now.

    This function returns immediately, and promises to trigger those
    builders eventually.

    @param new_builders: names of new builders that should be given the
    opportunity to check for new requests.
    """
    # NOTE(review): the `yield` below makes this a generator; presumably it
    # runs under Twisted's @defer.inlineCallbacks (decorator outside this
    # view) -- confirm at the definition site.
    if not self.running:
        return
    d = self._maybeStartBuildsOn(new_builders)
    # Track the in-flight deferred so shutdown code can wait on it.
    self._pendingMSBOCalls.append(d)
    try:
        yield d
    except Exception as e:  # pragma: no cover
        log.err(e, "while starting builds on {0}".format(new_builders))
    finally:
        # Always drop the bookkeeping entry, whether we succeeded or failed.
        self._pendingMSBOCalls.remove(d)
def zero_or_more(e, delimiter=None):
    """Create a PEG function to match zero or more expressions.

    Args:
        e: the expression to match
        delimiter: an optional expression to match between the primary
            *e* matches.
    """
    # The default delimiter matches the empty string and contributes nothing.
    if delimiter is None:
        delimiter = lambda s, grm, pos: (s, Ignore, (pos, pos))
    def match_zero_or_more(s, grm=None, pos=0):
        start = pos
        try:
            # First match of `e`; Ignore results are dropped from `data`.
            s, obj, span = e(s, grm, pos)
            pos = span[1]
            data = [] if obj is Ignore else [obj]
        except PegreError:
            # Zero matches is still a success: empty result at `pos`.
            return PegreResult(s, [], (pos, pos))
        try:
            while True:
                # Each further iteration requires delimiter followed by `e`;
                # failure of either ends the repetition.
                # NOTE(review): when the delimiter matches but the following
                # `e` fails, `pos` has already advanced past the delimiter,
                # so the final span includes that trailing delimiter --
                # confirm this is intended.
                s, obj, span = delimiter(s, grm, pos)
                pos = span[1]
                if obj is not Ignore:
                    data.append(obj)
                s, obj, span = e(s, grm, pos)
                pos = span[1]
                if obj is not Ignore:
                    data.append(obj)
        except PegreError:
            pass
        return PegreResult(s, data, (start, pos))
    return match_zero_or_more
def process(self, metric):
    """Run every configured rule against a single metric.

    @type metric: diamond.metric.Metric
    @param metric: metric to process
    @rtype None
    """
    for handler_rule in self.rules:
        handler_rule.process(metric, self)
def cdna_codon_sequence_after_deletion_or_substitution_frameshift(
        sequence_from_start_codon,
        cds_offset,
        trimmed_cdna_ref,
        trimmed_cdna_alt):
    """Logic for any frameshift which isn't an insertion.

    Insertions are a special case because of base-inclusive indexing:
    there ``cds_offset`` is the base before the insertion, while here
    ``cds_offset`` is the first reference base affected by the variant.

    Returns index of first modified codon and sequence from that codon
    onward.
    """
    # divmod yields both the index of the first affected codon and the
    # variant's position inside that codon (0, 1, or 2).
    codon_index, offset_in_codon = divmod(cds_offset, 3)
    # Transcript sequence starting at the first modified codon.
    tail = sequence_from_start_codon[codon_index * 3:]
    mutated_tail = substitute(
        sequence=tail,
        offset=offset_in_codon,
        ref=trimmed_cdna_ref,
        alt=trimmed_cdna_alt)
    return codon_index, mutated_tail
def get_stops_in_polygon(
    feed: "Feed", polygon: Polygon, geo_stops=None
) -> DataFrame:
    """Return the slice of ``feed.stops`` containing all stops that lie
    within the given Shapely Polygon (WGS84 coordinates).

    Parameters
    ----------
    feed : Feed
    polygon : Shapely Polygon
        Specified in WGS84 coordinates
    geo_stops : Geopandas GeoDataFrame
        A geographic version of ``feed.stops``; computed when not given.
        Supply it in batch jobs to avoid repeated computation.

    Returns
    -------
    DataFrame
        Subset of ``feed.stops``

    Notes
    -----
    - Requires GeoPandas
    - Assume ``feed.stops`` is not ``None`` when ``geo_stops`` is not given
    """
    if geo_stops is None:
        stops_geo = geometrize_stops(feed.stops)
    else:
        stops_geo = geo_stops.copy()
    original_cols = stops_geo.columns
    # Flag stops whose geometry falls inside the polygon, keep only those,
    # then drop the helper column by re-selecting the original columns.
    stops_geo["hit"] = stops_geo["geometry"].within(polygon)
    inside = stops_geo[stops_geo["hit"]][original_cols]
    return ungeometrize_stops(inside)
def set(self, dic, val=None, force=False):
    """Assign versatile options from `CMAOptions.versatile_options()` with a
    new value; use `init()` for the others.

    Arguments
    ---------
    `dic`
        either a dictionary or a key. In the latter case, `val` must
        be provided.
    `val`
        value for `key`, approximate match is sufficient.
    `force`
        force setting of non-versatile options, use with caution.
        (NOTE(review): the visible body never reads `force` -- confirm.)

    This method will be most probably used with the ``opts`` attribute of
    a `CMAEvolutionStrategy` instance.
    """
    if val is not None:
        # A single key was given; wrap it into a one-entry dictionary.
        dic = {dic: val}
    for original_key, value in list(dict(dic).items()):
        corrected = self.corrected_key(original_key)
        if self._lock_setting and corrected not in CMAOptions.versatile_options():
            _print_warning('key ' + str(original_key) + ' ignored (not recognized as versatile)',
                           'set', 'CMAOptions')
        else:
            self[corrected] = value
    return self
def truncate_html(html, *args, **kwargs):
    """Truncates HTML string.

    :param html: The HTML string or parsed element tree (with
                 :func:`html5lib.parse`).
    :param kwargs: Similar with :class:`.filters.TruncationFilter`.
    :return: The truncated HTML string.
    """
    # Accept either raw markup or an already-parsed element tree.
    if hasattr(html, 'getchildren'):
        tree = html
    else:
        tree = html5lib.parse(html)
    tree_walker = html5lib.getTreeWalker('etree')
    token_stream = TruncationFilter(tree_walker(tree), *args, **kwargs)
    serializer = html5lib.serializer.HTMLSerializer()
    pieces = serializer.serialize(token_stream)
    return u''.join(pieces).strip()
def plot_kde(
    values,
    values2=None,
    cumulative=False,
    rug=False,
    label=None,
    bw=4.5,
    quantiles=None,
    rotated=False,
    contour=True,
    fill_last=True,
    textsize=None,
    plot_kwargs=None,
    fill_kwargs=None,
    rug_kwargs=None,
    contour_kwargs=None,
    contourf_kwargs=None,
    pcolormesh_kwargs=None,
    ax=None,
    legend=True,
):
    """1D or 2D KDE plot taking into account boundary conditions.

    Parameters
    ----------
    values : array-like
        Values to plot
    values2 : array-like, optional
        Values to plot. If present, a 2D KDE will be estimated
    cumulative : bool
        If true plot the estimated cumulative distribution function.
        Defaults to False. Ignored for 2D KDE
    rug : bool
        If True adds a rugplot. Defaults to False. Ignored for 2D KDE
    label : string
        Text to include as part of the legend
    bw : float
        Bandwidth scaling factor for 1D KDE. Should be larger than 0. The
        higher this number the smoother the KDE will be. Defaults to 4.5
        which is essentially the same as the Scott's rule of thumb (the
        default rule used by SciPy).
    quantiles : list
        Quantiles in ascending order used to segment the KDE. Use
        [.25, .5, .75] for quartiles. Defaults to None.
    rotated : bool
        Whether to rotate the 1D KDE plot 90 degrees.
    contour : bool
        If True plot the 2D KDE using contours, otherwise plot a smooth
        2D KDE. Defaults to True.
    fill_last : bool
        If True fill the last contour of the 2D KDE plot. Defaults to True.
    textsize : float
        Text size scaling factor for labels, titles and lines. If None it
        will be autoscaled based on figsize.
    plot_kwargs : dict
        Keywords passed to the pdf line of a 1D KDE.
    fill_kwargs : dict
        Keywords passed to the fill under the line (use
        fill_kwargs={'alpha': 0} to disable fill). Ignored for 2D KDE.
    rug_kwargs : dict
        Keywords passed to the rug plot. Ignored if rug=False or for 2D
        KDE. Use `space` keyword (float) to control the position of the
        rugplot. The larger this number the lower the rugplot.
    contour_kwargs : dict
        Keywords passed to ax.contour. Ignored for 1D KDE.
    contourf_kwargs : dict
        Keywords passed to ax.contourf. Ignored for 1D KDE.
    pcolormesh_kwargs : dict
        Keywords passed to ax.pcolormesh. Ignored for 1D KDE.
    ax : matplotlib axes
    legend : bool
        Add legend to the figure. By default True.

    Returns
    -------
    ax : matplotlib axes
    """
    if ax is None:
        ax = plt.gca()
    figsize = ax.get_figure().get_size_inches()
    # Autoscale label, line and marker sizes from the figure size.
    figsize, *_, xt_labelsize, linewidth, markersize = _scale_fig_size(figsize, textsize, 1, 1)
    # Container objects have their own dedicated plotters; reject them here.
    if isinstance(values, xr.Dataset):
        raise ValueError(
            "Xarray dataset object detected.Use plot_posterior, plot_density, plot_joint"
            "or plot_pair instead of plot_kde")
    if isinstance(values, InferenceData):
        raise ValueError(" Inference Data object detected. Use plot_posterior instead of plot_kde")
    if values2 is None:
        # ---------- 1D KDE ----------
        if plot_kwargs is None:
            plot_kwargs = {}
        plot_kwargs.setdefault("color", "C0")
        default_color = plot_kwargs.get("color")
        if fill_kwargs is None:
            fill_kwargs = {}
        fill_kwargs.setdefault("color", default_color)
        if rug_kwargs is None:
            rug_kwargs = {}
        # Rug marker orientation follows the plot orientation.
        rug_kwargs.setdefault("marker", "_" if rotated else "|")
        rug_kwargs.setdefault("linestyle", "None")
        rug_kwargs.setdefault("color", default_color)
        rug_kwargs.setdefault("space", 0.2)
        plot_kwargs.setdefault("linewidth", linewidth)
        rug_kwargs.setdefault("markersize", 2 * markersize)
        density, lower, upper = _fast_kde(values, cumulative, bw)
        # `space` positions the rug below the axis, scaled by the peak density.
        rug_space = max(density) * rug_kwargs.pop("space")
        x = np.linspace(lower, upper, len(density))
        if cumulative:
            density_q = density
        else:
            # Normalized cumulative density, used to place quantile cuts.
            density_q = density.cumsum() / density.sum()
        fill_func = ax.fill_between
        fill_x, fill_y = x, density
        if rotated:
            # Swap axes for a vertical KDE.
            x, density = density, x
            fill_func = ax.fill_betweenx
        ax.tick_params(labelsize=xt_labelsize)
        if rotated:
            ax.set_xlim(0, auto=True)
            rug_x, rug_y = np.zeros_like(values) - rug_space, values
        else:
            ax.set_ylim(0, auto=True)
            rug_x, rug_y = values, np.zeros_like(values) - rug_space
        if rug:
            ax.plot(rug_x, rug_y, **rug_kwargs)
        if quantiles is not None:
            fill_kwargs.setdefault("alpha", 0.75)
            # Index of the first density point past each quantile boundary.
            idx = [np.sum(density_q < quant) for quant in quantiles]
            # Fill everywhere except at the quantile boundary points,
            # producing visually segmented regions.
            fill_func(fill_x, fill_y,
                      where=np.isin(fill_x, fill_x[idx], invert=True, assume_unique=True),
                      **fill_kwargs)
        else:
            fill_kwargs.setdefault("alpha", 0)
            ax.plot(x, density, label=label, **plot_kwargs)
            fill_func(fill_x, fill_y, **fill_kwargs)
        if legend and label:
            legend_element = [Patch(edgecolor=default_color, label=label)]
            ax.legend(handles=legend_element)
    else:
        # ---------- 2D KDE ----------
        if contour_kwargs is None:
            contour_kwargs = {}
        contour_kwargs.setdefault("colors", "0.5")
        if contourf_kwargs is None:
            contourf_kwargs = {}
        if pcolormesh_kwargs is None:
            pcolormesh_kwargs = {}
        # Coarser grid for contours, finer grid for the smooth mesh.
        gridsize = (128, 128) if contour else (256, 256)
        density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
        # A complex step makes np.mgrid interpret it as a point count.
        g_s = complex(gridsize[0])
        x_x, y_y = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
        ax.grid(False)
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        if contour:
            qcfs = ax.contourf(x_x, y_y, density, antialiased=True, **contourf_kwargs)
            qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
            if not fill_last:
                # Hide the lowest contour level so the background shows through.
                qcfs.collections[0].set_alpha(0)
                qcs.collections[0].set_alpha(0)
        else:
            ax.pcolormesh(x_x, y_y, density, **pcolormesh_kwargs)
    return ax
def parse_args(*args):
    """Parse the arguments for the command.

    Accepts the same arguments as :meth:`argparse.ArgumentParser.parse_args`
    (typically nothing, so ``sys.argv`` is used) and returns the parsed
    namespace.
    """
    parser = argparse.ArgumentParser(description="Send push notifications for a feed")
    parser.add_argument('--version', action='version',
                        version="%(prog)s " + __version__.__version__)
    # Positional feed URLs to process.
    parser.add_argument('feeds', type=str, nargs='*', metavar='feed_url',
                        help='A URL for a feed to process')
    parser.add_argument('--cache', '-c', type=str, dest='cache_dir',
                        help='Cache storage directory', required=False)
    parser.add_argument("-v", "--verbosity", action="count",
                        help="increase output verbosity", default=0)
    parser.add_argument("-e", "--entry", nargs='+',
                        help='URLs to entries/pages to index directly',
                        metavar='entry_url', dest='entries')
    parser.add_argument("-s", "--websub-only", nargs='+',
                        help='URLs/feeds to only send WebSub notifications for',
                        metavar='feed_url', dest='websub_only')
    # Connection tuning options.
    parser.add_argument('--timeout', '-t', type=int, dest='timeout',
                        help='Connection timeout, in seconds', default=120)
    parser.add_argument('--max-connections', type=int, dest='max_connections',
                        help='Maximum number of connections to have open at once',
                        default=100)
    parser.add_argument('--max-per-host', type=int, dest='max_per_host',
                        help='Maximum number of connections per host', default=0)
    # Webmention link-rel filtering.
    parser.add_argument('--rel-whitelist', '-w', dest='rel_whitelist', type=str,
                        help="Comma-separated list of link RELs to whitelist" +
                        " for sending webmentions")
    parser.add_argument('--rel-blacklist', '-b', dest='rel_blacklist', type=str,
                        help="Comma-separated list of link RELs to blacklist" +
                        " from sending webmentions", default="nofollow")
    parser.add_argument('--max-time', '-m', dest='max_time', type=float,
                        help="Maximum time (in seconds) to spend on this",
                        default=1800)
    parser.add_argument('--user-agent', dest='user_agent', type=str,
                        help="User-agent string to send",
                        default=__version__.USER_AGENT)
    # Mutually exclusive on/off feature flags, each with an explicit default.
    feature = parser.add_mutually_exclusive_group(required=False)
    feature.add_argument('--keepalive', dest='keepalive', action='store_true',
                         help="Keep TCP connections alive")
    feature.add_argument('--no-keepalive', dest='keepalive', action='store_false',
                         help="Don't keep TCP connections alive")
    feature.set_defaults(keepalive=False)
    feature = parser.add_mutually_exclusive_group(required=False)
    feature.add_argument('--archive', '-a', dest='archive', action='store_true',
                         help='Process archive links in the feed per RFC 5005')
    feature.add_argument('--no-archive', dest='archive', action='store_false',
                         help='Do not process archive links in the feed')
    feature.set_defaults(archive=False)
    feature = parser.add_mutually_exclusive_group(required=False)
    feature.add_argument('--recurse', '-r',
                         help="Recursively check other discovered feeds",
                         action='store_true', dest='recurse')
    feature.add_argument('--no-recurse', dest='recurse', action='store_false',
                         help="Do not recurse into other feeds")
    feature.set_defaults(recurse=False)
    return parser.parse_args(*args)
def num_time_steps(self):
    """Return the number of time-steps in completed and incomplete
    trajectories."""
    pending = sum(trajectory.num_time_steps for trajectory in self.trajectories)
    return self.num_completed_time_steps + pending
def _initialize_tableaux ( payoff_matrices , tableaux , bases ) : """Given a tuple of payoff matrices , initialize the tableau and basis arrays in place . For each player ` i ` , if ` payoff _ matrices [ i ] . min ( ) ` is non - positive , then stored in the tableau are payoff values incremented by ` abs ( payoff _ matrices [ i ] . min ( ) ) + 1 ` ( to ensure for the tableau not to have a negative entry or a column identically zero ) . Suppose that the players 0 and 1 have m and n actions , respectively . * ` tableaux [ 0 ] ` has n rows and m + n + 1 columns , where columns 0 , . . . , m - 1 and m , . . . , m + n - 1 correspond to the non - slack and slack variables , respectively . * ` tableaux [ 1 ] ` has m rows and m + n + 1 columns , where columns 0 , . . . , m - 1 and m , . . . , m + n - 1 correspond to the slack and non - slack variables , respectively . * In each ` tableaux [ i ] ` , column m + n contains the values of the basic variables ( which are initially 1 ) . * ` bases [ 0 ] ` and ` bases [ 1 ] ` contain basic variable indices , which are initially m , . . . , m + n - 1 and 0 , . . . , m - 1 , respectively . Parameters payoff _ matrices : tuple ( ndarray ( ndim = 2 ) ) Tuple of two arrays representing payoff matrices , of shape ( m , n ) and ( n , m ) , respectively . tableaux : tuple ( ndarray ( float , ndim = 2 ) ) Tuple of two arrays to be used to store the tableaux , of shape ( n , m + n + 1 ) and ( m , m + n + 1 ) , respectively . Modified in place . bases : tuple ( ndarray ( int , ndim = 1 ) ) Tuple of two arrays to be used to store the bases , of shape ( n , ) and ( m , ) , respectively . Modified in place . Returns tableaux : tuple ( ndarray ( float , ndim = 2 ) ) View to ` tableaux ` . bases : tuple ( ndarray ( int , ndim = 1 ) ) View to ` bases ` . Examples > > > A = np . array ( [ [ 3 , 3 ] , [ 2 , 5 ] , [ 0 , 6 ] ] ) > > > B = np . array ( [ [ 3 , 2 , 3 ] , [ 2 , 6 , 1 ] ] ) > > > m , n = A . shape > > > tableaux = ( np . 
empty ( ( n , m + n + 1 ) ) , np . empty ( ( m , m + n + 1 ) ) ) > > > bases = ( np . empty ( n , dtype = int ) , np . empty ( m , dtype = int ) ) > > > tableaux , bases = _ initialize _ tableaux ( ( A , B ) , tableaux , bases ) > > > tableaux [ 0] array ( [ [ 3 . , 2 . , 3 . , 1 . , 0 . , 1 . ] , [ 2 . , 6 . , 1 . , 0 . , 1 . , 1 . ] ] ) > > > tableaux [ 1] array ( [ [ 1 . , 0 . , 0 . , 4 . , 4 . , 1 . ] , [ 0 . , 1 . , 0 . , 3 . , 6 . , 1 . ] , [ 0 . , 0 . , 1 . , 1 . , 7 . , 1 . ] ] ) > > > bases ( array ( [ 3 , 4 ] ) , array ( [ 0 , 1 , 2 ] ) )"""
nums_actions = payoff_matrices [ 0 ] . shape consts = np . zeros ( 2 ) # To be added to payoffs if min < = 0 for pl in range ( 2 ) : min_ = payoff_matrices [ pl ] . min ( ) if min_ <= 0 : consts [ pl ] = min_ * ( - 1 ) + 1 for pl , ( py_start , sl_start ) in enumerate ( zip ( ( 0 , nums_actions [ 0 ] ) , ( nums_actions [ 0 ] , 0 ) ) ) : for i in range ( nums_actions [ 1 - pl ] ) : for j in range ( nums_actions [ pl ] ) : tableaux [ pl ] [ i , py_start + j ] = payoff_matrices [ 1 - pl ] [ i , j ] + consts [ 1 - pl ] for j in range ( nums_actions [ 1 - pl ] ) : if j == i : tableaux [ pl ] [ i , sl_start + j ] = 1 else : tableaux [ pl ] [ i , sl_start + j ] = 0 tableaux [ pl ] [ i , - 1 ] = 1 for i in range ( nums_actions [ 1 - pl ] ) : bases [ pl ] [ i ] = sl_start + i return tableaux , bases
def range_piles(ranges):
    """Yield piles of interval indices that overlap.

    The piles are only interrupted by regions of zero coverage.

    >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)]
    >>> list(range_piles(ranges))
    [[0, 1], [2]]
    """
    endpoints = _make_endpoints(ranges)
    # Sweep each sequence independently, tracking how many intervals are
    # currently open ("coverage depth").
    for _, ends in groupby(endpoints, lambda endpoint: endpoint[0]):
        pile = []
        open_count = 0
        for _, _, side, idx, _ in ends:
            if side == LEFT:
                pile.append(idx)
                open_count += 1
            else:
                open_count -= 1
            # Coverage dropped to zero: the accumulated pile is complete.
            if open_count == 0 and pile:
                yield pile
                pile = []
def do_alarm_list(mc, args):
    '''List alarms for this tenant.

    Builds a query-field dict from the parsed CLI arguments, fetches the
    alarms through the monasca client, and prints them either as JSON or
    as a formatted table.
    '''
    fields = {}
    # Optional filters: each is added to the query only when supplied.
    if args.alarm_definition_id:
        fields['alarm_definition_id'] = args.alarm_definition_id
    if args.metric_name:
        fields['metric_name'] = args.metric_name
    if args.metric_dimensions:
        fields['metric_dimensions'] = utils.format_dimensions_query(args.metric_dimensions)
    if args.state:
        # Validate against the known alarm states before querying.
        if args.state.upper() not in state_types:
            errmsg = ('Invalid state, not one of [' + ', '.join(state_types) + ']')
            print(errmsg)
            return
        fields['state'] = args.state
    if args.severity:
        if not _validate_severity(args.severity):
            return
        fields['severity'] = args.severity
    if args.state_updated_start_time:
        fields['state_updated_start_time'] = args.state_updated_start_time
    if args.lifecycle_state:
        fields['lifecycle_state'] = args.lifecycle_state
    if args.link:
        fields['link'] = args.link
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    if args.sort_by:
        # sort_by is a comma-separated list of "<field> [asc|desc]" entries.
        sort_by = args.sort_by.split(',')
        for field in sort_by:
            field_values = field.lower().split()
            if len(field_values) > 2:
                # NOTE(review): prints a diagnostic but does not return,
                # unlike the field-name check below -- confirm intent.
                print("Invalid sort_by value {}".format(field))
            if field_values[0] not in allowed_alarm_sort_by:
                print("Sort-by field name {} is not in [{}]".format(field_values[0], allowed_alarm_sort_by))
                return
            if len(field_values) > 1 and field_values[1] not in ['asc', 'desc']:
                # NOTE(review): also prints without returning, so the
                # invalid direction is still passed to the API -- confirm.
                print("Invalid value {}, must be asc or desc".format(field_values[1]))
        fields['sort_by'] = args.sort_by
    try:
        alarm = mc.alarms.list(**fields)
    except (osc_exc.ClientException, k_exc.HttpError) as he:
        # Surface client/HTTP failures as a CLI command error.
        raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
    else:
        if args.json:
            print(utils.json_formatter(alarm))
            return
        # Column order for the tabular output.
        cols = ['id', 'alarm_definition_id', 'alarm_definition_name',
                'metric_name', 'metric_dimensions', 'severity', 'state',
                'lifecycle_state', 'link', 'state_updated_timestamp',
                'updated_timestamp', "created_timestamp"]
        # Extractors that flatten the nested alarm dict into table cells.
        formatters = {
            'id': lambda x: x['id'],
            'alarm_definition_id': lambda x: x['alarm_definition']['id'],
            'alarm_definition_name': lambda x: x['alarm_definition']['name'],
            'metric_name': lambda x: format_metric_name(x['metrics']),
            'metric_dimensions': lambda x: format_metric_dimensions(x['metrics']),
            'severity': lambda x: x['alarm_definition']['severity'],
            'state': lambda x: x['state'],
            'lifecycle_state': lambda x: x['lifecycle_state'],
            'link': lambda x: x['link'],
            'state_updated_timestamp': lambda x: x['state_updated_timestamp'],
            'updated_timestamp': lambda x: x['updated_timestamp'],
            'created_timestamp': lambda x: x['created_timestamp'],
        }
        if isinstance(alarm, list):
            # print the list
            utils.print_list(alarm, cols, formatters=formatters)
        else:
            # add the dictionary to a list, so print_list works
            alarm_list = list()
            alarm_list.append(alarm)
            utils.print_list(alarm_list, cols, formatters=formatters)
def _to_DOM(self):
    """Dump this object's data to a fully traversable DOM representation.

    :returns: a ``xml.etree.Element`` object
    """
    root = ET.Element("no2index")
    # Scalar fields are serialized as text sub-nodes, in this fixed order.
    for tag, value in (("reference_time", self._reference_time),
                       ("reception_time", self._reception_time),
                       ("interval", self._interval)):
        child = ET.SubElement(root, tag)
        child.text = str(value)
    samples_node = ET.SubElement(root, "no2_samples")
    for sample in self._no2_samples:
        item = sample.copy()
        # Render numeric values as 12-decimal-digit scientific-notation strings
        item['value'] = '{:.12e}'.format(item['value'])
        item['precision'] = '{:.12e}'.format(item['precision'])
        xmlutils.create_DOM_node_from_dict(item, "no2_sample", samples_node)
    root.append(self._location._to_DOM())
    return root
async def stop(wallet_name: str) -> None:
    """Gracefully stop an external revocation registry builder, waiting for
    its current tasks to complete.

    The indy-sdk toolkit uses a temporary directory for tails files, and
    shutting down the toolkit removes the directory, crashing any external
    tails file write still in flight. This method allows a graceful stop that
    waits for completion of such tasks already in progress.

    :param wallet_name: name of external revocation registry builder to check
    """
    LOGGER.debug('RevRegBuilder.stop >>>')

    sentinel_dir = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
    if isdir(sentinel_dir):
        # Touch the stop marker so the builder knows not to take new work.
        open(join(sentinel_dir, '.stop'), 'w').close()

        def _busy() -> bool:
            # True while any subdirectory still holds an in-progress marker.
            return any(
                isfile(join(sentinel_dir, entry, '.in-progress'))
                for entry in listdir(sentinel_dir))

        while _busy():
            await asyncio.sleep(1)

    LOGGER.debug('RevRegBuilder.stop <<<')
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is a __members__ entry whose dict key differs from the
    # member's canonical name (i.e. two names mapped to one value).
    aliases = [(name, member.name)
               for name, member in enumeration.__members__.items()
               if name != member.name]
    if aliases:
        details = ', '.join("%s -> %s" % (alias, name) for (alias, name) in aliases)
        raise ValueError('duplicate names found in %r: %s' % (enumeration, details))
    return enumeration
def find_table_file(root_project_dir):
    """Find the EUPS table file for a project.

    Parameters
    ----------
    root_project_dir : `str`
        Path to the root directory of the main documentation project. This
        is the directory containing the ``conf.py`` file and a ``ups``
        directory.

    Returns
    -------
    table_path : `str`
        Path to the EUPS table file.

    Raises
    ------
    RuntimeError
        If no ``*.table`` file exists in the ``ups`` directory.
    """
    ups_dir_path = os.path.join(root_project_dir, 'ups')
    table_path = None
    for name in os.listdir(ups_dir_path):
        if name.endswith('.table'):
            table_path = os.path.join(ups_dir_path, name)
            break
    # Bug fix: the original called os.path.exists(table_path) even when no
    # .table file was found, raising TypeError on None instead of the
    # intended RuntimeError.
    if table_path is None or not os.path.exists(table_path):
        raise RuntimeError(
            'Could not find the EUPS table file at {}'.format(
                table_path or ups_dir_path))
    return table_path
def can_finalise(self, step, exhausted, status):
    """Return whether ``step`` is running with all of its inputs exhausted.

    :param step: the step to check (must expose an ``ins`` collection)
    :param exhausted: mapping of step -> collection of exhausted inputs
    :param status: mapping of step -> state string
    :return: bool
    """
    # Guard clauses: the step must be known to both maps and be running.
    if step not in status or step not in exhausted:
        return False
    if status[step] != 'running':
        return False
    exhausted_ins = exhausted[step]
    return all(input_ in exhausted_ins for input_ in step.ins)
def calc_blr(xsqlda):
    "Calculate BLR from XSQLVAR array."
    # Message length: two BLR entries per variable (the value itself plus a
    # null-indicator short appended after each variable below).
    ln = len(xsqlda) * 2
    # BLR header: version5, blr_begin, blr_message, message number 0,
    # then the field count as a little-endian 16-bit value.
    blr = [5, 2, 4, 0, ln & 255, ln >> 8]
    for x in xsqlda:
        sqltype = x.sqltype
        if sqltype == SQL_TYPE_VARYING:
            # varying: type code then declared length (little-endian 16-bit)
            blr += [37, x.sqllen & 255, x.sqllen >> 8]
        elif sqltype == SQL_TYPE_TEXT:
            # fixed text: type code then declared length
            blr += [14, x.sqllen & 255, x.sqllen >> 8]
        elif sqltype == SQL_TYPE_LONG:
            # scaled numeric types: type code followed by the scale byte
            blr += [8, x.sqlscale]
        elif sqltype == SQL_TYPE_SHORT:
            blr += [7, x.sqlscale]
        elif sqltype == SQL_TYPE_INT64:
            blr += [16, x.sqlscale]
        elif sqltype == SQL_TYPE_QUAD:
            blr += [9, x.sqlscale]
        elif sqltype == SQL_TYPE_DEC_FIXED:
            blr += [26, x.sqlscale]
        else:
            # remaining types use a fixed BLR byte sequence from the table
            blr += sqltype2blr[sqltype]
        blr += [7, 0]   # [blr_short, 0]: null indicator for this variable
    blr += [255, 76]    # [blr_end, blr_eoc]
    # x.sqlscale may be negative; map each byte into the range(0, 256)
    # before packing the sequence into bytes.
    return bs(256 + b if b < 0 else b for b in blr)
def prepare_method(self, method):
    """Prepares the given HTTP method."""
    # HTTP method names are case-insensitive; normalize to upper case.
    self.method = None if method is None else method.upper()
def get_linked_sections(section, include_instructor_not_on_time_schedule=True):
    """Returns a list of uw_sws.models.Section objects,
    representing linked sections for the passed section.
    """
    # NOTE: the original rebound the ``section`` parameter inside its loop,
    # shadowing the argument (it only worked because the url iterator was
    # created before the first rebinding). A comprehension with a distinct
    # name avoids the fragile shadowing.
    return [
        get_section_by_url(url, include_instructor_not_on_time_schedule)
        for url in section.linked_section_urls
    ]
def request_sid(self):
    """Request a BOSH session according to
    http://xmpp.org/extensions/xep-0124.html#session-request

    Returns the new SID (str), or None if the server did not answer or did
    not advertise any authentication mechanism.
    """
    # Already negotiated: reuse the cached session id.
    if self._sid:
        return self._sid

    self.log.debug('Prepare to request BOSH session')
    raw = self.send_request(self.get_body(sid_request=True))
    if not raw:
        return None

    # ``raw`` is XML: the <body/> element of the response.
    body = ET.fromstring(raw)

    # Remote session id.
    self._sid = body.get('sid')
    self.log.debug('sid = %s' % self._sid)

    # Longest time (s) the XMPP server will wait before responding.
    self.server_wait = body.get('wait')
    self.log.debug('wait = %s' % self.server_wait)

    self.authid = body.get('authid')

    # Collect the allowed SASL mechanisms via an xpath search.
    xpath = '{{{0}}}features/{{{1}}}mechanisms/{{{2}}}mechanism'.format(
        JABBER_STREAMS_NS, XMPP_SASL_NS, XMPP_SASL_NS)
    self.log.debug('Looking for "%s" into response body', xpath)

    self.server_auth = []
    for mechanism in body.findall(xpath):
        self.server_auth.append(mechanism.text)
        self.log.debug('New AUTH method: %s' % mechanism.text)

    if not self.server_auth:
        self.log.debug(('The server didn\'t send the allowed '
                        'authentication methods'))
        self._sid = None
    return self._sid
def galcencyl_to_XYZ(R, phi, Z, Xsun=1., Zsun=0., _extra_rot=True):
    """NAME:

       galcencyl_to_XYZ

    PURPOSE:

       transform cylindrical Galactocentric coordinates to XYZ coordinates
       (wrt Sun)

    INPUT:

       R, phi, Z - Galactocentric cylindrical coordinates

       Xsun - cylindrical distance to the GC (can be array of same length as R)

       Zsun - Sun's height above the midplane (can be array of same length as R)

       _extra_rot= (True) if True, perform an extra tiny rotation to align the
                   Galactocentric coordinate frame with astropy's definition

    OUTPUT:

       X, Y, Z

    HISTORY:

       2011-02-23 - Written - Bovy (NYU)

       2017-10-24 - Allowed Xsun/Zsun to be arrays - Bovy (UofT)

    """
    # Cylindrical -> rectangular Galactocentric, then shift/rotate into the
    # Sun-centered frame.
    rect = cyl_to_rect(R, phi, Z)
    return galcenrect_to_XYZ(*rect, Xsun=Xsun, Zsun=Zsun, _extra_rot=_extra_rot)
def start(self):
    """Start the pinging coroutine using the client and event loop which
    was passed to the constructor.

    :meth:`start` always behaves as if :meth:`stop` was called right
    before it.
    """
    # Restart semantics: cancel any previously scheduled pinger first.
    self.stop()
    pinger_coro = self._pinger()
    self._task = asyncio.ensure_future(pinger_coro, loop=self._loop)
def pretty_print_gremlin(gremlin):
    """Return a human-readable representation of a gremlin command string."""
    gremlin = remove_custom_formatting(gremlin)
    # Split on a '.' that follows ')', '}' or the keyword 'scatter'; the
    # capture group keeps the terminator so it can be stitched back on.
    pieces = re.split(r'([)}]|scatter)[ ]?\.', gremlin)

    # re.split with one capture group alternates text/terminator; glue each
    # captured terminator back onto the text that preceded it.
    parts = [pieces[idx] + pieces[idx + 1]
             for idx in six.moves.xrange(0, len(pieces) - 1, 2)]
    parts.append(pieces[-1])

    # Re-attach the '.' that the split consumed.
    for idx in six.moves.xrange(1, len(parts)):
        parts[idx] = '.' + parts[idx]

    indentation = 0
    indentation_increment = 4
    output = []
    for current_part in parts:
        # Traversal steps open a level; back/optional close one.
        if current_part.startswith(('.out', '.in', '.ifThenElse')):
            indentation += indentation_increment
        elif current_part.startswith(('.back', '.optional')):
            indentation -= indentation_increment
            if indentation < 0:
                raise AssertionError(u'Indentation became negative: {}'.format(indentation))
        output.append((' ' * indentation) + current_part)

    return '\n'.join(output).strip()
def setSignalHeader(self, edfsignal, channel_info):
    """Sets the parameter for signal edfsignal.

    channel_info should be a dict with these values:

        'label' : channel label (string, <= 16 characters, must be unique)
        'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
        'sample_rate' : sample frequency in hertz (int)
        'physical_max' : maximum physical value (float)
        'physical_min' : minimum physical value (float)
        'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
        'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
    """
    # Off-by-one fix: valid indices are 0 .. n_channels-1. The original
    # test (``edfsignal > self.n_channels``) let an index equal to
    # n_channels through, which would hit IndexError on the assignment
    # below instead of raising ChannelDoesNotExist — assuming
    # self.channels holds exactly n_channels entries (TODO confirm against
    # the constructor).
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal] = channel_info
    self.update_header()
def file_writelines_flush_sync(path, lines):
    """Fill file at @path with @lines then flush all buffers
    (Python and system buffers)
    """
    # ``with`` guarantees the file is closed even if writelines/flush
    # raise, replacing the original's manual try/finally.
    with open(path, 'w') as fp:
        fp.writelines(lines)
        flush_sync_file_object(fp)
def login_required(func):
    '''decorator describing User methods that need to be logged in'''
    # Local import: keeps the file's visible import block untouched.
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's __name__/__doc__ for introspection
    def ret(obj, *args, **kw):
        if not hasattr(obj, 'sessionToken'):
            message = '%s requires a logged-in session' % func.__name__
            raise ResourceRequestLoginRequired(message)
        return func(obj, *args, **kw)
    return ret
def _do_select(self, start_bindex, end_bindex):
    """select the given range by buffer indices

    selects items like this:

        ..................
        ......xxxxxxxxxxxx
        xxxxxxxxxxxxxxxxxx
        xxxxxxxxxxxx......
        ..................

    *not* like this:

        ..................
        ......xxxxxx......
        ......xxxxxx......
        ......xxxxxx......
        ..................
    """
    # Drop any existing selection before building the new one.
    self.select(QItemSelection(), QItemSelectionModel.Clear)
    # Normalize so start <= end regardless of drag direction.
    if start_bindex > end_bindex:
        start_bindex, end_bindex = end_bindex, start_bindex

    selection = QItemSelection()
    if row_number(end_bindex) - row_number(start_bindex) == 0:
        # all on one line
        self._bselect(selection, start_bindex, end_bindex)
    elif row_number(end_bindex) - row_number(start_bindex) == 1:
        # two lines: tail of the first row, head of the second row.
        self._bselect(selection, start_bindex, row_end_index(start_bindex))
        self._bselect(selection, row_start_index(end_bindex), end_bindex)
    else:
        # many lines: tail of the first row, all full middle rows
        # (0x10 bytes per row — presumably the hex-view row width, TODO
        # confirm), then the head of the last row.
        self._bselect(selection, start_bindex, row_end_index(start_bindex))
        self._bselect(selection, row_start_index(start_bindex) + 0x10, row_end_index(end_bindex) - 0x10)
        self._bselect(selection, row_start_index(end_bindex), end_bindex)

    self.select(selection, QItemSelectionModel.SelectCurrent)
    # Remember the normalized range and notify listeners.
    self.start = start_bindex
    self.end = end_bindex
    self.selectionRangeChanged.emit(end_bindex)
def set_or_create(self, path, *args, **kwargs):
    """Sets the data of a node at the given path, or creates it."""
    deferred = self.set(path, *args, **kwargs)
    # If setting fails (e.g. the node does not exist yet), fall back to
    # creating it with the same arguments.
    deferred.addErrback(lambda failure: self.create(path, *args, **kwargs))
    return deferred
def _convert_epoch_anchor ( cls , reading ) : """Convert a reading containing an epoch timestamp to datetime ."""
delta = datetime . timedelta ( seconds = reading . value ) return cls . _EpochReference + delta
def search_group(self, search_query):
    """Searches for public groups using a query.

    Results will be returned using the on_group_search_response() callback.

    :param search_query: The query that contains some of the desired groups' name.
    """
    message = "[+] Initiating a search for groups using the query '{}'".format(search_query)
    log.info(message)
    request = roster.GroupSearchRequest(search_query)
    return self._send_xmpp_element(request)
def compile_update(self, query, values):
    """Compile an update statement into SQL.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder

    :param values: The update values
    :type values: dict

    :return: The compiled update
    :rtype: str
    """
    sql_parts = (
        self.wrap_table(query.from__),
        self._compile_update_columns(values),
        self._compile_update_from(query),
        self._compile_update_wheres(query),
    )
    # The FROM fragment may be empty, hence the trailing strip().
    return "UPDATE {} SET {}{} {}".format(*sql_parts).strip()
def to_text_diagram_drawer(
        self,
        *,
        use_unicode_characters: bool = True,
        qubit_namer: Optional[Callable[[ops.Qid], str]] = None,
        transpose: bool = False,
        precision: Optional[int] = 3,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        get_circuit_diagram_info: Optional[Callable[
            [ops.Operation, protocols.CircuitDiagramInfoArgs],
            protocols.CircuitDiagramInfo]] = None
) -> TextDiagramDrawer:
    """Returns a TextDiagramDrawer with the circuit drawn into it.

    Args:
        use_unicode_characters: Determines if unicode characters are
            allowed (as opposed to ascii-only diagrams).
        qubit_namer: Names qubits in diagram. Defaults to str.
        transpose: Arranges qubit wires vertically instead of horizontally.
        precision: Number of digits to use when representing numbers.
        qubit_order: Determines how qubits are ordered in the diagram.
        get_circuit_diagram_info: Gets circuit diagram info. Defaults to
            protocol with fallback.

    Returns:
        The TextDiagramDrawer instance.
    """
    # Resolve the qubit ordering and assign each qubit a wire (row) index.
    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
        self.all_qubits())
    qubit_map = {qubits[i]: i for i in range(len(qubits))}

    # Default label: "q: " horizontally, just "q" when transposed.
    if qubit_namer is None:
        qubit_namer = lambda q: str(q) + ('' if transpose else ': ')
    diagram = TextDiagramDrawer()
    # Column 0 holds the qubit labels.
    for q, i in qubit_map.items():
        diagram.write(0, i, qubit_namer(q))

    # Draw each moment; moment_groups collects (start, end) column spans
    # filled in by the moment-drawing helper.
    moment_groups = []  # type: List[Tuple[int, int]]
    for moment in self._moments:
        _draw_moment_in_diagram(moment,
                                use_unicode_characters,
                                qubit_map,
                                diagram,
                                precision,
                                moment_groups,
                                get_circuit_diagram_info)

    # Extend every qubit wire across the full diagram width.
    w = diagram.width()
    for i in qubit_map.values():
        diagram.horizontal_line(i, 0, w)

    if moment_groups:
        _draw_moment_groups_in_diagram(moment_groups,
                                       use_unicode_characters,
                                       diagram)

    # Transposition happens last so all drawing code works in one layout.
    if transpose:
        diagram = diagram.transpose()

    return diagram