signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
async def get_peer_id(self, peer, add_mark=True):
    """Gets the ID for the given peer, which may be anything entity-like.

    This method needs to be ``async`` because `peer` supports usernames,
    invite-links, phone numbers (from people in your contact list), etc.

    If ``add_mark is False``, then a positive ID will be returned
    instead. By default, bot-API style IDs (signed) are returned.
    """
    if isinstance(peer, int):
        # Already a numeric ID; no resolution needed.
        return utils.get_peer_id(peer, add_mark=add_mark)

    try:
        # 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'.
        # Anything else entity-like must first be resolved to an input peer.
        if peer.SUBCLASS_OF_ID not in (0x2d45687, 0xc91c90b6):
            peer = await self.get_input_entity(peer)
    except AttributeError:
        # Not a TLObject at all (e.g. username/link/phone); resolve it.
        peer = await self.get_input_entity(peer)

    if isinstance(peer, types.InputPeerSelf):
        # "Self" carries no ID; fetch our own input peer to get one.
        peer = await self.get_me(input_peer=True)

    return utils.get_peer_id(peer, add_mark=add_mark)
def _write_config(config):
    '''
    Write the given property mapping to /usbkey/config.

    Values containing spaces are wrapped in double quotes unless already
    fully quoted. Returns True on success, False on IOError.

    NOTE(review): `config` is mutated in place when values get quoted —
    confirm callers do not rely on the original values.
    '''
    try:
        # atomic_open ensures the file is either fully written or untouched.
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # Sort keys for a stable, diff-friendly output order.
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                if ' ' in six.text_type(config[prop]):
                    # Quote unless the value already starts AND ends with '"'.
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _create(cls, repo, path, resolve, reference, force, logmsg=None):
    """Internal method used to create a new symbolic reference.

    If resolve is False, the reference will be taken as is, creating a
    proper symbolic reference. Otherwise it will be resolved to the
    corresponding object and a detached symbolic reference will be
    created instead.
    """
    git_dir = _git_dir(repo, path)
    full_ref_path = cls.to_full_path(path)
    abs_ref_path = osp.join(git_dir, full_ref_path)

    # figure out target data
    target = reference
    if resolve:
        target = repo.rev_parse(str(reference))

    if not force and osp.isfile(abs_ref_path):
        # A ref file already exists: only proceed silently if it already
        # points at exactly the data we were going to write.
        target_data = str(target)
        if isinstance(target, SymbolicReference):
            target_data = target.path
        if not resolve:
            # Symbolic refs are stored as "ref: <path>" lines.
            target_data = "ref: " + target_data
        with open(abs_ref_path, 'rb') as fd:
            existing_data = fd.read().decode(defenc).strip()
        if existing_data != target_data:
            raise OSError("Reference at %r does already exist, pointing to %r, requested was %r"
                          % (full_ref_path, existing_data, target_data))
    # END no force handling

    ref = cls(repo, full_ref_path)
    ref.set_reference(target, logmsg)
    return ref
def _get ( self , value , context = None , default = None ) : """Similar to _ get _ list ( ) except that the return value is required to be a str . This calls _ get _ list ( ) to retrieve the value , but raises an exception unless the return is None or a single - valued list when that value will be returned . If a default value is provided , it will be returned if the value passed is None . It is not applied during recursion , but will be applied if the result of the recursion is None ."""
if value is None : return default ret = self . _get_list ( value , context = context ) if ret is None : return default name = getattr ( self , '_name' , None ) if isinstance ( ret , list ) : if len ( ret ) == 0 : raise TaskError ( name , "Value '%s' resolved to an empty list" % ( value , ) ) elif len ( ret ) == 1 : return ret [ 0 ] else : raise TaskError ( name , "Value %r resolved to a multi-valued list %s" % ( value , ret ) ) else : raise TaskError ( name , "Value %r resolved to unexpect type %s" % ( value , type ( ret ) . __name__ ) )
def ip_verification_required(func):
    """Decorator that rejects requests whose client IP is not on file.

    Looks up the WriteAPIIP record by the view's `slug` kwarg and returns
    a 401 JSON response unless the client IP is in the allowable list
    (or "0.0.0.0" is listed, which disables the check).
    """
    def wrapper(request, *args, **kwargs):
        slug = kwargs.get('slug', "")
        if not slug:
            # Fix: the original called `kickoutt_404` (double "t"), which
            # matches no helper; `kickout_401` below suggests the intended
            # name is `kickout_404`.
            return kickout_404("Not found.", content_type="application/json")
        try:
            wip = WriteAPIIP.objects.get(slug=slug)
            ip = get_client_ip(request)
            # "0.0.0.0" in the allow-list acts as a wildcard.
            if ip not in wip.allowable_ips() and "0.0.0.0" not in wip.allowable_ips():
                msg = "The IP %s is not authorized to make the API call." % (ip)
                return kickout_401(msg)
        except WriteAPIIP.DoesNotExist:
            return HttpResponse(unauthorized_json_response(),
                                content_type="application/json")
        return func(request, *args, **kwargs)
    return update_wrapper(wrapper, func)
def call(command, silent=False):
    """Runs a command safely with shell=False and checks the return code.

    Raises slightly modified CalledProcessError exceptions on failures.
    Note: command is a string and cannot include pipes.

    :param command: command line as a single string (split by
        command_to_array before execution).
    :param silent: when True, stdout is redirected to os.devnull.
    :raises subprocess.CalledProcessError: on a non-zero exit code, with
        `cmd` stripped of arguments so credentials never leak into logs.
    """
    try:
        if silent:
            with open(os.devnull, 'w') as FNULL:
                return subprocess.check_call(command_to_array(command), stdout=FNULL)
        else:
            # Consistency fix: the original used the bare name `check_call`
            # here but `subprocess.check_call` in the branch above; use the
            # module-qualified form everywhere.
            return subprocess.check_call(command_to_array(command))
    except subprocess.CalledProcessError as e:
        # We are modifying the error itself for 2 reasons. 1) it WILL contain
        # login credentials when run_mongodump is run, 2) CalledProcessError is
        # slightly not-to-spec (the message variable is blank), which means
        # cronutils.ErrorHandler would report unlabeled stack traces.
        e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode)
        e.cmd = e.cmd[0] + " [arguments stripped for security]"
        raise e
def start(self, contract_names, target):
    '''Loads the contracts and starts their event listeners.

    :param contract_names: a single contract name or a list of names
    :param target: deployment target used for address lookup/listening
    :return: (None, error-string) on failure; otherwise blocks running
        the listeners on the asyncio event loop
    '''
    if isinstance(contract_names, str):
        contract_names = [contract_names]
    if not isinstance(contract_names, list):
        return None, "error: expecting a string, or a list of contract names"
    contract_listeners = []
    for name in contract_names:
        c, err = Contract.get(name, self)
        if err:
            EZO.log.error(red("error loading contract {}".format(name)))
            EZO.log.error(red(err))
            continue
        if not c:
            EZO.log.warn(blue("contract {} not found".format(name)))
            continue
        address, err = Contract.get_address(name, c.hash, self.db, target=target)
        if err:
            # Fix: the original applied .format(name) to the result of
            # red(...) instead of to the message string, unlike every
            # other log call in this method.
            EZO.log.error(red("error obtaining address for contract {}".format(name)))
            EZO.log.error(red(err))
            continue
        if not address:
            EZO.log.error(red("no address for contract {}".format(name)))
            continue
        contract_listeners.append(c.listen(address, target))
    if contract_listeners:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.gather(*contract_listeners))
    else:
        return None, "unable to start contract listeners"
def most_common_nucleotides(partitioned_read_sequences):
    """Find the most common nucleotide at each offset around a variant.

    Parameters
    ----------
    partitioned_read_sequences : list of tuples
        Each tuple has three elements:
        - sequence before mutant nucleotides
        - mutant nucleotides
        - sequence after mutant nucleotides

    Returns a tuple of:
        - nucleotide string built from the most common nucleotide at
          each offset relative to the variant
        - array of counts of reads supporting that nucleotide
        - array of counts for all *other* nucleotides at that position
    """
    counts, variant_column_indices = nucleotide_counts(partitioned_read_sequences)
    n_columns = counts.shape[1]
    # Per-column winner counts and their row (nucleotide) indices.
    column_max = counts.max(axis=0)
    assert len(column_max) == n_columns
    best_rows = np.argmax(counts, axis=0)
    assert len(best_rows) == n_columns
    consensus = "".join(index_to_dna_nucleotide[row] for row in best_rows)
    # Everything that is not the winning nucleotide at each column.
    other_counts = counts.sum(axis=0) - column_max
    return consensus, column_max, other_counts
def _head(self, client_kwargs):
    """Returns object or bucket HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    with _handle_client_error():
        # 'Key' present means an object-level request; otherwise bucket.
        if 'Key' in client_kwargs:
            response = self.client.head_object(**client_kwargs)
        else:
            response = self.client.head_bucket(**client_kwargs)

        # Strip HTTP request bookkeeping fields before returning.
        for field in ('AcceptRanges', 'ResponseMetadata'):
            response.pop(field, None)
        return response
def flatten(self):
    """Flatten the scheme into a dictionary keyed by compound 'dot'
    notation names, mapping to the corresponding options.

    Returns:
        dict: The flattened `Scheme`. Cached after the first call.
    """
    if self._flat is not None:
        return self._flat
    flattened = {}
    for arg in self.args:
        if isinstance(arg, (Option, ListOption, DictOption)):
            flattened[arg.name] = arg
            # Only DictOption carries a nested scheme to recurse into.
            if isinstance(arg, DictOption) and arg.scheme:
                for key, option in arg.scheme.flatten().items():
                    flattened[arg.name + '.' + key] = option
    self._flat = flattened
    return self._flat
def user_entry(entry_int, num_inst, command):
    """Validate a user's numeric menu entry.

    Args:
        entry_int (int): the number entered (999 when a non-int was typed).
        num_inst (int): the largest valid number that can be entered.
        command (str): program command to display in the prompt.

    Returns:
        tuple: (entry_idx, valid_entry) where entry_idx is the zero-based
        index of the chosen instance (or the raw entry when invalid) and
        valid_entry flags whether entry_idx may be used.

    Raises:
        SystemExit: when the user enters 0 (the offered "abort" option).
    """
    if not entry_int:
        # 0 (or any falsy entry) is the abort option.
        print("{}aborting{} - {} instance\n".format(C_ERR, C_NORM, command))
        sys.exit()
    if 1 <= entry_int <= num_inst:
        # Menu numbers are 1-based; indices are 0-based.
        return (entry_int - 1, True)
    print("{}Invalid entry:{} enter a number between 1"
          " and {}.".format(C_ERR, C_NORM, num_inst))
    return (entry_int, False)
def get_sub_array_sbi_ids(self, sub_array_id):
    """Get Scheduling Block Instance IDs associated with a sub array id."""
    sbi_ids = self.get_sched_block_instance_ids()
    # Collect ids whose block details match the requested sub array.
    matching = [
        details['id']
        for details in self.get_block_details(sbi_ids)
        if details['sub_array_id'] == sub_array_id
    ]
    return sorted(matching)
def combine_dicts(*dicts, copy=False, base=None):
    """Combine multiple dicts into one.

    :param dicts: A sequence of dicts.
    :type dicts: dict

    :param copy: If True, return a deepcopy of the combined values.
    :type copy: bool, optional

    :param base: Base dict into which the others are merged.
    :type base: dict, optional

    :return: A single combined dict (later dicts win on key clashes).
    :rtype: dict

    Example::

        >>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
        [('a', 1), ('b', 2), ('c', 3)]
    """
    if base is None and len(dicts) == 1:
        # Single input and no base: a shallow copy is all that is needed.
        merged = dicts[0].copy()
    else:
        merged = base if base is not None else {}
        for mapping in dicts:
            if mapping:
                # noinspection PyTypeChecker
                merged.update(mapping)
    if copy:
        return {key: _copy.deepcopy(value) for key, value in merged.items()}
    return merged
def integrate(self, rate, timestep):
    """Advance a time varying quaternion to its value at a time
    `timestep` in the future. The Quaternion object will be modified to
    its future value. It is guaranteed to remain a unit quaternion.

    Params:
        rate: numpy 3-array (or array-like) describing rotation rates
            about the global x, y and z axes respectively.
        timestep: interval over which to integrate into the future.
            Assuming *now* is `T=0`, the integration occurs over the
            interval `T=0` to `T=timestep`. Smaller intervals are more
            accurate when `rate` changes over time.

    Note:
        The solution is closed form given the assumption that `rate` is
        constant over the interval of length `timestep`.
    """
    # Keep |q| == 1 before composing rotations.
    self._fast_normalise()
    rate = self._validate_number_sequence(rate, 3)

    # Constant-rate assumption: total rotation is rate * timestep.
    rotation_vector = rate * timestep
    rotation_norm = np.linalg.norm(rotation_vector)
    if rotation_norm > 0:
        axis = rotation_vector / rotation_norm
        angle = rotation_norm
        q2 = Quaternion(axis=axis, angle=angle)
        self.q = (self * q2).q
        # Re-normalise to counter floating-point drift from the multiply.
        self._fast_normalise()
def _Matches ( path , pattern_list ) : """Returns true if path matches any patten found in pattern _ list . Args : path : A dot separated path to a package , class , method or variable pattern _ list : A list of wildcard patterns Returns : True if path matches any wildcard found in pattern _ list ."""
# Note : This code does not scale to large pattern _ list sizes . return any ( fnmatch . fnmatchcase ( path , pattern ) for pattern in pattern_list )
def unbuild_month(self, dt):
    """Deletes the directory at self.get_build_path for `dt`'s month.

    :param dt: a date/datetime whose year and month select the target.
    """
    self.year = str(dt.year)
    self.month = str(dt.month)
    # Message fix: this method removes a build; the original logged
    # "Building %s-%s", which is misleading in the logs.
    logger.debug("Unbuilding %s-%s" % (self.year, self.month))
    # Remove the parent directory containing the month's build output.
    target_path = os.path.split(self.get_build_path())[0]
    if self.fs.exists(target_path):
        logger.debug("Removing {}".format(target_path))
        self.fs.removetree(target_path)
def read_csv(csv_file, options, ensemble_list=None):
    """Read a csv (optionally gzipped) and return molList; on an open
    failure, print an error and exit.

    :param csv_file: path to the .csv or .csv.gz file.
    :param options: parsing options forwarded to read_header/read_line.
    :param ensemble_list: optional ensemble data forwarded to the readers.
    :return: list of Molecule objects (rows that failed are skipped).
    """
    name, ext = os.path.splitext(csv_file)
    try:
        if ext == '.gz':
            # NOTE(review): 'rb' yields bytes, which csv.reader cannot
            # parse on Python 3 -- confirm gzip input is actually used.
            f = gzip.open(csv_file, 'rb')
        else:
            # 'r' replaces deprecated 'rU' (removed in Python 3.11);
            # universal newlines are the default on Python 3.
            f = open(csv_file, 'r')
    except IOError:
        print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csv_file)))
        sys.exit(1)
    molList = []
    try:
        csv_reader = csv.reader(f)
        for line_number, line in enumerate(csv_reader, start=1):
            if line_number == 1:
                # First row is the header describing property columns.
                if ensemble_list:
                    prop_indices = read_header(line, options, ensemble_list)
                else:
                    prop_indices = read_header(line, options)
            else:
                mol = Molecule()
                if ensemble_list:
                    mol = read_line(line, options, prop_indices, mol, ensemble_list)
                else:
                    mol = read_line(line, options, prop_indices, mol)
                # read_line signals a parse failure by returning 1.
                if mol == 1:
                    print(" skipping molecule {m}\n".format(m=(line_number - 1)))
                else:
                    molList.append(mol)
    finally:
        # Leak fix: the original never closed the file handle.
        f.close()
    return molList
def generate_uris(self):
    """Generate the lambda ARN, API Gateway invocation URI and API DNS.

    Returns:
        dict: keys 'lambda_arn', 'lambda_uri' and 'api_dns'.
    """
    arn = "arn:aws:execute-api:{0}:{1}:{2}/*/{3}/{4}".format(
        self.region,
        self.account_id,
        self.api_id,
        self.trigger_settings['method'],
        self.trigger_settings['resource'])

    invocation_uri = (
        "arn:aws:apigateway:{0}:lambda:path/{1}/functions/"
        "arn:aws:lambda:{0}:{2}:function:{3}/invocations").format(
            self.region, self.api_version, self.account_id, self.app_name)

    dns = "https://{0}.execute-api.{1}.amazonaws.com/{2}".format(
        self.api_id, self.region, self.env)

    return {
        'lambda_arn': arn,
        'lambda_uri': invocation_uri,
        'api_dns': dns,
    }
def _is_url_in_cache(*args, **kwargs):
    """Return True if request has been cached or False otherwise.

    Builds a PreparedRequest from the given arguments (dropping any
    kwargs .prepare() does not accept), hashes it, and checks the
    requests_cache backend for that hash.
    """
    # Only include allowed arguments for a PreparedRequest.
    # Fix: inspect.getargspec was removed in Python 3.11; getfullargspec
    # is the drop-in replacement and exists since Python 3.
    allowed_args = inspect.getfullargspec(requests.models.PreparedRequest.prepare).args
    # self is in there as .prepare() is a method.
    allowed_args.remove('self')
    kwargs_cleaned = {key: value for key, value in kwargs.items()
                      if key in allowed_args}
    prepared_request = _prepare(*args, **kwargs_cleaned)
    request_hash = _get_hash(prepared_request)
    try:
        return requests_cache.get_cache().has_key(request_hash)
    except AttributeError as e:
        # requests_cache not enabled
        if str(e) == "'Session' object has no attribute 'cache'":
            return False
        raise
def identifier(self):
    """Get the identifier for this node.

    Extended keys can be identified by the Hash160 (RIPEMD160 after
    SHA256) of the public key's `key`. This corresponds exactly to the
    data used in traditional Bitcoin addresses. It is not advised to
    represent this data in base58 format though, as it may be
    interpreted as an address that way (and wallet software is not
    required to accept payment to the chain key itself).
    """
    # hex public key -> raw bytes -> hash160 -> hex bytes
    pub_hex = ensure_bytes(self.get_public_key_hex())
    digest = hash160(unhexlify(pub_hex))
    return ensure_bytes(hexlify(digest))
def expect_request(self, schema, merge=False):
    """*Sets the schema to validate the request properties*

    Expectations are effective for following requests in the test suite,
    or until they are reset or updated by using expectation keywords
    again. On the suite level they are best used for endpoint-wide
    properties common to all HTTP methods; on the test level to merge in
    HTTP-method-specific properties.

    `Expect Request` is intended for tests whose request properties
    (e.g. body or query parameters) are randomized ("fuzzing"), to
    validate that the sent values are within the expected scope. If this
    keyword is used, following HTTP keywords will fail when their
    request properties are not valid against the expected schema.

    *Options*

    ``merge``: Merges the new schema with the current one instead of
    replacing it.

    *Examples*

    | `Expect Request` | ${CURDIR}/valid_payload.json |            | # See `Output Schema` |
    | `Expect Request` | { "body": { "required": ["id"] } } | merge=true |
    """
    schema = self._input_object(schema)
    # Allow passing bare property maps without a "properties" wrapper.
    if "properties" not in schema:
        schema = {"properties": schema}
    if self._input_boolean(merge):
        # Merge: fold the new schema into the currently expected one.
        new_schema = SchemaBuilder(schema_uri=False)
        new_schema.add_schema(self.schema["properties"]["request"])
        new_schema.add_schema(schema)
        self.schema["properties"]["request"] = new_schema.to_schema()
    else:
        # Replace the expected request schema outright.
        self.schema["properties"]["request"] = schema
    return self.schema["properties"]["request"]
def _get_asset_content(self, asset_id, asset_content_type_str=None, asset_content_id=None):
    """Look up an asset content by id or genus type.

    :param asset_id: id of the asset to inspect.
    :param asset_content_type_str: optional genus Type (or its string)
        to match; when absent the first content is returned.
    :param asset_content_id: optional specific content id to match.
    :raises KeyError: when no repository assignment key is present.
    :raises NotFound: when a genus type is given but nothing matches.
    """
    rm = self.my_osid_object._get_provider_manager('REPOSITORY')
    my_map = self.my_osid_object._my_map
    # DRY fix: the repository id may be filed under any of these legacy
    # keys; the original duplicated the whole session setup three times.
    for assignment_key in ('assignedBankIds', 'assignedBookIds', 'assignedRepositoryIds'):
        if assignment_key in my_map:
            repository_id = Id(my_map[assignment_key][0])
            if self.my_osid_object._proxy is not None:
                als = rm.get_asset_lookup_session_for_repository(
                    repository_id,
                    self.my_osid_object._proxy)
            else:
                als = rm.get_asset_lookup_session_for_repository(repository_id)
            break
    else:
        raise KeyError
    if asset_content_id is not None:
        ac_list = als.get_asset(asset_id).get_asset_contents()
        for ac in ac_list:
            if str(ac.ident) == str(asset_content_id):
                return ac
    if not asset_content_type_str:
        # Just return first one
        return next(als.get_asset(asset_id).get_asset_contents())
    else:
        if isinstance(asset_content_type_str, Type):
            asset_content_type_str = str(asset_content_type_str)
        for ac in als.get_asset(asset_id).get_asset_contents():
            if ac.get_genus_type() == Type(asset_content_type_str):
                return ac
    raise NotFound()
def get_description(self):
    """Gets a description of this service implementation.

    return: (osid.locale.DisplayText) - a description
    compliance: mandatory - This method must be implemented.
    """
    # Assemble the DisplayText payload from the static profile constants.
    display_text_map = {
        'text': profile.DESCRIPTION,
        'languageTypeId': profile.LANGUAGETYPEID,
        'scriptTypeId': profile.SCRIPTTYPEID,
        'formatTypeId': profile.FORMATTYPEID,
    }
    return DisplayText(display_text_map)
async def click(self, entity, reply_to=None, silent=False,
                clear_draft=False, hide_via=False):
    """Clicks this result and sends the associated `message`.

    Args:
        entity (`entity`):
            The entity to which the message of this result should be sent.

        reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional):
            If present, the sent message will reply to this ID or message.

        silent (`bool`, optional):
            If ``True``, the sent message will not notify the user(s).

        clear_draft (`bool`, optional):
            Whether the draft should be removed after sending the
            message from this result or not. Defaults to ``False``.

        hide_via (`bool`, optional):
            Whether the "via @bot" should be hidden or not.
            Only works with certain bots (like @bing or @gif).
    """
    entity = await self._client.get_input_entity(entity)
    # Normalise reply_to (which may be an ID or a Message) to a plain ID.
    reply_id = None if reply_to is None else utils.get_message_id(reply_to)
    req = functions.messages.SendInlineBotResultRequest(
        peer=entity,
        query_id=self._query_id,
        id=self.result.id,
        silent=silent,
        clear_draft=clear_draft,
        hide_via=hide_via,
        reply_to_msg_id=reply_id
    )
    # Send the request and map the raw update back to a Message object.
    return self._client._get_response_message(
        req, await self._client(req), entity)
def close(self, kill_restart=True):
    '''Shut the zookeeper client down completely.

    @param kill_restart: when True, prevents kazoo restarting from
        occurring after the connection is closed
    '''
    # Record the restart preference before tearing the connection down.
    self.do_not_restart = kill_restart
    client = self.zoo_client
    client.stop()
    client.close()
def _build_action_bound_constraints_table(self):
    '''Builds the lower and upper action bound constraint expressions.

    Scans each action's preconditions for relational expressions
    (either bare, or wrapped in a single `forall` aggregation) and
    records any extractable lower/upper bound per action name in
    `self.action_lower_bound_constraints` /
    `self.action_upper_bound_constraints`.
    '''
    self.action_lower_bound_constraints = {}
    self.action_upper_bound_constraints = {}
    for name, preconds in self.local_action_preconditions.items():
        for precond in preconds:
            expr_type = precond.etype
            expr_args = precond.args
            bounds_expr = None
            if expr_type == ('aggregation', 'forall'):
                # forall wraps (quantifier-vars, body); only a relational
                # body can encode a bound.
                inner_expr = expr_args[1]
                if inner_expr.etype[0] == 'relational':
                    bounds_expr = inner_expr
            elif expr_type[0] == 'relational':
                bounds_expr = precond
            if bounds_expr:
                # lower bound
                bound = self._extract_lower_bound(name, bounds_expr)
                if bound is not None:
                    self.action_lower_bound_constraints[name] = bound
                else:
                    # upper bound -- only tried when no lower bound was
                    # extracted from this expression.
                    bound = self._extract_upper_bound(name, bounds_expr)
                    if bound is not None:
                        self.action_upper_bound_constraints[name] = bound
def prepare_on_all_hosts(self, query, excluded_host, keyspace=None):
    """Prepare the given query on all hosts, excluding ``excluded_host``.

    Intended for internal use only.
    """
    futures = []
    for host in tuple(self._pools.keys()):
        if host != excluded_host and host.is_up:
            future = ResponseFuture(
                self,
                PrepareMessage(query=query, keyspace=keyspace),
                None,
                self.default_timeout)

            # we don't care about errors preparing against specific hosts,
            # since we can always prepare them as needed when the prepared
            # statement is used.  Just log errors and continue on.
            try:
                request_id = future._query(host)
            except Exception:
                log.exception("Error preparing query for host %s:", host)
                continue

            if request_id is None:
                # the error has already been logged by ResponsFuture
                log.debug("Failed to prepare query for host %s: %r",
                          host, future._errors.get(host))
                continue

            futures.append((host, future))

    # Second pass: wait for the responses after all requests are in flight.
    for host, future in futures:
        try:
            future.result()
        except Exception:
            log.exception("Error preparing query for host %s:", host)
def read_from(self, provider, null_allowed=False, **options):
    """Reads from the data :class:`Provider` the necessary number of
    bytes for the :attr:`data` object referenced by the `Pointer` field.

    A `Pointer` field stores the binary data read from the data
    :class:`Provider` in its :attr:`bytestream`.

    :param Provider provider: data :class:`Provider`.
    :param bool null_allowed: if ``True`` read access of address zero
        (Null) is allowed.
    :keyword bool nested: if ``True`` all :class:`Pointer` fields in the
        :attr:`data` object of the `Pointer` field read their referenced
        :attr:`~Pointer.data` object fields as well (chained method
        call). Each `Pointer` field stores the bytes for its referenced
        :attr:`data` object in its :attr:`bytestream`.
    """
    if self._data is None:
        # No data object attached: nothing to read.
        pass
    elif is_provider(provider):
        if self._value < 0:
            # Negative addresses are never read.
            pass
        elif null_allowed or self._value > 0:
            # Re-read until deserialization reports no size update
            # (data_size can grow while deserializing variable parts).
            while True:
                self.bytestream = provider.read(self.address, self.data_size)
                index = self.deserialize_data()
                # Incomplete data object
                if index.bit != 0:
                    length = index.byte, index.bit
                    raise ContainerLengthError(self, length)
                if not index.update:
                    break
            if is_mixin(self._data) and get_nested(options):
                # Chained read of nested Pointer fields.
                self._data.read_from(provider, **options)
        else:
            # Null address with null_allowed=False: reset to empty.
            self.bytestream = bytes()
            self.deserialize_data()
    else:
        raise ProviderTypeError(self, provider)
def copycol(self, origin_col: str, dest_col: str):
    """Copy a column's values into another column.

    :param origin_col: name of the column to copy
    :type origin_col: str
    :param dest_col: name of the new column
    :type dest_col: str

    :example: ``ds.copycol("col 1", "New col")``
    """
    try:
        # Copy the column values directly (the original assigned a
        # one-column DataFrame, which relies on pandas alignment).
        self.df[dest_col] = self.df[origin_col]
    except Exception as e:
        # Fix: the original referenced `self.copy_col`, which does not
        # exist (the method is named `copycol`), so the error path
        # itself raised AttributeError.
        self.err(e, self.copycol, "Can not copy column")
def _read_para_notification(self, code, cbit, clen, *, desc, length, version):
    """Read HIP NOTIFICATION parameter.

    Structure of HIP NOTIFICATION parameter [RFC 7401]::

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |             Type              |             Length            |
        |           Reserved            |      Notify Message Type      |
        /                                                               /
        /                   Notification Data                           /
        /                                               +---------------+
        /                                               |     Padding   |

        Octets      Bits        Name                    Description
          0           0     notification.type       Parameter Type
          1          15     notification.critical   Critical Bit
          2          16     notification.length     Length of Contents
          4          32     -                       Reserved
          6          48     notification.msg_type   Notify Message Type
          8          64     notification.data       Notification Data
          ?           ?     -                       Padding
    """
    _resv = self._read_fileng(2)
    _code = self._read_unpack(2)
    # NOTE(review): the RFC layout above implies Notification Data is
    # variable-length (clen - 4 bytes), but this reads a fixed 2 bytes --
    # confirm against other parameter readers in this module.
    _data = self._read_fileng(2)

    # Map the numeric code to a registered type name, falling back to
    # the IANA "Unassigned" ranges.
    _type = _NOTIFICATION_TYPE.get(_code)
    if _type is None:
        if 1 <= _code <= 50:
            _type = 'Unassigned (IETF Review)'
        elif 51 <= _code <= 8191:
            _type = 'Unassigned (Specification Required; Error Message)'
        elif 8192 <= _code <= 16383:
            _type = 'Unassigned (Reserved for Private Use; Error Message)'
        elif 16384 <= _code <= 40959:
            _type = 'Unassigned (Specification Required; Status Message)'
        elif 40960 <= _code <= 65535:
            _type = 'Unassigned (Reserved for Private Use; Status Message)'
        else:
            raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')

    notification = dict(
        type=desc,
        critical=cbit,
        length=clen,
        msg_type=_type,
        data=_data,
    )

    # Skip padding up to the parameter's padded length.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return notification
def clusterMetrics(timeValues, massValues, massScalingFactor=1):
    """Compute per-cluster deviation and spread metrics.

    :param timeValues: numpy array of time values of cluster members
    :param massValues: numpy array of mass values of cluster members
    :param massScalingFactor: scaling applied to mass deviations when
        computing the euclidean deviation

    :returns: dict with keys 'meanTime', 'meanMass', 'spreadTime',
        'spreadMass', 'meanEuc' (floats) and 'devTime', 'devMass',
        'devEuc' (lists of per-member deviations).
    """
    meanTime = numpy.mean(timeValues)
    meanMass = numpy.mean(massValues)
    devTime = timeValues - meanTime
    devMass = massValues - meanMass
    # Combined time/mass deviation, with mass rescaled to be comparable.
    devEuc = numpy.sqrt(
        numpy.power(devTime, 2) +
        numpy.power(devMass * massScalingFactor, 2)
    )
    return {
        'meanTime': meanTime,
        'meanMass': meanMass,
        'spreadTime': numpy.max(timeValues) - numpy.min(timeValues),
        'spreadMass': numpy.max(massValues) - numpy.min(massValues),
        'meanEuc': numpy.mean(devEuc),
        'devTime': devTime.tolist(),
        'devMass': devMass.tolist(),
        'devEuc': devEuc.tolist(),
    }
def db_exists(cls, impl, working_dir):
    """Does the chainstate db exist?"""
    snapshots_path = config.get_snapshots_filename(impl, working_dir)
    return os.path.exists(snapshots_path)
def scaled_imu2_send(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro,
                     zgyro, xmag, ymag, zmag, force_mavlink1=False):
    '''The RAW IMU readings for secondary 9DOF sensor setup. This message
    should contain the scaled values to the described units.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    xacc         : X acceleration (mg) (int16_t)
    yacc         : Y acceleration (mg) (int16_t)
    zacc         : Z acceleration (mg) (int16_t)
    xgyro        : Angular speed around X axis (millirad/sec) (int16_t)
    ygyro        : Angular speed around Y axis (millirad/sec) (int16_t)
    zgyro        : Angular speed around Z axis (millirad/sec) (int16_t)
    xmag         : X Magnetic field (milli tesla) (int16_t)
    ymag         : Y Magnetic field (milli tesla) (int16_t)
    zmag         : Z Magnetic field (milli tesla) (int16_t)
    '''
    # Thin convenience wrapper: encode the message, then send it.
    return self.send(
        self.scaled_imu2_encode(time_boot_ms, xacc, yacc, zacc, xgyro,
                                ygyro, zgyro, xmag, ymag, zmag),
        force_mavlink1=force_mavlink1)
def set_deployment_run_name(self):
    """Sets the deployment run name from deployment properties.

    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.set_deployment_run_name')
    # Pull the run name out of the cons3rt deployment properties.
    run_name = self.get_value('cons3rt.deploymentRun.name')
    self.deployment_run_name = run_name
    log.info('Found deployment run name: {n}'.format(n=run_name))
def remove_elaborated(type_):
    """Remove the type-declaration class-binder :class:`elaborated_t`
    from `type_`.

    If `type_` is not :class:`elaborated_t`, it is returned as is.
    """
    naked = remove_alias(type_)
    if not is_elaborated(naked):
        return type_
    # Only unwrap when type_ itself is the elaborated node; an aliased
    # elaborated type is returned unchanged, exactly as before.
    if isinstance(type_, cpptypes.elaborated_t):
        return type_.base
    return type_
def clean(ctx):
    """Clean generated project files.

    Removes build/coverage caches and then sweeps the tree for compiled
    and coverage artifacts. Destructive: runs `rm -rf` via the shell.
    """
    os.chdir(PROJECT_DIR)
    patterns = ['.cache', '.coverage', '.eggs', 'build', 'dist']
    ctx.run('rm -vrf {0}'.format(' '.join(patterns)))
    # `|| true` keeps the task green even when find hits unreadable paths.
    ctx.run('''find . \( -name '*,cover' -o -name '__pycache__' -o -name '*.py[co]' -o -name '_work' \) '''
            '''-exec rm -vrf '{}' \; || true''')
def init_client(client_id):
    """Initialise (or fetch the cached) driver for a client.

    @param client_id: ID of client user
    @return webwhatsapi driver object for that client
    """
    try:
        return drivers[client_id]
    except KeyError:
        # First request for this client: create and cache a driver.
        driver = init_driver(client_id)
        drivers[client_id] = driver
        return driver
def retry_until_not_none_or_limit_reached(method, limit, sleep_s=1,
                                          catch_exceptions=()):
    """Executes a method until the retry limit is hit or not None is returned.

    Thin wrapper: delegates to retry_until_valid_or_limit_reached with a
    "result is not None" validity predicate.
    """
    return retry_until_valid_or_limit_reached(
        method, limit, lambda x: x is not None, sleep_s, catch_exceptions)
def get_info ( self , url = None , thing_id = None , * args , ** kwargs ) : """Look up existing items by thing _ id ( fullname ) or url . : param url : A url to lookup . : param thing _ id : A single thing _ id , or a list of thing _ ids . A thing _ id can be any one of Comment ( ` ` t1 _ ` ` ) , Link ( ` ` t3 _ ` ` ) , or Subreddit ( ` ` t5 _ ` ` ) to lookup by fullname . : returns : When a single ` ` thing _ id ` ` is provided , return the corresponding thing object , or ` ` None ` ` if not found . When a list of ` ` thing _ id ` ` s or a ` ` url ` ` is provided return a list of thing objects ( up to ` ` limit ` ` ) . ` ` None ` ` is returned if all of the thing _ ids or the URL is invalid . The additional parameters are passed into : meth : ` . get _ content ` after the ` params ` parameter is exctracted and used to update the dictionary of url parameters this function sends . Note : the ` url ` parameter cannot be altered . Also , if using thing _ id and the ` limit ` parameter passed to : meth : ` . get _ content ` is used to slice the list of retreived things before returning it to the user , for when ` limit > 100 ` and ` ( limit % 100 ) > 0 ` , to ensure a maximum of ` limit ` thigns are returned ."""
if bool ( url ) == bool ( thing_id ) : raise TypeError ( 'Only one of url or thing_id is required!' ) # In these cases , we will have a list of things to return . # Otherwise , it will just be one item . if isinstance ( thing_id , six . string_types ) and ',' in thing_id : thing_id = thing_id . split ( ',' ) return_list = bool ( url ) or not isinstance ( thing_id , six . string_types ) if url : param_groups = [ { 'url' : url } ] else : if isinstance ( thing_id , six . string_types ) : thing_id = [ thing_id ] id_chunks = chunk_sequence ( thing_id , 100 ) param_groups = [ { 'id' : ',' . join ( id_chunk ) } for id_chunk in id_chunks ] items = [ ] update_with = kwargs . pop ( 'params' , { } ) for param_group in param_groups : param_group . update ( update_with ) kwargs [ 'params' ] = param_group chunk = self . get_content ( self . config [ 'info' ] , * args , ** kwargs ) items . extend ( list ( chunk ) ) # if using ids , manually set the limit if kwargs . get ( 'limit' ) : items = items [ : kwargs [ 'limit' ] ] if return_list : return items if items else None elif items : return items [ 0 ] else : return None
def from_bytes(cls, bitstream, decode_payload=True):
    r"""Parse a LISP data packet from ``bitstream`` and return a new instance.

    :param bitstream: raw packet data as bytes, ``Bits`` or ``ConstBitStream``
    :param decode_payload: whether to recursively parse the inner IP packet
    :return: a sanitized packet instance
    """
    packet = cls()
    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)
    # Read the flags
    (nonce_present, lsb_enabled,
     packet.echo_nonce_request,
     map_version_present,
     instance_id_present) = bitstream.readlist('5*bool')
    # Skip over reserved bits
    bitstream.read(3)
    # Parse nonce or map versions (the same 24-bit field is shared)
    if nonce_present:
        # Nonce: yes, versions: no
        packet.nonce = bitstream.read('bytes:3')
        packet.source_map_version = None
        packet.destination_map_version = None
    elif map_version_present:
        # Nonce: no, versions: yes
        packet.nonce = None
        (packet.source_map_version,
         packet.destination_map_version) = bitstream.readlist('2*uint:12')
    else:
        # Nonce: no, versions: no
        packet.nonce = None
        packet.source_map_version = None
        packet.destination_map_version = None
        # Skip over the nonce/map-version bits
        bitstream.read(24)
    # Parse instance-id
    if instance_id_present:
        packet.instance_id = bitstream.read('uint:24')
        # 8 bits remaining for LSB
        lsb_bits = 8
    else:
        # 32 bits remaining for LSB
        lsb_bits = 32
    # Parse LSBs (locator status bits)
    if lsb_enabled:
        packet.lsb = bitstream.readlist('%d*bool' % lsb_bits)
        # Reverse for readability: least significant locator-bit first
        packet.lsb.reverse()
    else:
        # Skip over the LSBs
        bitstream.read(lsb_bits)
    # The rest of the packet is payload
    remaining = bitstream[bitstream.pos:]
    # Parse IP packet: dispatch on the IP version nibble
    if len(remaining):
        ip_version = remaining.peek('uint:4')
        if ip_version == 4:
            packet.payload = IPv4Packet.from_bytes(remaining,
                                                   decode_payload=decode_payload)
        elif ip_version == 6:
            packet.payload = IPv6Packet.from_bytes(remaining,
                                                   decode_payload=decode_payload)
        else:
            # Unknown version: keep the raw bytes
            packet.payload = remaining.bytes
    # Verify that the properties make sense
    packet.sanitize()
    return packet
def man(self, **kwargs):
    """Print some man for each understood command.

    Keyword args:
        on: which topic to describe ('commands', 'pushPath', 'pullPath')
        amount: detail level passed to the per-command man methods
    """
    # Defaults: general command overview, full detail.
    str_man = 'commands'
    str_amount = 'full'
    for k, v in kwargs.items():
        if k == 'on':
            str_man = v
        if k == 'amount':
            str_amount = v
    if str_man == 'commands':
        # NOTE(review): the original line breaks inside these help strings
        # were lost in transit; layout below is a best-effort reconstruction.
        str_commands = """
    This script/module provides CURL-based GET/PUT/POST communication over http
    to a remote REST-like service: """ + Colors.GREEN + """

        ./pfurl.py [--auth <username:passwd>] [--verb <GET/POST>] \\
                    --http <IP>[:<port>]</some/path/>
    """ + Colors.WHITE + """
    Where --auth is an optional authorization to pass to the REST API,
    --verb denotes the REST verb to use and --http specifies the REST URL.
    Additionally, a 'message' described in JSON syntax can be pushed to the
    remote service, in the following syntax: """ + Colors.GREEN + """

        pfurl [--auth <username:passwd>] [--verb <GET/POST>] \\
                --http <IP>[:<port>]</some/path/> \\
                [--msg <JSON-formatted-string>]
    """ + Colors.WHITE + """
    In the case of the 'pman' system this --msg flag has very specific
    contextual syntax, for example: """ + Colors.GREEN + """

        pfurl --verb POST --http %s:%s/api/v1/cmd/ --msg \\
            '{  "action": "run",
                "meta": {
                    "cmd":      "cal 7 1970",
                    "auid":     "rudolphpienaar",
                    "jid":      "<jid>-1",
                    "threaded": true
                }
            }'
    """ % (self.str_ip, self.str_port) + Colors.CYAN + """
    The following specific action directives are directly handled by script:
    """ + "\n" + self.man_pushPath(description="short") + "\n" + self.man_pullPath(description="short") + "\n" + Colors.YELLOW + """
    To get detailed help on any of the above commands, type
    """ + Colors.LIGHT_CYAN + """
        ./pfurl.py --man <command>
    """
        return str_commands
    if str_man == 'pushPath':
        return self.man_pushPath(description=str_amount)
    if str_man == 'pullPath':
        return self.man_pullPath(description=str_amount)
def extract_tar(fileobj):
    """Yield 3-tuples of (name, modified, data) for each tz file in the archive.

    :param fileobj: a readable file object containing a tar archive
    :yields: (destination name, (Y, M, D, h, m, s) UTC mtime tuple, file bytes)
    """
    import time  # local import kept: top-of-file import block is not visible here
    archive = tarfile.open(fileobj=fileobj)
    filenames = [info.name for info in archive.getmembers() if info.isfile()]
    for src_name, dst_name in filter_tzfiles(filenames):
        mtime = archive.getmember(src_name).mtime
        modified = tuple(time.gmtime(mtime)[:6])
        # renamed from `bytes` to avoid shadowing the builtin
        data = archive.extractfile(src_name).read()
        yield dst_name, modified, data
def notConnectedNodes(self) -> Set[str]:
    """Return the names of registry nodes this node is NOT connected to."""
    return {name for name in self.registry if name not in self.conns}
def cdx_clamp(cdx_iter, from_ts, to_ts):
    """Yield only cdx entries whose timestamp falls within [from_ts, to_ts]."""
    # Pad short timestamps to the full 14-digit form before comparing.
    if from_ts and len(from_ts) < 14:
        from_ts = pad_timestamp(from_ts, PAD_14_DOWN)
    if to_ts and len(to_ts) < 14:
        to_ts = pad_timestamp(to_ts, PAD_14_UP)
    for entry in cdx_iter:
        ts = entry[TIMESTAMP]
        too_early = bool(from_ts) and ts < from_ts
        too_late = bool(to_ts) and ts > to_ts
        if not (too_early or too_late):
            yield entry
def get_logger(name=None, filename=None, filemode=None, level=WARNING):
    """Get a customized logger.

    Parameters
    ----------
    name : str, optional
        Name of the logger.
    filename : str, optional
        The filename to which the logger's output will be sent.
    filemode : str, optional
        The file mode to open the file (corresponding to `filename`),
        default is 'a' if `filename` is not ``None``.
    level : int, optional
        The `logging` level for the logger, see
        https://docs.python.org/2/library/logging.html#logging-levels

    Returns
    -------
    Logger
        A customized `Logger` object.
    """
    logger = logging.getLogger(name)
    # Only configure a named logger once; `_init_done` marks prior setup.
    if name is not None and not getattr(logger, '_init_done', None):
        logger._init_done = True
        if filename:
            mode = filemode if filemode else 'a'
            hdlr = logging.FileHandler(filename, mode)
        else:
            hdlr = logging.StreamHandler()  # pylint: disable=redefined-variable-type
        # the `_Formatter` contain some escape character to
        # represent color, which is not suitable for FileHandler,
        # (TODO) maybe we can add another Formatter for FileHandler.
        hdlr.setFormatter(_Formatter())
        logger.addHandler(hdlr)
        logger.setLevel(level)
    return logger
def endofcell_marker(source, comment):
    """Return an end-of-cell marker not already present in ``source``.

    Issues #31 #38: a cell containing a blank line needs an explicit
    end-of-cell marker. Start with '-' and keep appending '-' until no line
    of ``source`` already matches ``<comment> <marker>``.

    :param source: list of source lines
    :param comment: the comment prefix for the language (e.g. '#')
    :return: the marker string, e.g. '-' or '--'
    """
    endofcell = '-'
    while True:
        endofcell_re = re.compile(r'^{}( )'.format(comment) + endofcell + r'\s*$')
        # any() short-circuits instead of materializing every match
        if not any(endofcell_re.match(line) for line in source):
            return endofcell
        endofcell += '-'
def get_unique_repositories(repo_list):
    """Create a unique list of repositories, preserving first-seen order.

    :param repo_list: List of repositories which might contain duplicates
        (duplicates are detected via ``full_name``).
    :return: List of repositories with no duplicates in them.
    """
    unique_list = []
    # A plain set is the idiomatic membership structure; the original used
    # a defaultdict of booleans for the same purpose.
    seen = set()
    for repo in repo_list:
        if repo.full_name not in seen:
            unique_list.append(repo)
            seen.add(repo.full_name)
    return unique_list
def render_pep440_branch_based(pieces):
    """Build a version string with a post-release "local version identifier".

    Goal: TAG[+DISTANCE.BRANCH_gHEX[.dirty]]. A tagged build that is then
    dirtied yields TAG+0.BRANCH_gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX -> 0+untagged.DISTANCE.BRANCH_gHEX[.dirty]
    """
    # Sanitize the branch name in one pass: spaces, backslashes and slashes
    # become dots; parentheses are dropped.
    sanitize = str.maketrans({' ': '.', '(': '', ')': '', '\\': '.', '/': '.'})
    branch_name = pieces.get('branch') or ''
    if branch_name:
        branch_name = branch_name.translate(sanitize)
    else:
        branch_name = 'unknown_branch'
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += '.dev0' + plus_or_dot(pieces)
            rendered += "%d.%s.g%s" % (pieces["distance"], branch_name, pieces['short'])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1: no tags at all
        rendered = "0+untagged.%d.%s.g%s" % (pieces["distance"], branch_name, pieces['short'])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def resample_time_series(self):
    """OHLC time series resampler.

    Resamples time series data to create Open-Hi-Lo-Close (OHLC) data,
    which can be useful for statistical tests, or simply for charting.
    Frequency abbreviations are taken from the pandas library. By default,
    this method does daily ("D") resampling.
    """
    with cursor() as cur:
        # Find where the previous resampling run left off.
        cur.execute("SELECT max(starttime) FROM resampled_ledger")
        if cur.rowcount:
            last_resample = str(cur.fetchone()[0])
        else:
            last_resample = 0
        if not self.quiet:
            print("Resampling time series...")
        for market in self.markets:
            # Progress indicator, overwritten in place via '\r'.
            sys.stdout.write(market[0] + "-" + market[1] + "\r")
            sys.stdout.flush()
            # Resample all transactions
            if self.full or last_resample == 'None':
                query = ("SELECT currency1, currency2, price1, price2, "
                         "amount1, amount2, txdate FROM ripple_ledger "
                         "WHERE market = '%s' "
                         "ORDER BY txdate") % (market[0] + market[1])
            # Resample transactions from the last resampling
            # starting timestamp or newer
            else:
                query = ("SELECT currency1, currency2, price1, price2, "
                         "amount1, amount2, txdate FROM ripple_ledger "
                         "WHERE market = '%s' AND txdate >= '%s' "
                         "ORDER BY txdate") % (market[0] + market[1], last_resample)
            df = psql.frame_query(query, conn)
            if not df.empty:
                for f in self.resampling_frequencies:
                    rs = self.resampler(df, freq=f)
                    self.write_resampled(rs, market, cur, freq=f)
                conn.commit()
        print()
        print(self.updates, "resampled_ledger records updated")
        print()
    # Index the columns: starttime, freq, currency1, currency2
    # (CREATE INDEX CONCURRENTLY requires autocommit mode.)
    conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
    with cursor() as cur:
        if not self.quiet:
            print("Indexing...")
        idx_queries = (
            "DROP INDEX IF EXISTS idx_ledger_interval",
            ("CREATE INDEX CONCURRENTLY idx_ledger_interval ON "
             "resampled_ledger(starttime, freq, currency1, currency2)"),
        )
        for query in idx_queries:
            cur.execute(query)
def set_python(self, value):
    """Expect list of record instances, convert to a SortedDict for internal representation.

    :param value: a record instance or list of record instances
    :return: the result of the internal ``_set`` call
    """
    # For single-select fields, wrap a lone record into a list.
    if not self.multiselect:
        if value and not isinstance(value, list):
            value = [value]
    # Normalize falsy values (None, empty) to an empty list.
    value = value or []
    records = SortedDict()
    for record in value:
        self.validate_value(record)
        # Keyed by record id; insertion into SortedDict keeps ordering.
        records[record.id] = record
    return_value = self._set(records)
    # Sync the raw swimlane representation of this field on the record.
    self.record._raw['values'][self.id] = self.get_swimlane()
    return return_value
def is_valid_scalar ( self , node : ValueNode ) -> None : """Check whether this is a valid scalar . Any value literal may be a valid representation of a Scalar , depending on that scalar type ."""
# Report any error at the full type expected by the location . location_type = self . context . get_input_type ( ) if not location_type : return type_ = get_named_type ( location_type ) if not is_scalar_type ( type_ ) : self . report_error ( GraphQLError ( bad_value_message ( location_type , print_ast ( node ) , enum_type_suggestion ( type_ , node ) , ) , node , ) ) return # Scalars determine if a literal value is valid via ` parse _ literal ( ) ` which may # throw or return an invalid value to indicate failure . type_ = cast ( GraphQLScalarType , type_ ) try : parse_result = type_ . parse_literal ( node ) if is_invalid ( parse_result ) : self . report_error ( GraphQLError ( bad_value_message ( location_type , print_ast ( node ) ) , node ) ) except Exception as error : # Ensure a reference to the original error is maintained . self . report_error ( GraphQLError ( bad_value_message ( location_type , print_ast ( node ) , str ( error ) ) , node , original_error = error , ) )
def str_strip(arr, to_strip=None, side='both'):
    """Strip whitespace (including newlines) from each string in the Series/Index.

    Parameters
    ----------
    to_strip : str or unicode
    side : {'left', 'right', 'both'}, default 'both'

    Returns
    -------
    Series or Index
    """
    # Dispatch table instead of an if/elif chain.
    strippers = {
        'both': lambda x: x.strip(to_strip),
        'left': lambda x: x.lstrip(to_strip),
        'right': lambda x: x.rstrip(to_strip),
    }
    try:
        f = strippers[side]
    except KeyError:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(f, arr)
def handle_submodules(repo, **kwargs):
    """Serialize and return the repository's submodules.

    :return: repo.submodules()
    """
    log.info('submodules: %s %s' % (repo, kwargs))
    serialized = []
    for submodule in repo.submodules(**kwargs):
        serialized.append(serialize(submodule, type='submodule', url=submodule.url))
    return serialized
def _get_device_by_label ( devices , label ) : '''Returns the device with the given label , raises error if the device is not found . devices list of vim . vm . device . VirtualDevice objects key Unique key of device'''
device_labels = [ d for d in devices if d . deviceInfo . label == label ] if device_labels : return device_labels [ 0 ] else : raise salt . exceptions . VMwareObjectNotFoundError ( 'Virtual machine device with ' 'label {0} does not exist' . format ( label ) )
def post_venue(self, id, **data):
    """POST /venues/:id/

    Updates a :format:`venue` and returns it as an object.
    """
    endpoint = "/venues/{0}/".format(id)
    return self.post(endpoint, data=data)
def bsrchi(value, ndim, array):
    """Do a binary search for a key value within an integer array, assumed
    to be in increasing order. Return the index of the matching array
    entry, or -1 if the key value is not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchi_c.html

    :param value: Value to find in array.
    :type value: int
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Array to be searched.
    :type array: Array of ints
    :return: index
    :rtype: int
    """
    # Convert the arguments inline and delegate to the CSPICE binding.
    return libspice.bsrchi_c(ctypes.c_int(value),
                             ctypes.c_int(ndim),
                             stypes.toIntVector(array))
def make_vcard(name, displayname, email=None, phone=None, fax=None, videophone=None, memo=None, nickname=None, birthday=None, url=None, pobox=None, street=None, city=None, region=None, zipcode=None, country=None, org=None, lat=None, lng=None, source=None, rev=None, title=None):
    """Create a QR code which encodes a `vCard <https://en.wikipedia.org/wiki/VCard>`_
    version 3.0.

    Only a subset of available vCard properties is supported. ``name`` may
    contain a semicolon to separate lastname;forename. The multi-value
    parameters (``email``, ``phone``, ``fax``, ``videophone``, ``url``,
    ``title``) accept either a string or an iterable of strings; ``birthday``
    and ``rev`` accept a date or an ISO ``YYYY-MM-DD`` string.

    :rtype: segno.QRCode
    """
    # Build the serialized vCard payload first, then encode it as a QR code.
    vcard_data = make_vcard_data(
        name, displayname, email=email, phone=phone, fax=fax,
        videophone=videophone, memo=memo, nickname=nickname,
        birthday=birthday, url=url, pobox=pobox, street=street, city=city,
        region=region, zipcode=zipcode, country=country, org=org, lat=lat,
        lng=lng, source=source, rev=rev, title=title)
    return segno.make_qr(vcard_data)
def assignrepr(self, prefix: str) -> str:
    """Return a |repr| string with a prefixed assignment."""
    with objecttools.repr_.preserve_strings(True):
        with objecttools.assignrepr_tuple.always_bracketed(False):
            # Continuation lines are aligned under `prefix + 'Element('`.
            blanks = ' ' * (len(prefix) + 8)
            lines = ['%sElement("%s",' % (prefix, self.name)]
            for groupname in ('inlets', 'outlets', 'receivers', 'senders'):
                group = getattr(self, groupname, Node)
                if group:
                    subprefix = '%s%s=' % (blanks, groupname)
                    # pylint: disable=not-an-iterable
                    # because pylint is wrong
                    nodes = [str(node) for node in group]
                    # pylint: enable=not-an-iterable
                    line = objecttools.assignrepr_list(nodes, subprefix, width=70)
                    lines.append(line + ',')
            if self.keywords:
                subprefix = '%skeywords=' % blanks
                line = objecttools.assignrepr_list(sorted(self.keywords), subprefix, width=70)
                lines.append(line + ',')
            # Replace the trailing comma of the last argument with ')'.
            lines[-1] = lines[-1][:-1] + ')'
            return '\n'.join(lines)
def slow_augmenting_row_reduction ( n , ii , jj , idx , count , x , y , u , v , c ) : '''Perform the augmenting row reduction step from the Jonker - Volgenaut algorithm n - the number of i and j in the linear assignment problem ii - the unassigned i jj - the j - index of every entry in c idx - the index of the first entry for each i count - the number of entries for each i x - the assignment of j to i y - the assignment of i to j u - the dual variable " u " which will be updated . It should be initialized to zero for the first reduction transfer . v - the dual variable " v " which will be reduced in - place c - the cost for each entry . returns the new unassigned i'''
# From Jonker : # procedure AUGMENTING ROW REDUCTION ; # begin # LIST : = { all unassigned rows } ; # for all i in LIST do # repeat # ul : = min { c [ i , j ] - v [ j ] for j = l . . . n } ; # select j1 with c [ i , j 1 ] - v [ j 1 ] = u1; # u2 : = min { c [ i , j ] - v [ j ] for j = l . . . n , j < > jl } ; # select j2 with c [ i , j2 ] - v [ j2 ] = u2 and j2 < > j 1 ; # u [ i ] : = u2; # if ul < u2 then v [ jl ] : = v [ jl ] - ( u2 - ul ) # else if jl is assigned then jl : = j2; # k : = y [ jl ] ; if k > 0 then x [ k ] : = 0 ; x [ i ] : = jl ; y [ j l ] : = i ; i : = k # until ul = u2 ( * no reduction transfer * ) or k = 0 i ~ * augmentation * ) # end ii = list ( ii ) k = 0 limit = len ( ii ) free = [ ] while k < limit : i = ii [ k ] k += 1 j = jj [ idx [ i ] : ( idx [ i ] + count [ i ] ) ] uu = c [ idx [ i ] : ( idx [ i ] + count [ i ] ) ] - v [ j ] order = np . lexsort ( [ uu ] ) u1 , u2 = uu [ order [ : 2 ] ] j1 , j2 = j [ order [ : 2 ] ] i1 = y [ j1 ] if u1 < u2 : v [ j1 ] = v [ j1 ] - u2 + u1 elif i1 != n : j1 = j2 i1 = y [ j1 ] if i1 != n : if u1 < u2 : k -= 1 ii [ k ] = i1 else : free . append ( i1 ) x [ i ] = j1 y [ j1 ] = i return np . array ( free , np . uint32 )
def validate_mutations(self, mutations):
    '''Validate a list of Mutation objects against this PDB.

    This function has been refactored to use the SimpleMutation class. The
    parameter is a list of Mutation objects. The function has no return
    value but raises a PDBValidationException if the wildtype in the
    Mutation m does not match the residue type corresponding to residue
    m.ResidueID in the PDB file.
    '''
    # Chain, ResidueID, WildTypeAA, MutantAA
    resID2AA = self.get_residue_id_to_type_map()
    badmutations = [
        m for m in mutations
        if m.WildTypeAA != resID2AA.get(PDB.ChainResidueID2String(m.Chain, m.ResidueID), "")
    ]
    if badmutations:
        raise PDBValidationException(
            "The mutation(s) %s could not be matched against the PDB %s."
            % (", ".join(map(str, badmutations)), self.pdb_id))
def cli(ctx, feature_id, description, organism="", sequence=""):
    """Set a feature's description.

    Output:
        A standard apollo feature dictionary ({"features": [{...}]})
    """
    annotations = ctx.gi.annotations
    return annotations.set_description(feature_id, description,
                                       organism=organism, sequence=sequence)
def print_devices_change_callback(devices, key, new):
    """Print the reply from &devices() and highlight errors.

    :param devices: mapping of device key -> device dict
    :param key: key of the device that changed
    :param new: the new value reported for the device
    """
    dev = devices[key]
    print('- ', new, ' ', dev)
    if dev['type'] == QSType.unknown:
        print(" ERR decoding")
    elif dev['value'] == -1:
        # BUG FIX: original called dev(" ERR decoding: -1?"), i.e. tried to
        # call the device dict, which raises TypeError; print was intended.
        print(" ERR decoding: -1?")
    qcord = pyqwikswitch.decode_qwikcord(dev['data'][pyqwikswitch.QS_VALUE])
    if qcord is not None:
        print(' qwikcord (CTAVG, CTsum) = ' + str(qcord))
def request(self, method: str, path: str,
            content: Optional[Union[dict, bytes, str]] = None,
            timestamp: Optional[int] = None,
            external_url: Optional[str] = None,
            headers: Optional[Dict[str, str]] = None,
            query_params: Optional[Dict[str, Any]] = None,
            api_path: str = "/_matrix/client/r0") -> Awaitable[dict]:
    """Make a raw HTTP request.

    Args:
        method: The HTTP method to use.
        path: The API endpoint to call. Does not include the base path
            (e.g. /_matrix/client/r0).
        content: The content to post as a dict (json) or bytes/str (raw).
        timestamp: The timestamp query param used for timestamp massaging.
        external_url: The external_url field to send in the content (only
            applicable if content is dict).
        headers: The dict of HTTP headers to send.
        query_params: The dict of query parameters to send.
        api_path: The base API path.

    Returns:
        The response as a dict.
    """
    content = content or {}
    # NOTE(review): the caller's headers/query_params dicts are mutated
    # below when provided — confirm this is intended.
    headers = headers or {}
    query_params = query_params or {}
    query_params["access_token"] = self.token
    if timestamp is not None:
        if isinstance(timestamp, datetime):
            # Matrix expects milliseconds since the epoch, in UTC.
            timestamp = int(timestamp.replace(tzinfo=timezone.utc).timestamp() * 1000)
        query_params["ts"] = timestamp
    if isinstance(content, dict) and external_url is not None:
        content["external_url"] = external_url
    method = method.upper()
    if method not in ["GET", "PUT", "DELETE", "POST"]:
        raise MatrixError("Unsupported HTTP method: %s" % method)
    if "Content-Type" not in headers:
        headers["Content-Type"] = "application/json"
    # JSON bodies are serialized here; raw bytes/str pass through untouched.
    if headers.get("Content-Type", None) == "application/json":
        content = json.dumps(content)
    # Appservice identity assertion (puppeting) for non-real users.
    if self.identity and not self.is_real_user:
        query_params["user_id"] = self.identity
    self._log_request(method, path, content, query_params)
    endpoint = self.base_url + api_path + path
    return self._send(method, endpoint, content, query_params, headers or {})
def aggregator(name, func, *args, type=None):
    'Define simple aggregator `name` that calls func(values)'
    def _func(col, rows):
        # wrap builtins so they can have a .type
        values = list(col.getValues(rows))
        try:
            return func(values, *args)
        except Exception as exc:
            # An empty input is reported as None; otherwise surface the
            # exception object itself as the cell value.
            if not values:
                return None
            return exc
    aggregators[name] = _defaggr(name, type, _func)
def set_exclude_filters(self, exclude_filters):
    '''Store the exclude filters and recompute ``require_module``.

    :param list(ExcludeFilter) exclude_filters: filters to apply; module
        information is required as soon as any filter is not path-based.
    '''
    self._exclude_filters = exclude_filters
    # any() short-circuits exactly like the original flag-and-break loop.
    self.require_module = any(not f.is_path for f in exclude_filters)
def mixing_phases ( U ) : """Return the angles and CP phases of the CKM or PMNS matrix in standard parametrization , starting from a matrix with arbitrary phase convention ."""
f = { } # angles f [ 't13' ] = asin ( abs ( U [ 0 , 2 ] ) ) if U [ 0 , 0 ] == 0 : f [ 't12' ] = pi / 2 else : f [ 't12' ] = atan ( abs ( U [ 0 , 1 ] ) / abs ( U [ 0 , 0 ] ) ) if U [ 2 , 2 ] == 0 : f [ 't23' ] = pi / 2 else : f [ 't23' ] = atan ( abs ( U [ 1 , 2 ] ) / abs ( U [ 2 , 2 ] ) ) s12 = sin ( f [ 't12' ] ) c12 = cos ( f [ 't12' ] ) s13 = sin ( f [ 't13' ] ) c13 = cos ( f [ 't13' ] ) s23 = sin ( f [ 't23' ] ) c23 = cos ( f [ 't23' ] ) # standard phase if ( s12 * s23 ) == 0 or ( c12 * c13 ** 2 * c23 * s13 ) == 0 : f [ 'delta' ] = 0 else : f [ 'delta' ] = - phase ( ( U [ 0 , 0 ] . conj ( ) * U [ 0 , 2 ] * U [ 2 , 0 ] * U [ 2 , 2 ] . conj ( ) / ( c12 * c13 ** 2 * c23 * s13 ) + c12 * c23 * s13 ) / ( s12 * s23 ) ) # Majorana phases f [ 'delta1' ] = phase ( exp ( 1j * f [ 'delta' ] ) * U [ 0 , 2 ] ) f [ 'delta2' ] = phase ( U [ 1 , 2 ] ) f [ 'delta3' ] = phase ( U [ 2 , 2 ] ) f [ 'phi1' ] = 2 * phase ( exp ( 1j * f [ 'delta1' ] ) * U [ 0 , 0 ] . conj ( ) ) f [ 'phi2' ] = 2 * phase ( exp ( 1j * f [ 'delta1' ] ) * U [ 0 , 1 ] . conj ( ) ) return f
def translate_y(self, d):
    """Translate the mesh along the y-direction.

    :param float d: amount to translate
    :return: self, to allow chaining
    """
    # Homogeneous 4x4 translation matrix with the offset in the y slot.
    translation = numpy.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, d, 0, 1],
    ])
    self.vectors = self.vectors.dot(translation)
    return self
def filename(self) -> str:
    """Return the filename.

    Given a path-like-string (e.g. "/Users/dspadini/pydriller/myfile.py")
    returns only the filename (e.g. "myfile.py").

    :return: str filename
    """
    # Fall back to the old path when the new path is missing or deleted.
    use_old = self._new_path is None or str(self._new_path) == "/dev/null"
    path = self._old_path if use_old else self._new_path
    return path.name
def binary(self):
    """Return the encoded external-term-format representation of this list.

    :raises OutputException: if the length overflows uint32 or the stored
        value is not a list
    """
    if isinstance(self.value, list):
        length = len(self.value)
        if length == 0:
            # Empty list is encoded as a bare NIL tag.
            return b_chr(_TAG_NIL_EXT)
        elif length > 4294967295:
            # Length field is a 32-bit unsigned integer.
            raise OutputException('uint32 overflow')
        elif self.improper:
            # Improper list: last element is the tail, so the declared
            # length excludes it and no NIL terminator is appended.
            return (b_chr(_TAG_LIST_EXT)
                    + struct.pack(b'>I', length - 1)
                    + b''.join([_term_to_binary(element) for element in self.value]))
        else:
            # Proper list: all elements followed by the NIL terminator.
            return (b_chr(_TAG_LIST_EXT)
                    + struct.pack(b'>I', length)
                    + b''.join([_term_to_binary(element) for element in self.value])
                    + b_chr(_TAG_NIL_EXT))
    else:
        raise OutputException('unknown list type')
def clear(self, scope='screen'):
    """Clear (part of) the console; see doc in Term class.

    According to http://support.microsoft.com/kb/99261 the best way to
    clear the console is to write out empty spaces.

    :param scope: one of 'screen', 'beginning of line', 'end of line',
        'end of screen', 'line', 'left', 'right'
    :raises ValueError: for an unknown scope
    """
    # TODO: clear attributes too
    if scope == 'screen':
        bos = (0, self._get_console_info()['window']['top'])
        cols, lines = self.get_size()
        length = cols * lines
        self._clear_console(length, bos)
        self.move('beginning of screen')
    elif scope == 'beginning of line':
        # BUG FIX: the literal used to be ' beginning of line' (leading
        # space), so this documented scope fell through to ValueError.
        pass
    elif scope == 'end of line':
        curx, cury = self._get_position()
        cols, lines = self.get_size()
        coord = (curx, cury)
        length = cols - curx
        self._clear_console(length, coord)
    elif scope == 'end of screen':
        curx, cury = self._get_position()
        coord = (curx, cury)
        cols, lines = self.get_size()
        length = (lines - cury) * cols - curx
        self._clear_console(length, coord)
    elif scope == 'line':
        curx, cury = self._get_position()
        coord = (0, cury)
        cols, lines = self.get_size()
        self._clear_console(cols, coord)
        self._set_position((curx, cury))
    elif scope == 'left':
        self.move('left')
        self.write(' ')
    elif scope == 'right':
        self.write(' ')
        self.move('left')
    else:
        raise ValueError("invalid scope to clear")
def train_batch ( self , batch_info : BatchInfo ) -> None : """Batch - the most atomic unit of learning . For this reinforforcer , that involves : 1 . Roll out environment and store out experience in the buffer 2 . Sample the buffer and train the algo on sample batch"""
# For each reinforcer batch : # 1 . Roll out environment and store out experience in the buffer self . roll_out_and_store ( batch_info ) # 2 . Sample the buffer and train the algo on sample batch self . train_on_replay_memory ( batch_info )
def interpolate_cosine_single(start, end, coefficient):
    """Cosine interpolation between ``start`` and ``end``.

    ``coefficient=0`` yields ``start`` and ``coefficient=1`` yields
    ``end``; intermediate values follow a smooth half-cosine ease curve.
    """
    # Map coefficient in [0, 1] onto an easing weight running from 1 to 0.
    weight = (1 + np.cos(np.pi * coefficient)) * 0.5
    return end + (start - end) * weight
def stop(self):
    """Stops the bundle.

    Does nothing if the bundle is already stopped.

    :raise BundleException: The bundle activator failed.
    """
    if self._state != Bundle.ACTIVE:
        # Invalid state: nothing to stop
        return
    exception = None
    with self._lock:
        # Store the bundle current state, so it can be restored if the
        # activator raises a Pelix exception below
        previous_state = self._state
        # Stopping...
        self._state = Bundle.STOPPING
        self._fire_bundle_event(BundleEvent.STOPPING)
        # Call the activator, if any
        stopper = self.__get_activator_method("stop")
        if stopper is not None:
            try:
                # Call the stop method
                stopper(self.__context)
            except (FrameworkException, BundleException) as ex:
                # Restore previous state
                self._state = previous_state
                # Re-raise directly Pelix exceptions (stored, raised after
                # service clean up below)
                _logger.exception("Pelix error raised by %s while stopping", self.__name)
                exception = ex
            except Exception as ex:
                _logger.exception("Error raised by %s while stopping", self.__name)
                # Store the exception, wrapped (raised after service clean up)
                exception = BundleException(ex)
        # Hide remaining services
        self.__framework._hide_bundle_services(self)
        # Intermediate bundle event: activator should have cleaned up
        # everything, but some element could stay (iPOPO components, ...)
        self._fire_bundle_event(BundleEvent.STOPPING_PRECLEAN)
        # Remove remaining services (the hard way)
        self.__unregister_services()
        # Cleanup service usages
        self.__framework._unget_used_services(self)
        # Bundle is now stopped and all its services have been unregistered
        self._state = Bundle.RESOLVED
        self._fire_bundle_event(BundleEvent.STOPPED)
    # Raise the exception, if any
    # pylint: disable=E0702
    # Pylint seems to miss the "is not None" check below
    if exception is not None:
        raise exception
def walk ( self ) : """Walk proposal kernel"""
if self . verbose > 1 : print_ ( '\t' + self . _id + ' Running Walk proposal kernel' ) # Mask for values to move phi = self . phi theta = self . walk_theta u = random ( len ( phi ) ) z = ( theta / ( 1 + theta ) ) * ( theta * u ** 2 + 2 * u - 1 ) if self . _prime : xp , x = self . values else : x , xp = self . values if self . verbose > 1 : print_ ( '\t' + 'Current value = ' + str ( x ) ) x = x + phi * ( x - xp ) * z if self . verbose > 1 : print_ ( '\t' + 'Proposed value = ' + str ( x ) ) self . stochastic . value = x # Set proposal adjustment factor self . hastings_factor = 0.0
def run_dependent_peptides_from_parameters(paramfile, outfile):
    """Transform an allPeptides.txt and experimentalDesign.txt table into
    the dependentPeptides.txt table written to outfile.

    :param paramfile: Perseus parameters.xml including at least two
        FileParam entries named 'allPeptides.txt' and
        'experimentalDesign.txt'.
    :param outfile: Path to the output file.
    """
    parameters = parse_parameters(paramfile)
    allPeptides_file = fileParam(parameters, 'allPeptides.txt')
    # NOTE(review): the docstring mentions 'experimentalDesign.txt' but the
    # FileParam actually fetched is named 'Raw files table' — confirm which
    # name the parameters.xml really uses.
    rawFilesTable_file = fileParam(parameters, 'Raw files table')
    run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def columns(self, dimensions=None):
    """Convert dimension values to a dictionary.

    Returns a dictionary of column arrays along each dimension
    of the element.

    Args:
        dimensions: Dimensions to return as columns

    Returns:
        Dictionary of arrays for each dimension

    Raises:
        ValueError: if any requested dimension is a value dimension,
            which this element type does not hold data for.
    """
    if dimensions is None:
        dims = self.kdims
    else:
        dims = [self.get_dimension(d, strict=True) for d in dimensions]
    # This element carries no value-dimension data, so asking for any
    # value dimension is an error.
    requested_vdims = [d for d in dims if d in self.vdims]
    if requested_vdims:
        names = ', '.join(d.name for d in requested_vdims)
        raise ValueError('%s element does not hold data for value '
                         'dimensions. Could not return data for %s '
                         'dimension(s).' % (type(self).__name__, names))
    return OrderedDict((d.name, self.dimension_values(d)) for d in dims)
def discard_config(self):
    """Discard candidate configuration changes (rollback 0)."""
    candidate = self.device.cu
    candidate.rollback(rb_id=0)
    # Keep holding the configuration lock if the caller asked for it.
    if self.config_lock:
        return
    self._unlock()
def _create_gcloud_zone ( self , dns_name ) : """Creates a google cloud ManagedZone with dns _ name , and zone named derived from it . calls . create ( ) method and returns it . : param dns _ name : fqdn of zone to create : type dns _ name : str : type return : new google . cloud . dns . ManagedZone"""
# Zone name must begin with a letter , end with a letter or digit , # and only contain lowercase letters , digits or dashes , # and be 63 characters or less zone_name = 'zone-{}-{}' . format ( dns_name . replace ( '.' , '-' ) , uuid4 ( ) . hex ) [ : 63 ] gcloud_zone = self . gcloud_client . zone ( name = zone_name , dns_name = dns_name ) gcloud_zone . create ( client = self . gcloud_client ) # add this new zone to the list of zones . self . _gcloud_zones [ gcloud_zone . dns_name ] = gcloud_zone self . log . info ( "Created zone {}. Fqdn {}." . format ( zone_name , dns_name ) ) return gcloud_zone
def p_field_void(self, p):
    """field : ID NL
             | ID NL INDENT annotation_ref_list docsection DEDENT"""
    # NOTE: the docstring above is the PLY grammar rule for this production
    # and must not be edited casually.
    p[0] = AstVoidField(self.path, p.lineno(1), p.lexpos(1), p[1])
    # The long production has 6 symbols, so len(p) == 7; p[4] is the
    # annotation_ref_list and p[5] the docsection, either of which the
    # sub-rules may yield as None.
    if len(p) > 3:
        if p[4] is not None:
            p[0].set_annotations(p[4])
        if p[5] is not None:
            p[0].set_doc(p[5])
def marshall(values):
    """Marshall a ``dict`` into something DynamoDB likes.

    Return the values in a nested dict structure that is required for
    writing the values to DynamoDB.

    :param dict values: The values to marshall
    :rtype: dict
    :raises ValueError: if an unsupported type is encountered
    """
    return {key: _marshall_value(value) for key, value in values.items()}
def _is_out_of_order ( segmentation ) : """Check if a given segmentation is out of order . Examples > > > _ is _ out _ of _ order ( [ [ 0 , 1 , 2 , 3 ] ] ) False > > > _ is _ out _ of _ order ( [ [ 0 , 1 ] , [ 2 , 3 ] ] ) False > > > _ is _ out _ of _ order ( [ [ 0 , 1 , 3 ] , [ 2 ] ] ) True"""
last_stroke = - 1 for symbol in segmentation : for stroke in symbol : if last_stroke > stroke : return True last_stroke = stroke return False
def get(obj, glob, separator="/"):
    """Return the value of the single leaf in *obj* matching *glob*.

    If more than one leaf matches the glob, ValueError is raised. If the
    glob is not found, KeyError is raised.
    """
    # A unique sentinel distinguishes "no match yet" from a matched leaf
    # whose value is legitimately None.  The previous `ret is not None`
    # check silently returned the second match instead of raising
    # ValueError whenever the first matched value was None.
    sentinel = object()
    ret = sentinel
    for item in search(obj, glob, yielded=True, separator=separator):
        if ret is not sentinel:
            raise ValueError(
                "dpath.util.get() globs must match only one leaf : %s" % glob)
        ret = item[1]
    if ret is sentinel:
        raise KeyError(glob)
    return ret
def get_baserate(self):
    """Helper function to find the base rate.

    The base rate is the entry whose symbol prefix (the part before '=')
    equals its own name.

    :raises RuntimeError: if no such entry exists.
    """
    for resource in self.rates['list']['resources']:
        fields = resource['resource']['fields']
        symbol_prefix, _, _ = fields['symbol'].partition('=')
        if symbol_prefix == fields['name']:
            return fields['name']
    raise RuntimeError("%s: baserate not found" % self.name)
def plot_events_distribution(events, num_bars=50, ax=None):
    """Plots the distribution of events in time.

    Parameters
    ----------
    events : pd.Series
        A pd.Series whose index contains at least a 'date' level.
    num_bars : integer, optional
        Number of bars to plot
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    start = events.index.get_level_values('date').min()
    end = events.index.get_level_values('date').max()
    # Bucket the full [start, end] span into num_bars equal intervals.
    group_interval = (end - start) / num_bars
    grouper = pd.Grouper(level='date', freq=group_interval)
    # Bar chart of the number of events per interval.
    events.groupby(grouper).count().plot(kind="bar", grid=False, ax=ax)
    ax.set(ylabel='Number of events',
           title='Distribution of events in time',
           xlabel='Date')
    return ax
def get_bibtex ( isbn_identifier ) : """Get a BibTeX string for the given ISBN . : param isbn _ identifier : ISBN to fetch BibTeX entry for . : returns : A BibTeX string or ` ` None ` ` if could not fetch it . > > > get _ bibtex ( ' 9783161484100 ' ) ' @ book { 9783161484100, \\ n title = { Berkeley , Oakland : Albany , Emeryville , Alameda , Kensington } , \\ n author = { Peekaboo Maps } , \\ n isbn = { 9783161484100 } , \\ n year = { 2009 } , \\ n publisher = { Peek A Boo Maps } \\ n } '"""
# Try to find the BibTeX using associated DOIs bibtex = doi . get_bibtex ( to_doi ( isbn_identifier ) ) if bibtex is None : # In some cases , there are no DOIs for a given ISBN . In this case , try # to fetch bibtex directly from the ISBN , using a combination of # Google Books and worldcat . org results . bibtex = isbnlib . registry . bibformatters [ 'bibtex' ] ( isbnlib . meta ( isbn_identifier , 'default' ) ) return bibtex
def lastNode(class_, hot_map):
    """Return the very last node (recursively) in the hot map."""
    tail = hot_map[-1]
    children = tail[2]
    if not children:
        # Leaf entry: its node is the last one.
        return tail[1]
    # Recurse into the last entry's children.
    return class_.lastNode(children)
def get_filename(self, year):
    """Return the data file path: <fldr><sep><type><year>.<user>."""
    return "".join([self.fldr, os.sep, self.type, year, ".", self.user])
def flush_read_tuple(self):
    """Flush the internal buffer of reads.

    Builds an RNF name for the buffered read tuple, writes each buffered
    read to the FASTQ file, then empties the buffer.  Does nothing when
    the buffer is empty.
    """
    if not self.is_empty():
        # Optional "[...]" suffix comment appended to the RNF name.
        suffix_comment_buffer = []
        if self._info_simulator is not None:
            suffix_comment_buffer.append(self._info_simulator)
        if self._info_reads_in_tuple:
            # todo: orientation (FF, FR, etc.)
            # orientation = "".join([])
            suffix_comment_buffer.append("reads-in-tuple:{}".format(len(self.seqs_bases)))
        if len(suffix_comment_buffer) != 0:
            suffix_comment = "[{}]".format(",".join(suffix_comment_buffer))
        else:
            suffix_comment = ""
        rnf_name = self._rnf_profile.get_rnf_name(
            rnftools.rnfformat.ReadTuple(
                segments=self.segments,
                read_tuple_id=self.current_read_tuple_id,
                suffix=suffix_comment,
            )
        )
        # One 4-line FASTQ record per read; "/1", "/2", ... suffixes are
        # only added when the tuple holds more than one read.
        fq_reads = [
            os.linesep.join(
                [
                    "@{rnf_name}{read_suffix}".format(
                        rnf_name=rnf_name,
                        read_suffix="/{}".format(str(i + 1)) if len(self.seqs_bases) > 1 else "",
                    ),
                    self.seqs_bases[i],
                    "+",
                    self.seqs_qualities[i],
                ]
            )
            for i in range(len(self.seqs_bases))
        ]
        self._fq_file.write(os.linesep.join(fq_reads))
        self._fq_file.write(os.linesep)
        # Reset the buffer for the next tuple.
        self.empty()
def entry_point(context, block_name):
    """Include a snippet at the bottom of a block, if it exists.

    For example, if the plugin with slug 'attachments' is registered,
    waliki/attachments_edit_content.html will be included with
    {% entry_point 'edit_content' %}, which is declared at the bottom of
    the block 'content' in edit.html.
    """
    # Imported lazily to avoid a circular import at module load time
    # (presumably — TODO confirm).
    from waliki.plugins import get_plugins
    includes = []
    for plugin in get_plugins():
        template_name = 'waliki/%s_%s.html' % (plugin.slug, block_name)
        try:
            # Probe whether the template exists; only then include it.
            template.loader.get_template(template_name)
            includes.append(template_name)
        except template.TemplateDoesNotExist:
            continue
    context.update({'includes': includes})
    return context
def support_index_min(self, tags=None):
    # type: (Optional[List[Pep425Tag]]) -> Optional[int]
    """Return the lowest index that one of the wheel's file_tag
    combinations achieves in the supported_tags list.

    E.g. if there are 8 supported tags and one of the file tags is first
    in the list, then return 0.  Returns None if the wheel is not
    supported.
    """
    if tags is None:  # for mock
        tags = pep425tags.get_supported()
    ranks = (tags.index(tag) for tag in self.file_tags if tag in tags)
    return min(ranks, default=None)
def StaticServe(base_path='/views/static/'):
    """Meta program for serving any file based on the path.

    Returns a Program instance whose model resolves the request path to a
    file under ``<project_path><base_path>`` and returns the open file
    handle plus its guessed MIME type.
    """
    def get_file(path=RAW_INVOCATION_ARGS):
        # NOTE(review): ``path`` comes from the request and is joined into
        # the filesystem path without sanitisation — looks susceptible to
        # path traversal ("../"); confirm upstream validation exists.
        fullpath = get_config('project_path') + os.path.join(base_path, path)
        try:
            mime, encoding = mimetypes.guess_type(fullpath)
            # Binary handle + MIME type; default when the type is unknown.
            return open(fullpath, 'rb'), mime or 'application/octet-stream'
        except IOError:
            raise DataNotFound("File does not exist")
    # Inner class intentionally shadows the factory's name; only its
    # instance escapes.
    class StaticServe(Program):
        controllers = ['http-get']
        model = [get_file]
        view = FileView()
    return StaticServe()
def _get_altair_html_ ( self , chart_obj , slug ) : """Get html for an Altair chart"""
try : json_data = chart_obj . to_json ( indent = 0 ) except Exception as e : self . err ( e ) html = '<div id="' + slug + '"></div>\n' html += '<script type="text/javascript">' html += 'var spec = ' + json_data . replace ( "\n" , "" ) + ";" html += """ var embed_opt = {"mode": "vega-lite"}; function showError(altel, error){ altel.innerHTML = ('<div class="error">' + '<p>JavaScript Error: ' + error.message + '</p>' + "<p>This usually means there's a typo in your chart specification. " + "See the javascript console for the full traceback.</p>" + '</div>'); throw error; };\n""" html += "const el_" + slug + " = document.getElementById('" + slug + "');" html += "vegaEmbed('#" + slug + "', spec, embed_opt)" html += ".catch(error => showError(el_" + slug + ", error));" html += '</script>' return html
def status(self, status_code=None):
    """Set status or Get Status.

    When ``status_code`` is given, store it on the response model first.
    Always returns the current status as a string (for response support).
    """
    if status_code is not None:
        self.response_model.status = status_code
    current = self.response_model.status
    return str(current)
def coinbase_tx(cls, public_key_sec, coin_value, coinbase_bytes=b'', version=1, lock_time=0):
    """Create the special "first in block" transaction that includes the
    mining fees.

    :param public_key_sec: SEC-encoded public key paid by the output
        (pay-to-pubkey "<pubkey> OP_CHECKSIG" script).
    :param coin_value: value of the single output.
    :param coinbase_bytes: arbitrary bytes placed in the coinbase input
        script.
    """
    tx_in = cls.TxIn.coinbase_tx_in(script=coinbase_bytes)
    COINBASE_SCRIPT_OUT = "%s OP_CHECKSIG"
    script_text = COINBASE_SCRIPT_OUT % b2h(public_key_sec)
    script_bin = BitcoinScriptTools.compile(script_text)
    tx_out = cls.TxOut(coin_value, script_bin)
    # One coinbase input, one pay-to-pubkey output.
    return cls(version, [tx_in], [tx_out], lock_time)
def remove_search_paths(self, path_type, paths, target_name=None, configuration_name=None):
    """Removes the given search paths from the path_type section of the
    target on the configurations.

    :param path_type: name of the path type to remove the values from
    :param paths: A string or array of strings
    :param target_name: Target name or list of target names to remove the
        paths from, or None for every target
    :param configuration_name: Configuration name to remove the paths
        from, or None for every configuration
    :return: void
    """
    configurations = self.objects.get_configurations_on_targets(target_name, configuration_name)
    for configuration in configurations:
        configuration.remove_search_paths(path_type, paths)
def filter(value):
    """Modifier decorator to force the inclusion or exclusion of an
    attribute.

    This only modifies the behaviour of the :func:`create_patches`
    function and the :func:`patches` decorator, given that their
    parameter ``use_decorators`` is set to ``True``.

    Parameters
    ----------
    value : bool
        ``True`` to force inclusion, ``False`` to force exclusion, and
        ``None`` to inherit from the behaviour defined by
        :func:`create_patches` or :func:`patches`.

    Returns
    -------
    object
        The decorated object.
    """
    def _apply(wrapped):
        base = _get_base(wrapped)
        decorator_data = get_decorator_data(base, set_default=True)
        decorator_data.filter = value
        return wrapped
    return _apply
def _copy_calibration ( self , calibration ) : """Copy another ` ` StereoCalibration ` ` object ' s values ."""
for key , item in calibration . __dict__ . items ( ) : self . __dict__ [ key ] = item
def search(format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True, case_sensitive=False):
    '''Search "string" for the first occurrence of "format".

    The format may occur anywhere within the string. If instead you wish
    for the format to exactly match the string use parse().

    Optionally start the search at "pos" character index and limit the
    search to a maximum index of endpos - equivalent to
    search(string[:endpos]).

    If ``evaluate_result`` is True the return value will be a Result
    instance with two attributes:

     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string

    If ``evaluate_result`` is False the return value will be a Match
    instance with one method:

     .evaluate_result() - This will return a Result instance like you
       would get with ``evaluate_result`` set to True

    The default behaviour is to match strings case insensitively. You may
    match with case by specifying case_sensitive=True.

    If the format is invalid a ValueError will be raised.

    See the module documentation for the use of "extra_types".

    In the case there is no match search() will return None.
    '''
    # Thin convenience wrapper: build a one-shot Parser and delegate.
    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
    return p.search(string, pos, endpos, evaluate_result=evaluate_result)
def lock(self, key, ttl=-1):
    """Acquires the lock for the specified key infinitely or for the
    specified lease time if provided.

    If the lock is not available, the current thread becomes disabled for
    thread scheduling purposes and lies dormant until the lock has been
    acquired.

    You get a lock whether the value is present in the map or not. Other
    threads (possibly on other systems) would block on their invoke of
    lock() until the non-existent key is unlocked. If the lock holder
    introduces the key to the map, the put() operation is not blocked. If
    a thread not holding a lock on the non-existent key tries to
    introduce the key while a lock exists on the non-existent key, the
    put() operation blocks until it is unlocked.

    Scope of the lock is this map only. The acquired lock is only for the
    key in this map. Locks are re-entrant; so, if the key is locked N
    times, it should be unlocked N times before another thread can
    acquire it.

    **Warning: This method uses __hash__ and __eq__ methods of binary
    form of the key, not the actual implementations of __hash__ and
    __eq__ defined in key's class.**

    :param key: (object), the key to lock.
    :param ttl: (int), time in seconds to wait before releasing the lock
        (optional).
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    # Lock acquisition must be able to wait indefinitely, hence the
    # maximum invocation timeout; ttl is converted to milliseconds for
    # the wire protocol.
    return self._encode_invoke_on_key(
        map_lock_codec,
        key_data,
        invocation_timeout=MAX_SIZE,
        key=key_data,
        thread_id=thread_id(),
        ttl=to_millis(ttl),
        reference_id=self.reference_id_generator.get_and_increment(),
    )
def _get_margin_width ( self , cli , margin ) : """Return the width for this margin . ( Calculate only once per render time . )"""
# Margin . get _ width , needs to have a UIContent instance . def get_ui_content ( ) : return self . _get_ui_content ( cli , width = 0 , height = 0 ) def get_width ( ) : return margin . get_width ( cli , get_ui_content ) key = ( margin , cli . render_counter ) return self . _margin_width_cache . get ( key , get_width )