signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def postinit(self, items=None, body=None, type_annotation=None):
    """Do some setup after initialisation.

    :param items: The pairs of context managers and the names they are
        assigned to.
    :type items: list(tuple(NodeNG, AssignName or None)) or None
    :param body: The contents of the ``with`` block.
    :type body: list(NodeNG) or None
    """
    self.items = items
    self.body = body
    self.type_annotation = type_annotation
def get_jids():
    '''Return a list of all job ids.'''
    serv = _get_serv(ret=None)
    sql = "select distinct(jid) from jids group by load"
    # InfluxDB result shape:
    # [{u'points': [[0, jid, load],
    #               [0, jid, load]],
    #   u'name': u'jids',
    #   u'columns': [u'time', u'distinct', u'load']}]
    data = serv.query(sql)
    ret = {}
    if data:
        for _, jid, load in data[0]['points']:
            ret[jid] = salt.utils.jid.format_jid_instance(
                jid, salt.utils.json.loads(load))
    return ret
def set_last_component_continued(self):
    # type: () -> None
    '''Set the previous component of this SL record to continued.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'SL record not yet initialized!')
    if not self.symlink_components:
        raise pycdlibexception.PyCdlibInternalError(
            'Trying to set continued on a non-existent component!')
    self.symlink_components[-1].set_continued()
def _CreateFolder ( self , parent , name , visible = True , description = None ) : """Create a KML Folder element . Args : parent : The parent ElementTree . Element instance . name : The folder name as a string . visible : Whether the folder is initially visible or not . description : A description string or None . Returns : The folder ElementTree . Element instance ."""
folder = ET . SubElement ( parent , 'Folder' ) name_tag = ET . SubElement ( folder , 'name' ) name_tag . text = name if description is not None : desc_tag = ET . SubElement ( folder , 'description' ) desc_tag . text = description if not visible : visibility = ET . SubElement ( folder , 'visibility' ) visibility . text = '0' return folder
def _connection_parameters(self):
    """Return connection parameters for a pika connection.

    :rtype: pika.ConnectionParameters
    """
    credentials = pika.PlainCredentials(
        self.config.get('user', 'guest'),
        self.config.get('password', self.config.get('pass', 'guest')))
    return pika.ConnectionParameters(
        self.config.get('host', 'localhost'),
        self.config.get('port', 5672),
        self.config.get('vhost', '/'),
        credentials,
        ssl=self.config.get('ssl', False),
        frame_max=self.config.get('frame_max', spec.FRAME_MAX_SIZE),
        socket_timeout=self.config.get('socket_timeout', 10),
        heartbeat_interval=self.config.get('heartbeat_interval',
                                           self.HB_INTERVAL))
def _get_hover_data(self, data, element):
    """Initializes hover data based on Element dimension values."""
    if 'hover' not in self.handles or self.static_source:
        return
    for dim_obj, value in self.overlay_dims.items():
        column = util.dimension_sanitizer(dim_obj.name)
        if column not in data:
            # Repeat the constant overlay value once per existing data row.
            row_count = len(next(iter(data.values())))
            data[column] = [value] * row_count
def create_payload(self):
    """Wrap submitted data within an extra dict.

    For more information, see `Bugzilla #1151220
    <https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.
    """
    payload = super(ConfigTemplate, self).create_payload()
    if 'template_combinations' in payload:
        payload['template_combinations_attributes'] = payload.pop(
            'template_combinations')
    return {u'config_template': payload}
def deltet(epoch, eptype):
    """Return the value of Delta ET (ET - UTC) for an input epoch.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/deltet_c.html

    :param epoch: Input epoch (seconds past J2000).
    :type epoch: float
    :param eptype: Type of input epoch ("UTC" or "ET").
    :type eptype: str
    :return: Delta ET (ET-UTC) at input epoch.
    :rtype: float
    """
    delta = ctypes.c_double()
    libspice.deltet_c(ctypes.c_double(epoch),
                      stypes.stringToCharP(eptype),
                      ctypes.byref(delta))
    return delta.value
def _clean_html(html):
    """Removes links (``<a href="...">...</a>``) from the provided HTML input.

    Further, it replaces "&#x000A;" with ``\n`` and removes "¶" from the
    texts.
    """
    text = html.replace(u'&#x000A;', u'\n').replace(u'¶', '')
    text = _LINK_PATTERN.sub(u'', text)
    text = _HTML_TAG_PATTERN.sub(u'', text)
    return _BACKSLASH_PATTERN.sub(u'\n', text)
def _render_content(self, content, **settings):
    """Perform widget rendering, but do not print anything."""
    if not self.SETTING_WIDTH in settings or not settings[self.SETTING_WIDTH]:
        settings[self.SETTING_WIDTH] = TERMINAL_WIDTH
    # Subset of settings needed to compute the usable content width.
    geometry = {k: settings[k] for k in (
        self.SETTING_WIDTH, self.SETTING_FLAG_BORDER, self.SETTING_MARGIN,
        self.SETTING_MARGIN_LEFT, self.SETTING_MARGIN_RIGHT,
        self.SETTING_PADDING, self.SETTING_PADDING_LEFT,
        self.SETTING_PADDING_RIGHT)}
    width_int = self.calculate_width_content(**geometry)
    lines = self._wrap_content(content, width_int)
    result = []
    if settings[self.SETTING_FLAG_BORDER]:
        result.append(self._render_border_line('t', settings))
    if settings[self.SETTING_FLAG_HEADER]:
        # Render the header with the header formatting, then a middle border.
        header_settings = {k: settings[k] for k in settings.keys()}
        header_settings[self.SETTING_TEXT_FORMATING] = \
            header_settings[self.SETTING_HEADER_FORMATING]
        result.append(self._render_line(
            settings[self.SETTING_HEADER_CONTENT], header_settings))
        result.append(self._render_border_line('m', settings))
    for line in lines:
        result.append(self._render_line(line, settings))
    if settings[self.SETTING_FLAG_BORDER]:
        result.append(self._render_border_line('b', settings))
    return result
def gpg_list_profile_keys(blockchain_id, proxy=None, wallet_keys=None,
                          config_dir=None):
    """List all GPG keys in a user profile.

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    on success.
    Raise on error.
    Return {'error': ...} on failure.
    """
    config_dir = get_config_dir(config_dir)
    client_config_path = os.path.join(config_dir,
                                      blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(
            config_path=client_config_path)
    accounts = blockstack_client.list_accounts(blockchain_id, proxy=proxy)
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')  # extract
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue
        info = {"identifier": account['identifier'],
                "contentUrl": account['contentUrl']}
        if 'keyName' in account:
            info['keyName'] = account['keyName']
        ret.append(info)
    return ret
def partition(self, ref=None, **kwargs):
    """Return a partition in this bundle for a vid reference or name parts."""
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound
    if not ref and not kwargs:
        return None
    if ref:
        # Match the reference against any of the partition's identifiers.
        for p in self.partitions:
            if ref == p.name or ref == p.vname or ref == p.vid or ref == p.id:
                p._bundle = self
                return p
        raise NotFoundError("No partition found for '{}' (a)".format(ref))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        try:
            p = self.partitions._find_orm(pnq).one()
            if p:
                p._bundle = self
                return p
        except NoResultFound:
            raise NotFoundError(
                "No partition found for '{}' (b)".format(kwargs))
def ports(self):
    '''The list of all ports belonging to this component.'''
    with self._mutex:
        if not self._ports:
            # Lazily parse the ports on first access, under the lock.
            self._ports = [ports.parse_port(port, self)
                           for port in self._obj.get_ports()]
        return self._ports
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True,
                  num_group=32, bn_mom=0.9, workspace=256, memonger=False):
    """Return ResNet Unit symbol for building ResNet.

    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    stride : tuple
        Stride used in convolution
    dim_match : Boolean
        True means channel number between input and output is the same,
        otherwise means differ
    name : str
        Base name of the operators
    bottle_neck : bool
        Whether to use the bottleneck (1x1-3x3-1x1) variant
    num_group : int
        Number of convolution groups for the grouped 3x3 convolution
    bn_mom : float
        Momentum used in batch normalization
    workspace : int
        Workspace used in convolution operator
    memonger : bool
        Whether to tag the shortcut for memory mirroring
    """
    if bottle_neck:
        # Same as https://github.com/facebook/fb.resnet.torch
        # note: differs slightly from the original paper
        conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter * 0.5),
                                   kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True, workspace=workspace,
                                   name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5,
                               momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu',
                                 name=name + '_relu1')
        conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.5),
                                   num_group=num_group, kernel=(3, 3),
                                   stride=stride, pad=(1, 1), no_bias=True,
                                   workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5,
                               momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu',
                                 name=name + '_relu2')
        conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter,
                                   kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True, workspace=workspace,
                                   name=name + '_conv3')
        bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5,
                               momentum=bn_mom, name=name + '_bn3')
        if dim_match:
            shortcut = data
        else:
            # Project the input to the output shape with a strided 1x1 conv.
            shortcut_conv = mx.sym.Convolution(data=data,
                                               num_filter=num_filter,
                                               kernel=(1, 1), stride=stride,
                                               no_bias=True,
                                               workspace=workspace,
                                               name=name + '_sc')
            shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False,
                                        eps=2e-5, momentum=bn_mom,
                                        name=name + '_sc_bn')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        eltwise = bn3 + shortcut
        return mx.sym.Activation(data=eltwise, act_type='relu',
                                 name=name + '_relu')
    else:
        conv1 = mx.sym.Convolution(data=data, num_filter=num_filter,
                                   kernel=(3, 3), stride=stride, pad=(1, 1),
                                   no_bias=True, workspace=workspace,
                                   name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom,
                               eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu',
                                 name=name + '_relu1')
        conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter,
                                   kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                                   no_bias=True, workspace=workspace,
                                   name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom,
                               eps=2e-5, name=name + '_bn2')
        if dim_match:
            shortcut = data
        else:
            shortcut_conv = mx.sym.Convolution(data=data,
                                               num_filter=num_filter,
                                               kernel=(1, 1), stride=stride,
                                               no_bias=True,
                                               workspace=workspace,
                                               name=name + '_sc')
            shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False,
                                        eps=2e-5, momentum=bn_mom,
                                        name=name + '_sc_bn')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        eltwise = bn2 + shortcut
        return mx.sym.Activation(data=eltwise, act_type='relu',
                                 name=name + '_relu')
def _get_esxdatacenter_proxy_details():
    '''Returns the running esxdatacenter's proxy details.'''
    det = __salt__['esxdatacenter.get_details']()
    return (det.get('vcenter'), det.get('username'), det.get('password'),
            det.get('protocol'), det.get('port'), det.get('mechanism'),
            det.get('principal'), det.get('domain'), det.get('datacenter'))
def hash_file_contents(requirements_option: RequirementsOptions, path: Path) -> str:
    """Return a SHA256 hex digest of the contents of ``path`` combined with
    the requirements option name and the Arca version.
    """
    payload = path.read_bytes() + bytes(
        requirements_option.name + arca.__version__, "utf-8")
    return hashlib.sha256(payload).hexdigest()
def cn_occupation_energy(self, delta_occupation=None):
    """The coordination-number dependent energy for this site.

    Args:
        delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a
            change in (site-type specific) coordination number, e.g.
            {'A': 1, 'B': -1}. If this is not None, the coordination-number
            dependent energy is calculated including these changes in
            neighbour-site occupations. Defaults to None.

    Returns:
        (Float): The coordination-number dependent energy for this site.
    """
    nn_occupations = self.site_specific_nn_occupation()
    if delta_occupation:
        for site, delta in delta_occupation.items():
            assert site in nn_occupations
            nn_occupations[site] += delta
    return sum(self.cn_occupation_energies[site][cn]
               for site, cn in nn_occupations.items())
def _dump_linestring(obj, big_endian, meta):
    """Dump a GeoJSON-like `dict` to a linestring WKB string.

    Input parameters and output are similar to :func:`_dump_point`.
    """
    coords = obj['coordinates']
    # Infer the number of dimensions from the first vertex.
    num_dims = len(coords[0])
    wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
        'LineString', num_dims, big_endian, meta)
    # Append the number of vertices in the linestring.
    wkb_string += struct.pack('%sl' % byte_order, len(coords))
    for vertex in coords:
        wkb_string += struct.pack(byte_fmt, *vertex)
    return wkb_string
def total_amount(qs) -> Total:
    """Sums the amounts of the objects in the queryset, keeping each currency
    separate.

    :param qs: A queryset containing objects that have an amount field of
        type Money.
    :return: A Total object.
    """
    per_currency = qs.values('amount_currency').annotate(sum=Sum('amount'))
    return Total(Money(amount=row['sum'], currency=row['amount_currency'])
                 for row in per_currency)
def _sprite(map, sprite, offset_x=None, offset_y=None):
    """Returns the image and background position for use in a single
    shorthand property."""
    map = StringValue(map).value
    sprite_name = StringValue(sprite).value
    sprite_map = sprite_maps.get(map)
    sprite = sprite_map and sprite_map.get(sprite_name)
    if not sprite_map:
        log.error("No sprite map found: %s", map)
    elif not sprite:
        log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'])
    if sprite:
        # Cache-bust the sprite sheet URL with its build timestamp.
        url = '%s%s?_=%s' % (ASSETS_URL, sprite_map['*f*'], sprite_map['*t*'])
        x = NumberValue(offset_x or 0, 'px')
        y = NumberValue(offset_y or 0, 'px')
        if not x or (x <= -1 or x >= 1) and x.unit != '%':
            x -= sprite[2]
        if not y or (y <= -1 or y >= 1) and y.unit != '%':
            y -= sprite[3]
        pos = "url(%s) %s %s" % (escape(url), x, y)
        return StringValue(pos)
    return StringValue('0 0')
def get_closest_match(self, motifs, dbmotifs=None, match="partial",
                      metric="wic", combine="mean", parallel=True,
                      ncpus=None):
    """Return best match in database for motifs.

    Parameters
    ----------
    motifs : list or str
        Filename of motifs or list of motifs.
    dbmotifs : list or str, optional
        Database motifs, default will be used if not specified.
    match : str, optional
    metric : str, optional
    combine : str, optional
    ncpus : int, optional
        Number of threads to use.

    Returns
    -------
    closest_match : dict
    """
    if dbmotifs is None:
        # Fall back to the configured default motif database.
        pwm = self.config.get_default_params()["motif_db"]
        pwmdir = self.config.get_motif_dir()
        dbmotifs = os.path.join(pwmdir, pwm)
    motifs = parse_motifs(motifs)
    dbmotifs = parse_motifs(dbmotifs)
    dbmotif_lookup = {m.id: m for m in dbmotifs}
    scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine,
                                 parallel=parallel, ncpus=ncpus)
    # Keep only the best-scoring database motif per query motif.
    for motif in scores:
        scores[motif] = sorted(scores[motif].items(),
                               key=lambda x: x[1][0])[-1]
    # Attach the p-value of the best match to each score entry.
    for motif in motifs:
        dbmotif, score = scores[motif.id]
        pval, pos, orient = self.compare_motifs(
            motif, dbmotif_lookup[dbmotif], match, metric, combine, True)
        scores[motif.id] = [dbmotif, (list(score) + [pval])]
    return scores
def run(self):
    """Main entrypoint method.

    Returns
    -------
    new_nodes : `list`
        Nodes to add to the doctree.
    """
    if getLogger is not None:
        # Sphinx 1.6+
        logger = getLogger(__name__)
    else:
        # Previously Sphinx's app was also the logger
        logger = self.state.document.settings.env.app
    env = self.state.document.settings.env
    new_nodes = []
    # Get skip list
    skipped_modules = self._parse_skip_option()
    # List of homepage documents for each module
    module_index_files = []
    # Collect paths with the form `modules/<module-name>/index`
    for docname in _filter_index_pages(env.found_docs, 'modules'):
        logger.debug('module-toctree found %s', docname)
        if self._parse_module_name(docname) in skipped_modules:
            logger.debug('module-toctree skipped %s', docname)
            continue
        module_index_files.append(docname)
    module_index_files.sort()
    entries = [(None, docname) for docname in module_index_files]
    logger.debug('module-toctree found %d modules', len(module_index_files))
    # Add the toctree's node itself
    subnode = _build_toctree_node(parent=env.docname, entries=entries,
                                  includefiles=module_index_files,
                                  caption=None)
    set_source_info(self, subnode)  # Sphinx TocTree does this.
    wrappernode = docutils.nodes.compound(
        classes=['toctree-wrapper', 'module-toctree'])
    wrappernode.append(subnode)
    self.add_name(wrappernode)
    new_nodes.append(wrappernode)
    return new_nodes
def export_users(self, body):
    """Export all users to a file using a long running job.

    Check job status with get(). URL pointing to the export file will be
    included in the status once the job is complete.

    Args:
        body (dict): Please see:
            https://auth0.com/docs/api/management/v2#!/Jobs/post_users_exports
    """
    url = self._url('users-exports')
    return self.client.post(url, data=body)
def read_files(*sources, **kwds):
    """Construct a generator that yields file instances.

    :param sources: One or more strings representing path to file(s).
    """
    filenames = _generate_filenames(sources)
    filehandles = _generate_handles(filenames)
    for fh, source in filehandles:
        try:
            f = mwtab.MWTabFile(source)
            f.read(fh)
            if kwds.get('validate'):
                validator.validate_file(
                    mwtabfile=f,
                    section_schema_mapping=mwschema.section_schema_mapping,
                    validate_samples=True,
                    validate_factors=True)
            yield f
            if VERBOSE:
                print("Processed file: {}".format(os.path.abspath(source)))
        except Exception as e:
            # Best-effort: report and continue with the next file.
            if VERBOSE:
                print("Error processing file: ", os.path.abspath(source),
                      "\nReason:", e)
            pass
def _alerter_thread_func(self) -> None:
    """Prints alerts and updates the prompt any time the prompt is showing."""
    self._alert_count = 0
    self._next_alert_time = 0
    while not self._stop_thread:
        # Always acquire terminal_lock before printing alerts or updating
        # the prompt. To keep the app responsive, do not block on this call.
        if self.terminal_lock.acquire(blocking=False):
            # Get any alerts that need to be printed
            alert_str = self._generate_alert_str()
            # Generate a new prompt
            new_prompt = self._generate_colored_prompt()
            if alert_str:
                # new_prompt is an optional parameter to async_alert()
                self.async_alert(alert_str, new_prompt)
                new_title = "Alerts Printed: {}".format(self._alert_count)
                self.set_window_title(new_title)
            elif new_prompt != self.prompt:
                # No alerts needed to be printed, but the prompt changed
                self.async_update_prompt(new_prompt)
            # Don't forget to release the lock
            self.terminal_lock.release()
        time.sleep(0.5)
def execute_command(self):
    """Render the generate-command template into a Python script.

    The generate command uses `Jinja2 <http://jinja.pocoo.org/>`_ templates
    to create Python scripts, according to the specification in the
    configuration file. According to the --output_type argument in the CLI
    input, the results are written into a JSON document or a CSV document.
    The Python script is written into <output_filename>.py — running this
    file is the equivalent of using the Scrapple run command.
    """
    print(Back.GREEN + Fore.BLACK + "Scrapple Generate")
    print(Back.RESET + Fore.RESET)
    directory = os.path.join(scrapple.__path__[0], 'templates', 'scripts')
    with open(os.path.join(directory, 'generate.txt'), 'r') as f:
        template_content = f.read()
    template = Template(template_content)
    try:
        with open(self.args['<projectname>'] + '.json', 'r') as f:
            config = json.load(f)
        if self.args['--output_type'] == 'csv':
            from scrapple.utils.config import extract_fieldnames
            config['fields'] = str(extract_fieldnames(config))
        config['output_file'] = self.args['<output_filename>']
        config['output_type'] = self.args['--output_type']
        rendered = template.render(config=config)
        with open(self.args['<output_filename>'] + '.py', 'w') as f:
            f.write(rendered)
        print(Back.WHITE + Fore.RED + self.args['<output_filename>'],
              ".py has been created" + Back.RESET + Fore.RESET, sep="")
    except IOError:
        print(Back.WHITE + Fore.RED + self.args['<projectname>'],
              ".json does not ", "exist. Use ``scrapple genconfig``." +
              Back.RESET + Fore.RESET, sep="")
def remove(self, key, preserve_data=False):
    """Remove the document from the search index.

    :param key: Document unique identifier.
    """
    if self.members.remove(key) != 1:
        raise KeyError('Document with key "%s" not found.' % key)
    document_hash = self._get_hash(key)
    content = decode(document_hash['content'])
    if not preserve_data:
        document_hash.clear()
    # Drop this document from every word's index entry; clear entries
    # that become empty.
    for word in self.tokenizer.tokenize(content):
        word_key = self.get_key(word)
        del word_key[key]
        if len(word_key) == 0:
            word_key.clear()
def system_find_databases(input_params=None, always_retry=True, **kwargs):
    """Invokes the /system/findDatabases API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindDatabases

    :param input_params: Optional request payload dict (defaults to empty).
    :param always_retry: Whether the request may be retried on failure.
    :return: The parsed API response from DXHTTPRequest.
    """
    # Avoid a shared mutable default argument ({}): fall back to a fresh
    # empty payload per call instead.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/system/findDatabases', input_params,
                         always_retry=always_retry, **kwargs)
def unwrapArray(a, recursive=True, readH5pyDataset=True):
    """This function takes an object (like a dictionary) and recursively
    unwraps it solving issues like:

    * the fact that many objects are packaged as 0d array

    This function has also some specific hacks for handling h5py limits:

    * handle the None python object
    * numpy unicode ...
    """
    try:
        # ### take care of hdf5 groups
        if isinstance(a, h5py.Group):
            # take care of special flags first
            if ("IS_LIST" in a.attrs) or ("IS_LIST_OF_ARRAYS" in a.attrs):
                items = list(a.keys())
                items.sort()
                a = [unwrapArray(a[item], readH5pyDataset=readH5pyDataset)
                     for item in items]
        # ### take care of hdf5 datasets
        elif isinstance(a, h5py.Dataset):
            # read if asked so or if dummy array
            # WARNING: a.value and a[...] do not return the same thing...
            # a[...] returns ndarray if a is a string
            # a.value returns a str (py3) or unicode (py2)
            if readH5pyDataset or a.shape == ():
                a = a.value
        # special None flag
        # not isinstance check needed to avoid FutureWarning:
        # elementwise comparison failed; ...
        if not isinstance(a, np.ndarray) and a == "NONE_PYTHON_OBJECT":
            a = None
        # clean up non-hdf5 specific
        if isinstance(a, np.ndarray) and a.ndim == 0:
            a = a.item()
        # convert to str (for example h5py can't save numpy unicode)
        if isinstance(a, np.ndarray) and a.dtype.char == "S":
            a = a.astype(str)
        if recursive:
            if "items" in dir(a):
                # dict, h5py groups, npz file
                a = dict(a)  # convert to dict, otherwise can't assign values
                for key, value in a.items():
                    a[key] = unwrapArray(value,
                                         readH5pyDataset=readH5pyDataset)
            elif isinstance(a, (list, tuple)):
                a = [unwrapArray(element, readH5pyDataset=readH5pyDataset)
                     for element in a]
            else:
                pass
    except Exception as e:
        log.warning("Could not handle %s, error was: %s" % (a, str(e)))
    return a
def _ReadCompressedData ( self , read_size ) : """Reads compressed data from the file - like object . Args : read _ size ( int ) : number of bytes of compressed data to read . Returns : int : number of bytes of compressed data read ."""
compressed_data = self . _file_object . read ( read_size ) read_count = len ( compressed_data ) self . _compressed_data = b'' . join ( [ self . _compressed_data , compressed_data ] ) self . _uncompressed_data , self . _compressed_data = ( self . _decompressor . Decompress ( self . _compressed_data ) ) self . _uncompressed_data_size = len ( self . _uncompressed_data ) return read_count
def caesar_app(parser, cmd, args):  # pragma: no cover
    """Caesar crypt a value with a key."""
    parser.add_argument('shift', type=int, help='the shift to apply')
    parser.add_argument(
        'value',
        help='the value to caesar crypt, read from stdin if omitted',
        nargs='?')
    parser.add_argument(
        '-s', '--shift-range',
        dest='shift_ranges',
        action='append',
        help='specify a character range to shift (defaults to a-z, A-Z)')
    args = parser.parse_args(args)
    if not args.shift_ranges:
        args.shift_ranges = ['az', 'AZ']
    return caesar(args.shift,
                  pwnypack.main.string_value_or_stdin(args.value),
                  args.shift_ranges)
def group_protocols(self):
    """Returns list of preferred (protocols, metadata)."""
    if self._subscription.subscription is None:
        raise Errors.IllegalStateError('Consumer has not subscribed to topics')
    # dpkp note: I really dislike this.
    # why? because we are using this strange method group_protocols,
    # which is seemingly innocuous, to set internal state
    # (_joined_subscription) that is later used to check whether metadata
    # has changed since we joined a group. But there is no guarantee that
    # this method will get called in the correct sequence or only when we
    # want it to be. So this really should be moved elsewhere. It mimics
    # the java client, and that's the best I've got for now.
    self._joined_subscription = set(self._subscription.subscription)
    metadata_list = []
    for assignor in self.config['assignors']:
        metadata = assignor.metadata(self._joined_subscription)
        metadata_list.append((assignor.name, metadata))
    return metadata_list
def p_delays_intnumber(self, p):
    # NOTE: the string below is a PLY grammar rule, not documentation —
    # ply.yacc reads production rules from function docstrings, so it must
    # remain exactly as written.
    'delays : DELAY intnumber'
    # Build the AST node for the delay, carrying the DELAY token's line
    # number, and propagate that line number to the production result.
    p[0] = DelayStatement(IntConst(p[2], lineno=p.lineno(1)), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def get_column_flat(self, field, components=None, computed_type='for_observations'):
    """Return a single merged value (hstacked) from all meshes.

    TODO: add documentation

    :parameter str field: name of the mesh column
    :parameter components:
    """
    column = self.get_column(field, components, computed_type)
    # 'triangles' is special-cased with offset=True — presumably because
    # triangle entries index per-mesh vertices that must be shifted when
    # meshes are merged (TODO confirm against pack_column_flat).
    return self.pack_column_flat(column, components,
                                 offset=field == 'triangles')
def make_response(self, rv, status=200, headers=None, mime='application/json'):
    """Create a response object using the :class:`flask.Response` class.

    :param rv: Response value. If the value is not an instance of
        :class:`werkzeug.wrappers.Response` it will be converted into a
        Response object.
    :param status: specify the HTTP status code for this response.
    :param mime: Specify the mimetype for this request.
    :param headers: Specify dict of headers for the response.
    """
    if isinstance(rv, Response):
        # Already a Response: pass it through untouched.
        return rv
    return Response(response=rv, headers=headers, mimetype=mime,
                    status=status)
def has_shared(arg, shared):
    """Check whether *arg* is among the shared arguments.

    *shared* may be a list of ``(args, kwargs)`` tuples or an object
    exposing ``__shared_arguments__``. Returns the index of the matching
    shared argument, or False when absent or on lookup errors.
    """
    try:
        candidates = (shared if isinstance(shared, list)
                      else shared.__shared_arguments__)
        for position, (names, options) in enumerate(candidates):
            # Prefer an explicit 'dest'; otherwise derive the name from the
            # last option string, argparse-style.
            name = options.get(
                'dest', names[-1].lstrip('-').replace('-', '_'))
            if name == arg:
                return position
    except (ValueError, AttributeError):
        pass
    return False
def parse_delta(filename):
    """Return (alignment length, similarity errors) tuple from passed .delta.

    - filename - path to the input .delta file

    Extracts the aligned length and number of similarity errors for each
    aligned uniquely-matched region, and returns the cumulative total for
    each as a tuple.
    """
    aln_length, sim_errors = 0, 0
    # Use a context manager so the file handle is closed deterministically
    # (the original opened the file without ever closing it) and stream the
    # lines instead of materialising them all with readlines().
    with open(filename, "r") as dfh:
        for line in (raw.strip().split() for raw in dfh):
            if not line:
                # Skip blank lines instead of crashing on line[0].
                continue
            if line[0] == "NUCMER" or line[0].startswith(">"):  # Skip headers
                continue
            # We only process lines with seven columns:
            if len(line) == 7:
                aln_length += abs(int(line[1]) - int(line[0]))
                sim_errors += int(line[4])
    return aln_length, sim_errors
def _split_diff(merge_result, context_lines=3):
    """Split diffs and context lines into groups based on None sentinel."""
    group = []
    for item in _visible_in_diff(merge_result, context_lines=context_lines):
        if item is None:
            # Sentinel: emit the accumulated group (if any) and start anew.
            if group:
                yield group
            group = []
        else:
            group.append(item)
def construct_surface(direction, *args, **kwargs):
    """Generates surfaces from curves.

    Arguments:
        * ``args``: a list of curve instances

    Keyword Arguments (optional):
        * ``degree``: degree of the 2nd parametric direction
        * ``knotvector``: knot vector of the 2nd parametric direction
        * ``rational``: flag to generate rational surfaces

    :param direction: the direction that the input curves lie, i.e. u or v
    :type direction: str
    :return: Surface constructed from the curves on the given parametric
        direction
    """
    # Input validation
    possible_dirs = ['u', 'v']
    if direction not in possible_dirs:
        raise GeomdlException(
            "Possible direction values: " + ", ".join(
                [val for val in possible_dirs]),
            data=dict(input_dir=direction))
    size_other = len(args)
    if size_other < 2:
        raise GeomdlException("You need to input at least 2 curves")
    # Get keyword arguments
    degree_other = kwargs.get('degree', 2)
    knotvector_other = kwargs.get(
        'knotvector', knotvector.generate(degree_other, size_other))
    rational = kwargs.get('rational', args[0].rational)
    # Construct the control points of the new surface
    degree = args[0].degree
    num_ctrlpts = args[0].ctrlpts_size
    new_ctrlpts = []
    new_weights = []
    for idx, arg in enumerate(args):
        if degree != arg.degree:
            raise GeomdlException(
                "Input curves must have the same degrees",
                data=dict(idx=idx, degree=degree, degree_arg=arg.degree))
        if num_ctrlpts != arg.ctrlpts_size:
            raise GeomdlException(
                "Input curves must have the same number of control points",
                data=dict(idx=idx, size=num_ctrlpts,
                          size_arg=arg.ctrlpts_size))
        new_ctrlpts += list(arg.ctrlpts)
        if rational:
            if arg.weights is None:
                raise GeomdlException(
                    "Expecting a rational curve",
                    data=dict(idx=idx, rational=rational,
                              rational_arg=arg.rational))
            new_weights += list(arg.weights)
    # Set variables w.r.t. input direction
    if direction == 'u':
        degree_u, degree_v = degree_other, degree
        knotvector_u, knotvector_v = knotvector_other, args[0].knotvector
        size_u, size_v = size_other, num_ctrlpts
    else:
        degree_u, degree_v = degree, degree_other
        knotvector_u, knotvector_v = args[0].knotvector, knotvector_other
        size_u, size_v = num_ctrlpts, size_other
    # Reorder the control points into u-major order (weighted if rational).
    if rational:
        ctrlptsw = compatibility.combine_ctrlpts_weights(new_ctrlpts,
                                                         new_weights)
        ctrlptsw = compatibility.flip_ctrlpts_u(ctrlptsw, size_u, size_v)
        new_ctrlpts, new_weights = compatibility.separate_ctrlpts_weights(
            ctrlptsw)
    else:
        new_ctrlpts = compatibility.flip_ctrlpts_u(new_ctrlpts, size_u,
                                                   size_v)
    # Generate the surface
    ns = shortcuts.generate_surface(rational)
    ns.degree_u = degree_u
    ns.degree_v = degree_v
    ns.ctrlpts_size_u = size_u
    ns.ctrlpts_size_v = size_v
    ns.ctrlpts = new_ctrlpts
    if rational:
        ns.weights = new_weights
    ns.knotvector_u = knotvector_u
    ns.knotvector_v = knotvector_v
    # Return constructed surface
    return ns
def plot(self, win=None, newfig=True, figsize=None, orientation='hor', topfigfrac=0.8):
    """Plot the model layout.

    Parameters
    ----------
    win : list or tuple, optional
        Axis window as [x1, x2, y1, y2]; passed to ``plt.axis`` on the
        plan-view (horizontal) axes when given.
    newfig : bool
        When True, create a new figure; otherwise reuse the axes of the
        current figure (``plt.gcf()``).
    figsize : tuple, optional
        Figure size forwarded to ``plt.figure`` when ``newfig`` is True.
    orientation : str
        'hor' for plan view only, 'ver' for cross-section only, or
        'both' for stacked plan view and cross-section sharing the x axis.
    topfigfrac : float
        Fraction of the vertical space given to the top (plan-view) axes
        when ``orientation == 'both'``.
    """
    if newfig:
        plt.figure(figsize=figsize)
        ax1 = None  # plan-view axes
        ax2 = None  # cross-section axes
        if orientation == 'both':
            # Two stacked axes; the bottom one shares x with the top one.
            ax1 = plt.axes([0.125, 0.18 + (1 - topfigfrac) * 0.7,
                            (0.9 - 0.125), topfigfrac * 0.7])
            ax2 = plt.axes([0.125, 0.11,
                            (0.9 - 0.125), (1 - topfigfrac) * 0.7], sharex=ax1)
        elif orientation[:3] == 'hor':
            ax1 = plt.subplot()
        elif orientation[:3] == 'ver':
            ax2 = plt.subplot()
    else:
        # Reuse axes already present on the current figure.
        if orientation == 'both':
            fig = plt.gcf()
            ax1 = fig.axes[0]
            ax2 = fig.axes[1]
        elif orientation[:3] == 'hor':
            fig = plt.gcf()
            ax1 = fig.axes[0]
            ax2 = None
        elif orientation[:3] == 'ver':
            fig = plt.gcf()
            ax1 = None
            ax2 = fig.axes[0]
    if ax1 is not None:
        # Plan view: let every element draw itself.
        plt.sca(ax1)
        for e in self.elementlist:
            e.plot()
        if orientation[:3] == 'hor':
            plt.axis('scaled')
        elif orientation == 'both':
            plt.axis('equal')  # cannot be 'scaled' when sharing axes
        if win is not None:
            plt.axis(win)
    if ax2 is not None:
        # Cross-section: shade leaky layers and aquifer/aquifer interfaces.
        plt.sca(ax2)
        for i in range(self.aq.nlayers):
            if self.aq.ltype[i] == 'l':
                plt.axhspan(ymin=self.aq.z[i + 1], ymax=self.aq.z[i],
                            color=[0.8, 0.8, 0.8])
        for i in range(1, self.aq.nlayers):
            # Zero-height span marks the boundary between adjacent aquifers.
            if self.aq.ltype[i] == 'a' and self.aq.ltype[i - 1] == 'a':
                plt.axhspan(ymin=self.aq.z[i], ymax=self.aq.z[i],
                            color=[0.8, 0.8, 0.8])
def load_parameters(self, source):
    """Load parameters from a YAML file.

    For YAML, *source* is the file path.

    :param source: path of the YAML file to read
    :return: mapping of parameter names to values, with every string
        value wrapped in single quotes
    """
    with open(source) as handle:
        raw = yaml.safe_load(handle.read())
    # Wrap string values in single quotes; leave everything else untouched.
    return {key: ("'" + val + "'" if isinstance(val, str) else val)
            for key, val in raw.items()}
def AddStopObject(self, stop, problem_reporter=None):
    """Add a Stop object to this schedule if its stop_id is non-blank.

    Duplicate stop_ids are reported to the problem reporter and skipped.
    On success the stop gets a weak back-reference to this schedule.
    """
    # The stop must not already belong to a schedule.
    assert stop._schedule is None
    if not problem_reporter:
        problem_reporter = self.problem_reporter
    # Silently ignore stops without an id.
    if not stop.stop_id:
        return
    if stop.stop_id in self.stops:
        problem_reporter.DuplicateID('stop_id', stop.stop_id)
        return
    # Weak proxy avoids a reference cycle between schedule and stop.
    stop._schedule = weakref.proxy(self)
    self.AddTableColumns('stops', stop._ColumnNames())
    self.stops[stop.stop_id] = stop
    # Track fare zones referenced by any stop.
    if hasattr(stop, 'zone_id') and stop.zone_id:
        self.fare_zones[stop.zone_id] = True
def main(self) -> None:
    """Main entry point. Runs :func:`service`.

    Any exception escaping :func:`service` is caught at this top-level
    boundary and reported through :func:`error` with its traceback.
    """
    try:
        self.service()
    except Exception as exc:
        details = traceback.format_exc()
        self.error("Unexpected exception: {e}\n{t}".format(e=exc, t=details))
def get_identities(self, item):
    """Yield the identities found in an item.

    All identities are in the post stream: the first post is the
    question, the following posts are the replies.
    """
    stream = item['data']['post_stream']['posts']
    for entry in stream:
        yield self.get_sh_identity(entry)
def bind(self, context) -> None:
    """Bind a context to this computation.

    The context allows the computation to convert object specifiers to
    actual objects.
    """
    # Make a computation context based on the enclosing context.
    self.__computation_context = ComputationContext(self, context)
    # Re-binding is not valid: every variable and result must be unbound.
    # Be careful to set the computation after the data item is already in
    # the document.
    assert all(v.bound_item is None for v in self.variables)
    assert all(r.bound_item is None for r in self.results)
    # Bind the variables first, then the results.
    for v in self.variables:
        self.__bind_variable(v)
    for r in self.results:
        self.__bind_result(r)
def environment_list(self, io_handler):
    """List the framework process environment variables.

    Writes a two-column (variable, value) table, sorted by variable
    name, to *io_handler*.

    :param io_handler: output handler providing a ``write`` method
    """
    # Head of the table
    headers = ("Environment Variable", "Value")
    # sorted() replaces the previous copy-into-list-then-sort idiom.
    lines = sorted(os.environ.items())
    # Print the table
    io_handler.write(self._utils.make_table(headers, lines))
def see_tooltip(step, tooltip):
    """Assert that a displayed element with the given tooltip exists."""
    # Match either a plain title or a bootstrap-style data-original-title.
    xpath = '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' % {'tooltip': tooltip}
    candidates = world.browser.find_elements_by_xpath(str(xpath))
    visible = [element for element in candidates if element.is_displayed()]
    assert_true(step, visible)
def flagants(self, threshold=50):
    """Flag solutions whose amplitude is more than *threshold* times
    smaller than the median amplitude.

    :param threshold: ratio of median amplitude to solution amplitude
        above which a solution is flagged
    """
    # Identify very low gain amps not already flagged.
    badsols = n.where((n.median(self.amp) / self.amp > threshold) & (self.flagged == False))[0]
    if len(badsols):
        self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.'
                         % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols]))
        # Vectorized boolean assignment replaces the per-element loop.
        self.flagged[badsols] = True
def get(self, deviceId):
    """List all known active measurements for *deviceId*.

    Returns an empty list when the device is unknown.
    """
    by_name = self.measurements.get(deviceId)
    return [] if by_name is None else list(by_name.values())
def theme(self, text):
    """Return *text* wrapped in the theme style and reset codes."""
    prefix = self.theme_color + self.BRIGHT
    return prefix + text + self.RESET
def _interpretPayload(functioncode, payload):
    r"""Generate a human readable description of a Modbus payload.

    Args:
        * functioncode (int): Function code
        * payload (str): The payload that should be interpreted. It should
          be a byte string.

    Returns:
        A descriptive string.

    TODO: Update
    """
    # NOTE(review): this function is intentionally disabled; everything
    # below the raise is unreachable draft code kept for reference.
    raise NotImplementedError()
    output = ''
    output += 'Modbus payload decoder\n'
    output += 'Input payload (length {} characters): {!r} \n'.format(len(payload), payload)
    output += 'Function code: {} (dec).\n'.format(functioncode)
    if len(payload) == 4:
        FourbyteMessageFirstHalfValue = _twoByteStringToNum(payload[0:2])
        FourbyteMessageSecondHalfValue = _twoByteStringToNum(payload[2:4])
    return output
def _autofill_spec_record(record):
    """Return an astropy table row with columns auto-filled from the FITS
    header of the file named in ``record['spectrum']``.

    Parameters
    ----------
    record : astropy.io.fits.table.table.Row
        The spectrum table row to scrape.

    Returns
    -------
    record : astropy.io.fits.table.table.Row
        The same row, with empty fields filled from the header where
        possible.
    """
    try:
        record['filename'] = os.path.basename(record['spectrum'])
        if record['spectrum'].endswith('.fits'):
            header = pf.getheader(record['spectrum'])
            # Wavelength units: prefer XUNITS; fall back to assuming
            # microns when only BUNIT is present.
            if not record['wavelength_units']:
                try:
                    record['wavelength_units'] = header['XUNITS']
                except KeyError:
                    try:
                        if header['BUNIT']:
                            record['wavelength_units'] = 'um'
                    except KeyError:
                        pass
            # Normalize any spelling of microns to 'um'.
            if 'microns' in record['wavelength_units'] or 'Microns' in record['wavelength_units'] or 'um' in record['wavelength_units']:
                record['wavelength_units'] = 'um'
            # Flux units: prefer YUNITS, fall back to BUNIT (spaces removed).
            if not record['flux_units']:
                try:
                    record['flux_units'] = header['YUNITS'].replace(' ', '')
                except KeyError:
                    try:
                        record['flux_units'] = header['BUNIT'].replace(' ', '')
                    except KeyError:
                        pass
            # Normalize the flux unit string to one of the canonical forms.
            if 'erg' in record['flux_units'] and 'A' in record['flux_units']:
                record['flux_units'] = 'ergs-1cm-2A-1' if 'erg' in record['flux_units'] and 'A' in record['flux_units'] else 'ergs-1cm-2um-1' if 'erg' in record['flux_units'] and 'um' in record['flux_units'] else 'Wm-2um-1' if 'W' in record['flux_units'] and 'um' in record['flux_units'] else 'Wm-2A-1' if 'W' in record['flux_units'] and 'A' in record['flux_units'] else ''
            # Observation date: try the common header keyword variants.
            if not record['obs_date']:
                try:
                    record['obs_date'] = header['DATE_OBS']
                except KeyError:
                    try:
                        record['obs_date'] = header['DATE-OBS']
                    except KeyError:
                        try:
                            record['obs_date'] = header['DATE']
                        except KeyError:
                            pass
            # Telescope id: map TELESCOP substrings to internal ids.
            # NOTE(review): the substring tests are order-sensitive
            # (e.g. 'keck ii' must be checked before 'keck i').
            if not record['telescope_id']:
                try:
                    n = header['TELESCOP'].lower() if isinstance(header['TELESCOP'], str) else ''
                    record['telescope_id'] = 5 if 'hst' in n else 6 if 'spitzer' in n else 7 if 'irtf' in n else 9 if 'keck' in n and 'ii' in n else 8 if 'keck' in n and 'i' in n else 10 if 'kp' in n and '4' in n else 11 if 'kp' in n and '2' in n else 12 if 'bok' in n else 13 if 'mmt' in n else 14 if 'ctio' in n and '1' in n else 15 if 'ctio' in n and '4' in n else 16 if 'gemini' in n and 'north' in n else 17 if 'gemini' in n and 'south' in n else 18 if ('vlt' in n and 'U2' in n) else 19 if '3.5m' in n else 20 if 'subaru' in n else 21 if ('mag' in n and 'ii' in n) or ('clay' in n) else 22 if ('mag' in n and 'i' in n) or ('baade' in n) else 23 if ('eso' in n and '1m' in n) else 24 if 'cfht' in n else 25 if 'ntt' in n else 26 if ('palomar' in n and '200-inch' in n) else 27 if 'pan-starrs' in n else 28 if ('palomar' in n and '60-inch' in n) else 29 if ('ctio' in n and '0.9m' in n) else 30 if 'soar' in n else 31 if ('vlt' in n and 'U3' in n) else 32 if ('vlt' in n and 'U4' in n) else 33 if 'gtc' in n else None
                except KeyError:
                    pass
            # Instrument id: map INSTRUME substrings to internal ids.
            if not record['instrument_id']:
                try:
                    i = header['INSTRUME'].lower()
                    record['instrument_id'] = 1 if 'r-c spec' in i or 'test' in i or 'nod' in i else 2 if 'gmos-n' in i else 3 if 'gmos-s' in i else 4 if 'fors' in i else 5 if 'lris' in i else 6 if 'spex' in i else 7 if 'ldss3' in i else 8 if 'focas' in i else 9 if 'nirspec' in i else 10 if 'irs' in i else 11 if 'fire' in i else 12 if 'mage' in i else 13 if 'goldcam' in i else 14 if 'sinfoni' in i else 15 if 'osiris' in i else 16 if 'triplespec' in i else 17 if 'x-shooter' in i else 18 if 'gnirs' in i else 19 if 'wircam' in i else 20 if 'cormass' in i else 21 if 'isaac' in i else 22 if 'irac' in i else 23 if 'dis' in i else 24 if 'susi2' in i else 25 if 'ircs' in i else 26 if 'nirc' in i else 29 if 'stis' in i else 0
                except KeyError:
                    pass
    except:
        # NOTE(review): deliberate best-effort — any failure leaves the
        # record unchanged rather than aborting the ingest.
        pass
    return record
def _api_config(self):
    """Glances API RESTful implementation.

    Return the JSON representation of the Glances configuration file.
    HTTP/200 if OK, HTTP/404 on any error.
    """
    response.content_type = 'application/json; charset=utf-8'
    try:
        # Get the JSON value of the config dict.
        args_json = json.dumps(self.config.as_dict())
    except Exception as e:
        # Report the failure through the HTTP layer.
        abort(404, "Cannot get config (%s)" % str(e))
    return args_json
def Rzderiv(self, R, Z, phi=0., t=0.):
    """Evaluate the mixed R,z derivative d2phi/dz/dR.

    INPUT:
        R - Galactocentric radius (can be Quantity)
        Z - vertical height (can be Quantity)
        phi - Galactocentric azimuth (can be Quantity)
        t - time (can be Quantity)
    OUTPUT:
        d2phi/dz/dR
    HISTORY:
        2013-08-26 - Written - Bovy (IAS)
    """
    try:
        # Evaluate amplitude and raw derivative inside the try so a
        # missing implementation surfaces as a PotentialError.
        amp, deriv = self._amp, self._Rzderiv(R, Z, phi=phi, t=t)
    except AttributeError:  # pragma: no cover
        raise PotentialError("'_Rzderiv' function not implemented for this potential")
    return amp * deriv
def query(self, coords, **kwargs):
    """Return the selected Planck dust inference at the given coordinates.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to
            query.

    Returns:
        The selected Planck component at the given coordinates, scaled by
        this instance's scale factor; the output shape matches the shape
        of ``coords``.
    """
    unscaled = super(PlanckQuery, self).query(coords, **kwargs)
    return self._scale * unscaled
def find(self, text):
    """Print all substitutions that include the given text string."""
    matches = ((key, value) for key, value in self
               if text in key or text in value)
    for key, value in matches:
        print(key, value)
def star(args):
    """%prog star folder reference

    Run star on a folder with reads.
    """
    p = OptionParser(star.__doc__)
    p.add_option("--single", default=False, action="store_true",
                 help="Single end mapping")
    p.set_fastq_names()
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    folder, reference = args
    cpus = opts.cpus
    mm = MakeManager()
    # Number of read files per sample: 1 for single-end, 2 for paired-end.
    num = 1 if opts.single else 2
    folder, reference = args
    gd = "GenomeDir"
    mkdir(gd)
    # Common prefix for every STAR invocation.
    STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)
    # Step 0: build genome index
    genomeidx = op.join(gd, "Genome")
    if need_update(reference, genomeidx):
        cmd = STAR + " --runMode genomeGenerate"
        cmd += " --genomeFastaFiles {0}".format(reference)
        mm.add(reference, genomeidx, cmd)
    # Step 1: align each sample found in the folder
    for p, prefix in iter_project(folder, opts.names, num):
        pf = "{0}_star".format(prefix)
        bamfile = pf + "Aligned.sortedByCoord.out.bam"
        cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
        if p[0].endswith(".gz"):
            cmd += " --readFilesCommand zcat"
        cmd += " --outSAMtype BAM SortedByCoordinate"
        cmd += " --outFileNamePrefix {0}".format(pf)
        cmd += " --twopassMode Basic"
        # Compatibility for cufflinks
        cmd += " --outSAMstrandField intronMotif"
        cmd += " --outFilterIntronMotifs RemoveNoncanonical"
        mm.add(p, bamfile, cmd)
    # Emit the makefile describing the whole pipeline.
    mm.write()
def json_compat_obj_encode(data_type, obj, caller_permissions=None,
                           alias_validators=None, old_style=False,
                           for_msgpack=False, should_redact=False):
    """Encode an object into a JSON-compatible dict based on its type.

    Args:
        data_type (Validator): Validator for obj.
        obj (object): Object to be serialized.
        caller_permissions (list): The list of raw-string caller
            permissions with which to serialize.

    Returns:
        An object that when passed to json.dumps() will produce a string
        giving the JSON-encoded object.

    See json_encode() for additional information about validation.
    """
    return StoneToPythonPrimitiveSerializer(
        caller_permissions, alias_validators, for_msgpack,
        old_style, should_redact).encode(data_type, obj)
def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc, n_proc=1):
    """Run fit_cont on a spectrum split into regions or chunks.

    This is useful if a spectrum has gaps.

    Parameters
    ----------
    fluxes : ndarray of shape (nstars, npixels)
        Training set or test set pixel intensities.
    ivars : ndarray of shape (nstars, npixels)
        Inverse variances, parallel to fluxes.
    contmask : ndarray of length (npixels)
        Boolean pixel mask; True indicates that a pixel is continuum.
    deg : int
        Degree of the fitting function.
    ranges : iterable of (start, stop) pairs
        Pixel index ranges defining each region.
    ffunc : str
        Type of fitting function: 'chebyshev' or 'sinusoid'.
    n_proc : int
        Number of processes forwarded to the per-region fit.

    Returns
    -------
    cont : ndarray of shape (nstars, npixels)
        The continuum, parallel to fluxes.

    Raises
    ------
    ValueError
        If ``ffunc`` is not a recognized fitting function (previously this
        failed later with an UnboundLocalError).
    """
    if ffunc not in ("chebyshev", "sinusoid"):
        raise ValueError("ffunc must be 'chebyshev' or 'sinusoid', got %r" % (ffunc,))
    cont = np.zeros(fluxes.shape)
    # Fit each region independently; both branches of the old code were
    # identical apart from the ffunc string, so pass it through directly.
    for start, stop in ranges:
        cont[:, start:stop] = _find_cont_fitfunc(
            fluxes[:, start:stop], ivars[:, start:stop],
            contmask[start:stop], deg=deg, ffunc=ffunc, n_proc=n_proc)
    return cont
def _generate_random_node_values ( height ) : """Return random node values for building binary trees . : param height : Height of the binary tree . : type height : int : return : Randomly generated node values . : rtype : [ int ]"""
max_node_count = 2 ** ( height + 1 ) - 1 node_values = list ( range ( max_node_count ) ) random . shuffle ( node_values ) return node_values
def usermacro_update(hostmacroid, value, **kwargs):
    '''
    Update existing host usermacro.

    :param hostmacroid: id of the host usermacro
    :param value: new value of the host usermacro
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    return: ID of the updated host usermacro, or an empty dict on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.usermacro_update 1 'public'
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            params = {}
            method = 'usermacro.update'
            params['hostmacroid'] = hostmacroid
            params['value'] = value
            # Merge any extra connection/query kwargs into the params.
            params = _params_extend(params, _ignore_name=True, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['hostmacroids'][0]
        else:
            # No connection: fall through to the KeyError handler below,
            # which returns the empty dict.
            raise KeyError
    except KeyError:
        return ret
def Authenticate(self, app_id, challenge, registered_keys):
    """Authenticates app_id with the security key.

    Executes the U2F authentication/signature flow with the security key.

    Args:
        app_id: The app_id to register the security key against.
        challenge: Server challenge passed to the security key as a bytes
            object.
        registered_keys: List of keys already registered for this
            app_id+user.

    Returns:
        SignResponse with client_data, key_handle, and signature_data.
        The client data is an object, while the signature_data is encoded
        in FIDO U2F binary format.

    Raises:
        U2FError: There was some kind of problem with authentication (e.g.
            there was a timeout while waiting for the test of user
            presence).
    """
    client_data = model.ClientData(model.ClientData.TYP_AUTHENTICATION,
                                   challenge, self.origin)
    app_param = self.InternalSHA256(app_id)
    challenge_param = self.InternalSHA256(client_data.GetJson())
    num_invalid_keys = 0
    for key in registered_keys:
        try:
            # Only U2F_V2 keys are supported; skip others silently.
            if key.version != u'U2F_V2':
                continue
            # Retry for up to ~15 seconds (30 * 0.5s) waiting for the
            # user to touch the key (test of user presence).
            for _ in range(30):
                try:
                    resp = self.security_key.CmdAuthenticate(
                        challenge_param, app_param, key.key_handle)
                    return model.SignResponse(key.key_handle, resp, client_data)
                except errors.TUPRequiredError:
                    # Prompt the user by blinking, then wait and retry.
                    self.security_key.CmdWink()
                    time.sleep(0.5)
        except errors.InvalidKeyHandleError:
            num_invalid_keys += 1
            continue
        except errors.HardwareError as e:
            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
    if num_invalid_keys == len(registered_keys):
        # In this case, all provided keys were invalid.
        raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
    # In this case, the TUP was not pressed.
    raise errors.U2FError(errors.U2FError.TIMEOUT)
def overloaded_build(type_, add_name=None):
    """Factory for constant transformers that apply to a given build
    instruction.

    Parameters
    ----------
    type_ : type
        The object type to overload the construction of. This must be one
        of the "buildable" types, or types with a "BUILD_*" instruction.
    add_name : str, optional
        The suffix of the instruction that adds elements to the
        collection. For example: 'add' or 'append'.

    Returns
    -------
    transformer : subclass of CodeTransformer
        A new code transformer class that will overload the provided
        literal types.

    Raises
    ------
    TypeError
        If the type has no BUILD_* instruction, or ``add_name`` is given
        but the type has no matching add instruction.
    """
    typename = type_.__name__
    instrname = 'BUILD_' + typename.upper()
    dict_ = OrderedDict(__doc__=dedent(
        """
        A CodeTransformer for overloading {name} instructions.
        """.format(name=instrname)))
    try:
        build_instr = getattr(instructions, instrname)
    except AttributeError:
        raise TypeError("type %s is not buildable" % typename)
    if add_name is not None:
        try:
            add_instr = getattr(
                instructions,
                '_'.join((typename, add_name)).upper(),
            )
        except AttributeError:
            # BUG FIX: the TypeError was previously constructed but never
            # raised, leaving ``add_instr`` unbound and surfacing later as
            # a confusing NameError.
            raise TypeError("type %s is not addable" % typename)
        dict_['_start_comprehension'] = pattern(
            build_instr,
            matchany[var],
            add_instr,
        )(_start_comprehension)
        dict_['_return_value'] = pattern(
            instructions.RETURN_VALUE,
            startcodes=(IN_COMPREHENSION,),
        )(_return_value)
    else:
        add_instr = None
        dict_['_build'] = pattern(build_instr)(_build)
    # Pluralize the generated class name.
    if not typename.endswith('s'):
        typename = typename + 's'
    return type(
        'overloaded_' + typename,
        (overloaded_constants(type_),),
        dict_,
    )
def roll_data(self, data):
    """Append new data to the right side of every line strip and remove
    as much data from the left.

    Parameters
    ----------
    data : array-like
        A data array to append.
    """
    # Add a trailing axis so each sample becomes a length-1 vector.
    data = data.astype('float32')[..., np.newaxis]
    # Remaining space before the ring buffer wraps around.
    s1 = self._data_shape[1] - self._offset
    if data.shape[1] > s1:
        # Incoming data crosses the buffer end: split the write into the
        # tail segment and the wrapped-around head segment.
        self._pos_tex[:, self._offset:] = data[:, :s1]
        self._pos_tex[:, :data.shape[1] - s1] = data[:, s1:]
        self._offset = (self._offset + data.shape[1]) % self._data_shape[1]
    else:
        # Fits without wrapping: single contiguous write.
        self._pos_tex[:, self._offset:self._offset + data.shape[1]] = data
        self._offset += data.shape[1]
    # Tell the shader where the logical start of the buffer now is.
    self.shared_program['offset'] = self._offset
    self.update()
def get_assessments_taken(self):
    """Gets all ``AssessmentTaken`` elements.

    In plenary mode, the returned list contains all known assessments
    taken or an error results. Otherwise, the returned list may contain
    only those assessments taken that are accessible through this session.

    return: (osid.assessment.AssessmentTakenList) - a list of
            ``AssessmentTaken`` elements
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    cursor = collection.find(self._view_filter())
    newest_first = cursor.sort('_id', DESCENDING)
    return objects.AssessmentTakenList(newest_first,
                                       runtime=self._runtime,
                                       proxy=self._proxy)
def warn_deprecated(message, stacklevel=2):  # pragma: no cover
    """Emit a DeprecationWarning with the given message.

    ``stacklevel`` defaults to 2 so the warning points at the caller.
    """
    warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)
def iter_ROOT_classes():
    """Iterator over all available ROOT classes.

    Fetches the ROOT class index page over HTTP, scrapes the class names
    from it, and yields the corresponding attribute of ``QROOT`` for each
    name that exists there.
    """
    class_index = "http://root.cern.ch/root/html/ClassIndex.html"
    # Class names appear as <span class="typename"> elements in the index.
    for s in minidom.parse(urlopen(class_index)).getElementsByTagName("span"):
        if ("class", "typename") in s.attributes.items():
            class_name = s.childNodes[0].nodeValue
            try:
                yield getattr(QROOT, class_name)
            except AttributeError:
                # Listed in the index but not available in this build; skip.
                pass
def predict_retinotopy(sub, template='benson14', registration='fsaverage'):
    '''
    predict_retinotopy(subject) yields a pair of dictionaries each with
    four keys: angle, eccen, sigma, and varea. Each of these keys maps to
    a numpy array with one entry per vertex. The first element of the
    yielded pair is the left hemisphere map and the second is the right
    hemisphere map. The values are obtained by resampling the Benson et
    al. 2014 anatomically defined template of retinotopy to the given
    subject.

    The following optional arguments may be given:
      * template (default: 'benson14') specifies the template to use.
      * registration (default: 'fsaverage') specifies the subject
        registration to use; generally can only be 'fsaverage' or
        'fsaverage_sym'.
    '''
    template = template.lower()
    # Per-registration cache of already-loaded templates (function attribute).
    retino_tmpls = predict_retinotopy.retinotopy_templates[registration]
    hemis = ['lh', 'rh'] if registration == 'fsaverage' else ['sym']
    if template not in retino_tmpls:
        # Template not cached yet: locate and load the mgz files.
        libdir = os.path.join(library_path(), 'data')
        search_paths = [libdir]
        # just hard-baked-in for now.
        suff = 'v4_0' if registration == 'fsaverage' else 'v3_0'
        filenames = {(hname, fnm): ('%s.%s_%s.%s.mgz' % (hname, template, fnm, suff))
                     for fnm in ['angle', 'eccen', 'varea', 'sigma']
                     for hname in hemis}
        # find an appropriate directory: the first path containing all files
        tmpl_path = next((os.path.join(path0, registration)
                          for path0 in search_paths
                          if all(os.path.isfile(os.path.join(path0, registration, 'surf', s))
                                 for s in six.itervalues(filenames))),
                         None)
        if tmpl_path is None:
            raise ValueError('No subject found with appropriate surf/*.%s_* files!' % template)
        tmpl_sub = nyfs.subject(registration)
        spath = os.path.join(tmpl_path, 'surf')
        # Cache the loaded per-hemisphere property arrays immutably.
        retino_tmpls[template] = pimms.persist(
            {h: {k: pimms.imm_array(dat)
                 for k in ['angle', 'eccen', 'varea', 'sigma']
                 for dat in [nyio.load(os.path.join(tmpl_path, 'surf', filenames[(h, k)]))]}
             for h in hemis})
    # Okay, we just need to interpolate over to this subject
    tmpl = retino_tmpls[template]
    if not all(s in tmpl for s in hemis):
        raise ValueError('could not find matching template')
    # Pick source/target hemisphere pairings depending on the registration
    # and on whether a whole Subject or a single hemisphere was given.
    if registration == 'fsaverage_sym':
        sym = nyfs.subject('fsaverage_sym')
        if isinstance(sub, mri.Subject):
            # 'rhx' is the subject's right hemisphere registered to the
            # left-right symmetric space.
            subj_hems = (sub.lh, sub.hemis['rhx'])
            tmpl_hems = (sym.lh, sym.lh)
            chrs_hems = ('lh', 'rh')
        else:
            subj_hems = (sub,)
            tmpl_hems = (sym.lh,)
            chrs_hems = (sub.chirality,)
    else:
        fsa = nyfs.subject('fsaverage')
        if isinstance(sub, mri.Subject):
            subj_hems = (sub.lh, sub.rh)
            tmpl_hems = (fsa.lh, fsa.rh)
            chrs_hems = ('lh', 'rh')
        else:
            subj_hems = (sub,)
            tmpl_hems = ((fsa.lh if sub.chirality == 'lh' else fsa.rh),)
            chrs_hems = (sub.chirality,)
    # Interpolate each template hemisphere onto the subject hemisphere.
    tpl = tuple([th.interpolate(sh, tmpl[h if registration == 'fsaverage' else 'sym'])
                 for (sh, th, h) in zip(subj_hems, tmpl_hems, chrs_hems)])
    # Single hemisphere in -> single map out; otherwise the (lh, rh) pair.
    return tpl[0] if len(tpl) == 1 else tpl
def line(self, plunge, bearing, *args, **kwargs):
    """Plot points representing linear features on the axes.

    Additional arguments and keyword arguments are passed on to `plot`.

    Parameters
    ----------
    plunge, bearing : number or sequence of numbers
        The plunge and bearing of the line(s) in degrees. The plunge is
        measured in degrees downward from the end of the feature
        specified by the bearing.
    **kwargs
        Additional parameters are passed on to `plot`.

    Returns
    -------
    A sequence of Line2D artists representing the point(s).
    """
    # Convert plunge/bearing to stereonet longitude/latitude.
    longitude, latitude = stereonet_math.line(plunge, bearing)
    args, kwargs = self._point_plot_defaults(args, kwargs)
    return self.plot([longitude], [latitude], *args, **kwargs)
def read_xml(self):
    """Read metadata from XML and set all the found properties.

    :return: the root element of the xml (or None if nothing was read)
    :rtype: ElementTree.Element
    """
    # Read from the database when no URI is configured, else from file.
    root = self._read_xml_db() if self.xml_uri is None else self._read_xml_file()
    if root is None:
        return root
    for name, path in list(self._standard_properties.items()):
        value = read_property_from_xml(root, path)
        if value is not None:
            # setattr goes through the default property setters.
            setattr(self, name, value)
    return root
def _remove_subsequent_result_because_of_batch_failure(self, sig):
    """Remove transactions from scheduled and txn_results for successors
    of txns in a failed batch. These transactions will now, or in the
    future, be rescheduled in next_transaction; giving a replay ability.

    Args:
        sig (str): Transaction header signature
    """
    batch = self._batches_by_txn_id[sig]
    # Track successors already handled so each is replayed only once.
    seen = []
    for txn in batch.transactions:
        txn_id = txn.header_signature
        # Iterate over a copy since _scheduled is mutated in the loop.
        for poss_successor in self._scheduled.copy():
            if not self.is_transaction_in_schedule(poss_successor):
                continue
            if self._is_txn_to_replay(txn_id, poss_successor, seen):
                if self._txn_has_result(poss_successor):
                    # Drop the stale result and make the txn available
                    # for rescheduling.
                    del self._txn_results[poss_successor]
                    self._scheduled.remove(poss_successor)
                    self._txns_available[poss_successor] = self._transactions[poss_successor]
                else:
                    # No result yet: mark it outstanding so its eventual
                    # result is discarded.
                    self._outstanding.add(poss_successor)
                seen.append(poss_successor)
def confirmation_option(*param_decls, **attrs):
    """Shortcut for confirmation prompts that can be ignored by passing
    ``--yes`` as parameter.

    This is equivalent to decorating a function with :func:`option` with
    the following parameters::

        def callback(ctx, param, value):
            if not value:
                ctx.abort()

        @click.command()
        @click.option('--yes', is_flag=True, callback=callback,
                      expose_value=False, prompt='Do you want to continue?')
        def dropdb():
            pass
    """
    def decorator(f):
        def callback(ctx, param, value):
            # Declining the prompt aborts the command.
            if not value:
                ctx.abort()
        defaults = {
            'is_flag': True,
            'callback': callback,
            'expose_value': False,
            'prompt': 'Do you want to continue?',
            'help': 'Confirm the action without prompting.',
        }
        for key, default in defaults.items():
            attrs.setdefault(key, default)
        decls = param_decls or ('--yes',)
        return option(*decls, **attrs)(f)
    return decorator
def osmlem(op, x, data, niter, callback=None, **kwargs):
    r"""Ordered Subsets Maximum Likelihood Expectation Maximation algorithm.

    This solver attempts to solve::

        max_x L(x | data)

    where ``L(x | data)`` is the likelihood of ``x`` given ``data``. The
    likelihood depends on the forward operators ``op[0], ..., op[n-1]``
    such that (approximately)::

        op[i](x) = data[i]

    Parameters
    ----------
    op : sequence of `Operator`
        Forward operators in the inverse problem.
    x : ``op.domain`` element
        Vector to which the result is written. Its initial value is used
        as starting point of the iteration, and its values are updated in
        each iteration step. The initial value of ``x`` should be
        non-negative.
    data : sequence of ``op.range`` `element-like`
        Right-hand sides of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each (partial)
        iteration.

    Other Parameters
    ----------------
    sensitivities : float or ``op.domain`` `element-like`, optional
        The algorithm contains an ``A^T 1`` term; if this parameter is
        given, it is replaced by it.
        Default: ``op[i].adjoint(op[i].range.one())``

    Notes
    -----
    The algorithm performs the partial (subset) updates

    .. math::
        x_{n + m/M} =
        \frac{x_{n + (m-1)/M}}{A_i^* 1}
        A_i^* (g_i / A_i(x_{n + (m-1)/M}))

    for :math:`m = 1, ..., M`. It is not guaranteed to converge, but
    works for many practical problems.

    References
    ----------
    Natterer, F. Mathematical Methods in Image Reconstruction,
    section 5.3.2.

    See Also
    --------
    mlem : Ordinary MLEM algorithm without subsets.
    loglikelihood : Function for calculating the logarithm of the
        likelihood.
    """
    n_ops = len(op)
    if len(data) != n_ops:
        raise ValueError('number of data ({}) does not match number of '
                         'operators ({})'.format(len(data), n_ops))
    if not all(x in opi.domain for opi in op):
        raise ValueError('`x` not an element in the domains of all operators')
    # Convert data to range elements
    data = [op[i].range.element(data[i]) for i in range(len(op))]
    # Parameter used to enforce positivity.
    # TODO: let users give this.
    eps = 1e-8
    if np.any(np.less(x, 0)):
        raise ValueError('`x` must be non-negative')
    # Extract the sensitivites parameter
    sensitivities = kwargs.pop('sensitivities', None)
    if sensitivities is None:
        # Default: A^T 1, clamped away from zero to avoid division issues.
        sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps)
                         for opi in op]
    else:
        # Make sure the sensitivities is a list of the correct size.
        try:
            list(sensitivities)
        except TypeError:
            sensitivities = [sensitivities] * n_ops
    # Scratch buffers reused across iterations to avoid reallocation.
    tmp_dom = op[0].domain.element()
    tmp_ran = [opi.range.element() for opi in op]
    for _ in range(niter):
        for i in range(n_ops):
            # tmp_ran[i] <- max(A_i(x), eps)
            op[i](x, out=tmp_ran[i])
            tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i])
            # tmp_ran[i] <- g_i / A_i(x)
            data[i].divide(tmp_ran[i], out=tmp_ran[i])
            # tmp_dom <- A_i^*(g_i / A_i(x)) / sensitivities[i]
            op[i].adjoint(tmp_ran[i], out=tmp_dom)
            tmp_dom /= sensitivities[i]
            # Multiplicative in-place update of the iterate.
            x *= tmp_dom
            if callback is not None:
                callback(x)
def encode_quorum(self, rw):
    """Convert a symbolic quorum value into its on-the-wire equivalent.

    :param rw: the quorum
    :type rw: string, integer
    :rtype: integer
    """
    # Symbolic names (e.g. 'all', 'quorum') map through the lookup table.
    try:
        return QUORUM_TO_PB[rw]
    except KeyError:
        pass
    # Plain non-negative integers pass through unchanged; note that
    # `type(rw) is int` deliberately excludes bool, unlike isinstance.
    if type(rw) is int and rw >= 0:
        return rw
    return None
def mxnet_prefer_gpu():
    """Pick the best available MXNet context.

    Returns
    -------
    context : Context
        The GPU selected via the ``MXNET_GPU`` environment variable when it
        is actually present on this machine, otherwise the CPU.
    """
    wanted = int(os.environ.get('MXNET_GPU', default=0))
    if wanted in mx.test_utils.list_gpus():
        return mx.gpu(wanted)
    return mx.cpu()
def get(key, service=None, profile=None):  # pylint: disable=W0613
    '''Get a value from the etcd service'''
    # `service` is accepted for interface compatibility but unused.
    conn = _get_conn(profile)
    return conn.get(key).value
def apply_getters(self, task):
    """Resolve the task's declared "getters" before it starts.

    Getters are registered with the syntax ``deps={node: "@property"}``:
    the task must fetch ``property`` from ``node`` before running.
    Only ``@structure`` is supported at present.
    """
    if not self.getters:
        return
    for getter in self.getters:
        if getter != "@structure":
            raise ValueError("Wrong getter %s" % getter)
        task.history.info("Getting structure from %s" % self.node)
        # Pull the relaxed structure from the dependency node and use it
        # as the task's input structure.
        final_structure = self.node.get_final_structure()
        task._change_structure(final_structure)
def guess_strategy_type(file_name_or_ext):
    """Guess strategy type to use for a file by extension.

    Args:
        file_name_or_ext: Either a file name with an extension or just an
            extension.

    Returns:
        Strategy: Type corresponding to extension, or None if there's no
        corresponding strategy type.
    """
    if '.' not in file_name_or_ext:
        # Already a bare extension.
        ext = file_name_or_ext
    else:
        # Take only the extension part; the basename was previously bound to
        # an unused local, so discard it directly.
        ext = os.path.splitext(file_name_or_ext)[1].lstrip('.')

    file_type_map = get_file_type_map()
    return file_type_map.get(ext, None)
def get_minor_version(version, remove=None):
    """Return minor version of a provided version string.

    Minor version is the second component in the dot-separated version
    string. For non-version-like strings this function returns ``None``.

    The ``remove`` parameter is deprecated since version 1.18 and will be
    removed in the future.

    :param version: Version string
    :type version: str
    :rtype: str
    """
    if remove:
        warnings.warn("remove argument is deprecated", DeprecationWarning)
    # Assume MAJOR.MINOR.REST...; the minor part is the second component.
    components = version.split(".")
    return components[1] if len(components) > 1 else None
def expect(self, use_proportions=True):
    """The Expectation step of the CEM algorithm"""
    # Only recompute likelihoods for the parts of the partition that moved.
    changed_nodes = self.get_changed(self.partition, self.prev_partition)
    likelihoods = self.generate_lktable(self.partition, changed_nodes,
                                        use_proportions)
    self.table = self.likelihood_table_to_probs(likelihoods)
def build_kernel(self):
    """Build the KNN kernel.

    Build a k nearest neighbors kernel, optionally with alpha decay.
    If `precomputed` is not `None`, the appropriate steps in the
    kernel building process are skipped.
    Must return a symmetric matrix

    Returns
    -------
    K : kernel matrix, shape=[n_samples, n_samples]
        symmetric matrix with ones down the diagonal
        with no non-negative entries.

    Raises
    ------
    ValueError: if `precomputed` is not an acceptable value
    """
    if self.precomputed == "affinity":
        # already done
        # TODO: should we check that precomputed matrices look okay?
        # e.g. check the diagonal
        K = self.data_nu
    elif self.precomputed == "adjacency":
        # need to set diagonal to one to make it an affinity matrix
        K = self.data_nu
        # Converting to LIL first makes the diagonal assignment efficient
        # for sparse formats that don't support cheap item assignment.
        if sparse.issparse(K) and not (isinstance(K, sparse.dok_matrix) or
                                       isinstance(K, sparse.lil_matrix)):
            K = K.tolil()
        K = set_diagonal(K, 1)
    else:
        # Build affinities from raw data or a precomputed distance matrix.
        tasklogger.log_start("affinities")
        if sparse.issparse(self.data_nu):
            # pdist and np.partition below need a dense array.
            self.data_nu = self.data_nu.toarray()
        if self.precomputed == "distance":
            pdx = self.data_nu
        elif self.precomputed is None:
            # Condensed pairwise distances; squareform'd to a full matrix.
            pdx = pdist(self.data_nu, metric=self.distance)
            if np.any(pdx == 0):
                pdx = squareform(pdx)
                # Report each duplicate pair once (upper triangle only).
                duplicate_ids = np.array(
                    [i for i in np.argwhere(pdx == 0)
                     if i[1] > i[0]])
                duplicate_names = ", ".join(
                    ["{} and {}".format(i[0], i[1])
                     for i in duplicate_ids])
                warnings.warn(
                    "Detected zero distance between samples {}. "
                    "Consider removing duplicates to avoid errors in "
                    "downstream processing.".format(duplicate_names),
                    RuntimeWarning)
            else:
                pdx = squareform(pdx)
        else:
            raise ValueError(
                "precomputed='{}' not recognized. "
                "Choose from ['affinity', 'adjacency', 'distance', "
                "None]".format(self.precomputed))
        # Per-point bandwidth: distance to the knn-th neighbor by default,
        # a user callable applied to the distances, or a fixed value.
        if self.bandwidth is None:
            knn_dist = np.partition(
                pdx, self.knn + 1, axis=1)[:, :self.knn + 1]
            bandwidth = np.max(knn_dist, axis=1)
        elif callable(self.bandwidth):
            bandwidth = self.bandwidth(pdx)
        else:
            bandwidth = self.bandwidth
        bandwidth = bandwidth * self.bandwidth_scale
        # Row-wise rescale by bandwidth, then apply the alpha-decay kernel.
        pdx = (pdx.T / bandwidth).T
        K = np.exp(-1 * np.power(pdx, self.decay))
        # handle nan
        K = np.where(np.isnan(K), 1, K)
        tasklogger.log_complete("affinities")
    # truncate
    # Zero out entries below the threshold to sparsify the kernel.
    if sparse.issparse(K):
        # Only CSR/CSC/BSR expose `.data` aligned with stored entries.
        if not (isinstance(K, sparse.csr_matrix) or
                isinstance(K, sparse.csc_matrix) or
                isinstance(K, sparse.bsr_matrix)):
            K = K.tocsr()
        K.data[K.data < self.thresh] = 0
        # Round-trip through COO to drop the explicitly-stored zeros.
        K = K.tocoo()
        K.eliminate_zeros()
        K = K.tocsr()
    else:
        K[K < self.thresh] = 0
    return K
def moves_from_games(self, start_game, end_game, moves, shuffle,
                     column_family, column):
    """Dataset of samples and/or shuffled moves from a game range.

    Args:
      start_game: integer, the first game in the range to source moves from.
      end_game: integer, the last game in the range to source moves from.
      moves: an integer indicating how many moves should be sampled
        from those games.
      shuffle: if True, shuffle the selected move examples.
      column_family: name of the column family containing move examples.
      column: name of the column containing move examples.

    Returns:
      A dataset containing no more than `moves` examples, sampled
      randomly from the games in the given range.
    """
    start_row = ROW_PREFIX.format(start_game)
    end_row = ROW_PREFIX.format(end_game)
    # NOTE: Choose a probability high enough to guarantee at least the
    # required number of moves, by using a slightly lower estimate
    # of the total moves, then trimming the result.
    total_moves = self.count_moves_in_game_range(start_game, end_game)
    probability = moves / (total_moves * 0.99)
    utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; '
              'moves %d' % (start_row, end_row, total_moves,
                            probability, moves))
    ds = self.tf_table.parallel_scan_range(start_row, end_row,
                                           probability=probability,
                                           columns=[(column_family, column)])
    if shuffle:
        utils.dbg('Doing a complete shuffle of %d moves' % moves)
        # Shuffle buffer sized to hold all requested moves, i.e. a full
        # shuffle of the sampled set.
        ds = ds.shuffle(moves)
    # Trim the slight oversampling introduced by the 0.99 factor above.
    ds = ds.take(moves)
    return ds
def _filter_matrix_rows ( cls , matrix ) : '''matrix = output from _ to _ matrix'''
indexes_to_keep = [ ] for i in range ( len ( matrix ) ) : keep_row = False for element in matrix [ i ] : if element not in { 'NA' , 'no' } : keep_row = True break if keep_row : indexes_to_keep . append ( i ) return [ matrix [ i ] for i in indexes_to_keep ]
def _sanity_check_mark_location_preceding_optional_traverse(ir_blocks):
    """Assert that optional Traverse blocks are preceded by a MarkLocation."""
    # Fold blocks are stripped first; in the remaining sequence, every
    # Traverse with optional=True must directly follow a MarkLocation.
    _, blocks_without_folds = extract_folds_from_ir_blocks(ir_blocks)
    for previous_block, current_block in pairwise(blocks_without_folds):
        is_optional_traverse = (isinstance(current_block, Traverse) and
                                current_block.optional)
        if is_optional_traverse and not isinstance(previous_block,
                                                   MarkLocation):
            raise AssertionError(
                u'Expected MarkLocation before Traverse with optional=True, '
                u'but none was found: {}'.format(ir_blocks))
def _generate_default_grp_constraints(roles, network_constraints):
    """Generate default symetric grp constraints."""
    default_delay = network_constraints.get('default_delay')
    default_rate = network_constraints.get('default_rate')
    default_loss = network_constraints.get('default_loss', 0)
    except_groups = network_constraints.get('except', [])
    group_descriptions = network_constraints.get('groups', roles.keys())
    # Expand each group description and flatten the result into one list.
    all_groups = []
    for description in group_descriptions:
        all_groups.extend(expand_groups(description))
    # Build the cartesian product of constraints, skipping excluded groups
    # and self-pairs (unless the constraints explicitly allow src == dst).
    constraints = []
    for src in all_groups:
        for dst in all_groups:
            allowed = ((src != dst or
                        _src_equals_dst_in_constraints(network_constraints,
                                                       src)) and
                       src not in except_groups and
                       dst not in except_groups)
            if allowed:
                constraints.append({'src': src,
                                    'dst': dst,
                                    'delay': default_delay,
                                    'rate': default_rate,
                                    'loss': default_loss})
    return constraints
def sample(self, nsamples, nburn=0, nthin=1,
           save_hidden_state_trajectory=False, call_back=None):
    """Sample from the BHMM posterior.

    Parameters
    ----------
    nsamples : int
        The number of samples to generate.
    nburn : int, optional, default=0
        The number of samples to discard to burn-in, following which
        `nsamples` will be generated.
    nthin : int, optional, default=1
        The number of Gibbs sampling updates used to generate each
        returned sample.
    save_hidden_state_trajectory : bool, optional, default=False
        If True, the hidden state trajectory for each sample will be
        saved as well.
    call_back : function, optional, default=None
        A callback with no arguments, invoked after each computed sample;
        useful for implementing progress bars.

    Returns
    -------
    models : list of bhmm.HMM
        The sampled HMM models from the Bayesian posterior.
    """
    # Burn-in phase: advance the chain without recording anything.
    for iteration in range(nburn):
        logger().info("Burn-in %8d / %8d" % (iteration, nburn))
        self._update()

    # Sampling phase.
    models = []
    for iteration in range(nsamples):
        logger().info("Iteration %8d / %8d" % (iteration, nsamples))
        # Thinning: run `nthin` Gibbs updates per recorded sample.
        for _ in range(nthin):
            self._update()
        # Snapshot the current model so later updates don't mutate it.
        snapshot = copy.deepcopy(self.model)
        if not save_hidden_state_trajectory:
            # Drop the (potentially large) trajectory unless requested.
            snapshot.hidden_state_trajectory = None
        models.append(snapshot)
        if call_back is not None:
            call_back()

    return models
def invariant_image_similarity(image1, image2,
                               local_search_iterations=0, metric='MI',
                               thetas=np.linspace(0, 360, 5),
                               thetas2=np.linspace(0, 360, 5),
                               thetas3=np.linspace(0, 360, 5),
                               scale_image=1, do_reflection=False,
                               txfn=None, transform='Affine'):
    """Similarity metrics between two images as a function of geometry.

    Compute similarity metric between two images as image is rotated about
    its center w/ or w/o optimization.

    ANTsR function: `invariantImageSimilarity`

    Arguments
    ---------
    image1 : ANTsImage
        reference image
    image2 : ANTsImage
        moving image
    local_search_iterations : integer
        integer controlling local search in multistart
    metric : string
        which metric to use: 'MI' or 'GC'
    thetas : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees
    thetas2 : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees around principal axis 2 (3D)
    thetas3 : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees around principal axis 3 (3D)
    scale_image : scalar
        global scale
    do_reflection : boolean
        whether to reflect image about principal axis
    txfn : string (optional)
        if present, write optimal tx to .mat file
    transform : string
        type of transform to use: 'Rigid', 'Similarity' or 'Affine'

    Returns
    -------
    (pd.DataFrame, string)
        dataframe with metric values and transformation parameters, and the
        filename of the .mat transform file.

    Example
    -------
    >>> import ants
    >>> img1 = ants.image_read(ants.get_ants_data('r16'))
    >>> img2 = ants.image_read(ants.get_ants_data('r64'))
    >>> metric = ants.invariant_image_similarity(img1, img2)
    """
    # NOTE(review): the ndarray defaults for thetas/thetas2/thetas3 are
    # evaluated once at definition time and shared across calls; they are
    # only read here, so this appears safe — confirm callers never mutate.
    if transform not in {'Rigid', 'Similarity', 'Affine'}:
        raise ValueError('transform must be one of Rigid/Similarity/Affine')

    # The native routine works on float images; clone if necessary.
    if image1.pixeltype != 'float':
        image1 = image1.clone('float')
    if image2.pixeltype != 'float':
        image2 = image2.clone('float')

    if txfn is None:
        txfn = mktemp(suffix='.mat')

    # convert thetas to radians
    thetain = (thetas * math.pi) / 180.
    thetain2 = (thetas2 * math.pi) / 180.
    thetain3 = (thetas3 * math.pi) / 180.

    # Intensity-normalize both images before comparing them.
    image1 = utils.iMath(image1, 'Normalize')
    image2 = utils.iMath(image2, 'Normalize')

    idim = image1.dimension
    fpname = ['FixedParam%i' % i for i in range(1, idim + 1)]

    if not do_reflection:
        # Single call: no reflection (flag 0).
        libfn = utils.get_lib_fn('invariantImageSimilarity_%s%iD' %
                                 (transform, idim))
        r1 = libfn(image1.pointer, image2.pointer,
                   list(thetain), list(thetain2), list(thetain3),
                   local_search_iterations, metric, scale_image,
                   int(do_reflection), txfn)
        r1 = np.asarray(r1)
        # First column is the metric value; the trailing `idim` parameter
        # columns are the fixed parameters.
        pnames = ['Param%i' % i for i in range(1, r1.shape[1])]
        pnames[(len(pnames) - idim):len(pnames)] = fpname
        r1 = pd.DataFrame(r1, columns=['MetricValue'] + pnames)
        return r1, txfn
    else:
        # Try all four reflection modes (flags 0..3), each writing its own
        # transform file, and return the best (lowest metric) result.
        txfn1 = mktemp(suffix='.mat')
        txfn2 = mktemp(suffix='.mat')
        txfn3 = mktemp(suffix='.mat')
        txfn4 = mktemp(suffix='.mat')
        libfn = utils.get_lib_fn('invariantImageSimilarity_%s%iD' %
                                 (transform, idim))
        ## R1 ## (reflection flag 0)
        r1 = libfn(image1.pointer, image2.pointer,
                   list(thetain), list(thetain2), list(thetain3),
                   local_search_iterations, metric, scale_image,
                   0, txfn1)
        r1 = np.asarray(r1)
        pnames = ['Param%i' % i for i in range(1, r1.shape[1])]
        pnames[(len(pnames) - idim):len(pnames)] = fpname
        r1 = pd.DataFrame(r1, columns=['MetricValue'] + pnames)
        ## R2 ## (reflection flag 1)
        r2 = libfn(image1.pointer, image2.pointer,
                   list(thetain), list(thetain2), list(thetain3),
                   local_search_iterations, metric, scale_image,
                   1, txfn2)
        r2 = np.asarray(r2)
        r2 = pd.DataFrame(r2, columns=['MetricValue'] + pnames)
        ## R3 ## (reflection flag 2)
        r3 = libfn(image1.pointer, image2.pointer,
                   list(thetain), list(thetain2), list(thetain3),
                   local_search_iterations, metric, scale_image,
                   2, txfn3)
        r3 = np.asarray(r3)
        r3 = pd.DataFrame(r3, columns=['MetricValue'] + pnames)
        ## R4 ## (reflection flag 3)
        r4 = libfn(image1.pointer, image2.pointer,
                   list(thetain), list(thetain2), list(thetain3),
                   local_search_iterations, metric, scale_image,
                   3, txfn4)
        r4 = np.asarray(r4)
        r4 = pd.DataFrame(r4, columns=['MetricValue'] + pnames)
        # Pick the run whose best metric value is lowest.
        rmins = [np.min(r1.iloc[:, 0]), np.min(r2.iloc[:, 0]),
                 np.min(r3.iloc[:, 0]), np.min(r4.iloc[:, 0])]
        ww = np.argmin(rmins)
        if ww == 0:
            return r1, txfn1
        elif ww == 1:
            return r2, txfn2
        elif ww == 2:
            return r3, txfn3
        elif ww == 3:
            return r4, txfn4
def fix_pbc(structure, matrix=None):
    """Set all frac_coords of the input structure within [0, 1).

    Args:
        structure (pymatgen structure object): input structure
        matrix (lattice matrix, 3 by 3 array/matrix): new structure's
            lattice matrix; if None, use the input structure's matrix

    Return:
        new structure with fixed frac_coords and lattice matrix
    """
    if matrix is None:
        lattice = Lattice(structure.lattice.matrix)
    else:
        lattice = Lattice(matrix)

    species = []
    fixed_coords = []
    for site in structure:
        species.append(site.specie)
        coord = np.array(site.frac_coords)
        for axis in range(3):
            # Wrap into [0, 1), then snap values that are numerically
            # 0 or 1 to exactly 0, rounding everything else.
            coord[axis] -= floor(coord[axis])
            if np.allclose(coord[axis], 1):
                coord[axis] = 0
            elif np.allclose(coord[axis], 0):
                coord[axis] = 0
            else:
                coord[axis] = round(coord[axis], 7)
        fixed_coords.append(coord)

    return Structure(lattice, species, fixed_coords,
                     site_properties=structure.site_properties)
def unpack_response(self, cursor_id=None,
                    codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
                    user_fields=None, legacy_response=False):
    """Unpack an OP_MSG command response.

    :Parameters:
      - `cursor_id` (optional): Ignored, for compatibility with _OpReply.
      - `codec_options` (optional): an instance of
        :class:`~bson.codec_options.CodecOptions`
    """
    # An OP_MSG response can never be a legacy-style reply.
    assert not legacy_response
    return bson._decode_all_selective(
        self.payload_document, codec_options, user_fields)
def load_tags ( self , max_pages = 30 ) : """Load all WordPress tags from the given site . : param max _ pages : kill counter to avoid infinite looping : return : None"""
logger . info ( "loading tags" ) # clear them all out so we don ' t get dupes if requested if self . purge_first : Tag . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/tags" . format ( self . site_id ) params = { "number" : 1000 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_tags = response . json ( ) . get ( "tags" ) if not api_tags : # we ' re done here break tags = [ ] for api_tag in api_tags : # if it exists locally , update local version if anything has changed existing_tag = Tag . objects . filter ( site_id = self . site_id , wp_id = api_tag [ "ID" ] ) . first ( ) if existing_tag : self . update_existing_tag ( existing_tag , api_tag ) else : tags . append ( self . get_new_tag ( api_tag ) ) if tags : Tag . objects . bulk_create ( tags ) elif not self . full : # we ' re done here break # get next page page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return
def browse_history(self, backward):
    """Browse history.

    Replaces either the whole current line or the text after the cursor
    with the next/previous matching history entry.

    :param backward: if True, search towards older history entries.
    """
    # If the cursor is mid-line, drop whole-line mode so only the text up
    # to the cursor is used as the search prefix.
    if self.is_cursor_before('eol') and self.hist_wholeline:
        self.hist_wholeline = False
    tocursor = self.get_current_line_to_cursor()
    # find_in_history returns the matched text (or None) and updates the
    # history index so repeated calls keep walking through matches.
    text, self.histidx = self.find_in_history(tocursor, self.histidx,
                                              backward)
    if text is not None:
        if self.hist_wholeline:
            self.clear_line()
            self.insert_text(text)
        else:
            cursor_position = self.get_position('cursor')
            # Removing text from cursor to the end of the line
            self.remove_text('cursor', 'eol')
            # Inserting history text
            self.insert_text(text)
            # Restore the cursor so the typed prefix stays in place.
            self.set_cursor_position(cursor_position)
def butter(dt_list, val, lowpass=1.0):
    """This is framework for a butterworth bandpass for 1D data

    Needs to be cleaned up and generalized.

    :param dt_list: sequence of datetimes for the samples in `val`
    :param val: 1D data array to filter
    :param lowpass: NOTE(review): currently unused — confirm intent
    """
    # NOTE(review): the filtered results (val_f_tide, val_f_tide_denoise,
    # val_f_notide) are computed but never returned; as written the
    # function returns None. Confirm whether a `return` statement was lost.
    import scipy.signal
    import matplotlib.pyplot as plt
    # dt is 300 s, 5 min
    # Sampling interval estimated as the median spacing of the timestamps.
    dt_diff = np.diff(dt_list)
    dt_diff = np.array([dt.total_seconds() for dt in dt_diff])
    dt = malib.fast_median(dt_diff)
    # f is 0.00333 Hz
    # 288 samples/day
    fs = 1. / dt
    nyq = fs / 2.
    if False:
        # Disabled diagnostic block: plot the power spectral density.
        # psd, f = psd(z_msl, fs)
        sp_f, sp_psd = scipy.signal.periodogram(val, fs, detrend='linear')
        # sp_f, sp_psd = scipy.signal.welch(z_msl, fs, nperseg=2048)
        sp_f_days = 1. / sp_f / 86400.
        plt.figure()
        plt.plot(sp_f, sp_psd)
        plt.plot(sp_f_days, sp_psd)
        plt.semilogy(sp_f_days, sp_psd)
        plt.xlabel('Frequency')
        plt.ylabel('Power')
    print("Filtering tidal signal")
    # Define bandpass filter
    # Cutoffs (normalized by Nyquist): ~0.1 day and ~1.8 day periods.
    # f_min = dt/(86400*0.25)
    f_max = (1. / (86400 * 0.1)) / nyq
    f_min = (1. / (86400 * 1.8)) / nyq
    order = 6
    # High-pass to isolate the (short-period) tidal component.
    b, a = scipy.signal.butter(order, f_min, btype='highpass')
    # b, a = sp.signal.butter(order, (f_min, f_max), btype='bandpass')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq / np.pi) * w
    w_f_days = 1 / w_f / 86400.
    # plt.figure()
    # plt.plot(w_f_days, np.abs(h))
    # Zero-phase filtering to avoid introducing a phase shift.
    val_f_tide = scipy.signal.filtfilt(b, a, val)
    # Low-pass the tidal component to denoise it.
    b, a = scipy.signal.butter(order, f_max, btype='lowpass')
    # b, a = sp.signal.butter(order, (f_min, f_max), btype='bandstop')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq / np.pi) * w
    w_f_days = 1 / w_f / 86400.
    # plt.plot(w_f_days, np.abs(h))
    val_f_tide_denoise = scipy.signal.filtfilt(b, a, val_f_tide)
    # val_f_notide = sp.signal.filtfilt(b, a, val)
    # Residual after removing the tidal component.
    val_f_notide = val - val_f_tide
def item_path_or(default, keys, dict_or_obj):
    """Optional version of item_path with a default value.

    keys can be dict keys or object attributes, or a combination.

    :param default: value returned for any key that cannot be resolved
    :param keys: List of keys or dot-separated string
    :param dict_or_obj: A dict or obj
    :return: the value found at the key path, or `default`
    """
    if not keys:
        raise ValueError("Expected at least one key, got {0}".format(keys))
    # BUG FIX: the isinstance arguments were swapped (`isinstance(str, keys)`),
    # which raised TypeError for any string or list `keys`. The object comes
    # first, the type second.
    resolved_keys = keys.split('.') if isinstance(keys, str) else keys
    current_value = dict_or_obj
    for key in resolved_keys:
        # Fall back to an empty dict at each step so missing intermediate
        # values resolve to `default` instead of raising.
        current_value = prop_or(default, key, default_to({}, current_value))
    return current_value
async def power(dev: Device, cmd, target, value):
    """Turn on and off, control power settings.

    Accepts commands 'on', 'off', and 'settings'.
    """
    async def _toggle(desired):
        # set_power raises error code 3 when the device is already in the
        # requested state; report that instead of failing.
        try:
            return await dev.set_power(desired == "on")
        except SongpalException as ex:
            if ex.code != 3:
                raise ex
            err("The device is already %s." % desired)

    if cmd in ("on", "off"):
        click.echo(await _toggle(cmd))
    elif cmd == "settings":
        settings = await dev.get_power_settings()
        print_settings(settings)
    elif cmd == "set" and target and value:
        click.echo(await dev.set_power_settings(target, value))
    else:
        # No recognized command: just report the current power state,
        # rendered bold when the device is on.
        current = await dev.get_power()
        click.echo(click.style(str(current), bold=current))
def cmd_gyrocal(self, args):
    '''do a full gyro calibration'''
    master = self.master
    # MAV_CMD_PREFLIGHT_CALIBRATION with param1=1 requests gyro calibration;
    # all other params are zero.
    master.mav.command_long_send(
        master.target_system,
        master.target_component,
        mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,
        0,
        1, 0, 0, 0, 0, 0, 0)
def _makeplot ( self , ax , fig , data , ymin = None , ymax = None , height = 6 , width = 6 , dos = None , color = None ) : """Utility method to tidy phonon band structure diagrams ."""
# Define colours if color is None : color = 'C0' # Default to first colour in matplotlib series # set x and y limits tymax = ymax if ( ymax is not None ) else max ( flatten ( data [ 'frequency' ] ) ) tymin = ymin if ( ymin is not None ) else min ( flatten ( data [ 'frequency' ] ) ) pad = ( tymax - tymin ) * 0.05 if ymin is None : ymin = 0 if tymin >= self . imag_tol else tymin - pad ymax = ymax if ymax else tymax + pad ax . set_ylim ( ymin , ymax ) ax . set_xlim ( 0 , data [ 'distances' ] [ - 1 ] [ - 1 ] ) if ymin < 0 : dashline = True ax . axhline ( 0 , color = rcParams [ 'grid.color' ] , linestyle = '--' , dashes = dashes , zorder = 0 , linewidth = rcParams [ 'ytick.major.width' ] ) else : dashline = False if dos is not None : self . _plot_phonon_dos ( dos , ax = fig . axes [ 1 ] , color = color , dashline = dashline ) else : # keep correct aspect ratio ; match axis to canvas x0 , x1 = ax . get_xlim ( ) y0 , y1 = ax . get_ylim ( ) if width is None : width = rcParams [ 'figure.figsize' ] [ 0 ] if height is None : height = rcParams [ 'figure.figsize' ] [ 1 ] ax . set_aspect ( ( height / width ) * ( ( x1 - x0 ) / ( y1 - y0 ) ) )
def build_D3treepie(old, MAX_DEPTH, level=1, toplayer=None):
    """Create the JSON needed by the treePie viz.

    http://bl.ocks.org/adewes/4710330/94a7c0aeb6f09d681dbfdd0e5150578e4935c6ae

    Eg:
    ['origin', [n1, n2], {'name1': ['name1', [n1, n2], {'name1-1': ...}]}]
    """
    result = {}
    # Fall back to the top layer when no nodes were passed in.
    nodes = old if old else toplayer
    for node in nodes:
        label = node.bestLabel(quotes=False).replace("_", " ")
        if node.children() and level < MAX_DEPTH:
            # Internal node: size is the child count; recurse one level down.
            size = len(node.children())
            result[node.qname] = [
                label,
                [size, size],
                build_D3treepie(node.children(), MAX_DEPTH, level + 1),
            ]
        else:
            # Leaf (or max depth reached): unit size, no subtree.
            result[node.qname] = [label, [1, 1], {}]
    return result
def get_eval_func(obj, feature, slice=np.s_[...]):
    """Return the function of interest (kernel or mean) for the expectation
    depending on the type of :obj: and whether any features are given.
    """
    if feature is not None:
        # kernel + feature combination
        if not isinstance(feature, InducingFeature) or \
                not isinstance(obj, kernels.Kernel):
            raise TypeError("If `feature` is supplied, `obj` must be a kernel.")
        return lambda x: tf.transpose(Kuf(feature, obj, x))[slice]
    if isinstance(obj, mean_functions.MeanFunction):
        return lambda x: obj(x)[slice]
    if isinstance(obj, kernels.Kernel):
        return lambda x: obj.Kdiag(x)
    raise NotImplementedError()
def _to_power_basis_degree4(nodes1, nodes2):
    r"""Compute the coefficients of an **intersection polynomial**.

    Helper for :func:`to_power_basis` in the case that B|eacute|zout's
    `theorem`_ tells us the **intersection polynomial** is degree
    :math:`4`. This happens if the two curves have degrees two and two
    or have degrees one and four.

    Args:
        nodes1 (numpy.ndarray): The nodes in the first curve.
        nodes2 (numpy.ndarray): The nodes in the second curve.

    Returns:
        numpy.ndarray: ``5``-array of coefficients.
    """
    # Sample the intersection polynomial at five evenly spaced parameters,
    # i.e. evaluate against the Vandermonde system:
    # [1 0    0     0     0    ] [c0] = [n0]
    # [1 1/4  1/16  1/64  1/256] [c1]   [n1]
    # [1 1/2  1/4   1/8   1/16 ] [c2]   [n2]
    # [1 3/4  9/16  27/64 81/256] [c3]  [n3]
    # [1 1    1     1     1    ] [c4]   [n4]
    val0, val1, val2, val3, val4 = [
        eval_intersection_polynomial(nodes1, nodes2, s)
        for s in (0.0, 0.25, 0.5, 0.75, 1.0)
    ]
    # Apply 3x the inverse Vandermonde matrix:
    # [c0]         [  3    0    0    0    0] [n0]
    # [c1] = 1/3   [-25   48  -36   16   -3] [n1]
    # [c2]         [ 70 -208  228 -112   22] [n2]
    # [c3]         [-80  288 -384  224  -48] [n3]
    # [c4]         [ 32 -128  192 -128   32] [n4]
    # A common scalar factor doesn't change the polynomial's roots, so we
    # skip the division by 3 to avoid round-off.
    return np.asfortranarray([
        3.0 * val0,
        -25.0 * val0 + 48.0 * val1 - 36.0 * val2 + 16.0 * val3 - 3.0 * val4,
        70.0 * val0 - 208.0 * val1 + 228.0 * val2 - 112.0 * val3 +
        22.0 * val4,
        (-80.0 * val0 + 288.0 * val1 - 384.0 * val2 + 224.0 * val3 -
         48.0 * val4),
        32.0 * val0 - 128.0 * val1 + 192.0 * val2 - 128.0 * val3 +
        32.0 * val4,
    ])
def score_for_percentile_in(self, leaderboard_name, percentile):
    '''Calculate the score for a given percentile value in the leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param percentile [float] Percentile value (0.0 to 100.0 inclusive).
    @return the score corresponding to the percentile argument. Return
    +None+ for arguments outside 0-100 inclusive and for leaderboards
    with no members.
    '''
    if not 0 <= percentile <= 100:
        return None

    total_members = self.total_members_in(leaderboard_name)
    if total_members < 1:
        return None

    # Ascending leaderboards mirror the percentile.
    if self.order == self.ASC:
        percentile = 100 - percentile

    # Fractional rank corresponding to the requested percentile.
    index = (total_members - 1) * (percentile / 100.0)
    low = int(math.floor(index))
    high = int(math.ceil(index))
    scores = [pair[1]
              for pair in self.redis_connection.zrange(
                  leaderboard_name, low, high, withscores=True)]

    if index == math.floor(index):
        # Exact rank: no interpolation needed.
        return scores[0]
    # Linearly interpolate between the two neighboring scores.
    interpolate_fraction = index - math.floor(index)
    return scores[0] + interpolate_fraction * (scores[1] - scores[0])