idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
6,000
def value(self):
    """Read the current probe value from the instrument and buffer it."""
    reading = getattr(self.instrument, self.probe_name)
    self.buffer.append(reading)
    return reading
reads the value from the instrument
6,001
def load_and_append ( probe_dict , probes , instruments = { } ) : loaded_failed = { } updated_probes = { } updated_probes . update ( probes ) updated_instruments = { } updated_instruments . update ( instruments ) new_instruments = list ( set ( probe_dict . keys ( ) ) - set ( probes . keys ( ) ) ) if new_instruments != ...
load probes from probe_dict and append to probes if additional instruments are required create them and add them to instruments
6,002
def get(self, key):
    """Return the entry whose user-controlled ID is *key*.

    Raises:
        KeyError: if *key* is not a known ID.
    """
    try:
        # A miss in id_lookup yields None, and indexing with None
        # raises TypeError, which is surfaced as a KeyError.
        return self[self.id_lookup.get(key)]
    except TypeError:
        raise KeyError
Returns an address by user controlled input ID
6,003
def get_index(self, key):
    """Return the entry at the list index mapped from *key* via index_lookup.

    Raises:
        KeyError: if *key* is not in the index lookup.
    """
    try:
        # a lookup miss yields None; self[None] raises TypeError -> KeyError
        return self[self.index_lookup.get(key)]
    except TypeError:
        raise KeyError
Returns an address by input index — a value that matches the list index of the provided lookup value, not necessarily the result .
6,004
def _igamc ( a , x ) : ax = math . exp ( a * math . log ( x ) - x - math . lgamma ( a ) ) y = 1.0 - a z = x + y + 1.0 c = 0.0 pkm2 = 1.0 qkm2 = x pkm1 = x + 1.0 qkm1 = z * x ans = pkm1 / qkm1 while True : c += 1.0 y += 1.0 z += 2.0 yc = y * c pk = pkm1 * z - pkm2 * yc qk = qkm1 * z - qkm2 * yc if qk != 0 : r = pk / qk ...
Complemented incomplete Gamma integral .
6,005
def main():
    """Run the simplest TauDEM-based watershed delineation example."""
    dem = '../tests/data/Jamaica_dem.tif'
    num_proc = 2
    working_dir = '../tests/data/tmp_results/wtsd_delineation'
    TauDEMWorkflow.watershed_delineation(num_proc, dem, workingdir=working_dir)
The simplest usage of watershed delineation based on TauDEM .
6,006
def _get_line ( self , search_string , search_file , return_string = True , case_sens = True ) : if os . path . isfile ( search_file ) : if type ( search_string ) == type ( '' ) : search_string = [ search_string ] if not case_sens : search_string = [ i . lower ( ) for i in search_string ] with open ( search_file ) as f...
Return the first line containing a set of strings in a file .
6,007
def get_cutoff_energy(self):
    """Report the kinetic-energy cutoff recorded in the parsed settings."""
    cutoff = self.settings["kinetic-energy cutoff"]
    units = self.settings['kinetic-energy cutoff units']
    return Value(scalars=[Scalar(value=cutoff)], units=units)
Determine the cutoff energy from the output
6,008
def get_pp_name ( self ) : ppnames = [ ] natomtypes = int ( self . _get_line ( 'number of atomic types' , self . outputf ) . split ( ) [ 5 ] ) with open ( self . outputf ) as fp : for line in fp : if "PseudoPot. #" in line : ppnames . append ( Scalar ( value = next ( fp ) . split ( '/' ) [ - 1 ] . rstrip ( ) ) ) if len...
Determine the pseudopotential names from the output
6,009
def get_U_settings ( self ) : with open ( self . outputf ) as fp : for line in fp : if "LDA+U calculation" in line : U_param = { } U_param [ 'Type' ] = line . split ( ) [ 0 ] U_param [ 'Values' ] = { } for nl in range ( 15 ) : line2 = next ( fp ) . split ( ) if len ( line2 ) > 1 and line2 [ 0 ] == "atomic" : pass elif ...
Determine the DFT + U type and parameters from the output
6,010
def get_vdW_settings ( self ) : xc = self . get_xc_functional ( ) . scalars [ 0 ] . value if 'vdw' in xc . lower ( ) : return Value ( scalars = [ Scalar ( value = xc ) ] ) else : vdW_dict = { 'xdm' : 'Becke-Johnson XDM' , 'ts' : 'Tkatchenko-Scheffler' , 'ts-vdw' : 'Tkatchenko-Scheffler' , 'tkatchenko-scheffler' : 'Tkat...
Determine the vdW type if using vdW xc functional or correction scheme from the input otherwise
6,011
def get_stresses(self):
    """Return the stress tensor from the output, or None if it was not parsed."""
    if "stress" not in self.settings:
        return None
    rows = self.settings["stress"]
    wrapped = [[Scalar(value=entry) for entry in row] for row in rows]
    return Property(matrices=[wrapped], units=self.settings["stress units"])
Determine the stress tensor from the output
6,012
def get_dos ( self ) : fildos = '' for f in self . _files : with open ( f , 'r' ) as fp : first_line = next ( fp ) if "E (eV)" in first_line and "Int dos(E)" in first_line : fildos = f ndoscol = len ( next ( fp ) . split ( ) ) - 2 fp . close ( ) break fp . close ( ) if not fildos : return None line = self . _get_line (...
Find the total DOS shifted by the Fermi energy
6,013
def get_band_gap ( self ) : dosdata = self . get_dos ( ) if type ( dosdata ) == type ( None ) : return None else : energy = dosdata . conditions . scalars dos = dosdata . scalars step_size = energy [ 1 ] . value - energy [ 0 ] . value not_found = True l = 0 bot = 10 ** 3 top = - 10 ** 3 while not_found and l < len ( do...
Compute the band gap from the DOS
6,014
def get_category_aliases_under(parent_alias=None):
    """Return the aliases of categories directly under *parent_alias*."""
    children = get_cache().get_children_for(parent_alias, only_with_aliases=True)
    return [child.alias for child in children]
Returns a list of category aliases under the given parent .
6,015
def get_category_lists ( init_kwargs = None , additional_parents_aliases = None , obj = None ) : init_kwargs = init_kwargs or { } additional_parents_aliases = additional_parents_aliases or [ ] parent_aliases = additional_parents_aliases if obj is not None : ctype = ContentType . objects . get_for_model ( obj ) cat_ids ...
Returns a list of CategoryList objects optionally associated with a given model instance .
6,016
def register_lists ( self , category_lists , lists_init_kwargs = None , editor_init_kwargs = None ) : lists_init_kwargs = lists_init_kwargs or { } editor_init_kwargs = editor_init_kwargs or { } for lst in category_lists : if isinstance ( lst , string_types ) : lst = self . list_cls ( lst , ** lists_init_kwargs ) elif n...
Registers CategoryList objects to handle their requests .
6,017
def action_remove ( cls , request , category_list ) : if not category_list . editor . allow_remove : raise SitecatsSecurityException ( '`action_remove()` is not supported by parent `%s`category.' % category_list . alias ) category_id = int ( request . POST . get ( 'category_id' , 0 ) ) if not category_id : raise Siteca...
Handles remove action from CategoryList editor .
6,018
def action_add ( cls , request , category_list ) : if not category_list . editor . allow_add : raise SitecatsSecurityException ( '`action_add()` is not supported by `%s` category.' % category_list . alias ) titles = request . POST . get ( 'category_title' , '' ) . strip ( ) if not titles : raise SitecatsSecurityExcepti...
Handles add action from CategoryList editor .
6,019
def shrink ( image , apikey ) : def _handle_response ( response ) : body = json . loads ( response . read ( ) ) if response . code == TinyPNGResponse . SUCCESS_CODE : body [ 'location' ] = response . headers . getheader ( "Location" ) try : body [ 'bytes' ] = urlopen ( body [ 'location' ] ) . read ( ) except : body [ '...
To shrink a PNG image post the data to the API service . The response is a JSON message . The initial request must be authorized with HTTP Basic authorization .
6,020
def download_and_install_dependencies ( ) : try : import requests except ImportError : raise ValueError ( "Python 3.6+ is required." ) dependencies = { "hmm_databases" : HMM_URL } if sys . platform . startswith ( "linux" ) or "bsd" in sys . platform : dependencies [ "prodigal" ] = "{}.linux" . format ( BASE_PRODIGAL ) ...
Setup URLS and download dependencies for Python 3 . 6 +
6,021
def get(self):
    """Collect whichever of font, size and text are set, as a list (in that order)."""
    contents = []
    for attr in ("font", "size", "text"):
        if hasattr(self, attr):
            contents.append(getattr(self, attr))
    return contents
method to fetch all contents as a list
6,022
def extract_by_prefix_surfix ( text , prefix , surfix , minlen = None , maxlen = None , include = False ) : if minlen is None : minlen = 0 if maxlen is None : maxlen = 2 ** 30 pattern = r % ( prefix , minlen , maxlen , surfix ) if include : return [ prefix + s + surfix for s in re . findall ( pattern , text ) ] else : ...
Extract the text in between a prefix and a suffix . It uses a non - greedy match .
6,023
def extract_number ( text ) : result = list ( ) chunk = list ( ) valid_char = set ( ".1234567890" ) for char in text : if char in valid_char : chunk . append ( char ) else : result . append ( "" . join ( chunk ) ) chunk = list ( ) result . append ( "" . join ( chunk ) ) result_new = list ( ) for number in result : if "...
Extract digit character from text .
6,024
def extract_email(text):
    """Extract all valid email addresses found in *text* (lower-cased)."""
    found = []
    for groups in re.findall(_regex_extract_email, text.lower()):
        for candidate in groups:
            if re.match(_regex_validate_email, candidate):
                found.append(candidate)
    return found
Extract email from text .
6,025
def sign ( self , headers : Mapping , method = None , path = None ) : required_headers = self . header_list message = generate_message ( required_headers , headers , method , path ) signature = encode_string ( self . _signer . sign ( message ) , 'base64' ) ret_headers = multidict . CIMultiDict ( headers ) ret_headers [...
Add Signature Authorization header to case - insensitive header dict .
6,026
async def verify ( self , headers : Mapping , method = None , path = None ) : if not 'authorization' in headers : return False auth_type , auth_params = parse_authorization_header ( headers [ 'authorization' ] ) if auth_type . lower ( ) != 'signature' : return False for param in ( 'algorithm' , 'keyId' , 'signature' ) ...
Parse Signature Authorization header and verify signature
6,027
def docpie ( self , argv = None ) : token = self . _prepare_token ( argv ) self . check_flag_and_handler ( token ) if token . error is not None : self . exception_handler ( token . error ) try : result , dashed = self . _match ( token ) except DocpieExit as e : self . exception_handler ( e ) value = result . get_value ...
match the argv for each usages return dict .
6,028
def clone_exception(error, args):
    """Return a copy of *error* rebuilt with *args*.

    The clone keeps the original's instance attributes.  The attribute
    dict is copied (the original code aliased it, so mutating the clone
    silently mutated the source exception too).
    """
    new_error = error.__class__(*args)
    new_error.__dict__ = dict(error.__dict__)
    return new_error
return a new cloned error
6,029
def to_dict ( self ) : config = { 'stdopt' : self . stdopt , 'attachopt' : self . attachopt , 'attachvalue' : self . attachvalue , 'auto2dashes' : self . auto2dashes , 'case_sensitive' : self . case_sensitive , 'namedoptions' : self . namedoptions , 'appearedonly' : self . appeared_only , 'optionsfirst' : self . option...
Convert Docpie into a JSONlizable dict .
6,030
def from_dict ( cls , dic ) : if '__version__' not in dic : raise ValueError ( 'Not support old docpie data' ) data_version = int ( dic [ '__version__' ] . replace ( '.' , '' ) ) this_version = int ( cls . _version . replace ( '.' , '' ) ) logger . debug ( 'this: %s, old: %s' , this_version , data_version ) if data_ver...
Convert dict generated by convert_2_dict into Docpie instance
6,031
def set_config ( self , ** config ) : reinit = False if 'stdopt' in config : stdopt = config . pop ( 'stdopt' ) reinit = ( stdopt != self . stdopt ) self . stdopt = stdopt if 'attachopt' in config : attachopt = config . pop ( 'attachopt' ) reinit = reinit or ( attachopt != self . attachopt ) self . attachopt = attachop...
Shadow all the current config .
6,032
def find_flag_alias(self, flag):
    """Return the set of aliases of *flag*, or None if it is not defined."""
    for name_group in self.opt_names:
        if flag in name_group:
            aliases = set(name_group)
            aliases.discard(flag)
            return aliases
    return None
Return alias set of a flag ; return None if flag is not defined in Options .
6,033
def set_auto_handler(self, flag, handler):
    """Register *handler* as the pre-auto-handler for *flag* and all its aliases."""
    assert flag.startswith('-') and flag not in ('-', '--')
    for alias in (self.find_flag_alias(flag) or []):
        self.extra[alias] = handler
    self.extra[flag] = handler
Set pre - auto - handler for a flag .
6,034
def preview ( self , stream = sys . stdout ) : write = stream . write write ( ( '[Quick preview of Docpie %s]' % self . _version ) . center ( 80 , '=' ) ) write ( '\n' ) write ( ' sections ' . center ( 80 , '-' ) ) write ( '\n' ) write ( self . usage_text ) write ( '\n' ) option_sections = self . option_sections if opt...
A quick preview of docpie . Print all the parsed object
6,035
def refresh_core(self):
    """Query the device for every attribute that is valid in any power state."""
    self.log.info('Sending out mass query for all attributes')
    for attribute in ATTR_CORE:
        self.query(attribute)
Query device for all attributes that exist regardless of power state .
6,036
def poweron_refresh(self):
    """Keep re-requesting all attributes (every 2 s) until a refresh succeeds."""
    if self._poweron_refresh_successful:
        return
    self.refresh_all()
    # try again shortly; the flag is presumably set elsewhere on success
    self._loop.call_later(2, self.poweron_refresh)
Keep requesting all attributes until it works .
6,037
def refresh_all(self):
    """Query the device for every known attribute."""
    self.log.info('refresh_all')
    for attribute in LOOKUP:
        self.query(attribute)
Query device for all attributes that are known .
6,038
def connection_made ( self , transport ) : self . log . info ( 'Connection established to AVR' ) self . transport = transport limit_low , limit_high = self . transport . get_write_buffer_limits ( ) self . log . debug ( 'Write buffer limits %d to %d' , limit_low , limit_high ) self . command ( 'ECH1' ) self . refresh_co...
Called when asyncio . Protocol establishes the network connection .
6,039
def data_received(self, data):
    """asyncio.Protocol callback: accumulate incoming bytes, then parse."""
    decoded = data.decode()
    self.buffer = self.buffer + decoded
    self.log.debug('Received %d bytes from AVR: %s', len(self.buffer), self.buffer)
    self._assemble_buffer()
Called when asyncio . Protocol detects received data from network .
6,040
def connection_lost(self, exc):
    """asyncio.Protocol callback: handle a dropped network connection."""
    if exc is not None:
        self.log.warning('Lost connection to receiver: %s', exc)
    else:
        # clean EOF rather than an error
        self.log.warning('eof from receiver?')
    self.transport = None
    if self._connection_lost_callback:
        self._loop.call_soon(self._connection_lost_callback)
Called when asyncio . Protocol loses the network connection .
6,041
def _assemble_buffer ( self ) : self . transport . pause_reading ( ) for message in self . buffer . split ( ';' ) : if message != '' : self . log . debug ( 'assembled message ' + message ) self . _parse_message ( message ) self . buffer = "" self . transport . resume_reading ( ) return
Split up received data from device into individual commands .
6,042
def _populate_inputs ( self , total ) : total = total + 1 for input_number in range ( 1 , total ) : self . query ( 'ISN' + str ( input_number ) . zfill ( 2 ) )
Request the names for all active configured inputs on the device .
6,043
def formatted_command(self, command):
    """Send a raw, already-formatted command string to the device.

    The command is encoded to bytes and written to the transport; a short
    sleep paces consecutive writes.  A missing/closed transport is logged
    rather than raised (best-effort send).  Removes the original's no-op
    ``command = command`` and narrows its bare ``except:``.
    """
    payload = command.encode()
    self.log.debug('> %s', payload)
    try:
        self.transport.write(payload)
        time.sleep(0.01)
    except (AttributeError, OSError):
        # AttributeError: transport is None / never connected;
        # OSError: the underlying write failed
        self.log.warning('No transport found, unable to send command')
Issue a raw formatted command to the device .
6,044
def dump_rawdata(self):
    """Render the transport object's attributes for debugging forensics.

    Returns None when no transport has been attached yet.
    """
    if not hasattr(self, 'transport'):
        return None
    attributes = vars(self.transport)
    return ', '.join("%s: %s" % pair for pair in attributes.items())
Return contents of transport object for debugging forensics .
6,045
def add_upsert ( self , value , criteria ) : value = value . strip ( ) v = value . lower ( ) self . lower_val_to_val [ v ] = value criteria_array = self . upserts . get ( v ) if criteria_array is None : criteria_array = [ ] self . upserts_size [ v ] = 31 + len ( value ) criteria_array . append ( criteria . to_dict ( ) ...
Add a tag or populator to the batch by value and criteria
6,046
def add_delete(self, value):
    """Queue *value* for deletion (deletes are processed before upserts).

    Raises:
        ValueError: if *value* is empty after stripping.  Validation now
            happens before any state is touched; the original recorded
            the value in ``lower_val_to_val`` first, polluting the batch
            on invalid input.
    """
    value = value.strip()
    normalized = value.lower()
    if not normalized:
        raise ValueError("Invalid value for delete. Value is empty.")
    self.lower_val_to_val[normalized] = value
    self.deletes.add(normalized)
Delete a tag or populator by value - these are processed before upserts
6,047
def parts ( self ) : parts = [ ] upserts = dict ( ) deletes = [ ] max_upload_size = 700000 base_part_size = 118 if not self . replace_all : base_part_size += 1 part_size = base_part_size for value in self . upserts : if ( part_size + self . upserts_size [ value ] ) >= max_upload_size : parts . append ( BatchPart ( self...
Return an array of batch parts to submit
6,048
def build_json(self, guid):
    """Serialize this batch, tagged with *guid*, to a JSON string.

    ``self.deletes`` is a set (populated by ``add_delete``) and sets are
    not JSON-serializable, so it is emitted as a sorted list for
    deterministic output.
    """
    upserts = [
        {"value": value, "criteria": self.upserts[value]}
        for value in self.upserts
    ]
    return json.dumps({'replace_all': self.replace_all,
                       'guid': guid,
                       'complete': self.complete,
                       'upserts': upserts,
                       'deletes': sorted(self.deletes)})
Build JSON with the input guid
6,049
def _ensure_field ( self , key ) : if self . _has_field : self . _size += 2 self . _has_field = True self . _size += len ( key ) + 4
Ensure a non - array field
6,050
def _ensure_array ( self , key , value ) : if key not in self . _json_dict : self . _json_dict [ key ] = [ ] self . _size += 2 self . _ensure_field ( key ) if len ( self . _json_dict [ key ] ) > 0 : self . _size += 2 if isinstance ( value , str ) : self . _size += 2 self . _size += len ( str ( value ) ) self . _json_di...
Ensure an array field
6,051
def add_tcp_flag ( self , tcp_flag ) : if tcp_flag not in [ 1 , 2 , 4 , 8 , 16 , 32 , 64 , 128 ] : raise ValueError ( "Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]" ) prev_size = 0 if self . _json_dict . get ( 'tcp_flags' ) is None : self . _json_dict [ 'tcp_flags' ] = 0 else : prev_size = len ( str ( self . _...
Add a single TCP flag - will be OR d into the existing bitmask
6,052
def set_tcp_flags ( self , tcp_flags ) : if tcp_flags < 0 or tcp_flags > 255 : raise ValueError ( "Invalid tcp_flags. Valid: 0-255." ) prev_size = 0 if self . _json_dict . get ( 'tcp_flags' ) is not None : prev_size = len ( str ( self . _json_dict [ 'tcp_flags' ] ) ) + len ( 'tcp_flags' ) + 3 self . _json_dict [ 'tcp_f...
Set the complete tcp flag bitmask
6,053
def _submit_batch ( self , url , batch ) : batch_parts = batch . parts ( ) guid = "" headers = { 'User-Agent' : 'kentik-python-api/0.1' , 'Content-Type' : 'application/json' , 'X-CH-Auth-Email' : self . api_email , 'X-CH-Auth-API-Token' : self . api_token } last_part = dict ( ) for batch_part in batch_parts : resp = re...
Submit the batch returning the JSON - > dict from the last HTTP response
6,054
def submit_populator_batch ( self , column_name , batch ) : if not set ( column_name ) . issubset ( _allowedCustomDimensionChars ) : raise ValueError ( 'Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name ) if len ( column_name ) < 3 or len ( column_name ) > 20 : raise ...
Submit a populator batch
6,055
def submit_tag_batch(self, batch):
    """Submit a tag batch to the v5 batch endpoint."""
    endpoint = '%s/api/v5/batch/tags' % self.base_url
    self._submit_batch(endpoint, batch)
Submit a tag batch
6,056
def fetch_batch_status ( self , guid ) : url = '%s/api/v5/batch/%s/status' % ( self . base_url , guid ) headers = { 'User-Agent' : 'kentik-python-api/0.1' , 'Content-Type' : 'application/json' , 'X-CH-Auth-Email' : self . api_email , 'X-CH-Auth-API-Token' : self . api_token } resp = requests . get ( url , headers = hea...
Fetch the status of a batch given the guid
6,057
def predict_files ( self , files ) : imgs = [ 0 ] * len ( files ) for i , file in enumerate ( files ) : img = cv2 . imread ( file ) . astype ( 'float64' ) img = cv2 . resize ( img , ( 224 , 224 ) ) img = preprocess_input ( img ) if img is None : print ( 'failed to open: {}, continuing...' . format ( file ) ) imgs [ i ]...
Reads files off disk , resizes them and then predicts them . Files should be a list or iterable of file paths that lead to images ; they are then loaded with OpenCV , resized and predicted .
6,058
def rename_genome ( genome_in , genome_out = None ) : if genome_out is None : genome_out = "{}_renamed.fa" . format ( genome_in . split ( "." ) [ 0 ] ) with open ( genome_out , "w" ) as output_handle : for record in SeqIO . parse ( genome_in , "fasta" ) : new_record_id = record . id . replace ( " " , "_" ) new_record_i...
Rename genome and slugify headers
6,059
def filter_genome ( genome_in , threshold = 500 , list_records = None ) : if list_records is None : def truth ( * args ) : del args return True is_a_record_to_keep = truth else : try : with open ( list_records ) as records_handle : records_to_keep = records_handle . readlines ( ) except OSError : if not hasattr ( list_...
Filter fasta file according to various parameters .
6,060
def rename_proteins ( prot_in , prot_out = None , chunk_size = DEFAULT_CHUNK_SIZE ) : if prot_out is None : prot_out = "{}_renamed.fa" . format ( prot_in . split ( "." ) [ 0 ] ) with open ( prot_out , "w" ) as prot_out_handle : for record in SeqIO . parse ( prot_in , "fasta" ) : header = record . description name , pos...
Rename prodigal output files
6,061
def write_records(records, output_file, split=False):
    """Write FASTA records: one combined file, or one file per record if *split*."""
    if not split:
        SeqIO.write(records, output_file, "fasta")
        return
    for record in records:
        path = "{}{}.fa".format(output_file, record.id)
        with open(path, "w") as record_handle:
            SeqIO.write(record, record_handle, "fasta")
Write FASTA records
6,062
def add_sample(self, **data):
    """Append one sample to this series.

    Keys of *data* not declared as dimensions raise KeyError; declared
    dimensions absent from *data* are padded with None.
    """
    unknown = set(data) - set(self.dimensions)
    if unknown:
        raise KeyError('Dimensions not defined in this series: %s' % ', '.join(unknown))
    for dim in self.dimensions:
        getattr(self, dim).append(data.get(dim))
Add a sample to this series .
6,063
def samples(self):
    """Yield each stored sample as a dict keyed by dimension name."""
    names = self.series.dimensions
    columns = (getattr(self.series, name) for name in names)
    for row in zip(*columns):
        yield dict(zip(names, row))
Yield the samples as dicts keyed by dimensions .
6,064
def write_binary(filename, data):
    """Create the directory path to *filename* (if any) and write *data*.

    Fixes two issues in the original: ``os.makedirs('')`` crashed when
    *filename* had no directory component, and the exists-then-create
    sequence raced with concurrent writers (now ``exist_ok=True``).
    Also avoids shadowing the ``dir`` builtin.
    """
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(filename, 'wb') as f:
        f.write(data)
Create path to filename and saves binary data
6,065
def files_with_exts(root='.', suffix=''):
    """Yield paths of files under *root* whose names end with *suffix*."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if filename.endswith(suffix):
                yield os.path.join(dirpath, filename)
Returns generator that contains filenames from root directory and ends with suffix
6,066
def find_apikey ( ) : env_keys = [ 'TINYPNG_APIKEY' , 'TINYPNG_API_KEY' ] paths = [ ] paths . append ( os . path . join ( os . path . abspath ( "." ) , "tinypng.key" ) ) paths . append ( os . path . expanduser ( "~/.tinypng.key" ) ) for env_key in env_keys : if os . environ . get ( env_key ) : return os . environ . get...
Finds TinyPNG API key
6,067
def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True):
    """Compare two RPM strings by their EVR tuples to determine which is newer."""
    logger.debug('resolve_versions(%s, %s)', rpm_str_a, rpm_str_b)
    evrs = [parse_package(s, arch_provided)['EVR'] for s in (rpm_str_a, rpm_str_b)]
    return labelCompare(*evrs)
Compare two RPM strings to determine which is newer
6,068
def compare_evrs(evr_a, evr_b):
    """Compare two (epoch, version, release) tuples; epoch dominates, then
    version, then release."""
    a_epoch, a_ver, a_rel = evr_a
    b_epoch, b_ver, b_rel = evr_b
    if a_epoch != b_epoch:
        return a_newer if a_epoch > b_epoch else b_newer
    ver_comp = compare_versions(a_ver, b_ver)
    if ver_comp != a_eq_b:
        return ver_comp
    return compare_versions(a_rel, b_rel)
Compare two EVR tuples to determine which is newer
6,069
def compare_versions ( version_a , version_b ) : logger . debug ( 'compare_versions(%s, %s)' , version_a , version_b ) if version_a == version_b : return a_eq_b try : chars_a , chars_b = list ( version_a ) , list ( version_b ) except TypeError : raise RpmError ( 'Could not compare {0} to ' '{1}' . format ( version_a , ...
Compare two RPM version strings
6,070
def package ( package_string , arch_included = True ) : logger . debug ( 'package(%s, %s)' , package_string , arch_included ) pkg_info = parse_package ( package_string , arch_included ) pkg = Package ( pkg_info [ 'name' ] , pkg_info [ 'EVR' ] [ 0 ] , pkg_info [ 'EVR' ] [ 1 ] , pkg_info [ 'EVR' ] [ 2 ] , pkg_info [ 'arc...
Parse an RPM version string
6,071
def parse_package ( package_string , arch_included = True ) : logger . debug ( 'parse_package(%s, %s)' , package_string , arch_included ) default_epoch = '0' arch = None if arch_included : char_list = list ( package_string ) arch = _pop_arch ( char_list ) package_string = '' . join ( char_list ) logger . debug ( 'updat...
Parse an RPM version string to get name version and arch
6,072
def _pop_arch ( char_list ) : logger . debug ( '_pop_arch(%s)' , char_list ) arch_list = [ ] char = char_list . pop ( ) while char != '.' : arch_list . insert ( 0 , char ) try : char = char_list . pop ( ) except IndexError : raise RpmError ( 'Could not parse an architecture. Did you mean to ' 'set the arch_included fla...
Pop the architecture from a version string and return it
6,073
def _check_leading(*char_lists):
    """Strip leading characters that are neither alphanumeric nor '~', in place."""
    logger.debug('_check_leading(%s)', char_lists)
    for char_list in char_lists:
        while char_list and not (char_list[0].isalnum() or char_list[0] == '~'):
            char_list.pop(0)
        logger.debug('updated list: %s', char_list)
Remove any non - alphanumeric or non - ~ leading characters
6,074
def _trim_zeros(*char_lists):
    """Remove leading '0' characters from each provided list, in place."""
    logger.debug('_trim_zeros(%s)', char_lists)
    for char_list in char_lists:
        while char_list and char_list[0] == '0':
            char_list.pop(0)
        logger.debug('updated block: %s', char_list)
Trim any zeros from provided character lists
6,075
def _pop_digits(char_list):
    """Pop and return the run of leading digits from *char_list*, in place."""
    logger.debug('_pop_digits(%s)', char_list)
    digits = []
    while char_list and char_list[0].isdigit():
        digits.append(char_list.pop(0))
    logger.debug('got digits: %s', digits)
    logger.debug('updated char list: %s', char_list)
    return digits
Pop consecutive digits from the front of list and return them
6,076
def _pop_letters ( char_list ) : logger . debug ( '_pop_letters(%s)' , char_list ) letters = [ ] while len ( char_list ) != 0 and char_list [ 0 ] . isalpha ( ) : letters . append ( char_list . pop ( 0 ) ) logger . debug ( 'got letters: %s' , letters ) logger . debug ( 'updated char list: %s' , char_list ) return letter...
Pop consecutive letters from the front of a list and return them
6,077
def _compare_blocks ( block_a , block_b ) : logger . debug ( '_compare_blocks(%s, %s)' , block_a , block_b ) if block_a [ 0 ] . isdigit ( ) : _trim_zeros ( block_a , block_b ) if len ( block_a ) != len ( block_b ) : logger . debug ( 'block lengths are not equal' ) return a_newer if len ( block_a ) > len ( block_b ) els...
Compare two blocks of characters
6,078
def _get_block_result ( chars_a , chars_b ) : logger . debug ( '_get_block_result(%s, %s)' , chars_a , chars_b ) first_is_digit = chars_a [ 0 ] . isdigit ( ) pop_func = _pop_digits if first_is_digit else _pop_letters return_if_no_b = a_newer if first_is_digit else b_newer block_a , block_b = pop_func ( chars_a ) , pop_...
Get the first block from two character lists and compare
6,079
def list_ ( * , cursor : str = None , exclude_archived : bool = None , exclude_members : bool = None , limit : int = None ) -> snug . Query [ Page [ t . List [ Channel ] ] ] : kwargs = { 'exclude_archived' : exclude_archived , 'exclude_members' : exclude_members , 'limit' : limit } response = yield { 'cursor' : cursor ...
list all channels
6,080
def create(name: str, *, validate: bool = None) -> snug.Query[Channel]:
    """Create a new channel named *name*; optionally ask the API to validate it."""
    params = {'name': name, 'validate': validate}
    return params
create a new channel
6,081
def tube(self, name):
    """Return the tube named *name*, creating and caching it on first use."""
    try:
        return self._tubes[name]
    except KeyError:
        pass
    assert name, 'Tube name must be specified'
    new_tube = self._tube_cls(self, name)
    self._tubes[name] = new_tube
    return new_tube
Returns tube by its name
6,082
def device_measurement ( device , ts = None , part = None , result = None , code = None , ** kwargs ) : if ts is None : ts = local_now ( ) payload = MeasurementPayload ( device = device , part = part ) m = Measurement ( ts , result , code , list ( kwargs ) ) payload . measurements . append ( m ) m . add_sample ( ts , *...
Returns a JSON MeasurementPayload ready to be send through a transport .
6,083
def add_sample(self, ts, **kwargs):
    """Record a sample at *ts*, stored as a millisecond offset from the first sample."""
    if self.series.offsets:
        delta = ts - self.ts
        offset = (delta.days * 86400000
                  + delta.seconds * 1000
                  + delta.microseconds // 1000)
    else:
        # the first sample anchors the timebase
        self.ts = ts
        offset = 0
    self.series.add_sample(offset, **kwargs)
Add a sample to this measurements .
6,084
def samples(self):
    """Yield samples as dicts keyed by dimension, with an absolute 'ts' field."""
    names = self.series.dimensions
    for index, offset in enumerate(self.series.offsets):
        # offsets are stored in milliseconds relative to self.ts
        sample = {"ts": self.ts + datetime.timedelta(microseconds=offset * 1000)}
        for name in names:
            sample[name] = getattr(self.series, name)[index]
        yield sample
Yield samples as dictionaries keyed by dimensions .
6,085
def determine_format ( data , extension = None ) : if isinstance ( data , ( os . PathLike , str ) ) : data = open ( data , 'rb' ) data_reader = DataReader ( data ) data_reader . seek ( 0 , os . SEEK_SET ) d = data_reader . read ( 4 ) if d . startswith ( ( b'ID3' , b'\xFF\xFB' ) ) : if extension is None or extension . e...
Determine the format of an audio file .
6,086
def load ( f ) : if isinstance ( f , ( os . PathLike , str ) ) : fileobj = open ( f , 'rb' ) else : try : f . read ( 0 ) except AttributeError : raise ValueError ( "Not a valid file-like object." ) except Exception : raise ValueError ( "Can't read from file-like object." ) fileobj = f parser_cls = determine_format ( fi...
Load audio metadata from filepath or file - like object .
6,087
def loads(b):
    """Load audio metadata from a bytes-like object.

    Raises:
        UnsupportedFormat: if no known format signature is detected.
    """
    parser = determine_format(b)
    if parser is None:
        raise UnsupportedFormat("Supported format signature not found.")
    return parser.load(b)
Load audio metadata from a bytes - like object .
6,088
def Find ( self , node_type , item_type ) : if node_type == OtherNodes . DirectionNode : child = self . GetChild ( len ( self . children ) - 1 ) while child is not None and not isinstance ( child . GetItem ( ) , item_type ) : if child . GetItem ( ) . __class__ . __name__ == item_type . __name__ : return True child = ch...
method for finding specific types of notation from nodes . will currently return the first one it encounters because this method s only really intended for some types of notation for which the exact value doesn t really matter .
6,089
def count_lines(abspath):
    """Count how many lines a plain-text file contains."""
    with open(abspath, "rb") as handle:
        return sum(1 for _ in handle)
Count how many lines in a pure text file .
6,090
def lines_stats(dir_path, file_filter):
    """Return (file count, total line count) of selected files under a directory."""
    n_files = 0
    n_lines = 0
    for selected in Path(dir_path).select_file(file_filter):
        n_files += 1
        n_lines += count_lines(selected.abspath)
    return n_files, n_lines
Lines count of selected files under a directory .
6,091
def parse_content ( self , text ) : match = re . search ( self . usage_re_str . format ( self . usage_name ) , text , flags = ( re . DOTALL if self . case_sensitive else ( re . DOTALL | re . IGNORECASE ) ) ) if match is None : return dic = match . groupdict ( ) logger . debug ( dic ) self . raw_content = dic [ 'raw' ] ...
get Usage section and set to raw_content formal_content of no title and empty - line version
6,092
def spaceless_pdf_plot_maker ( array , filename , vmax = None , dpi = DEFAULT_DPI ) : if vmax is None : vmax = np . percentile ( array , DEFAULT_SATURATION_THRESHOLD ) plt . gca ( ) . set_axis_off ( ) plt . subplots_adjust ( top = 1 , bottom = 0 , right = 1 , left = 0 , hspace = 0 , wspace = 0 ) plt . margins ( 0 , 0 )...
Draw a pretty plot from an array
6,093
def draw_sparse_matrix ( array_filename , output_image , vmax = DEFAULT_SATURATION_THRESHOLD , max_size_matrix = DEFAULT_MAX_SIZE_MATRIX , ) : matrix = np . loadtxt ( array_filename , dtype = np . int32 , skiprows = 1 ) try : row , col , data = matrix . T except ValueError : row , col , data = matrix size = max ( np . ...
Draw a quick preview of a sparse matrix with automated binning and normalization .
6,094
def nth(iterable, n, default=None):
    """Return the item at position *n*, or *default* if the iterable is exhausted."""
    sliced = itertools.islice(iterable, n, None)
    return next(sliced, default)
Returns the nth item or a default value .
6,095
def pull(iterable, n):
    """Return the last *n* items of *iterable* as a list."""
    # a bounded deque discards items from the left once full
    return list(collections.deque(iterable, maxlen=n))
Return last n items of the iterable as a list .
6,096
def running_window(iterable, size):
    """Yield each *size*-length sliding window over *iterable* as a list.

    Raises:
        ValueError: if *size* exceeds len(iterable) (the input must be sized).
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")
    window = collections.deque(maxlen=size)
    for item in iterable:
        window.append(item)
        if len(window) == size:
            yield list(window)
Generate n - size running window .
6,097
def cycle_running_window ( iterable , size ) : if size > len ( iterable ) : raise ValueError ( "size can not be greater than length of iterable." ) fifo = collections . deque ( maxlen = size ) cycle = itertools . cycle ( iterable ) counter = itertools . count ( 1 ) length = len ( iterable ) for i in cycle : fifo . appe...
Generate n - size cycle running window .
6,098
def shift_and_trim(array, dist):
    """Shift *array* by *dist* positions and drop the items shifted out.

    Positive dist shifts right (trims the tail); negative shifts left
    (trims the head).  |dist| >= len(array) or an empty array yields [].
    A zero shift returns a shallow copy.
    """
    length = len(array)
    if length == 0 or abs(dist) >= length:
        return []
    if dist > 0:
        return array[:-dist]
    if dist < 0:
        return array[-dist:]
    return list(array)
Shift and trim unneeded item .
6,099
def shift_and_pad ( array , dist , pad = "__null__" ) : length = len ( array ) if length == 0 : return [ ] if pad == "__null__" : if dist > 0 : padding_item = array [ 0 ] elif dist < 0 : padding_item = array [ - 1 ] else : padding_item = None else : padding_item = pad if abs ( dist ) >= length : return length * [ paddi...
Shift and pad with item .