idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
8,900
def spaced_indexes(len_, n, trunc=False):
    """Return ``n`` evenly spaced indexes into a sequence of length ``len_``.

    ``n=None`` returns every index.  With ``trunc=True`` the request is
    clamped so no more than ``len_`` indexes are asked for.
    """
    if n is None:
        return np.arange(len_)
    if trunc:
        n = min(len_, n)
    if n == 0:
        return np.empty(0)
    step = len_ // n
    candidates = np.arange(len_)
    try:
        chosen = candidates[0:-1:step]
    except ValueError:
        # step == 0 when n > len_ and trunc is False
        raise ValueError('cannot slice list of len_=%r into n=%r parts' % (len_, n))
    return chosen
Returns n evenly spaced indexes . Returns as many as possible if trunc is true
125
15
8,901
def random_sample(list_, nSample, strict=False, rng=None, seed=None):
    """Shuffle a copy of ``list_`` and return its leading ``nSample`` items.

    With ``strict=False`` the sample size is clamped to the available data
    (and an empty input is returned untouched); with ``strict=True`` the
    slice is taken verbatim.
    """
    rng = ensure_rng(rng if rng is not None else seed)
    # Work on a copy so the caller's sequence is never reordered.
    pool = list_[:] if isinstance(list_, list) else np.copy(list_)
    if len(pool) == 0 and not strict:
        return pool
    rng.shuffle(pool)
    if not strict:
        if nSample is None:
            return pool
        nSample = min(max(0, nSample), len(pool))
    return pool[:nSample]
Grabs data randomly
155
4
8,902
def deterministic_sample(list_, nSample, seed=0, rng=None, strict=False):
    """Sample ``nSample`` items pseudo-randomly but reproducibly.

    The default seed of 0 makes repeated calls with the same inputs agree.
    """
    rng = ensure_rng(rng if rng is not None else seed)
    return random_sample(list_, nSample, strict=strict, rng=rng)
Grabs data randomly but in a repeatable way
72
10
8,903
def spaced_items(list_, n, **kwargs):
    """Return ``n`` evenly spaced items from ``list_`` (must support fancy indexing)."""
    picks = spaced_indexes(len(list_), n, **kwargs)
    return list_[picks]
Returns n evenly spaced items
47
5
8,904
def get_servers(self, topic):
    """Yield a ``ServerNode`` for every statically configured host.

    ``topic`` is ignored: with a static server list we have no preexisting
    knowledge of which hosts serve which topics, so every host is assumed
    to be able to serve any topic.
    """
    return (nsq.node.ServerNode(host) for host in self.__server_hosts)
We're assuming that the static list of servers can serve the given topic, since we have no preexisting knowledge about them.
34
25
8,905
def tokenizer(text):
    """Lexical analyzer for CTfile-formatted ``text``; yields tokens per entry.

    Entries are separated by the SDfile '$$$$' delimiter; blank entries are
    skipped.  Each entry is tokenized as a molfile first and, if lines
    remain, as SDfile data.  A terminal ``EndOfFile`` token is always emitted.
    """
    for record in text.split('$$$$\n'):
        if not record.rstrip():
            continue
        stream = deque(record.split('\n'))
        # explicit loops instead of ``yield from`` keep py2 compatibility
        for tok in _molfile(stream=stream):
            yield tok
        if len(stream):
            for tok in _sdfile(stream=stream):
                yield tok
    yield EndOfFile()
A lexical analyzer for the CTfile formatted files .
125
12
8,906
def _ctab_atom_bond_block ( number_of_lines , block_type , stream ) : for _ in range ( int ( number_of_lines ) ) : line = stream . popleft ( ) yield block_type ( * line . split ( ) )
Process atom and bond blocks of Ctab .
61
9
8,907
def _ctab_property_block(stream):
    """Yield ``CtabPropertiesBlockLine`` tokens until the 'M END' line.

    The second whitespace field of each property line is its name; the
    terminating 'M END' line is consumed from the stream.
    """
    current = stream.popleft()
    while current != 'M END':
        prop_name = current.split()[1]
        yield CtabPropertiesBlockLine(prop_name, current)
        current = stream.popleft()
Process properties block of Ctab .
60
7
8,908
def set_features(self):
    """Build score files for qvality's target and decoy distributions.

    Populates ``self.scores`` with the extracted scores and output filename
    per distribution and writes each qvality input file.
    """
    self.scores = {}
    for label, feats in (('target', self.target), ('decoy', self.decoy)):
        entry = {
            'scores': self.score_get_fun(feats, self.featuretype,
                                         self.prepare_percolator_output),
            'fn': '{}_qvality_input.txt'.format(label),
        }
        self.scores[label] = entry
        writers.write_qvality_input(entry['scores'], entry['fn'])
Creates scorefiles for qvality s target and decoy distributions
181
14
8,909
def write(self):
    """Run the external ``qvality`` executable (must be on PATH).

    The command receives the configured options, the target and decoy
    score files produced earlier, and the derived output path.
    """
    out_path = self.create_outfilepath(self.fn, self.outsuffix)
    cmd = ['qvality']
    cmd.extend(self.qvalityoptions)
    cmd.extend([self.scores['target']['fn'], self.scores['decoy']['fn'],
                '-o', out_path])
    subprocess.call(cmd)
This actually runs the qvality program from PATH .
96
11
8,910
def run(self):
    """Compile libfaketime and install the shared library.

    Overrides the distutils ``install`` command: picks platform-specific
    library names, builds via make (with a Sierra-specific source swap on
    macOS 10.12), then copies the result into the install tree.  Bails out
    with a warning on unsupported platforms.
    """
    # Select shared-library names per platform.
    if sys.platform == "linux" or sys.platform == "linux2":
        libname = 'libfaketime.so.1'
        libnamemt = 'libfaketimeMT.so.1'
    elif sys.platform == "darwin":
        libname = 'libfaketime.1.dylib'
        libnamemt = 'libfaketimeMT.1.dylib'
    else:
        sys.stderr.write("WARNING : libfaketime does not support platform {}\n".format(sys.platform))
        sys.stderr.flush()
        return
    faketime_lib = join('faketime', libname)
    faketime_lib_mt = join('faketime', libnamemt)
    self.my_outputs = []
    setup_py_directory = dirname(realpath(__file__))
    faketime_directory = join(setup_py_directory, "faketime")
    os.chdir(faketime_directory)
    if sys.platform == "linux" or sys.platform == "linux2":
        subprocess.check_call(['make', ])
    else:
        os.chdir(setup_py_directory)
        # macOS Sierra (10.12) needs a patched source file before building.
        if "10.12" in subprocess.check_output(["sw_vers", "-productVersion"]).decode('utf8'):
            self.copy_file(join('faketime', "libfaketime.c.sierra"), join('faketime', "libfaketime.c"))
        os.chdir(faketime_directory)
        subprocess.check_call(['make', '-f', 'Makefile.OSX'])
    os.chdir(setup_py_directory)
    dest = join(self.install_purelib, dirname(faketime_lib))
    dest_mt = join(self.install_purelib, dirname(faketime_lib_mt))
    try:
        os.makedirs(dest)
    except OSError as e:
        # errno 17 == EEXIST: destination already present is fine.
        if e.errno != 17:
            raise
    self.copy_file(faketime_lib, dest)
    if exists(faketime_lib_mt):
        self.copy_file(faketime_lib_mt, dest_mt)
    self.my_outputs.append(join(dest, libname))
    install.run(self)
Compile libfaketime .
545
7
8,911
def _generate_serial2(func, args_gen, kw_gen=None, ntasks=None, progkw={}, verbose=None, nTasks=None):
    """Internal serial generator: apply ``func`` to each args/kwargs pair.

    ``kw_gen`` may be None (no kwargs), a single dict (applied to every
    call), or a parallel iterable of dicts.  ``nTasks`` is a legacy alias
    for ``ntasks``.  With verbose > 1 the iteration is wrapped in a
    progress iterator configured via ``progkw``.
    """
    if verbose is None:
        verbose = 2
    # Legacy alias support: fall back to nTasks, then to len(args_gen).
    if ntasks is None:
        ntasks = nTasks
    if ntasks is None:
        ntasks = len(args_gen)
    if verbose > 0:
        print('[ut._generate_serial2] executing %d %s tasks in serial' % (ntasks, get_funcname(func)))
    # kw_gen can be a single dict applied to everything
    if kw_gen is None:
        kw_gen = [{}] * ntasks
    if isinstance(kw_gen, dict):
        kw_gen = [kw_gen] * ntasks
    # Get iterator with or without progress
    if verbose > 1:
        lbl = '(sergen) %s: ' % (get_funcname(func),)
        progkw_ = dict(freq=None, bs=True, adjust=False, freq_est='between')
        progkw_.update(progkw)
        args_gen = util_progress.ProgIter(args_gen, length=ntasks, lbl=lbl, **progkw_)
    for args, kw in zip(args_gen, kw_gen):
        result = func(*args, **kw)
        yield result
internal serial generator
327
3
8,912
def buffered_generator(source_gen, buffer_size=2, use_multiprocessing=False):
    """Run a slow ``source_gen`` in a background thread/process, buffering results.

    Args:
        source_gen: the (possibly slow) source iterable.
        buffer_size: total buffer size; the producer stays at most
            ``buffer_size - 1`` items ahead of the consumer.
        use_multiprocessing: use a subprocess instead of a thread.
            WARNING: seems to freeze if passed in a generator.

    Yields:
        items from ``source_gen`` in order.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer_ size is 2!")

    if use_multiprocessing:
        print('WARNING seems to freeze if passed in a generator')
        Process = multiprocessing.Process
        _Queue = multiprocessing.Queue
        target = _buffered_generation_process
    else:
        _Queue = queue.Queue
        Process = KillableThread
        target = _buffered_generation_thread

    # The effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the
    # buffer.
    buffer_ = _Queue(maxsize=buffer_size - 1)

    # None cannot be the sentinel because source_gen may legitimately yield
    # None; the StopIteration *class* is an unforgeable marker the producer
    # will never generate.
    sentinal = StopIteration

    process = Process(target=target, args=(iter(source_gen), buffer_, sentinal))
    process.daemon = True
    process.start()

    while True:
        output = buffer_.get()
        if output is sentinal:
            # BUGFIX: ``raise StopIteration`` inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); ``return`` is the
            # correct way to end the generator.
            return
        yield output
Generator that runs a slow source generator in a separate process.
366
13
8,913
def sort_window_ids(winid_list, order='mru'):
    """Order window ids by recency; 'mru' = most recently used first."""
    import utool as ut
    # Intersect the globally ordered ids with the requested subset so the
    # result inherits the recency ordering.
    ordered = XCtrl.sorted_window_ids(order)
    return ut.isect(ordered, winid_list)
Orders window ids by most recently used
68
9
8,914
def focus_window(winhandle, path=None, name=None, sleeptime=.01):
    """Give focus to the window identified by ``winhandle`` via wmctrl.

    ``path`` and ``name`` are accepted but currently unused.
    Requires: sudo apt-get install xautomation; sudo apt-get install autokey-gtk.
    """
    import utool as ut
    import time
    print('focus: ' + winhandle)
    cmd_args = ['wmctrl', '-xa', winhandle]
    ut.cmd(*cmd_args, verbose=False, quiet=True)
    # Give the window manager a moment to act before returning.
    time.sleep(sleeptime)
sudo apt-get install xautomation; sudo apt-get install autokey-gtk
81
19
8,915
def setup_chmod(setup_fpath, setup_dir, chmod_patterns):
    """Give files matching each pattern the same mode flags as setup.py.

    The mode is hard-coded to 33277 (0o100775, i.e. rwxrwxr-x on a regular
    file) rather than read from ``setup_fpath``.
    """
    target_mode = 33277
    for pattern in chmod_patterns:
        for fpath in util_path.glob(setup_dir, pattern, recursive=True):
            print('[setup] chmod fpath=%r' % fpath)
            os.chmod(fpath, target_mode)
Gives files matching pattern the same chmod flags as setup . py
104
14
8,916
def __infer_setup_kwargs(module, kwargs):
    """Implicitly build setup() kwargs based on standard project info.

    Mutates ``kwargs`` in place: ensures the package list contains the
    project name and fills in 'version', 'license' and 'long_description'
    when missing.  ``module`` is currently unused.
    """
    # Get project name from the module
    #if 'name' not in kwargs:
    #    kwargs['name'] = module.__name__
    #else:
    #    raise AssertionError('must specify module name!')
    name = kwargs['name']
    # Our projects depend on utool
    #if kwargs['name'] != 'utool':
    #    install_requires = kwargs.get('install_requires', [])
    #    if 'utool' not in install_requires:
    #        install_requires.append('utool')
    #    kwargs['install_requires'] = install_requires
    packages = kwargs.get('packages', [])
    if name not in packages:
        packages.append(name)
        kwargs['packages'] = packages
    if 'version' not in kwargs:
        version = parse_package_for_version(name)
        kwargs['version'] = version
    # Parse version
    #if 'version' not in kwargs:
    #    if module is None:
    #        version_errmsg = 'You must include a version (preferably one that matches the __version__ variable in your modules init file'
    #        raise AssertionError(version_errmsg)
    #    else:
    # Parse license
    if 'license' not in kwargs:
        try:
            kwargs['license'] = read_license('LICENSE')
        except IOError:
            # Best-effort: a missing LICENSE file simply leaves the key unset.
            pass
    # Parse readme
    if 'long_description' not in kwargs:
        kwargs['long_description'] = parse_readme()
Implicitly build kwargs based on standard info
355
11
8,917
def _replaced ( __values , * * __replacements ) : return tuple ( o for o in ( __replacements . get ( name , name ) for name in __values ) if o )
Replace elements in iterable with values from an alias dict suppressing empty values .
42
16
8,918
def _get_admin_route_name ( model_or_instance ) : model = model_or_instance if isinstance ( model_or_instance , type ) else type ( model_or_instance ) return 'admin:{meta.app_label}_{meta.model_name}' . format ( meta = model . _meta )
Get the base name of the admin route for a model or model instance .
73
15
8,919
def _build_admin_filter_url(model, filters):
    """Build an admin changelist URL filtered by the given field values."""
    changelist_url = reverse(_get_admin_route_name(model) + '_changelist')
    split = urlsplit(changelist_url)
    query = parse_qs(split.query)
    query.update(filters)
    # _replace swaps in the new query string without touching other URL parts.
    return urlunsplit(split._replace(query=urlencode(query)))
Build a filter URL to an admin changelist of all objects with similar field values .
95
17
8,920
def _make_admin_link_to_similar(primary_field, *fields, name=None):
    """Create an admin list-display function that links to a changelist of
    all objects sharing the given field values.

    ``primary_field`` supplies the displayed value (unless ``name`` is
    given); ``fields`` are additional fields included in the filter.
    Returns the configured ``field_link`` callable.
    """
    fields = (primary_field,) + fields
    url_template = '<a href="{url}">{name_or_value}</a>'
    def field_link(self, obj):
        value = getattr(obj, primary_field, None)
        name_or_value = name or value
        filters = {field_name: getattr(obj, field_name) for field_name in fields}
        url = _build_admin_filter_url(obj, filters)
        # NOTE: format_html(**locals()) relies on the local variable names
        # 'url' and 'name_or_value' matching the template placeholders --
        # do not rename these locals.
        return format_html(url_template, **locals()) if url else value
    # Django admin metadata: allow HTML, column header and sort field.
    field_link.allow_tags = True
    field_link.short_description = primary_field.replace('_', ' ').capitalize()
    field_link.admin_order_field = primary_field
    field_link.__name__ = field_link.__name__.replace('field', primary_field)
    return field_link
Create a function that links to a changelist of all objects with similar field values .
224
17
8,921
def _retry_failed_log(failed_trigger_log):
    """Re-apply a FAILED trigger log action; return True on success.

    The row is re-fetched with a row lock and the FAILED-state filter so a
    concurrent retry (or a state change) makes this call a no-op.
    """
    model = type(failed_trigger_log)
    try:
        locked_log = (
            model.objects
            .select_for_update()
            .get(
                id=failed_trigger_log.id,
                state=TRIGGER_LOG_STATE['FAILED'],
            )
        )
    except model.DoesNotExist:
        # Someone else already handled it, or it left the FAILED state.
        return False
    locked_log.redo()
    return True
Try to re - apply a failed trigger log action .
100
11
8,922
def ignore_failed_logs_action(self, request, queryset):
    """Admin action: mark FAILED trigger logs in ``queryset`` as IGNORED."""
    ignored_count = _ignore_failed_logs(queryset)
    message = _('{count} failed trigger logs marked as ignored.')
    self.message_user(request, message.format(count=ignored_count))
Set FAILED trigger logs in queryset to IGNORED .
64
14
8,923
def retry_failed_logs_action(self, request, queryset):
    """Admin action: try to re-apply each FAILED trigger log in ``queryset``."""
    retried_count = sum(1 for log in queryset if _retry_failed_log(log))
    self.message_user(
        request,
        _('{count} failed trigger logs retried.').format(count=retried_count),
    )
Try to re - apply FAILED trigger log actions in the queryset .
84
17
8,924
def create_psm_lookup(fn, fastafn, mapfn, header, pgdb, unroll=False, specfncol=None, decoy=False, fastadelim=None, genefield=None):
    """Read PSMs from a TSV file and store them in a database backend in chunks.

    Stores protein descriptions and unique peptide sequences first, then
    the PSM records themselves (flushed every DB_STORE_CHUNK rows), and
    finally indexes the PSMs and links them to their proteins.
    """
    proteins = store_proteins_descriptions(pgdb, fastafn, fn, mapfn, header,
                                           decoy, fastadelim, genefield)
    mzmlmap = pgdb.get_mzmlfile_map()
    # First pass: collect unique peptide sequences (dict used as a set).
    sequences = {}
    for psm in tsvreader.generate_tsv_psms(fn, header):
        seq = tsvreader.get_psm_sequence(psm, unroll)
        sequences[seq] = 1
    pgdb.store_pepseqs(((seq,) for seq in sequences))
    pepseqmap = pgdb.get_peptide_seq_map()
    # Second pass: build PSM records, flushing to the DB in chunks to
    # bound memory use.
    psms = []
    for row, psm in enumerate(tsvreader.generate_tsv_psms(fn, header)):
        specfn, psm_id, scan, seq, score = tsvreader.get_psm(psm, unroll, specfncol)
        if len(psms) % DB_STORE_CHUNK == 0:
            pgdb.store_psms(psms)
            psms = []
        psms.append({'rownr': row,
                     'psm_id': psm_id,
                     'seq': pepseqmap[seq],
                     'score': score,
                     'specfn': mzmlmap[specfn],
                     'scannr': scan,
                     'spec_id': '{}_{}'.format(mzmlmap[specfn], scan),
                     })
    # Store the final partial chunk.
    pgdb.store_psms(psms)
    pgdb.index_psms()
    store_psm_protein_relations(fn, header, pgdb, proteins)
Reads PSMs from file stores them to a database backend in chunked PSMs .
429
18
8,925
def store_psm_protein_relations(fn, header, pgdb, proteins):
    """Read PSMs from file, extract their proteins and peptides, and pass
    them to a database backend in chunks.

    Returns the OrderedDict mapping psm_id -> list of protein accessions.
    """
    # TODO do we need an OrderedDict or is regular dict enough?
    # Sorting for psm_id useful?
    allpsms = OrderedDict()
    last_id, psmids_to_store = None, set()
    store_soon = False
    for psm in tsvreader.generate_tsv_psms(fn, header):
        psm_id, prots = tsvreader.get_pepproteins(psm)
        # Keep only proteins we actually know about.
        prots = [x for x in prots if x in proteins]
        try:
            # In case the PSMs are presented unrolled
            allpsms[psm_id].extend(prots)
        except KeyError:
            allpsms[psm_id] = prots
        if len(psmids_to_store) % DB_STORE_CHUNK == 0:
            store_soon = True
        # Only flush once the psm_id changes, so all rows of an unrolled
        # PSM end up in the same chunk.
        if store_soon and last_id != psm_id:
            pgdb.store_peptides_proteins(allpsms, psmids_to_store)
            store_soon = False
            psmids_to_store = set()
        psmids_to_store.add(psm_id)
        last_id = psm_id
    # Flush whatever is left after the loop.
    if len(psmids_to_store) > 0:
        pgdb.store_peptides_proteins(allpsms, psmids_to_store)
    pgdb.index_protein_peptides()
    return allpsms
Reads PSMs from file extracts their proteins and peptides and passes them to a database backend in chunks .
341
22
8,926
def on_exception_report_input(func_=None, force=False, keys=None):
    """Decorator: if the wrapped function raises, print its name and the
    arguments it was called with before re-raising.

    May be used bare (``@on_exception_report_input``) or with arguments.
    ``force`` enables reporting even when ONEX_REPORT_INPUT is off;
    ``keys`` limits the detailed dump to the named parameters.
    """
    def _closure_onexceptreport(func):
        if not ONEX_REPORT_INPUT and not force:
            # Reporting disabled: return the function untouched.
            return func
        @ignores_exc_tb(outer_wrapper=False)
        #@wraps(func)
        def wrp_onexceptreport(*args, **kwargs):
            try:
                #import utool
                #if utool.DEBUG:
                #    print('[IN EXCPRPT] args=%r' % (args,))
                #    print('[IN EXCPRPT] kwargs=%r' % (kwargs,))
                return func(*args, **kwargs)
            except Exception as ex:
                from utool import util_str
                print('ERROR occured! Reporting input to function')
                if keys is not None:
                    # Resolve each requested key either from kwargs or from
                    # the positional args via the function's argspec.
                    from utool import util_inspect
                    from utool import util_list
                    from utool import util_dict
                    argspec = util_inspect.get_func_argspec(func)
                    in_kwargs_flags = [key in kwargs for key in keys]
                    kwarg_keys = util_list.compress(keys, in_kwargs_flags)
                    kwarg_vals = [kwargs.get(key) for key in kwarg_keys]
                    flags = util_list.not_list(in_kwargs_flags)
                    arg_keys = util_list.compress(keys, flags)
                    arg_idxs = [argspec.args.index(key) for key in arg_keys]
                    num_nodefault = len(argspec.args) - len(argspec.defaults)
                    default_vals = (([None] * (num_nodefault)) +
                                    list(argspec.defaults))
                    # NOTE(review): the '+ 1' below looks like an off-by-one
                    # (it skips one default) -- TODO confirm intended.
                    args_ = list(args) + default_vals[len(args) + 1:]
                    arg_vals = util_list.take(args_, arg_idxs)
                    requested_dict = dict(util_list.flatten(
                        [zip(kwarg_keys, kwarg_vals),
                         zip(arg_keys, arg_vals)]))
                    print('input dict = ' + util_str.repr4(
                        util_dict.dict_subset(requested_dict, keys)))
                    # (print out specific keys only)
                    pass
                arg_strs = ', '.join([repr(util_str.truncate_str(str(arg)))
                                      for arg in args])
                kwarg_strs = ', '.join([
                    util_str.truncate_str('%s=%r' % (key, val))
                    for key, val in six.iteritems(kwargs)])
                msg = ('\nERROR: funcname=%r,\n * args=%s,\n * kwargs=%r\n' % (
                    meta_util_six.get_funcname(func), arg_strs, kwarg_strs))
                msg += ' * len(args) = %r\n' % len(args)
                msg += ' * len(kwargs) = %r\n' % len(kwargs)
                util_dbg.printex(ex, msg, pad_stdout=True)
                raise
        wrp_onexceptreport = preserve_sig(wrp_onexceptreport, func)
        return wrp_onexceptreport
    # Support both @decorator and @decorator(...) usage.
    if func_ is None:
        return _closure_onexceptreport
    else:
        return _closure_onexceptreport(func_)
If an error is thrown in the scope of this function s stack frame then the decorated function name and the arguments passed to it will be printed to the utool print function .
786
35
8,927
def _indent_decor(lbl):
    """Do the actual work of ``indent_func``: build a decorator that runs
    the wrapped function inside an Indenter labeled ``lbl``.

    When util_arg.TRACE is on, entry/exit trace lines are also printed.
    """
    def closure_indent(func):
        if util_arg.TRACE:
            @ignores_exc_tb(outer_wrapper=False)
            #@wraps(func)
            def wrp_indent(*args, **kwargs):
                with util_print.Indenter(lbl):
                    print(' ...trace[in]')
                    ret = func(*args, **kwargs)
                    print(' ...trace[out]')
                    return ret
        else:
            @ignores_exc_tb(outer_wrapper=False)
            #@wraps(func)
            def wrp_indent(*args, **kwargs):
                with util_print.Indenter(lbl):
                    ret = func(*args, **kwargs)
                    return ret
        # NOTE(review): preserve_sig wraps the raw wrp_indent, discarding
        # the ignores_exc_tb result on the line above -- possibly intended,
        # TODO confirm.
        wrp_indent_ = ignores_exc_tb(wrp_indent)
        wrp_indent_ = preserve_sig(wrp_indent, func)
        return wrp_indent_
    return closure_indent
does the actual work of indent_func
231
8
8,928
def indent_func(input_):
    """Indent-output decorator taking either no arguments or a label.

    Accepts a label string (returns a decorator using that label), a
    bool/tuple (decoration disabled; input returned unchanged), or a
    function (decorated using its own name as the label).
    """
    if isinstance(input_, six.string_types):
        # Used as @indent_func('label')
        return _indent_decor(input_)
    if isinstance(input_, (bool, tuple)):
        # Allow individually turning off this decorator
        return input_
    # Used bare: derive the label from the function's name
    label = '[' + meta_util_six.get_funcname(input_) + ']'
    return _indent_decor(label)(input_)
Takes either no arguments or an alias label
124
9
8,929
def tracefunc_xml(func):
    """Decorator that prints the function's output inside XML-style
    <name>...</name> tags; printing honors a ``verbose`` kwarg (default True)."""
    funcname = meta_util_six.get_funcname(func)
    def wrp_tracefunc2(*args, **kwargs):
        verbose = kwargs.get('verbose', True)
        if verbose:
            print('<%s>' % (funcname,))
        with util_print.Indenter(' '):
            ret = func(*args, **kwargs)
        if verbose:
            print('</%s>' % (funcname,))
        return ret
    wrapped = ignores_exc_tb(wrp_tracefunc2)
    return preserve_sig(wrapped, func)
Causes output of function to be printed in an XML style block
177
13
8,930
def accepts_scalar_input(func):
    """Decorator letting a vector method also accept a scalar first input.

    DEPRECATED in favor of accepts_scalar_input2; handles only one
    vectorized argument.  A scalar is wrapped in a list before the call
    and the single result is unwrapped afterwards.
    """
    @ignores_exc_tb(outer_wrapper=False)
    def wrp_asi(self, input_, *args, **kwargs):
        if util_iter.isiterable(input_):
            # Iterable input follows the normal vector code path.
            return func(self, input_, *args, **kwargs)
        # Scalar: wrap, execute, unpack.
        ret = func(self, [input_], *args, **kwargs)
        if ret is not None:
            return ret[0]
    wrp_asi = preserve_sig(wrp_asi, func)
    return wrp_asi
DEPRECATE in favor of accepts_scalar_input2; only accepts one input as a vector
229
21
8,931
def __assert_param_consistency(args, argx_list_):
    """Debug check for accepts_scalar_input2: the arguments at the given
    positions must be uniformly iterable or uniformly scalar."""
    if util_arg.NO_ASSERTS:
        return
    if len(argx_list_) == 0:
        return True
    iter_flags = [util_iter.isiterable(args[argx]) for argx in argx_list_]
    try:
        assert all([iter_flags[0] == flag for flag in iter_flags]), (
            'invalid mixing of iterable and scalar inputs')
    except AssertionError as ex:
        # Dump the offending arguments before re-raising for debugging.
        print('!!! ASSERTION ERROR IN UTIL_DECOR !!!')
        for argx in argx_list_:
            print('[util_decor] args[%d] = %r' % (argx, args[argx]))
        raise ex
debugging function for accepts_scalar_input2 checks to make sure all the iterable inputs are of the same length
181
26
8,932
def accepts_scalar_input_vector_output(func):
    """DEPRECATED in favor of accepts_scalar_input2.

    Like accepts_scalar_input, but for functions returning a vector: a
    scalar input is wrapped, and the single-element result is unwrapped
    (or [] when the function produced no output for it).
    """
    @ignores_exc_tb(outer_wrapper=False)
    def wrp_asivo(self, input_, *args, **kwargs):
        if util_iter.isiterable(input_):
            # Iterable input follows the normal vector code path.
            return func(self, input_, *args, **kwargs)
        result = func(self, (input_,), *args, **kwargs)
        if len(result) == 0:
            # A scalar input may legitimately map to no output.
            return []
        assert len(result) == 1, 'error in asivo'
        return result[0]
    return wrp_asivo
DEPRECATE IN FAVOR OF accepts_scalar_input2
240
17
8,933
def accepts_numpy(func):
    """Decorator: allow the first input to be a numpy array and return the
    result in numpy form shaped like the input.

    Non-array inputs pass straight through.  Array inputs are flattened
    (optionally deduplicated when UNIQUE_NUMPY is set), converted to a
    list for the call, and the output is reshaped back to the input shape.
    """
    #@ignores_exc_tb
    #@wraps(func)
    def wrp_accepts_numpy(self, input_, *args, **kwargs):
        if not (util_type.HAVE_NUMPY and isinstance(input_, np.ndarray)):
            # If the input is not numpy, just call the function
            return func(self, input_, *args, **kwargs)
        else:
            # TODO: use a variant of util_list.unflat_unique_rowid_map
            # If the input is a numpy array, and return the output with the same
            # shape as the input
            if UNIQUE_NUMPY:
                # Remove redundant input (because we are passing it to SQL)
                input_list, inverse_unique = np.unique(input_, return_inverse=True)
            else:
                input_list = input_.flatten()
            # Call the function in list format
            # TODO: is this necessary?
            input_list = input_list.tolist()
            output_list = func(self, input_list, *args, **kwargs)
            # Put the output back into numpy
            if UNIQUE_NUMPY:
                # Reconstruct redundant queries
                output_arr = np.array(output_list)[inverse_unique]
                output_shape = tuple(list(input_.shape) + list(output_arr.shape[1:]))
                return np.array(output_arr).reshape(output_shape)
            else:
                return np.array(output_list).reshape(input_.shape)
    wrp_accepts_numpy = preserve_sig(wrp_accepts_numpy, func)
    return wrp_accepts_numpy
Allows the first input to be a numpy array and get result in numpy form
394
17
8,934
def memoize_nonzero(func):
    """Memoization decorator for functions taking a nonzero number of
    positional arguments.

    Results are cached in a dict keyed by the argument tuple, so all
    arguments must be hashable.  Keyword arguments are not supported.
    """
    class _memorizer(dict):
        def __init__(self, wrapped):
            self.func = wrapped
        def __call__(self, *args):
            # dict lookup; __missing__ fills the cache on the first call
            return self[args]
        def __missing__(self, key):
            value = self.func(*key)
            self[key] = value
            return value
    return _memorizer(func)
Memoization decorator for functions taking a nonzero number of arguments .
84
15
8,935
def memoize(func):
    """Simple memoization decorator keyed on str(args) + str(kwargs).

    The cache dict is exposed both as ``func._util_decor_memoize_cache``
    and as ``memoizer.cache``.  NOTE: distinct arguments with identical
    string forms share a cache slot.
    """
    cache = func._util_decor_memoize_cache = {}
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            result = func(*args, **kwargs)
            cache[key] = result
            return result
    memoizer = preserve_sig(memoizer, func)
    memoizer.cache = cache
    return memoizer
simple memoization decorator
112
5
8,936
def lazyfunc(func):
    """Return a memoized version of ``func`` keyed on the repr of its arguments."""
    memory = {}
    def wrapper(*args, **kwargs):
        key = (repr(args), repr(kwargs))
        # Compute at most once per distinct argument repr.
        if key not in memory:
            memory[key] = func(*args, **kwargs)
        return memory[key]
    return wrapper
Returns a memcached version of a function
85
9
8,937
def apply_docstr(docstr_func):
    """Decorator factory: change the docstr of one function to that of another.

    If ``docstr_func`` is a string it is appended to the target's existing
    docstring; otherwise the target is signature-preserved against it.
    """
    def docstr_applier(func):
        #docstr = meta_util_six.get_funcdoc(docstr_func)
        #meta_util_six.set_funcdoc(func, docstr)
        if isinstance(docstr_func, six.string_types):
            existing = meta_util_six.get_funcdoc(func)
            if existing is None:
                existing = ''
            meta_util_six.set_funcdoc(func, existing + docstr_func)
            return func
        return preserve_sig(func, docstr_func)
    return docstr_applier
Changes the docstr of one function to that of another
161
12
8,938
def _sigfigs ( n , sigfigs = 3 ) : n = float ( n ) if n == 0 or math . isnan ( n ) : # avoid math domain errors return n return round ( n , - int ( math . floor ( math . log10 ( abs ( n ) ) ) - sigfigs + 1 ) )
helper function to round a number to significant figures
73
10
8,939
def merge_moments(m_a, m_a2, m_a3, m_a4, n_a, m_b, m_b2, m_b3, m_b4, n_b):
    """Merge the central moments of two samples A and B.

    Implements the pairwise (Pebay/Chan) moment-merging update.

    Args:
        m_a, m_a2, m_a3, m_a4: mean and 2nd-4th central moment sums of A.
        n_a: size of sample A.
        m_b, m_b2, m_b3, m_b4: mean and 2nd-4th central moment sums of B.
        n_b: size of sample B.

    Returns:
        tuple: (mean, M2, M3, M4, n) for the union of the two samples.
    """
    delta = m_b - m_a
    delta_2 = delta * delta
    delta_3 = delta * delta_2
    delta_4 = delta * delta_3
    n_x = n_a + n_b
    m_x = m_a + delta * n_b / n_x
    m_x2 = m_a2 + m_b2 + delta_2 * n_a * n_b / n_x
    # BUGFIX: this term previously referenced undefined names m_2a/m_2b
    # (NameError) and omitted the 1/n_x**2 factor on the delta**3 term
    # required by the moment-merge formula.
    m_x3 = (m_a3 + m_b3
            + delta_3 * n_a * n_b * (n_a - n_b) / n_x ** 2
            + 3 * delta * (n_a * m_b2 - n_b * m_a2) / n_x)
    m_x4 = (m_a4 + m_b4
            + delta_4 * (n_a * n_b * (n_a * n_a - n_a * n_b + n_b * n_b)) / (n_x ** 3)
            + 6 * delta_2 * (n_a * n_a * m_b2 + n_b * n_b * m_a2) / (n_x ** 2)
            + 4 * delta * (n_a * m_b3 - n_b * m_a3) / n_x)
    return m_x, m_x2, m_x3, m_x4, n_x
Merge moments of two samples A and B . parameters are m_a ... m_a4 = first through fourth moment of sample A n_a = size of sample A m_b ... m_b4 = first through fourth moment of sample B n_b = size of sample B
366
60
8,940
def _transition ( self , nxt , cur = None , since = None ) : self . transition_intervals [ ( cur , nxt ) ] . tick ( ) if since : self . state_durations [ cur ] . end ( since )
Register that a transition has taken place . nxt is an identifier for the state being entered . cur is an identifier for the state being left . since is the time at which the previous state was entered .
54
41
8,941
def _cleanup ( self , ref ) : self . transitor_states [ self . _weakref_holder [ ref ] ] -= 1 del self . _weakref_holder [ ref ]
cleanup after a transitor weakref fires
41
9
8,942
def _commit(self, ref):
    """Commit a walker's data after it is collected.

    ``ref`` is the weakref key of the finished walker; its recorded list
    is consumed, the final timestamp appended, and each leg's elapsed
    time folded into the per-path Duration statistics.
    """
    path_times = self._weakref_path_map[ref]
    path_times.append(nanotime())
    del self._weakref_path_map[ref]
    # Even slots appear to be timestamps and odd slots path-segment names
    # -- TODO confirm against the walker code that records them.
    path = tuple(path_times[1::2])
    times = path_times[::2]
    if path not in self.path_stats:
        # tuple to save a tiny bit of memory
        self.path_stats[path] = tuple([Duration(interval=False) for i in range(len(path))])
    path_stats = self.path_stats[path]
    # Accumulate each leg's elapsed time into its segment's stats.
    for i in range(1, len(times)):
        path_stats[i - 1]._stats.add(times[i] - times[i - 1])
commit a walkers data after it is collected
174
9
8,943
def pformat(self, prefix=()):
    """Pretty ASCII rendering of the collected path statistics.

    Returns a list of lines suitable for printing to a console or saving
    to a text file.  ``prefix`` is accepted but currently unused.
    """
    nan = float("nan")
    def _format_segment(segment, stat):
        pcts = stat.get_percentiles()
        p50 = pcts.get(0.50, nan)
        p95 = pcts.get(0.95, nan)
        summary = "n={0}, mean={1}, p50/95={2}/{3}, max={4}".format(
            stat.n, stat.mean, p50, p95, stat.max)
        return '{0}: {1}'.format(segment, summary)
    lines = []
    for path in sorted(self.path_stats.keys()):
        lines.append('=====================')
        for seg, stat in zip(path, self.path_stats[path]):
            lines.append(_format_segment(seg, stat))
    return lines
Makes a pretty ASCII format of the data suitable for displaying in a console or saving to a text file . Returns a list of lines .
221
28
8,944
def specfn_quant_generator(specfiles, quantfiles, tag, ignore_tags):
    """Yield (spectra-file basename, quant element) tuples for general formats.

    Spec files and quant files are paired positionally.
    """
    for spec_path, quant_path in zip(specfiles, quantfiles):
        for quant_el in basereader.generate_xmltags(quant_path, tag, ignore_tags):
            yield os.path.basename(spec_path), quant_el
Generates tuples of specfile and quant element for general formats
79
13
8,945
def get_feature_info(feature):
    """Return a dict with rt, mz, charge and intensity for an XML feature.

    rt and mz come from the <position> children with dim="0" and dim="1";
    both are assumed to be present.
    """
    for position in feature.findall('position'):
        if position.attrib['dim'] == '0':
            rt = position.text
        elif position.attrib['dim'] == '1':
            mz = position.text
    return {
        'rt': float(rt),
        'mz': float(mz),
        'charge': int(feature.find('charge').text),
        'intensity': float(feature.find('intensity').text),
    }
Returns a dict with feature information
129
6
8,946
def merge_maps(m, base):
    """Merge in undefined map entries from ``base``.

    Existing keys in ``m`` always win; ``m`` is mutated in place.
    """
    for key, value in base.items():
        m.setdefault(key, value)
Merge in undefined map entries from given map .
34
10
8,947
def merge_lists(l, base):
    """Merge in undefined list entries from ``base``.

    ``l`` is mutated in place; the relative order of ``base`` is kept and
    duplicates are not introduced.
    """
    l.extend(entry for entry in base if entry not in l)
Merge in undefined list entries from given list .
27
10
8,948
def generate_top_psms(psms, protcol):
    """Collect, per protein, the highest precursor quant value seen for
    each peptide sequence.

    PSMs mapping to multiple proteins (';' in the accession) or without a
    precursor amount ('NA') are skipped.
    """
    top_ms1_psms = {}
    for psm in psms:
        protacc = psm[protcol]
        amount = psm[mzidtsvdata.HEADER_PRECURSOR_QUANT]
        if ';' in protacc or amount == 'NA':
            continue
        amount = float(amount)
        seq = psm[mzidtsvdata.HEADER_PEPTIDE]
        seq_areas = top_ms1_psms.setdefault(protacc, {})
        # Keep only the maximum amount per peptide sequence.
        if seq not in seq_areas or amount > seq_areas[seq]:
            seq_areas[seq] = amount
    return top_ms1_psms
Fed with a PSMs generator, this returns the 3 PSMs with the highest precursor intensities (or areas, or whatever is given in the HEADER_PRECURSOR_QUANT field).
235
38
8,949
def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol):
    """Annotate proteins with the summed precursor quant of their top PSMs.

    Yields copies of the incoming protein dicts with the precursor-area
    field filled in (as a string).
    """
    if not protcol:
        protcol = mzidtsvdata.HEADER_MASTER_PROT
    top_ms1_psms = generate_top_psms(psms, protcol)
    # The output field name does not depend on the protein; look it up once.
    area_field = headerfields['precursorquant'][prottabledata.HEADER_AREA][None]
    for protein in proteins:
        acc = protein[prottabledata.HEADER_PROTEIN]
        area = calculate_protein_precursor_quant(top_ms1_psms, acc)
        annotated = {k: v for k, v in protein.items()}
        annotated[area_field] = str(area)
        yield annotated
Collects PSMs with the highest precursor quant values and adds the sum of the top 3 of these to a protein table.
181
23
8,950
def toc(tt, return_msg=False, write_msg=True, verbose=None):
    """Stop-watch readout, similar to MATLAB's toc.

    ``tt`` is the (msg, start_time) pair produced by tic.  Returns the
    elapsed seconds, or the message when ``return_msg`` is True; a
    formatted line is written to stdout when ``write_msg`` is enabled.
    """
    if verbose is not None:
        write_msg = verbose
    (msg, start_time) = tt
    elapsed = default_timer() - start_time
    if write_msg and not return_msg and msg is not None:
        sys.stdout.write('...toc(%.4fs, ' % elapsed + '"' + str(msg) + '"' + ')\n')
    if return_msg:
        return msg
    return elapsed
similar to matlab toc
127
6
8,951
def parse_timestamp ( timestamp , zone = 'UTC' , timestamp_format = None ) : if timestamp is None : return None use_delorean = True or six . PY2 if use_delorean : import delorean ## customize delorean string method #def __str__(self): # return str(self.datetime) # #return str(self.datetime) + ' ' + str(self.timezone) #delorean.Delorean.__str__ = __str__ ## method types must be injected into the class ##ut.inject_func_as_method(dn, __str__, '__repr__', override=True) if not isinstance ( timestamp , six . string_types ) : raise NotImplementedError ( 'Unknown format: timestamp=%r' % ( timestamp , ) ) # Normal format, or non-standard year first data if timestamp_format is None : # dont warn because we will take care of utc timefmt = determine_timestamp_format ( timestamp , warn = False ) else : timefmt = timestamp_format if timefmt is None or not isinstance ( timefmt , six . string_types ) : raise AssertionError ( 'unknown timestamp_format=%r' % ( timestamp_format , ) ) # Fixup timestamp utc_offset = None if len ( timestamp ) == 20 and '\x00' in timestamp : timestamp_ = timestamp . replace ( '\x00' , ' ' ) . strip ( ';' ) . strip ( ) elif use_delorean and len ( timestamp ) > 19 : timestamp_ = timestamp [ : 19 ] . strip ( ';' ) . strip ( ) utc_offset = timestamp [ 19 : ] else : timestamp_ = timestamp dt_ = datetime . datetime . strptime ( timestamp_ , timefmt ) if use_delorean : #if utc and utc_offset is not None: #if utc: # dn_ = delorean.Delorean(dt_, 'UTC') #else: if zone is None : zone = time . tzname [ 0 ] if zone == 'local' : zone = time . tzname [ 0 ] dn_ = delorean . Delorean ( dt_ , zone ) else : dn_ = dt_ if utc_offset is not None and zone == 'UTC' : if use_delorean : # Python 2.7 does not account for timezones if ':' in utc_offset : sign = { ' ' : + 1 , '+' : + 1 , '-' : - 1 } [ utc_offset [ 0 ] ] hours , seconds = utc_offset [ 1 : ] . split ( ':' ) delta_ = datetime . 
timedelta ( hours = int ( hours ) , seconds = int ( seconds ) ) delta = sign * delta_ else : import pytz tzname = utc_offset . strip ( ) delta = pytz . timezone ( tzname ) . utcoffset ( dt_ ) # Move back to utc dn = dn_ - delta else : raise AssertionError ( 'python3 should take care of timezone' ) else : dn = dn_ if use_delorean : if not zone != 'UTC' : dn . shift ( zone ) return dn . datetime
r pip install delorean
731
5
8,952
def date_to_datetime ( date , fraction = 0.0 ) : day_seconds = ( 60 * 60 * 24 ) - 1 total_seconds = int ( day_seconds * fraction ) delta = datetime . timedelta ( seconds = total_seconds ) time = datetime . time ( ) dt = datetime . datetime . combine ( date , time ) + delta return dt
fraction is how much through the day you are . 0 = start of the day 1 = end of the day .
84
24
8,953
def ec2_instances ( ) : region = boto . ec2 . get_region ( REGION ) reservations = region . connect ( ) . get_all_instances ( ) instances = [ ] for reservation in reservations : instances += reservation . instances return instances
Use the EC2 API to get a list of all machines
56
12
8,954
def instances ( exp = ".*" ) : expression = re . compile ( exp ) instances = [ ] for node in ec2_instances ( ) : if node . tags and ip ( node ) : try : if expression . match ( node . tags . get ( "Name" ) ) : instances . append ( node ) except TypeError : pass return instances
Filter list of machines matching an expression
75
7
8,955
def use ( node ) : try : role = node . tags . get ( "Name" ) . split ( '-' ) [ 1 ] env . roledefs [ role ] += [ ip ( node ) ] except IndexError : pass env . nodes += [ node ] env . hosts += [ ip ( node ) ]
Set the fabric environment for the specifed node
67
10
8,956
def build_alias_map ( regex_map , tag_vocab ) : import utool as ut import re alias_map = ut . odict ( [ ] ) for pats , new_tag in reversed ( regex_map ) : pats = ut . ensure_iterable ( pats ) for pat in pats : flags = [ re . match ( pat , t ) for t in tag_vocab ] for old_tag in ut . compress ( tag_vocab , flags ) : alias_map [ old_tag ] = new_tag identity_map = ut . take_column ( regex_map , 1 ) for tag in ut . filter_Nones ( identity_map ) : alias_map [ tag ] = tag return alias_map
Constructs explicit mapping . Order of items in regex map matters . Items at top are given preference .
161
20
8,957
def alias_tags ( tags_list , alias_map ) : def _alias_dict ( tags ) : tags_ = [ alias_map . get ( t , t ) for t in tags ] return list ( set ( [ t for t in tags_ if t is not None ] ) ) tags_list_ = [ _alias_dict ( tags ) for tags in tags_list ] return tags_list_
update tags to new values
87
5
8,958
def setup ( self ) : self . client = self . _get_client ( ) sg = self . _create_isolation_security_group ( ) if self . exists is not True : acl = self . _create_network_acl ( ) self . _add_network_acl_entries ( acl ) self . _add_security_group_rule ( sg ) self . _add_security_group_to_instance ( sg ) if self . dry_run is not False : self . _add_security_group_rule ( sg ) self . _add_security_group_to_instance ( sg )
Conditions that can not be dry_run
140
9
8,959
def _args2_fpath ( dpath , fname , cfgstr , ext ) : if len ( ext ) > 0 and ext [ 0 ] != '.' : raise ValueError ( 'Please be explicit and use a dot in ext' ) max_len = 128 # should hashlen be larger? cfgstr_hashlen = 16 prefix = fname fname_cfgstr = consensed_cfgstr ( prefix , cfgstr , max_len = max_len , cfgstr_hashlen = cfgstr_hashlen ) fpath = join ( dpath , fname_cfgstr + ext ) fpath = normpath ( fpath ) return fpath
r Ensures that the filename is not too long
145
10
8,960
def save_cache ( dpath , fname , cfgstr , data , ext = '.cPkl' , verbose = None ) : fpath = _args2_fpath ( dpath , fname , cfgstr , ext ) util_io . save_data ( fpath , data , verbose = verbose ) return fpath
Saves data using util_io but smartly constructs a filename
75
13
8,961
def load_cache ( dpath , fname , cfgstr , ext = '.cPkl' , verbose = None , enabled = True ) : if verbose is None : verbose = VERBOSE_CACHE if not USE_CACHE or not enabled : if verbose > 1 : print ( '[util_cache] ... cache disabled: dpath=%s cfgstr=%r' % ( basename ( dpath ) , cfgstr , ) ) raise IOError ( 3 , 'Cache Loading Is Disabled' ) fpath = _args2_fpath ( dpath , fname , cfgstr , ext ) if not exists ( fpath ) : if verbose > 0 : print ( '[util_cache] ... cache does not exist: dpath=%r fname=%r cfgstr=%r' % ( basename ( dpath ) , fname , cfgstr , ) ) raise IOError ( 2 , 'No such file or directory: %r' % ( fpath , ) ) else : if verbose > 2 : print ( '[util_cache] ... cache exists: dpath=%r fname=%r cfgstr=%r' % ( basename ( dpath ) , fname , cfgstr , ) ) import utool as ut nbytes = ut . get_file_nBytes ( fpath ) big_verbose = ( nbytes > 1E6 and verbose > 2 ) or verbose > 2 if big_verbose : print ( '[util_cache] About to read file of size %s' % ( ut . byte_str2 ( nbytes ) , ) ) try : with ut . Timer ( fpath , verbose = big_verbose and verbose > 3 ) : data = util_io . load_data ( fpath , verbose = verbose > 2 ) except ( EOFError , IOError , ImportError ) as ex : print ( 'CORRUPTED? fpath = %s' % ( fpath , ) ) if verbose > 1 : print ( '[util_cache] ... cache miss dpath=%s cfgstr=%r' % ( basename ( dpath ) , cfgstr , ) ) raise IOError ( str ( ex ) ) except Exception : print ( 'CORRUPTED? fpath = %s' % ( fpath , ) ) raise else : if verbose > 2 : print ( '[util_cache] ... cache hit' ) return data
Loads data using util_io but smartly constructs a filename
539
13
8,962
def tryload_cache ( dpath , fname , cfgstr , verbose = None ) : try : return load_cache ( dpath , fname , cfgstr , verbose = verbose ) except IOError : return None
returns None if cache cannot be loaded
51
8
8,963
def tryload_cache_list ( dpath , fname , cfgstr_list , verbose = False ) : data_list = [ tryload_cache ( dpath , fname , cfgstr , verbose ) for cfgstr in cfgstr_list ] ismiss_list = [ data is None for data in data_list ] return data_list , ismiss_list
loads a list of similar cached datas . Returns flags that needs to be computed
85
15
8,964
def tryload_cache_list_with_compute ( use_cache , dpath , fname , cfgstr_list , compute_fn , * args ) : # Load precomputed values if use_cache is False : data_list = [ None ] * len ( cfgstr_list ) ismiss_list = [ True ] * len ( cfgstr_list ) # Don't load or save, just compute data_list = compute_fn ( ismiss_list , * args ) return data_list else : data_list , ismiss_list = tryload_cache_list ( dpath , fname , cfgstr_list , verbose = False ) num_total = len ( cfgstr_list ) if any ( ismiss_list ) : # Compute missing values newdata_list = compute_fn ( ismiss_list , * args ) newcfgstr_list = util_list . compress ( cfgstr_list , ismiss_list ) index_list = util_list . list_where ( ismiss_list ) print ( '[cache] %d/%d cache hits for %s in %s' % ( num_total - len ( index_list ) , num_total , fname , util_path . tail ( dpath ) ) ) # Cache write for newcfgstr , newdata in zip ( newcfgstr_list , newdata_list ) : save_cache ( dpath , fname , newcfgstr , newdata , verbose = False ) # Populate missing result for index , newdata in zip ( index_list , newdata_list ) : data_list [ index ] = newdata else : print ( '[cache] %d/%d cache hits for %s in %s' % ( num_total , num_total , fname , util_path . tail ( dpath ) ) ) return data_list
tries to load data but computes it if it can t give a compute function
408
17
8,965
def to_json ( val , allow_pickle = False , pretty = False ) : UtoolJSONEncoder = make_utool_json_encoder ( allow_pickle ) json_kw = { } json_kw [ 'cls' ] = UtoolJSONEncoder if pretty : json_kw [ 'indent' ] = 4 json_kw [ 'separators' ] = ( ',' , ': ' ) json_str = json . dumps ( val , * * json_kw ) return json_str
r Converts a python object to a JSON string using the utool convention
113
15
8,966
def from_json ( json_str , allow_pickle = False ) : if six . PY3 : if isinstance ( json_str , bytes ) : json_str = json_str . decode ( 'utf-8' ) UtoolJSONEncoder = make_utool_json_encoder ( allow_pickle ) object_hook = UtoolJSONEncoder . _json_object_hook val = json . loads ( json_str , object_hook = object_hook ) return val
Decodes a JSON object specified in the utool convention
108
11
8,967
def cachestr_repr ( val ) : try : memview = memoryview ( val ) return memview . tobytes ( ) except Exception : try : return to_json ( val ) except Exception : # SUPER HACK if repr ( val . __class__ ) == "<class 'ibeis.control.IBEISControl.IBEISController'>" : return val . get_dbname ( )
Representation of an object as a cache string .
86
10
8,968
def cached_func ( fname = None , cache_dir = 'default' , appname = 'utool' , key_argx = None , key_kwds = None , use_cache = None , verbose = None ) : if verbose is None : verbose = VERBOSE_CACHE def cached_closure ( func ) : from utool import util_decor import utool as ut fname_ = util_inspect . get_funcname ( func ) if fname is None else fname kwdefaults = util_inspect . get_kwdefaults ( func ) argnames = util_inspect . get_argnames ( func ) if ut . is_method ( func ) : # ignore self for methods argnames = argnames [ 1 : ] cacher = Cacher ( fname_ , cache_dir = cache_dir , appname = appname , verbose = verbose ) if use_cache is None : use_cache_ = not util_arg . get_argflag ( '--nocache-' + fname_ ) else : use_cache_ = use_cache #_dbgdict = dict(fname_=fname_, key_kwds=key_kwds, appname=appname, # key_argx=key_argx, use_cache_=use_cache_) #@functools.wraps(func) def cached_wraper ( * args , * * kwargs ) : """ Cached Wrapper Function Additional Kwargs: use_cache (bool) : enables cache """ try : if verbose > 2 : print ( '[util_cache] computing cached function fname_=%s' % ( fname_ , ) ) # Implicitly adds use_cache to kwargs cfgstr = get_cfgstr_from_args ( func , args , kwargs , key_argx , key_kwds , kwdefaults , argnames ) if util_cplat . WIN32 : # remove potentially invalid chars cfgstr = '_' + util_hash . hashstr27 ( cfgstr ) assert cfgstr is not None , 'cfgstr=%r cannot be None' % ( cfgstr , ) use_cache__ = kwargs . pop ( 'use_cache' , use_cache_ ) if use_cache__ : # Make cfgstr from specified input data = cacher . tryload ( cfgstr ) if data is not None : return data # Cached missed compute function data = func ( * args , * * kwargs ) # Cache save #if use_cache__: # TODO: save_cache cacher . save ( data , cfgstr ) return data #except ValueError as ex: # handle protocal error except Exception as ex : from utool import util_dbg _dbgdict2 = dict ( key_argx = key_argx , lenargs = len ( args ) , lenkw = len ( kwargs ) , ) msg = '\n' . 
join ( [ '+--- UTOOL --- ERROR IN CACHED FUNCTION' , #'dbgdict = ' + utool.repr4(_dbgdict), 'dbgdict2 = ' + util_str . repr4 ( _dbgdict2 ) , ] ) util_dbg . printex ( ex , msg ) raise # Give function a handle to the cacher object cached_wraper = util_decor . preserve_sig ( cached_wraper , func ) cached_wraper . cacher = cacher return cached_wraper return cached_closure
r Wraps a function with a Cacher object
788
10
8,969
def get_global_shelf_fpath ( appname = 'default' , ensure = False ) : global_cache_dir = get_global_cache_dir ( appname , ensure = ensure ) shelf_fpath = join ( global_cache_dir , meta_util_constants . global_cache_fname ) return shelf_fpath
Returns the filepath to the global shelf
76
8
8,970
def global_cache_write ( key , val , appname = 'default' ) : with GlobalShelfContext ( appname ) as shelf : shelf [ key ] = val
Writes cache files to a safe place in each operating system
37
12
8,971
def delete_global_cache ( appname = 'default' ) : #close_global_shelf(appname) shelf_fpath = get_global_shelf_fpath ( appname ) util_path . remove_file ( shelf_fpath , verbose = True , dryrun = False )
Reads cache files to a safe place in each operating system
67
12
8,972
def existing_versions ( self ) : import glob pattern = self . fname + '_*' + self . ext for fname in glob . glob1 ( self . dpath , pattern ) : fpath = join ( self . dpath , fname ) yield fpath
Returns data with different cfgstr values that were previously computed with this cacher .
58
17
8,973
def tryload ( self , cfgstr = None ) : if cfgstr is None : cfgstr = self . cfgstr if cfgstr is None : import warnings warnings . warn ( 'No cfgstr given in Cacher constructor or call' ) cfgstr = '' # assert cfgstr is not None, ( # 'must specify cfgstr in constructor or call') if not self . enabled : if self . verbose > 0 : print ( '[cache] ... %s Cacher disabled' % ( self . fname ) ) return None try : if self . verbose > 1 : print ( '[cache] tryload fname=%s' % ( self . fname , ) ) # if self.verbose > 2: # print('[cache] cfgstr=%r' % (cfgstr,)) return self . load ( cfgstr ) except IOError : if self . verbose > 0 : print ( '[cache] ... %s Cacher miss' % ( self . fname ) )
Like load but returns None if the load fails
220
9
8,974
def fuzzyload ( self , cachedir = None , partial_cfgstr = '' , * * kwargs ) : valid_targets = self . glob_valid_targets ( cachedir , partial_cfgstr ) if len ( valid_targets ) != 1 : import utool as ut msg = 'need to further specify target. valid_targets=%s' % ( ut . repr3 ( valid_targets , ) ) raise ValueError ( msg ) fpath = valid_targets [ 0 ] self . load ( fpath = fpath , * * kwargs )
Try and load from a partially specified configuration string
132
9
8,975
def load ( self , cachedir = None , cfgstr = None , fpath = None , verbose = None , quiet = QUIET , ignore_keys = None ) : if verbose is None : verbose = getattr ( self , 'verbose' , VERBOSE ) if fpath is None : fpath = self . get_fpath ( cachedir , cfgstr = cfgstr ) if verbose : print ( '[Cachable] cache tryload: %r' % ( basename ( fpath ) , ) ) try : self . _unsafe_load ( fpath , ignore_keys ) if verbose : print ( '... self cache hit: %r' % ( basename ( fpath ) , ) ) except ValueError as ex : import utool as ut msg = '[!Cachable] Cachable(%s) is likely corrupt' % ( self . get_cfgstr ( ) ) print ( 'CORRUPT fpath = %s' % ( fpath , ) ) ut . printex ( ex , msg , iswarning = True ) raise #except BadZipFile as ex: except zipfile . error as ex : import utool as ut msg = '[!Cachable] Cachable(%s) has bad zipfile' % ( self . get_cfgstr ( ) ) print ( 'CORRUPT fpath = %s' % ( fpath , ) ) ut . printex ( ex , msg , iswarning = True ) raise #if exists(fpath): # #print('[Cachable] Removing corrupted file: %r' % fpath) # #os.remove(fpath) # raise hsexcept.HotsNeedsRecomputeError(msg) #else: # raise Exception(msg) except IOError as ex : import utool as ut if not exists ( fpath ) : msg = '... self cache miss: %r' % ( basename ( fpath ) , ) if verbose : print ( msg ) raise print ( 'CORRUPT fpath = %s' % ( fpath , ) ) msg = '[!Cachable] Cachable(%s) is corrupt' % ( self . get_cfgstr ( ) ) ut . printex ( ex , msg , iswarning = True ) raise except Exception as ex : import utool as ut ut . printex ( ex , 'unknown exception while loading query result' ) raise
Loads the result from the given database
519
8
8,976
def truepath_relative ( path , otherpath = None ) : if otherpath is None : otherpath = os . getcwd ( ) otherpath = truepath ( otherpath ) path_ = normpath ( relpath ( path , otherpath ) ) return path_
Normalizes and returns absolute path with so specs
57
9
8,977
def tail ( fpath , n = 2 , trailing = True ) : return path_ndir_split ( fpath , n = n , trailing = trailing )
Alias for path_ndir_split
34
8
8,978
def unexpanduser ( path ) : homedir = expanduser ( '~' ) if path . startswith ( homedir ) : path = '~' + path [ len ( homedir ) : ] return path
r Replaces home directory with ~
50
7
8,979
def path_ndir_split ( path_ , n , force_unix = True , winroot = 'C:' , trailing = True ) : if not isinstance ( path_ , six . string_types ) : # Probably given a file pointer return path_ if n is None : cplat_path = ensure_crossplat_path ( path_ ) elif n == 0 : cplat_path = '' else : sep = '/' if force_unix else os . sep ndirs_list = [ ] head = path_ reached_end = False for nx in range ( n ) : head , tail = split ( head ) if tail == '' : if head == '' : reached_end = True else : root = head if len ( ndirs_list ) == 0 else head . strip ( '\\/' ) ndirs_list . append ( root ) reached_end = True break else : ndirs_list . append ( tail ) if trailing and not reached_end : head , tail = split ( head ) if len ( tail ) == 0 : if len ( head ) == 0 : # or head == '/': reached_end = True ndirs = sep . join ( ndirs_list [ : : - 1 ] ) cplat_path = ensure_crossplat_path ( ndirs ) #if trailing and not reached_end: if trailing and not reached_end : cplat_path = '.../' + cplat_path return cplat_path
r Shows only a little bit of the path . Up to the n bottom - level directories
329
18
8,980
def augpath ( path , augsuf = '' , augext = '' , augpref = '' , augdir = None , newext = None , newfname = None , ensure = False , prefix = None , suffix = None ) : if prefix is not None : augpref = prefix if suffix is not None : augsuf = suffix # Breakup path dpath , fname = split ( path ) fname_noext , ext = splitext ( fname ) if newfname is not None : fname_noext = newfname # Augment ext if newext is None : newext = ext # Augment fname new_fname = '' . join ( ( augpref , fname_noext , augsuf , newext , augext ) ) # Augment dpath if augdir is not None : new_dpath = join ( dpath , augdir ) if ensure : # create new dir if needebe ensuredir ( new_dpath ) else : new_dpath = dpath # Recombine into new path newpath = join ( new_dpath , new_fname ) return newpath
augments end of path before the extension .
250
9
8,981
def remove_files_in_dir ( dpath , fname_pattern_list = '*' , recursive = False , verbose = VERBOSE , dryrun = False , ignore_errors = False ) : if isinstance ( fname_pattern_list , six . string_types ) : fname_pattern_list = [ fname_pattern_list ] if verbose > 2 : print ( '[util_path] Removing files:' ) print ( ' * from dpath = %r ' % dpath ) print ( ' * with patterns = %r' % fname_pattern_list ) print ( ' * recursive = %r' % recursive ) num_removed , num_matched = ( 0 , 0 ) if not exists ( dpath ) : msg = ( '!!! dir = %r does not exist!' % dpath ) if verbose : print ( msg ) warnings . warn ( msg , category = UserWarning ) for root , dname_list , fname_list in os . walk ( dpath ) : for fname_pattern in fname_pattern_list : for fname in fnmatch . filter ( fname_list , fname_pattern ) : num_matched += 1 num_removed += remove_file ( join ( root , fname ) , ignore_errors = ignore_errors , dryrun = dryrun , verbose = verbose > 5 ) if not recursive : break if verbose > 0 : print ( '[util_path] ... Removed %d/%d files' % ( num_removed , num_matched ) ) return True
Removes files matching a pattern from a directory
339
9
8,982
def delete ( path , dryrun = False , recursive = True , verbose = None , print_exists = True , ignore_errors = True ) : if verbose is None : verbose = VERBOSE if not QUIET : verbose = 1 if verbose > 0 : print ( '[util_path] Deleting path=%r' % path ) exists_flag = exists ( path ) link_flag = islink ( path ) if not exists_flag and not link_flag : if print_exists and verbose : print ( '..does not exist!' ) flag = False else : rmargs = dict ( verbose = verbose > 1 , ignore_errors = ignore_errors , dryrun = dryrun ) if islink ( path ) : os . unlink ( path ) flag = True elif isdir ( path ) : # First remove everything in the directory flag = remove_files_in_dir ( path , recursive = recursive , * * rmargs ) # Then remove the directory itself flag = flag and remove_dirs ( path , * * rmargs ) elif isfile ( path ) : flag = remove_file ( path , * * rmargs ) else : raise ValueError ( 'Unknown type of path=%r' % ( path , ) ) if verbose > 0 : print ( '[util_path] Finished deleting path=%r' % path ) return flag
Removes a file directory or symlink
298
9
8,983
def remove_existing_fpaths ( fpath_list , verbose = VERBOSE , quiet = QUIET , strict = False , print_caller = PRINT_CALLER , lbl = 'files' ) : import utool as ut if print_caller : print ( util_dbg . get_caller_name ( range ( 1 , 4 ) ) + ' called remove_existing_fpaths' ) fpath_list_ = ut . filter_Nones ( fpath_list ) exists_list = list ( map ( exists , fpath_list_ ) ) if verbose : n_total = len ( fpath_list ) n_valid = len ( fpath_list_ ) n_exist = sum ( exists_list ) print ( '[util_path.remove_existing_fpaths] request delete of %d %s' % ( n_total , lbl ) ) if n_valid != n_total : print ( ( '[util_path.remove_existing_fpaths] ' 'trying to delete %d/%d non None %s ' ) % ( n_valid , n_total , lbl ) ) print ( ( '[util_path.remove_existing_fpaths] ' ' %d/%d exist and need to be deleted' ) % ( n_exist , n_valid ) ) existing_fpath_list = ut . compress ( fpath_list_ , exists_list ) return remove_fpaths ( existing_fpath_list , verbose = verbose , quiet = quiet , strict = strict , print_caller = False , lbl = lbl )
checks existance before removing . then tries to remove exisint paths
360
14
8,984
def remove_fpaths ( fpaths , verbose = VERBOSE , quiet = QUIET , strict = False , print_caller = PRINT_CALLER , lbl = 'files' ) : import utool as ut if print_caller : print ( util_dbg . get_caller_name ( range ( 1 , 4 ) ) + ' called remove_fpaths' ) n_total = len ( fpaths ) _verbose = ( not quiet and n_total > 0 ) or VERYVERBOSE if _verbose : print ( '[util_path.remove_fpaths] try removing %d %s' % ( n_total , lbl ) ) n_removed = 0 prog = ut . ProgIter ( fpaths , label = 'removing files' , enabled = verbose ) _iter = iter ( prog ) # Try to be fast at first try : for fpath in _iter : os . remove ( fpath ) n_removed += 1 except OSError as ex : # Buf if we fail put a try in the inner loop if VERYVERBOSE : print ( 'WARNING: Could not remove fpath = %r' % ( fpath , ) ) if strict : util_dbg . printex ( ex , 'Could not remove fpath = %r' % ( fpath , ) , iswarning = False ) raise for fpath in _iter : try : os . remove ( fpath ) n_removed += 1 except OSError as ex : if VERYVERBOSE : print ( 'WARNING: Could not remove fpath = %r' % ( fpath , ) ) if _verbose : print ( '[util_path.remove_fpaths] ... removed %d / %d %s' % ( n_removed , n_total , lbl ) ) return n_removed
Removes multiple file paths
410
5
8,985
def longest_existing_path ( _path ) : existing_path = _path while True : _path_new = os . path . dirname ( existing_path ) if exists ( _path_new ) : existing_path = _path_new break if _path_new == existing_path : print ( '!!! [utool] This is a very illformated path indeed.' ) existing_path = '' break existing_path = _path_new return existing_path
r Returns the longest root of _path that exists
101
10
8,986
def get_path_type ( path_ ) : path_type = '' if isfile ( path_ ) : path_type += 'file' if isdir ( path_ ) : path_type += 'directory' if islink ( path_ ) : path_type += 'link' if ismount ( path_ ) : path_type += 'mount' return path_type
r returns if a path is a file directory link or mount
80
12
8,987
def checkpath ( path_ , verbose = VERYVERBOSE , n = None , info = VERYVERBOSE ) : assert isinstance ( path_ , six . string_types ) , ( 'path_=%r is not a string. type(path_) = %r' % ( path_ , type ( path_ ) ) ) path_ = normpath ( path_ ) if sys . platform . startswith ( 'win32' ) : # convert back to windows style path if using unix style if path_ . startswith ( '\\' ) : dirs = path_ . split ( '\\' ) if len ( dirs ) > 1 and len ( dirs [ 0 ] ) == 0 and len ( dirs [ 1 ] ) == 1 : dirs [ 1 ] = dirs [ 1 ] . upper ( ) + ':' path_ = '\\' . join ( dirs [ 1 : ] ) does_exist = exists ( path_ ) if verbose : #print_('[utool] checkpath(%r)' % (path_)) pretty_path = path_ndir_split ( path_ , n ) caller_name = util_dbg . get_caller_name ( allow_genexpr = False ) print ( '[%s] checkpath(%r)' % ( caller_name , pretty_path ) ) if does_exist : path_type = get_path_type ( path_ ) #path_type = 'file' if isfile(path_) else 'directory' print ( '[%s] ...(%s) exists' % ( caller_name , path_type , ) ) else : print ( '[%s] ... does not exist' % ( caller_name ) ) if not does_exist and info : #print('[util_path] ! Does not exist') _longest_path = longest_existing_path ( path_ ) _longest_path_type = get_path_type ( _longest_path ) print ( '[util_path] ... The longest existing path is: %r' % _longest_path ) print ( '[util_path] ... and has type %r' % ( _longest_path_type , ) ) return does_exist
r verbose wrapper around os . path . exists
486
10
8,988
def ensurepath ( path_ , verbose = None ) : if verbose is None : verbose = VERYVERBOSE return ensuredir ( path_ , verbose = verbose )
DEPRICATE - alias - use ensuredir instead
39
11
8,989
def ensuredir ( path_ , verbose = None , info = False , mode = 0o1777 ) : if verbose is None : verbose = VERYVERBOSE if isinstance ( path_ , ( list , tuple ) ) : path_ = join ( * path_ ) if HAVE_PATHLIB and isinstance ( path_ , pathlib . Path ) : path_ = str ( path_ ) if not checkpath ( path_ , verbose = verbose , info = info ) : if verbose : print ( '[util_path] mkdir(%r)' % path_ ) try : os . makedirs ( normpath ( path_ ) , mode = mode ) except OSError as ex : util_dbg . printex ( ex , 'check that the longest existing path ' 'is not a bad windows symlink.' , keys = [ 'path_' ] ) raise return path_
r Ensures that directory will exist . creates new dir with sticky bits by default
197
16
8,990
def touch ( fpath , times = None , verbose = True ) : try : if verbose : print ( '[util_path] touching %r' % fpath ) with open ( fpath , 'a' ) : os . utime ( fpath , times ) except Exception as ex : import utool utool . printex ( ex , 'touch %s' % fpath ) raise return fpath
r Creates file if it doesnt exist
87
8
8,991
def copy_list ( src_list , dst_list , lbl = 'Copying' , ioerr_ok = False , sherro_ok = False , oserror_ok = False ) : # Feb - 6 - 2014 Copy function task_iter = zip ( src_list , dst_list ) def docopy ( src , dst ) : try : shutil . copy2 ( src , dst ) except OSError : if ioerr_ok : return False raise except shutil . Error : if sherro_ok : return False raise except IOError : if ioerr_ok : return False raise return True progiter = util_progress . ProgIter ( task_iter , adjust = True , lbl = lbl ) success_list = [ docopy ( src , dst ) for ( src , dst ) in progiter ] return success_list
Copies all data and stat info
183
7
8,992
def glob ( dpath , pattern = None , recursive = False , with_files = True , with_dirs = True , maxdepth = None , exclude_dirs = [ ] , fullpath = True , * * kwargs ) : gen = iglob ( dpath , pattern , recursive = recursive , with_files = with_files , with_dirs = with_dirs , maxdepth = maxdepth , fullpath = fullpath , exclude_dirs = exclude_dirs , * * kwargs ) path_list = list ( gen ) return path_list
r Globs directory for pattern
125
6
8,993
def num_images_in_dir ( path ) : num_imgs = 0 for root , dirs , files in os . walk ( path ) : for fname in files : if fpath_has_imgext ( fname ) : num_imgs += 1 return num_imgs
returns the number of images in a directory
64
9
8,994
def fpath_has_ext ( fname , exts , case_sensitive = False ) : fname_ = fname . lower ( ) if not case_sensitive else fname if case_sensitive : ext_pats = [ '*' + ext for ext in exts ] else : ext_pats = [ '*' + ext . lower ( ) for ext in exts ] return any ( [ fnmatch . fnmatch ( fname_ , pat ) for pat in ext_pats ] )
returns true if the filename has any of the given extensions
109
12
8,995
def get_modpath ( modname , prefer_pkg = False , prefer_main = False ) : import importlib if isinstance ( modname , six . string_types ) : module = importlib . import_module ( modname ) else : module = modname # Hack modpath = module . __file__ . replace ( '.pyc' , '.py' ) initname = '__init__.py' mainname = '__main__.py' if prefer_pkg : if modpath . endswith ( initname ) or modpath . endswith ( mainname ) : modpath = dirname ( modpath ) # modpath = modpath[:-len(initname)] if prefer_main : if modpath . endswith ( initname ) : main_modpath = modpath [ : - len ( initname ) ] + mainname if exists ( main_modpath ) : modpath = main_modpath #modname = modname.replace('.__init__', '').strip() #module_dir = get_module_dir(module) return modpath
r Returns path to module
235
5
8,996
def get_relative_modpath ( module_fpath ) : modsubdir_list = get_module_subdir_list ( module_fpath ) _ , ext = splitext ( module_fpath ) rel_modpath = join ( * modsubdir_list ) + ext rel_modpath = ensure_crossplat_path ( rel_modpath ) return rel_modpath
Returns path to module relative to the package root
86
9
8,997
def get_modname_from_modpath ( module_fpath ) : modsubdir_list = get_module_subdir_list ( module_fpath ) modname = '.' . join ( modsubdir_list ) modname = modname . replace ( '.__init__' , '' ) . strip ( ) modname = modname . replace ( '.__main__' , '' ) . strip ( ) return modname
returns importable name from file path
94
8
8,998
def ls ( path , pattern = '*' ) : path_iter = glob ( path , pattern , recursive = False ) return sorted ( list ( path_iter ) )
like unix ls - lists all files and dirs in path
36
13
8,999
def ls_moduledirs ( path , private = True , full = True ) : dir_list = ls_dirs ( path ) module_dir_iter = filter ( is_module_dir , dir_list ) if not private : module_dir_iter = filterfalse ( is_private_module , module_dir_iter ) if not full : module_dir_iter = map ( basename , module_dir_iter ) return list ( module_dir_iter )
lists all dirs which are python modules in path
102
10