idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
8,800
def _logout ( self , reset = True ) : url = '{base}/client/auth/logout' . format ( base = self . base_url ) response = self . _session . get ( url , params = self . _parameters ) if response . ok : if reset : self . _reset ( ) return True else : return False
Log out of the API .
76
6
8,801
def _state ( self ) : state = { } required_keys = ( 'deviceStatusInfo' , 'gasUsage' , 'powerUsage' , 'thermostatInfo' , 'thermostatStates' ) try : for _ in range ( self . _state_retries ) : state . update ( self . _get_data ( '/client/auth/retrieveToonState' ) ) except TypeError : self . _logger . exception ( 'Could not get answer from service.' ) message = ( 'Updating internal state with retrieved ' 'state:{state}' ) . format ( state = state ) self . _logger . debug ( message ) self . _state_ . update ( state ) if not all ( [ key in self . _state_ . keys ( ) for key in required_keys ] ) : raise IncompleteResponse ( state ) return self . _state_
The internal state of the object .
190
7
8,802
def get_smokedetector_by_name ( self , name ) : return next ( ( smokedetector for smokedetector in self . smokedetectors if smokedetector . name . lower ( ) == name . lower ( ) ) , None )
Retrieves a smokedetector object by its name
55
11
8,803
def get_light_by_name ( self , name ) : return next ( ( light for light in self . lights if light . name . lower ( ) == name . lower ( ) ) , None )
Retrieves a light object by its name
43
9
8,804
def get_smartplug_by_name ( self , name ) : return next ( ( plug for plug in self . smartplugs if plug . name . lower ( ) == name . lower ( ) ) , None )
Retrieves a smartplug object by its name
46
10
8,805
def get_thermostat_state_by_name ( self , name ) : self . _validate_thermostat_state_name ( name ) return next ( ( state for state in self . thermostat_states if state . name . lower ( ) == name . lower ( ) ) , None )
Retrieves a thermostat state object by its assigned name
67
13
8,806
def get_thermostat_state_by_id ( self , id_ ) : return next ( ( state for state in self . thermostat_states if state . id == id_ ) , None )
Retrieves a thermostat state object by its id
45
12
8,807
def thermostat_state ( self ) : current_state = self . thermostat_info . active_state state = self . get_thermostat_state_by_id ( current_state ) if not state : self . _logger . debug ( 'Manually set temperature, no Thermostat ' 'State chosen!' ) return state
The state of the thermostat programming
75
8
8,808
def thermostat_state ( self , name ) : self . _validate_thermostat_state_name ( name ) id_ = next ( ( key for key in STATES . keys ( ) if STATES [ key ] . lower ( ) == name . lower ( ) ) , None ) data = copy . copy ( self . _parameters ) data . update ( { 'state' : 2 , 'temperatureState' : id_ } ) response = self . _get_data ( '/client/auth/schemeState' , data ) self . _logger . debug ( 'Response received {}' . format ( response ) ) self . _clear_cache ( )
Changes the thermostat state to the one passed as an argument as name
143
15
8,809
def thermostat ( self , temperature ) : target = int ( temperature * 100 ) data = copy . copy ( self . _parameters ) data . update ( { 'value' : target } ) response = self . _get_data ( '/client/auth/setPoint' , data ) self . _logger . debug ( 'Response received {}' . format ( response ) ) self . _clear_cache ( )
A temperature to set the thermostat to . Requires a float .
89
14
8,810
def euler_tour_dfs ( G , source = None ) : if source is None : # produce edges for all components nodes = G else : # produce edges for components with source nodes = [ source ] yielder = [ ] visited = set ( ) for start in nodes : if start in visited : continue visited . add ( start ) stack = [ ( start , iter ( G [ start ] ) ) ] while stack : parent , children = stack [ - 1 ] try : child = next ( children ) if child not in visited : # yielder += [[parent, child]] yielder += [ parent ] visited . add ( child ) stack . append ( ( child , iter ( G [ child ] ) ) ) except StopIteration : if stack : last = stack [ - 1 ] yielder += [ last [ 0 ] ] stack . pop ( ) return yielder
adaptation of networkx dfs
182
7
8,811
def reroot ( self , s ) : # Splice out the first part of the sequence ending with the occurrence before os # remove its first occurrence (or), o_s1 = self . first_lookup [ s ] splice1 = self . tour [ 1 : o_s1 ] rest = self . tour [ o_s1 + 1 : ] new_tour = [ s ] + rest + splice1 + [ s ] new_tree = TestETT . from_tour ( new_tour , fast = self . fast ) return new_tree
s = 3 s = B
122
6
8,812
def remove_edge ( self , u , v ) : # Remove (u, v) from represented graph print ( 'Dynamically removing uv=(%r, %r)' % ( u , v ) ) self . graph . remove_edge ( u , v ) e = ( u , v ) # Remove edge e = (u, v) from all graphs. if not self . forests [ 0 ] . has_edge ( u , v ) : # If (u, v) is a non-tree edge, simply delete it. # Nothing else to do. return # If (u, v) is a tree edge we delete it and search for a replacement. # Delete from all higher levels for i in reversed ( range ( 0 , self . level [ e ] + 1 ) ) : self . forests [ i ] . remove_edge ( u , v ) # Determine if another edge that connects u and v exists. # (This must be an edge r, level[r] <= level[e]) # (Find max possible level[r] <= level[e]) for i in reversed ( range ( 0 , self . level [ e ] + 1 ) ) : # Tu != Tw b/c (u, v) was just deleted from all forests Tu = self . forests [ i ] . subtree ( u ) print ( 'Tu = %r' % ( list ( Tu . nodes ( ) ) , ) ) Tv = self . forests [ i ] . subtree ( v ) print ( 'Tv = %r' % ( list ( Tv . nodes ( ) ) , ) ) # Relabel so len(Tu) <= len(Tv) # This ensures len(Tu) < 2 ** (floor(log(n)) - i) if len ( Tu ) > len ( Tv ) : Tu , Tv = Tv , Tu # Note len(Tu) <= 2 * (len(Tu) + len(Tv) + 1) # We can afford to push all of Tu's edges to the next level and # still preserve invariant 1. seen_ = set ( [ ] ) for x in Tu . nodes ( ) : # Visit all edges INCIDENT (in real graph) to nodes in Tu. # This lets us find non-tree edges to make a tree edge seen_ . add ( x ) for y in self . graph . neighbors ( x ) : if y in seen_ : continue # print('Check replacement edge xy=(%r, %r)' % (x, y)) if y in Tv : print ( '* Found replacement xy=(%r, %r)' % ( x , y ) ) # edge (x, y) is a replacement edge. # add (x, y) to prev forests F[0:i+1] # This is the only place edges are added to forets of # higher levels. if len ( self . forests ) == i + 1 : self . forests . append ( DummyEulerTourForest ( self . graph . nodes ( ) ) ) for j in range ( 0 , i + 2 ) : print ( '* Add replacment to F[j=%r]' % ( j , ) ) # Need euler tree augmentation for outgoing level edges self . forests [ j ] . add_edge ( x , y ) return else : print ( '* Charging xy=(%r, %r)' % ( x , y ) ) # charge --- add (x, y) to next level # this pays for our search in an amortized sense # (ie, the next search at this level wont consider this) if len ( self . forests ) == i + 1 : self . forests . append ( DummyEulerTourForest ( self . graph . nodes ( ) ) ) if self . forests [ i ] . has_edge ( x , y ) : self . forests [ i + 1 ] . add_edge ( x , y ) # # assert False, 'we got it, should add it?' self . level [ ( x , y ) ] = i + 1
Using notation where 0 is top level
861
7
8,813
def extend_regex2 ( regexpr , reflags = 0 ) : regexpr = extend_regex ( regexpr ) IGNORE_CASE_PREF = '\\c' if regexpr . startswith ( IGNORE_CASE_PREF ) : # hack for vim-like ignore case regexpr = regexpr [ len ( IGNORE_CASE_PREF ) : ] reflags = reflags | re . IGNORECASE return regexpr , reflags
also preprocesses flags
107
5
8,814
def named_field ( key , regex , vim = False ) : if key is None : #return regex return r'(%s)' % ( regex , ) if vim : return r'\(%s\)' % ( regex ) else : return r'(?P<%s>%s)' % ( key , regex )
Creates a named regex group that can be referend via a backref . If key is None the backref is referenced by number .
71
28
8,815
def regex_replace ( regex , repl , text ) : return re . sub ( regex , repl , text , * * RE_KWARGS )
r thin wrapper around re . sub regex_replace
31
10
8,816
def clear ( prompt = True , cache = None ) : cache = cache or config . cache ( ) if prompt : answer = input ( 'Clear library cache files in %s/? (yN) ' % cache ) if not answer . startswith ( 'y' ) : return False shutil . rmtree ( cache , ignore_errors = True ) return True
Clear loady s cache .
77
6
8,817
def create ( gitpath , cache = None ) : if gitpath . startswith ( config . LIBRARY_PREFIX ) : path = gitpath [ len ( config . LIBRARY_PREFIX ) : ] return Library ( * path . split ( '/' ) , cache = cache )
Create a Library from a git path .
65
8
8,818
def load ( self ) : if not git : raise EnvironmentError ( MISSING_GIT_ERROR ) if os . path . exists ( self . path ) : if not config . CACHE_DISABLE : return shutil . rmtree ( self . path , ignore_errors = True ) with files . remove_on_exception ( self . path ) : url = self . GIT_URL . format ( * * vars ( self ) ) repo = git . Repo . clone_from ( url = url , to_path = self . path , b = self . branch ) if self . commit : repo . head . reset ( self . commit , index = True , working_tree = True )
Load the library .
151
4
8,819
def check_existens_of_staging_tag_in_remote_repo ( ) : staging_tag = Git . create_git_version_tag ( APISettings . GIT_STAGING_PRE_TAG ) command_git = 'git ls-remote -t' command_awk = 'awk \'{print $2}\'' command_cut_1 = 'cut -d \'/\' -f 3' command_cut_2 = 'cut -d \'^\' -f 1' command_sort = 'sort -b -t . -k 1,1nr -k 2,2nr -k 3,3r -k 4,4r -k 5,5r' command_uniq = 'uniq' command = command_git + ' | ' + command_awk + ' | ' + command_cut_1 + ' | ' + command_cut_2 + ' | ' + command_sort + ' | ' + command_uniq list_of_tags = str ( check_output ( command , shell = True ) ) if staging_tag in list_of_tags : return True return False
This method will check if the given tag exists as a staging tag in the remote repository .
245
18
8,820
def __debug ( command , dry = False ) : if dry : command . append ( '--dry-run' ) Shell . debug ( command ) if dry : call ( command ) exit ( 1 )
This method will be called if the debug mode is on .
42
12
8,821
def __git_add ( args = '' ) : command = [ 'git' , 'add' , '.' ] Shell . msg ( 'Adding files...' ) if APISettings . DEBUG : Git . __debug ( command , True ) for key in args : command . append ( key ) if not call ( command ) : pass return False
Add files to staging . The function call will return 0 if the command success .
72
16
8,822
def __git_commit ( git_tag ) : Shell . msg ( 'Commit changes.' ) if APISettings . DEBUG : Shell . debug ( 'Execute "git commit" in dry mode.' ) if not call ( [ 'git' , 'commit' , '-m' , '\'' + git_tag + '\'' , '--dry-run' ] ) : pass return True if not call ( [ 'git' , 'commit' , '-m' , '\'' + git_tag + '\'' ] ) : return True return False
Commit files to branch . The function call will return 0 if the command success .
122
17
8,823
def __git_tag ( git_tag ) : command = [ 'git' , 'tag' , '-a' , git_tag , '-m' , '\'' + git_tag + '\'' ] Shell . msg ( 'Create tag from version ' + git_tag ) if APISettings . DEBUG : Git . __debug ( command , False ) if not call ( command ) : return True return False
Create new tag . The function call will return 0 if the command success .
90
15
8,824
def __git_tag_push ( ) : command = [ 'git' , 'push' , 'origin' , '--tags' ] Shell . msg ( 'Pushing tags...' ) if APISettings . DEBUG : Git . __debug ( command , True ) if not call ( command ) : return True return False
Push all tags . The function call will return 0 if the command success .
68
15
8,825
def split_into_batches ( input_list , batch_size , batch_storage_dir , checkpoint = False ) : if checkpoint and not os . path . exists ( batch_storage_dir ) : os . mkdir ( batch_storage_dir ) batches = [ { 'index' : batch_index , 'data' : input_list [ start_index : start_index + batch_size ] , 'input_filename' : os . path . join ( batch_storage_dir , 'batch-{:05d}-input.pickle' . format ( batch_index ) ) , 'result_filename' : os . path . join ( batch_storage_dir , 'batch-{:05d}-output.pickle' . format ( batch_index ) ) , } for batch_index , start_index in enumerate ( range ( 0 , len ( input_list ) , batch_size ) ) ] if checkpoint : for batch in batches : save ( batch [ 'data' ] , batch [ 'input_filename' ] ) return batches
Break the input data into smaller batches optionally saving each one to disk .
228
14
8,826
def map_batch_parallel ( input_list , batch_size , item_mapper = None , batch_mapper = None , flatten = True , n_jobs = - 1 , * * kwargs ) : # We must specify either how to process each batch or how to process each item. if item_mapper is None and batch_mapper is None : raise ValueError ( 'You should specify either batch_mapper or item_mapper.' ) if batch_mapper is None : batch_mapper = _default_batch_mapper batches = split_into_batches ( input_list , batch_size , batch_storage_dir = '' ) all_batch_results = Parallel ( n_jobs = n_jobs , * * kwargs ) ( delayed ( batch_mapper ) ( batch [ 'data' ] , item_mapper ) for batch in progressbar ( batches , desc = 'Batches' , total = len ( batches ) , file = sys . stdout , ) ) # Unwrap the individual batch results if necessary. if flatten : final_result = [ ] for batch_result in all_batch_results : final_result . extend ( batch_result ) else : final_result = all_batch_results return final_result
Split the data into batches and process each batch in its own thread .
276
14
8,827
def get_cfg ( ast_func ) : cfg_func = cfg . Function ( ) for ast_var in ast_func . input_variable_list : cfg_var = cfg_func . get_variable ( ast_var . name ) cfg_func . add_input_variable ( cfg_var ) for ast_var in ast_func . output_variable_list : cfg_var = cfg_func . get_variable ( ast_var . name ) cfg_func . add_output_variable ( cfg_var ) bb_start = cfg . BasicBlock ( ) cfg_func . add_basic_block ( bb_start ) for stmt in ast_func . body : bb_temp = bb_start bb_temp = process_cfg ( stmt , bb_temp , cfg_func ) cfg_func . clean_up ( ) cfg_func . add_summary ( ast_func . summary ) return cfg_func
Traverses the AST and returns the corresponding CFG
222
11
8,828
def overrideable_partial ( func , * args , * * default_kwargs ) : import functools @ functools . wraps ( func ) def partial_wrapper ( * given_args , * * given_kwargs ) : kwargs = default_kwargs . copy ( ) kwargs . update ( given_kwargs ) return func ( * ( args + given_args ) , * * kwargs ) return partial_wrapper
like partial but given kwargs can be overrideden at calltime
95
15
8,829
def get_nonconflicting_string ( base_fmtstr , conflict_set , offset = 0 ) : # Infinite loop until we find a non-conflict conflict_set_ = set ( conflict_set ) for count in it . count ( offset ) : base_str = base_fmtstr % count if base_str not in conflict_set_ : return base_str
gets a new string that wont conflict with something that already exists
83
12
8,830
def get_nonconflicting_path_old ( base_fmtstr , dpath , offset = 0 ) : import utool as ut from os . path import basename pattern = '*' dname_list = ut . glob ( dpath , pattern , recursive = False , with_files = True , with_dirs = True ) conflict_set = set ( [ basename ( dname ) for dname in dname_list ] ) newname = ut . get_nonconflicting_string ( base_fmtstr , conflict_set , offset = offset ) newpath = join ( dpath , newname ) return newpath
r base_fmtstr must have a %d in it
139
13
8,831
def are_you_sure ( msg = '' ) : print ( msg ) from utool import util_arg from utool import util_str override = util_arg . get_argflag ( ( '--yes' , '--y' , '-y' ) ) if override : print ( 'accepting based on command line flag' ) return True valid_ans = [ 'yes' , 'y' ] valid_prompt = util_str . conj_phrase ( valid_ans , 'or' ) ans = input ( 'Are you sure?\n Enter %s to accept\n' % valid_prompt ) return ans . lower ( ) in valid_ans
r Prompts user to accept or checks command line for - y
144
13
8,832
def grace_period ( msg = '' , seconds = 10 ) : import time print ( msg ) override = util_arg . get_argflag ( ( '--yes' , '--y' , '-y' ) ) print ( 'starting grace period' ) if override : print ( 'ending based on command line flag' ) return True for count in reversed ( range ( 1 , seconds + 1 ) ) : time . sleep ( 1 ) print ( '%d' % ( count , ) ) print ( '%d' % ( 0 , ) ) print ( 'grace period is over' ) return True
Gives user a window to stop a process before it happens
129
12
8,833
def delayed_retry_gen ( delay_schedule = [ .1 , 1 , 10 ] , msg = None , timeout = None , raise_ = True ) : import utool as ut import time if not ut . isiterable ( delay_schedule ) : delay_schedule = [ delay_schedule ] tt = ut . tic ( ) # First attempt is immediate yield 0 for count in it . count ( 0 ) : #print('count = %r' % (count,)) if timeout is not None and ut . toc ( tt ) > timeout : if raise_ : raise Exception ( 'Retry loop timed out' ) else : raise StopIteration ( 'Retry loop timed out' ) index = min ( count , len ( delay_schedule ) - 1 ) delay = delay_schedule [ index ] time . sleep ( delay ) yield count + 1
template code for a infinte retry loop
189
10
8,834
def get_stats_str ( list_ = None , newlines = False , keys = None , exclude_keys = [ ] , lbl = None , precision = None , axis = 0 , stat_dict = None , use_nan = False , align = False , use_median = False , * * kwargs ) : from utool . util_str import repr4 import utool as ut # Get stats dict if stat_dict is None : stat_dict = get_stats ( list_ , axis = axis , use_nan = use_nan , use_median = use_median ) else : stat_dict = stat_dict . copy ( ) # Keep only included keys if specified if keys is not None : for key in list ( six . iterkeys ( stat_dict ) ) : if key not in keys : del stat_dict [ key ] # Remove excluded keys for key in exclude_keys : if key in stat_dict : del stat_dict [ key ] # apply precision statstr_dict = stat_dict . copy ( ) #precisionless_types = (bool,) + six.string_types if precision is not None : assert ut . is_int ( precision ) , 'precision must be an integer' float_fmtstr = '%.' + str ( precision ) + 'f' for key in list ( six . iterkeys ( statstr_dict ) ) : val = statstr_dict [ key ] isfloat = ut . is_float ( val ) if not isfloat and isinstance ( val , list ) : type_list = list ( map ( type , val ) ) if len ( type_list ) > 0 and ut . allsame ( type_list ) : if ut . is_float ( val [ 0 ] ) : isfloat = True val = np . array ( val ) if isfloat : if isinstance ( val , np . ndarray ) : strval = str ( [ float_fmtstr % v for v in val ] ) . replace ( '\'' , '' ) . lstrip ( 'u' ) #np.array_str((val), precision=precision) else : strval = float_fmtstr % val if not strval . startswith ( '0' ) : strval = strval . rstrip ( '0' ) strval = strval . rstrip ( '.' ) statstr_dict [ key ] = strval else : if isinstance ( val , np . ndarray ) : strval = repr ( val . tolist ( ) ) else : strval = str ( val ) statstr_dict [ key ] = strval # format the dictionary string stat_str = repr4 ( statstr_dict , strvals = True , newlines = newlines ) # add a label if requested if lbl is True : lbl = ut . get_varname_from_stack ( list_ , N = 1 ) # fancy if lbl is not None : stat_str = 'stats_' + lbl + ' = ' + stat_str if align : stat_str = ut . align ( stat_str , ':' ) return stat_str
Returns the string version of get_stats
677
8
8,835
def make_call_graph ( func , * args , * * kwargs ) : from pycallgraph import PyCallGraph from pycallgraph . output import GraphvizOutput with PyCallGraph ( output = GraphvizOutput ) : func ( * args , * * kwargs )
profile with pycallgraph
62
5
8,836
def _memory_profile ( with_gc = False ) : import utool as ut if with_gc : garbage_collect ( ) import guppy hp = guppy . hpy ( ) print ( '[hpy] Waiting for heap output...' ) heap_output = hp . heap ( ) print ( heap_output ) print ( '[hpy] total heap size: ' + ut . byte_str2 ( heap_output . size ) ) ut . util_resources . memstats ( )
Helper for memory debugging . Mostly just a namespace where I experiment with guppy and heapy .
104
19
8,837
def make_object_graph ( obj , fpath = 'sample_graph.png' ) : import objgraph objgraph . show_most_common_types ( ) #print(objgraph.by_type('ndarray')) #objgraph.find_backref_chain( # random.choice(objgraph.by_type('ndarray')), # objgraph.is_proper_module) objgraph . show_refs ( [ obj ] , filename = 'ref_graph.png' ) objgraph . show_backrefs ( [ obj ] , filename = 'backref_graph.png' )
memoryprofile with objgraph
134
5
8,838
def inverable_unique_two_lists ( item1_list , item2_list ) : import utool as ut unique_list1 , inverse1 = np . unique ( item1_list , return_inverse = True ) unique_list2 , inverse2 = np . unique ( item2_list , return_inverse = True ) flat_stacked , cumsum = ut . invertible_flatten2 ( ( unique_list1 , unique_list2 ) ) flat_unique , inverse3 = np . unique ( flat_stacked , return_inverse = True ) reconstruct_tup = ( inverse3 , cumsum , inverse2 , inverse1 ) return flat_unique , reconstruct_tup
item1_list = aid1_list item2_list = aid2_list
156
18
8,839
def uninvert_unique_two_lists ( flat_list , reconstruct_tup ) : import utool as ut ( inverse3 , cumsum , inverse2 , inverse1 ) = reconstruct_tup flat_stacked_ = ut . take ( flat_list , inverse3 ) unique_list1_ , unique_list2_ = ut . unflatten2 ( flat_stacked_ , cumsum ) res_list1_ = ut . take ( unique_list1_ , inverse1 ) res_list2_ = ut . take ( unique_list2_ , inverse2 ) return res_list1_ , res_list2_
flat_list = thumb_list
139
7
8,840
def search_module ( mod , pat , ignore_case = True , recursive = False , _seen = None ) : if _seen is not None and mod in _seen : return [ ] import utool as ut reflags = re . IGNORECASE * ignore_case found_list = [ name for name in dir ( mod ) if re . search ( pat , name , flags = reflags ) ] if recursive : if _seen is None : _seen = set ( ) _seen . add ( mod ) module_attrs = [ getattr ( mod , name ) for name in dir ( mod ) ] submodules = [ submod for submod in module_attrs if isinstance ( submod , types . ModuleType ) and submod not in _seen and ut . is_defined_by_module ( submod , mod ) ] for submod in submodules : found_list += search_module ( submod , pat , ignore_case = ignore_case , recursive = recursive , _seen = _seen ) # found_list = [name for name in dir(mod) if name.find(pat) >= 0] found_list = ut . unique_ordered ( found_list ) return found_list
r Searches module functions classes and constants for members matching a pattern .
261
14
8,841
def instancelist ( obj_list , check = False , shared_attrs = None ) : class InstanceList_ ( object ) : def __init__ ( self , obj_list , shared_attrs = None ) : self . _obj_list = [ ] self . _shared_public_attrs = [ ] self . _example_type = None if len ( obj_list ) > 0 : import utool as ut self . _obj_list = obj_list example_obj = obj_list [ 0 ] example_type = type ( example_obj ) self . _example_type = example_type if shared_attrs is None : if check : attrsgen = [ set ( dir ( obj ) ) for obj in obj_list ] shared_attrs = list ( reduce ( set . intersection , attrsgen ) ) else : shared_attrs = dir ( example_obj ) #allowed = ['__getitem__'] # TODO, put in metaclass allowed = [ ] self . _shared_public_attrs = [ a for a in shared_attrs if a in allowed or not a . startswith ( '_' ) ] for attrname in self . _shared_public_attrs : attrtype = getattr ( example_type , attrname , None ) if attrtype is not None and isinstance ( attrtype , property ) : # need to do this as metaclass setattr ( InstanceList_ , attrname , property ( self . _define_prop ( attrname ) ) ) else : func = self . _define_func ( attrname ) ut . inject_func_as_method ( self , func , attrname ) def __nice__ ( self ) : if self . _example_type is None : typename = 'object' else : typename = self . _example_type . __name__ return 'of %d %s(s)' % ( len ( self . _obj_list ) , typename ) def __repr__ ( self ) : classname = self . __class__ . __name__ devnice = self . __nice__ ( ) return '<%s(%s) at %s>' % ( classname , devnice , hex ( id ( self ) ) ) def __str__ ( self ) : classname = self . __class__ . __name__ devnice = self . __nice__ ( ) return '<%s(%s)>' % ( classname , devnice ) def __getitem__ ( self , key ) : # TODO, put in metaclass return self . _map_method ( '__getitem__' , key ) def _define_func ( self , attrname ) : import utool as ut def _wrapper ( self , * args , * * kwargs ) : return self . _map_method ( attrname , * args , * * kwargs ) ut . set_funcname ( _wrapper , attrname ) return _wrapper def _map_method ( self , attrname , * args , * * kwargs ) : mapped_vals = [ getattr ( obj , attrname ) ( * args , * * kwargs ) for obj in self . _obj_list ] return mapped_vals def _define_prop ( self , attrname ) : import utool as ut def _getter ( self ) : return self . _map_property ( attrname ) ut . set_funcname ( _getter , 'get_' + attrname ) return _getter def _map_property ( self , attrname ) : mapped_vals = [ getattr ( obj , attrname ) for obj in self . _obj_list ] return mapped_vals return InstanceList_ ( obj_list , shared_attrs )
Executes methods and attribute calls on a list of objects of the same type
831
15
8,842
def _heappush_max ( heap , item ) : heap . append ( item ) heapq . _siftdown_max ( heap , 0 , len ( heap ) - 1 )
why is this not in heapq
40
7
8,843
def take_column ( self , keys , * extra_keys ) : import utool as ut keys = ut . ensure_iterable ( keys ) + list ( extra_keys ) key_to_list = ut . dict_subset ( self . _key_to_list , keys ) newself = self . __class__ ( key_to_list , self . _meta . copy ( ) ) return newself
Takes a subset of columns
89
6
8,844
def take ( self , idxs ) : import utool as ut if False : key_to_list = ut . odict ( [ ( key , ut . take ( val , idxs ) ) for key , val in six . iteritems ( self . _key_to_list ) ] ) else : import numpy as np key_to_list = ut . odict ( [ ( key , ut . take ( val , idxs ) ) if not isinstance ( val , np . ndarray ) else val . take ( idxs , axis = 0 ) for key , val in six . iteritems ( self . _key_to_list ) ] ) newself = self . __class__ ( key_to_list , self . _meta . copy ( ) ) return newself
Takes a subset of rows
168
6
8,845
def remove ( self , idxs ) : import utool as ut keep_idxs = ut . index_complement ( idxs , len ( self ) ) return self . take ( keep_idxs )
Returns a copy with idxs removed
44
7
8,846
def group_items ( self , labels ) : import utool as ut unique_labels , groups = self . group ( labels ) label_to_group = ut . odict ( zip ( unique_labels , groups ) ) return label_to_group
group as dict
55
3
8,847
def group ( self , labels ) : unique_labels , groupxs = self . group_indicies ( labels ) groups = [ self . take ( idxs ) for idxs in groupxs ] return unique_labels , groups
group as list
50
3
8,848
def cast_column ( self , keys , func ) : import utool as ut for key in ut . ensure_iterable ( keys ) : self [ key ] = [ func ( v ) for v in self [ key ] ]
like map column but applies values inplace
48
8
8,849
def merge_rows ( self , key , merge_scalars = True ) : import utool as ut unique_labels , groupxs = self . group_indicies ( key ) single_xs = [ xs for xs in groupxs if len ( xs ) == 1 ] multi_xs = [ xs for xs in groupxs if len ( xs ) > 1 ] singles = self . take ( ut . flatten ( single_xs ) ) multis = [ self . take ( idxs ) for idxs in multi_xs ] merged_groups = [ ] for group in multis : newgroup = { } for key_ in group . keys ( ) : val = group [ key_ ] if key_ == key : # key_ was garuenteed unique val_ = val [ 0 ] elif hasattr ( val [ 0 ] . __class__ , 'union' ) : # HACK # Sets are unioned val_ = ut . oset . union ( * val ) elif isinstance ( val [ 0 ] , ( ut . oset , ) ) : # Sets are unioned val_ = ut . oset . union ( * val ) elif isinstance ( val [ 0 ] , ( set ) ) : # Sets are unioned val_ = set . union ( * val ) elif isinstance ( val [ 0 ] , ( tuple , list ) ) : # Lists are merged together val_ = ut . flatten ( val ) #val_ = ut.unique(ut.flatten(val)) else : if ut . allsame ( val ) : # Merge items that are the same val_ = val [ 0 ] else : if merge_scalars : # If mergeing scalars is ok, then # Values become lists if they are different val_ = val else : if True : # If there is only one non-none value then use that. other_vals = ut . filter_Nones ( val ) if len ( other_vals ) == 1 : val_ = val [ 0 ] else : raise ValueError ( 'tried to merge a scalar in %r, val=%r' % ( key_ , val ) ) else : # If merging scalars is not ok, then # we must raise an error raise ValueError ( 'tried to merge a scalar in %r, val=%r' % ( key_ , val ) ) newgroup [ key_ ] = [ val_ ] merged_groups . append ( ut . ColumnLists ( newgroup ) ) merged_multi = self . __class__ . flatten ( merged_groups ) merged = singles + merged_multi return merged
Uses key as a unique index an merges all duplicates rows . Use cast_column to modify types of columns before merging to affect behavior of duplicate rectification .
566
34
8,850
def peek ( self ) : # Ammortized O(1) _heap = self . _heap _dict = self . _dict val , key = _heap [ 0 ] # Remove items marked for lazy deletion as they are encountered while key not in _dict or _dict [ key ] != val : self . _heappop ( _heap ) val , key = _heap [ 0 ] return key , val
Peek at the next item in the queue
91
9
8,851
def peek_many ( self , n ) : if n == 0 : return [ ] elif n == 1 : return [ self . peek ( ) ] else : items = list ( self . pop_many ( n ) ) self . update ( items ) return items
Actually this can be quite inefficient
55
6
8,852
def pop ( self , key = util_const . NoParam , default = util_const . NoParam ) : # Dictionary pop if key is specified if key is not util_const . NoParam : if default is util_const . NoParam : return ( key , self . _dict . pop ( key ) ) else : return ( key , self . _dict . pop ( key , default ) ) # Otherwise do a heap pop try : # Ammortized O(1) _heap = self . _heap _dict = self . _dict val , key = self . _heappop ( _heap ) # Remove items marked for lazy deletion as they are encountered while key not in _dict or _dict [ key ] != val : val , key = self . _heappop ( _heap ) except IndexError : if len ( _heap ) == 0 : raise IndexError ( 'queue is empty' ) else : raise del _dict [ key ] return key , val
Pop the next item off the queue
209
7
8,853
def __execute_fromimport ( module , modname , import_tuples , verbose = False ) : if verbose : print ( '[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES' % ( len ( import_tuples ) , ) ) from_imports = __get_from_imports ( import_tuples ) for name , fromlist in from_imports : full_modname = '.' . join ( ( modname , name ) ) tmp = __import__ ( full_modname , globals ( ) , locals ( ) , fromlist = fromlist , level = 0 ) for attrname in fromlist : setattr ( module , attrname , getattr ( tmp , attrname ) ) return from_imports
Module From Imports
170
4
8,854
def _initstr ( modname , imports , from_imports , inject_execstr , withheader = True ) : header = _make_module_header ( ) if withheader else '' import_str = _make_imports_str ( imports , modname ) fromimport_str = _make_fromimport_str ( from_imports , modname ) initstr = '\n' . join ( [ str_ for str_ in [ header , import_str , fromimport_str , inject_execstr , ] if len ( str_ ) > 0 ] ) return initstr
Calls the other string makers
127
6
8,855
def _inject_execstr ( modname , import_tuples ) : if modname == 'utool' : # Special case import of the util_inject module injecter = 'util_inject' injecter_import = '' else : # Normal case implicit import of util_inject injecter_import = 'import utool' injecter = 'utool' injectstr_fmt = textwrap . dedent ( r''' # STARTBLOCK {injecter_import} print, rrr, profile = {injecter}.inject2(__name__, '[{modname}]') def reassign_submodule_attributes(verbose=1): """ Updates attributes in the __init__ modules with updated attributes in the submodules. """ import sys if verbose and '--quiet' not in sys.argv: print('dev reimport') # Self import import {modname} # Implicit reassignment. seen_ = set([]) for tup in IMPORT_TUPLES: if len(tup) > 2 and tup[2]: continue # dont import package names submodname, fromimports = tup[0:2] submod = getattr({modname}, submodname) for attr in dir(submod): if attr.startswith('_'): continue if attr in seen_: # This just holds off bad behavior # but it does mimic normal util_import behavior # which is good continue seen_.add(attr) setattr({modname}, attr, getattr(submod, attr)) def reload_subs(verbose=1): """ Reloads {modname} and submodules """ if verbose: print('Reloading {modname} submodules') rrr(verbose > 1) def wrap_fbrrr(mod): def fbrrr(*args, **kwargs): """ fallback reload """ if verbose > 0: print('Auto-reload (using rrr) not setup for mod=%r' % (mod,)) return fbrrr def get_rrr(mod): if hasattr(mod, 'rrr'): return mod.rrr else: return wrap_fbrrr(mod) def get_reload_subs(mod): return getattr(mod, 'reload_subs', wrap_fbrrr(mod)) {reload_body} rrr(verbose > 1) try: # hackish way of propogating up the new reloaded submodule attributes reassign_submodule_attributes(verbose=verbose) except Exception as ex: print(ex) rrrr = reload_subs # ENDBLOCK ''' ) injectstr_fmt = injectstr_fmt . replace ( '# STARTBLOCK' , '' ) injectstr_fmt = injectstr_fmt . replace ( '# ENDBLOCK' , '' ) rrrdir_fmt = ' get_reload_subs({modname})(verbose=verbose)' rrrfile_fmt = ' get_rrr({modname})(verbose > 1)' def _reload_command ( tup ) : if len ( tup ) > 2 and tup [ 2 ] is True : return rrrdir_fmt . format ( modname = tup [ 0 ] ) else : return rrrfile_fmt . format ( modname = tup [ 0 ] ) reload_body = '\n' . join ( map ( _reload_command , import_tuples ) ) . strip ( ) format_dict = { 'modname' : modname , 'reload_body' : reload_body , 'injecter' : injecter , 'injecter_import' : injecter_import , } inject_execstr = injectstr_fmt . format ( * * format_dict ) . strip ( ) return inject_execstr
Injection and Reload String Defs
855
7
8,856
def make_initstr ( modname , import_tuples , verbose = False ) : imports = [ tup [ 0 ] for tup in import_tuples ] from_imports = __get_from_imports ( import_tuples ) inject_execstr = _inject_execstr ( modname , import_tuples ) return _initstr ( modname , imports , from_imports , inject_execstr )
Just creates the string representation . Does no importing .
95
10
8,857
def make_import_tuples ( module_path , exclude_modnames = [ ] ) : from utool import util_path kwargs = dict ( private = False , full = False ) module_list = util_path . ls_modulefiles ( module_path , noext = True , * * kwargs ) package_list = util_path . ls_moduledirs ( module_path , * * kwargs ) exclude_set = set ( exclude_modnames ) module_import_tuples = [ ( modname , None ) for modname in module_list if modname not in exclude_set ] package_import_tuples = [ ( modname , None , True ) for modname in package_list if modname not in exclude_set ] import_tuples = ( module_import_tuples + package_import_tuples ) return import_tuples
Infer the import_tuples from a module_path
192
12
8,858
def get_resource_dir ( ) : #resource_prefix = '~' if WIN32 : dpath_ = '~/AppData/Roaming' elif LINUX : dpath_ = '~/.config' elif DARWIN : dpath_ = '~/Library/Application Support' else : raise AssertionError ( 'unknown os' ) dpath = normpath ( expanduser ( dpath_ ) ) return dpath
Returns a directory which should be writable for any application
93
11
8,859
def load_data ( fpath , * * kwargs ) : ext = splitext ( fpath ) [ 1 ] if ext in [ '.pickle' , '.cPkl' , '.pkl' ] : return load_cPkl ( fpath , * * kwargs ) elif ext in [ '.json' ] : return load_json ( fpath , * * kwargs ) elif ext in [ '.hdf5' ] : return load_hdf5 ( fpath , * * kwargs ) elif ext in [ '.txt' ] : return load_text ( fpath , * * kwargs ) elif HAS_NUMPY and ext in [ '.npz' , '.npy' ] : return load_numpy ( fpath , * * kwargs ) else : assert False , 'unknown ext=%r for fpath=%r' % ( ext , fpath )
More generic interface to load data
202
6
8,860
def save_data ( fpath , data , * * kwargs ) : ext = splitext ( fpath ) [ 1 ] if ext in [ '.pickle' , '.cPkl' , '.pkl' ] : return save_cPkl ( fpath , data , * * kwargs ) elif ext in [ '.json' ] : return save_json ( fpath , data , * * kwargs ) elif ext in [ '.hdf5' ] : return save_hdf5 ( fpath , data , * * kwargs ) elif ext in [ '.txt' ] : return save_text ( fpath , * * kwargs ) elif HAS_NUMPY and ext in [ '.npz' , '.npy' ] : return save_numpy ( fpath , data , * * kwargs ) else : assert False , 'unknown ext=%r for fpath=%r' % ( ext , fpath )
More generic interface to write data
212
6
8,861
def write_to ( fpath , to_write , aslines = False , verbose = None , onlyifdiff = False , mode = 'w' , n = None ) : if onlyifdiff : import utool as ut if ut . hashstr ( read_from ( fpath ) ) == ut . hashstr ( to_write ) : print ( '[util_io] * no difference' ) return verbose = _rectify_verb_write ( verbose ) if verbose : # n = None if verbose > 1 else 2 # print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n)) print ( '[util_io] * Writing to text file: {}' . format ( fpath ) ) backup = False and exists ( fpath ) if backup : util_path . copy ( fpath , fpath + '.backup' ) if not isinstance ( fpath , six . string_types ) : # Assuming a file object with a name attribute # Should just read from the file fpath = fpath . name with open ( fpath , mode ) as file_ : if aslines : file_ . writelines ( to_write ) else : # Ensure python2 writes in bytes if six . PY2 : if isinstance ( to_write , unicode ) : # NOQA to_write = to_write . encode ( 'utf8' ) try : file_ . write ( to_write ) except UnicodeEncodeError as ex : start = max ( ex . args [ 2 ] - 10 , 0 ) end = ex . args [ 3 ] + 10 context = to_write [ start : end ] print ( repr ( context ) ) print ( context ) from utool import util_dbg util_dbg . printex ( ex , keys = [ ( type , 'to_write' ) ] ) file_ . close ( ) if backup : # restore util_path . copy ( fpath + '.backup' , fpath ) # import utool # utool.embed() raise
Writes text to a file . Automatically encodes text as utf8 .
445
17
8,862
def read_from ( fpath , verbose = None , aslines = False , strict = True , n = None , errors = 'replace' ) : if n is None : n = __READ_TAIL_N__ verbose = _rectify_verb_read ( verbose ) if verbose : print ( '[util_io] * Reading text file: %r ' % util_path . tail ( fpath , n = n ) ) try : if not util_path . checkpath ( fpath , verbose = verbose , n = n ) : raise IOError ( '[io] * FILE DOES NOT EXIST!' ) #with open(fpath, 'r') as file_: with open ( fpath , 'rb' ) as file_ : if aslines : #text = file_.readlines() if six . PY2 : # python2 writes in bytes, so read as bytes then convert to # utf8 text = [ line . decode ( 'utf8' , errors = errors ) for line in file_ . readlines ( ) ] else : text = [ line . decode ( 'utf8' , errors = errors ) for line in file_ . readlines ( ) ] #text = file_.readlines() else : # text = file_.read() if six . PY2 : text = file_ . read ( ) . decode ( 'utf8' , errors = errors ) else : #text = file_.read() text = file_ . read ( ) . decode ( 'utf8' , errors = errors ) return text except IOError as ex : from utool import util_dbg if verbose or strict : util_dbg . printex ( ex , ' * Error reading fpath=%r' % util_path . tail ( fpath , n = n ) , '[io]' ) if strict : raise
r Reads text from a file . Automatically returns utf8 .
395
15
8,863
def save_cPkl ( fpath , data , verbose = None , n = None ) : verbose = _rectify_verb_write ( verbose ) if verbose : print ( '[util_io] * save_cPkl(%r, data)' % ( util_path . tail ( fpath , n = n ) , ) ) with open ( fpath , 'wb' ) as file_ : # Use protocol 2 to support python2 and 3 pickle . dump ( data , file_ , protocol = 2 )
Saves data to a pickled file with optional verbosity
115
12
8,864
def load_cPkl ( fpath , verbose = None , n = None ) : verbose = _rectify_verb_read ( verbose ) if verbose : print ( '[util_io] * load_cPkl(%r)' % ( util_path . tail ( fpath , n = n ) , ) ) try : with open ( fpath , 'rb' ) as file_ : data = pickle . load ( file_ ) except UnicodeDecodeError : if six . PY3 : # try to open python2 pickle with open ( fpath , 'rb' ) as file_ : data = pickle . load ( file_ , encoding = 'latin1' ) else : raise except ValueError as ex : if six . PY2 : if ex . message == 'unsupported pickle protocol: 4' : raise ValueError ( 'unsupported Python3 pickle protocol 4 ' 'in Python2 for fpath=%r' % ( fpath , ) ) else : raise else : raise return data
Loads a pickled file with optional verbosity . Aims for compatibility between python2 and python3 .
223
22
8,865
def save_hdf5 ( fpath , data , verbose = None , compression = 'lzf' ) : import h5py verbose = _rectify_verb_write ( verbose ) if verbose : print ( '[util_io] * save_hdf5(%r, data)' % ( util_path . tail ( fpath ) , ) ) if verbose > 1 : if isinstance ( data , dict ) : print ( '[util_io] ... shapes=%r' % ( [ val . shape for val in data . values ( ) ] , ) ) else : print ( '[util_io] ... shape=%r' % ( data . shape , ) ) chunks = True # True enables auto-chunking fname = basename ( fpath ) # check for parallel hdf5 #have_mpi = h5py.h5.get_config().mpi #if have_mpi: # import mpi4py # h5kw = dict(driver='mpio', comm=mpi4py.MPI.COMM_WORLD) # # cant use compression with mpi # #ValueError: Unable to create dataset (Parallel i/o does not support filters yet) #else: h5kw = { } if isinstance ( data , dict ) : array_data = { key : val for key , val in data . items ( ) if isinstance ( val , ( list , np . ndarray ) ) } attr_data = { key : val for key , val in data . items ( ) if key not in array_data } #assert all([ # isinstance(vals, np.ndarray) # for vals in six.itervalues(data) #]), ('can only save dicts as ndarrays') # file_ = h5py.File(fpath, 'w', **h5kw) with h5py . File ( fpath , mode = 'w' , * * h5kw ) as file_ : grp = file_ . create_group ( fname ) for key , val in six . iteritems ( array_data ) : val = np . asarray ( val ) dset = grp . create_dataset ( key , val . shape , val . dtype , chunks = chunks , compression = compression ) dset [ ... ] = val for key , val in six . iteritems ( attr_data ) : grp . attrs [ key ] = val else : assert isinstance ( data , np . ndarray ) shape = data . shape dtype = data . dtype #if verbose or (verbose is None and __PRINT_WRITES__): # print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),)) # file_ = h5py.File(fpath, 'w', **h5kw) with h5py . File ( fpath , mode = 'w' , * * h5kw ) as file_ : #file_.create_dataset( # fname, shape, dtype, chunks=chunks, compression=compression, # data=data) dset = file_ . create_dataset ( fname , shape , dtype , chunks = chunks , compression = compression ) dset [ ... ] = data
r Restricted save of data using hdf5 . Can only save ndarrays and dicts of ndarrays .
730
27
8,866
def save_pytables ( fpath , data , verbose = False ) : import tables #from os.path import basename #fname = basename(fpath) #shape = data.shape #dtype = data.dtype #file_ = tables.open_file(fpath) verbose = _rectify_verb_write ( verbose ) if verbose : print ( '[util_io] * save_pytables(%r, data)' % ( util_path . tail ( fpath ) , ) ) with tables . open_file ( fpath , 'w' ) as file_ : atom = tables . Atom . from_dtype ( data . dtype ) filters = tables . Filters ( complib = 'blosc' , complevel = 5 ) dset = file_ . createCArray ( file_ . root , 'data' , atom , data . shape , filters = filters ) # save w/o compressive filter #dset = file_.createCArray(file_.root, 'all_data', atom, all_data.shape) dset [ : ] = data
sudo pip install numexpr sudo pip install tables
241
9
8,867
def start_simple_webserver ( domain = None , port = 5832 ) : import tornado . ioloop import tornado . web import tornado . httpserver import tornado . wsgi import flask app = flask . Flask ( '__simple__' ) @ app . route ( '/' , methods = [ 'GET' , 'POST' , 'DELETE' , 'PUT' ] ) def echo_args ( * args , * * kwargs ) : from flask import request print ( 'Simple server was pinged' ) print ( 'args = %r' % ( args , ) ) print ( 'kwargs = %r' % ( kwargs , ) ) print ( 'request.args = %r' % ( request . args , ) ) print ( 'request.form = %r' % ( request . form , ) ) return '' if domain is None : domain = get_localhost ( ) app . server_domain = domain app . server_port = port app . server_url = 'http://%s:%s' % ( app . server_domain , app . server_port ) print ( 'app.server_url = %s' % ( app . server_url , ) ) http_server = tornado . httpserver . HTTPServer ( tornado . wsgi . WSGIContainer ( app ) ) http_server . listen ( app . server_port ) tornado . ioloop . IOLoop . instance ( ) . start ( )
r simple webserver that echos its arguments
315
10
8,868
def render_html ( html_str ) : import utool as ut from os . path import abspath import webbrowser try : html_str = html_str . decode ( 'utf8' ) except Exception : pass html_dpath = ut . ensure_app_resource_dir ( 'utool' , 'temp_html' ) fpath = abspath ( ut . unixjoin ( html_dpath , 'temp.html' ) ) url = 'file://' + fpath ut . writeto ( fpath , html_str ) webbrowser . open ( url )
makes a temporary html rendering
124
5
8,869
def publish ( func ) : @ wraps ( func ) def wrapper ( self , * args , * * kwargs ) : # outgoing payload = func ( self , * args , * * kwargs ) payload . pop ( 'self' , None ) self . _publish ( func . __name__ , payload ) return None wrapper . is_publish = True return wrapper
publish the return value of this function as a message from this endpoint
79
14
8,870
def request ( func = None , timeout = 600 ) : if func is None : return partial ( request , timeout = timeout ) @ wraps ( func ) def wrapper ( self , * args , * * kwargs ) : params = func ( self , * args , * * kwargs ) self = params . pop ( 'self' , None ) entity = params . pop ( 'entity' , None ) app_name = params . pop ( 'app_name' , None ) request_id = unique_hex ( ) params [ 'request_id' ] = request_id future = self . _send_request ( app_name , endpoint = func . __name__ , entity = entity , params = params , timeout = timeout ) return future wrapper . is_request = True return wrapper
use to request an api call from a specific endpoint
166
10
8,871
def serialize_problem ( req , resp , problem ) : preferred = req . client_prefers ( ( 'application/json' , 'application/problem+json' ) ) if preferred is None : preferred = 'application/json' resp . data = problem . to_json ( ) . encode ( 'utf-8' ) resp . content_type = preferred resp . append_header ( 'Vary' , 'Accept' )
Serialize the given instance of Problem .
93
8
8,872
def add_psms_to_proteindata ( proteindata , p_acc , pool , psmdata ) : seq , psm_id = psmdata [ 2 ] , psmdata [ 3 ] try : proteindata [ p_acc ] [ 'pools' ] [ pool ] [ 'psms' ] . add ( psm_id ) except KeyError : emptyinfo = { 'psms' : set ( ) , 'peptides' : set ( ) , 'unipeps' : 0 } try : proteindata [ p_acc ] [ 'pools' ] [ pool ] = emptyinfo except KeyError : proteindata [ p_acc ] . update ( { 'pools' : { pool : emptyinfo } } ) proteindata [ p_acc ] [ 'pools' ] [ pool ] [ 'psms' ] . add ( psm_id ) proteindata [ p_acc ] [ 'pools' ] [ pool ] [ 'peptides' ] . add ( seq )
Fill function for create_featuredata_map
227
10
8,873
def print_traceback ( with_colors = True ) : #traceback.print_tb() import traceback stack = traceback . extract_stack ( ) stack_lines = traceback . format_list ( stack ) tbtext = '' . join ( stack_lines ) if with_colors : try : from pygments import highlight from pygments . lexers import get_lexer_by_name from pygments . formatters import TerminalFormatter lexer = get_lexer_by_name ( 'pytb' , stripall = True ) formatter = TerminalFormatter ( bg = 'dark' ) formatted_text = highlight ( tbtext , lexer , formatter ) print ( formatted_text ) except Exception : print ( tbtext ) else : print ( tbtext )
prints current stack
175
3
8,874
def is_valid_varname ( varname ) : if not isinstance ( varname , six . string_types ) : return False match_obj = re . match ( varname_regex , varname ) valid_syntax = match_obj is not None valid_name = not keyword . iskeyword ( varname ) isvalid = valid_syntax and valid_name return isvalid
Checks syntax and validity of a variable name
86
9
8,875
def execstr_dict ( dict_ , local_name = None , exclude_list = None , explicit = False ) : import utool as ut if explicit : expr_list = [ ] for ( key , val ) in sorted ( dict_ . items ( ) ) : assert isinstance ( key , six . string_types ) , 'keys must be strings' expr_list . append ( '%s = %s' % ( key , ut . repr2 ( val ) , ) ) execstr = '\n' . join ( expr_list ) return execstr else : if local_name is None : # Magic way of getting the local name of dict_ local_name = get_varname_from_locals ( dict_ , get_parent_frame ( ) . f_locals ) try : if exclude_list is None : exclude_list = [ ] assert isinstance ( exclude_list , list ) exclude_list . append ( local_name ) expr_list = [ ] assert isinstance ( dict_ , dict ) , 'incorrect type type(dict_)=%r, dict_=%r' % ( type ( dict ) , dict_ ) for ( key , val ) in sorted ( dict_ . items ( ) ) : assert isinstance ( key , six . string_types ) , 'keys must be strings' if not is_valid_varname ( key ) : continue if not any ( ( fnmatch . fnmatch ( key , pat ) for pat in exclude_list ) ) : expr = '%s = %s[%s]' % ( key , local_name , ut . repr2 ( key ) ) expr_list . append ( expr ) execstr = '\n' . join ( expr_list ) return execstr except Exception as ex : locals_ = locals ( ) ut . printex ( ex , key_list = [ 'locals_' ] ) raise
returns execable python code that declares variables using keys and values
408
13
8,876
def embed2 ( * * kwargs ) : config = kwargs . get ( 'config' ) header = kwargs . pop ( 'header' , u'' ) stack_depth = kwargs . pop ( 'stack_depth' , 2 ) compile_flags = kwargs . pop ( 'compile_flags' , None ) import IPython from IPython . core . interactiveshell import InteractiveShell from IPython . terminal . embed import InteractiveShellEmbed if config is None : config = IPython . terminal . ipapp . load_default_config ( ) config . InteractiveShellEmbed = config . TerminalInteractiveShell kwargs [ 'config' ] = config #save ps1/ps2 if defined ps1 = None ps2 = None try : ps1 = sys . ps1 ps2 = sys . ps2 except AttributeError : pass #save previous instance saved_shell_instance = InteractiveShell . _instance if saved_shell_instance is not None : cls = type ( saved_shell_instance ) cls . clear_instance ( ) shell = InteractiveShellEmbed . instance ( * * kwargs ) shell ( header = header , stack_depth = stack_depth , compile_flags = compile_flags ) InteractiveShellEmbed . clear_instance ( ) #restore previous instance if saved_shell_instance is not None : cls = type ( saved_shell_instance ) cls . clear_instance ( ) for subclass in cls . _walk_mro ( ) : subclass . _instance = saved_shell_instance if ps1 is not None : sys . ps1 = ps1 sys . ps2 = ps2
Modified from IPython . terminal . embed . embed so I can mess with stack_depth
356
19
8,877
def search_stack_for_localvar ( varname ) : curr_frame = inspect . currentframe ( ) print ( ' * Searching parent frames for: ' + six . text_type ( varname ) ) frame_no = 0 while curr_frame . f_back is not None : if varname in curr_frame . f_locals . keys ( ) : print ( ' * Found in frame: ' + six . text_type ( frame_no ) ) return curr_frame . f_locals [ varname ] frame_no += 1 curr_frame = curr_frame . f_back print ( '... Found nothing in all ' + six . text_type ( frame_no ) + ' frames.' ) return None
Finds a local varable somewhere in the stack and returns the value
163
14
8,878
def formatex ( ex , msg = '[!?] Caught exception' , prefix = None , key_list = [ ] , locals_ = None , iswarning = False , tb = False , N = 0 , keys = None , colored = None ) : # Get error prefix and local info if prefix is None : prefix = get_caller_prefix ( aserror = True , N = N ) if locals_ is None : locals_ = get_parent_frame ( N = N ) . f_locals if keys is not None : # shorthand for key_list key_list = keys # build exception message errstr_list = [ ] # list of exception strings ex_tag = 'WARNING' if iswarning else 'EXCEPTION' errstr_list . append ( '<!!! %s !!!>' % ex_tag ) if tb or FORCE_TB : tbtext = traceback . format_exc ( ) if colored or COLORED_EXCEPTIONS : from utool import util_str tbtext = util_str . highlight_text ( tbtext , lexer_name = 'pytb' , stripall = True ) errstr_list . append ( tbtext ) errstr_list . append ( prefix + ' ' + six . text_type ( msg ) + '\n%r: %s' % ( type ( ex ) , six . text_type ( ex ) ) ) #errstr_list.append(prefix + ' ' + six.text_type(msg) + '\ntype(ex)=%r' % (type(ex),)) parse_locals_keylist ( locals_ , key_list , errstr_list , prefix ) errstr_list . append ( '</!!! %s !!!>' % ex_tag ) return '\n' . join ( errstr_list )
r Formats an exception with relevant info
403
8
8,879
def parse_locals_keylist ( locals_ , key_list , strlist_ = None , prefix = '' ) : from utool import util_str if strlist_ is None : strlist_ = [ ] for key in key_list : try : if key is None : strlist_ . append ( '' ) elif isinstance ( key , tuple ) : # Given a tuple of information tup = key func , key_ = tup val = get_varval_from_locals ( key_ , locals_ ) funcvalstr = six . text_type ( func ( val ) ) callname = util_str . get_callable_name ( func ) strlist_ . append ( '%s %s(%s) = %s' % ( prefix , callname , key_ , funcvalstr ) ) elif isinstance ( key , six . string_types ) : # Try to infer print from variable name val = get_varval_from_locals ( key , locals_ ) #valstr = util_str.truncate_str(repr(val), maxlen=200) valstr = util_str . truncate_str ( util_str . repr2 ( val ) , maxlen = 200 ) strlist_ . append ( '%s %s = %s' % ( prefix , key , valstr ) ) else : # Try to infer print from variable value val = key typestr = repr ( type ( val ) ) namestr = get_varname_from_locals ( val , locals_ ) #valstr = util_str.truncate_str(repr(val), maxlen=200) valstr = util_str . truncate_str ( util_str . repr2 ( val ) , maxlen = 200 ) strlist_ . append ( '%s %s %s = %s' % ( prefix , typestr , namestr , valstr ) ) except AssertionError as ex : strlist_ . append ( prefix + ' ' + six . text_type ( ex ) + ' (this likely due to a misformatted printex and is not related to the exception)' ) return strlist_
For each key in keylist puts its value in locals into a stringlist
477
15
8,880
def switch_psm_to_peptable_fields ( oldheader ) : return { old : new for old , new in zip ( [ mzidtsvdata . HEADER_PEPTIDE , mzidtsvdata . HEADER_PROTEIN , mzidtsvdata . HEADER_PEPTIDE_Q , mzidtsvdata . HEADER_PEPTIDE_PEP ] , [ peptabledata . HEADER_PEPTIDE , peptabledata . HEADER_PROTEINS , peptabledata . HEADER_QVAL , peptabledata . HEADER_PEP ] ) }
Returns a dict map with old to new header fields
142
10
8,881
def add_instruction ( self , instr ) : assert ( isinstance ( instr , Instruction ) ) self . instruction_list . append ( instr ) if instr . lhs not in self . defined_variables : if isinstance ( instr . lhs , Variable ) : self . defined_variables . append ( instr . lhs ) if isinstance ( instr , EqInstruction ) : if isinstance ( instr . rhs , Variable ) : if instr . rhs not in self . used_variables : self . used_variables . append ( instr . rhs ) else : if isinstance ( instr . rhs_1 , Variable ) : if instr . rhs_1 not in self . used_variables : self . used_variables . append ( instr . rhs_1 ) if isinstance ( instr . rhs_2 , Variable ) : if instr . rhs_2 not in self . used_variables : self . used_variables . append ( instr . rhs_2 )
Adds the argument instruction in the list of instructions of this basic block .
217
14
8,882
def set_condition ( self , condition , condition_instr = None ) : assert ( isinstance ( condition , Numeric ) ) if condition_instr is not None : assert ( isinstance ( condition_instr , CmpInstruction ) ) self . condition = condition self . condition_instr = condition_instr if condition_instr is not None : if condition_instr . lhs not in self . defined_variables : if isinstance ( condition_instr . lhs , Variable ) : self . defined_variables . append ( condition_instr . lhs ) if isinstance ( condition_instr . rhs_1 , Variable ) : if condition_instr . rhs_1 not in self . used_variables : self . used_variables . append ( condition_instr . rhs_1 ) if isinstance ( condition_instr . rhs_2 , Variable ) : if condition_instr . rhs_2 not in self . used_variables : self . used_variables . append ( condition_instr . rhs_2 )
Defines the condition which decides how the basic block exits
237
11
8,883
def add_basic_block ( self , basic_block ) : assert ( isinstance ( basic_block , BasicBlock ) ) self . basic_block_list . append ( basic_block )
Adds the given basic block in the function
41
8
8,884
def get_variable ( self , var_name ) : assert ( isinstance ( var_name , str ) ) if isinstance ( var_name , str ) : for var in self . variable_list : if var . name == var_name : return var new_var = Variable ( var_name ) self . variable_list . append ( new_var ) return new_var
If a variable with the name var_name exists in this function s variable list \ then that variable object is returned ; else a new variable is created \ with the given name and added to the variable list of this function \ and returned back
81
48
8,885
def add_input_variable ( self , var ) : assert ( isinstance ( var , Variable ) ) self . input_variable_list . append ( var )
Adds the argument variable as one of the input variable
34
10
8,886
def add_output_variable ( self , var ) : assert ( isinstance ( var , Variable ) ) self . output_variable_list . append ( var )
Adds the argument variable as one of the output variable
34
10
8,887
def tokenize ( self ) : self . token_list = [ ] ps = self . parse_string . strip ( ) i = 0 last_token = None while i < len ( ps ) and ps [ i ] . isspace ( ) : i += 1 while i < len ( ps ) : token = '' if ps [ i ] . isalpha ( ) : while i < len ( ps ) and ( ps [ i ] . isalnum ( ) or ps [ i ] == '_' ) : token += ps [ i ] i += 1 elif ps [ i ] . isdigit ( ) : while i < len ( ps ) and ( ps [ i ] . isdigit ( ) or ps [ i ] == '.' or ps [ i ] == 'e' or ps [ i ] == 'E' or ( ps [ i ] == '+' and ( ps [ i - 1 ] == 'e' or ps [ i - 1 ] == 'E' ) ) or ( ps [ i ] == '-' and ( ps [ i - 1 ] == 'e' or ps [ i - 1 ] == 'E' ) ) ) : token += ps [ i ] i += 1 elif ps [ i ] == '.' : if ps [ i + 1 ] . isdigit ( ) : while i < len ( ps ) and ( ps [ i ] . isdigit ( ) or ps [ i ] == '.' ) : token += ps [ i ] i += 1 else : while i < len ( ps ) and ( ps [ i ] . isalpha ( ) or ps [ i ] == '.' ) : token += ps [ i ] i += 1 else : token += ps [ i ] i += 1 if token == '-' and ( last_token == None or last_token == '(' or self . is_op ( last_token ) ) : token = '~' self . token_list += [ token ] last_token = token while i < len ( ps ) and ps [ i ] . isspace ( ) : i += 1
Tokenizes the string stored in the parser object into a list of tokens .
434
15
8,888
def parse ( self ) : #print("Parsing: %s"%self.parse_string) self . tokenize ( ) if self . debug : print ( "Tokens found: %s" % self . token_list ) try : parse_tree = self . parse2 ( ) except Exception as e : raise e return parse_tree
Tokenizes and parses an arithmetic expression into a parse tree .
73
13
8,889
def insert_keys ( self , keys ) : start = 0 bulk_insert = self . bulk_insert keys_len = len ( keys ) query = 'INSERT IGNORE INTO gauged_keys (namespace, `key`) VALUES ' execute = self . cursor . execute while start < keys_len : rows = keys [ start : start + bulk_insert ] params = [ param for params in rows for param in params ] insert = '(%s,%s),' * ( len ( rows ) - 1 ) + '(%s,%s)' execute ( query + insert , params ) start += bulk_insert
Insert keys into a table which assigns an ID
132
9
8,890
def get_writer_position ( self , name ) : cursor = self . cursor cursor . execute ( 'SELECT timestamp FROM gauged_writer_history ' 'WHERE id = %s' , ( name , ) ) result = cursor . fetchone ( ) return result [ 0 ] if result else 0
Get the current writer position
62
5
8,891
def get_namespaces ( self ) : cursor = self . cursor cursor . execute ( 'SELECT DISTINCT namespace FROM gauged_statistics' ) return [ namespace for namespace , in cursor ]
Get a list of namespaces
42
6
8,892
def remove_namespace ( self , namespace ) : params = ( namespace , ) execute = self . cursor . execute execute ( 'DELETE FROM gauged_data WHERE namespace = %s' , params ) execute ( 'DELETE FROM gauged_statistics WHERE namespace = %s' , params ) execute ( 'DELETE FROM gauged_keys WHERE namespace = %s' , params ) self . remove_cache ( namespace )
Remove all data associated with the current namespace
93
8
8,893
def remove_cache ( self , namespace , key = None ) : if key is None : self . cursor . execute ( 'DELETE FROM gauged_cache ' 'WHERE namespace = %s' , ( namespace , ) ) else : self . cursor . execute ( 'DELETE FROM gauged_cache ' 'WHERE namespace = %s and `key` = %s' , ( namespace , key ) )
Remove all cached values for the specified namespace optionally specifying a key
87
12
8,894
def clear_schema ( self ) : execute = self . cursor . execute execute ( 'TRUNCATE TABLE gauged_data' ) execute ( 'TRUNCATE TABLE gauged_keys' ) execute ( 'TRUNCATE TABLE gauged_writer_history' ) execute ( 'TRUNCATE TABLE gauged_cache' ) execute ( 'TRUNCATE TABLE gauged_statistics' ) self . db . commit ( )
Clear all gauged data
96
5
8,895
def quantum_random ( ) : import quantumrandom data16 = quantumrandom . uint16 ( array_length = 2 ) assert data16 . flags [ 'C_CONTIGUOUS' ] data32 = data16 . view ( np . dtype ( 'uint32' ) ) [ 0 ] return data32
returns a 32 bit unsigned integer quantum random number
65
10
8,896
def _npstate_to_pystate ( npstate ) : PY_VERSION = 3 version , keys , pos , has_gauss , cached_gaussian_ = npstate keys_pos = tuple ( map ( int , keys ) ) + ( int ( pos ) , ) cached_gaussian_ = cached_gaussian_ if has_gauss else None pystate = ( PY_VERSION , keys_pos , cached_gaussian_ ) return pystate
Convert state of a NumPy RandomState object to a state that can be used by Python s Random .
100
22
8,897
def _pystate_to_npstate ( pystate ) : NP_VERSION = 'MT19937' version , keys_pos_ , cached_gaussian_ = pystate keys , pos = keys_pos_ [ : - 1 ] , keys_pos_ [ - 1 ] keys = np . array ( keys , dtype = np . uint32 ) has_gauss = cached_gaussian_ is not None cached_gaussian = cached_gaussian_ if has_gauss else 0.0 npstate = ( NP_VERSION , keys , pos , has_gauss , cached_gaussian ) return npstate
Convert state of a Python Random object to state usable by NumPy RandomState .
133
17
8,898
def ensure_rng ( rng , impl = 'numpy' ) : if impl == 'numpy' : if rng is None : rng = np . random elif isinstance ( rng , int ) : rng = np . random . RandomState ( seed = rng ) elif isinstance ( rng , random . Random ) : # Convert python to numpy random state py_rng = rng pystate = py_rng . getstate ( ) npstate = _pystate_to_npstate ( pystate ) rng = np_rng = np . random . RandomState ( seed = 0 ) np_rng . set_state ( npstate ) elif impl == 'python' : if rng is None : rng = random elif isinstance ( rng , int ) : rng = random . Random ( rng ) elif isinstance ( rng , np . random . RandomState ) : # Convert numpy to python random state np_rng = rng npstate = np_rng . get_state ( ) pystate = _npstate_to_pystate ( npstate ) rng = py_rng = random . Random ( 0 ) py_rng . setstate ( pystate ) else : raise KeyError ( 'unknown rng impl={}' . format ( impl ) ) return rng
Returns a random number generator
295
5
8,899
def random_indexes ( max_index , subset_size = None , seed = None , rng = None ) : subst_ = np . arange ( 0 , max_index ) rng = ensure_rng ( seed if rng is None else rng ) rng . shuffle ( subst_ ) if subset_size is None : subst = subst_ else : subst = subst_ [ 0 : min ( subset_size , max_index ) ] return subst
random unrepeated indicies
99
6