idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
9,500
def add_dict ( self , dyn_dict ) : if not isinstance ( dyn_dict , dict ) : raise Exception ( 'DynStruct.add_dict expects a dictionary.' + 'Recieved: ' + six . text_type ( type ( dyn_dict ) ) ) for ( key , val ) in six . iteritems ( dyn_dict ) : self [ key ] = val
Adds a dictionary to the prefs
83
7
9,501
def to_dict ( self ) : dyn_dict = { } for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : dyn_dict [ key ] = val return dyn_dict
Converts dynstruct to a dictionary .
58
8
9,502
def execstr ( self , local_name ) : execstr = '' for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : execstr += key + ' = ' + local_name + '.' + key + '\n' return execstr
returns a string which when evaluated will add the stored variables to the current namespace
72
16
9,503
def get_proteins_for_peptide ( self , psm_id ) : protsql = self . get_sql_select ( [ 'protein_acc' ] , 'protein_psm' ) protsql = '{0} WHERE psm_id=?' . format ( protsql ) cursor = self . get_cursor ( ) proteins = cursor . execute ( protsql , psm_id ) . fetchall ( ) return [ x [ 0 ] for x in proteins ]
Returns list of proteins for a passed psm_id
111
11
9,504
def raise_if_error ( frame ) : if "status" not in frame or frame [ "status" ] == b"\x00" : return codes_and_exceptions = { b"\x01" : exceptions . ZigBeeUnknownError , b"\x02" : exceptions . ZigBeeInvalidCommand , b"\x03" : exceptions . ZigBeeInvalidParameter , b"\x04" : exceptions . ZigBeeTxFailure } if frame [ "status" ] in codes_and_exceptions : raise codes_and_exceptions [ frame [ "status" ] ] ( ) raise exceptions . ZigBeeUnknownStatus ( )
Checks a frame and raises the relevant exception if required .
138
12
9,505
def hex_to_int ( value ) : if version_info . major >= 3 : return int . from_bytes ( value , "big" ) return int ( value . encode ( "hex" ) , 16 )
Convert a byte string like \x0A\xE3 to its integer value, e.g. 2787 .
46
17
9,506
def adc_to_percentage ( value , max_volts , clamp = True ) : percentage = ( 100.0 / const . ADC_MAX_VAL ) * value return max ( min ( 100 , percentage ) , 0 ) if clamp else percentage
Convert the ADC raw value to a percentage .
54
10
9,507
def convert_adc ( value , output_type , max_volts ) : return { const . ADC_RAW : lambda x : x , const . ADC_PERCENTAGE : adc_to_percentage , const . ADC_VOLTS : adc_to_volts , const . ADC_MILLIVOLTS : adc_to_millivolts } [ output_type ] ( value , max_volts )
Converts the output from the ADC into the desired type .
95
12
9,508
def _frame_received ( self , frame ) : try : self . _rx_frames [ frame [ "frame_id" ] ] = frame except KeyError : # Has no frame_id, ignore? pass _LOGGER . debug ( "Frame received: %s" , frame ) # Give the frame to any interested functions for handler in self . _rx_handlers : handler ( frame )
Put the frame into the _rx_frames dict with a key of the frame_id .
84
19
9,509
def _send ( self , * * kwargs ) : if kwargs . get ( "dest_addr_long" ) is not None : self . zb . remote_at ( * * kwargs ) else : self . zb . at ( * * kwargs )
Send a frame to either the local ZigBee or a remote device .
62
14
9,510
def _send_and_wait ( self , * * kwargs ) : frame_id = self . next_frame_id kwargs . update ( dict ( frame_id = frame_id ) ) self . _send ( * * kwargs ) timeout = datetime . now ( ) + const . RX_TIMEOUT while datetime . now ( ) < timeout : try : frame = self . _rx_frames . pop ( frame_id ) raise_if_error ( frame ) return frame except KeyError : sleep ( 0.1 ) continue _LOGGER . exception ( "Did not receive response within configured timeout period." ) raise exceptions . ZigBeeResponseTimeout ( )
Send a frame to either the local ZigBee or a remote device and wait for a pre - defined amount of time for its response .
145
27
9,511
def _get_parameter ( self , parameter , dest_addr_long = None ) : frame = self . _send_and_wait ( command = parameter , dest_addr_long = dest_addr_long ) return frame [ "parameter" ]
Fetches and returns the value of the specified parameter .
55
12
9,512
def get_sample ( self , dest_addr_long = None ) : frame = self . _send_and_wait ( command = b"IS" , dest_addr_long = dest_addr_long ) if "parameter" in frame : # @TODO: Is there always one value? Is it always a list? return frame [ "parameter" ] [ 0 ] return { }
Initiate a sample and return its data .
86
10
9,513
def read_digital_pin ( self , pin_number , dest_addr_long = None ) : sample = self . get_sample ( dest_addr_long = dest_addr_long ) try : return sample [ const . DIGITAL_PINS [ pin_number ] ] except KeyError : raise exceptions . ZigBeePinNotConfigured ( "Pin %s (%s) is not configured as a digital input or output." % ( pin_number , const . IO_PIN_COMMANDS [ pin_number ] ) )
Fetches a sample and returns the boolean value of the requested digital pin .
114
16
9,514
def set_gpio_pin ( self , pin_number , setting , dest_addr_long = None ) : assert setting in const . GPIO_SETTINGS . values ( ) self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , parameter = setting . value , dest_addr_long = dest_addr_long )
Set a gpio pin setting .
83
7
9,515
def get_gpio_pin ( self , pin_number , dest_addr_long = None ) : frame = self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , dest_addr_long = dest_addr_long ) value = frame [ "parameter" ] return const . GPIO_SETTINGS [ value ]
Get a gpio pin setting .
83
7
9,516
def get_supply_voltage ( self , dest_addr_long = None ) : value = self . _get_parameter ( b"%V" , dest_addr_long = dest_addr_long ) return ( hex_to_int ( value ) * ( 1200 / 1024.0 ) ) / 1000
Fetches the value of %V and returns it as volts .
69
14
9,517
def add ( self , key ) : if key not in self . _map : self . _map [ key ] = link = _Link ( ) root = self . _root last = root . prev link . prev , link . next , link . key = last , root , key last . next = root . prev = weakref . proxy ( link )
Store new key in a new link at the end of the linked list
74
14
9,518
def index ( self , item ) : for count , other in enumerate ( self ) : if item == other : return count raise ValueError ( '%r is not in OrderedSet' % ( item , ) )
Find the index of item in the OrderedSet
46
10
9,519
def value ( self , key , timestamp = None , namespace = None ) : return self . make_context ( key = key , end = timestamp , namespace = namespace ) . value ( )
Get the value of a gauge at the specified time
39
10
9,520
def aggregate ( self , key , aggregate , start = None , end = None , namespace = None , percentile = None ) : return self . make_context ( key = key , aggregate = aggregate , start = start , end = end , namespace = namespace , percentile = percentile ) . aggregate ( )
Get an aggregate of all gauge data stored in the specified date range
61
13
9,521
def value_series ( self , key , start = None , end = None , interval = None , namespace = None , cache = None ) : return self . make_context ( key = key , start = start , end = end , interval = interval , namespace = namespace , cache = cache ) . value_series ( )
Get a time series of gauge values
67
7
9,522
def aggregate_series ( self , key , aggregate , start = None , end = None , interval = None , namespace = None , cache = None , percentile = None ) : return self . make_context ( key = key , aggregate = aggregate , start = start , end = end , interval = interval , namespace = namespace , cache = cache , percentile = percentile ) . aggregate_series ( )
Get a time series of gauge aggregates
81
8
9,523
def keys ( self , prefix = None , limit = None , offset = None , namespace = None ) : return self . make_context ( prefix = prefix , limit = limit , offset = offset , namespace = namespace ) . keys ( )
Get gauge keys
49
3
9,524
def statistics ( self , start = None , end = None , namespace = None ) : return self . make_context ( start = start , end = end , namespace = namespace ) . statistics ( )
Get write statistics for the specified namespace and date range
41
10
9,525
def sync ( self ) : self . driver . create_schema ( ) self . driver . set_metadata ( { 'current_version' : Gauged . VERSION , 'initial_version' : Gauged . VERSION , 'block_size' : self . config . block_size , 'resolution' : self . config . resolution , 'created_at' : long ( time ( ) * 1000 ) } , replace = False )
Create the necessary schema
93
4
9,526
def make_context ( self , * * kwargs ) : self . check_schema ( ) return Context ( self . driver , self . config , * * kwargs )
Create a new context for reading data
39
7
9,527
def check_schema ( self ) : if self . valid_schema : return config = self . config metadata = self . metadata ( ) if 'current_version' not in metadata : raise GaugedSchemaError ( 'Gauged schema not found, ' 'try a gauged.sync()' ) if metadata [ 'current_version' ] != Gauged . VERSION : msg = 'The schema is version %s while this Gauged is version %s. ' msg += 'Try upgrading Gauged and/or running gauged_migrate.py' msg = msg % ( metadata [ 'current_version' ] , Gauged . VERSION ) raise GaugedVersionMismatchError ( msg ) expected_block_size = '%s/%s' % ( config . block_size , config . resolution ) block_size = '%s/%s' % ( metadata [ 'block_size' ] , metadata [ 'resolution' ] ) if block_size != expected_block_size : msg = 'Expected %s and got %s' % ( expected_block_size , block_size ) warn ( msg , GaugedBlockSizeMismatch ) self . valid_schema = True
Check the schema exists and matches configuration
260
7
9,528
def nx_dag_node_rank ( graph , nodes = None ) : import utool as ut source = list ( ut . nx_source_nodes ( graph ) ) [ 0 ] longest_paths = dict ( [ ( target , dag_longest_path ( graph , source , target ) ) for target in graph . nodes ( ) ] ) node_to_rank = ut . map_dict_vals ( len , longest_paths ) if nodes is None : return node_to_rank else : ranks = ut . dict_take ( node_to_rank , nodes ) return ranks
Returns rank of nodes that define the level each node is on in a topological sort . This is the same as the Graphviz dot rank .
130
30
9,529
def nx_all_nodes_between ( graph , source , target , data = False ) : import utool as ut if source is None : # assume there is a single source sources = list ( ut . nx_source_nodes ( graph ) ) assert len ( sources ) == 1 , ( 'specify source if there is not only one' ) source = sources [ 0 ] if target is None : # assume there is a single source sinks = list ( ut . nx_sink_nodes ( graph ) ) assert len ( sinks ) == 1 , ( 'specify sink if there is not only one' ) target = sinks [ 0 ] all_simple_paths = list ( nx . all_simple_paths ( graph , source , target ) ) nodes = sorted ( set . union ( * map ( set , all_simple_paths ) ) ) return nodes
Find all nodes on paths between source and target .
190
11
9,530
def nx_all_simple_edge_paths ( G , source , target , cutoff = None , keys = False , data = False ) : if cutoff is None : cutoff = len ( G ) - 1 if cutoff < 1 : return import utool as ut import six visited_nodes = [ source ] visited_edges = [ ] if G . is_multigraph ( ) : get_neighbs = ut . partial ( G . edges , keys = keys , data = data ) else : get_neighbs = ut . partial ( G . edges , data = data ) edge_stack = [ iter ( get_neighbs ( source ) ) ] while edge_stack : children_edges = edge_stack [ - 1 ] child_edge = six . next ( children_edges , None ) if child_edge is None : edge_stack . pop ( ) visited_nodes . pop ( ) if len ( visited_edges ) > 0 : visited_edges . pop ( ) elif len ( visited_nodes ) < cutoff : child_node = child_edge [ 1 ] if child_node == target : yield visited_edges + [ child_edge ] elif child_node not in visited_nodes : visited_nodes . append ( child_node ) visited_edges . append ( child_edge ) edge_stack . append ( iter ( get_neighbs ( child_node ) ) ) else : for edge in [ child_edge ] + list ( children_edges ) : if edge [ 1 ] == target : yield visited_edges + [ edge ] edge_stack . pop ( ) visited_nodes . pop ( ) if len ( visited_edges ) > 0 : visited_edges . pop ( )
Returns each path from source to target as a list of edges .
379
13
9,531
def nx_delete_node_attr ( graph , name , nodes = None ) : if nodes is None : nodes = list ( graph . nodes ( ) ) removed = 0 # names = [name] if not isinstance(name, list) else name node_dict = nx_node_dict ( graph ) if isinstance ( name , list ) : for node in nodes : for name_ in name : try : del node_dict [ node ] [ name_ ] removed += 1 except KeyError : pass else : for node in nodes : try : del node_dict [ node ] [ name ] removed += 1 except KeyError : pass return removed
Removes node attributes
137
4
9,532
def nx_delete_edge_attr ( graph , name , edges = None ) : removed = 0 keys = [ name ] if not isinstance ( name , ( list , tuple ) ) else name if edges is None : if graph . is_multigraph ( ) : edges = graph . edges ( keys = True ) else : edges = graph . edges ( ) if graph . is_multigraph ( ) : for u , v , k in edges : for key_ in keys : try : del graph [ u ] [ v ] [ k ] [ key_ ] removed += 1 except KeyError : pass else : for u , v in edges : for key_ in keys : try : del graph [ u ] [ v ] [ key_ ] removed += 1 except KeyError : pass return removed
Removes an attributes from specific edges in the graph
165
10
9,533
def nx_gen_node_values ( G , key , nodes , default = util_const . NoParam ) : node_dict = nx_node_dict ( G ) if default is util_const . NoParam : return ( node_dict [ n ] [ key ] for n in nodes ) else : return ( node_dict [ n ] . get ( key , default ) for n in nodes )
Generates attributes values of specific nodes
87
7
9,534
def nx_gen_node_attrs ( G , key , nodes = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if on_missing is None : on_missing = 'error' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' if nodes is None : nodes = G . nodes ( ) # Generate `node_data` nodes and data dictionary node_dict = nx_node_dict ( G ) if on_missing == 'error' : node_data = ( ( n , node_dict [ n ] ) for n in nodes ) elif on_missing == 'filter' : node_data = ( ( n , node_dict [ n ] ) for n in nodes if n in G ) elif on_missing == 'default' : node_data = ( ( n , node_dict . get ( n , { } ) ) for n in nodes ) else : raise KeyError ( 'on_missing={} must be error, filter or default' . format ( on_missing ) ) # Get `node_attrs` desired value out of dictionary if on_keyerr == 'error' : node_attrs = ( ( n , d [ key ] ) for n , d in node_data ) elif on_keyerr == 'filter' : node_attrs = ( ( n , d [ key ] ) for n , d in node_data if key in d ) elif on_keyerr == 'default' : node_attrs = ( ( n , d . get ( key , default ) ) for n , d in node_data ) else : raise KeyError ( 'on_keyerr={} must be error filter or default' . format ( on_keyerr ) ) return node_attrs
Improved generator version of nx . get_node_attributes
406
13
9,535
def nx_gen_edge_values ( G , key , edges = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if edges is None : edges = G . edges ( ) if on_missing is None : on_missing = 'error' if on_keyerr is None : on_keyerr = 'default' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' # Generate `data_iter` edges and data dictionary if on_missing == 'error' : data_iter = ( G . adj [ u ] [ v ] for u , v in edges ) elif on_missing == 'default' : data_iter = ( G . adj [ u ] [ v ] if G . has_edge ( u , v ) else { } for u , v in edges ) else : raise KeyError ( 'on_missing={} must be error, filter or default' . format ( on_missing ) ) # Get `value_iter` desired value out of dictionary if on_keyerr == 'error' : value_iter = ( d [ key ] for d in data_iter ) elif on_keyerr == 'default' : value_iter = ( d . get ( key , default ) for d in data_iter ) else : raise KeyError ( 'on_keyerr={} must be error or default' . format ( on_keyerr ) ) return value_iter
Generates attributes values of specific edges
330
7
9,536
def nx_gen_edge_attrs ( G , key , edges = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if on_missing is None : on_missing = 'error' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' if edges is None : if G . is_multigraph ( ) : raise NotImplementedError ( '' ) # uvk_iter = G.edges(keys=True) else : edges = G . edges ( ) # Generate `edge_data` edges and data dictionary if on_missing == 'error' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) for u , v in edges ) elif on_missing == 'filter' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) for u , v in edges if G . has_edge ( u , v ) ) elif on_missing == 'default' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) if G . has_edge ( u , v ) else ( ( u , v ) , { } ) for u , v in edges ) else : raise KeyError ( 'on_missing={}' . format ( on_missing ) ) # Get `edge_attrs` desired value out of dictionary if on_keyerr == 'error' : edge_attrs = ( ( e , d [ key ] ) for e , d in edge_data ) elif on_keyerr == 'filter' : edge_attrs = ( ( e , d [ key ] ) for e , d in edge_data if key in d ) elif on_keyerr == 'default' : edge_attrs = ( ( e , d . get ( key , default ) ) for e , d in edge_data ) else : raise KeyError ( 'on_keyerr={}' . format ( on_keyerr ) ) return edge_attrs
Improved generator version of nx . get_edge_attributes
468
13
9,537
def nx_minimum_weight_component ( graph , weight = 'weight' ) : mwc = nx . minimum_spanning_tree ( graph , weight = weight ) # negative edges only reduce the total weight neg_edges = ( e for e , w in nx_gen_edge_attrs ( graph , weight ) if w < 0 ) mwc . add_edges_from ( neg_edges ) return mwc
A minimum weight component is an MST + all negative edges
98
12
9,538
def nx_ensure_agraph_color ( graph ) : from plottool import color_funcs import plottool as pt #import six def _fix_agraph_color ( data ) : try : orig_color = data . get ( 'color' , None ) alpha = data . get ( 'alpha' , None ) color = orig_color if color is None and alpha is not None : color = [ 0 , 0 , 0 ] if color is not None : color = pt . ensure_nonhex_color ( color ) #if isinstance(color, np.ndarray): # color = color.tolist() color = list ( color_funcs . ensure_base255 ( color ) ) if alpha is not None : if len ( color ) == 3 : color += [ int ( alpha * 255 ) ] else : color [ 3 ] = int ( alpha * 255 ) color = tuple ( color ) if len ( color ) == 3 : data [ 'color' ] = '#%02x%02x%02x' % color else : data [ 'color' ] = '#%02x%02x%02x%02x' % color except Exception as ex : import utool as ut ut . printex ( ex , keys = [ 'color' , 'orig_color' , 'data' ] ) raise for node , node_data in graph . nodes ( data = True ) : data = node_data _fix_agraph_color ( data ) for u , v , edge_data in graph . edges ( data = True ) : data = edge_data _fix_agraph_color ( data )
changes colors to hex strings on graph attrs
348
9
9,539
def dag_longest_path ( graph , source , target ) : if source == target : return [ source ] allpaths = nx . all_simple_paths ( graph , source , target ) longest_path = [ ] for l in allpaths : if len ( l ) > len ( longest_path ) : longest_path = l return longest_path
Finds the longest path in a dag between two nodes
79
11
9,540
def simplify_graph ( graph ) : import utool as ut nodes = sorted ( list ( graph . nodes ( ) ) ) node_lookup = ut . make_index_lookup ( nodes ) if graph . is_multigraph ( ) : edges = list ( graph . edges ( keys = True ) ) else : edges = list ( graph . edges ( ) ) new_nodes = ut . take ( node_lookup , nodes ) if graph . is_multigraph ( ) : new_edges = [ ( node_lookup [ e [ 0 ] ] , node_lookup [ e [ 1 ] ] , e [ 2 ] , { } ) for e in edges ] else : new_edges = [ ( node_lookup [ e [ 0 ] ] , node_lookup [ e [ 1 ] ] ) for e in edges ] cls = graph . __class__ new_graph = cls ( ) new_graph . add_nodes_from ( new_nodes ) new_graph . add_edges_from ( new_edges ) return new_graph
strips out everything but connectivity
233
6
9,541
def subgraph_from_edges ( G , edge_list , ref_back = True ) : # TODO: support multi-di-graph sub_nodes = list ( { y for x in edge_list for y in x [ 0 : 2 ] } ) #edge_list_no_data = [edge[0:2] for edge in edge_list] multi_edge_list = [ edge [ 0 : 3 ] for edge in edge_list ] if ref_back : G_sub = G . subgraph ( sub_nodes ) for edge in G_sub . edges ( keys = True ) : if edge not in multi_edge_list : G_sub . remove_edge ( * edge ) else : G_sub = G . subgraph ( sub_nodes ) . copy ( ) for edge in G_sub . edges ( keys = True ) : if edge not in multi_edge_list : G_sub . remove_edge ( * edge ) return G_sub
Creates a networkx graph that is a subgraph of G defined by the list of edges in edge_list .
214
24
9,542
def all_multi_paths ( graph , source , target , data = False ) : path_multiedges = list ( nx_all_simple_edge_paths ( graph , source , target , keys = True , data = data ) ) return path_multiedges
Returns specific paths along multi - edges from the source to this table . Multipaths are identified by edge keys .
60
24
9,543
def bfs_conditional ( G , source , reverse = False , keys = True , data = False , yield_nodes = True , yield_if = None , continue_if = None , visited_nodes = None , yield_source = False ) : if reverse and hasattr ( G , 'reverse' ) : G = G . reverse ( ) if isinstance ( G , nx . Graph ) : neighbors = functools . partial ( G . edges , data = data ) else : neighbors = functools . partial ( G . edges , keys = keys , data = data ) queue = collections . deque ( [ ] ) if visited_nodes is None : visited_nodes = set ( [ ] ) else : visited_nodes = set ( visited_nodes ) if source not in visited_nodes : if yield_nodes and yield_source : yield source visited_nodes . add ( source ) new_edges = neighbors ( source ) if isinstance ( new_edges , list ) : new_edges = iter ( new_edges ) queue . append ( ( source , new_edges ) ) while queue : parent , edges = queue [ 0 ] for edge in edges : child = edge [ 1 ] if yield_nodes : if child not in visited_nodes : if yield_if is None or yield_if ( G , child , edge ) : yield child else : if yield_if is None or yield_if ( G , child , edge ) : yield edge if child not in visited_nodes : visited_nodes . add ( child ) # Add new children to queue if the condition is satisfied if continue_if is None or continue_if ( G , child , edge ) : new_edges = neighbors ( child ) if isinstance ( new_edges , list ) : new_edges = iter ( new_edges ) queue . append ( ( child , new_edges ) ) queue . popleft ( )
Produce edges in a breadth - first - search starting at source but only return nodes that satisfy a condition and only iterate past a node if it satisfies a different condition .
423
36
9,544
def color_nodes ( graph , labelattr = 'label' , brightness = .878 , outof = None , sat_adjust = None ) : import plottool as pt import utool as ut node_to_lbl = nx . get_node_attributes ( graph , labelattr ) unique_lbls = sorted ( set ( node_to_lbl . values ( ) ) ) ncolors = len ( unique_lbls ) if outof is None : if ( ncolors ) == 1 : unique_colors = [ pt . LIGHT_BLUE ] elif ( ncolors ) == 2 : # https://matplotlib.org/examples/color/named_colors.html unique_colors = [ 'royalblue' , 'orange' ] unique_colors = list ( map ( pt . color_funcs . ensure_base01 , unique_colors ) ) else : unique_colors = pt . distinct_colors ( ncolors , brightness = brightness ) else : unique_colors = pt . distinct_colors ( outof , brightness = brightness ) if sat_adjust : unique_colors = [ pt . color_funcs . adjust_hsv_of_rgb ( c , sat_adjust = sat_adjust ) for c in unique_colors ] # Find edges and aids strictly between two nids if outof is None : lbl_to_color = ut . dzip ( unique_lbls , unique_colors ) else : gray = pt . color_funcs . ensure_base01 ( 'lightgray' ) unique_colors = [ gray ] + unique_colors offset = max ( 1 , min ( unique_lbls ) ) - 1 node_to_lbl = ut . map_vals ( lambda nid : max ( 0 , nid - offset ) , node_to_lbl ) lbl_to_color = ut . dzip ( range ( outof + 1 ) , unique_colors ) node_to_color = ut . map_vals ( lbl_to_color , node_to_lbl ) nx . set_node_attributes ( graph , name = 'color' , values = node_to_color ) ut . nx_ensure_agraph_color ( graph )
Colors edges and nodes by nid
506
8
9,545
def approx_min_num_components ( nodes , negative_edges ) : import utool as ut num = 0 g_neg = nx . Graph ( ) g_neg . add_nodes_from ( nodes ) g_neg . add_edges_from ( negative_edges ) # Collapse all nodes with degree 0 if nx . __version__ . startswith ( '2' ) : deg0_nodes = [ n for n , d in g_neg . degree ( ) if d == 0 ] else : deg0_nodes = [ n for n , d in g_neg . degree_iter ( ) if d == 0 ] for u , v in ut . itertwo ( deg0_nodes ) : nx_contracted_nodes ( g_neg , v , u , inplace = True ) # g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False) # Initialize unused nodes to be everything unused = list ( g_neg . nodes ( ) ) # complement of the graph contains all possible positive edges g_pos = nx . complement ( g_neg ) if False : from networkx . algorithms . approximation import clique maxiset , cliques = clique . clique_removal ( g_pos ) num = len ( cliques ) return num # Iterate until we have used all nodes while len ( unused ) > 0 : # Seed a new "minimum component" num += 1 # Grab a random unused node n1 #idx1 = np.random.randint(0, len(unused)) idx1 = 0 n1 = unused [ idx1 ] unused . remove ( n1 ) neigbs = list ( g_pos . neighbors ( n1 ) ) neigbs = ut . isect ( neigbs , unused ) while len ( neigbs ) > 0 : # Find node n2, that n1 could be connected to #idx2 = np.random.randint(0, len(neigbs)) idx2 = 0 n2 = neigbs [ idx2 ] unused . remove ( n2 ) # Collapse negative information of n1 and n2 g_neg = nx . contracted_nodes ( g_neg , n1 , n2 ) # Compute new possible positive edges g_pos = nx . complement ( g_neg ) # Iterate until n1 has no more possible connections neigbs = list ( g_pos . neighbors ( n1 ) ) neigbs = ut . isect ( neigbs , unused ) print ( 'num = %r' % ( num , ) ) return num
Find approximate minimum number of connected components possible Each edge represents that two nodes must be separated
582
17
9,546
def solve ( self , y , h , t_end ) : ts = [ ] ys = [ ] yi = y ti = 0.0 while ti < t_end : ts . append ( ti ) yi = self . step ( yi , None , ti , h ) ys . append ( yi ) ti += h return ts , ys
Given a function , initial conditions , step size , and end value , this will calculate an unforced system . The default start time is t = 0 . 0 but this can be changed .
77
34
9,547
def step ( self , y , u , t , h ) : k1 = h * self . func ( t , y , u ) k2 = h * self . func ( t + .5 * h , y + .5 * h * k1 , u ) k3 = h * self . func ( t + .5 * h , y + .5 * h * k2 , u ) k4 = h * self . func ( t + h , y + h * k3 , u ) return y + ( k1 + 2 * k2 + 2 * k3 + k4 ) / 6.0
This is called by solve but can be called by the user who wants to run through an integration with a control force .
131
24
9,548
def generate_proteins ( pepfn , proteins , pepheader , scorecol , minlog , higherbetter = True , protcol = False ) : protein_peptides = { } if minlog : higherbetter = False if not protcol : protcol = peptabledata . HEADER_MASTERPROTEINS for psm in reader . generate_tsv_psms ( pepfn , pepheader ) : p_acc = psm [ protcol ] if ';' in p_acc : continue protein_peptides = evaluate_peptide ( protein_peptides , psm , p_acc , higherbetter , scorecol , fncol = False ) if minlog : try : nextbestscore = min ( [ pep [ 'score' ] for pep in protein_peptides . values ( ) if pep [ 'score' ] > 0 ] ) except ValueError : import sys sys . stderr . write ( 'Cannot find score of type {} which is above 0. ' 'Only scores above zero can have a -log value. ' 'Exiting.' . format ( scorecol ) ) sys . exit ( 1 ) nextbestscore = - log ( nextbestscore , 10 ) for protein in proteins : try : peptide = protein_peptides [ protein [ prottabledata . HEADER_PROTEIN ] ] except KeyError : print ( 'WARNING - protein {} not found in peptide ' 'table' . format ( protein [ prottabledata . HEADER_PROTEIN ] ) ) peptide = { 'score' : 'NA' } if minlog and peptide [ 'score' ] != 'NA' : peptide [ 'score' ] = log_score ( peptide [ 'score' ] , nextbestscore ) protein [ prottabledata . HEADER_QSCORE ] = str ( peptide [ 'score' ] ) yield protein
Best peptide for each protein in a table
414
9
9,549
def add ( self , child ) : if isinstance ( child , Run ) : self . add_run ( child ) elif isinstance ( child , Record ) : self . add_record ( child ) elif isinstance ( child , EventRecord ) : self . add_event_record ( child ) elif isinstance ( child , DataDisplay ) : self . add_data_display ( child ) elif isinstance ( child , DataWriter ) : self . add_data_writer ( child ) elif isinstance ( child , EventWriter ) : self . add_event_writer ( child ) else : raise ModelError ( 'Unsupported child element' )
Adds a typed child object to the simulation spec .
140
10
9,550
def fetch ( self , id_ , return_fields = None ) : game_params = { "id" : id_ } if return_fields is not None : self . _validate_return_fields ( return_fields ) field_list = "," . join ( return_fields ) game_params [ "field_list" ] = field_list response = self . _query ( game_params , direct = True ) return response
Wrapper for fetching details of game by ID
93
10
9,551
def define_options ( self , names , parser_options = None ) : def copy_option ( options , name ) : return { k : v for k , v in options [ name ] . items ( ) } if parser_options is None : parser_options = { } options = { } for name in names : try : option = copy_option ( parser_options , name ) except KeyError : option = copy_option ( shared_options , name ) try : options . update ( { option [ 'clarg' ] : option } ) except TypeError : options . update ( { option [ 'clarg' ] [ 0 ] : option } ) return options
Given a list of option names this returns a list of dicts defined in all_options and self . shared_options . These can then be used to populate the argparser with
140
36
9,552
def current_memory_usage ( ) : import psutil proc = psutil . Process ( os . getpid ( ) ) #meminfo = proc.get_memory_info() meminfo = proc . memory_info ( ) rss = meminfo [ 0 ] # Resident Set Size / Mem Usage vms = meminfo [ 1 ] # Virtual Memory Size / VM Size # NOQA return rss
Returns this programs current memory usage in bytes
85
8
9,553
def num_unused_cpus ( thresh = 10 ) : import psutil cpu_usage = psutil . cpu_percent ( percpu = True ) return sum ( [ p < thresh for p in cpu_usage ] )
Returns the number of cpus with utilization less than thresh percent
50
13
9,554
def get_protein_group_content ( pgmap , master ) : # first item (0) is only a placeholder so the lookup.INDEX things get the # correct number. Would be nice with a solution, but the INDEXes were # originally made for mzidtsv protein group adding. pg_content = [ [ 0 , master , protein , len ( peptides ) , len ( [ psm for pgpsms in peptides . values ( ) for psm in pgpsms ] ) , sum ( [ psm [ 1 ] for pgpsms in peptides . values ( ) for psm in pgpsms ] ) , # score next ( iter ( next ( iter ( peptides . values ( ) ) ) ) ) [ 3 ] , # coverage next ( iter ( next ( iter ( peptides . values ( ) ) ) ) ) [ 2 ] , # evid level ] for protein , peptides in pgmap . items ( ) ] return pg_content
For each master protein we generate the protein group proteins complete with sequences psm_ids and scores . Master proteins are included in this group .
206
28
9,555
def get_protein_data ( peptide , pdata , headerfields , accfield ) : report = get_proteins ( peptide , pdata , headerfields ) return get_cov_descriptions ( peptide , pdata , report )
These fields are currently not pool dependent so headerfields is ignored
55
12
9,556
def get_num_chunks ( length , chunksize ) : n_chunks = int ( math . ceil ( length / chunksize ) ) return n_chunks
r Returns the number of chunks that a list will be split into given a chunksize .
37
18
9,557
def ProgChunks ( list_ , chunksize , nInput = None , * * kwargs ) : if nInput is None : nInput = len ( list_ ) n_chunks = get_num_chunks ( nInput , chunksize ) kwargs [ 'length' ] = n_chunks if 'freq' not in kwargs : kwargs [ 'freq' ] = 1 chunk_iter = util_iter . ichunks ( list_ , chunksize ) progiter_ = ProgressIter ( chunk_iter , * * kwargs ) return progiter_
Yields an iterator in chunks and computes progress . Progress version of ut . ichunks
130
19
9,558
def ensure_newline ( self ) : DECTCEM_SHOW = '\033[?25h' # show cursor AT_END = DECTCEM_SHOW + '\n' if not self . _cursor_at_newline : self . write ( AT_END ) self . _cursor_at_newline = True
use before any custom printing when using the progress iter to ensure your print statement starts on a new line instead of at the end of a progress line
75
29
9,559
def _get_timethresh_heuristics ( self ) : if self . length > 1E5 : time_thresh = 2.5 elif self . length > 1E4 : time_thresh = 2.0 elif self . length > 1E3 : time_thresh = 1.0 else : time_thresh = 0.5 return time_thresh
resonably decent hueristics for how much time to wait before updating progress .
83
17
9,560
def load_code ( name , base_path = None , recurse = False ) : if '/' in name : return load_location ( name , base_path , module = False ) return importer . import_code ( name , base_path , recurse = recurse )
Load executable code from a URL or a path
60
9
9,561
def load ( name , base_path = None ) : if '/' in name : return load_location ( name , base_path , module = True ) return importer . import_symbol ( name , base_path )
Load a module from a URL or a path
48
9
9,562
def extend ( path = None , cache = None ) : if path is None : path = config . PATH try : path = path . split ( ':' ) except : pass sys . path . extend ( [ library . to_path ( p , cache ) for p in path ] )
Extend sys . path by a list of git paths .
59
12
9,563
def extender ( path = None , cache = None ) : old_path = sys . path [ : ] extend ( path , cache = None ) try : yield finally : sys . path = old_path
A context that temporarily extends sys . path and reverts it after the context is complete .
43
18
9,564
def add ( self , child ) : if isinstance ( child , Case ) : self . add_case ( child ) else : raise ModelError ( 'Unsupported child element' )
Adds a typed child object to the conditional derived variable .
38
11
9,565
def add ( self , child ) : if isinstance ( child , Action ) : self . add_action ( child ) else : raise ModelError ( 'Unsupported child element' )
Adds a typed child object to the event handler .
38
10
9,566
def add ( self , child ) : if isinstance ( child , StateVariable ) : self . add_state_variable ( child ) elif isinstance ( child , DerivedVariable ) : self . add_derived_variable ( child ) elif isinstance ( child , ConditionalDerivedVariable ) : self . add_conditional_derived_variable ( child ) elif isinstance ( child , TimeDerivative ) : self . add_time_derivative ( child ) elif isinstance ( child , EventHandler ) : self . add_event_handler ( child ) elif isinstance ( child , KineticScheme ) : self . add_kinetic_scheme ( child ) else : raise ModelError ( 'Unsupported child element' )
Adds a typed child object to the behavioral object .
161
10
9,567
def add ( self , child ) : if isinstance ( child , Regime ) : self . add_regime ( child ) else : Behavioral . add ( self , child )
Adds a typed child object to the dynamics object .
37
10
9,568
def create_bioset_lookup ( lookupdb , spectrafns , set_names ) : unique_setnames = set ( set_names ) lookupdb . store_biosets ( ( ( x , ) for x in unique_setnames ) ) set_id_map = lookupdb . get_setnames ( ) mzmlfiles = ( ( os . path . basename ( fn ) , set_id_map [ setname ] ) for fn , setname in zip ( spectrafns , set_names ) ) lookupdb . store_mzmlfiles ( mzmlfiles ) lookupdb . index_biosets ( )
Fills lookup database with biological set names
143
8
9,569
def get_modpath_from_modname ( modname , prefer_pkg = False , prefer_main = False ) : from os . path import dirname , basename , join , exists initname = '__init__.py' mainname = '__main__.py' if modname in sys . modules : modpath = sys . modules [ modname ] . __file__ . replace ( '.pyc' , '.py' ) else : import pkgutil loader = pkgutil . find_loader ( modname ) modpath = loader . filename . replace ( '.pyc' , '.py' ) if '.' not in basename ( modpath ) : modpath = join ( modpath , initname ) if prefer_pkg : if modpath . endswith ( initname ) or modpath . endswith ( mainname ) : modpath = dirname ( modpath ) if prefer_main : if modpath . endswith ( initname ) : main_modpath = modpath [ : - len ( initname ) ] + mainname if exists ( main_modpath ) : modpath = main_modpath return modpath
Same as get_modpath but doesnt import directly
247
10
9,570
def check_module_installed ( modname ) : import pkgutil if '.' in modname : # Prevent explicit import if possible parts = modname . split ( '.' ) base = parts [ 0 ] submods = parts [ 1 : ] loader = pkgutil . find_loader ( base ) if loader is not None : # TODO: check to see if path to the submod exists submods return True loader = pkgutil . find_loader ( modname ) is_installed = loader is not None return is_installed
Check if a python module is installed without attempting to import it . Note that if modname indicates a child module the parent module is always loaded .
113
29
9,571
def import_module_from_fpath ( module_fpath ) : from os . path import basename , splitext , isdir , join , exists , dirname , split import platform if isdir ( module_fpath ) : module_fpath = join ( module_fpath , '__init__.py' ) print ( 'module_fpath = {!r}' . format ( module_fpath ) ) if not exists ( module_fpath ) : raise ImportError ( 'module_fpath={!r} does not exist' . format ( module_fpath ) ) python_version = platform . python_version ( ) modname = splitext ( basename ( module_fpath ) ) [ 0 ] if modname == '__init__' : modname = split ( dirname ( module_fpath ) ) [ 1 ] if util_inject . PRINT_INJECT_ORDER : if modname not in sys . argv : util_inject . noinject ( modname , N = 2 , via = 'ut.import_module_from_fpath' ) if python_version . startswith ( '2.7' ) : import imp module = imp . load_source ( modname , module_fpath ) elif python_version . startswith ( '3' ) : import importlib . machinery loader = importlib . machinery . SourceFileLoader ( modname , module_fpath ) module = loader . load_module ( ) # module = loader.exec_module(modname) else : raise AssertionError ( 'invalid python version={!r}' . format ( python_version ) ) return module
r imports module from a file path
364
7
9,572
def print_locals ( * args , * * kwargs ) : from utool import util_str from utool import util_dbg from utool import util_dict locals_ = util_dbg . get_parent_frame ( ) . f_locals keys = kwargs . get ( 'keys' , None if len ( args ) == 0 else [ ] ) to_print = { } for arg in args : varname = util_dbg . get_varname_from_locals ( arg , locals_ ) to_print [ varname ] = arg if keys is not None : to_print . update ( util_dict . dict_take ( locals_ , keys ) ) if not to_print : to_print = locals_ locals_str = util_str . repr4 ( to_print ) print ( locals_str )
Prints local variables in function .
185
7
9,573
def _extract_archive ( archive_fpath , archive_file , archive_namelist , output_dir , force_commonprefix = True , prefix = None , dryrun = False , verbose = not QUIET , overwrite = None ) : # force extracted components into a subdirectory if force_commonprefix is # on return_path = output_diG # FIXMpathE doesn't work right if prefix is not None : output_dir = join ( output_dir , prefix ) util_path . ensurepath ( output_dir ) archive_basename , ext = split_archive_ext ( basename ( archive_fpath ) ) if force_commonprefix and commonprefix ( archive_namelist ) == '' : # use the archivename as the default common prefix output_dir = join ( output_dir , archive_basename ) util_path . ensurepath ( output_dir ) for member in archive_namelist : ( dname , fname ) = split ( member ) dpath = join ( output_dir , dname ) util_path . ensurepath ( dpath ) if verbose : print ( '[utool] Unarchive ' + fname + ' in ' + dpath ) if not dryrun : if overwrite is False : if exists ( join ( output_dir , member ) ) : continue archive_file . extract ( member , path = output_dir ) return output_dir
archive_fpath = zip_fpath archive_file = zip_file
300
16
9,574
def open_url_in_browser ( url , browsername = None , fallback = False ) : import webbrowser print ( '[utool] Opening url=%r in browser' % ( url , ) ) if browsername is None : browser = webbrowser . open ( url ) else : browser = get_prefered_browser ( pref_list = [ browsername ] , fallback = fallback ) return browser . open ( url )
r Opens a url in the specified or default browser
97
11
9,575
def url_read ( url , verbose = True ) : if url . find ( '://' ) == - 1 : url = 'http://' + url if verbose : print ( 'Reading data from url=%r' % ( url , ) ) try : file_ = _urllib . request . urlopen ( url ) #file_ = _urllib.urlopen(url) except IOError : raise data = file_ . read ( ) file_ . close ( ) return data
r Directly reads data from url
107
7
9,576
def url_read_text ( url , verbose = True ) : data = url_read ( url , verbose ) text = data . decode ( 'utf8' ) return text
r Directly reads text data from url
39
8
9,577
def clean_dropbox_link ( dropbox_url ) : cleaned_url = dropbox_url . replace ( 'www.dropbox' , 'dl.dropbox' ) postfix_list = [ '?dl=0' ] for postfix in postfix_list : if cleaned_url . endswith ( postfix ) : cleaned_url = cleaned_url [ : - 1 * len ( postfix ) ] # cleaned_url = cleaned_url.rstrip('?dl=0') return cleaned_url
Dropbox links should be en - mass downloaed from dl . dropbox
113
17
9,578
def grab_selenium_chromedriver ( redownload = False ) : import utool as ut import os import stat # TODO: use a better download dir (but it must be in the PATh or selenium freaks out) chromedriver_dpath = ut . ensuredir ( ut . truepath ( '~/bin' ) ) chromedriver_fpath = join ( chromedriver_dpath , 'chromedriver' ) if not ut . checkpath ( chromedriver_fpath ) or redownload : assert chromedriver_dpath in os . environ [ 'PATH' ] . split ( os . pathsep ) # TODO: make this work for windows as well if ut . LINUX and ut . util_cplat . is64bit_python ( ) : import requests rsp = requests . get ( 'http://chromedriver.storage.googleapis.com/LATEST_RELEASE' , timeout = TIMEOUT ) assert rsp . status_code == 200 url = 'http://chromedriver.storage.googleapis.com/' + rsp . text . strip ( ) + '/chromedriver_linux64.zip' ut . grab_zipped_url ( url , download_dir = chromedriver_dpath , redownload = True ) else : raise AssertionError ( 'unsupported chrome driver getter script' ) if not ut . WIN32 : st = os . stat ( chromedriver_fpath ) os . chmod ( chromedriver_fpath , st . st_mode | stat . S_IEXEC ) ut . assert_exists ( chromedriver_fpath ) os . environ [ 'webdriver.chrome.driver' ] = chromedriver_fpath return chromedriver_fpath
r Automatically download selenium chrome driver if needed
394
11
9,579
def grab_selenium_driver ( driver_name = None ) : from selenium import webdriver if driver_name is None : driver_name = 'firefox' if driver_name . lower ( ) == 'chrome' : grab_selenium_chromedriver ( ) return webdriver . Chrome ( ) elif driver_name . lower ( ) == 'firefox' : # grab_selenium_chromedriver() return webdriver . Firefox ( ) else : raise AssertionError ( 'unknown name = %r' % ( driver_name , ) )
pip install selenium - U
122
8
9,580
def grab_file_url ( file_url , appname = 'utool' , download_dir = None , delay = None , spoof = False , fname = None , verbose = True , redownload = False , check_hash = False ) : file_url = clean_dropbox_link ( file_url ) if fname is None : fname = basename ( file_url ) # Download zipfile to if download_dir is None : download_dir = util_cplat . get_app_cache_dir ( appname ) # Zipfile should unzip to: fpath = join ( download_dir , fname ) # If check hash, get remote hash and assert local copy is the same if check_hash : if isinstance ( check_hash , ( list , tuple ) ) : hash_list = check_hash else : hash_list = [ 'md5' ] # hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256'] # Get expected remote file hash_remote , hash_tag_remote = grab_file_remote_hash ( file_url , hash_list , verbose = verbose ) hash_list = [ hash_tag_remote ] # We have a valid candidate hash from remote, check for same hash locally hash_local , hash_tag_local = get_file_local_hash ( fpath , hash_list , verbose = verbose ) if verbose : print ( '[utool] Pre Local Hash: %r' % ( hash_local , ) ) print ( '[utool] Pre Remote Hash: %r' % ( hash_remote , ) ) # Check all 4 hash conditions if hash_remote is None : # No remote hash provided, turn off post-download hash check check_hash = False elif hash_local is None : if verbose : print ( '[utool] Remote hash provided but local hash missing, redownloading.' ) redownload = True elif hash_local == hash_remote : assert hash_tag_local == hash_tag_remote , ( 'hash tag disagreement' ) else : if verbose : print ( '[utool] Both hashes provided, but they disagree, redownloading.' ) redownload = True # Download util_path . ensurepath ( download_dir ) if redownload or not exists ( fpath ) : # Download testdata if verbose : print ( '[utool] Downloading file %s' % fpath ) if delay is not None : print ( '[utool] delay download by %r seconds' % ( delay , ) ) time . 
sleep ( delay ) download_url ( file_url , fpath , spoof = spoof ) else : if verbose : print ( '[utool] Already have file %s' % fpath ) util_path . assert_exists ( fpath ) # Post-download local hash verification if check_hash : # File has been successfuly downloaded, write remote hash to local hash file hash_fpath = '%s.%s' % ( fpath , hash_tag_remote , ) with open ( hash_fpath , 'w' ) as hash_file : hash_file . write ( hash_remote ) # For sanity check (custom) and file verification (hashing), get local hash again hash_local , hash_tag_local = get_file_local_hash ( fpath , hash_list , verbose = verbose ) if verbose : print ( '[utool] Post Local Hash: %r' % ( hash_local , ) ) assert hash_local == hash_remote , 'Post-download hash disagreement' assert hash_tag_local == hash_tag_remote , 'Post-download hash tag disagreement' return fpath
r Downloads a file and returns the local path of the file .
806
13
9,581
def grab_zipped_url ( zipped_url , ensure = True , appname = 'utool' , download_dir = None , force_commonprefix = True , cleanup = False , redownload = False , spoof = False ) : zipped_url = clean_dropbox_link ( zipped_url ) zip_fname = split ( zipped_url ) [ 1 ] data_name = split_archive_ext ( zip_fname ) [ 0 ] # Download zipfile to if download_dir is None : download_dir = util_cplat . get_app_cache_dir ( appname ) # Zipfile should unzip to: data_dir = join ( download_dir , data_name ) if ensure or redownload : if redownload : util_path . remove_dirs ( data_dir ) util_path . ensurepath ( download_dir ) if not exists ( data_dir ) or redownload : # Download and unzip testdata zip_fpath = realpath ( join ( download_dir , zip_fname ) ) #print('[utool] Downloading archive %s' % zip_fpath) if not exists ( zip_fpath ) or redownload : download_url ( zipped_url , zip_fpath , spoof = spoof ) unarchive_file ( zip_fpath , force_commonprefix ) if cleanup : util_path . delete ( zip_fpath ) # Cleanup if cleanup : util_path . assert_exists ( data_dir ) return util_path . unixpath ( data_dir )
r downloads and unzips the url
346
8
9,582
def scp_pull ( remote_path , local_path = '.' , remote = 'localhost' , user = None ) : import utool as ut if user is not None : remote_uri = user + '@' + remote + ':' + remote_path else : remote_uri = remote + ':' + remote_path scp_exe = 'scp' scp_args = ( scp_exe , '-r' , remote_uri , local_path ) ut . cmd ( scp_args )
r wrapper for scp
113
5
9,583
def list_remote ( remote_uri , verbose = False ) : remote_uri1 , remote_dpath = remote_uri . split ( ':' ) if not remote_dpath : remote_dpath = '.' import utool as ut out = ut . cmd ( 'ssh' , remote_uri1 , 'ls -l %s' % ( remote_dpath , ) , verbose = verbose ) import re # Find lines that look like ls output split_lines = [ re . split ( r'\s+' , t ) for t in out [ 0 ] . split ( '\n' ) ] paths = [ ' ' . join ( t2 [ 8 : ] ) for t2 in split_lines if len ( t2 ) > 8 ] return paths
remote_uri = user
167
5
9,584
def rsync ( src_uri , dst_uri , exclude_dirs = [ ] , port = 22 , dryrun = False ) : from utool import util_cplat rsync_exe = 'rsync' rsync_options = '-avhzP' #rsync_options += ' --port=%d' % (port,) rsync_options += ' -e "ssh -p %d"' % ( port , ) if len ( exclude_dirs ) > 0 : exclude_tup = [ '--exclude ' + dir_ for dir_ in exclude_dirs ] exclude_opts = ' ' . join ( exclude_tup ) rsync_options += ' ' + exclude_opts cmdtuple = ( rsync_exe , rsync_options , src_uri , dst_uri ) cmdstr = ' ' . join ( cmdtuple ) print ( '[rsync] src_uri = %r ' % ( src_uri , ) ) print ( '[rsync] dst_uri = %r ' % ( dst_uri , ) ) print ( '[rsync] cmdstr = %r' % cmdstr ) print ( cmdstr ) #if not dryrun: util_cplat . cmd ( cmdstr , dryrun = dryrun )
r Wrapper for rsync
278
6
9,585
def get_cache ( self , namespace , query_hash , length , start , end ) : query = 'SELECT start, value FROM gauged_cache WHERE namespace = ? ' 'AND hash = ? AND length = ? AND start BETWEEN ? AND ?' cursor = self . cursor cursor . execute ( query , ( namespace , query_hash , length , start , end ) ) return tuple ( cursor . fetchall ( ) )
Get a cached value for the specified date range and query
90
11
9,586
def review ( cls , content , log , parent , window_icon ) : # pragma: no cover dlg = DlgReview ( content , log , parent , window_icon ) if dlg . exec_ ( ) : return dlg . ui . edit_main . toPlainText ( ) , dlg . ui . edit_log . toPlainText ( ) return None , None
Reviews the final bug report .
92
7
9,587
def get_version ( ) : version_desc = open ( os . path . join ( os . path . abspath ( APISettings . VERSION_FILE ) ) ) version_file = version_desc . read ( ) try : version = re . search ( r"version=['\"]([^'\"]+)['\"]" , version_file ) . group ( 1 ) return version except FileNotFoundError : Shell . fail ( 'File not found!' ) raise FileNotFoundError except ValueError : Shell . fail ( 'Version not found in file ' + version_file + '!' ) raise ValueError finally : version_desc . close ( )
Return version from setup . py
140
6
9,588
def set_version ( old_version , new_version ) : try : if APISettings . DEBUG : Shell . debug ( '* ' + old_version + ' --> ' + new_version ) return True for line in fileinput . input ( os . path . abspath ( APISettings . VERSION_FILE ) , inplace = True ) : print ( line . replace ( old_version , new_version ) , end = '' ) Shell . success ( '* ' + old_version + ' --> ' + new_version ) except FileNotFoundError : Shell . warn ( 'File not found!' )
Write new version into VERSION_FILE
131
8
9,589
def set_major ( self ) : old_version = self . get_version ( ) new_version = str ( int ( old_version . split ( '.' , 5 ) [ 0 ] ) + 1 ) + '.0.0' self . set_version ( old_version , new_version )
Increment the major number of project
65
7
9,590
def set_minor ( self ) : old_version = self . get_version ( ) new_version = str ( int ( old_version . split ( '.' , 5 ) [ 0 ] ) ) + '.' + str ( int ( old_version . split ( '.' , 5 ) [ 1 ] ) + 1 ) + '.0' self . set_version ( old_version , new_version )
Increment the minor number of project
88
7
9,591
def set_patch ( self , pre_release_tag = '' ) : current_version = self . get_version ( ) current_patch = self . get_patch_version ( current_version ) current_pre_release_tag = self . get_current_pre_release_tag ( current_patch ) current_RELEASE_SEPARATOR = self . get_current_RELEASE_SEPARATOR ( current_patch ) new_patch = '' # The new patch should get a release tag if pre_release_tag : # Check, if the current patch already contains a pre_release_tag. if current_pre_release_tag : new_patch = str ( current_patch . split ( current_pre_release_tag , 2 ) [ 0 ] ) + pre_release_tag if pre_release_tag == current_pre_release_tag : new_patch += str ( int ( current_patch . split ( current_pre_release_tag , 2 ) [ 1 ] ) + 1 ) else : new_patch += '0' # The current patch does not contains a pre_release_tag. else : new_patch = str ( int ( current_patch ) + 1 ) + APISettings . RELEASE_SEPARATOR + pre_release_tag + '0' # The new patch should not contain any tag. So just increase it. else : if current_RELEASE_SEPARATOR : new_patch = str ( int ( current_patch . split ( current_RELEASE_SEPARATOR , 2 ) [ 0 ] ) + 1 ) elif current_pre_release_tag : new_patch = str ( int ( current_patch . split ( current_pre_release_tag , 2 ) [ 0 ] ) + 1 ) else : new_patch = str ( int ( current_patch ) + 1 ) new_version = str ( int ( current_version . split ( '.' , 5 ) [ 0 ] ) ) + '.' + str ( int ( current_version . split ( '.' , 5 ) [ 1 ] ) ) + '.' + str ( new_patch ) self . set_version ( current_version , new_version )
Increment the patch number of project
466
7
9,592
def flush ( self ) : ( slice_ , self . __buffer ) = ( self . __buffer , '' ) self . __size = 0 return slice_
Return all buffered data and clear the stack .
33
10
9,593
def __send_hello ( self ) : _logger . debug ( "Saying hello: [%s]" , self ) self . __c . send ( nsq . config . protocol . MAGIC_IDENTIFIER )
Initiate the handshake .
48
6
9,594
def __sender ( self ) : # If we're ignoring the quit, the connections will have to be closed # by the server. while ( self . __ignore_quit is True or self . __nice_quit_ev . is_set ( ) is False ) and self . __force_quit_ev . is_set ( ) is False : # TODO(dustin): The quit-signals aren't being properly set after a producer # stop. # TODO(dustin): Consider breaking the loop if we haven't yet retried to # reconnect a couple of times. A connection will automatically be # reattempted. try : ( command , parts ) = self . __outgoing_q . get ( block = False ) except gevent . queue . Empty : gevent . sleep ( nsq . config . client . WRITE_THROTTLE_S ) else : _logger . debug ( "Dequeued outgoing command ((%d) remaining): " "[%s]" , self . __outgoing_q . qsize ( ) , self . __distill_command_name ( command ) ) self . __send_command_primitive ( command , parts ) self . __send_thread_ev . set ( )
Send - loop .
263
4
9,595
def __receiver ( self ) : # If we're ignoring the quit, the connections will have to be closed # by the server. while ( self . __ignore_quit is True or self . __nice_quit_ev . is_set ( ) is False ) and self . __force_quit_ev . is_set ( ) is False : # TODO(dustin): The quit-signals aren't being properly set after a producer # stop. # TODO(dustin): Consider breaking the loop if we haven't yet retried to # reconnect a couple of times. A connection will automatically be # reattempted. try : self . __read_frame ( ) except errno . EAGAIN : gevent . sleep ( nsq . config . client . READ_THROTTLE_S ) self . __receive_thread_ev . set ( )
Receive - loop .
186
5
9,596
def run ( self ) : while self . __nice_quit_ev . is_set ( ) is False : self . __connect ( ) _logger . info ( "Connection re-connect loop has terminated: %s" , self . __mc )
Connect the server and maintain the connection . This shall not return until a connection has been determined to absolutely not be available .
54
24
9,597
def save ( obj , filename , protocol = 4 ) : with open ( filename , 'wb' ) as f : pickle . dump ( obj , f , protocol = protocol )
Serialize an object to disk using pickle protocol .
37
11
9,598
def load_json ( filename , * * kwargs ) : with open ( filename , 'r' , encoding = 'utf-8' ) as f : return json . load ( f , * * kwargs )
Load a JSON object from the specified file .
47
9
9,599
def save_json ( obj , filename , * * kwargs ) : with open ( filename , 'w' , encoding = 'utf-8' ) as f : json . dump ( obj , f , * * kwargs )
Save an object as a JSON file .
50
8