idx (int64, 0-251k) | question (string, lengths 53-3.53k) | target (string, lengths 5-1.23k) | len_question (int64, 20-893) | len_target (int64, 3-238) |
|---|---|---|---|---|
9,200 | def get_one ( cls , db , * args , * * kwargs ) : data = db [ cls . collection ] . find_one ( * args , * * kwargs ) if data : return cls . wrap_incoming ( data , db ) else : return None | Returns an object that corresponds to given query or None . | 63 | 11 |
9,201 | def get_id ( self ) : import warnings warnings . warn ( '{0}.get_id() is deprecated, ' 'use {0}.id instead' . format ( type ( self ) . __name__ ) , DeprecationWarning ) return self . get ( '_id' ) | Returns object id or None . | 62 | 6 |
9,202 | def get_ref ( self ) : _id = self . id if _id is None : return None else : return DBRef ( self . collection , _id ) | Returns a DBRef for this object or None . | 35 | 10 |
9,203 | def formatTime ( self , record , datefmt = None ) : # noqa if datefmt : s = datetime . datetime . now ( ) . strftime ( datefmt ) else : t = datetime . datetime . now ( ) . strftime ( self . default_time_format ) s = self . default_msec_format % ( t , record . msecs ) return s | Overrides formatTime method to use datetime module instead of time module to display time in microseconds . Time module by default does not resolve time to microseconds . | 88 | 34 |
9,204 | def related_to ( self , instance ) : return self . filter ( table_name = instance . table_name , record_id = instance . record_id ) | Filter for all log objects of the same connected model as the given instance . | 35 | 15 |
9,205 | def capture_insert_from_model ( cls , table_name , record_id , * , exclude_fields = ( ) ) : exclude_cols = ( ) if exclude_fields : model_cls = get_connected_model_for_table_name ( table_name ) exclude_cols = cls . _fieldnames_to_colnames ( model_cls , exclude_fields ) raw_query = sql . SQL ( """ SELECT {schema}.hc_capture_insert_from_row( hstore({schema}.{table_name}.*), %(table_name)s, ARRAY[{exclude_cols}]::text[] -- cast to type expected by stored procedure ) AS id FROM {schema}.{table_name} WHERE id = %(record_id)s """ ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , exclude_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in exclude_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs ) | Create a fresh insert record from the current model state in the database . | 300 | 14 |
9,206 | def capture_update_from_model ( cls , table_name , record_id , * , update_fields = ( ) ) : include_cols = ( ) if update_fields : model_cls = get_connected_model_for_table_name ( table_name ) include_cols = cls . _fieldnames_to_colnames ( model_cls , update_fields ) raw_query = sql . SQL ( """ SELECT {schema}.hc_capture_update_from_row( hstore({schema}.{table_name}.*), %(table_name)s, ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure ) AS id FROM {schema}.{table_name} WHERE id = %(record_id)s """ ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , include_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in include_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs ) | Create a fresh update record from the current model state in the database . | 299 | 14 |
9,207 | def get_model ( self ) : model_cls = get_connected_model_for_table_name ( self . table_name ) return model_cls . _default_manager . filter ( id = self . record_id ) . first ( ) | Fetch the instance of the connected model referenced by this log record . | 56 | 14 |
9,208 | def related ( self , * , exclude_self = False ) : manager = type ( self ) . _default_manager queryset = manager . related_to ( self ) if exclude_self : queryset = queryset . exclude ( id = self . id ) return queryset | Get a QuerySet for all trigger log objects for the same connected model . | 62 | 15 |
9,209 | def _fieldnames_to_colnames ( model_cls , fieldnames ) : get_field = model_cls . _meta . get_field fields = map ( get_field , fieldnames ) return { f . column for f in fields } | Get the names of columns referenced by the given model fields . | 55 | 12 |
9,210 | def redo ( self ) : trigger_log = self . _to_live_trigger_log ( state = TRIGGER_LOG_STATE [ 'NEW' ] ) trigger_log . save ( force_insert = True ) # make sure we get a fresh row self . state = TRIGGER_LOG_STATE [ 'REQUEUED' ] self . save ( update_fields = [ 'state' ] ) return trigger_log | Re - sync the change recorded in this trigger log . | 95 | 11 |
9,211 | def add_isoquant_data ( proteins , quantproteins , quantacc , quantfields ) : for protein in base_add_isoquant_data ( proteins , quantproteins , prottabledata . HEADER_PROTEIN , quantacc , quantfields ) : yield protein | Runs through a protein table and adds quant data from ANOTHER protein table that contains that data . | 64 | 20 |
9,212 | def add_isoquant_data ( peptides , quantpeptides , quantacc , quantfields ) : for peptide in base_add_isoquant_data ( peptides , quantpeptides , peptabledata . HEADER_PEPTIDE , quantacc , quantfields ) : yield peptide | Runs through a peptide table and adds quant data from ANOTHER peptide table that contains that data . | 68 | 22 |
9,213 | def map ( self , fn ) : return TimeSeries ( [ ( x , fn ( y ) ) for x , y in self . points ] ) | Run a map function across all y points in the series | 31 | 11 |
9,214 | def build_proteintable ( pqdb , headerfields , mergecutoff , isobaric = False , precursor = False , probability = False , fdr = False , pep = False , genecentric = False ) : pdmap = create_featuredata_map ( pqdb , genecentric = genecentric , psm_fill_fun = pinfo . add_psms_to_proteindata , pgene_fill_fun = pinfo . add_protgene_to_protdata , count_fun = pinfo . count_peps_psms , get_uniques = True ) empty_return = lambda x , y , z : { } iso_fun = { True : get_isobaric_quant , False : empty_return } [ isobaric ] ms1_fun = { True : get_precursor_quant , False : empty_return } [ precursor ] prob_fun = { True : get_prot_probability , False : empty_return } [ probability ] fdr_fun = { True : get_prot_fdr , False : empty_return } [ fdr ] pep_fun = { True : get_prot_pep , False : empty_return } [ pep ] pdata_fun = { True : get_protein_data_genecentric , False : get_protein_data } [ genecentric is not False ] protein_sql , sqlfieldmap = pqdb . prepare_mergetable_sql ( precursor , isobaric , probability , fdr , pep ) accession_field = prottabledata . ACCESSIONS [ genecentric ] proteins = pqdb . get_merged_features ( protein_sql ) protein = next ( proteins ) outprotein = { accession_field : protein [ sqlfieldmap [ 'p_acc' ] ] } check_prot = { k : v for k , v in outprotein . items ( ) } if not mergecutoff or protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) for protein in proteins : if mergecutoff and not protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : continue p_acc = protein [ sqlfieldmap [ 'p_acc' ] ] if p_acc != outprotein [ accession_field ] : # check if protein has been filled, otherwise do not output # sometimes proteins have NA in all fields if outprotein != check_prot : yield outprotein outprotein = { accession_field : p_acc } check_prot = { k : v for k , v in outprotein . items ( ) } fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) if outprotein != check_prot : yield outprotein | Fetches proteins and quants from joined lookup table loops through them and when all of a protein s quants have been collected yields the protein quant information . | 712 | 32 |
9,215 | def count_protein_group_hits ( lineproteins , groups ) : hits = [ ] for group in groups : hits . append ( 0 ) for protein in lineproteins : if protein in group : hits [ - 1 ] += 1 return [ str ( x ) for x in hits ] | Takes a list of protein accessions and a list of protein groups content from DB . Counts for each group in list how many proteins are found in lineproteins . Returns list of str amounts . | 64 | 42 |
9,216 | def get_logging_dir ( appname = 'default' ) : from utool . _internal import meta_util_cache from utool . _internal import meta_util_cplat from utool import util_cache if appname is None or appname == 'default' : appname = util_cache . get_default_appname ( ) resource_dpath = meta_util_cplat . get_resource_dir ( ) default = join ( resource_dpath , appname , 'logs' ) # Check global cache for a custom logging dir otherwise # use the default. log_dir = meta_util_cache . global_cache_read ( logdir_cacheid , appname = appname , default = default ) log_dir_realpath = realpath ( log_dir ) return log_dir_realpath | The default log dir is in the system resource directory But the utool global cache allows for the user to override where the logs for a specific app should be stored . | 182 | 33 |
9,217 | def add_logging_handler ( handler , format_ = 'file' ) : global __UTOOL_ROOT_LOGGER__ if __UTOOL_ROOT_LOGGER__ is None : builtins . print ( '[WARNING] logger not started, cannot add handler' ) return # create formatter and add it to the handlers #logformat = '%Y-%m-%d %H:%M:%S' #logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' timeformat = '%H:%M:%S' if format_ == 'file' : logformat = '[%(asctime)s]%(message)s' elif format_ == 'stdout' : logformat = '%(message)s' else : raise AssertionError ( 'unknown logging format_: %r' % format_ ) # Create formatter for handlers formatter = logging . Formatter ( logformat , timeformat ) handler . setLevel ( logging . DEBUG ) handler . setFormatter ( formatter ) __UTOOL_ROOT_LOGGER__ . addHandler ( handler ) | mostly for util_logging internals | 259 | 8 |
9,218 | def stop_logging ( ) : global __UTOOL_ROOT_LOGGER__ global __UTOOL_PRINT__ global __UTOOL_WRITE__ global __UTOOL_FLUSH__ if __UTOOL_ROOT_LOGGER__ is not None : # Flush remaining buffer if VERBOSE or LOGGING_VERBOSE : _utool_print ( ) ( ) ( '<__LOG_STOP__>' ) _utool_flush ( ) ( ) # Remove handlers for h in __UTOOL_ROOT_LOGGER__ . handlers [ : ] : __UTOOL_ROOT_LOGGER__ . removeHandler ( h ) # Reset objects __UTOOL_ROOT_LOGGER__ = None __UTOOL_PRINT__ = None __UTOOL_WRITE__ = None __UTOOL_FLUSH__ = None | Restores utool print functions to python defaults | 186 | 9 |
9,219 | def replace_nones ( list_ , repl = - 1 ) : repl_list = [ repl if item is None else ( replace_nones ( item , repl ) if isinstance ( item , list ) else item ) for item in list_ ] return repl_list | r Recursively removes Nones in all lists and sublists and replaces them with the repl variable | 57 | 20 |
9,220 | def recursive_replace ( list_ , target , repl = - 1 ) : repl_list = [ recursive_replace ( item , target , repl ) if isinstance ( item , ( list , np . ndarray ) ) else ( repl if item == target else item ) for item in list_ ] return repl_list | r Recursively removes target in all lists and sublists and replaces them with the repl variable | 67 | 19 |
9,221 | def ensure_list_size ( list_ , size_ ) : lendiff = ( size_ ) - len ( list_ ) if lendiff > 0 : extension = [ None for _ in range ( lendiff ) ] list_ . extend ( extension ) | Allocates more space if needbe . | 53 | 9 |
9,222 | def multi_replace ( instr , search_list = [ ] , repl_list = None ) : repl_list = [ '' ] * len ( search_list ) if repl_list is None else repl_list for ser , repl in zip ( search_list , repl_list ) : instr = instr . replace ( ser , repl ) return instr | Does a string replace with a list of search and replacements | 73 | 11 |
9,223 | def invertible_flatten1 ( unflat_list ) : nextnum = functools . partial ( six . next , itertools . count ( 0 ) ) # Build an unflat list of flat indexes reverse_list = [ [ nextnum ( ) for _ in tup ] for tup in unflat_list ] flat_list = flatten ( unflat_list ) return flat_list , reverse_list | r Flattens unflat_list but remember how to reconstruct the unflat_list Returns flat_list and the reverse_list with indexes into the flat_list | 91 | 34 |
9,224 | def invertible_flatten2 ( unflat_list ) : sublen_list = list ( map ( len , unflat_list ) ) if not util_type . HAVE_NUMPY : cumlen_list = np . cumsum ( sublen_list ) # Build an unflat list of flat indexes else : cumlen_list = list ( accumulate ( sublen_list ) ) flat_list = flatten ( unflat_list ) return flat_list , cumlen_list | An alternative to invertible_flatten1 which uses cumsum | 107 | 15 |
9,225 | def invertible_flatten2_numpy ( unflat_arrs , axis = 0 ) : cumlen_list = np . cumsum ( [ arr . shape [ axis ] for arr in unflat_arrs ] ) flat_list = np . concatenate ( unflat_arrs , axis = axis ) return flat_list , cumlen_list | more numpy version | 80 | 4 |
9,226 | def unflat_unique_rowid_map ( func , unflat_rowids , * * kwargs ) : import utool as ut # First flatten the list, and remember the original dimensions flat_rowids , reverse_list = ut . invertible_flatten2 ( unflat_rowids ) # Then make the input unique flat_rowids_arr = np . array ( flat_rowids ) unique_flat_rowids , inverse_unique = np . unique ( flat_rowids_arr , return_inverse = True ) # Then preform the lookup / implicit mapping unique_flat_vals = func ( unique_flat_rowids , * * kwargs ) # Then broadcast unique values back to original flat positions flat_vals_ = np . array ( unique_flat_vals ) [ inverse_unique ] #flat_vals_ = np.array(unique_flat_vals).take(inverse_unique, axis=0) output_shape = tuple ( list ( flat_rowids_arr . shape ) + list ( flat_vals_ . shape [ 1 : ] ) ) flat_vals = np . array ( flat_vals_ ) . reshape ( output_shape ) # Then _unflatten the results to the original input dimensions unflat_vals = ut . unflatten2 ( flat_vals , reverse_list ) return unflat_vals | performs only one call to the underlying func with unique rowids the func must be some lookup function | 296 | 20 |
9,227 | def allsame ( list_ , strict = True ) : if len ( list_ ) == 0 : return True first_item = list_ [ 0 ] return list_all_eq_to ( list_ , first_item , strict ) | checks to see if list is equal everywhere | 50 | 8 |
9,228 | def list_all_eq_to ( list_ , val , strict = True ) : if util_type . HAVE_NUMPY and isinstance ( val , np . ndarray ) : return all ( [ np . all ( item == val ) for item in list_ ] ) try : # FUTURE WARNING # FutureWarning: comparison to `None` will result in an elementwise object comparison in the future. with warnings . catch_warnings ( ) : warnings . filterwarnings ( 'ignore' , category = FutureWarning ) flags = [ item == val for item in list_ ] return all ( [ np . all ( flag ) if hasattr ( flag , '__array__' ) else flag for flag in flags ] ) #return all([item == val for item in list_]) except ValueError : if not strict : return all ( [ repr ( item ) == repr ( val ) for item in list_ ] ) else : raise | checks to see if list is equal everywhere to a value | 200 | 11 |
9,229 | def get_dirty_items ( item_list , flag_list ) : assert len ( item_list ) == len ( flag_list ) dirty_items = [ item for ( item , flag ) in zip ( item_list , flag_list ) if not flag ] #print('num_dirty_items = %r' % len(dirty_items)) #print('item_list = %r' % (item_list,)) #print('flag_list = %r' % (flag_list,)) return dirty_items | Returns each item in item_list where not flag in flag_list | 114 | 14 |
9,230 | def filterfalse_items ( item_list , flag_list ) : assert len ( item_list ) == len ( flag_list ) filtered_items = list ( util_iter . ifilterfalse_items ( item_list , flag_list ) ) return filtered_items | Returns items in item list where the corresponding item in flag list is true | 59 | 14 |
9,231 | def isect ( list1 , list2 ) : set2 = set ( list2 ) return [ item for item in list1 if item in set2 ] | r returns list1 elements that are also in list2 . preserves order of list1 | 33 | 17 |
9,232 | def is_subset_of_any ( set_ , other_sets ) : set_ = set ( set_ ) other_sets = map ( set , other_sets ) return any ( [ set_ . issubset ( other_set ) for other_set in other_sets ] ) | returns True if set_ is a subset of any set in other_sets | 63 | 16 |
9,233 | def unique_ordered ( list_ ) : list_ = list ( list_ ) flag_list = flag_unique_items ( list_ ) unique_list = compress ( list_ , flag_list ) return unique_list | Returns unique items in list_ in the order they were seen . | 47 | 13 |
9,234 | def setdiff ( list1 , list2 ) : set2 = set ( list2 ) return [ item for item in list1 if item not in set2 ] | returns list1 elements that are not in list2 . preserves order of list1 | 34 | 17 |
9,235 | def isetdiff_flags ( list1 , list2 ) : set2 = set ( list2 ) return ( item not in set2 for item in list1 ) | move to util_iter | 35 | 5 |
9,236 | def unflat_take ( items_list , unflat_index_list ) : return [ unflat_take ( items_list , xs ) if isinstance ( xs , list ) else take ( items_list , xs ) for xs in unflat_index_list ] | r Returns nested subset of items_list | 62 | 8 |
9,237 | def argsort ( * args , * * kwargs ) : if len ( args ) == 1 and isinstance ( args [ 0 ] , dict ) : dict_ = args [ 0 ] index_list = list ( dict_ . keys ( ) ) value_list = list ( dict_ . values ( ) ) return sortedby2 ( index_list , value_list ) else : index_list = list ( range ( len ( args [ 0 ] ) ) ) return sortedby2 ( index_list , * args , * * kwargs ) | like np . argsort but for lists | 116 | 8 |
9,238 | def argsort2 ( indexable , key = None , reverse = False ) : # Create an iterator of value/key pairs if isinstance ( indexable , dict ) : vk_iter = ( ( v , k ) for k , v in indexable . items ( ) ) else : vk_iter = ( ( v , k ) for k , v in enumerate ( indexable ) ) # Sort by values and extract the keys if key is None : indices = [ k for v , k in sorted ( vk_iter , reverse = reverse ) ] else : indices = [ k for v , k in sorted ( vk_iter , key = lambda vk : key ( vk [ 0 ] ) , reverse = reverse ) ] return indices | Returns the indices that would sort a indexable object . | 158 | 11 |
9,239 | def index_complement ( index_list , len_ = None ) : mask1 = index_to_boolmask ( index_list , len_ ) mask2 = not_list ( mask1 ) index_list_bar = list_where ( mask2 ) return index_list_bar | Returns the other indicies in a list of length len_ | 62 | 12 |
9,240 | def take_complement ( list_ , index_list ) : mask = not_list ( index_to_boolmask ( index_list , len ( list_ ) ) ) return compress ( list_ , mask ) | Returns items in list_ not indexed by index_list | 46 | 11 |
9,241 | def take ( list_ , index_list ) : try : return [ list_ [ index ] for index in index_list ] except TypeError : return list_ [ index_list ] | Selects a subset of a list based on a list of indices . This is similar to np . take but pure python . | 39 | 25 |
9,242 | def take_percentile ( arr , percent ) : size = len ( arr ) stop = min ( int ( size * percent ) , len ( arr ) ) return arr [ 0 : stop ] | take the top percent items in a list rounding up | 40 | 10 |
9,243 | def snapped_slice ( size , frac , n ) : if size < n : n = size start = int ( size * frac - ceil ( n / 2 ) ) + 1 stop = int ( size * frac + floor ( n / 2 ) ) + 1 # slide to the front or the back buf = 0 if stop >= size : buf = ( size - stop ) elif start < 0 : buf = 0 - start stop += buf start += buf assert stop <= size , 'out of bounds [%r, %r]' % ( stop , start ) sl = slice ( start , stop ) return sl | r Creates a slice spanning n items in a list of length size at position frac . | 130 | 19 |
9,244 | def take_percentile_parts ( arr , front = None , mid = None , back = None ) : slices = [ ] if front : slices += [ snapped_slice ( len ( arr ) , 0.0 , front ) ] if mid : slices += [ snapped_slice ( len ( arr ) , 0.5 , mid ) ] if back : slices += [ snapped_slice ( len ( arr ) , 1.0 , back ) ] parts = flatten ( [ arr [ sl ] for sl in slices ] ) return parts | r Take parts from front back or middle of a list | 111 | 11 |
9,245 | def broadcast_zip ( list1 , list2 ) : try : len ( list1 ) except TypeError : list1 = list ( list1 ) try : len ( list2 ) except TypeError : list2 = list ( list2 ) # if len(list1) == 0 or len(list2) == 0: # # Corner case where either list is empty # return [] if len ( list1 ) == 1 and len ( list2 ) > 1 : list1 = list1 * len ( list2 ) elif len ( list1 ) > 1 and len ( list2 ) == 1 : list2 = list2 * len ( list1 ) elif len ( list1 ) != len ( list2 ) : raise ValueError ( 'out of alignment len(list1)=%r, len(list2)=%r' % ( len ( list1 ) , len ( list2 ) ) ) # return list(zip(list1, list2)) return zip ( list1 , list2 ) | r Zips elementwise pairs between list1 and list2 . Broadcasts the first dimension if a single list is of length 1 . | 211 | 27 |
9,246 | def equal ( list1 , list2 ) : return [ item1 == item2 for item1 , item2 in broadcast_zip ( list1 , list2 ) ] | takes flags returns indexes of True values | 35 | 8 |
9,247 | def scalar_input_map ( func , input_ ) : if util_iter . isiterable ( input_ ) : return list ( map ( func , input_ ) ) else : return func ( input_ ) | Map like function | 46 | 3 |
9,248 | def partial_imap_1to1 ( func , si_func ) : @ functools . wraps ( si_func ) def wrapper ( input_ ) : if not util_iter . isiterable ( input_ ) : return func ( si_func ( input_ ) ) else : return list ( map ( func , si_func ( input_ ) ) ) set_funcname ( wrapper , util_str . get_callable_name ( func ) + '_mapper_' + get_funcname ( si_func ) ) return wrapper | a bit messy | 118 | 3 |
9,249 | def sample_zip ( items_list , num_samples , allow_overflow = False , per_bin = 1 ) : # Prealloc a list of lists samples_list = [ [ ] for _ in range ( num_samples ) ] # Sample the ix-th value from every list samples_iter = zip_longest ( * items_list ) sx = 0 for ix , samples_ in zip ( range ( num_samples ) , samples_iter ) : samples = filter_Nones ( samples_ ) samples_list [ sx ] . extend ( samples ) # Put per_bin from each sublist into a sample if ( ix + 1 ) % per_bin == 0 : sx += 1 # Check for overflow if allow_overflow : overflow_samples = flatten ( [ filter_Nones ( samples_ ) for samples_ in samples_iter ] ) return samples_list , overflow_samples else : try : samples_iter . next ( ) except StopIteration : pass else : raise AssertionError ( 'Overflow occured' ) return samples_list | Helper for sampling | 237 | 3 |
9,250 | def issorted ( list_ , op = operator . le ) : return all ( op ( list_ [ ix ] , list_ [ ix + 1 ] ) for ix in range ( len ( list_ ) - 1 ) ) | Determines if a list is sorted | 50 | 8 |
9,251 | def list_depth ( list_ , func = max , _depth = 0 ) : depth_list = [ list_depth ( item , func = func , _depth = _depth + 1 ) for item in list_ if util_type . is_listlike ( item ) ] if len ( depth_list ) > 0 : return func ( depth_list ) else : return _depth | Returns the deepest level of nesting within a list of lists | 81 | 11 |
9,252 | def depth ( sequence , func = max , _depth = 0 ) : if isinstance ( sequence , dict ) : sequence = list ( sequence . values ( ) ) depth_list = [ depth ( item , func = func , _depth = _depth + 1 ) for item in sequence if ( isinstance ( item , dict ) or util_type . is_listlike ( item ) ) ] if len ( depth_list ) > 0 : return func ( depth_list ) else : return _depth | Find the nesting depth of a nested sequence | 104 | 8 |
9,253 | def list_deep_types ( list_ ) : type_list = [ ] for item in list_ : if util_type . is_listlike ( item ) : type_list . extend ( list_deep_types ( item ) ) else : type_list . append ( type ( item ) ) return type_list | Returns all types in a deep list | 68 | 7 |
9,254 | def depth_profile ( list_ , max_depth = None , compress_homogenous = True , compress_consecutive = False , new_depth = False ) : if isinstance ( list_ , dict ) : list_ = list ( list_ . values ( ) ) # handle dict level_shape_list = [ ] # For a pure bottom level list return the length if not any ( map ( util_type . is_listlike , list_ ) ) : return len ( list_ ) if False and new_depth : pass # max_depth_ = None if max_depth is None else max_depth - 1 # if max_depth_ is None or max_depth_ > 0: # pass # else: # for item in list_: # if isinstance(item, dict): # item = list(item.values()) # handle dict # if util_type.is_listlike(item): # if max_depth is None: # level_shape_list.append(depth_profile(item, None)) # else: # if max_depth >= 0: # level_shape_list.append(depth_profile(item, max_depth - 1)) # else: # level_shape_list.append(str(len(item))) # else: # level_shape_list.append(1) else : for item in list_ : if isinstance ( item , dict ) : item = list ( item . values ( ) ) # handle dict if util_type . is_listlike ( item ) : if max_depth is None : level_shape_list . append ( depth_profile ( item , None ) ) else : if max_depth >= 0 : level_shape_list . append ( depth_profile ( item , max_depth - 1 ) ) else : level_shape_list . append ( str ( len ( item ) ) ) else : level_shape_list . append ( 1 ) if compress_homogenous : # removes redudant information by returning a shape duple if allsame ( level_shape_list ) : dim_ = level_shape_list [ 0 ] len_ = len ( level_shape_list ) if isinstance ( dim_ , tuple ) : level_shape_list = tuple ( [ len_ ] + list ( dim_ ) ) else : level_shape_list = tuple ( [ len_ , dim_ ] ) if compress_consecutive : hash_list = list ( map ( hash , map ( str , level_shape_list ) ) ) consec_list = group_consecutives ( hash_list , 0 ) if len ( consec_list ) != len ( level_shape_list ) : len_list = list ( map ( len , consec_list ) ) cumsum_list = np . cumsum ( len_list ) consec_str = '[' thresh = 1 for len_ , cumsum in zip ( len_list , cumsum_list ) : value = level_shape_list [ cumsum - 1 ] if len_ > thresh : consec_str += str ( value ) + '] * ' + str ( len_ ) consec_str += ' + [' else : consec_str += str ( value ) + ', ' if consec_str . endswith ( ', ' ) : consec_str = consec_str [ : - 2 ] #consec_str += ']' #consec_str = consec_str.rstrip(', ').rstrip(']') #consec_str = consec_str.rstrip(', ') #if consec_str.endswith(']'): # consec_str = consec_str[:-1] consec_str += ']' level_shape_list = consec_str return level_shape_list | r Returns a nested list corresponding the shape of the nested structures lists represent depth tuples represent shape . The values of the items do not matter . only the lengths . | 807 | 33 |
9,255 | def list_cover ( list1 , list2 ) : set2 = set ( list2 ) incover_list = [ item1 in set2 for item1 in list1 ] return incover_list | r returns boolean for each position in list1 if it is in list2 | 43 | 15 |
9,256 | def list_alignment ( list1 , list2 , missing = False ) : import utool as ut item1_to_idx = make_index_lookup ( list1 ) if missing : sortx = ut . dict_take ( item1_to_idx , list2 , None ) else : sortx = ut . take ( item1_to_idx , list2 ) return sortx | Assumes list items are unique | 88 | 6 |
9,257 | def list_transpose ( list_ , shape = None ) : num_cols_set = unique ( [ len ( x ) for x in list_ ] ) if shape is None : if len ( num_cols_set ) == 0 : raise ValueError ( 'listT does not support empty transpose without shapes' ) else : assert len ( shape ) == 2 , 'shape must be a 2-tuple' if len ( num_cols_set ) == 0 : return [ [ ] for _ in range ( shape [ 1 ] ) ] elif num_cols_set [ 0 ] == 0 : return [ ] if len ( num_cols_set ) != 1 : raise ValueError ( 'inconsistent column lengths=%r' % ( num_cols_set , ) ) return list ( zip ( * list_ ) ) | r Swaps rows and columns . nCols should be specified if the initial list is empty . | 183 | 20 |
9,258 | def delete_items_by_index ( list_ , index_list , copy = False ) : if copy : list_ = list_ [ : ] # Rectify negative indicies index_list_ = [ ( len ( list_ ) + x if x < 0 else x ) for x in index_list ] # Remove largest indicies first index_list_ = sorted ( index_list_ , reverse = True ) for index in index_list_ : del list_ [ index ] return list_ | Remove items from list_ at positions specified in index_list The original list_ is preserved if copy is True | 105 | 22 |
9,259 | def delete_list_items ( list_ , item_list , copy = False ) : if copy : list_ = list_ [ : ] for item in item_list : list_ . remove ( item ) return list_ | r Remove items in item_list from list_ . The original list_ is preserved if copy is True | 47 | 21 |
9,260 | def length_hint ( obj , default = 0 ) : try : return len ( obj ) except TypeError : try : get_hint = type ( obj ) . __length_hint__ except AttributeError : return default try : hint = get_hint ( obj ) except TypeError : return default if hint is NotImplemented : return default if not isinstance ( hint , int ) : raise TypeError ( "Length hint must be an integer, not %r" % type ( hint ) ) if hint < 0 : raise ValueError ( "__length_hint__() should return >= 0" ) return hint | Return an estimate of the number of items in obj . | 133 | 11 |
9,261 | def add_parser_arguments ( parser , args , group = None , prefix = DATA_PREFIX ) : if group : parser = parser . add_argument_group ( group ) for arg , kwargs in iteritems ( args ) : arg_name = kwargs . pop ( 'arg' , arg . replace ( '_' , '-' ) ) if 'metavar' not in kwargs : kwargs [ 'metavar' ] = arg . upper ( ) if 'dest' in kwargs : kwargs [ 'dest' ] = prefix + kwargs [ 'dest' ] else : kwargs [ 'dest' ] = prefix + arg parser . add_argument ( '--' + arg_name , * * kwargs ) | Helper method that populates parser arguments . The argument values can be later retrieved with extract_arguments method . | 169 | 22 |
9,262 | def add_mutually_exclusive_args ( parser , args , required = False , prefix = DATA_PREFIX ) : parser = parser . add_mutually_exclusive_group ( required = required ) for arg , kwargs in iteritems ( args ) : arg_name = kwargs . pop ( 'arg' , arg . replace ( '_' , '-' ) ) if 'metavar' not in kwargs : kwargs [ 'metavar' ] = arg . upper ( ) parser . add_argument ( '--' + arg_name , dest = prefix + arg , * * kwargs ) | Helper method that populates mutually exclusive arguments . The argument values can be later retrieved with extract_arguments method . | 137 | 23 |
9,263 | def add_create_update_args ( parser , required_args , optional_args , create = False ) : if create : for key in required_args : required_args [ key ] [ 'required' ] = True add_parser_arguments ( parser , required_args , group = 'required arguments' ) else : optional_args . update ( required_args ) add_parser_arguments ( parser , optional_args ) | Wrapper around add_parser_arguments . | 92 | 10 |
9,264 | def extract_arguments ( args , prefix = DATA_PREFIX ) : data = { } for key , value in iteritems ( args . __dict__ ) : if key . startswith ( prefix ) and value is not None : parts = key [ len ( prefix ) : ] . split ( '__' ) # Think of `d` as a pointer into the resulting nested dictionary. # The `for` loop iterates over all parts of the key except the last # to find the proper dict into which the value should be inserted. # If the subdicts do not exist, they are created. d = data for p in parts [ : - 1 ] : assert p not in d or isinstance ( d [ p ] , dict ) d = d . setdefault ( p , { } ) # At this point `d` points to the correct dict and value can be # inserted. d [ parts [ - 1 ] ] = value if value != '' else None return data | Return a dict of arguments created by add_parser_arguments . | 206 | 14 |
9,265 | def create_searchspace ( lookup , fastafn , proline_cut = False , reverse_seqs = True , do_trypsinize = True ) : allpeps = [ ] for record in SeqIO . parse ( fastafn , 'fasta' ) : if do_trypsinize : pepseqs = trypsinize ( record . seq , proline_cut ) else : pepseqs = [ record . seq ] # Exchange all leucines to isoleucines because MS can't differ pepseqs = [ ( str ( pep ) . replace ( 'L' , 'I' ) , ) for pep in pepseqs ] allpeps . extend ( pepseqs ) if len ( allpeps ) > 1000000 : # more than x peps, write to SQLite lookup . write_peps ( allpeps , reverse_seqs ) allpeps = [ ] # write remaining peps to sqlite lookup . write_peps ( allpeps , reverse_seqs ) lookup . index_peps ( reverse_seqs ) lookup . close_connection ( ) | Given a FASTA database proteins are trypsinized and resulting peptides stored in a database or dict for lookups | 248 | 25 |
9,266 | def hashid_arr ( arr , label = 'arr' , hashlen = 16 ) : hashstr = hash_data ( arr ) [ 0 : hashlen ] if isinstance ( arr , ( list , tuple ) ) : shapestr = len ( arr ) else : shapestr = ',' . join ( list ( map ( str , arr . shape ) ) ) hashid = '{}-{}-{}' . format ( label , shapestr , hashstr ) return hashid | newer version of hashstr_arr2 | 107 | 9 |
9,267 | def _update_hasher ( hasher , data ) : if isinstance ( data , ( tuple , list , zip ) ) : needs_iteration = True elif ( util_type . HAVE_NUMPY and isinstance ( data , np . ndarray ) and data . dtype . kind == 'O' ) : # ndarrays of objects cannot be hashed directly. needs_iteration = True else : needs_iteration = False if needs_iteration : # try to nest quickly without recursive calls SEP = b'SEP' iter_prefix = b'ITER' # if isinstance(data, tuple): # iter_prefix = b'TUP' # else: # iter_prefix = b'LIST' iter_ = iter ( data ) hasher . update ( iter_prefix ) try : for item in iter_ : prefix , hashable = _covert_to_hashable ( data ) binary_data = SEP + prefix + hashable # b''.join([SEP, prefix, hashable]) hasher . update ( binary_data ) except TypeError : # need to use recursive calls # Update based on current item _update_hasher ( hasher , item ) for item in iter_ : # Ensure the items have a spacer between them hasher . update ( SEP ) _update_hasher ( hasher , item ) else : prefix , hashable = _covert_to_hashable ( data ) binary_data = prefix + hashable # b''.join([prefix, hashable]) hasher . update ( binary_data ) | This is the clear winner over the generate version . Used by hash_data | 340 | 15 |
9,268 | def combine_hashes ( bytes_list , hasher = None ) : if hasher is None : hasher = hashlib . sha256 ( ) for b in bytes_list : hasher . update ( b ) hasher . update ( SEP_BYTE ) return hasher . digest ( ) | Only works on bytes | 65 | 4 |
9,269 | def hash_data ( data , hashlen = None , alphabet = None ) : if alphabet is None : alphabet = ALPHABET_27 if hashlen is None : hashlen = HASH_LEN2 if isinstance ( data , stringlike ) and len ( data ) == 0 : # Make a special hash for empty data text = ( alphabet [ 0 ] * hashlen ) else : hasher = hashlib . sha512 ( ) _update_hasher ( hasher , data ) # Get a 128 character hex string text = hasher . hexdigest ( ) # Shorten length of string (by increasing base) hashstr2 = convert_hexstr_to_bigbase ( text , alphabet , bigbase = len ( alphabet ) ) # Truncate text = hashstr2 [ : hashlen ] return text | r Get a unique hash depending on the state of the data . | 175 | 13 |
9,270 | def convert_hexstr_to_bigbase ( hexstr , alphabet = ALPHABET , bigbase = BIGBASE ) : x = int ( hexstr , 16 ) # first convert to base 16 if x == 0 : return '0' sign = 1 if x > 0 else - 1 x *= sign digits = [ ] while x : digits . append ( alphabet [ x % bigbase ] ) x //= bigbase if sign < 0 : digits . append ( '-' ) digits . reverse ( ) newbase_str = '' . join ( digits ) return newbase_str | r Packs a long hexstr into a shorter length string with a larger base | 124 | 15 |
9,271 | def get_file_hash ( fpath , blocksize = 65536 , hasher = None , stride = 1 , hexdigest = False ) : if hasher is None : hasher = hashlib . sha1 ( ) with open ( fpath , 'rb' ) as file_ : buf = file_ . read ( blocksize ) while len ( buf ) > 0 : hasher . update ( buf ) if stride > 1 : file_ . seek ( blocksize * ( stride - 1 ) , 1 ) # skip blocks buf = file_ . read ( blocksize ) if hexdigest : return hasher . hexdigest ( ) else : return hasher . digest ( ) | r For better hashes use hasher = hashlib . sha256 and keep stride = 1 | 145 | 19 |
9,272 | def get_file_uuid ( fpath , hasher = None , stride = 1 ) : if hasher is None : hasher = hashlib . sha1 ( ) # 20 bytes of output #hasher = hashlib.sha256() # 32 bytes of output # sha1 produces a 20 byte hash hashbytes_20 = get_file_hash ( fpath , hasher = hasher , stride = stride ) # sha1 produces 20 bytes, but UUID requires 16 bytes hashbytes_16 = hashbytes_20 [ 0 : 16 ] uuid_ = uuid . UUID ( bytes = hashbytes_16 ) return uuid_ | Creates a uuid from the hash of a file | 140 | 11 |
9,273 | def combine_uuids ( uuids , ordered = True , salt = '' ) : if len ( uuids ) == 0 : return get_zero_uuid ( ) elif len ( uuids ) == 1 : return uuids [ 0 ] else : if not ordered : uuids = sorted ( uuids ) sep_str = '-' sep_byte = six . binary_type ( six . b ( sep_str ) ) pref = six . binary_type ( six . b ( '{}{}{}' . format ( salt , sep_str , len ( uuids ) ) ) ) combined_bytes = pref + sep_byte . join ( [ u . bytes for u in uuids ] ) combined_uuid = hashable_to_uuid ( combined_bytes ) return combined_uuid | Creates a uuid that specifies a group of UUIDS | 179 | 13 |
9,274 | def __start_connection ( self , context , node , ccallbacks = None ) : _logger . debug ( "Creating connection object: CONTEXT=[%s] NODE=[%s]" , context , node ) c = nsq . connection . Connection ( context , node , self . __identify , self . __message_handler , self . __quit_ev , ccallbacks , ignore_quit = self . __connection_ignore_quit ) g = gevent . spawn ( c . run ) # Now, wait for the thread to finish the connection. timeout_s = nsq . config . client . NEW_CONNECTION_NEGOTIATE_TIMEOUT_S if c . connected_ev . wait ( timeout_s ) is False : _logger . error ( "New connection to server [%s] timed-out. Cleaning-" "up thread." , node ) g . kill ( ) g . join ( ) # We'll try again on the next audit. raise EnvironmentError ( "Connection to server [%s] failed." % ( node , ) ) self . __connections . append ( ( node , c , g ) ) | Start a new connection and manage it from a new greenlet . | 248 | 13 |
9,275 | def __audit_connections ( self , ccallbacks ) : while self . __quit_ev . is_set ( ) is False : # Remove any connections that are dead. self . __connections = filter ( lambda ( n , c , g ) : not g . ready ( ) , self . __connections ) connected_node_couplets_s = set ( [ ( c . managed_connection . context , node ) for ( node , c , g ) in self . __connections ] ) # Warn if there are any still-active connections that are no longer # being advertised (probably where we were given some lookup servers # that have dropped this particular *nsqd* server). lingering_nodes_s = connected_node_couplets_s - self . __node_couplets_s if lingering_nodes_s : _logger . warning ( "Server(s) are connected but no longer " "advertised: %s" , lingering_nodes_s ) # Connect any servers that don't currently have a connection. unused_nodes_s = self . __node_couplets_s - connected_node_couplets_s for ( context , node ) in unused_nodes_s : _logger . info ( "Trying to connect unconnected server: " "CONTEXT=[%s] NODE=[%s]" , context , node ) self . __start_connection ( context , node , ccallbacks ) else : # Are there both no unused servers and no connected servers? if not connected_node_couplets_s : _logger . error ( "All servers have gone away. Stopping " "client." ) # Clear our list of servers, and squash the "no servers!" # error so that we can shut things down in the right order. try : self . set_servers ( [ ] ) except EnvironmentError : pass self . __quit_ev . set ( ) return interval_s = nsq . config . client . GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S audit_wait_s = float ( nsq . config . client . CONNECTION_AUDIT_WAIT_S ) while audit_wait_s > 0 and self . __quit_ev . is_set ( ) is False : gevent . sleep ( interval_s ) audit_wait_s -= interval_s | Monitor state of all connections and utility of all servers . | 519 | 11 |
9,276 | def __join_connections ( self ) : interval_s = nsq . config . client . CONNECTION_CLOSE_AUDIT_WAIT_S graceful_wait_s = nsq . config . client . CONNECTION_QUIT_CLOSE_TIMEOUT_S graceful = False while graceful_wait_s > 0 : if not self . __connections : break connected_list = [ c . is_connected for ( n , c , g ) in self . __connections ] if any ( connected_list ) is False : graceful = True break # We need to give the greenlets periodic control, in order to finish # up. gevent . sleep ( interval_s ) graceful_wait_s -= interval_s if graceful is False : connected_list = [ c for ( n , c , g ) in self . __connections if c . is_connected ] _logger . error ( "We were told to terminate, but not all " "connections were stopped: [%s]" , connected_list ) | Wait for all connections to close . There are no side - effects here . We just want to try and leave - after - everything has closed in general . | 220 | 31 |
9,277 | def __manage_connections ( self , ccallbacks = None ) : _logger . info ( "Running client." ) # Create message-handler. if self . __message_handler_cls is not None : # TODO(dustin): Move this to another thread if we can mix multithreading with coroutines. self . __message_handler = self . __message_handler_cls ( self . __election , ccallbacks ) # Spawn the initial connections to all of the servers. for ( context , node ) in self . __node_couplets_s : self . __start_connection ( context , node , ccallbacks ) # Wait for at least one connection to the server. self . __wait_for_one_server_connection ( ) # Indicate that the client is okay to pass control back to the caller. self . __is_alive = True self . __ready_ev . set ( ) # Loop, and maintain all connections. This exits when the quit event # is set. self . __audit_connections ( ccallbacks ) # Wait for all of the connections to close. They will respond to the # same quit event that terminate the audit loop just above. self . __join_connections ( ) _logger . info ( "Connection management has stopped." ) self . __is_alive = False | This runs as the main connection management greenlet . | 291 | 10 |
9,278 | def set_servers ( self , node_couplets ) : node_couplets_s = set ( node_couplets ) if node_couplets_s != self . __node_couplets_s : _logger . info ( "Servers have changed. NEW: %s REMOVED: %s" , node_couplets_s - self . __node_couplets_s , self . __node_couplets_s - node_couplets_s ) # Since no servers means no connection greenlets, and the discover # greenlet is technically scheduled and not running between # invocations, this should successfully terminate the process. if not node_couplets_s : raise EnvironmentError ( "No servers available." ) self . __node_couplets_s = node_couplets_s | Set the current collection of servers . The entries are 2 - tuples of contexts and nodes . | 185 | 19 |
9,279 | def start ( self , ccallbacks = None ) : self . __manage_g = gevent . spawn ( self . __manage_connections , ccallbacks ) self . __ready_ev . wait ( ) | Establish and maintain connections . | 48 | 6 |
9,280 | def stop ( self ) : _logger . debug ( "Emitting quit signal for connections." ) self . __quit_ev . set ( ) _logger . info ( "Waiting for connection manager to stop." ) self . __manage_g . join ( ) | Stop all of the connections . | 58 | 6 |
9,281 | def run ( file_path , include_dirs = [ ] , dlems = False , nogui = False ) : import argparse args = argparse . Namespace ( ) args . lems_file = file_path args . I = include_dirs args . dlems = dlems args . nogui = nogui main ( args = args ) | Function for running from a script or shell . | 79 | 9 |
9,282 | def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to discovered node: [%s]" , self . server_host ) stop_epoch = time . time ( ) + nsq . config . client . MAXIMUM_CONNECT_ATTEMPT_PERIOD_S timeout_s = nsq . config . client . INITIAL_CONNECT_FAIL_WAIT_S backoff_rate = nsq . config . client . CONNECT_FAIL_WAIT_BACKOFF_RATE while stop_epoch >= time . time ( ) and nice_quit_ev . is_set ( ) is False : try : c = self . primitive_connect ( ) except gevent . socket . error : _logger . exception ( "Could not connect to discovered server: " "[%s]" , self . server_host ) else : _logger . info ( "Discovered server-node connected: [%s]" , self . server_host ) return c timeout_s = min ( timeout_s * backoff_rate , nsq . config . client . MAXIMUM_CONNECT_FAIL_WAIT_S ) _logger . info ( "Waiting for (%d) seconds before reconnecting." , timeout_s ) gevent . sleep ( timeout_s ) raise nsq . exceptions . NsqConnectGiveUpError ( "Could not connect to the nsqlookupd server: [%s]" % ( self . server_host , ) ) | Connect the server . We expect this to implement backoff and all connection logistics for servers that were discovered via a lookup node . | 330 | 25 |
9,283 | def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to explicit server node: [%s]" , self . server_host ) # According to the docs, a nsqlookupd-discovered server should fall-out # of the lineup immediately if it fails. If it comes back, nsqlookupd # will give it back to us. try : c = self . primitive_connect ( ) except gevent . socket . error : _logger . exception ( "Could not connect to explicit server: [%s]" , self . server_host ) raise nsq . exceptions . NsqConnectGiveUpError ( "Could not connect to the nsqd server: [%s]" % ( self . server_host , ) ) _logger . info ( "Explicit server-node connected: [%s]" , self . server_host ) return c | Connect the server . We expect this to implement connection logistics for servers that were explicitly prescribed to us . | 193 | 20 |
9,284 | def prepare ( self ) : self . target = self . fn self . targetheader = reader . get_tsv_header ( self . target ) self . decoyheader = reader . get_tsv_header ( self . decoyfn ) | No percolator XML for protein tables | 52 | 8 |
9,285 | def obtain_token ( self ) : token_end_points = ( 'token/obtain' , 'obtain-token' , 'obtain_token' ) for end_point in token_end_points : try : return self . auth [ end_point ] . _ ( page_size = None ) [ 'token' ] except BeanBagException as e : if e . response . status_code != 404 : raise raise Exception ( 'Could not obtain token from any known URL.' ) | Try to obtain token from all end - points that were ever used to serve the token . If the request returns 404 NOT FOUND retry with older version of the URL . | 106 | 35 |
9,286 | def results ( self , * args , * * kwargs ) : def worker ( ) : kwargs [ 'page' ] = 1 while True : response = self . client ( * args , * * kwargs ) if isinstance ( response , list ) : yield response break elif _is_page ( response ) : yield response [ 'results' ] if response [ 'next' ] : kwargs [ 'page' ] += 1 else : break else : raise NoResultsError ( response ) return itertools . chain . from_iterable ( worker ( ) ) | Return an iterator with all pages of data . Return NoResultsError with response if there is unexpected data . | 123 | 21 |
9,287 | def get_isoquant_fields ( pqdb = False , poolnames = False ) : # FIXME when is a None database passed? if pqdb is None : return { } try : channels_psms = pqdb . get_isoquant_amountpsms_channels ( ) except OperationalError : # FIXME what does this catch? return { } quantheader , psmsheader = OrderedDict ( ) , OrderedDict ( ) for chan_name , amnt_psms_name in channels_psms : quantheader [ chan_name ] = poolnames if amnt_psms_name : psmsheader [ amnt_psms_name ] = poolnames quantheader . update ( psmsheader ) return quantheader | Returns a headerfield dict for isobaric quant channels . Channels are taken from DB and there isn t a pool - independent version of this yet | 168 | 30 |
9,288 | def watch_for_events ( ) : fd = inotify . init ( ) try : wd = inotify . add_watch ( fd , '/tmp' , inotify . IN_CLOSE_WRITE ) while True : for event in inotify . get_events ( fd ) : print ( "event:" , event . name , event . get_mask_description ( ) ) finally : os . close ( fd ) | Wait for events and print them to stdout . | 97 | 10 |
9,289 | def format_body ( self , description , sys_info = None , traceback = None ) : body = BODY_ITEM_TEMPLATE % { 'name' : 'Description' , 'value' : description } if traceback : traceback = '\n' . join ( traceback . splitlines ( ) [ - NB_LINES_MAX : ] ) body += BODY_ITEM_TEMPLATE % { 'name' : 'Traceback' , 'value' : '```\n%s\n```' % traceback } if sys_info : sys_info = '- %s' % '\n- ' . join ( sys_info . splitlines ( ) ) body += BODY_ITEM_TEMPLATE % { 'name' : 'System information' , 'value' : sys_info } return body | Formats the body using markdown . | 190 | 8 |
9,290 | def list ( ) : for node in env . nodes : print "%s (%s, %s)" % ( node . tags [ "Name" ] , node . ip_address , node . private_ip_address ) | List EC2 name and public and private ip address | 46 | 10 |
9,291 | def quick_search ( self , name , platform = None , sort_by = None , desc = True ) : if platform is None : query_filter = "name:{0}" . format ( name ) else : query_filter = "name:{0},platforms:{1}" . format ( name , platform ) search_params = { "filter" : query_filter } if sort_by is not None : self . _validate_sort_field ( sort_by ) if desc : direction = self . SORT_ORDER_DESCENDING else : direction = self . SORT_ORDER_ASCENDING search_params [ "sort" ] = "{0}:{1}" . format ( sort_by , direction ) response = self . _query ( search_params ) return response | Quick search method that allows you to search for a game using only the title and the platform | 170 | 18 |
9,292 | def send_ping ( self , payload = None ) : yield from asyncio . sleep ( self . _interval ) self . _handler . send_ping ( payload = payload ) self . _start_timer ( payload = payload ) | Sends the ping after the interval specified when initializing | 49 | 11 |
9,293 | def pong_received ( self , payload = None ) : if self . _timer is not None : self . _timer . cancel ( ) self . _failures = 0 asyncio . async ( self . send_ping ( payload = payload ) ) | Called when a pong is received . So the timer is cancelled | 53 | 14 |
9,294 | def is_comparable_type ( var , type_ ) : other_types = COMPARABLE_TYPES . get ( type_ , type_ ) return isinstance ( var , other_types ) | Check to see if var is an instance of known compatible types for type_ | 44 | 15 |
9,295 | def smart_cast ( var , type_ ) : #if isinstance(type_, tuple): # for trytype in type_: # try: # return trytype(var) # except Exception: # pass # raise TypeError('Cant figure out type=%r' % (type_,)) if type_ is None or var is None : return var #if not isinstance(type_, six.string_types): try : if issubclass ( type_ , type ( None ) ) : return var except TypeError : pass if is_str ( var ) : if type_ in VALID_BOOL_TYPES : return bool_from_str ( var ) elif type_ is slice : args = [ None if len ( arg ) == 0 else int ( arg ) for arg in var . split ( ':' ) ] return slice ( * args ) elif type_ is list : # need more intelligent parsing here subvar_list = var . split ( ',' ) return [ smart_cast2 ( subvar ) for subvar in subvar_list ] elif isinstance ( type_ , six . string_types ) : if type_ == 'fuzzy_subset' : return fuzzy_subset ( var ) if type_ == 'eval' : return eval ( var , { } , { } ) #elif type_ == 'fuzzy_int': # return fuzzy_subset(var) else : raise NotImplementedError ( 'Uknown smart type_=%r' % ( type_ , ) ) return type_ ( var ) | casts var to type and tries to be clever when var is a string | 339 | 14 |
9,296 | def fuzzy_subset ( str_ ) : if str_ is None : return str_ if ':' in str_ : return smart_cast ( str_ , slice ) if str_ . startswith ( '[' ) : return smart_cast ( str_ [ 1 : - 1 ] , list ) else : return smart_cast ( str_ , list ) | converts a string into an argument to list_take | 76 | 11 |
9,297 | def fuzzy_int ( str_ ) : try : ret = int ( str_ ) return ret except Exception : # Parse comma separated values as ints if re . match ( r'\d*,\d*,?\d*' , str_ ) : return tuple ( map ( int , str_ . split ( ',' ) ) ) # Parse range values as ints if re . match ( r'\d*:\d*:?\d*' , str_ ) : return tuple ( range ( * map ( int , str_ . split ( ':' ) ) ) ) raise | lets some special strings be interpreted as ints | 125 | 9 |
9,298 | def get_type ( var ) : if HAVE_NUMPY and isinstance ( var , np . ndarray ) : if _WIN32 : # This is a weird system specific error # https://github.com/numpy/numpy/issues/3667 type_ = var . dtype else : type_ = var . dtype . type elif HAVE_PANDAS and isinstance ( var , pd . Index ) : if _WIN32 : type_ = var . dtype else : type_ = var . dtype . type else : type_ = type ( var ) return type_ | Gets types accounting for numpy | 129 | 7 |
9,299 | def get_homogenous_list_type ( list_ ) : # TODO Expand and make work correctly if HAVE_NUMPY and isinstance ( list_ , np . ndarray ) : item = list_ elif isinstance ( list_ , list ) and len ( list_ ) > 0 : item = list_ [ 0 ] else : item = None if item is not None : if is_float ( item ) : type_ = float elif is_int ( item ) : type_ = int elif is_bool ( item ) : type_ = bool elif is_str ( item ) : type_ = str else : type_ = get_type ( item ) else : type_ = None return type_ | Returns the best matching python type even if it is an ndarray assumes all items in the list are of the same type . does not check this | 154 | 30 |