| idx (int64, 0-252k) | question (string, length 48-5.28k) | target (string, length 5-1.23k) |
|---|---|---|
9,300 | def length_hint ( obj , default = 0 ) : try : return len ( obj ) except TypeError : try : get_hint = type ( obj ) . __length_hint__ except AttributeError : return default try : hint = get_hint ( obj ) except TypeError : return default if hint is NotImplemented : return default if not isinstance ( hint , int ) : raise T... | Return an estimate of the number of items in obj . |
9,301 | def add_parser_arguments ( parser , args , group = None , prefix = DATA_PREFIX ) : if group : parser = parser . add_argument_group ( group ) for arg , kwargs in iteritems ( args ) : arg_name = kwargs . pop ( 'arg' , arg . replace ( '_' , '-' ) ) if 'metavar' not in kwargs : kwargs [ 'metavar' ] = arg . upper ( ) if 'de... | Helper method that populates parser arguments . The argument values can be later retrieved with extract_arguments method . |
9,302 | def add_mutually_exclusive_args ( parser , args , required = False , prefix = DATA_PREFIX ) : parser = parser . add_mutually_exclusive_group ( required = required ) for arg , kwargs in iteritems ( args ) : arg_name = kwargs . pop ( 'arg' , arg . replace ( '_' , '-' ) ) if 'metavar' not in kwargs : kwargs [ 'metavar' ] ... | Helper method that populates mutually exclusive arguments . The argument values can be later retrieved with extract_arguments method . |
9,303 | def add_create_update_args ( parser , required_args , optional_args , create = False ) : if create : for key in required_args : required_args [ key ] [ 'required' ] = True add_parser_arguments ( parser , required_args , group = 'required arguments' ) else : optional_args . update ( required_args ) add_parser_arguments ... | Wrapper around add_parser_arguments . |
9,304 | def extract_arguments ( args , prefix = DATA_PREFIX ) : data = { } for key , value in iteritems ( args . __dict__ ) : if key . startswith ( prefix ) and value is not None : parts = key [ len ( prefix ) : ] . split ( '__' ) d = data for p in parts [ : - 1 ] : assert p not in d or isinstance ( d [ p ] , dict ) d = d . se... | Return a dict of arguments created by add_parser_arguments . |
9,305 | def create_searchspace ( lookup , fastafn , proline_cut = False , reverse_seqs = True , do_trypsinize = True ) : allpeps = [ ] for record in SeqIO . parse ( fastafn , 'fasta' ) : if do_trypsinize : pepseqs = trypsinize ( record . seq , proline_cut ) else : pepseqs = [ record . seq ] pepseqs = [ ( str ( pep ) . replace ... | Given a FASTA database, proteins are trypsinized and the resulting peptides stored in a database or dict for lookups |
9,306 | def hashid_arr ( arr , label = 'arr' , hashlen = 16 ) : hashstr = hash_data ( arr ) [ 0 : hashlen ] if isinstance ( arr , ( list , tuple ) ) : shapestr = len ( arr ) else : shapestr = ',' . join ( list ( map ( str , arr . shape ) ) ) hashid = '{}-{}-{}' . format ( label , shapestr , hashstr ) return hashid | newer version of hashstr_arr2 |
9,307 | def _update_hasher ( hasher , data ) : if isinstance ( data , ( tuple , list , zip ) ) : needs_iteration = True elif ( util_type . HAVE_NUMPY and isinstance ( data , np . ndarray ) and data . dtype . kind == 'O' ) : needs_iteration = True else : needs_iteration = False if needs_iteration : SEP = b'SEP' iter_prefix = b'... | This is the clear winner over the generate version . Used by hash_data |
9,308 | def combine_hashes ( bytes_list , hasher = None ) : if hasher is None : hasher = hashlib . sha256 ( ) for b in bytes_list : hasher . update ( b ) hasher . update ( SEP_BYTE ) return hasher . digest ( ) | Only works on bytes |
9,309 | def hash_data ( data , hashlen = None , alphabet = None ) : r if alphabet is None : alphabet = ALPHABET_27 if hashlen is None : hashlen = HASH_LEN2 if isinstance ( data , stringlike ) and len ( data ) == 0 : text = ( alphabet [ 0 ] * hashlen ) else : hasher = hashlib . sha512 ( ) _update_hasher ( hasher , data ) text =... | Get a unique hash depending on the state of the data. |
9,310 | def convert_hexstr_to_bigbase ( hexstr , alphabet = ALPHABET , bigbase = BIGBASE ) : r x = int ( hexstr , 16 ) if x == 0 : return '0' sign = 1 if x > 0 else - 1 x *= sign digits = [ ] while x : digits . append ( alphabet [ x % bigbase ] ) x //= bigbase if sign < 0 : digits . append ( '-' ) digits . reverse ( ) newbase_... | Packs a long hexstr into a shorter-length string with a larger base |
9,311 | def get_file_hash ( fpath , blocksize = 65536 , hasher = None , stride = 1 , hexdigest = False ) : r if hasher is None : hasher = hashlib . sha1 ( ) with open ( fpath , 'rb' ) as file_ : buf = file_ . read ( blocksize ) while len ( buf ) > 0 : hasher . update ( buf ) if stride > 1 : file_ . seek ( blocksize * ( stride ... | For better hashes use hasher=hashlib.sha256 and keep stride=1 |
9,312 | def get_file_uuid ( fpath , hasher = None , stride = 1 ) : if hasher is None : hasher = hashlib . sha1 ( ) hashbytes_20 = get_file_hash ( fpath , hasher = hasher , stride = stride ) hashbytes_16 = hashbytes_20 [ 0 : 16 ] uuid_ = uuid . UUID ( bytes = hashbytes_16 ) return uuid_ | Creates a uuid from the hash of a file |
9,313 | def combine_uuids ( uuids , ordered = True , salt = '' ) : if len ( uuids ) == 0 : return get_zero_uuid ( ) elif len ( uuids ) == 1 : return uuids [ 0 ] else : if not ordered : uuids = sorted ( uuids ) sep_str = '-' sep_byte = six . binary_type ( six . b ( sep_str ) ) pref = six . binary_type ( six . b ( '{}{}{}' . for... | Creates a uuid that specifies a group of UUIDs |
9,314 | def __start_connection ( self , context , node , ccallbacks = None ) : _logger . debug ( "Creating connection object: CONTEXT=[%s] NODE=[%s]" , context , node ) c = nsq . connection . Connection ( context , node , self . __identify , self . __message_handler , self . __quit_ev , ccallbacks , ignore_quit = self . __conn... | Start a new connection and manage it from a new greenlet . |
9,315 | def __audit_connections ( self , ccallbacks ) : while self . __quit_ev . is_set ( ) is False : self . __connections = filter ( lambda ( n , c , g ) : not g . ready ( ) , self . __connections ) connected_node_couplets_s = set ( [ ( c . managed_connection . context , node ) for ( node , c , g ) in self . __connections ] ... | Monitor state of all connections and utility of all servers . |
9,316 | def __join_connections ( self ) : interval_s = nsq . config . client . CONNECTION_CLOSE_AUDIT_WAIT_S graceful_wait_s = nsq . config . client . CONNECTION_QUIT_CLOSE_TIMEOUT_S graceful = False while graceful_wait_s > 0 : if not self . __connections : break connected_list = [ c . is_connected for ( n , c , g ) in self . ... | Wait for all connections to close. There are no side-effects here; we just want to try to leave after everything has closed, in general. |
9,317 | def __manage_connections ( self , ccallbacks = None ) : _logger . info ( "Running client." ) if self . __message_handler_cls is not None : self . __message_handler = self . __message_handler_cls ( self . __election , ccallbacks ) for ( context , node ) in self . __node_couplets_s : self . __start_connection ( context ,... | This runs as the main connection management greenlet . |
9,318 | def set_servers ( self , node_couplets ) : node_couplets_s = set ( node_couplets ) if node_couplets_s != self . __node_couplets_s : _logger . info ( "Servers have changed. NEW: %s REMOVED: %s" , node_couplets_s - self . __node_couplets_s , self . __node_couplets_s - node_couplets_s ) if not node_couplets_s : raise Envi... | Set the current collection of servers. The entries are 2-tuples of contexts and nodes. |
9,319 | def start ( self , ccallbacks = None ) : self . __manage_g = gevent . spawn ( self . __manage_connections , ccallbacks ) self . __ready_ev . wait ( ) | Establish and maintain connections . |
9,320 | def stop ( self ) : _logger . debug ( "Emitting quit signal for connections." ) self . __quit_ev . set ( ) _logger . info ( "Waiting for connection manager to stop." ) self . __manage_g . join ( ) | Stop all of the connections . |
9,321 | def run ( file_path , include_dirs = [ ] , dlems = False , nogui = False ) : import argparse args = argparse . Namespace ( ) args . lems_file = file_path args . I = include_dirs args . dlems = dlems args . nogui = nogui main ( args = args ) | Function for running from a script or shell . |
9,322 | def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to discovered node: [%s]" , self . server_host ) stop_epoch = time . time ( ) + nsq . config . client . MAXIMUM_CONNECT_ATTEMPT_PERIOD_S timeout_s = nsq . config . client . INITIAL_CONNECT_FAIL_WAIT_S backoff_rate = nsq . config . client . CONNECT_FAIL... | Connect to the server. We expect this to implement backoff and all connection logistics for servers that were discovered via a lookup node. |
9,323 | def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to explicit server node: [%s]" , self . server_host ) try : c = self . primitive_connect ( ) except gevent . socket . error : _logger . exception ( "Could not connect to explicit server: [%s]" , self . server_host ) raise nsq . exceptions . NsqConnectG... | Connect to the server. We expect this to implement connection logistics for servers that were explicitly prescribed to us. |
9,324 | def prepare ( self ) : self . target = self . fn self . targetheader = reader . get_tsv_header ( self . target ) self . decoyheader = reader . get_tsv_header ( self . decoyfn ) | No percolator XML for protein tables |
9,325 | def obtain_token ( self ) : token_end_points = ( 'token/obtain' , 'obtain-token' , 'obtain_token' ) for end_point in token_end_points : try : return self . auth [ end_point ] . _ ( page_size = None ) [ 'token' ] except BeanBagException as e : if e . response . status_code != 404 : raise raise Exception ( 'Could not obt... | Try to obtain a token from all end-points that were ever used to serve the token. If the request returns 404 NOT FOUND, retry with an older version of the URL. |
9,326 | def results ( self , * args , ** kwargs ) : def worker ( ) : kwargs [ 'page' ] = 1 while True : response = self . client ( * args , ** kwargs ) if isinstance ( response , list ) : yield response break elif _is_page ( response ) : yield response [ 'results' ] if response [ 'next' ] : kwargs [ 'page' ] += 1 else : break ... | Return an iterator with all pages of data . Return NoResultsError with response if there is unexpected data . |
9,327 | def get_isoquant_fields ( pqdb = False , poolnames = False ) : if pqdb is None : return { } try : channels_psms = pqdb . get_isoquant_amountpsms_channels ( ) except OperationalError : return { } quantheader , psmsheader = OrderedDict ( ) , OrderedDict ( ) for chan_name , amnt_psms_name in channels_psms : quantheader [ ... | Returns a headerfield dict for isobaric quant channels. Channels are taken from the DB and there isn't a pool-independent version of this yet |
9,328 | def watch_for_events ( ) : fd = inotify . init ( ) try : wd = inotify . add_watch ( fd , '/tmp' , inotify . IN_CLOSE_WRITE ) while True : for event in inotify . get_events ( fd ) : print ( "event:" , event . name , event . get_mask_description ( ) ) finally : os . close ( fd ) | Wait for events and print them to stdout . |
9,329 | def format_body ( self , description , sys_info = None , traceback = None ) : body = BODY_ITEM_TEMPLATE % { 'name' : 'Description' , 'value' : description } if traceback : traceback = '\n' . join ( traceback . splitlines ( ) [ - NB_LINES_MAX : ] ) body += BODY_ITEM_TEMPLATE % { 'name' : 'Traceback' , 'value' : '```\n%s... | Formats the body using markdown . |
9,330 | def list ( ) : "List EC2 name and public and private ip address" for node in env . nodes : print "%s (%s, %s)" % ( node . tags [ "Name" ] , node . ip_address , node . private_ip_address ) | List EC2 name and public and private ip address |
9,331 | def quick_search ( self , name , platform = None , sort_by = None , desc = True ) : if platform is None : query_filter = "name:{0}" . format ( name ) else : query_filter = "name:{0},platforms:{1}" . format ( name , platform ) search_params = { "filter" : query_filter } if sort_by is not None : self . _validate_sort_fie... | Quick search method that allows you to search for a game using only the title and the platform |
9,332 | def send_ping ( self , payload = None ) : yield from asyncio . sleep ( self . _interval ) self . _handler . send_ping ( payload = payload ) self . _start_timer ( payload = payload ) | Sends the ping after the interval specified when initializing |
9,333 | def pong_received ( self , payload = None ) : if self . _timer is not None : self . _timer . cancel ( ) self . _failures = 0 asyncio . async ( self . send_ping ( payload = payload ) ) | Called when a pong is received, so the timer is cancelled |
9,334 | def is_comparable_type ( var , type_ ) : other_types = COMPARABLE_TYPES . get ( type_ , type_ ) return isinstance ( var , other_types ) | Check to see if var is an instance of known compatible types for type_ |
9,335 | def smart_cast ( var , type_ ) : if type_ is None or var is None : return var try : if issubclass ( type_ , type ( None ) ) : return var except TypeError : pass if is_str ( var ) : if type_ in VALID_BOOL_TYPES : return bool_from_str ( var ) elif type_ is slice : args = [ None if len ( arg ) == 0 else int ( arg ) for ar... | casts var to type and tries to be clever when var is a string |
9,336 | def fuzzy_subset ( str_ ) : if str_ is None : return str_ if ':' in str_ : return smart_cast ( str_ , slice ) if str_ . startswith ( '[' ) : return smart_cast ( str_ [ 1 : - 1 ] , list ) else : return smart_cast ( str_ , list ) | converts a string into an argument to list_take |
9,337 | def fuzzy_int ( str_ ) : try : ret = int ( str_ ) return ret except Exception : if re . match ( r'\d*,\d*,?\d*' , str_ ) : return tuple ( map ( int , str_ . split ( ',' ) ) ) if re . match ( r'\d*:\d*:?\d*' , str_ ) : return tuple ( range ( * map ( int , str_ . split ( ':' ) ) ) ) raise | lets some special strings be interpreted as ints |
9,338 | def get_type ( var ) : if HAVE_NUMPY and isinstance ( var , np . ndarray ) : if _WIN32 : type_ = var . dtype else : type_ = var . dtype . type elif HAVE_PANDAS and isinstance ( var , pd . Index ) : if _WIN32 : type_ = var . dtype else : type_ = var . dtype . type else : type_ = type ( var ) return type_ | Gets types accounting for numpy |
9,339 | def get_homogenous_list_type ( list_ ) : if HAVE_NUMPY and isinstance ( list_ , np . ndarray ) : item = list_ elif isinstance ( list_ , list ) and len ( list_ ) > 0 : item = list_ [ 0 ] else : item = None if item is not None : if is_float ( item ) : type_ = float elif is_int ( item ) : type_ = int elif is_bool ( item )... | Returns the best matching Python type, even if it is an ndarray. Assumes all items in the list are of the same type; does not check this |
9,340 | def pop ( self ) : if self . stack : val = self . stack [ 0 ] self . stack = self . stack [ 1 : ] return val else : raise StackError ( 'Stack empty' ) | Pops a value off the top of the stack . |
9,341 | def create_spectra_lookup ( lookup , fn_spectra ) : to_store = [ ] mzmlmap = lookup . get_mzmlfile_map ( ) for fn , spectrum in fn_spectra : spec_id = '{}_{}' . format ( mzmlmap [ fn ] , spectrum [ 'scan' ] ) mzml_rt = round ( float ( spectrum [ 'rt' ] ) , 12 ) mzml_iit = round ( float ( spectrum [ 'iit' ] ) , 12 ) mz ... | Stores all spectra RT, injection time and scan nr in the db |
9,342 | def assert_raises ( ex_type , func , * args , ** kwargs ) : r try : func ( * args , ** kwargs ) except Exception as ex : assert isinstance ( ex , ex_type ) , ( 'Raised %r but type should have been %r' % ( ex , ex_type ) ) return True else : raise AssertionError ( 'No error was raised' ) | Checks that a function raises an error when given specific arguments. |
9,343 | def command_for_all_connections ( self , cb ) : for connection in self . __master . connections : cb ( connection . command ) | Invoke the callback with a command-object for each connection. |
9,344 | def dump_autogen_code ( fpath , autogen_text , codetype = 'python' , fullprint = None , show_diff = None , dowrite = None ) : import utool as ut if dowrite is None : dowrite = ut . get_argflag ( ( '-w' , '--write' ) ) if show_diff is None : show_diff = ut . get_argflag ( '--diff' ) num_context_lines = ut . get_argval (... | Helper that writes a file if -w is given on the command line; otherwise it just prints it out. It has the option of comparing a diff to the file. |
9,345 | def autofix_codeblock ( codeblock , max_line_len = 80 , aggressive = False , very_aggressive = False , experimental = False ) : r import autopep8 arglist = [ '--max-line-length' , '80' ] if aggressive : arglist . extend ( [ '-a' ] ) if very_aggressive : arglist . extend ( [ '-a' , '-a' ] ) if experimental : arglist . e... | Uses autopep8 to format a block of code |
9,346 | def auto_docstr ( modname , funcname , verbose = True , moddir = None , modpath = None , ** kwargs ) : r func , module , error_str = load_func_from_module ( modname , funcname , verbose = verbose , moddir = moddir , modpath = modpath ) if error_str is None : try : docstr = make_default_docstr ( func , ** kwargs ) excep... | Called from vim. Uses strings of filename and modnames to build the docstr |
9,347 | def make_args_docstr ( argname_list , argtype_list , argdesc_list , ismethod , va_name = None , kw_name = None , kw_keys = [ ] ) : r import utool as ut if ismethod : argname_list = argname_list [ 1 : ] argtype_list = argtype_list [ 1 : ] argdesc_list = argdesc_list [ 1 : ] argdoc_list = [ arg + ' (%s): %s' % ( _type , ... | Builds the argument docstring |
9,348 | def make_default_docstr ( func , with_args = True , with_ret = True , with_commandline = True , with_example = True , with_header = False , with_debug = False ) : r import utool as ut funcinfo = ut . util_inspect . infer_function_info ( func ) argname_list = funcinfo . argname_list argtype_list = funcinfo . argtype_lis... | Tries to make a sensible default docstr so the user can fill things in without typing too much |
9,349 | def remove_codeblock_syntax_sentinals ( code_text ) : r flags = re . MULTILINE | re . DOTALL code_text_ = code_text code_text_ = re . sub ( r'^ *# *REM [^\n]*$\n?' , '' , code_text_ , flags = flags ) code_text_ = re . sub ( r'^ *# STARTBLOCK *$\n' , '' , code_text_ , flags = flags ) code_text_ = re . sub ( r'^ *# ENDBL... | Removes template comments and vim sentinels |
9,350 | def sort_protein_group ( pgroup , sortfunctions , sortfunc_index ) : pgroup_out = [ ] subgroups = sortfunctions [ sortfunc_index ] ( pgroup ) sortfunc_index += 1 for subgroup in subgroups : if len ( subgroup ) > 1 and sortfunc_index < len ( sortfunctions ) : pgroup_out . extend ( sort_protein_group ( subgroup , sortfun... | Recursive function that sorts a protein group by a number of sorting functions. |
9,351 | def sort_amounts ( proteins , sort_index ) : amounts = { } for protein in proteins : amount_x_for_protein = protein [ sort_index ] try : amounts [ amount_x_for_protein ] . append ( protein ) except KeyError : amounts [ amount_x_for_protein ] = [ protein ] return [ v for k , v in sorted ( amounts . items ( ) , reverse =... | Generic function for sorting peptides and psms . Assumes a higher number is better for what is passed at sort_index position in protein . |
9,352 | def free ( self ) : if self . _ptr is None : return Gauged . map_free ( self . ptr ) SparseMap . ALLOCATIONS -= 1 self . _ptr = None | Free the map |
9,353 | def append ( self , position , array ) : if not Gauged . map_append ( self . ptr , position , array . ptr ) : raise MemoryError | Append an array to the end of the map . The position must be greater than any positions in the map |
9,354 | def slice ( self , start = 0 , end = 0 ) : tmp = Gauged . map_new ( ) if tmp is None : raise MemoryError if not Gauged . map_concat ( tmp , self . ptr , start , end , 0 ) : Gauged . map_free ( tmp ) raise MemoryError return SparseMap ( tmp ) | Slice the map from [start, end) |
9,355 | def concat ( self , operand , start = 0 , end = 0 , offset = 0 ) : if not Gauged . map_concat ( self . ptr , operand . ptr , start , end , offset ) : raise MemoryError | Concat a map . You can also optionally slice the operand map and apply an offset to each position before concatting |
9,356 | def buffer ( self , byte_offset = 0 ) : contents = self . ptr . contents ptr = addressof ( contents . buffer . contents ) + byte_offset length = contents . length * 4 - byte_offset return buffer ( ( c_char * length ) . from_address ( ptr ) . raw ) if length else None | Get a copy of the map buffer |
9,357 | def matches ( target , entry ) : for t , e in itertools . zip_longest ( target , entry ) : if e and t != e : return False return entry [ 0 ] and entry [ 1 ] | Does the target match the whitelist entry? |
9,358 | def check_entry ( * entry ) : whitelist = read_whitelist ( ) if not check_allow_prompt ( entry , whitelist ) : whitelist . append ( entry ) write_whitelist ( whitelist ) | Throws an exception if the entry isn't on the whitelist. |
9,359 | def load_uncached ( location , use_json = None ) : if not whitelist . is_file ( location ) : r = requests . get ( raw . raw ( location ) ) if not r . ok : raise ValueError ( 'Couldn\'t read %s with code %s:\n%s' % ( location , r . status_code , r . text ) ) data = r . text else : try : f = os . path . realpath ( os . p... | Return data at either a file location or at the raw version of a URL or raise an exception . |
9,360 | def find_group_differences ( groups1 , groups2 ) : r import utool as ut item_to_others1 = { item : set ( _group ) - { item } for _group in groups1 for item in _group } item_to_others2 = { item : set ( _group ) - { item } for _group in groups2 for item in _group } flat_items1 = ut . flatten ( groups1 ) flat_items2 = ut ... | Returns a measure of how dissimilar two groupings are |
9,361 | def find_group_consistencies ( groups1 , groups2 ) : r group1_list = { tuple ( sorted ( _group ) ) for _group in groups1 } group2_list = { tuple ( sorted ( _group ) ) for _group in groups2 } common_groups = list ( group1_list . intersection ( group2_list ) ) return common_groups | Returns a measure of group consistency |
9,362 | def compare_groups ( true_groups , pred_groups ) : r import utool as ut true = { frozenset ( _group ) for _group in true_groups } pred = { frozenset ( _group ) for _group in pred_groups } common = true . intersection ( pred ) true_sets = true . difference ( common ) pred_sets = pred . difference ( common ) pred_conn = ... | Finds how predictions need to be modified to match the true grouping. |
9,363 | def grouping_delta_stats ( old , new ) : import pandas as pd import utool as ut group_delta = ut . grouping_delta ( old , new ) stats = ut . odict ( ) unchanged = group_delta [ 'unchanged' ] splits = group_delta [ 'splits' ] merges = group_delta [ 'merges' ] hybrid = group_delta [ 'hybrid' ] statsmap = ut . partial ( l... | Returns statistics about grouping changes |
9,364 | def upper_diag_self_prodx ( list_ ) : return [ ( item1 , item2 ) for n1 , item1 in enumerate ( list_ ) for n2 , item2 in enumerate ( list_ ) if n1 < n2 ] | Upper diagonal of the Cartesian product of self and self. Weird name. FIXME |
9,365 | def colwise_diag_idxs ( size , num = 2 ) : r import utool as ut diag_idxs = ut . iprod ( * [ range ( size ) for _ in range ( num ) ] ) upper_diag_idxs = [ tup [ : : - 1 ] for tup in diag_idxs if all ( [ a > b for a , b in ut . itertwo ( tup ) ] ) ] return upper_diag_idxs | Don't trust this implementation or this function name |
9,366 | def product_nonsame ( list1 , list2 ) : for item1 , item2 in itertools . product ( list1 , list2 ) : if item1 != item2 : yield ( item1 , item2 ) | Product of list1 and list2 where items are non-equal |
9,367 | def greedy_max_inden_setcover ( candidate_sets_dict , items , max_covers = None ) : uncovered_set = set ( items ) rejected_keys = set ( ) accepted_keys = set ( ) covered_items_list = [ ] while True : if max_covers is not None and len ( covered_items_list ) >= max_covers : break maxkey = None maxlen = - 1 for key , cand... | greedy algorithm for maximum independent set cover |
9,368 | def setcover_greedy ( candidate_sets_dict , items = None , set_weights = None , item_values = None , max_weight = None ) : r import utool as ut solution_cover = { } if items is None : items = ut . flatten ( candidate_sets_dict . values ( ) ) if set_weights is None : get_weight = len else : def get_weight ( solution_cov... | Greedy algorithm for various covering problems, with approximation guarantees depending on specifications like set_weights and item_values |
9,369 | def item_hist ( list_ ) : dict_hist = { } for item in list_ : if item not in dict_hist : dict_hist [ item ] = 0 dict_hist [ item ] += 1 return dict_hist | Counts the number of times each item appears in the list |
9,370 | def get_nth_prime ( n , max_prime = 4100 , safe = True ) : if n <= 100 : first_100_primes = ( 2 , 3 , 5 , 7 , 11 , 13 , 17 , 19 , 23 , 29 , 31 , 37 , 41 , 43 , 47 , 53 , 59 , 61 , 67 , 71 , 73 , 79 , 83 , 89 , 97 , 101 , 103 , 107 , 109 , 113 , 127 , 131 , 137 , 139 , 149 , 151 , 157 , 163 , 167 , 173 , 179 , 181 , 191... | hacky but still brute force algorithm for finding nth prime for small tests |
9,371 | def knapsack ( items , maxweight , method = 'recursive' ) : r if method == 'recursive' : return knapsack_recursive ( items , maxweight ) elif method == 'iterative' : return knapsack_iterative ( items , maxweight ) elif method == 'ilp' : return knapsack_ilp ( items , maxweight ) else : raise NotImplementedError ( '[util... | Solve the knapsack problem by finding the most valuable subsequence of items, subject to the constraint that it weighs no more than maxweight. |
9,372 | def knapsack_ilp ( items , maxweight , verbose = False ) : import pulp values = [ t [ 0 ] for t in items ] weights = [ t [ 1 ] for t in items ] indices = [ t [ 2 ] for t in items ] prob = pulp . LpProblem ( "Knapsack" , pulp . LpMaximize ) x = pulp . LpVariable . dicts ( name = 'x' , indexs = indices , lowBound = 0 , u... | solves knapsack using an integer linear program |
9,373 | def knapsack_iterative ( items , maxweight ) : weights = [ t [ 1 ] for t in items ] max_exp = max ( [ number_of_decimals ( w_ ) for w_ in weights ] ) coeff = 10 ** max_exp int_maxweight = int ( maxweight * coeff ) int_items = [ ( v , int ( w * coeff ) , idx ) for v , w , idx in items ] return knapsack_iterative_int ( i... | items = int_items maxweight = int_maxweight |
9,374 | def knapsack_iterative_int ( items , maxweight ) : r values = [ t [ 0 ] for t in items ] weights = [ t [ 1 ] for t in items ] maxsize = maxweight + 1 dpmat = defaultdict ( lambda : defaultdict ( lambda : np . inf ) ) kmat = defaultdict ( lambda : defaultdict ( lambda : False ) ) idx_subset = [ ] for w in range ( maxsiz... | Iterative knapsack method |
9,375 | def knapsack_iterative_numpy ( items , maxweight ) : items = np . array ( items ) weights = items . T [ 1 ] max_exp = max ( [ number_of_decimals ( w_ ) for w_ in weights ] ) coeff = 10 ** max_exp weights = ( weights * coeff ) . astype ( np . int ) values = items . T [ 0 ] MAXWEIGHT = int ( maxweight * coeff ) W_SIZE = ... | Iterative knapsack method |
9,376 | def knapsack_greedy ( items , maxweight ) : r items_subset = [ ] total_weight = 0 total_value = 0 for item in items : value , weight = item [ 0 : 2 ] if total_weight + weight > maxweight : continue else : items_subset . append ( item ) total_weight += weight total_value += value return total_value , items_subset | Non-optimal greedy version of the knapsack algorithm; does not sort input. Sort the input by largest value first if desired. |
9,377 | def choose ( n , k ) : import scipy . misc return scipy . misc . comb ( n , k , exact = True , repetition = False ) | N choose k |
9,378 | def almost_eq ( arr1 , arr2 , thresh = 1E-11 , ret_error = False ) : error = np . abs ( arr1 - arr2 ) passed = error < thresh if ret_error : return passed , error return passed | Checks if floating point numbers are equal within a threshold |
9,379 | def norm_zero_one ( array , dim = None ) : if not util_type . is_float ( array ) : array = array . astype ( np . float32 ) array_max = array . max ( dim ) array_min = array . min ( dim ) array_exnt = np . subtract ( array_max , array_min ) array_norm = np . divide ( np . subtract ( array , array_min ) , array_exnt ) re... | Normalizes a numpy array from 0 to 1 based on its extent |
9,380 | def group_indices ( groupid_list ) : item_list = range ( len ( groupid_list ) ) grouped_dict = util_dict . group_items ( item_list , groupid_list ) keys_ = list ( grouped_dict . keys ( ) ) try : keys = sorted ( keys_ ) except TypeError : keys = util_list . sortedby2 ( keys_ , keys_ ) groupxs = util_dict . dict_take ( g... | Groups indices of each item in groupid_list |
9,381 | def ungroup_gen ( grouped_items , groupxs , fill = None ) : import utool as ut minpergroup = [ min ( xs ) if len ( xs ) else 0 for xs in groupxs ] minval = min ( minpergroup ) if len ( minpergroup ) else 0 flat_groupx = ut . flatten ( groupxs ) sortx = ut . argsort ( flat_groupx ) groupx_sorted = ut . take ( flat_group... | Ungroups items, returning a generator. Note that this is much slower than the list version and is not guaranteed to have better memory usage. |
9,382 | def ungroup_unique ( unique_items , groupxs , maxval = None ) : if maxval is None : maxpergroup = [ max ( xs ) if len ( xs ) else 0 for xs in groupxs ] maxval = max ( maxpergroup ) if len ( maxpergroup ) else 0 ungrouped_items = [ None ] * ( maxval + 1 ) for item , xs in zip ( unique_items , groupxs ) : for x in xs : u... | Ungroups unique items to correspond to the original non-unique list |
9,383 | def edit_distance ( string1 , string2 ) : import utool as ut try : import Levenshtein except ImportError as ex : ut . printex ( ex , 'pip install python-Levenshtein' ) raise import utool as ut isiter1 = ut . isiterable ( string1 ) isiter2 = ut . isiterable ( string2 ) strs1 = string1 if isiter1 else [ string1 ] strs2 =... | Edit distance algorithm . String1 and string2 can be either strings or lists of strings |
9,384 | def standardize_boolexpr ( boolexpr_ , parens = False ) : r import utool as ut import re onlyvars = boolexpr_ onlyvars = re . sub ( '\\bnot\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\band\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\bor\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\(' , '' , onlyvars ) onlyvars ... | Standardizes a boolean expression into an or-ing of and-ed variables |
9,385 | def expensive_task_gen ( num = 8700 ) : r import utool as ut for x in range ( 0 , num ) : with ut . Timer ( verbose = False ) as t : ut . is_prime ( x ) yield t . ellapsed | Runs a task that takes some time |
9,386 | def factors ( n ) : return set ( reduce ( list . __add__ , ( [ i , n // i ] for i in range ( 1 , int ( n ** 0.5 ) + 1 ) if n % i == 0 ) ) ) | Computes all the integer factors of the number n |
9,387 | def add_protein_data ( proteins , pgdb , headerfields , genecentric = False , pool_to_output = False ) : proteindata = create_featuredata_map ( pgdb , genecentric = genecentric , psm_fill_fun = add_psms_to_proteindata , pgene_fill_fun = add_protgene_to_protdata , count_fun = count_peps_psms , pool_to_output = pool_to_o... | First creates a map with all master proteins with data, then outputs protein data dicts for rows of a tsv. If a pool is given, then only output for that pool will be shown in the protein table. |
9,388 | def get_protein_data_pgrouped ( proteindata , p_acc , headerfields ) : report = get_protein_data_base ( proteindata , p_acc , headerfields ) return get_cov_protnumbers ( proteindata , p_acc , report ) | Parses protein data for a certain protein into tsv output dictionary |
9,389 | def keys ( self , namespace , prefix = None , limit = None , offset = None ) : params = [ namespace ] query = 'SELECT key FROM gauged_keys WHERE namespace = %s' if prefix is not None : query += ' AND key LIKE %s' params . append ( prefix + '%' ) if limit is not None : query += ' LIMIT %s' params . append ( limit ) if o... | Get keys from a namespace |
9,390 | def get_block ( self , namespace , offset , key ) : cursor = self . cursor cursor . execute ( 'SELECT data, flags FROM gauged_data ' 'WHERE namespace = %s AND "offset" = %s AND key = %s' , ( namespace , offset , key ) ) row = cursor . fetchone ( ) return ( None , None ) if row is None else row | Get the block identified by namespace offset key and value |
9,391 | def block_offset_bounds ( self , namespace ) : cursor = self . cursor cursor . execute ( 'SELECT MIN("offset"), MAX("offset") ' 'FROM gauged_statistics WHERE namespace = %s' , ( namespace , ) ) return cursor . fetchone ( ) | Get the minimum and maximum block offset for the specified namespace |
9,392 | def set_writer_position ( self , name , timestamp ) : execute = self . cursor . execute execute ( 'DELETE FROM gauged_writer_history WHERE id = %s' , ( name , ) ) execute ( 'INSERT INTO gauged_writer_history (id, timestamp) ' 'VALUES (%s, %s)' , ( name , timestamp , ) ) | Insert a timestamp to keep track of the current writer position |
9,393 | def add_cache ( self , namespace , key , query_hash , length , cache ) : start = 0 bulk_insert = self . bulk_insert cache_len = len ( cache ) row = '(%s,%s,%s,%s,%s,%s)' query = 'INSERT INTO gauged_cache ' '(namespace, key, "hash", length, start, value) VALUES ' execute = self . cursor . execute query_hash = self . psy... | Add cached values for the specified date range and query |
9,394 | def get_environment_vars ( filename ) : if sys . platform == "linux" or sys . platform == "linux2" : return { 'LD_PRELOAD' : path . join ( LIBFAKETIME_DIR , "libfaketime.so.1" ) , 'FAKETIME_SKIP_CMDS' : 'nodejs' , 'FAKETIME_TIMESTAMP_FILE' : filename , } elif sys . platform == "darwin" : return { 'DYLD_INSERT_LIBRARIES... | Return a dict of environment variables required to run a service under faketime . |
9,395 | def change_time ( filename , newtime ) : with open ( filename , "w" ) as faketimetxt_handle : faketimetxt_handle . write ( "@" + newtime . strftime ( "%Y-%m-%d %H:%M:%S" ) ) | Change the time of a process or group of processes by writing a new time to the time file . |
9,396 | def filter_unique_peptides ( peptides , score , ns ) : scores = { 'q' : 'q_value' , 'pep' : 'pep' , 'p' : 'p_value' , 'svm' : 'svm_score' } highest = { } for el in peptides : featscore = float ( el . xpath ( 'xmlns:%s' % scores [ score ] , namespaces = ns ) [ 0 ] . text ) seq = reader . get_peptide_seq ( el , ns ) if s... | Filters unique peptides from multiple Percolator output XML files. Takes a dir with a set of XMLs, a score to filter on, and a namespace. Outputs an ElementTree. |
9,397 | def import_symbol ( name = None , path = None , typename = None , base_path = None ) : _ , symbol = _import ( name or typename , path or base_path ) return symbol | Import a module or a typename within a module from its name . |
9,398 | def add_to_win32_PATH ( script_fpath , * add_path_list ) : r import utool as ut write_dir = dirname ( script_fpath ) key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]' rtype = 'REG_EXPAND_SZ' win_pathlist = list ( os . environ [ 'PATH' ] . split ( os . path . pathsep ) ) new_path_... | Writes a registry script to update the PATH variable into the sync registry |
9,399 | def dzip ( list1 , list2 ) : r try : len ( list1 ) except TypeError : list1 = list ( list1 ) try : len ( list2 ) except TypeError : list2 = list ( list2 ) if len ( list1 ) == 0 and len ( list2 ) == 1 : list2 = [ ] if len ( list2 ) == 1 and len ( list1 ) > 1 : list2 = list2 * len ( list1 ) if len ( list1 ) != len ( list... | Zips elementwise pairs between list1 and list2 into a dictionary. Values from list2 can be broadcast onto list1. |
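Each question above is the space-tokenized source of a function and each target is its docstring. As a concrete illustration of what one row contains, here is row 9,369 (`item_hist`) detokenized into ordinary Python — a minimal sketch; the `__main__` usage at the bottom is our own illustrative addition, not part of the dataset:

```python
# Row 9,369, restored from the dataset's space-tokenized form to normal
# Python spacing. Only the __main__ demo below is new (illustrative).
def item_hist(list_):
    """Counts the number of times each item appears in the list."""
    dict_hist = {}
    for item in list_:
        if item not in dict_hist:
            dict_hist[item] = 0  # first occurrence of this item
        dict_hist[item] += 1     # count this occurrence
    return dict_hist


if __name__ == '__main__':
    # Hypothetical input; prints {'a': 2, 'b': 1}
    print(item_hist(['a', 'b', 'a']))
```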