query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Return an estimate of the number of items in obj .
def length_hint(obj, default=0):
    """Return an estimate of the number of items in ``obj``.

    Prefers ``len(obj)``; when the object is unsized, falls back to the
    ``__length_hint__`` protocol looked up on the object's type, and
    finally to ``default`` when no hint is available.

    Raises TypeError when the hint is not an int, and ValueError when it
    is negative.
    """
    try:
        return len(obj)
    except TypeError:
        pass
    # No __len__: consult the (optional) length-hint protocol.
    try:
        get_hint = type(obj).__length_hint__
    except AttributeError:
        return default
    try:
        hint = get_hint(obj)
    except TypeError:
        return default
    if hint is NotImplemented:
        return default
    if not isinstance(hint, int):
        raise TypeError("Length hint must be an integer, not %r" %
                        type(hint))
    if hint < 0:
        raise ValueError("__length_hint__() should return >= 0")
    return hint
9,300
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3459-L3486
[ "def", "watch", "(", "self", ",", "pipeline", "=", "None", ",", "full_document", "=", "'default'", ",", "resume_after", "=", "None", ",", "max_await_time_ms", "=", "None", ",", "batch_size", "=", "None", ",", "collation", "=", "None", ",", "start_at_operation_time", "=", "None", ",", "session", "=", "None", ")", ":", "return", "DatabaseChangeStream", "(", "self", ",", "pipeline", ",", "full_document", ",", "resume_after", ",", "max_await_time_ms", ",", "batch_size", ",", "collation", ",", "start_at_operation_time", ",", "session", ")" ]
Helper method that populates parser arguments . The argument values can be later retrieved with extract_arguments method .
def add_parser_arguments(parser, args, group=None, prefix=DATA_PREFIX):
    """Populate ``parser`` with one ``--option`` per entry in ``args``.

    Each value in ``args`` is a kwargs dict for ``add_argument``; the
    optional ``'arg'`` key overrides the option name (default: the key
    with underscores turned into dashes). Destinations are prefixed so
    the values can later be retrieved with ``extract_arguments``.
    """
    if group:
        # Render these options under a named group in --help output.
        parser = parser.add_argument_group(group)
    for arg, kwargs in iteritems(args):
        option_name = kwargs.pop('arg', arg.replace('_', '-'))
        if 'metavar' not in kwargs:
            kwargs['metavar'] = arg.upper()
        # Namespace the destination so extract_arguments can find it.
        if 'dest' in kwargs:
            kwargs['dest'] = prefix + kwargs['dest']
        else:
            kwargs['dest'] = prefix + arg
        parser.add_argument('--' + option_name, **kwargs)
9,301
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L102-L125
[ "def", "load", "(", "self", ")", ":", "try", ":", "if", "os", ".", "path", ".", "getsize", "(", "self", ".", "state_file", ")", "<=", "1", ":", "raise", "IOError", "(", "\"File is empty.\"", ")", "with", "open", "(", "self", ".", "state_file", ")", "as", "fh", ":", "state", "=", "json", ".", "load", "(", "fh", ")", "assert", "isinstance", "(", "state", ",", "dict", ")", "self", ".", "hosts", "=", "state", "[", "'hosts'", "]", "self", ".", "stats", "=", "state", "[", "'stats'", "]", "for", "key", "in", "self", ".", "stats", ":", "self", ".", "stats", "[", "key", "]", "[", "'open_requests'", "]", "=", "0", "except", "(", "IOError", ",", "OSError", ")", ":", "# There is no state file; start empty.", "self", ".", "hosts", "=", "{", "}", "self", ".", "stats", "=", "{", "}" ]
Helper method that populates mutually exclusive arguments . The argument values can be later retrieved with extract_arguments method .
def add_mutually_exclusive_args(parser, args, required=False, prefix=DATA_PREFIX):
    """Populate mutually exclusive parser arguments.

    Like ``add_parser_arguments``, but all options land in a single
    ``add_mutually_exclusive_group``. Values can later be retrieved
    with ``extract_arguments``.
    """
    group = parser.add_mutually_exclusive_group(required=required)
    for arg, kwargs in iteritems(args):
        option_name = kwargs.pop('arg', arg.replace('_', '-'))
        if 'metavar' not in kwargs:
            kwargs['metavar'] = arg.upper()
        group.add_argument('--' + option_name, dest=prefix + arg, **kwargs)
9,302
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L128-L147
[ "def", "log_likelihood", "(", "C", ",", "T", ")", ":", "C", "=", "C", ".", "tocsr", "(", ")", "T", "=", "T", ".", "tocsr", "(", ")", "ind", "=", "scipy", ".", "nonzero", "(", "C", ")", "relT", "=", "np", ".", "array", "(", "T", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "relT", "=", "np", ".", "log", "(", "relT", ")", "relC", "=", "np", ".", "array", "(", "C", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "return", "relT", ".", "dot", "(", "relC", ")" ]
Wrapper around add_parser_arguments .
def add_create_update_args(parser, required_args, optional_args, create=False):
    """Wrapper around ``add_parser_arguments``.

    In create mode the required arguments are marked ``required`` and put
    in their own group; in update mode everything is optional.
    """
    if not create:
        # Updates may omit anything, so fold required into optional.
        optional_args.update(required_args)
        add_parser_arguments(parser, optional_args)
        return
    for key in required_args:
        required_args[key]['required'] = True
    add_parser_arguments(parser, required_args, group='required arguments')
9,303
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L150-L169
[ "def", "to_json", "(", "self", ")", ":", "web_resp", "=", "collections", ".", "OrderedDict", "(", ")", "web_resp", "[", "'status_code'", "]", "=", "self", ".", "status_code", "web_resp", "[", "'status_text'", "]", "=", "dict", "(", "HTTP_CODES", ")", ".", "get", "(", "self", ".", "status_code", ")", "web_resp", "[", "'data'", "]", "=", "self", ".", "data", "if", "self", ".", "data", "is", "not", "None", "else", "{", "}", "web_resp", "[", "'errors'", "]", "=", "self", ".", "errors", "or", "[", "]", "return", "web_resp" ]
Return a dict of arguments created by add_parser_arguments .
def extract_arguments(args, prefix=DATA_PREFIX):
    """Return a dict of the arguments created by ``add_parser_arguments``.

    Only namespace attributes starting with ``prefix`` and holding a
    non-None value are collected. A double underscore in a key denotes
    nesting: ``a__b`` becomes ``{'a': {'b': value}}``. An empty-string
    value is stored as None.
    """
    data = {}
    for key, value in iteritems(args.__dict__):
        if not key.startswith(prefix) or value is None:
            continue
        parts = key[len(prefix):].split('__')
        # Walk/create the nested dicts for every part but the last;
        # `node` tracks where the value will eventually be inserted.
        node = data
        for part in parts[:-1]:
            assert part not in node or isinstance(node[part], dict)
            node = node.setdefault(part, {})
        # `node` is now the innermost dict for this key.
        node[parts[-1]] = value if value != '' else None
    return data
9,304
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L172-L194
[ "def", "ITRF_position_velocity_error", "(", "self", ",", "t", ")", ":", "rTEME", ",", "vTEME", ",", "error", "=", "self", ".", "_position_and_velocity_TEME_km", "(", "t", ")", "rTEME", "/=", "AU_KM", "vTEME", "/=", "AU_KM", "vTEME", "*=", "DAY_S", "rITRF", ",", "vITRF", "=", "TEME_to_ITRF", "(", "t", ".", "ut1", ",", "rTEME", ",", "vTEME", ")", "return", "rITRF", ",", "vITRF", ",", "error" ]
Given a FASTA database proteins are trypsinized and resulting peptides stored in a database or dict for lookups
def create_searchspace(lookup, fastafn, proline_cut=False, reverse_seqs=True,
                       do_trypsinize=True):
    """Digest a FASTA database and store the resulting peptides.

    Proteins from ``fastafn`` are trypsinized (unless ``do_trypsinize``
    is False) and the peptides written to ``lookup`` in batches.
    """
    buffered = []
    for record in SeqIO.parse(fastafn, 'fasta'):
        if do_trypsinize:
            peptides = trypsinize(record.seq, proline_cut)
        else:
            peptides = [record.seq]
        # MS cannot distinguish leucine from isoleucine, so collapse L -> I.
        buffered.extend((str(pep).replace('L', 'I'),) for pep in peptides)
        if len(buffered) > 1000000:
            # Flush the batch to SQLite to bound memory use.
            lookup.write_peps(buffered, reverse_seqs)
            buffered = []
    # Write any remaining peptides, then finalize the lookup.
    lookup.write_peps(buffered, reverse_seqs)
    lookup.index_peps(reverse_seqs)
    lookup.close_connection()
9,305
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/searchspace.py#L24-L43
[ "def", "clear_stalled_files", "(", "self", ")", ":", "# FIXME: put lock in directory?", "CLEAR_AFTER", "=", "self", ".", "config", "[", "\"DELETE_STALLED_AFTER\"", "]", "minimum_age", "=", "time", ".", "time", "(", ")", "-", "CLEAR_AFTER", "for", "user_dir", "in", "self", ".", "UPLOAD_DIR", ".", "iterdir", "(", ")", ":", "if", "not", "user_dir", ".", "is_dir", "(", ")", ":", "logger", ".", "error", "(", "\"Found non-directory in upload dir: %r\"", ",", "bytes", "(", "user_dir", ")", ")", "continue", "for", "content", "in", "user_dir", ".", "iterdir", "(", ")", ":", "if", "not", "content", ".", "is_file", "(", ")", ":", "logger", ".", "error", "(", "\"Found non-file in user upload dir: %r\"", ",", "bytes", "(", "content", ")", ")", "continue", "if", "content", ".", "stat", "(", ")", ".", "st_ctime", "<", "minimum_age", ":", "content", ".", "unlink", "(", ")" ]
Newer version of hashstr_arr2.
def hashid_arr(arr, label='arr', hashlen=16):
    """Newer version of hashstr_arr2: '<label>-<shape>-<hash>' identifier.

    For lists/tuples the shape component is the length; otherwise the
    array's ``.shape`` is rendered as comma-separated dimensions.
    """
    digest = hash_data(arr)[:hashlen]
    if isinstance(arr, (list, tuple)):
        shapestr = len(arr)
    else:
        shapestr = ','.join(map(str, arr.shape))
    return '{}-{}-{}'.format(label, shapestr, digest)
9,306
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L163-L171
[ "def", "run", "(", "self", ")", ":", "elapsed", "=", "0", "run_time", "=", "self", ".", "config", "[", "'run_time'", "]", "start_time", "=", "time", ".", "time", "(", ")", "t", "=", "time", ".", "time", "self", ".", "turrets_manager", ".", "start", "(", "self", ".", "transaction_context", ")", "self", ".", "started", "=", "True", "while", "elapsed", "<=", "run_time", ":", "try", ":", "self", ".", "_run_loop_action", "(", ")", "self", ".", "_print_status", "(", "elapsed", ")", "elapsed", "=", "t", "(", ")", "-", "start_time", "except", "(", "Exception", ",", "KeyboardInterrupt", ")", ":", "print", "(", "\"\\nStopping test, sending stop command to turrets\"", ")", "self", ".", "turrets_manager", ".", "stop", "(", ")", "self", ".", "stats_handler", ".", "write_remaining", "(", ")", "traceback", ".", "print_exc", "(", ")", "break", "self", ".", "turrets_manager", ".", "stop", "(", ")", "print", "(", "\"\\n\\nProcessing all remaining messages... This could take time depending on message volume\"", ")", "t", "=", "time", ".", "time", "(", ")", "self", ".", "result_collector", ".", "unbind", "(", "self", ".", "result_collector", ".", "LAST_ENDPOINT", ")", "self", ".", "_clean_queue", "(", ")", "print", "(", "\"took %s\"", "%", "(", "time", ".", "time", "(", ")", "-", "t", ")", ")" ]
This is the clear winner over the generate version . Used by hash_data
def _update_hasher(hasher, data):
    """Feed ``data`` into ``hasher``, iterating containers element-wise.

    Tuples, lists, zips, and object-dtype ndarrays are walked item by
    item (with separator/prefix bytes so nesting changes the digest);
    everything else is converted with ``_covert_to_hashable`` and fed
    directly. Used by hash_data.
    """
    if isinstance(data, (tuple, list, zip)):
        needs_iteration = True
    elif (util_type.HAVE_NUMPY and isinstance(data, np.ndarray) and
          data.dtype.kind == 'O'):
        # ndarrays of objects cannot be hashed directly.
        needs_iteration = True
    else:
        needs_iteration = False

    if needs_iteration:
        # Try to nest quickly without recursive calls.
        SEP = b'SEP'
        iter_prefix = b'ITER'
        iter_ = iter(data)
        hasher.update(iter_prefix)
        try:
            for item in iter_:
                # BUG FIX: the original converted `data` (the whole
                # container) on every pass instead of the current item,
                # so element values never reached the hasher.
                prefix, hashable = _covert_to_hashable(item)
                binary_data = SEP + prefix + hashable
                hasher.update(binary_data)
        except TypeError:
            # The current item is not directly hashable: recurse on it,
            # then on the rest of the iterator, with separators between.
            _update_hasher(hasher, item)
            for item in iter_:
                hasher.update(SEP)
                _update_hasher(hasher, item)
    else:
        prefix, hashable = _covert_to_hashable(data)
        binary_data = prefix + hashable
        hasher.update(binary_data)
9,307
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L241-L358
[ "def", "correspondence", "(", "soup", ")", ":", "correspondence", "=", "[", "]", "author_notes_nodes", "=", "raw_parser", ".", "author_notes", "(", "soup", ")", "if", "author_notes_nodes", ":", "corresp_nodes", "=", "raw_parser", ".", "corresp", "(", "author_notes_nodes", ")", "for", "tag", "in", "corresp_nodes", ":", "correspondence", ".", "append", "(", "tag", ".", "text", ")", "return", "correspondence" ]
Only works on bytes
def combine_hashes(bytes_list, hasher=None):
    """Fold an iterable of bytes objects into one digest.

    Only works on bytes. Each chunk is followed by a SEP_BYTE so that
    re-chunked input produces a different digest.
    """
    if hasher is None:
        hasher = hashlib.sha256()
    for chunk in bytes_list:
        hasher.update(chunk)
        hasher.update(SEP_BYTE)
    return hasher.digest()
9,308
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L410-L434
[ "def", "_psturng", "(", "q", ",", "r", ",", "v", ")", ":", "if", "q", "<", "0.", ":", "raise", "ValueError", "(", "'q should be >= 0'", ")", "opt_func", "=", "lambda", "p", ",", "r", ",", "v", ":", "abs", "(", "_qsturng", "(", "p", ",", "r", ",", "v", ")", "-", "q", ")", "if", "v", "==", "1", ":", "if", "q", "<", "_qsturng", "(", ".9", ",", "r", ",", "1", ")", ":", "return", ".1", "elif", "q", ">", "_qsturng", "(", ".999", ",", "r", ",", "1", ")", ":", "return", ".001", "return", "1.", "-", "fminbound", "(", "opt_func", ",", ".9", ",", ".999", ",", "args", "=", "(", "r", ",", "v", ")", ")", "else", ":", "if", "q", "<", "_qsturng", "(", ".1", ",", "r", ",", "v", ")", ":", "return", ".9", "elif", "q", ">", "_qsturng", "(", ".999", ",", "r", ",", "v", ")", ":", "return", ".001", "return", "1.", "-", "fminbound", "(", "opt_func", ",", ".1", ",", ".999", ",", "args", "=", "(", "r", ",", "v", ")", ")" ]
Get a unique hash depending on the state of the data.
def hash_data(data, hashlen=None, alphabet=None):
    """Get a unique hash depending on the state of the data.

    Produces a sha512-based string of ``hashlen`` characters drawn from
    ``alphabet``. Empty string-like input gets a fixed sentinel hash.
    """
    if alphabet is None:
        alphabet = ALPHABET_27
    if hashlen is None:
        hashlen = HASH_LEN2
    if isinstance(data, stringlike) and len(data) == 0:
        # Special-case empty data with a constant, recognizable hash.
        return alphabet[0] * hashlen
    hasher = hashlib.sha512()
    _update_hasher(hasher, data)
    # 128-character hex string -> shorter string in the bigger base.
    hexstr = hasher.hexdigest()
    bigbase_str = convert_hexstr_to_bigbase(hexstr, alphabet,
                                            bigbase=len(alphabet))
    # Truncate to the requested length.
    return bigbase_str[:hashlen]
9,309
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L438-L498
[ "def", "cancelRealTimeBars", "(", "self", ",", "bars", ":", "RealTimeBarList", ")", ":", "self", ".", "client", ".", "cancelRealTimeBars", "(", "bars", ".", "reqId", ")", "self", ".", "wrapper", ".", "endSubscription", "(", "bars", ")" ]
r Packs a long hexstr into a shorter length string with a larger base
def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
    """Pack a long hex string into a shorter string with a larger base.

    Standard repeated-division base conversion; a leading '-' in
    ``hexstr`` is preserved on the output.
    """
    x = int(hexstr, 16)  # first convert to base 16
    if x == 0:
        return '0'
    sign = 1 if x > 0 else -1
    x *= sign
    digits = []
    while x:
        x, remainder = divmod(x, bigbase)
        digits.append(alphabet[remainder])
    if sign < 0:
        digits.append('-')
    return ''.join(reversed(digits))
9,310
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L766-L806
[ "def", "grading_status_text", "(", "self", ")", ":", "if", "self", ".", "assignment", ".", "is_graded", "(", ")", ":", "if", "self", ".", "is_grading_finished", "(", ")", ":", "return", "str", "(", "'Yes ({0})'", ".", "format", "(", "self", ".", "grading", ")", ")", "else", ":", "return", "str", "(", "'No'", ")", "else", ":", "return", "str", "(", "'Not graded'", ")" ]
For better hashes, use hasher=hashlib.sha256 and keep stride=1.
def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
                  hexdigest=False):
    """Hash a file's contents in ``blocksize`` chunks.

    With ``stride > 1``, ``stride - 1`` blocks are skipped after each
    block read (a cheap sampling hash). For better hashes use
    hasher=hashlib.sha256 and keep stride=1. Returns the raw digest, or
    the hex digest when ``hexdigest`` is True.
    """
    if hasher is None:
        hasher = hashlib.sha1()
    with open(fpath, 'rb') as file_:
        chunk = file_.read(blocksize)
        while chunk:
            hasher.update(chunk)
            if stride > 1:
                # Skip ahead over the blocks we are not sampling.
                file_.seek(blocksize * (stride - 1), 1)
            chunk = file_.read(blocksize)
    return hasher.hexdigest() if hexdigest else hasher.digest()
9,311
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L829-L897
[ "def", "parse_fields_whois", "(", "self", ",", "response", ")", ":", "try", ":", "temp", "=", "response", ".", "split", "(", "'|'", ")", "# Parse out the ASN information.", "ret", "=", "{", "'asn_registry'", ":", "temp", "[", "4", "]", ".", "strip", "(", "' \\n'", ")", "}", "if", "ret", "[", "'asn_registry'", "]", "not", "in", "self", ".", "rir_whois", ".", "keys", "(", ")", ":", "raise", "ASNRegistryError", "(", "'ASN registry {0} is not known.'", ".", "format", "(", "ret", "[", "'asn_registry'", "]", ")", ")", "ret", "[", "'asn'", "]", "=", "temp", "[", "0", "]", ".", "strip", "(", "' \\n'", ")", "ret", "[", "'asn_cidr'", "]", "=", "temp", "[", "2", "]", ".", "strip", "(", "' \\n'", ")", "ret", "[", "'asn_country_code'", "]", "=", "temp", "[", "3", "]", ".", "strip", "(", "' \\n'", ")", ".", "upper", "(", ")", "ret", "[", "'asn_date'", "]", "=", "temp", "[", "5", "]", ".", "strip", "(", "' \\n'", ")", "ret", "[", "'asn_description'", "]", "=", "temp", "[", "6", "]", ".", "strip", "(", "' \\n'", ")", "except", "ASNRegistryError", ":", "raise", "except", "Exception", "as", "e", ":", "raise", "ASNParseError", "(", "'Parsing failed for \"{0}\" with exception: {1}.'", "''", ".", "format", "(", "response", ",", "e", ")", "[", ":", "100", "]", ")", "return", "ret" ]
Creates a uuid from the hash of a file
def get_file_uuid(fpath, hasher=None, stride=1):
    """Create a uuid from the hash of a file."""
    if hasher is None:
        hasher = hashlib.sha1()  # 20 bytes of output
        # hasher = hashlib.sha256()  # 32 bytes of output
    digest = get_file_hash(fpath, hasher=hasher, stride=stride)
    # UUIDs require exactly 16 bytes; truncate the 20-byte sha1 digest.
    return uuid.UUID(bytes=digest[0:16])
9,312
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L968-L979
[ "def", "set_port_info", "(", "self", ",", "webport", ",", "mediaport", ",", "httpsport", ",", "onvifport", ",", "callback", "=", "None", ")", ":", "params", "=", "{", "'webPort'", ":", "webport", ",", "'mediaPort'", ":", "mediaport", ",", "'httpsPort'", ":", "httpsport", ",", "'onvifPort'", ":", "onvifport", ",", "}", "return", "self", ".", "execute_command", "(", "'setPortInfo'", ",", "params", ",", "callback", "=", "callback", ")" ]
Creates a uuid that specifies a group of UUIDS
def combine_uuids(uuids, ordered=True, salt=''):
    """Create a uuid that specifies a group of UUIDs.

    Empty input maps to the zero uuid and a single uuid maps to itself;
    otherwise the member uuids (sorted when ``ordered`` is False, so
    order does not matter) are concatenated with a salted prefix and
    re-hashed into a new uuid.
    """
    if len(uuids) == 0:
        return get_zero_uuid()
    if len(uuids) == 1:
        return uuids[0]
    if not ordered:
        uuids = sorted(uuids)
    sep_str = '-'
    sep_byte = six.binary_type(six.b(sep_str))
    pref = six.binary_type(six.b('{}{}{}'.format(salt, sep_str, len(uuids))))
    combined_bytes = pref + sep_byte.join(u.bytes for u in uuids)
    return hashable_to_uuid(combined_bytes)
9,313
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L1028-L1087
[ "def", "_print_download_progress_msg", "(", "self", ",", "msg", ",", "flush", "=", "False", ")", ":", "if", "self", ".", "_interactive_mode", "(", ")", ":", "# Print progress message to console overwriting previous progress", "# message.", "self", ".", "_max_prog_str", "=", "max", "(", "self", ".", "_max_prog_str", ",", "len", "(", "msg", ")", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r%-{}s\"", ".", "format", "(", "self", ".", "_max_prog_str", ")", "%", "msg", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "flush", ":", "print", "(", "\"\\n\"", ")", "else", ":", "# Interactive progress tracking is disabled. Print progress to the", "# standard TF log.", "logging", ".", "info", "(", "msg", ")" ]
Start a new connection and manage it from a new greenlet .
def __start_connection(self, context, node, ccallbacks=None):
    """Start a new connection and manage it from a new greenlet.

    Raises EnvironmentError when the connection does not finish
    negotiating within the configured timeout.
    """
    _logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
                  context, node)
    conn = nsq.connection.Connection(
        context, node, self.__identify, self.__message_handler,
        self.__quit_ev, ccallbacks,
        ignore_quit=self.__connection_ignore_quit)
    greenlet = gevent.spawn(conn.run)
    # Wait for the greenlet to finish negotiating the connection.
    timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
    if conn.connected_ev.wait(timeout_s) is False:
        _logger.error("New connection to server [%s] timed-out. Cleaning-"
                      "up thread.", node)
        greenlet.kill()
        greenlet.join()
        # We'll try again on the next audit.
        raise EnvironmentError("Connection to server [%s] failed." %
                               (node,))
    self.__connections.append((node, conn, greenlet))
9,314
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L44-L76
[ "def", "populateFromDirectory", "(", "self", ",", "vcfDirectory", ")", ":", "pattern", "=", "os", ".", "path", ".", "join", "(", "vcfDirectory", ",", "\"*.vcf.gz\"", ")", "dataFiles", "=", "[", "]", "indexFiles", "=", "[", "]", "for", "vcfFile", "in", "glob", ".", "glob", "(", "pattern", ")", ":", "dataFiles", ".", "append", "(", "vcfFile", ")", "indexFiles", ".", "append", "(", "vcfFile", "+", "\".tbi\"", ")", "self", ".", "populateFromFile", "(", "dataFiles", ",", "indexFiles", ")" ]
Monitor state of all connections and utility of all servers .
def __audit_connections(self, ccallbacks):
    """Monitor the state of all connections and the utility of all servers.

    Loops until the quit event is set: prunes dead connections, warns
    about connections no longer being advertised, reconnects advertised
    servers without a connection, and shuts the client down when no
    servers remain.
    """
    while self.__quit_ev.is_set() is False:
        # Remove any connections that are dead. FIX: the original used
        # filter() with a tuple-parameter lambda, which is a SyntaxError
        # on Python 3, and on Python 3 filter() is a one-shot iterator
        # that could not be re-iterated below. A list comprehension is
        # behaviorally identical on Python 2 and correct on Python 3.
        self.__connections = [(n, c, g)
                              for (n, c, g) in self.__connections
                              if not g.ready()]

        connected_node_couplets_s = set([
            (c.managed_connection.context, node)
            for (node, c, g) in self.__connections])

        # Warn if there are any still-active connections that are no longer
        # being advertised (probably where we were given some lookup servers
        # that have dropped this particular *nsqd* server).
        lingering_nodes_s = connected_node_couplets_s - self.__node_couplets_s
        if lingering_nodes_s:
            _logger.warning("Server(s) are connected but no longer "
                            "advertised: %s", lingering_nodes_s)

        # Connect any servers that don't currently have a connection.
        unused_nodes_s = self.__node_couplets_s - connected_node_couplets_s
        for (context, node) in unused_nodes_s:
            _logger.info("Trying to connect unconnected server: "
                         "CONTEXT=[%s] NODE=[%s]", context, node)
            self.__start_connection(context, node, ccallbacks)
        else:
            # (for/else: runs whenever the loop above is not broken out of.)
            # Are there both no unused servers and no connected servers?
            if not connected_node_couplets_s:
                _logger.error("All servers have gone away. Stopping "
                              "client.")
                # Clear our list of servers, and squash the "no servers!"
                # error so that we can shut things down in the right order.
                try:
                    self.set_servers([])
                except EnvironmentError:
                    pass
                self.__quit_ev.set()
                return

        # Sleep in small steps so a quit event interrupts the wait quickly.
        interval_s = nsq.config.client.GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S
        audit_wait_s = float(nsq.config.client.CONNECTION_AUDIT_WAIT_S)
        while audit_wait_s > 0 and self.__quit_ev.is_set() is False:
            gevent.sleep(interval_s)
            audit_wait_s -= interval_s
9,315
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L111-L170
[ "def", "addBiosample", "(", "self", ")", ":", "self", ".", "_openRepo", "(", ")", "dataset", "=", "self", ".", "_repo", ".", "getDatasetByName", "(", "self", ".", "_args", ".", "datasetName", ")", "biosample", "=", "bio_metadata", ".", "Biosample", "(", "dataset", ",", "self", ".", "_args", ".", "biosampleName", ")", "biosample", ".", "populateFromJson", "(", "self", ".", "_args", ".", "biosample", ")", "self", ".", "_updateRepo", "(", "self", ".", "_repo", ".", "insertBiosample", ",", "biosample", ")" ]
Wait for all connections to close. There are no side-effects here; we just want to try to leave after everything has closed, in general.
def __join_connections(self):
    """Wait for all connections to close.

    There are no side-effects here; we just try to leave after
    everything has closed, and log an error if the grace period expires
    with connections still open.
    """
    interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S
    graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S
    graceful = False
    while graceful_wait_s > 0:
        if not self.__connections:
            break
        still_open = [c.is_connected for (n, c, g) in self.__connections]
        if any(still_open) is False:
            graceful = True
            break
        # Give the greenlets periodic control so they can finish up.
        gevent.sleep(interval_s)
        graceful_wait_s -= interval_s
    if graceful is False:
        remaining = [c for (n, c, g) in self.__connections
                     if c.is_connected]
        _logger.error("We were told to terminate, but not all "
                      "connections were stopped: [%s]", remaining)
9,316
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L172-L200
[ "def", "DbGetDevicePropertyHist", "(", "self", ",", "argin", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"In DbGetDevicePropertyHist()\"", ")", "device_name", "=", "argin", "[", "0", "]", "prop_name", "=", "argin", "[", "1", "]", "return", "self", ".", "db", ".", "get_device_property_hist", "(", "device_name", ",", "prop_name", ")" ]
This runs as the main connection management greenlet .
def __manage_connections(self, ccallbacks=None):
    """Body of the main connection-management greenlet."""
    _logger.info("Running client.")

    # Create message-handler.
    if self.__message_handler_cls is not None:
        # TODO(dustin): Move this to another thread if we can mix
        # multithreading with coroutines.
        self.__message_handler = self.__message_handler_cls(
            self.__election, ccallbacks)

    # Spawn the initial connections to all of the servers.
    for (context, node) in self.__node_couplets_s:
        self.__start_connection(context, node, ccallbacks)

    # Block until at least one server connection has been established.
    self.__wait_for_one_server_connection()

    # Signal that the caller may proceed.
    self.__is_alive = True
    self.__ready_ev.set()

    # Maintain all connections; returns once the quit event is set.
    self.__audit_connections(ccallbacks)

    # The connections respond to the same quit event; wait for them to
    # close.
    self.__join_connections()

    _logger.info("Connection management has stopped.")
    self.__is_alive = False
9,317
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L202-L237
[ "def", "read_metadata", "(", "self", ",", "f", ",", "objects", ",", "previous_segment", "=", "None", ")", ":", "if", "not", "self", ".", "toc", "[", "\"kTocMetaData\"", "]", ":", "try", ":", "self", ".", "ordered_objects", "=", "previous_segment", ".", "ordered_objects", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"kTocMetaData is not set for segment but \"", "\"there is no previous segment\"", ")", "self", ".", "calculate_chunks", "(", ")", "return", "if", "not", "self", ".", "toc", "[", "\"kTocNewObjList\"", "]", ":", "# In this case, there can be a list of new objects that", "# are appended, or previous objects can also be repeated", "# if their properties change", "self", ".", "ordered_objects", "=", "[", "copy", "(", "o", ")", "for", "o", "in", "previous_segment", ".", "ordered_objects", "]", "log", ".", "debug", "(", "\"Reading metadata at %d\"", ",", "f", ".", "tell", "(", ")", ")", "# First four bytes have number of objects in metadata", "num_objects", "=", "types", ".", "Int32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "for", "obj", "in", "range", "(", "num_objects", ")", ":", "# Read the object path", "object_path", "=", "types", ".", "String", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "# If this is a new segment for an existing object,", "# reuse the existing object, otherwise,", "# create a new object and add it to the object dictionary", "if", "object_path", "in", "objects", ":", "obj", "=", "objects", "[", "object_path", "]", "else", ":", "obj", "=", "TdmsObject", "(", "object_path", ",", "self", ".", "tdms_file", ")", "objects", "[", "object_path", "]", "=", "obj", "# Add this segment object to the list of segment objects,", "# re-using any properties from previous segments.", "updating_existing", "=", "False", "if", "not", "self", ".", "toc", "[", "\"kTocNewObjList\"", "]", ":", "# Search for the same object from the previous segment", "# object list.", "obj_index", "=", "[", "i", "for", "i", 
",", "o", "in", "enumerate", "(", "self", ".", "ordered_objects", ")", "if", "o", ".", "tdms_object", "is", "obj", "]", "if", "len", "(", "obj_index", ")", ">", "0", ":", "updating_existing", "=", "True", "log", ".", "debug", "(", "\"Updating object in segment list\"", ")", "obj_index", "=", "obj_index", "[", "0", "]", "segment_obj", "=", "self", ".", "ordered_objects", "[", "obj_index", "]", "if", "not", "updating_existing", ":", "if", "obj", ".", "_previous_segment_object", "is", "not", "None", ":", "log", ".", "debug", "(", "\"Copying previous segment object\"", ")", "segment_obj", "=", "copy", "(", "obj", ".", "_previous_segment_object", ")", "else", ":", "log", ".", "debug", "(", "\"Creating a new segment object\"", ")", "segment_obj", "=", "_TdmsSegmentObject", "(", "obj", ",", "self", ".", "endianness", ")", "self", ".", "ordered_objects", ".", "append", "(", "segment_obj", ")", "# Read the metadata for this object, updating any", "# data structure information and properties.", "segment_obj", ".", "_read_metadata", "(", "f", ")", "obj", ".", "_previous_segment_object", "=", "segment_obj", "self", ".", "calculate_chunks", "(", ")" ]
Set the current collection of servers . The entries are 2 - tuples of contexts and nodes .
def set_servers(self, node_couplets):
    """Set the current collection of servers.

    The entries are 2-tuples of contexts and nodes. Raises
    EnvironmentError when the new collection is empty.
    """
    incoming_s = set(node_couplets)
    if incoming_s != self.__node_couplets_s:
        _logger.info("Servers have changed. NEW: %s REMOVED: %s",
                     incoming_s - self.__node_couplets_s,
                     self.__node_couplets_s - incoming_s)
    # Since no servers means no connection greenlets, and the discover
    # greenlet is technically scheduled and not running between
    # invocations, this should successfully terminate the process.
    if not incoming_s:
        raise EnvironmentError("No servers available.")
    self.__node_couplets_s = incoming_s
9,318
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L239-L257
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n 
/ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", "destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
Establish and maintain connections .
def start ( self , ccallbacks = None ) : self . __manage_g = gevent . spawn ( self . __manage_connections , ccallbacks ) self . __ready_ev . wait ( )
9,319
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L268-L272
[ "def", "download_unicodedata", "(", "version", ",", "output", "=", "HOME", ",", "no_zip", "=", "False", ")", ":", "files", "=", "[", "'UnicodeData.txt'", ",", "'Scripts.txt'", ",", "'Blocks.txt'", ",", "'PropList.txt'", ",", "'DerivedCoreProperties.txt'", ",", "'DerivedNormalizationProps.txt'", ",", "'CompositionExclusions.txt'", ",", "'PropertyValueAliases.txt'", ",", "'PropertyAliases.txt'", ",", "'EastAsianWidth.txt'", ",", "'LineBreak.txt'", ",", "'HangulSyllableType.txt'", ",", "'DerivedAge.txt'", ",", "'auxiliary/WordBreakProperty.txt'", ",", "'auxiliary/SentenceBreakProperty.txt'", ",", "'auxiliary/GraphemeBreakProperty.txt'", ",", "'extracted/DerivedDecompositionType.txt'", ",", "'extracted/DerivedNumericType.txt'", ",", "'extracted/DerivedNumericValues.txt'", ",", "'extracted/DerivedJoiningType.txt'", ",", "'extracted/DerivedJoiningGroup.txt'", ",", "'extracted/DerivedCombiningClass.txt'", "]", "files", ".", "append", "(", "'ScriptExtensions.txt'", ")", "if", "PY35", ":", "files", ".", "append", "(", "'IndicPositionalCategory.txt'", ")", "else", ":", "files", ".", "append", "(", "'IndicMatraCategory.txt'", ")", "files", ".", "append", "(", "'IndicSyllabicCategory.txt'", ")", "if", "PY34", ":", "files", ".", "append", "(", "'BidiBrackets.txt'", ")", "if", "PY37", ":", "files", ".", "append", "(", "'VerticalOrientation.txt'", ")", "http_url", "=", "'http://www.unicode.org/Public/%s/ucd/'", "%", "version", "ftp_url", "=", "'ftp://ftp.unicode.org/Public/%s/ucd/'", "%", "version", "destination", "=", "os", ".", "path", ".", "join", "(", "output", ",", "'unicodedata'", ",", "version", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "destination", ")", ":", "os", ".", "makedirs", "(", "destination", ")", "zip_data", "=", "not", "no_zip", "for", "f", "in", "files", ":", "file_location", "=", "os", ".", "path", ".", "join", "(", "destination", ",", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "retrieved", "=", "False", "if", "not", "os", ".", 
"path", ".", "exists", "(", "file_location", ")", ":", "for", "url", "in", "(", "ftp_url", ",", "http_url", ")", ":", "furl", "=", "url", "+", "f", "try", ":", "print", "(", "'Downloading: %s --> %s'", "%", "(", "furl", ",", "file_location", ")", ")", "response", "=", "urlopen", "(", "furl", ",", "timeout", "=", "30", ")", "data", "=", "response", ".", "read", "(", ")", "except", "Exception", ":", "print", "(", "'Failed: %s'", "%", "url", ")", "continue", "with", "open", "(", "file_location", ",", "'w'", ")", "as", "uf", ":", "uf", ".", "write", "(", "data", ".", "decode", "(", "'utf-8'", ")", ")", "retrieved", "=", "True", "break", "if", "not", "retrieved", ":", "print", "(", "'Failed to acquire all needed Unicode files!'", ")", "break", "else", ":", "retrieved", "=", "True", "print", "(", "'Skipping: found %s'", "%", "file_location", ")", "if", "not", "retrieved", ":", "zip_data", "=", "False", "break", "if", "zip_data", "and", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "output", ",", "'unicodedata'", ",", "'%s.zip'", "%", "version", ")", ")", ":", "zip_unicode", "(", "output", ",", "version", ")" ]
Stop all of the connections .
def stop ( self ) : _logger . debug ( "Emitting quit signal for connections." ) self . __quit_ev . set ( ) _logger . info ( "Waiting for connection manager to stop." ) self . __manage_g . join ( )
9,320
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L274-L281
[ "def", "download_storyitem", "(", "self", ",", "item", ":", "StoryItem", ",", "target", ":", "str", ")", "->", "bool", ":", "date_local", "=", "item", ".", "date_local", "dirname", "=", "_PostPathFormatter", "(", "item", ")", ".", "format", "(", "self", ".", "dirname_pattern", ",", "target", "=", "target", ")", "filename", "=", "dirname", "+", "'/'", "+", "self", ".", "format_filename", "(", "item", ",", "target", "=", "target", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "exist_ok", "=", "True", ")", "downloaded", "=", "False", "if", "not", "item", ".", "is_video", "or", "self", ".", "download_video_thumbnails", "is", "True", ":", "url", "=", "item", ".", "url", "downloaded", "=", "self", ".", "download_pic", "(", "filename", "=", "filename", ",", "url", "=", "url", ",", "mtime", "=", "date_local", ")", "if", "item", ".", "is_video", "and", "self", ".", "download_videos", "is", "True", ":", "downloaded", "|=", "self", ".", "download_pic", "(", "filename", "=", "filename", ",", "url", "=", "item", ".", "video_url", ",", "mtime", "=", "date_local", ")", "# Save caption if desired", "metadata_string", "=", "_ArbitraryItemFormatter", "(", "item", ")", ".", "format", "(", "self", ".", "storyitem_metadata_txt_pattern", ")", ".", "strip", "(", ")", "if", "metadata_string", ":", "self", ".", "save_caption", "(", "filename", "=", "filename", ",", "mtime", "=", "item", ".", "date_local", ",", "caption", "=", "metadata_string", ")", "# Save metadata as JSON if desired.", "if", "self", ".", "save_metadata", "is", "not", "False", ":", "self", ".", "save_metadata_json", "(", "filename", ",", "item", ")", "self", ".", "context", ".", "log", "(", ")", "return", "downloaded" ]
Function for running from a script or shell .
def run ( file_path , include_dirs = [ ] , dlems = False , nogui = False ) : import argparse args = argparse . Namespace ( ) args . lems_file = file_path args . I = include_dirs args . dlems = dlems args . nogui = nogui main ( args = args )
9,321
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/run.py#L44-L54
[ "def", "write_union", "(", "fo", ",", "datum", ",", "schema", ")", ":", "if", "isinstance", "(", "datum", ",", "tuple", ")", ":", "(", "name", ",", "datum", ")", "=", "datum", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "schema_name", "=", "candidate", "[", "'name'", "]", "else", ":", "schema_name", "=", "candidate", "if", "name", "==", "schema_name", ":", "break", "else", ":", "msg", "=", "'provided union type name %s not found in schema %s'", "%", "(", "name", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "else", ":", "pytype", "=", "type", "(", "datum", ")", "best_match_index", "=", "-", "1", "most_fields", "=", "-", "1", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "validate", "(", "datum", ",", "candidate", ",", "raise_errors", "=", "False", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "fields", "=", "len", "(", "candidate", "[", "'fields'", "]", ")", "if", "fields", ">", "most_fields", ":", "best_match_index", "=", "index", "most_fields", "=", "fields", "else", ":", "best_match_index", "=", "index", "break", "if", "best_match_index", "<", "0", ":", "msg", "=", "'%r (type %s) do not match %s'", "%", "(", "datum", ",", "pytype", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "index", "=", "best_match_index", "# write data", "write_long", "(", "fo", ",", "index", ")", "write_data", "(", "fo", ",", "datum", ",", "schema", "[", "index", "]", ")" ]
Connect the server . We expect this to implement backoff and all connection logistics for servers that were discovered via a lookup node .
def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to discovered node: [%s]" , self . server_host ) stop_epoch = time . time ( ) + nsq . config . client . MAXIMUM_CONNECT_ATTEMPT_PERIOD_S timeout_s = nsq . config . client . INITIAL_CONNECT_FAIL_WAIT_S backoff_rate = nsq . config . client . CONNECT_FAIL_WAIT_BACKOFF_RATE while stop_epoch >= time . time ( ) and nice_quit_ev . is_set ( ) is False : try : c = self . primitive_connect ( ) except gevent . socket . error : _logger . exception ( "Could not connect to discovered server: " "[%s]" , self . server_host ) else : _logger . info ( "Discovered server-node connected: [%s]" , self . server_host ) return c timeout_s = min ( timeout_s * backoff_rate , nsq . config . client . MAXIMUM_CONNECT_FAIL_WAIT_S ) _logger . info ( "Waiting for (%d) seconds before reconnecting." , timeout_s ) gevent . sleep ( timeout_s ) raise nsq . exceptions . NsqConnectGiveUpError ( "Could not connect to the nsqlookupd server: [%s]" % ( self . server_host , ) )
9,322
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/node.py#L62-L98
[ "def", "keep_frameshifts", "(", "mut_df", ",", "indel_len_col", "=", "True", ")", ":", "# keep only frameshifts", "mut_df", "=", "mut_df", "[", "is_frameshift_annotation", "(", "mut_df", ")", "]", "if", "indel_len_col", ":", "# calculate length", "mut_df", ".", "loc", "[", ":", ",", "'indel len'", "]", "=", "compute_indel_length", "(", "mut_df", ")", "return", "mut_df" ]
Connect the server . We expect this to implement connection logistics for servers that were explicitly prescribed to us .
def connect ( self , nice_quit_ev ) : _logger . debug ( "Connecting to explicit server node: [%s]" , self . server_host ) # According to the docs, a nsqlookupd-discovered server should fall-out # of the lineup immediately if it fails. If it comes back, nsqlookupd # will give it back to us. try : c = self . primitive_connect ( ) except gevent . socket . error : _logger . exception ( "Could not connect to explicit server: [%s]" , self . server_host ) raise nsq . exceptions . NsqConnectGiveUpError ( "Could not connect to the nsqd server: [%s]" % ( self . server_host , ) ) _logger . info ( "Explicit server-node connected: [%s]" , self . server_host ) return c
9,323
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/node.py#L107-L130
[ "def", "setOverlayAlpha", "(", "self", ",", "ulOverlayHandle", ",", "fAlpha", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayAlpha", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fAlpha", ")", "return", "result" ]
No percolator XML for protein tables
def prepare ( self ) : self . target = self . fn self . targetheader = reader . get_tsv_header ( self . target ) self . decoyheader = reader . get_tsv_header ( self . decoyfn )
9,324
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/prottable/fdr.py#L32-L36
[ "def", "warp", "(", "self", ",", "warp_matrix", ",", "img", ",", "iflag", "=", "cv2", ".", "INTER_NEAREST", ")", ":", "height", ",", "width", "=", "img", ".", "shape", "[", ":", "2", "]", "warped_img", "=", "np", ".", "zeros_like", "(", "img", ",", "dtype", "=", "img", ".", "dtype", ")", "# Check if image to warp is 2D or 3D. If 3D need to loop over channels", "if", "(", "self", ".", "interpolation_type", "==", "InterpolationType", ".", "LINEAR", ")", "or", "img", ".", "ndim", "==", "2", ":", "warped_img", "=", "cv2", ".", "warpAffine", "(", "img", ".", "astype", "(", "np", ".", "float32", ")", ",", "warp_matrix", ",", "(", "width", ",", "height", ")", ",", "flags", "=", "iflag", ")", ".", "astype", "(", "img", ".", "dtype", ")", "elif", "img", ".", "ndim", "==", "3", ":", "for", "idx", "in", "range", "(", "img", ".", "shape", "[", "-", "1", "]", ")", ":", "warped_img", "[", "...", ",", "idx", "]", "=", "cv2", ".", "warpAffine", "(", "img", "[", "...", ",", "idx", "]", ".", "astype", "(", "np", ".", "float32", ")", ",", "warp_matrix", ",", "(", "width", ",", "height", ")", ",", "flags", "=", "iflag", ")", ".", "astype", "(", "img", ".", "dtype", ")", "else", ":", "raise", "ValueError", "(", "'Image has incorrect number of dimensions: {}'", ".", "format", "(", "img", ".", "ndim", ")", ")", "return", "warped_img" ]
Try to obtain token from all end - points that were ever used to serve the token . If the request returns 404 NOT FOUND retry with older version of the URL .
def obtain_token ( self ) : token_end_points = ( 'token/obtain' , 'obtain-token' , 'obtain_token' ) for end_point in token_end_points : try : return self . auth [ end_point ] . _ ( page_size = None ) [ 'token' ] except BeanBagException as e : if e . response . status_code != 404 : raise raise Exception ( 'Could not obtain token from any known URL.' )
9,325
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/__init__.py#L195-L210
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "logging", ".", "debug", "(", "\"comparing %r and %r...\"", ",", "self", ".", "stripped", ",", "other", ".", "stripped", ")", "ratio", "=", "SequenceMatcher", "(", "a", "=", "self", ".", "stripped", ",", "b", "=", "other", ".", "stripped", ")", ".", "ratio", "(", ")", "similarity", "=", "self", ".", "Similarity", "(", "ratio", ")", "return", "similarity" ]
Return an iterator with all pages of data . Return NoResultsError with response if there is unexpected data .
def results ( self , * args , * * kwargs ) : def worker ( ) : kwargs [ 'page' ] = 1 while True : response = self . client ( * args , * * kwargs ) if isinstance ( response , list ) : yield response break elif _is_page ( response ) : yield response [ 'results' ] if response [ 'next' ] : kwargs [ 'page' ] += 1 else : break else : raise NoResultsError ( response ) return itertools . chain . from_iterable ( worker ( ) )
9,326
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/__init__.py#L331-L352
[ "def", "compare", "(", "left", ",", "right", ")", ":", "with", "open_zip", "(", "left", ")", "as", "l", ":", "with", "open_zip", "(", "right", ")", "as", "r", ":", "return", "compare_zips", "(", "l", ",", "r", ")" ]
Returns a headerfield dict for isobaric quant channels . Channels are taken from DB and there isn t a pool - independent version of this yet
def get_isoquant_fields ( pqdb = False , poolnames = False ) : # FIXME when is a None database passed? if pqdb is None : return { } try : channels_psms = pqdb . get_isoquant_amountpsms_channels ( ) except OperationalError : # FIXME what does this catch? return { } quantheader , psmsheader = OrderedDict ( ) , OrderedDict ( ) for chan_name , amnt_psms_name in channels_psms : quantheader [ chan_name ] = poolnames if amnt_psms_name : psmsheader [ amnt_psms_name ] = poolnames quantheader . update ( psmsheader ) return quantheader
9,327
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/headers/base.py#L83-L100
[ "def", "Find", "(", "self", ",", "find_specs", "=", "None", ")", ":", "if", "not", "find_specs", ":", "find_specs", "=", "[", "FindSpec", "(", ")", "]", "registry_key", "=", "self", ".", "_win_registry", ".", "GetRootKey", "(", ")", "for", "matching_path", "in", "self", ".", "_FindInKey", "(", "registry_key", ",", "find_specs", ",", "0", ")", ":", "yield", "matching_path" ]
Wait for events and print them to stdout .
def watch_for_events ( ) : fd = inotify . init ( ) try : wd = inotify . add_watch ( fd , '/tmp' , inotify . IN_CLOSE_WRITE ) while True : for event in inotify . get_events ( fd ) : print ( "event:" , event . name , event . get_mask_description ( ) ) finally : os . close ( fd )
9,328
https://github.com/trendels/gevent_inotifyx/blob/b1e531616d150e86b13aeca450a61c66f9bbc855/example.py#L16-L25
[ "def", "load", "(", "self", ",", "container_name", ",", "slot", ",", "label", "=", "None", ",", "share", "=", "False", ")", ":", "if", "share", ":", "raise", "NotImplementedError", "(", "\"Sharing not supported\"", ")", "try", ":", "name", "=", "self", ".", "LW_TRANSLATION", "[", "container_name", "]", "except", "KeyError", ":", "if", "container_name", "in", "self", ".", "LW_NO_EQUIVALENT", ":", "raise", "NotImplementedError", "(", "\"Labware {} is not supported\"", ".", "format", "(", "container_name", ")", ")", "elif", "container_name", "in", "(", "'magdeck'", ",", "'tempdeck'", ")", ":", "raise", "NotImplementedError", "(", "\"Module load not yet implemented\"", ")", "else", ":", "name", "=", "container_name", "return", "self", ".", "_ctx", ".", "load_labware_by_name", "(", "name", ",", "slot", ",", "label", ")" ]
Formats the body using markdown .
def format_body ( self , description , sys_info = None , traceback = None ) : body = BODY_ITEM_TEMPLATE % { 'name' : 'Description' , 'value' : description } if traceback : traceback = '\n' . join ( traceback . splitlines ( ) [ - NB_LINES_MAX : ] ) body += BODY_ITEM_TEMPLATE % { 'name' : 'Traceback' , 'value' : '```\n%s\n```' % traceback } if sys_info : sys_info = '- %s' % '\n- ' . join ( sys_info . splitlines ( ) ) body += BODY_ITEM_TEMPLATE % { 'name' : 'System information' , 'value' : sys_info } return body
9,329
https://github.com/ColinDuquesnoy/QCrash/blob/775e1b15764e2041a8f9a08bea938e4d6ce817c7/qcrash/formatters/markdown.py#L21-L43
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
List EC2 name and public and private ip address
def list ( ) : for node in env . nodes : print "%s (%s, %s)" % ( node . tags [ "Name" ] , node . ip_address , node . private_ip_address )
9,330
https://github.com/garethr/cloth/blob/b50c7cd6b03f49a931ee55ec94212760c50694a9/src/cloth/tasks.py#L40-L44
[ "def", "main", "(", ")", ":", "# Parse the arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Convert MSBuild XML to JSON format'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--toolchain'", ",", "help", "=", "'The name of the toolchain'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "help", "=", "'The output directory'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--overwrite'", ",", "help", "=", "'Whether previously output should be overwritten'", ",", "dest", "=", "'overwrite'", ",", "action", "=", "'store_true'", ")", "parser", ".", "set_defaults", "(", "overwrite", "=", "False", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--debug'", ",", "help", "=", "\"Debug tool output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "DEBUG", ",", "default", "=", "logging", ".", "WARNING", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "help", "=", "\"Verbose output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "INFO", ")", "parser", ".", "add_argument", "(", "'input'", ",", "help", "=", "'The input files'", ",", "nargs", "=", "'+'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "toolchain", "=", "args", ".", "toolchain", "logging", ".", "basicConfig", "(", "level", "=", "args", ".", "loglevel", ")", "logging", ".", "info", "(", "'Creating %s toolchain files'", ",", "toolchain", ")", "values", "=", "{", "}", "# Iterate through the inputs", "for", "input", "in", "args", ".", "input", ":", "input", "=", "__get_path", "(", "input", ")", "read_msbuild_xml", "(", "input", ",", "values", ")", "# Determine if the output directory needs to be created", "output_dir", "=", "__get_path", "(", "args", ".", "output", ")", "if", "not", "os", ".", "path", ".", 
"exists", "(", "output_dir", ")", ":", "os", ".", "mkdir", "(", "output_dir", ")", "logging", ".", "info", "(", "'Created output directory %s'", ",", "output_dir", ")", "for", "key", ",", "value", "in", "values", ".", "items", "(", ")", ":", "output_path", "=", "__output_path", "(", "toolchain", ",", "key", ",", "output_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "output_path", ")", "and", "not", "args", ".", "overwrite", ":", "logging", ".", "info", "(", "'Comparing previous output to current'", ")", "__merge_json_values", "(", "value", ",", "read_msbuild_json", "(", "output_path", ")", ")", "else", ":", "logging", ".", "info", "(", "'Original output will be overwritten'", ")", "logging", ".", "info", "(", "'Writing MS Build JSON file at %s'", ",", "output_path", ")", "__write_json_file", "(", "output_path", ",", "value", ")" ]
Quick search method that allows you to search for a game using only the title and the platform
def quick_search ( self , name , platform = None , sort_by = None , desc = True ) : if platform is None : query_filter = "name:{0}" . format ( name ) else : query_filter = "name:{0},platforms:{1}" . format ( name , platform ) search_params = { "filter" : query_filter } if sort_by is not None : self . _validate_sort_field ( sort_by ) if desc : direction = self . SORT_ORDER_DESCENDING else : direction = self . SORT_ORDER_ASCENDING search_params [ "sort" ] = "{0}:{1}" . format ( sort_by , direction ) response = self . _query ( search_params ) return response
9,331
https://github.com/steveYeah/PyBomb/blob/54045d74e642f8a1c4366c24bd6a330ae3da6257/pybomb/clients/games_client.py#L88-L118
[ "def", "normalize_variables", "(", "cls", ",", "variables", ")", ":", "# if the version is False, empty string, etc, throw it out", "if", "variables", ".", "get", "(", "'version'", ",", "True", ")", "in", "(", "''", ",", "False", ",", "'_NO_VERSION'", ",", "None", ")", ":", "del", "variables", "[", "'version'", "]", "return", "super", "(", "PackageResource", ",", "cls", ")", ".", "normalize_variables", "(", "variables", ")" ]
Sends the ping after the interval specified when initializing
def send_ping ( self , payload = None ) : yield from asyncio . sleep ( self . _interval ) self . _handler . send_ping ( payload = payload ) self . _start_timer ( payload = payload )
9,332
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/pinger.py#L38-L44
[ "def", "linear_rref", "(", "A", ",", "b", ",", "Matrix", "=", "None", ",", "S", "=", "None", ")", ":", "if", "Matrix", "is", "None", ":", "from", "sympy", "import", "Matrix", "if", "S", "is", "None", ":", "from", "sympy", "import", "S", "mat_rows", "=", "[", "_map2l", "(", "S", ",", "list", "(", "row", ")", "+", "[", "v", "]", ")", "for", "row", ",", "v", "in", "zip", "(", "A", ",", "b", ")", "]", "aug", "=", "Matrix", "(", "mat_rows", ")", "raug", ",", "pivot", "=", "aug", ".", "rref", "(", ")", "nindep", "=", "len", "(", "pivot", ")", "return", "raug", "[", ":", "nindep", ",", ":", "-", "1", "]", ",", "raug", "[", ":", "nindep", ",", "-", "1", "]" ]
Called when a pong is received . So the timer is cancelled
def pong_received ( self , payload = None ) : if self . _timer is not None : self . _timer . cancel ( ) self . _failures = 0 asyncio . async ( self . send_ping ( payload = payload ) )
9,333
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/pinger.py#L46-L53
[ "def", "set_key_value", "(", "self", ",", "value", ",", "store_type", "=", "PUBLIC_KEY_STORE_TYPE_BASE64", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "if", "PUBLIC_KEY_STORE_TYPE_HEX", "in", "value", ":", "self", ".", "set_key_value", "(", "value", "[", "PUBLIC_KEY_STORE_TYPE_HEX", "]", ",", "PUBLIC_KEY_STORE_TYPE_HEX", ")", "elif", "PUBLIC_KEY_STORE_TYPE_BASE64", "in", "value", ":", "self", ".", "set_key_value", "(", "value", "[", "PUBLIC_KEY_STORE_TYPE_BASE64", "]", ",", "PUBLIC_KEY_STORE_TYPE_BASE64", ")", "elif", "PUBLIC_KEY_STORE_TYPE_BASE85", "in", "value", ":", "self", ".", "set_key_value", "(", "value", "[", "PUBLIC_KEY_STORE_TYPE_BASE85", "]", ",", "PUBLIC_KEY_STORE_TYPE_BASE85", ")", "elif", "PUBLIC_KEY_STORE_TYPE_JWK", "in", "value", ":", "self", ".", "set_key_value", "(", "value", "[", "PUBLIC_KEY_STORE_TYPE_JWK", "]", ",", "PUBLIC_KEY_STORE_TYPE_JWK", ")", "elif", "PUBLIC_KEY_STORE_TYPE_PEM", "in", "value", ":", "self", ".", "set_key_value", "(", "value", "[", "PUBLIC_KEY_STORE_TYPE_PEM", "]", ",", "PUBLIC_KEY_STORE_TYPE_PEM", ")", "else", ":", "self", ".", "_value", "=", "value", "self", ".", "_store_type", "=", "store_type" ]
Check to see if var is an instance of known compatible types for type_
def is_comparable_type ( var , type_ ) : other_types = COMPARABLE_TYPES . get ( type_ , type_ ) return isinstance ( var , other_types )
9,334
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L104-L133
[ "def", "get", "(", "self", ")", ":", "_", ",", "accounts", "=", "BaseAccount", ".", "search", "(", ")", "if", "ROLE_ADMIN", "not", "in", "session", "[", "'user'", "]", ".", "roles", ":", "accounts", "=", "list", "(", "filter", "(", "lambda", "acct", ":", "acct", ".", "account_id", "in", "session", "[", "'accounts'", "]", ",", "accounts", ")", ")", "if", "accounts", ":", "return", "self", ".", "make_response", "(", "{", "'message'", ":", "None", ",", "'accounts'", ":", "[", "x", ".", "to_json", "(", "is_admin", "=", "ROLE_ADMIN", "in", "session", "[", "'user'", "]", ".", "roles", "or", "False", ")", "for", "x", "in", "accounts", "]", "}", ")", "else", ":", "return", "self", ".", "make_response", "(", "{", "'message'", ":", "'Unable to find any accounts'", ",", "'accounts'", ":", "None", "}", ",", "HTTP", ".", "NOT_FOUND", ")" ]
casts var to type and tries to be clever when var is a string
def smart_cast ( var , type_ ) : #if isinstance(type_, tuple): # for trytype in type_: # try: # return trytype(var) # except Exception: # pass # raise TypeError('Cant figure out type=%r' % (type_,)) if type_ is None or var is None : return var #if not isinstance(type_, six.string_types): try : if issubclass ( type_ , type ( None ) ) : return var except TypeError : pass if is_str ( var ) : if type_ in VALID_BOOL_TYPES : return bool_from_str ( var ) elif type_ is slice : args = [ None if len ( arg ) == 0 else int ( arg ) for arg in var . split ( ':' ) ] return slice ( * args ) elif type_ is list : # need more intelligent parsing here subvar_list = var . split ( ',' ) return [ smart_cast2 ( subvar ) for subvar in subvar_list ] elif isinstance ( type_ , six . string_types ) : if type_ == 'fuzzy_subset' : return fuzzy_subset ( var ) if type_ == 'eval' : return eval ( var , { } , { } ) #elif type_ == 'fuzzy_int': # return fuzzy_subset(var) else : raise NotImplementedError ( 'Uknown smart type_=%r' % ( type_ , ) ) return type_ ( var )
9,335
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L169-L260
[ "def", "increment", "(", "self", ",", "amount", "=", "1", ")", ":", "self", ".", "_primaryProgressBar", ".", "setValue", "(", "self", ".", "value", "(", ")", "+", "amount", ")", "QApplication", ".", "instance", "(", ")", ".", "processEvents", "(", ")" ]
converts a string into an argument to list_take
def fuzzy_subset ( str_ ) : if str_ is None : return str_ if ':' in str_ : return smart_cast ( str_ , slice ) if str_ . startswith ( '[' ) : return smart_cast ( str_ [ 1 : - 1 ] , list ) else : return smart_cast ( str_ , list )
9,336
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L331-L342
[ "def", "equality", "(", "self", ",", "other", ")", ":", "# Compare specified attributes for equality", "cname", "=", "self", ".", "__class__", ".", "__name__", "for", "aname", "in", "self", ".", "attributes", ":", "try", ":", "attr1", "=", "getattr", "(", "self", ",", "aname", ")", "attr2", "=", "getattr", "(", "other", ",", "aname", ")", "except", "AttributeError", "as", "error", ":", "logging", ".", "debug", "(", "\"%s.%s: %s\"", ",", "cname", ",", "aname", ",", "error", ")", "return", "False", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'=='", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ")", "eql", "=", "(", "attr1", "==", "attr2", ")", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'=='", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "eql", ")", "if", "not", "eql", ":", "return", "False", "return", "True" ]
lets some special strings be interpreted as ints
def fuzzy_int ( str_ ) : try : ret = int ( str_ ) return ret except Exception : # Parse comma separated values as ints if re . match ( r'\d*,\d*,?\d*' , str_ ) : return tuple ( map ( int , str_ . split ( ',' ) ) ) # Parse range values as ints if re . match ( r'\d*:\d*:?\d*' , str_ ) : return tuple ( range ( * map ( int , str_ . split ( ':' ) ) ) ) raise
9,337
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L345-L359
[ "def", "build", "(", "self", ",", "message", ")", ":", "context", "=", "None", "if", "message", ".", "message_type", "in", "[", "Types", ".", "CALL_REQ", ",", "Types", ".", "CALL_RES", "]", ":", "self", ".", "verify_message", "(", "message", ")", "context", "=", "self", ".", "build_context", "(", "message", ")", "# streaming message", "if", "message", ".", "flags", "==", "common", ".", "FlagsType", ".", "fragment", ":", "self", ".", "message_buffer", "[", "message", ".", "id", "]", "=", "context", "# find the incompleted stream", "num", "=", "0", "for", "i", ",", "arg", "in", "enumerate", "(", "context", ".", "argstreams", ")", ":", "if", "arg", ".", "state", "!=", "StreamState", ".", "completed", ":", "num", "=", "i", "break", "self", ".", "close_argstream", "(", "context", ",", "num", ")", "return", "context", "elif", "message", ".", "message_type", "in", "[", "Types", ".", "CALL_REQ_CONTINUE", ",", "Types", ".", "CALL_RES_CONTINUE", "]", ":", "context", "=", "self", ".", "message_buffer", ".", "get", "(", "message", ".", "id", ")", "if", "context", "is", "None", ":", "# missing call msg before continue msg", "raise", "FatalProtocolError", "(", "\"missing call message after receiving continue message\"", ",", "message", ".", "id", ",", ")", "# find the incompleted stream", "dst", "=", "0", "for", "i", ",", "arg", "in", "enumerate", "(", "context", ".", "argstreams", ")", ":", "if", "arg", ".", "state", "!=", "StreamState", ".", "completed", ":", "dst", "=", "i", "break", "try", ":", "self", ".", "verify_message", "(", "message", ")", "except", "InvalidChecksumError", "as", "e", ":", "context", ".", "argstreams", "[", "dst", "]", ".", "set_exception", "(", "e", ")", "raise", "src", "=", "0", "while", "src", "<", "len", "(", "message", ".", "args", ")", ":", "context", ".", "argstreams", "[", "dst", "]", ".", "write", "(", "message", ".", "args", "[", "src", "]", ")", "dst", "+=", "1", "src", "+=", "1", "if", "message", ".", "flags", "!=", "FlagsType", ".", 
"fragment", ":", "# get last fragment. mark it as completed", "assert", "(", "len", "(", "context", ".", "argstreams", ")", "==", "CallContinueMessage", ".", "max_args_num", ")", "self", ".", "message_buffer", ".", "pop", "(", "message", ".", "id", ",", "None", ")", "context", ".", "flags", "=", "FlagsType", ".", "none", "self", ".", "close_argstream", "(", "context", ",", "dst", "-", "1", ")", "return", "None", "elif", "message", ".", "message_type", "==", "Types", ".", "ERROR", ":", "context", "=", "self", ".", "message_buffer", ".", "pop", "(", "message", ".", "id", ",", "None", ")", "if", "context", "is", "None", ":", "log", ".", "info", "(", "'Unconsumed error %s'", ",", "message", ")", "return", "None", "else", ":", "error", "=", "TChannelError", ".", "from_code", "(", "message", ".", "code", ",", "description", "=", "message", ".", "description", ",", "tracing", "=", "context", ".", "tracing", ",", ")", "context", ".", "set_exception", "(", "error", ")", "return", "error", "else", ":", "return", "message" ]
Gets types accounting for numpy
def get_type ( var ) : if HAVE_NUMPY and isinstance ( var , np . ndarray ) : if _WIN32 : # This is a weird system specific error # https://github.com/numpy/numpy/issues/3667 type_ = var . dtype else : type_ = var . dtype . type elif HAVE_PANDAS and isinstance ( var , pd . Index ) : if _WIN32 : type_ = var . dtype else : type_ = var . dtype . type else : type_ = type ( var ) return type_
9,338
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L377-L403
[ "def", "gen_xlsx_category", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "XLSX_FILE", ")", ":", "pass", "else", ":", "return", "# 在分类中排序", "order_index", "=", "1", "all_cate_arr", "=", "[", "]", "for", "sheet_ranges", "in", "load_workbook", "(", "filename", "=", "XLSX_FILE", ")", ":", "kind_sig", "=", "str", "(", "sheet_ranges", "[", "'A1'", "]", ".", "value", ")", ".", "strip", "(", ")", "for", "row_num", "in", "range", "(", "3", ",", "10000", ")", ":", "# 父类", "a_cell_val", "=", "sheet_ranges", "[", "'A{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", "b_cell_val", "=", "sheet_ranges", "[", "'B{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", "c_cell_val", "=", "sheet_ranges", "[", "'C{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", "if", "a_cell_val", "or", "b_cell_val", "or", "c_cell_val", ":", "pass", "else", ":", "break", "if", "a_cell_val", "and", "a_cell_val", "!=", "''", ":", "cell_arr", "=", "a_cell_val", ".", "strip", "(", ")", "p_uid", "=", "cell_arr", "[", "1", ":", "]", "# 所有以 t 开头", "t_slug", "=", "sheet_ranges", "[", "'C{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", ".", "strip", "(", ")", "t_title", "=", "sheet_ranges", "[", "'D{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", ".", "strip", "(", ")", "u_uid", "=", "p_uid", "+", "(", "4", "-", "len", "(", "p_uid", ")", ")", "*", "'0'", "pp_uid", "=", "'0000'", "elif", "b_cell_val", "and", "b_cell_val", "!=", "''", ":", "cell_arr", "=", "b_cell_val", "c_iud", "=", "cell_arr", "[", "1", ":", "]", "t_slug", "=", "sheet_ranges", "[", "'C{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", ".", "strip", "(", ")", "t_title", "=", "sheet_ranges", "[", "'D{0}'", ".", "format", "(", "row_num", ")", "]", ".", "value", ".", "strip", "(", ")", "if", "len", "(", "c_iud", ")", "==", "4", ":", "u_uid", "=", "c_iud", "else", ":", "u_uid", "=", "'{0}{1}'", ".", "format", "(", "p_uid", ",", "c_iud", ")", "pp_uid", "=", "p_uid", 
"+", "(", "4", "-", "len", "(", "p_uid", ")", ")", "*", "'0'", "else", ":", "continue", "post_data", "=", "{", "'name'", ":", "t_title", ",", "'slug'", ":", "t_slug", ",", "'order'", ":", "order_index", ",", "'uid'", ":", "u_uid", ",", "'pid'", ":", "pp_uid", ",", "'kind'", ":", "kind_sig", ",", "}", "all_cate_arr", ".", "append", "(", "post_data", ")", "MCategory", ".", "add_or_update", "(", "u_uid", ",", "post_data", ")", "order_index", "+=", "1", "return", "all_cate_arr" ]
Returns the best matching python type even if it is an ndarray assumes all items in the list are of the same type . does not check this
def get_homogenous_list_type ( list_ ) : # TODO Expand and make work correctly if HAVE_NUMPY and isinstance ( list_ , np . ndarray ) : item = list_ elif isinstance ( list_ , list ) and len ( list_ ) > 0 : item = list_ [ 0 ] else : item = None if item is not None : if is_float ( item ) : type_ = float elif is_int ( item ) : type_ = int elif is_bool ( item ) : type_ = bool elif is_str ( item ) : type_ = str else : type_ = get_type ( item ) else : type_ = None return type_
9,339
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L528-L553
[ "def", "unpause_topic", "(", "self", ",", "topic", ")", ":", "nsq", ".", "assert_valid_topic_name", "(", "topic", ")", "return", "self", ".", "_request", "(", "'POST'", ",", "'/topic/unpause'", ",", "fields", "=", "{", "'topic'", ":", "topic", "}", ")" ]
Pops a value off the top of the stack .
def pop ( self ) : if self . stack : val = self . stack [ 0 ] self . stack = self . stack [ 1 : ] return val else : raise StackError ( 'Stack empty' )
9,340
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/base/stack.py#L36-L51
[ "def", "user_agent", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "indicator_obj", "=", "UserAgent", "(", "text", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_indicator", "(", "indicator_obj", ")" ]
Stores all spectra rt injection time and scan nr in db
def create_spectra_lookup ( lookup , fn_spectra ) : to_store = [ ] mzmlmap = lookup . get_mzmlfile_map ( ) for fn , spectrum in fn_spectra : spec_id = '{}_{}' . format ( mzmlmap [ fn ] , spectrum [ 'scan' ] ) mzml_rt = round ( float ( spectrum [ 'rt' ] ) , 12 ) mzml_iit = round ( float ( spectrum [ 'iit' ] ) , 12 ) mz = float ( spectrum [ 'mz' ] ) to_store . append ( ( spec_id , mzmlmap [ fn ] , spectrum [ 'scan' ] , spectrum [ 'charge' ] , mz , mzml_rt , mzml_iit ) ) if len ( to_store ) == DB_STORE_CHUNK : lookup . store_mzmls ( to_store ) to_store = [ ] lookup . store_mzmls ( to_store ) lookup . index_mzml ( )
9,341
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/spectra.py#L4-L19
[ "def", "_update_attachments_to_cloud", "(", "self", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'attachments'", ")", ".", "format", "(", "id", "=", "self", ".", "_parent", ".", "object_id", ")", ")", "# ! potentially several api requests can be made by this method.", "for", "attachment", "in", "self", ".", "__attachments", ":", "if", "attachment", ".", "on_cloud", "is", "False", ":", "# upload attachment:", "response", "=", "self", ".", "_parent", ".", "con", ".", "post", "(", "url", ",", "data", "=", "attachment", ".", "to_api_data", "(", ")", ")", "if", "not", "response", ":", "return", "False", "data", "=", "response", ".", "json", "(", ")", "# update attachment data", "attachment", ".", "attachment_id", "=", "data", ".", "get", "(", "'id'", ")", "attachment", ".", "content", "=", "data", ".", "get", "(", "self", ".", "_cc", "(", "'contentBytes'", ")", ",", "None", ")", "attachment", ".", "on_cloud", "=", "True", "for", "attachment", "in", "self", ".", "__removed_attachments", ":", "if", "attachment", ".", "on_cloud", "and", "attachment", ".", "attachment_id", "is", "not", "None", ":", "# delete attachment", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'attachment'", ")", ".", "format", "(", "id", "=", "self", ".", "_parent", ".", "object_id", ",", "ida", "=", "attachment", ".", "attachment_id", ")", ")", "response", "=", "self", ".", "_parent", ".", "con", ".", "delete", "(", "url", ")", "if", "not", "response", ":", "return", "False", "self", ".", "__removed_attachments", "=", "[", "]", "# reset the removed attachments", "log", ".", "debug", "(", "'Successfully updated attachments on {}'", ".", "format", "(", "self", ".", "_parent", ".", "object_id", ")", ")", "return", "True" ]
r Checks that a function raises an error when given specific arguments .
def assert_raises ( ex_type , func , * args , * * kwargs ) : try : func ( * args , * * kwargs ) except Exception as ex : assert isinstance ( ex , ex_type ) , ( 'Raised %r but type should have been %r' % ( ex , ex_type ) ) return True else : raise AssertionError ( 'No error was raised' )
9,342
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_assert.py#L26-L55
[ "def", "prepare_blobs", "(", "self", ")", ":", "self", ".", "raw_header", "=", "self", ".", "extract_header", "(", ")", "if", "self", ".", "cache_enabled", ":", "self", ".", "_cache_offsets", "(", ")" ]
Invoke the callback with a command - object for each connection .
def command_for_all_connections ( self , cb ) : for connection in self . __master . connections : cb ( connection . command )
9,343
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection_election.py#L40-L44
[ "def", "setOverlayTexelAspect", "(", "self", ",", "ulOverlayHandle", ",", "fTexelAspect", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexelAspect", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fTexelAspect", ")", "return", "result" ]
Helper that write a file if - w is given on command line otherwise it just prints it out . It has the opption of comparing a diff to the file .
def dump_autogen_code ( fpath , autogen_text , codetype = 'python' , fullprint = None , show_diff = None , dowrite = None ) : import utool as ut if dowrite is None : dowrite = ut . get_argflag ( ( '-w' , '--write' ) ) if show_diff is None : show_diff = ut . get_argflag ( '--diff' ) num_context_lines = ut . get_argval ( '--diff' , type_ = int , default = None ) show_diff = show_diff or num_context_lines is not None num_context_lines = ut . get_argval ( '--diff' , type_ = int , default = None ) if fullprint is None : fullprint = True if fullprint is False : fullprint = ut . get_argflag ( '--print' ) print ( '[autogen] Autogenerated %s...\n+---\n' % ( fpath , ) ) if not dowrite : if fullprint : ut . print_code ( autogen_text , lexer_name = codetype ) print ( '\nL___' ) else : print ( 'specify --print to write to stdout' ) pass print ( 'specify -w to write, or --diff to compare' ) print ( '...would write to: %s' % fpath ) if show_diff : if ut . checkpath ( fpath , verbose = True ) : prev_text = ut . read_from ( fpath ) textdiff = ut . get_textdiff ( prev_text , autogen_text , num_context_lines = num_context_lines ) try : ut . print_difftext ( textdiff ) except UnicodeDecodeError : import unicodedata textdiff = unicodedata . normalize ( 'NFKD' , textdiff ) . encode ( 'ascii' , 'ignore' ) ut . print_difftext ( textdiff ) if dowrite : print ( 'WARNING: Not writing. Remove --diff from command line' ) elif dowrite : ut . write_to ( fpath , autogen_text )
9,344
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L22-L69
[ "def", "get_host_health_temperature_sensors", "(", "self", ",", "data", "=", "None", ")", ":", "data", "=", "self", ".", "get_host_health_data", "(", "data", ")", "d", "=", "data", "[", "'GET_EMBEDDED_HEALTH_DATA'", "]", "[", "'TEMPERATURE'", "]", "[", "'TEMP'", "]", "if", "not", "isinstance", "(", "d", ",", "list", ")", ":", "d", "=", "[", "d", "]", "return", "d" ]
r Uses autopep8 to format a block of code
def autofix_codeblock ( codeblock , max_line_len = 80 , aggressive = False , very_aggressive = False , experimental = False ) : # FIXME idk how to remove the blank line following the function with # autopep8. It seems to not be supported by them, but it looks bad. import autopep8 arglist = [ '--max-line-length' , '80' ] if aggressive : arglist . extend ( [ '-a' ] ) if very_aggressive : arglist . extend ( [ '-a' , '-a' ] ) if experimental : arglist . extend ( [ '--experimental' ] ) arglist . extend ( [ '' ] ) autopep8_options = autopep8 . parse_args ( arglist ) fixed_codeblock = autopep8 . fix_code ( codeblock , options = autopep8_options ) return fixed_codeblock
9,345
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L186-L223
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
r called from vim . Uses strings of filename and modnames to build docstr
def auto_docstr ( modname , funcname , verbose = True , moddir = None , modpath = None , * * kwargs ) : #import utool as ut func , module , error_str = load_func_from_module ( modname , funcname , verbose = verbose , moddir = moddir , modpath = modpath ) if error_str is None : try : docstr = make_default_docstr ( func , * * kwargs ) except Exception as ex : import utool as ut error_str = ut . formatex ( ex , 'Caught Error in parsing docstr' , tb = True ) #ut.printex(ex) error_str += ( '\n\nReplicateCommand:\n ' 'python -m utool --tf auto_docstr ' '--modname={modname} --funcname={funcname} --moddir={moddir}' ) . format ( modname = modname , funcname = funcname , moddir = moddir ) error_str += '\n kwargs=' + ut . repr4 ( kwargs ) return error_str else : docstr = error_str return docstr
9,346
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L396-L442
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "BaseCategoryDetail", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "context", "[", "'category'", "]", "=", "self", ".", "category", "return", "context" ]
r Builds the argument docstring
def make_args_docstr ( argname_list , argtype_list , argdesc_list , ismethod , va_name = None , kw_name = None , kw_keys = [ ] ) : import utool as ut if ismethod : # Remove self from the list argname_list = argname_list [ 1 : ] argtype_list = argtype_list [ 1 : ] argdesc_list = argdesc_list [ 1 : ] argdoc_list = [ arg + ' (%s): %s' % ( _type , desc ) for arg , _type , desc in zip ( argname_list , argtype_list , argdesc_list ) ] # Add in varargs and kwargs # References: # http://www.sphinx-doc.org/en/stable/ext/example_google.html#example-google if va_name is not None : argdoc_list . append ( '*' + va_name + ':' ) if kw_name is not None : import textwrap prefix = '**' + kw_name + ': ' wrapped_lines = textwrap . wrap ( ', ' . join ( kw_keys ) , width = 70 - len ( prefix ) ) sep = '\n' + ( ' ' * len ( prefix ) ) kw_keystr = sep . join ( wrapped_lines ) argdoc_list . append ( ( prefix + kw_keystr ) . strip ( ) ) # align? align_args = False if align_args : argdoc_aligned_list = ut . align_lines ( argdoc_list , character = '(' ) arg_docstr = '\n' . join ( argdoc_aligned_list ) else : arg_docstr = '\n' . join ( argdoc_list ) return arg_docstr
9,347
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L457-L529
[ "def", "swo_read_stimulus", "(", "self", ",", "port", ",", "num_bytes", ")", ":", "if", "port", "<", "0", "or", "port", ">", "31", ":", "raise", "ValueError", "(", "'Invalid port number: %s'", "%", "port", ")", "buf_size", "=", "num_bytes", "buf", "=", "(", "ctypes", ".", "c_uint8", "*", "buf_size", ")", "(", ")", "bytes_read", "=", "self", ".", "_dll", ".", "JLINKARM_SWO_ReadStimulus", "(", "port", ",", "buf", ",", "buf_size", ")", "return", "list", "(", "buf", ")", "[", ":", "bytes_read", "]" ]
r Tries to make a sensible default docstr so the user can fill things in without typing too much
def make_default_docstr ( func , with_args = True , with_ret = True , with_commandline = True , with_example = True , with_header = False , with_debug = False ) : import utool as ut #from utool import util_inspect funcinfo = ut . util_inspect . infer_function_info ( func ) argname_list = funcinfo . argname_list argtype_list = funcinfo . argtype_list argdesc_list = funcinfo . argdesc_list return_header = funcinfo . return_header return_type = funcinfo . return_type return_name = funcinfo . return_name return_desc = funcinfo . return_desc funcname = funcinfo . funcname modname = funcinfo . modname defaults = funcinfo . defaults num_indent = funcinfo . num_indent needs_surround = funcinfo . needs_surround funcname = funcinfo . funcname ismethod = funcinfo . ismethod va_name = funcinfo . va_name kw_name = funcinfo . kw_name kw_keys = funcinfo . kw_keys docstr_parts = [ ] # Header part if with_header : header_block = funcname docstr_parts . append ( header_block ) # Args part if with_args and len ( argname_list ) > 0 : argheader = 'Args' arg_docstr = make_args_docstr ( argname_list , argtype_list , argdesc_list , ismethod , va_name , kw_name , kw_keys ) argsblock = make_docstr_block ( argheader , arg_docstr ) docstr_parts . append ( argsblock ) # if False: # with_kw = with_args # if with_kw and len(kwarg_keys) > 0: # #ut.embed() # import textwrap # kwargs_docstr = ', '.join(kwarg_keys) # kwargs_docstr = '\n'.join(textwrap.wrap(kwargs_docstr)) # kwargsblock = make_docstr_block('Kwargs', kwargs_docstr) # docstr_parts.append(kwargsblock) # Return / Yeild part if with_ret and return_header is not None : if return_header is not None : return_doctr = make_returns_or_yeilds_docstr ( return_type , return_name , return_desc ) returnblock = make_docstr_block ( return_header , return_doctr ) docstr_parts . 
append ( returnblock ) # Example part # try to generate a simple and unit testable example if with_commandline : cmdlineheader = 'CommandLine' cmdlinecode = make_cmdline_docstr ( funcname , modname ) cmdlineblock = make_docstr_block ( cmdlineheader , cmdlinecode ) docstr_parts . append ( cmdlineblock ) if with_example : exampleheader = 'Example' examplecode = make_example_docstr ( funcname , modname , argname_list , defaults , return_type , return_name , ismethod ) examplecode_ = ut . indent ( examplecode , '>>> ' ) exampleblock = make_docstr_block ( exampleheader , examplecode_ ) docstr_parts . append ( exampleblock ) # DEBUG part (in case something goes wrong) if with_debug : debugheader = 'Debug' debugblock = ut . codeblock ( ''' num_indent = {num_indent} ''' ) . format ( num_indent = num_indent ) debugblock = make_docstr_block ( debugheader , debugblock ) docstr_parts . append ( debugblock ) # Enclosure / Indentation Parts if needs_surround : docstr_parts = [ 'r"""' ] + [ '\n\n' . join ( docstr_parts ) ] + [ '"""' ] default_docstr = '\n' . join ( docstr_parts ) else : default_docstr = '\n\n' . join ( docstr_parts ) docstr_indent = ' ' * ( num_indent + 4 ) default_docstr = ut . indent ( default_docstr , docstr_indent ) return default_docstr
9,348
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L763-L894
[ "def", "subtract", "(", "self", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "ndarray", ")", ":", "if", "val", ".", "shape", "!=", "self", ".", "value_shape", ":", "raise", "Exception", "(", "'Cannot subtract image with dimensions %s '", "'from images with dimension %s'", "%", "(", "str", "(", "val", ".", "shape", ")", ",", "str", "(", "self", ".", "value_shape", ")", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "-", "val", ",", "value_shape", "=", "self", ".", "value_shape", ")" ]
r Removes template comments and vim sentinals
def remove_codeblock_syntax_sentinals ( code_text ) : flags = re . MULTILINE | re . DOTALL code_text_ = code_text code_text_ = re . sub ( r'^ *# *REM [^\n]*$\n?' , '' , code_text_ , flags = flags ) code_text_ = re . sub ( r'^ *# STARTBLOCK *$\n' , '' , code_text_ , flags = flags ) code_text_ = re . sub ( r'^ *# ENDBLOCK *$\n?' , '' , code_text_ , flags = flags ) code_text_ = code_text_ . rstrip ( ) return code_text_
9,349
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L897-L913
[ "def", "decompressBWTPoolProcess", "(", "tup", ")", ":", "(", "inputDir", ",", "outputDir", ",", "startIndex", ",", "endIndex", ")", "=", "tup", "if", "startIndex", "==", "endIndex", ":", "return", "True", "#load the thing we'll be extracting from", "msbwt", "=", "MultiStringBWT", ".", "CompressedMSBWT", "(", ")", "msbwt", ".", "loadMsbwt", "(", "inputDir", ",", "None", ")", "#open our output", "outputBwt", "=", "np", ".", "load", "(", "outputDir", "+", "'/msbwt.npy'", ",", "'r+'", ")", "outputBwt", "[", "startIndex", ":", "endIndex", "]", "=", "msbwt", ".", "getBWTRange", "(", "startIndex", ",", "endIndex", ")", "return", "True" ]
Recursive function that sorts protein group by a number of sorting functions .
def sort_protein_group ( pgroup , sortfunctions , sortfunc_index ) : pgroup_out = [ ] subgroups = sortfunctions [ sortfunc_index ] ( pgroup ) sortfunc_index += 1 for subgroup in subgroups : if len ( subgroup ) > 1 and sortfunc_index < len ( sortfunctions ) : pgroup_out . extend ( sort_protein_group ( subgroup , sortfunctions , sortfunc_index ) ) else : pgroup_out . extend ( subgroup ) return pgroup_out
9,350
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingroup_sorters.py#L35-L48
[ "def", "bind_texture", "(", "texture", ")", ":", "if", "not", "getattr", "(", "texture", ",", "'image'", ",", "None", ")", ":", "texture", ".", "image", "=", "load_image", "(", "texture", ".", "path", ")", "glEnable", "(", "texture", ".", "image", ".", "target", ")", "glBindTexture", "(", "texture", ".", "image", ".", "target", ",", "texture", ".", "image", ".", "id", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_S", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_T", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")" ]
Generic function for sorting peptides and psms . Assumes a higher number is better for what is passed at sort_index position in protein .
def sort_amounts ( proteins , sort_index ) : amounts = { } for protein in proteins : amount_x_for_protein = protein [ sort_index ] try : amounts [ amount_x_for_protein ] . append ( protein ) except KeyError : amounts [ amount_x_for_protein ] = [ protein ] return [ v for k , v in sorted ( amounts . items ( ) , reverse = True ) ]
9,351
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingroup_sorters.py#L51-L61
[ "def", "calc_regenerated", "(", "self", ",", "lastvotetime", ")", ":", "delta", "=", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "strptime", "(", "lastvotetime", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "td", "=", "delta", ".", "days", "ts", "=", "delta", ".", "seconds", "tt", "=", "(", "td", "*", "86400", ")", "+", "ts", "return", "tt", "*", "10000", "/", "86400", "/", "5" ]
Free the map
def free ( self ) : if self . _ptr is None : return Gauged . map_free ( self . ptr ) SparseMap . ALLOCATIONS -= 1 self . _ptr = None
9,352
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L65-L71
[ "def", "indication", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"indication %r\"", ",", "apdu", ")", "if", "self", ".", "state", "==", "IDLE", ":", "self", ".", "idle", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request", "(", "apdu", ")", "elif", "self", ".", "state", "==", "AWAIT_RESPONSE", ":", "self", ".", "await_response", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_RESPONSE", ":", "self", ".", "segmented_response", "(", "apdu", ")", "else", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\" - invalid state\"", ")" ]
Append an array to the end of the map . The position must be greater than any positions in the map
def append ( self , position , array ) : if not Gauged . map_append ( self . ptr , position , array . ptr ) : raise MemoryError
9,353
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L73-L77
[ "def", "set_USRdict", "(", "self", ",", "USRdict", "=", "{", "}", ")", ":", "self", ".", "_check_inputs", "(", "USRdict", "=", "USRdict", ")", "self", ".", "_USRdict", "=", "USRdict" ]
Slice the map from [ start end )
def slice ( self , start = 0 , end = 0 ) : tmp = Gauged . map_new ( ) if tmp is None : raise MemoryError if not Gauged . map_concat ( tmp , self . ptr , start , end , 0 ) : Gauged . map_free ( tmp ) # pragma: no cover raise MemoryError return SparseMap ( tmp )
9,354
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L79-L87
[ "def", "isrchi", "(", "value", ",", "ndim", ",", "array", ")", ":", "value", "=", "ctypes", ".", "c_int", "(", "value", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "array", "=", "stypes", ".", "toIntVector", "(", "array", ")", "return", "libspice", ".", "isrchi_c", "(", "value", ",", "ndim", ",", "array", ")" ]
Concat a map . You can also optionally slice the operand map and apply an offset to each position before concatting
def concat ( self , operand , start = 0 , end = 0 , offset = 0 ) : if not Gauged . map_concat ( self . ptr , operand . ptr , start , end , offset ) : raise MemoryError
9,355
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L89-L93
[ "def", "get_pwiz_tables", "(", "self", ",", "engine", ",", "database", ")", ":", "introspector", "=", "pwiz", ".", "make_introspector", "(", "engine", ",", "database", ".", "database", ",", "*", "*", "database", ".", "connect_kwargs", ")", "out_file", "=", "'/tmp/db_models.py'", "with", "Capturing", "(", ")", "as", "code", ":", "pwiz", ".", "print_models", "(", "introspector", ")", "code", "=", "'\\n'", ".", "join", "(", "code", ")", "# Unfortunately, introspect.getsource doesn't seem to work", "# with dynamically created classes unless it is written out", "# to a file. So write it out to a temporary file", "with", "open", "(", "out_file", ",", "'w'", ")", "as", "file_", ":", "file_", ".", "write", "(", "code", ")", "# Load up the DB models as a new module so that we can", "# compare them with those in the model definition", "return", "imp", ".", "load_source", "(", "'db_models'", ",", "out_file", ")" ]
Get a copy of the map buffer
def buffer ( self , byte_offset = 0 ) : contents = self . ptr . contents ptr = addressof ( contents . buffer . contents ) + byte_offset length = contents . length * 4 - byte_offset return buffer ( ( c_char * length ) . from_address ( ptr ) . raw ) if length else None
9,356
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L99-L105
[ "def", "_check_missing_manifests", "(", "self", ",", "segids", ")", ":", "manifest_paths", "=", "[", "self", ".", "_manifest_path", "(", "segid", ")", "for", "segid", "in", "segids", "]", "with", "Storage", "(", "self", ".", "vol", ".", "layer_cloudpath", ",", "progress", "=", "self", ".", "vol", ".", "progress", ")", "as", "stor", ":", "exists", "=", "stor", ".", "files_exist", "(", "manifest_paths", ")", "dne", "=", "[", "]", "for", "path", ",", "there", "in", "exists", ".", "items", "(", ")", ":", "if", "not", "there", ":", "(", "segid", ",", ")", "=", "re", ".", "search", "(", "r'(\\d+):0$'", ",", "path", ")", ".", "groups", "(", ")", "dne", ".", "append", "(", "segid", ")", "return", "dne" ]
Does the target match the whitelist entry?
def matches ( target , entry ) : # It must match all the non-empty entries. for t , e in itertools . zip_longest ( target , entry ) : if e and t != e : return False # ...and the provider and user can't be empty. return entry [ 0 ] and entry [ 1 ]
9,357
https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/whitelist.py#L29-L38
[ "async", "def", "process_events_async", "(", "self", ",", "events", ")", ":", "if", "events", ":", "# Synchronize to serialize calls to the processor. The handler is not installed until", "# after OpenAsync returns, so ProcessEventsAsync cannot conflict with OpenAsync. There", "# could be a conflict between ProcessEventsAsync and CloseAsync, however. All calls to", "# CloseAsync are protected by synchronizing too.", "try", ":", "last", "=", "events", "[", "-", "1", "]", "if", "last", "is", "not", "None", ":", "self", ".", "partition_context", ".", "set_offset_and_sequence_number", "(", "last", ")", "await", "self", ".", "processor", ".", "process_events_async", "(", "self", ".", "partition_context", ",", "events", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "await", "self", ".", "process_error_async", "(", "err", ")" ]
Throws an exception if the entry isn t on the whitelist .
def check_entry ( * entry ) : whitelist = read_whitelist ( ) if not check_allow_prompt ( entry , whitelist ) : whitelist . append ( entry ) write_whitelist ( whitelist )
9,358
https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/whitelist.py#L64-L69
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "self", ".", "_jrdd_deserializer", "==", "other", ".", "_jrdd_deserializer", ":", "rdd", "=", "RDD", "(", "self", ".", "_jrdd", ".", "union", "(", "other", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "_jrdd_deserializer", ")", "else", ":", "# These RDDs contain data in different serialized formats, so we", "# must normalize them to the default serializer.", "self_copy", "=", "self", ".", "_reserialize", "(", ")", "other_copy", "=", "other", ".", "_reserialize", "(", ")", "rdd", "=", "RDD", "(", "self_copy", ".", "_jrdd", ".", "union", "(", "other_copy", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "ctx", ".", "serializer", ")", "if", "(", "self", ".", "partitioner", "==", "other", ".", "partitioner", "and", "self", ".", "getNumPartitions", "(", ")", "==", "rdd", ".", "getNumPartitions", "(", ")", ")", ":", "rdd", ".", "partitioner", "=", "self", ".", "partitioner", "return", "rdd" ]
Return data at either a file location or at the raw version of a URL or raise an exception .
def load_uncached ( location , use_json = None ) : if not whitelist . is_file ( location ) : r = requests . get ( raw . raw ( location ) ) if not r . ok : raise ValueError ( 'Couldn\'t read %s with code %s:\n%s' % ( location , r . status_code , r . text ) ) data = r . text else : try : f = os . path . realpath ( os . path . abspath ( os . path . expanduser ( location ) ) ) data = open ( f ) . read ( ) except Exception as e : e . args = ( 'There was an error reading the file' , location , f ) + e . args raise if use_json is None : use_json = any ( location . endswith ( s ) for s in SUFFIXES ) if not use_json : return data try : return yaml . load ( data ) except Exception as e : e . args = ( 'There was a JSON error in the file' , location ) + e . args raise
9,359
https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/data.py#L12-L50
[ "def", "rebalance_replication_groups", "(", "self", ")", ":", "# Balance replicas over replication-groups for each partition", "if", "any", "(", "b", ".", "inactive", "for", "b", "in", "six", ".", "itervalues", "(", "self", ".", "cluster_topology", ".", "brokers", ")", ")", ":", "self", ".", "log", ".", "error", "(", "\"Impossible to rebalance replication groups because of inactive \"", "\"brokers.\"", ")", "raise", "RebalanceError", "(", "\"Impossible to rebalance replication groups because of inactive \"", "\"brokers\"", ")", "# Balance replica-count over replication-groups", "self", ".", "rebalance_replicas", "(", ")", "# Balance partition-count over replication-groups", "self", ".", "_rebalance_groups_partition_cnt", "(", ")" ]
r Returns a measure of how disimilar two groupings are
def find_group_differences ( groups1 , groups2 ) : import utool as ut # For each group, build mapping from each item to the members the group item_to_others1 = { item : set ( _group ) - { item } for _group in groups1 for item in _group } item_to_others2 = { item : set ( _group ) - { item } for _group in groups2 for item in _group } flat_items1 = ut . flatten ( groups1 ) flat_items2 = ut . flatten ( groups2 ) flat_items = list ( set ( flat_items1 + flat_items2 ) ) errors = [ ] item_to_error = { } for item in flat_items : # Determine the number of unshared members in each group others1 = item_to_others1 . get ( item , set ( [ ] ) ) others2 = item_to_others2 . get ( item , set ( [ ] ) ) missing1 = others1 - others2 missing2 = others2 - others1 error = len ( missing1 ) + len ( missing2 ) if error > 0 : item_to_error [ item ] = error errors . append ( error ) total_error = sum ( errors ) return total_error
9,360
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L43-L120
[ "def", "print_file", "(", "self", ")", ":", "editor", "=", "self", ".", "get_current_editor", "(", ")", "filename", "=", "self", ".", "get_current_filename", "(", ")", "printer", "=", "Printer", "(", "mode", "=", "QPrinter", ".", "HighResolution", ",", "header_font", "=", "self", ".", "get_plugin_font", "(", "'printer_header'", ")", ")", "printDialog", "=", "QPrintDialog", "(", "printer", ",", "editor", ")", "if", "editor", ".", "has_selected_text", "(", ")", ":", "printDialog", ".", "setOption", "(", "QAbstractPrintDialog", ".", "PrintSelection", ",", "True", ")", "self", ".", "redirect_stdio", ".", "emit", "(", "False", ")", "answer", "=", "printDialog", ".", "exec_", "(", ")", "self", ".", "redirect_stdio", ".", "emit", "(", "True", ")", "if", "answer", "==", "QDialog", ".", "Accepted", ":", "self", ".", "starting_long_process", "(", "_", "(", "\"Printing...\"", ")", ")", "printer", ".", "setDocName", "(", "filename", ")", "editor", ".", "print_", "(", "printer", ")", "self", ".", "ending_long_process", "(", ")" ]
r Returns a measure of group consistency
def find_group_consistencies ( groups1 , groups2 ) : group1_list = { tuple ( sorted ( _group ) ) for _group in groups1 } group2_list = { tuple ( sorted ( _group ) ) for _group in groups2 } common_groups = list ( group1_list . intersection ( group2_list ) ) return common_groups
9,361
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L123-L140
[ "def", "download", "(", "self", ",", "path", ",", "args", "=", "[", "]", ",", "filepath", "=", "None", ",", "opts", "=", "{", "}", ",", "compress", "=", "True", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "base", "+", "path", "wd", "=", "filepath", "or", "'.'", "params", "=", "[", "]", "params", ".", "append", "(", "(", "'stream-channels'", ",", "'true'", ")", ")", "params", ".", "append", "(", "(", "'archive'", ",", "'true'", ")", ")", "if", "compress", ":", "params", ".", "append", "(", "(", "'compress'", ",", "'true'", ")", ")", "for", "opt", "in", "opts", ".", "items", "(", ")", ":", "params", ".", "append", "(", "opt", ")", "for", "arg", "in", "args", ":", "params", ".", "append", "(", "(", "'arg'", ",", "arg", ")", ")", "method", "=", "'get'", "res", "=", "self", ".", "_do_request", "(", "method", ",", "url", ",", "params", "=", "params", ",", "stream", "=", "True", ",", "*", "*", "kwargs", ")", "self", ".", "_do_raise_for_status", "(", "res", ")", "# try to stream download as a tar file stream", "mode", "=", "'r|gz'", "if", "compress", "else", "'r|'", "with", "tarfile", ".", "open", "(", "fileobj", "=", "res", ".", "raw", ",", "mode", "=", "mode", ")", "as", "tf", ":", "tf", ".", "extractall", "(", "path", "=", "wd", ")" ]
r Finds how predictions need to be modified to match the true grouping .
def compare_groups ( true_groups , pred_groups ) : import utool as ut true = { frozenset ( _group ) for _group in true_groups } pred = { frozenset ( _group ) for _group in pred_groups } # Find the groups that are exactly the same common = true . intersection ( pred ) true_sets = true . difference ( common ) pred_sets = pred . difference ( common ) # connected compoment lookups pred_conn = { p : frozenset ( ps ) for ps in pred for p in ps } true_conn = { t : frozenset ( ts ) for ts in true for t in ts } # How many predictions can be merged into perfect pieces? # For each true sets, find if it can be made via merging pred sets pred_merges = [ ] true_merges = [ ] for ts in true_sets : ccs = set ( [ pred_conn . get ( t , frozenset ( ) ) for t in ts ] ) if frozenset . union ( * ccs ) == ts : # This is a pure merge pred_merges . append ( ccs ) true_merges . append ( ts ) # How many predictions can be split into perfect pieces? true_splits = [ ] pred_splits = [ ] for ps in pred_sets : ccs = set ( [ true_conn . get ( p , frozenset ( ) ) for p in ps ] ) if frozenset . union ( * ccs ) == ps : # This is a pure merge true_splits . append ( ccs ) pred_splits . append ( ps ) pred_merges_flat = ut . flatten ( pred_merges ) true_splits_flat = ut . flatten ( true_splits ) pred_hybrid = frozenset ( map ( frozenset , pred_sets ) ) . difference ( set ( pred_splits + pred_merges_flat ) ) true_hybrid = frozenset ( map ( frozenset , true_sets ) ) . difference ( set ( true_merges + true_splits_flat ) ) comparisons = { 'common' : common , # 'true_splits_flat': true_splits_flat, 'true_splits' : true_splits , 'true_merges' : true_merges , 'true_hybrid' : true_hybrid , 'pred_splits' : pred_splits , 'pred_merges' : pred_merges , # 'pred_merges_flat': pred_merges_flat, 'pred_hybrid' : pred_hybrid , } return comparisons
9,362
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L143-L234
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "ttl", "[", "self", ".", "idx", "]", "<=", "0", ":", "self", ".", "buffers", "[", "self", ".", "idx", "]", "=", "self", ".", "inqueue", ".", "get", "(", "timeout", "=", "300.0", ")", "self", ".", "ttl", "[", "self", ".", "idx", "]", "=", "self", ".", "cur_max_ttl", "if", "self", ".", "cur_max_ttl", "<", "self", ".", "max_ttl", ":", "self", ".", "cur_max_ttl", "+=", "1", "buf", "=", "self", ".", "buffers", "[", "self", ".", "idx", "]", "self", ".", "ttl", "[", "self", ".", "idx", "]", "-=", "1", "released", "=", "self", ".", "ttl", "[", "self", ".", "idx", "]", "<=", "0", "if", "released", ":", "self", ".", "buffers", "[", "self", ".", "idx", "]", "=", "None", "self", ".", "idx", "=", "(", "self", ".", "idx", "+", "1", ")", "%", "len", "(", "self", ".", "buffers", ")", "return", "buf", ",", "released" ]
Returns statistics about grouping changes
def grouping_delta_stats ( old , new ) : import pandas as pd import utool as ut group_delta = ut . grouping_delta ( old , new ) stats = ut . odict ( ) unchanged = group_delta [ 'unchanged' ] splits = group_delta [ 'splits' ] merges = group_delta [ 'merges' ] hybrid = group_delta [ 'hybrid' ] statsmap = ut . partial ( lambda x : ut . stats_dict ( map ( len , x ) , size = True ) ) stats [ 'unchanged' ] = statsmap ( unchanged ) stats [ 'old_split' ] = statsmap ( splits [ 'old' ] ) stats [ 'new_split' ] = statsmap ( ut . flatten ( splits [ 'new' ] ) ) stats [ 'old_merge' ] = statsmap ( ut . flatten ( merges [ 'old' ] ) ) stats [ 'new_merge' ] = statsmap ( merges [ 'new' ] ) stats [ 'old_hybrid' ] = statsmap ( hybrid [ 'old' ] ) stats [ 'new_hybrid' ] = statsmap ( hybrid [ 'new' ] ) df = pd . DataFrame . from_dict ( stats , orient = 'index' ) df = df . loc [ list ( stats . keys ( ) ) ] return df
9,363
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L440-L484
[ "def", "put_file", "(", "self", ",", "secure_data_path", ",", "filehandle", ",", "content_type", "=", "None", ")", ":", "# Parse out the filename from the path", "filename", "=", "secure_data_path", ".", "rsplit", "(", "'/'", ",", "1", ")", "if", "content_type", ":", "data", "=", "{", "'file-content'", ":", "(", "filename", ",", "filehandle", ",", "content_type", ")", "}", "else", ":", "data", "=", "{", "'file-content'", ":", "(", "filename", ",", "filehandle", ")", "}", "headers", "=", "self", ".", "HEADERS", ".", "copy", "(", ")", "if", "'Content-Type'", "in", "headers", ":", "headers", ".", "__delitem__", "(", "'Content-Type'", ")", "secret_resp", "=", "post_with_retry", "(", "self", ".", "cerberus_url", "+", "'/v1/secure-file/'", "+", "secure_data_path", ",", "files", "=", "data", ",", "headers", "=", "headers", ")", "throw_if_bad_response", "(", "secret_resp", ")", "return", "secret_resp" ]
upper diagnoal of cartesian product of self and self . Weird name . fixme
def upper_diag_self_prodx ( list_ ) : return [ ( item1 , item2 ) for n1 , item1 in enumerate ( list_ ) for n2 , item2 in enumerate ( list_ ) if n1 < n2 ]
9,364
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L487-L511
[ "def", "config_extensions", "(", "app", ")", ":", "cache", ".", "init_app", "(", "app", ")", "db", ".", "init_app", "(", "app", ")", "main", ".", "init_app", "(", "app", ")", "collect", ".", "init_app", "(", "app", ")", "config_babel", "(", "app", ")" ]
r dont trust this implementation or this function name
def colwise_diag_idxs ( size , num = 2 ) : # diag_idxs = list(diagonalized_iter(size)) # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c] # # diag_idxs = list(diagonalized_iter(size)) import utool as ut diag_idxs = ut . iprod ( * [ range ( size ) for _ in range ( num ) ] ) #diag_idxs = list(ut.iprod(range(size), range(size))) # this is pretty much a simple c ordering upper_diag_idxs = [ tup [ : : - 1 ] for tup in diag_idxs if all ( [ a > b for a , b in ut . itertwo ( tup ) ] ) #if all([a > b for a, b in ut.itertwo(tup[:2])]) ] #upper_diag_idxs = [(c, r) for r, c in diag_idxs if r > c] # # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r > c] return upper_diag_idxs
9,365
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L547-L591
[ "def", "detach", "(", "zpool", ",", "device", ")", ":", "## Update storage pool", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "__utils__", "[", "'zfs.zpool_command'", "]", "(", "command", "=", "'detach'", ",", "target", "=", "[", "zpool", ",", "device", "]", ",", ")", ",", "python_shell", "=", "False", ",", ")", "ret", "=", "__utils__", "[", "'zfs.parse_command_result'", "]", "(", "res", ",", "'detatched'", ")", "if", "ret", "[", "'detatched'", "]", ":", "## NOTE: lookup zpool status for vdev config", "ret", "[", "'vdevs'", "]", "=", "_clean_vdev_config", "(", "__salt__", "[", "'zpool.status'", "]", "(", "zpool", "=", "zpool", ")", "[", "zpool", "]", "[", "'config'", "]", "[", "zpool", "]", ",", ")", "return", "ret" ]
product of list1 and list2 where items are non equal
def product_nonsame ( list1 , list2 ) : for item1 , item2 in itertools . product ( list1 , list2 ) : if item1 != item2 : yield ( item1 , item2 )
9,366
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L600-L604
[ "def", "serial_assimilate", "(", "self", ",", "rootpath", ")", ":", "valid_paths", "=", "[", "]", "for", "(", "parent", ",", "subdirs", ",", "files", ")", "in", "os", ".", "walk", "(", "rootpath", ")", ":", "valid_paths", ".", "extend", "(", "self", ".", "_drone", ".", "get_valid_paths", "(", "(", "parent", ",", "subdirs", ",", "files", ")", ")", ")", "data", "=", "[", "]", "count", "=", "0", "total", "=", "len", "(", "valid_paths", ")", "for", "path", "in", "valid_paths", ":", "newdata", "=", "self", ".", "_drone", ".", "assimilate", "(", "path", ")", "self", ".", "_data", ".", "append", "(", "newdata", ")", "count", "+=", "1", "logger", ".", "info", "(", "'{}/{} ({:.2f}%) done'", ".", "format", "(", "count", ",", "total", ",", "count", "/", "total", "*", "100", ")", ")", "for", "d", "in", "data", ":", "self", ".", "_data", ".", "append", "(", "json", ".", "loads", "(", "d", ",", "cls", "=", "MontyDecoder", ")", ")" ]
greedy algorithm for maximum independent set cover
def greedy_max_inden_setcover ( candidate_sets_dict , items , max_covers = None ) : uncovered_set = set ( items ) rejected_keys = set ( ) accepted_keys = set ( ) covered_items_list = [ ] while True : # Break if we have enough covers if max_covers is not None and len ( covered_items_list ) >= max_covers : break maxkey = None maxlen = - 1 # Loop over candidates to find the biggested unadded cover set for key , candidate_items in six . iteritems ( candidate_sets_dict ) : if key in rejected_keys or key in accepted_keys : continue #print('Checking %r' % (key,)) lenval = len ( candidate_items ) # len(uncovered_set.intersection(candidate_items)) == lenval: if uncovered_set . issuperset ( candidate_items ) : if lenval > maxlen : maxkey = key maxlen = lenval else : rejected_keys . add ( key ) # Add the set to the cover if maxkey is None : break maxval = candidate_sets_dict [ maxkey ] accepted_keys . add ( maxkey ) covered_items_list . append ( list ( maxval ) ) # Add values in this key to the cover uncovered_set . difference_update ( maxval ) uncovered_items = list ( uncovered_set ) covertup = uncovered_items , covered_items_list , accepted_keys return covertup
9,367
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L611-L681
[ "def", "__registerSeqStr", "(", ")", ":", "_SeqStr", "=", "lambda", "self", ":", "(", "self", "and", "\"[%s]\"", "%", "(", "\", \"", ".", "join", "(", "map", "(", "repr", ",", "self", ")", ")", ")", ")", "or", "\"[]\"", "_SeqRepr", "=", "lambda", "self", ":", "(", "self", "and", "\"[%s]\"", "%", "(", "\", \"", ".", "join", "(", "map", "(", "repr", ",", "self", ")", ")", ")", ")", "or", "\"[]\"", "seqs", "=", "(", "StdStringVector", ",", "StdLongVector", ",", "CommandInfoList", ",", "AttributeInfoList", ",", "AttributeInfoListEx", ",", "PipeInfoList", ",", "DeviceDataHistoryList", ",", "GroupReplyList", ",", "GroupAttrReplyList", ",", "GroupCmdReplyList", ",", "DbData", ",", "DbDevInfos", ",", "DbDevExportInfos", ",", "DbDevImportInfos", ",", "DbHistoryList", ")", "for", "seq", "in", "seqs", ":", "seq", ".", "__str__", "=", "_SeqStr", "seq", ".", "__repr__", "=", "_SeqRepr" ]
r Greedy algorithm for various covering problems . approximation gaurentees depending on specifications like set_weights and item values
def setcover_greedy ( candidate_sets_dict , items = None , set_weights = None , item_values = None , max_weight = None ) : import utool as ut solution_cover = { } # If candset_weights or item_values not given use the length as defaults if items is None : items = ut . flatten ( candidate_sets_dict . values ( ) ) if set_weights is None : get_weight = len else : def get_weight ( solution_cover ) : sum ( [ set_weights [ key ] for key in solution_cover . keys ( ) ] ) if item_values is None : get_value = len else : def get_value ( vals ) : sum ( [ item_values [ v ] for v in vals ] ) if max_weight is None : max_weight = get_weight ( candidate_sets_dict ) avail_covers = { key : set ( val ) for key , val in candidate_sets_dict . items ( ) } # While we still need covers while get_weight ( solution_cover ) < max_weight and len ( avail_covers ) > 0 : # Find candiate set with the most uncovered items avail_covers . values ( ) uncovered_values = list ( map ( get_value , avail_covers . values ( ) ) ) chosen_idx = ut . argmax ( uncovered_values ) if uncovered_values [ chosen_idx ] <= 0 : # needlessly adding value-less items break chosen_key = list ( avail_covers . keys ( ) ) [ chosen_idx ] # Add values in this key to the cover chosen_set = avail_covers [ chosen_key ] solution_cover [ chosen_key ] = candidate_sets_dict [ chosen_key ] # Remove chosen set from available options and covered items # from remaining available sets del avail_covers [ chosen_key ] for vals in avail_covers . values ( ) : vals . difference_update ( chosen_set ) return solution_cover
9,368
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L684-L752
[ "def", "_StructMessageToJsonObject", "(", "self", ",", "message", ")", ":", "fields", "=", "message", ".", "fields", "ret", "=", "{", "}", "for", "key", "in", "fields", ":", "ret", "[", "key", "]", "=", "self", ".", "_ValueMessageToJsonObject", "(", "fields", "[", "key", "]", ")", "return", "ret" ]
counts the number of times each item appears in the dictionary
def item_hist ( list_ ) : dict_hist = { } # Insert each item into the correct group for item in list_ : if item not in dict_hist : dict_hist [ item ] = 0 dict_hist [ item ] += 1 return dict_hist
9,369
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L901-L909
[ "def", "attach", "(", "self", ",", "container", ",", "stdout", "=", "True", ",", "stderr", "=", "True", ",", "stream", "=", "False", ",", "logs", "=", "False", ",", "demux", "=", "False", ")", ":", "params", "=", "{", "'logs'", ":", "logs", "and", "1", "or", "0", ",", "'stdout'", ":", "stdout", "and", "1", "or", "0", ",", "'stderr'", ":", "stderr", "and", "1", "or", "0", ",", "'stream'", ":", "stream", "and", "1", "or", "0", "}", "headers", "=", "{", "'Connection'", ":", "'Upgrade'", ",", "'Upgrade'", ":", "'tcp'", "}", "u", "=", "self", ".", "_url", "(", "\"/containers/{0}/attach\"", ",", "container", ")", "response", "=", "self", ".", "_post", "(", "u", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "stream", "=", "True", ")", "output", "=", "self", ".", "_read_from_socket", "(", "response", ",", "stream", ",", "self", ".", "_check_is_tty", "(", "container", ")", ",", "demux", "=", "demux", ")", "if", "stream", ":", "return", "CancellableStream", "(", "output", ",", "response", ")", "else", ":", "return", "output" ]
hacky but still brute force algorithm for finding nth prime for small tests
def get_nth_prime ( n , max_prime = 4100 , safe = True ) : if n <= 100 : first_100_primes = ( 2 , 3 , 5 , 7 , 11 , 13 , 17 , 19 , 23 , 29 , 31 , 37 , 41 , 43 , 47 , 53 , 59 , 61 , 67 , 71 , 73 , 79 , 83 , 89 , 97 , 101 , 103 , 107 , 109 , 113 , 127 , 131 , 137 , 139 , 149 , 151 , 157 , 163 , 167 , 173 , 179 , 181 , 191 , 193 , 197 , 199 , 211 , 223 , 227 , 229 , 233 , 239 , 241 , 251 , 257 , 263 , 269 , 271 , 277 , 281 , 283 , 293 , 307 , 311 , 313 , 317 , 331 , 337 , 347 , 349 , 353 , 359 , 367 , 373 , 379 , 383 , 389 , 397 , 401 , 409 , 419 , 421 , 431 , 433 , 439 , 443 , 449 , 457 , 461 , 463 , 467 , 479 , 487 , 491 , 499 , 503 , 509 , 521 , 523 , 541 , ) #print(len(first_100_primes)) nth_prime = first_100_primes [ n - 1 ] else : if safe : primes = [ num for num in range ( 2 , max_prime ) if is_prime ( num ) ] nth_prime = primes [ n ] else : # This can run for a while... get it? while? nth_prime = get_nth_prime_bruteforce ( n ) return nth_prime
9,370
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1049-L1070
[ "def", "_ParseLogonApplications", "(", "self", ",", "parser_mediator", ",", "registry_key", ")", ":", "for", "application", "in", "self", ".", "_LOGON_APPLICATIONS", ":", "command_value", "=", "registry_key", ".", "GetValueByName", "(", "application", ")", "if", "not", "command_value", ":", "continue", "values_dict", "=", "{", "'Application'", ":", "application", ",", "'Command'", ":", "command_value", ".", "GetDataAsObject", "(", ")", ",", "'Trigger'", ":", "'Logon'", "}", "event_data", "=", "windows_events", ".", "WindowsRegistryEventData", "(", ")", "event_data", ".", "key_path", "=", "registry_key", ".", "path", "event_data", ".", "offset", "=", "registry_key", ".", "offset", "event_data", ".", "regvalue", "=", "values_dict", "event_data", ".", "source_append", "=", "': Winlogon'", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "registry_key", ".", "last_written_time", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
r Solve the knapsack problem by finding the most valuable subsequence of items subject that weighs no more than maxweight .
def knapsack ( items , maxweight , method = 'recursive' ) : if method == 'recursive' : return knapsack_recursive ( items , maxweight ) elif method == 'iterative' : return knapsack_iterative ( items , maxweight ) elif method == 'ilp' : return knapsack_ilp ( items , maxweight ) else : raise NotImplementedError ( '[util_alg] knapsack method=%r' % ( method , ) )
9,371
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1134-L1282
[ "def", "convertallfields", "(", "data", ",", "commdct", ",", "block", "=", "None", ")", ":", "# import pdbdb; pdb.set_trace()", "for", "key", "in", "list", "(", "data", ".", "dt", ".", "keys", "(", ")", ")", ":", "objs", "=", "data", ".", "dt", "[", "key", "]", "for", "i", ",", "obj", "in", "enumerate", "(", "objs", ")", ":", "key_i", "=", "data", ".", "dtls", ".", "index", "(", "key", ")", "key_comm", "=", "commdct", "[", "key_i", "]", "try", ":", "inblock", "=", "block", "[", "key_i", "]", "except", "TypeError", "as", "e", ":", "inblock", "=", "None", "obj", "=", "convertfields", "(", "key_comm", ",", "obj", ",", "inblock", ")", "objs", "[", "i", "]", "=", "obj" ]
solves knapsack using an integer linear program
def knapsack_ilp ( items , maxweight , verbose = False ) : import pulp # Given Input values = [ t [ 0 ] for t in items ] weights = [ t [ 1 ] for t in items ] indices = [ t [ 2 ] for t in items ] # Formulate integer program prob = pulp . LpProblem ( "Knapsack" , pulp . LpMaximize ) # Solution variables x = pulp . LpVariable . dicts ( name = 'x' , indexs = indices , lowBound = 0 , upBound = 1 , cat = pulp . LpInteger ) # maximize objective function prob . objective = sum ( v * x [ i ] for v , i in zip ( values , indices ) ) # subject to prob . add ( sum ( w * x [ i ] for w , i in zip ( weights , indices ) ) <= maxweight ) # Solve using with solver like CPLEX, GLPK, or SCIP. #pulp.CPLEX().solve(prob) pulp . PULP_CBC_CMD ( ) . solve ( prob ) # Read solution flags = [ x [ i ] . varValue for i in indices ] total_value = sum ( [ val for val , flag in zip ( values , flags ) if flag ] ) items_subset = [ item for item , flag in zip ( items , flags ) if flag ] # Print summary if verbose : print ( prob ) print ( 'OPT:' ) print ( '\n' . join ( [ ' %s = %s' % ( x [ i ] . name , x [ i ] . varValue ) for i in indices ] ) ) print ( 'total_value = %r' % ( total_value , ) ) return total_value , items_subset
9,372
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1286-L1337
[ "def", "numRegisteredForRole", "(", "self", ",", "role", ",", "includeTemporaryRegs", "=", "False", ")", ":", "count", "=", "self", ".", "eventregistration_set", ".", "filter", "(", "cancelled", "=", "False", ",", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "count", "(", ")", "if", "includeTemporaryRegs", ":", "count", "+=", "self", ".", "temporaryeventregistration_set", ".", "filter", "(", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "exclude", "(", "registration__expirationDate__lte", "=", "timezone", ".", "now", "(", ")", ")", ".", "count", "(", ")", "return", "count" ]
items = int_items maxweight = int_maxweight
def knapsack_iterative ( items , maxweight ) : # Knapsack requires integral weights weights = [ t [ 1 ] for t in items ] max_exp = max ( [ number_of_decimals ( w_ ) for w_ in weights ] ) coeff = 10 ** max_exp # Adjust weights to be integral int_maxweight = int ( maxweight * coeff ) int_items = [ ( v , int ( w * coeff ) , idx ) for v , w , idx in items ] return knapsack_iterative_int ( int_items , int_maxweight )
9,373
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1385-L1397
[ "def", "_getPath", "(", ")", ":", "if", "os", ".", "name", "==", "\"posix\"", ":", "path", "=", "os", ".", "getenv", "(", "\"HOME\"", ")", "+", "\"/.config/google-chrome/Default/Cookies\"", "return", "path", "import", "_winreg", "key", "=", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_CURRENT_USER", ",", "'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Explorer\\\\Shell Folders'", ")", "path", "=", "_winreg", ".", "QueryValueEx", "(", "key", ",", "'Local AppData'", ")", "[", "0", "]", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'Google\\\\Chrome\\\\User Data\\\\Default\\\\Cookies'", ")", "return", "path" ]
r Iterative knapsack method
def knapsack_iterative_int ( items , maxweight ) : values = [ t [ 0 ] for t in items ] weights = [ t [ 1 ] for t in items ] maxsize = maxweight + 1 # Sparse representation seems better dpmat = defaultdict ( lambda : defaultdict ( lambda : np . inf ) ) kmat = defaultdict ( lambda : defaultdict ( lambda : False ) ) idx_subset = [ ] # NOQA for w in range ( maxsize ) : dpmat [ 0 ] [ w ] = 0 # For each item consider to include it or not for idx in range ( len ( items ) ) : item_val = values [ idx ] item_weight = weights [ idx ] # consider at each possible bag size for w in range ( maxsize ) : valid_item = item_weight <= w if idx > 0 : prev_val = dpmat [ idx - 1 ] [ w ] prev_noitem_val = dpmat [ idx - 1 ] [ w - item_weight ] else : prev_val = 0 prev_noitem_val = 0 withitem_val = item_val + prev_noitem_val more_valuable = withitem_val > prev_val if valid_item and more_valuable : dpmat [ idx ] [ w ] = withitem_val kmat [ idx ] [ w ] = True else : dpmat [ idx ] [ w ] = prev_val kmat [ idx ] [ w ] = False # Trace backwards to get the items used in the solution K = maxweight for idx in reversed ( range ( len ( items ) ) ) : if kmat [ idx ] [ K ] : idx_subset . append ( idx ) K = K - weights [ idx ] idx_subset = sorted ( idx_subset ) items_subset = [ items [ i ] for i in idx_subset ] total_value = dpmat [ len ( items ) - 1 ] [ maxweight ] return total_value , items_subset
9,374
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1400-L1477
[ "def", "render", "(", "self", ")", ":", "engine", "=", "Engine", "(", ")", "return", "engine", ".", "from_string", "(", "SUMMARY_TEMPLATE", ")", ".", "render", "(", "Context", "(", "self", ".", "__dict__", ")", ")" ]
Iterative knapsack method
def knapsack_iterative_numpy ( items , maxweight ) : #import numpy as np items = np . array ( items ) weights = items . T [ 1 ] # Find maximum decimal place (this problem is in NP) max_exp = max ( [ number_of_decimals ( w_ ) for w_ in weights ] ) coeff = 10 ** max_exp # Adjust weights to be integral weights = ( weights * coeff ) . astype ( np . int ) values = items . T [ 0 ] MAXWEIGHT = int ( maxweight * coeff ) W_SIZE = MAXWEIGHT + 1 dpmat = np . full ( ( len ( items ) , W_SIZE ) , np . inf ) kmat = np . full ( ( len ( items ) , W_SIZE ) , 0 , dtype = np . bool ) idx_subset = [ ] for w in range ( W_SIZE ) : dpmat [ 0 ] [ w ] = 0 for idx in range ( 1 , len ( items ) ) : item_val = values [ idx ] item_weight = weights [ idx ] for w in range ( W_SIZE ) : valid_item = item_weight <= w prev_val = dpmat [ idx - 1 ] [ w ] if valid_item : prev_noitem_val = dpmat [ idx - 1 ] [ w - item_weight ] withitem_val = item_val + prev_noitem_val more_valuable = withitem_val > prev_val else : more_valuable = False dpmat [ idx ] [ w ] = withitem_val if more_valuable else prev_val kmat [ idx ] [ w ] = more_valuable K = MAXWEIGHT for idx in reversed ( range ( 1 , len ( items ) ) ) : if kmat [ idx , K ] : idx_subset . append ( idx ) K = K - weights [ idx ] idx_subset = sorted ( idx_subset ) items_subset = [ items [ i ] for i in idx_subset ] total_value = dpmat [ len ( items ) - 1 ] [ MAXWEIGHT ] return total_value , items_subset
9,375
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1480-L1532
[ "def", "is_image_file_valid", "(", "file_path_name", ")", ":", "# Image.verify is only implemented for PNG images, and it only verifies", "# the CRC checksum in the image. The only way to check from within", "# Pillow is to load the image in a try/except and check the error. If", "# as much info as possible is from the image is needed,", "# ``ImageFile.LOAD_TRUNCATED_IMAGES=True`` needs to bet set and it", "# will attempt to parse as much as possible.", "try", ":", "with", "Image", ".", "open", "(", "file_path_name", ")", "as", "image", ":", "image", ".", "load", "(", ")", "except", "IOError", ":", "return", "False", "return", "True" ]
r non - optimal greedy version of knapsack algorithm does not sort input . Sort the input by largest value first if desired .
def knapsack_greedy ( items , maxweight ) : items_subset = [ ] total_weight = 0 total_value = 0 for item in items : value , weight = item [ 0 : 2 ] if total_weight + weight > maxweight : continue else : items_subset . append ( item ) total_weight += weight total_value += value return total_value , items_subset
9,376
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1548-L1587
[ "def", "save_matpower", "(", "self", ",", "fd", ")", ":", "from", "pylon", ".", "io", "import", "MATPOWERWriter", "MATPOWERWriter", "(", "self", ")", ".", "write", "(", "fd", ")" ]
N choose k
def choose ( n , k ) : import scipy . misc return scipy . misc . comb ( n , k , exact = True , repetition = False )
9,377
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1653-L1661
[ "def", "render_template", "(", "template_name", ",", "context", ",", "format", "=", "'png'", ",", "output", "=", "None", ",", "using", "=", "None", ",", "*", "*", "options", ")", ":", "# output stream, as required by casperjs_capture", "stream", "=", "BytesIO", "(", ")", "out_f", "=", "None", "# the suffix=.html is a hack for phantomjs which *will*", "# complain about not being able to open source file", "# unless it has a 'html' extension.", "with", "NamedTemporaryFile", "(", "suffix", "=", "'.html'", ")", "as", "render_file", ":", "template_content", "=", "render_to_string", "(", "template_name", ",", "context", ",", "using", "=", "using", ",", ")", "# now, we need to replace all occurences of STATIC_URL", "# with the corresponding file://STATIC_ROOT, but only", "# if STATIC_URL doesn't contain a public URI (like http(s))", "static_url", "=", "getattr", "(", "settings", ",", "'STATIC_URL'", ",", "''", ")", "if", "settings", ".", "STATIC_ROOT", "and", "static_url", "and", "not", "static_url", ".", "startswith", "(", "'http'", ")", ":", "template_content", "=", "template_content", ".", "replace", "(", "static_url", ",", "'file://%s'", "%", "settings", ".", "STATIC_ROOT", ")", "render_file", ".", "write", "(", "template_content", ".", "encode", "(", "'utf-8'", ")", ")", "# this is so that the temporary file actually gets filled", "# with the result.", "render_file", ".", "seek", "(", "0", ")", "casperjs_capture", "(", "stream", ",", "url", "=", "'file://%s'", "%", "render_file", ".", "name", ",", "*", "*", "options", ")", "# if no output was provided, use NamedTemporaryFile", "# (so it is an actual file) and return it (so that", "# after function ends, it gets automatically removed)", "if", "not", "output", ":", "out_f", "=", "NamedTemporaryFile", "(", ")", "else", ":", "# if output was provided, write the rendered", "# content to it", "out_f", "=", "open", "(", "output", ",", "'wb'", ")", "out_f", ".", "write", "(", "stream", ".", "getvalue", "(", ")", ")", 
"out_f", ".", "seek", "(", "0", ")", "# return the output if NamedTemporaryFile was used", "if", "not", "output", ":", "return", "out_f", "else", ":", "# otherwise, just close the file.", "out_f", ".", "close", "(", ")" ]
checks if floating point number are equal to a threshold
def almost_eq ( arr1 , arr2 , thresh = 1E-11 , ret_error = False ) : error = np . abs ( arr1 - arr2 ) passed = error < thresh if ret_error : return passed , error return passed
9,378
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2002-L2009
[ "def", "FromData", "(", "cls", ",", "stream", ",", "json_data", ",", "http", ",", "auto_transfer", "=", "None", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "info", "=", "json", ".", "loads", "(", "json_data", ")", "missing_keys", "=", "cls", ".", "_REQUIRED_SERIALIZATION_KEYS", "-", "set", "(", "info", ".", "keys", "(", ")", ")", "if", "missing_keys", ":", "raise", "exceptions", ".", "InvalidDataError", "(", "'Invalid serialization data, missing keys: %s'", "%", "(", "', '", ".", "join", "(", "missing_keys", ")", ")", ")", "if", "'total_size'", "in", "kwds", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot override total_size on serialized Upload'", ")", "upload", "=", "cls", ".", "FromStream", "(", "stream", ",", "info", "[", "'mime_type'", "]", ",", "total_size", "=", "info", ".", "get", "(", "'total_size'", ")", ",", "gzip_encoded", "=", "gzip_encoded", ",", "*", "*", "kwds", ")", "if", "isinstance", "(", "stream", ",", "io", ".", "IOBase", ")", "and", "not", "stream", ".", "seekable", "(", ")", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot restart resumable upload on non-seekable stream'", ")", "if", "auto_transfer", "is", "not", "None", ":", "upload", ".", "auto_transfer", "=", "auto_transfer", "else", ":", "upload", ".", "auto_transfer", "=", "info", "[", "'auto_transfer'", "]", "upload", ".", "strategy", "=", "RESUMABLE_UPLOAD", "upload", ".", "_Initialize", "(", "# pylint: disable=protected-access", "http", ",", "info", "[", "'url'", "]", ")", "upload", ".", "RefreshResumableUploadState", "(", ")", "upload", ".", "EnsureInitialized", "(", ")", "if", "upload", ".", "auto_transfer", ":", "upload", ".", "StreamInChunks", "(", ")", "return", "upload" ]
normalizes a numpy array from 0 to 1 based in its extent
def norm_zero_one ( array , dim = None ) : if not util_type . is_float ( array ) : array = array . astype ( np . float32 ) array_max = array . max ( dim ) array_min = array . min ( dim ) array_exnt = np . subtract ( array_max , array_min ) array_norm = np . divide ( np . subtract ( array , array_min ) , array_exnt ) return array_norm
9,379
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2078-L2109
[ "def", "get_colorized_contents", "(", "contents", ",", "color_config", ")", ":", "colorizer", "=", "color", ".", "EgColorizer", "(", "color_config", ")", "result", "=", "colorizer", ".", "colorize_text", "(", "contents", ")", "return", "result" ]
groups indicies of each item in groupid_list
def group_indices ( groupid_list ) : item_list = range ( len ( groupid_list ) ) grouped_dict = util_dict . group_items ( item_list , groupid_list ) # Sort by groupid for cache efficiency keys_ = list ( grouped_dict . keys ( ) ) try : keys = sorted ( keys_ ) except TypeError : # Python 3 does not allow sorting mixed types keys = util_list . sortedby2 ( keys_ , keys_ ) groupxs = util_dict . dict_take ( grouped_dict , keys ) return keys , groupxs
9,380
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2162-L2198
[ "def", "get_console", "(", "request", ",", "console_type", ",", "instance", ")", ":", "if", "console_type", "==", "'AUTO'", ":", "check_consoles", "=", "CONSOLES", "else", ":", "try", ":", "check_consoles", "=", "{", "console_type", ":", "CONSOLES", "[", "console_type", "]", "}", "except", "KeyError", ":", "msg", "=", "_", "(", "'Console type \"%s\" not supported.'", ")", "%", "console_type", "raise", "exceptions", ".", "NotAvailable", "(", "msg", ")", "# Ugly workaround due novaclient API change from 2.17 to 2.18.", "try", ":", "httpnotimplemented", "=", "nova_exception", ".", "HttpNotImplemented", "except", "AttributeError", ":", "httpnotimplemented", "=", "nova_exception", ".", "HTTPNotImplemented", "for", "con_type", ",", "api_call", "in", "check_consoles", ".", "items", "(", ")", ":", "try", ":", "console", "=", "api_call", "(", "request", ",", "instance", ".", "id", ")", "# If not supported, don't log it to avoid lot of errors in case", "# of AUTO.", "except", "httpnotimplemented", ":", "continue", "except", "Exception", ":", "LOG", ".", "debug", "(", "'Console not available'", ",", "exc_info", "=", "True", ")", "continue", "if", "con_type", "==", "'SERIAL'", ":", "console_url", "=", "console", ".", "url", "else", ":", "console_url", "=", "\"%s&%s(%s)\"", "%", "(", "console", ".", "url", ",", "urlencode", "(", "{", "'title'", ":", "getattr", "(", "instance", ",", "\"name\"", ",", "\"\"", ")", "}", ")", ",", "instance", ".", "id", ")", "return", "(", "con_type", ",", "console_url", ")", "raise", "exceptions", ".", "NotAvailable", "(", "_", "(", "'No available console found.'", ")", ")" ]
Ungroups items returning a generator . Note that this is much slower than the list version and is not gaurenteed to have better memory usage .
def ungroup_gen ( grouped_items , groupxs , fill = None ) : import utool as ut # Determine the number of items if unknown #maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs] #maxval = max(maxpergroup) if len(maxpergroup) else 0 minpergroup = [ min ( xs ) if len ( xs ) else 0 for xs in groupxs ] minval = min ( minpergroup ) if len ( minpergroup ) else 0 flat_groupx = ut . flatten ( groupxs ) sortx = ut . argsort ( flat_groupx ) # Indicates the index being yeilded groupx_sorted = ut . take ( flat_groupx , sortx ) flat_items = ut . iflatten ( grouped_items ) # Storage for data weiting to be yeilded toyeild = { } items_yeilded = 0 # Indicates the index we are curently yeilding current_index = 0 # Determine where fills need to happen num_fills_before = [ minval ] + ( np . diff ( groupx_sorted ) - 1 ) . tolist ( ) + [ 0 ] # Check if there are fills before the first item fills = num_fills_before [ items_yeilded ] if fills > 0 : for _ in range ( fills ) : yield None current_index += 1 # Yield items as possible for yeild_at , item in zip ( flat_groupx , flat_items ) : if yeild_at > current_index : toyeild [ yeild_at ] = item elif yeild_at == current_index : # When we find the next element to yeild yield item current_index += 1 items_yeilded += 1 # Check if there are fills before the next item fills = num_fills_before [ items_yeilded ] if fills > 0 : for _ in range ( fills ) : yield None current_index += 1 # Now yield everything that came before this while current_index in toyeild : item = toyeild . pop ( current_index ) yield item current_index += 1 items_yeilded += 1 # Check if there are fills before the next item fills = num_fills_before [ items_yeilded ] if fills > 0 : for _ in range ( fills ) : yield None current_index += 1
9,381
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2306-L2406
[ "def", "cancelRealTimeBars", "(", "self", ",", "bars", ":", "RealTimeBarList", ")", ":", "self", ".", "client", ".", "cancelRealTimeBars", "(", "bars", ".", "reqId", ")", "self", ".", "wrapper", ".", "endSubscription", "(", "bars", ")" ]
Ungroups unique items to correspond to original non - unique list
def ungroup_unique ( unique_items , groupxs , maxval = None ) : if maxval is None : maxpergroup = [ max ( xs ) if len ( xs ) else 0 for xs in groupxs ] maxval = max ( maxpergroup ) if len ( maxpergroup ) else 0 ungrouped_items = [ None ] * ( maxval + 1 ) for item , xs in zip ( unique_items , groupxs ) : for x in xs : ungrouped_items [ x ] = item return ungrouped_items
9,382
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2409-L2443
[ "def", "ParseDownloadsRow", "(", "self", ",", "parser_mediator", ",", "query", ",", "row", ",", "*", "*", "unused_kwargs", ")", ":", "query_hash", "=", "hash", "(", "query", ")", "event_data", "=", "FirefoxDownloadEventData", "(", ")", "event_data", ".", "full_path", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'target'", ")", "event_data", ".", "mime_type", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'mimeType'", ")", "event_data", ".", "name", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'name'", ")", "event_data", ".", "offset", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'id'", ")", "event_data", ".", "query", "=", "query", "event_data", ".", "received_bytes", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'currBytes'", ")", "event_data", ".", "referrer", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'referrer'", ")", "event_data", ".", "temporary_location", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'tempPath'", ")", "event_data", ".", "total_bytes", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'maxBytes'", ")", "event_data", ".", "url", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'source'", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'startTime'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTimeInMicroseconds", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_START", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'endTime'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", 
"PosixTimeInMicroseconds", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_END", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Edit distance algorithm . String1 and string2 can be either strings or lists of strings
def edit_distance ( string1 , string2 ) : import utool as ut try : import Levenshtein except ImportError as ex : ut . printex ( ex , 'pip install python-Levenshtein' ) raise #np.vectorize(Levenshtein.distance, [np.int]) #vec_lev = np.frompyfunc(Levenshtein.distance, 2, 1) #return vec_lev(string1, string2) import utool as ut isiter1 = ut . isiterable ( string1 ) isiter2 = ut . isiterable ( string2 ) strs1 = string1 if isiter1 else [ string1 ] strs2 = string2 if isiter2 else [ string2 ] distmat = [ [ Levenshtein . distance ( str1 , str2 ) for str2 in strs2 ] for str1 in strs1 ] # broadcast if not isiter2 : distmat = ut . take_column ( distmat , 0 ) if not isiter1 : distmat = distmat [ 0 ] return distmat
9,383
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2446-L2499
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n 
/ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", "destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
r Standardizes a boolean expression into an or - ing of and - ed variables
def standardize_boolexpr ( boolexpr_ , parens = False ) : import utool as ut import re onlyvars = boolexpr_ onlyvars = re . sub ( '\\bnot\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\band\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\bor\\b' , '' , onlyvars ) onlyvars = re . sub ( '\\(' , '' , onlyvars ) onlyvars = re . sub ( '\\)' , '' , onlyvars ) varnames = ut . remove_doublspaces ( onlyvars ) . strip ( ) . split ( ' ' ) varied_dict = { var : [ True , False ] for var in varnames } bool_states = ut . all_dict_combinations ( varied_dict ) outputs = [ eval ( boolexpr_ , state . copy ( ) , state . copy ( ) ) for state in bool_states ] true_states = ut . compress ( bool_states , outputs ) true_tuples = ut . take_column ( true_states , varnames ) true_cases = [ str ( '' . join ( [ str ( int ( t ) ) for t in tup ] ) ) for tup in true_tuples ] # Convert to binary ones_bin = [ int ( x , 2 ) for x in true_cases ] #ones_str = [str(x) for x in true_cases] from quine_mccluskey . qm import QuineMcCluskey qm = QuineMcCluskey ( ) result = qm . simplify ( ones = ones_bin , num_bits = len ( varnames ) ) #result = qm.simplify_los(ones=ones_str, num_bits=len(varnames)) grouped_terms = [ dict ( ut . group_items ( varnames , rs ) ) for rs in result ] def parenjoin ( char , list_ ) : if len ( list_ ) == 0 : return '' else : if parens : return '(' + char . join ( list_ ) + ')' else : return char . join ( list_ ) if parens : expanded_terms = [ ( term . get ( '1' , [ ] ) + [ '(not ' + b + ')' for b in term . get ( '0' , [ ] ) ] + [ parenjoin ( ' ^ ' , term . get ( '^' , [ ] ) ) , parenjoin ( ' ~ ' , term . get ( '~' , [ ] ) ) , ] ) for term in grouped_terms ] else : expanded_terms = [ ( term . get ( '1' , [ ] ) + [ 'not ' + b for b in term . get ( '0' , [ ] ) ] + [ parenjoin ( ' ^ ' , term . get ( '^' , [ ] ) ) , parenjoin ( ' ~ ' , term . 
get ( '~' , [ ] ) ) , ] ) for term in grouped_terms ] final_terms = [ [ t for t in term if t ] for term in expanded_terms ] products = [ parenjoin ( ' and ' , [ f for f in form if f ] ) for form in final_terms ] final_expr = ' or ' . join ( products ) return final_expr
9,384
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2545-L2629
[ "def", "_partial_fit", "(", "model_and_meta", ",", "X", ",", "y", ",", "fit_params", ")", ":", "with", "log_errors", "(", ")", ":", "start", "=", "time", "(", ")", "model", ",", "meta", "=", "model_and_meta", "if", "len", "(", "X", ")", ":", "model", "=", "deepcopy", "(", "model", ")", "model", ".", "partial_fit", "(", "X", ",", "y", ",", "*", "*", "(", "fit_params", "or", "{", "}", ")", ")", "meta", "=", "dict", "(", "meta", ")", "meta", "[", "\"partial_fit_calls\"", "]", "+=", "1", "meta", "[", "\"partial_fit_time\"", "]", "=", "time", "(", ")", "-", "start", "return", "model", ",", "meta" ]
r Runs a task that takes some time
def expensive_task_gen ( num = 8700 ) : import utool as ut #time_list = [] for x in range ( 0 , num ) : with ut . Timer ( verbose = False ) as t : ut . is_prime ( x ) yield t . ellapsed
9,385
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2751-L2795
[ "def", "_parse_this_box", "(", "self", ",", "fptr", ",", "box_id", ",", "start", ",", "num_bytes", ")", ":", "try", ":", "parser", "=", "_BOX_WITH_ID", "[", "box_id", "]", ".", "parse", "except", "KeyError", ":", "# We don't recognize the box ID, so create an UnknownBox and be", "# done with it.", "msg", "=", "(", "'Unrecognized box ({box_id}) encountered at byte offset '", "'{offset}.'", ")", "msg", "=", "msg", ".", "format", "(", "box_id", "=", "box_id", ",", "offset", "=", "fptr", ".", "tell", "(", ")", "-", "8", ")", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ")", "box", "=", "UnknownBox", "(", "box_id", ",", "offset", "=", "start", ",", "length", "=", "num_bytes", ",", "longname", "=", "'Unknown'", ")", "return", "box", "try", ":", "box", "=", "parser", "(", "fptr", ",", "start", ",", "num_bytes", ")", "except", "ValueError", "as", "err", ":", "msg", "=", "(", "\"Encountered an unrecoverable ValueError while parsing a \"", "\"{box_id} box at byte offset {offset}. The original error \"", "\"message was \\\"{original_error_message}\\\".\"", ")", "msg", "=", "msg", ".", "format", "(", "box_id", "=", "_BOX_WITH_ID", "[", "box_id", "]", ".", "longname", ",", "offset", "=", "start", ",", "original_error_message", "=", "str", "(", "err", ")", ")", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ")", "box", "=", "UnknownBox", "(", "box_id", ".", "decode", "(", "'utf-8'", ")", ",", "length", "=", "num_bytes", ",", "offset", "=", "start", ",", "longname", "=", "'Unknown'", ")", "return", "box" ]
Computes all the integer factors of the number n
def factors ( n ) : return set ( reduce ( list . __add__ , ( [ i , n // i ] for i in range ( 1 , int ( n ** 0.5 ) + 1 ) if n % i == 0 ) ) )
9,386
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2801-L2817
[ "def", "_build_job_meta", "(", "cls", ",", "job_dir", ")", ":", "meta_file", "=", "os", ".", "path", ".", "join", "(", "job_dir", ",", "JOB_META_FILE", ")", "meta", "=", "parse_json", "(", "meta_file", ")", "if", "not", "meta", ":", "job_name", "=", "job_dir", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "user", "=", "os", ".", "environ", ".", "get", "(", "\"USER\"", ",", "None", ")", "meta", "=", "{", "\"job_id\"", ":", "job_name", ",", "\"job_name\"", ":", "job_name", ",", "\"user\"", ":", "user", ",", "\"type\"", ":", "\"ray\"", ",", "\"start_time\"", ":", "os", ".", "path", ".", "getctime", "(", "job_dir", ")", ",", "\"end_time\"", ":", "None", ",", "\"best_trial_id\"", ":", "None", ",", "}", "if", "meta", ".", "get", "(", "\"start_time\"", ",", "None", ")", ":", "meta", "[", "\"start_time\"", "]", "=", "timestamp2date", "(", "meta", "[", "\"start_time\"", "]", ")", "return", "meta" ]
First creates a map with all master proteins with data then outputs protein data dicts for rows of a tsv . If a pool is given then only output for that pool will be shown in the protein table .
def add_protein_data ( proteins , pgdb , headerfields , genecentric = False , pool_to_output = False ) : proteindata = create_featuredata_map ( pgdb , genecentric = genecentric , psm_fill_fun = add_psms_to_proteindata , pgene_fill_fun = add_protgene_to_protdata , count_fun = count_peps_psms , pool_to_output = pool_to_output , get_uniques = True ) dataget_fun = { True : get_protein_data_genecentric , False : get_protein_data_pgrouped } [ genecentric is not False ] firstfield = prottabledata . ACCESSIONS [ genecentric ] for protein in proteins : outprotein = { k : v for k , v in protein . items ( ) } outprotein [ firstfield ] = outprotein . pop ( prottabledata . HEADER_PROTEIN ) protein_acc = protein [ prottabledata . HEADER_PROTEIN ] outprotein . update ( dataget_fun ( proteindata , protein_acc , headerfields ) ) outprotein = { k : str ( v ) for k , v in outprotein . items ( ) } yield outprotein
9,387
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/info.py#L13-L34
[ "def", "remove_handlers_bound_to_instance", "(", "self", ",", "obj", ")", ":", "for", "handler", "in", "self", ".", "handlers", ":", "if", "handler", ".", "im_self", "==", "obj", ":", "self", "-=", "handler" ]
Parses protein data for a certain protein into tsv output dictionary
def get_protein_data_pgrouped ( proteindata , p_acc , headerfields ) : report = get_protein_data_base ( proteindata , p_acc , headerfields ) return get_cov_protnumbers ( proteindata , p_acc , report )
9,388
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/info.py#L41-L45
[ "def", "get_hyperparameter_configurations", "(", "self", ",", "num", ",", "r", ",", "searchspace_json", ",", "random_state", ")", ":", "# pylint: disable=invalid-name", "global", "_KEY", "# pylint: disable=global-statement", "assert", "self", ".", "i", "==", "0", "hyperparameter_configs", "=", "dict", "(", ")", "for", "_", "in", "range", "(", "num", ")", ":", "params_id", "=", "create_bracket_parameter_id", "(", "self", ".", "bracket_id", ",", "self", ".", "i", ")", "params", "=", "json2paramater", "(", "searchspace_json", ",", "random_state", ")", "params", "[", "_KEY", "]", "=", "r", "hyperparameter_configs", "[", "params_id", "]", "=", "params", "self", ".", "_record_hyper_configs", "(", "hyperparameter_configs", ")", "return", "[", "[", "key", ",", "value", "]", "for", "key", ",", "value", "in", "hyperparameter_configs", ".", "items", "(", ")", "]" ]
Get keys from a namespace
def keys ( self , namespace , prefix = None , limit = None , offset = None ) : params = [ namespace ] query = 'SELECT key FROM gauged_keys WHERE namespace = %s' if prefix is not None : query += ' AND key LIKE %s' params . append ( prefix + '%' ) if limit is not None : query += ' LIMIT %s' params . append ( limit ) if offset is not None : query += ' OFFSET %s' params . append ( offset ) cursor = self . cursor cursor . execute ( query , params ) return [ key for key , in cursor ]
9,389
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L25-L40
[ "def", "find_best_frametype", "(", "channel", ",", "start", ",", "end", ",", "frametype_match", "=", "None", ",", "allow_tape", "=", "True", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "try", ":", "return", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'error'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "RuntimeError", ":", "# gaps (or something else went wrong)", "ftout", "=", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "return_all", "=", "True", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'ignore'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "try", ":", "if", "isinstance", "(", "ftout", ",", "dict", ")", ":", "return", "{", "key", ":", "ftout", "[", "key", "]", "[", "0", "]", "for", "key", "in", "ftout", "}", "return", "ftout", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot find any valid frametypes for channel(s)\"", ")" ]
Get the block identified by namespace offset key and value
def get_block ( self , namespace , offset , key ) : cursor = self . cursor cursor . execute ( 'SELECT data, flags FROM gauged_data ' 'WHERE namespace = %s AND "offset" = %s AND key = %s' , ( namespace , offset , key ) ) row = cursor . fetchone ( ) return ( None , None ) if row is None else row
9,390
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L63-L71
[ "def", "deletecols", "(", "X", ",", "cols", ")", ":", "if", "isinstance", "(", "cols", ",", "str", ")", ":", "cols", "=", "cols", ".", "split", "(", "','", ")", "retain", "=", "[", "n", "for", "n", "in", "X", ".", "dtype", ".", "names", "if", "n", "not", "in", "cols", "]", "if", "len", "(", "retain", ")", ">", "0", ":", "return", "X", "[", "retain", "]", "else", ":", "return", "None" ]
Get the minimum and maximum block offset for the specified namespace
def block_offset_bounds ( self , namespace ) : cursor = self . cursor cursor . execute ( 'SELECT MIN("offset"), MAX("offset") ' 'FROM gauged_statistics WHERE namespace = %s' , ( namespace , ) ) return cursor . fetchone ( )
9,391
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L128-L135
[ "def", "exit", "(", "self", ",", "code", "=", "None", ",", "msg", "=", "None", ")", ":", "if", "code", "is", "None", ":", "code", "=", "self", ".", "tcex", ".", "exit_code", "if", "code", "==", "3", ":", "self", ".", "tcex", ".", "log", ".", "info", "(", "u'Changing exit code from 3 to 0.'", ")", "code", "=", "0", "# playbooks doesn't support partial failure", "elif", "code", "not", "in", "[", "0", ",", "1", "]", ":", "code", "=", "1", "self", ".", "tcex", ".", "exit", "(", "code", ",", "msg", ")" ]
Insert a timestamp to keep track of the current writer position
def set_writer_position ( self , name , timestamp ) : execute = self . cursor . execute execute ( 'DELETE FROM gauged_writer_history WHERE id = %s' , ( name , ) ) execute ( 'INSERT INTO gauged_writer_history (id, timestamp) ' 'VALUES (%s, %s)' , ( name , timestamp , ) )
9,392
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L161-L166
[ "def", "rc_notfound", "(", "cls", ",", "interface_type", ",", "resource_name", "=", "None", ")", ":", "msg", "=", "\"Resource class for %s not provided and default not found.\"", "%", "interface_type", "if", "resource_name", ":", "msg", "=", "\"Could not parse '%s'. %s\"", "%", "(", "resource_name", ",", "msg", ")", "return", "cls", "(", "msg", ")" ]
Add cached values for the specified date range and query
def add_cache ( self , namespace , key , query_hash , length , cache ) : start = 0 bulk_insert = self . bulk_insert cache_len = len ( cache ) row = '(%s,%s,%s,%s,%s,%s)' query = 'INSERT INTO gauged_cache ' '(namespace, key, "hash", length, start, value) VALUES ' execute = self . cursor . execute query_hash = self . psycopg2 . Binary ( query_hash ) while start < cache_len : rows = cache [ start : start + bulk_insert ] params = [ ] for timestamp , value in rows : params . extend ( ( namespace , key , query_hash , length , timestamp , value ) ) insert = ( row + ',' ) * ( len ( rows ) - 1 ) + row execute ( query + insert , params ) start += bulk_insert self . db . commit ( )
9,393
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L249-L268
[ "def", "fixpairs", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fixpairs", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "pairsfile", ",", "sep", ",", "sd", "=", "args", "newpairsfile", "=", "pairsfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".new.pairs\"", "sep", "=", "int", "(", "sep", ")", "sd", "=", "int", "(", "sd", ")", "p", "=", "PairsFile", "(", "pairsfile", ")", "p", ".", "fixLibraryStats", "(", "sep", ",", "sd", ")", "p", ".", "write", "(", "newpairsfile", ")" ]
Return a dict of environment variables required to run a service under faketime .
def get_environment_vars ( filename ) : if sys . platform == "linux" or sys . platform == "linux2" : return { 'LD_PRELOAD' : path . join ( LIBFAKETIME_DIR , "libfaketime.so.1" ) , 'FAKETIME_SKIP_CMDS' : 'nodejs' , # node doesn't seem to work in the current version. 'FAKETIME_TIMESTAMP_FILE' : filename , } elif sys . platform == "darwin" : return { 'DYLD_INSERT_LIBRARIES' : path . join ( LIBFAKETIME_DIR , "libfaketime.1.dylib" ) , 'DYLD_FORCE_FLAT_NAMESPACE' : '1' , 'FAKETIME_TIMESTAMP_FILE' : filename , } else : raise RuntimeError ( "libfaketime does not support '{}' platform" . format ( sys . platform ) )
9,394
https://github.com/crdoconnor/faketime/blob/6e81ca070c0e601a52507b945ed45d5d42576b21/faketime/__init__.py#L9-L24
[ "def", "assume", "(", "self", ",", "other", ")", ":", "self", ".", "_arch", "=", "other", ".", "_arch", "self", ".", "_bits", "=", "other", ".", "_bits", "self", ".", "_endian", "=", "other", ".", "_endian", "self", ".", "_mode", "=", "other", ".", "_mode" ]
Change the time of a process or group of processes by writing a new time to the time file .
def change_time ( filename , newtime ) : with open ( filename , "w" ) as faketimetxt_handle : faketimetxt_handle . write ( "@" + newtime . strftime ( "%Y-%m-%d %H:%M:%S" ) )
9,395
https://github.com/crdoconnor/faketime/blob/6e81ca070c0e601a52507b945ed45d5d42576b21/faketime/__init__.py#L27-L30
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Filters unique peptides from multiple Percolator output XML files . Takes a dir with a set of XMLs a score to filter on and a namespace . Outputs an ElementTree .
def filter_unique_peptides ( peptides , score , ns ) : scores = { 'q' : 'q_value' , 'pep' : 'pep' , 'p' : 'p_value' , 'svm' : 'svm_score' } highest = { } for el in peptides : featscore = float ( el . xpath ( 'xmlns:%s' % scores [ score ] , namespaces = ns ) [ 0 ] . text ) seq = reader . get_peptide_seq ( el , ns ) if seq not in highest : highest [ seq ] = { 'pep_el' : formatting . stringify_strip_namespace_declaration ( el , ns ) , 'score' : featscore } if score == 'svm' : # greater than score is accepted if featscore > highest [ seq ] [ 'score' ] : highest [ seq ] = { 'pep_el' : formatting . stringify_strip_namespace_declaration ( el , ns ) , 'score' : featscore } else : # lower than score is accepted if featscore < highest [ seq ] [ 'score' ] : highest [ seq ] = { 'pep_el' : formatting . stringify_strip_namespace_declaration ( el , ns ) , 'score' : featscore } formatting . clear_el ( el ) for pep in list ( highest . values ( ) ) : yield pep [ 'pep_el' ]
9,396
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/pycolator/filters.py#L102-L136
[ "def", "_update_simulation_start", "(", "self", ",", "simulation_start", ")", ":", "self", ".", "simulation_start", "=", "simulation_start", "if", "self", ".", "simulation_duration", "is", "not", "None", "and", "self", ".", "simulation_start", "is", "not", "None", ":", "self", ".", "simulation_end", "=", "self", ".", "simulation_start", "+", "self", ".", "simulation_duration", "self", ".", "_update_simulation_start_cards", "(", ")" ]
Import a module or a typename within a module from its name .
def import_symbol(name=None, path=None, typename=None, base_path=None):
    """Import a module, or a name within a module, and return it.

    `typename` and `base_path` act as fallbacks for `name` and `path`
    respectively: the primary argument is used when it is truthy,
    otherwise the fallback is passed to the underlying `_import` helper.
    """
    target = name or typename
    location = path or base_path
    _, symbol = _import(target, location)
    return symbol
9,397
https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/importer.py#L35-L47
[ "def", "reset", "(", "self", ")", ":", "status", "=", "self", ".", "m_objPCANBasic", ".", "Reset", "(", "self", ".", "m_PcanHandle", ")", "return", "status", "==", "PCAN_ERROR_OK" ]
r Writes a registry script to update the PATH variable into the sync registry
def add_to_win32_PATH(script_fpath, *add_path_list):
    r"""Writes a Windows registry (.reg) script that appends the given
    directories to the machine-wide PATH variable.

    Args:
        script_fpath (str): where the .reg script is written; a user with
            admin rights must run it afterwards (see final print).
        *add_path_list: directories to append to PATH.

    Note: Windows-only; relies on the ``utool`` package for file helpers.
    """
    import utool as ut
    write_dir = dirname(script_fpath)
    # Registry key holding the machine-wide environment variables.
    key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
    # REG_EXPAND_SZ: expandable string type, so %VAR% references keep working.
    rtype = 'REG_EXPAND_SZ'
    # Read current PATH values
    win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
    # Append the new entries, dropping duplicates while preserving order.
    new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
    #new_path_list = unique_ordered(win_pathlist, rob_pathlist)
    print('\n'.join(new_path_list))
    # NOTE(review): `pathsep` is presumably `os.path.pathsep` imported at
    # module level -- confirm against the file's imports.
    pathtxt = pathsep.join(new_path_list)
    varval_list = [('Path', pathtxt)]
    regfile_str = make_regfile_str(key, varval_list, rtype)
    # Open the output directory for the user before writing the script.
    ut.view_directory(write_dir)
    print(regfile_str)
    ut.writeto(script_fpath, regfile_str, mode='wb')
    print('Please have an admin run the script. You may need to restart')
9,398
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_win32.py#L60-L91
[ "def", "strategy", "(", "self", ")", ":", "max_codepoint", "=", "None", "if", "self", ".", "_unicode", "else", "127", "strategies", "=", "[", "]", "if", "self", ".", "_negate", ":", "if", "self", ".", "_categories", "or", "self", ".", "_whitelist_chars", ":", "strategies", ".", "append", "(", "hs", ".", "characters", "(", "blacklist_categories", "=", "self", ".", "_categories", "|", "set", "(", "[", "'Cc'", ",", "'Cs'", "]", ")", ",", "blacklist_characters", "=", "self", ".", "_whitelist_chars", ",", "max_codepoint", "=", "max_codepoint", ",", ")", ")", "if", "self", ".", "_blacklist_chars", ":", "strategies", ".", "append", "(", "hs", ".", "sampled_from", "(", "list", "(", "self", ".", "_blacklist_chars", "-", "self", ".", "_whitelist_chars", ")", ")", ")", "else", ":", "if", "self", ".", "_categories", "or", "self", ".", "_blacklist_chars", ":", "strategies", ".", "append", "(", "hs", ".", "characters", "(", "whitelist_categories", "=", "self", ".", "_categories", ",", "blacklist_characters", "=", "self", ".", "_blacklist_chars", ",", "max_codepoint", "=", "max_codepoint", ",", ")", ")", "if", "self", ".", "_whitelist_chars", ":", "strategies", ".", "append", "(", "hs", ".", "sampled_from", "(", "list", "(", "self", ".", "_whitelist_chars", "-", "self", ".", "_blacklist_chars", ")", ")", ")", "return", "hs", ".", "one_of", "(", "*", "strategies", ")", "if", "strategies", "else", "hs", ".", "just", "(", "u''", ")" ]
r Zips elementwise pairs between list1 and list2 into a dictionary . Values from list2 can be broadcast onto list1 .
def dzip(list1, list2):
    """Build a dict pairing each element of `list1` with the matching
    element of `list2`.

    A one-element `list2` is broadcast across every element of a longer
    `list1`.  As a corner case, an empty `list1` with a one-element
    `list2` yields an empty dict.  Inputs may be arbitrary iterables;
    they are materialized first so their lengths can be compared.

    Raises:
        ValueError: if the (post-broadcast) lengths differ.
    """
    # Materialize inputs that do not support len().
    try:
        len(list1)
    except TypeError:
        list1 = list(list1)
    try:
        len(list2)
    except TypeError:
        list2 = list(list2)

    n1, n2 = len(list1), len(list2)
    if n1 == 0 and n2 == 1:
        # Broadcasting a value onto an empty key list produces nothing.
        list2 = []
    elif n2 == 1 and n1 > 1:
        # Broadcast the lone value across every key.
        list2 = list2 * n1

    if len(list1) != len(list2):
        raise ValueError('out of alignment len(list1)=%r, len(list2)=%r' % (
            len(list1), len(list2)))
    return dict(zip(list1, list2))
9,399
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L25-L76
[ "def", "is_", "(", "self", ",", "state", ")", ":", "translator", "=", "self", ".", "_meta", "[", "'translator'", "]", "state", "=", "translator", ".", "translate", "(", "state", ")", "return", "self", ".", "actual_state", "==", "state" ]