query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
publish the return value of this function as a message from this endpoint
def publish(func):
    """Decorator: publish the return value of *func* as a message from this
    endpoint.

    The wrapped method's return value (a dict) becomes the outgoing payload;
    any ``'self'`` key is stripped before publishing. The wrapper always
    returns ``None`` and is tagged with ``is_publish = True`` so the framework
    can discover publishing endpoints.
    """
    @wraps(func)
    def publisher(self, *args, **kwargs):
        # Outgoing: the endpoint method builds the payload dict.
        payload = func(self, *args, **kwargs)
        # Never leak the instance reference into the published message.
        payload.pop('self', None)
        self._publish(func.__name__, payload)
        return None

    publisher.is_publish = True
    return publisher
8,900
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/services.py#L26-L40
[ "def", "merge_in_place", "(", "self", ",", "others", ")", ":", "new_model_names", "=", "[", "]", "for", "predictor", "in", "others", ":", "for", "model", "in", "predictor", ".", "class1_pan_allele_models", ":", "model_name", "=", "self", ".", "model_name", "(", "\"pan-class1\"", ",", "len", "(", "self", ".", "class1_pan_allele_models", ")", ")", "self", ".", "class1_pan_allele_models", ".", "append", "(", "model", ")", "row", "=", "pandas", ".", "Series", "(", "collections", ".", "OrderedDict", "(", "[", "(", "\"model_name\"", ",", "model_name", ")", ",", "(", "\"allele\"", ",", "\"pan-class1\"", ")", ",", "(", "\"config_json\"", ",", "json", ".", "dumps", "(", "model", ".", "get_config", "(", ")", ")", ")", ",", "(", "\"model\"", ",", "model", ")", ",", "]", ")", ")", ".", "to_frame", "(", ")", ".", "T", "self", ".", "_manifest_df", "=", "pandas", ".", "concat", "(", "[", "self", ".", "manifest_df", ",", "row", "]", ",", "ignore_index", "=", "True", ")", "new_model_names", ".", "append", "(", "model_name", ")", "for", "allele", "in", "predictor", ".", "allele_to_allele_specific_models", ":", "if", "allele", "not", "in", "self", ".", "allele_to_allele_specific_models", ":", "self", ".", "allele_to_allele_specific_models", "[", "allele", "]", "=", "[", "]", "current_models", "=", "self", ".", "allele_to_allele_specific_models", "[", "allele", "]", "for", "model", "in", "predictor", ".", "allele_to_allele_specific_models", "[", "allele", "]", ":", "model_name", "=", "self", ".", "model_name", "(", "allele", ",", "len", "(", "current_models", ")", ")", "row", "=", "pandas", ".", "Series", "(", "collections", ".", "OrderedDict", "(", "[", "(", "\"model_name\"", ",", "model_name", ")", ",", "(", "\"allele\"", ",", "allele", ")", ",", "(", "\"config_json\"", ",", "json", ".", "dumps", "(", "model", ".", "get_config", "(", ")", ")", ")", ",", "(", "\"model\"", ",", "model", ")", ",", "]", ")", ")", ".", "to_frame", "(", ")", ".", "T", "self", ".", "_manifest_df", "=", 
"pandas", ".", "concat", "(", "[", "self", ".", "manifest_df", ",", "row", "]", ",", "ignore_index", "=", "True", ")", "current_models", ".", "append", "(", "model", ")", "new_model_names", ".", "append", "(", "model_name", ")", "self", ".", "clear_cache", "(", ")", "return", "new_model_names" ]
use to request an api call from a specific endpoint
def request(func=None, timeout=600):
    """Decorator: request an API call from a specific endpoint.

    Supports both ``@request`` and ``@request(timeout=...)`` usage. The
    wrapped method returns a params dict; ``self``, ``entity`` and
    ``app_name`` are popped out of it, a fresh ``request_id`` is injected,
    and the call is dispatched via ``_send_request``. The wrapper is tagged
    with ``is_request = True`` for endpoint discovery.
    """
    if func is None:
        # Invoked as @request(timeout=...): return a decorator that
        # remembers the timeout.
        return partial(request, timeout=timeout)

    @wraps(func)
    def requester(self, *args, **kwargs):
        params = func(self, *args, **kwargs)
        # The params dict carries routing info; pull it out before sending.
        self = params.pop('self', None)
        entity = params.pop('entity', None)
        app_name = params.pop('app_name', None)
        request_id = unique_hex()
        params['request_id'] = request_id
        future = self._send_request(app_name, endpoint=func.__name__,
                                    entity=entity, params=params,
                                    timeout=timeout)
        return future

    requester.is_request = True
    return requester
8,901
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/services.py#L83-L102
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Serialize the given instance of Problem .
def serialize_problem(req, resp, problem):
    """Serialize the given instance of Problem onto *resp*.

    Negotiates between plain JSON and RFC 7807 problem+json; falls back to
    ``application/json`` when the client states no usable preference.
    """
    media_type = req.client_prefers(
        ('application/json', 'application/problem+json'))
    if media_type is None:
        # No Accept header (or neither type acceptable): default to JSON.
        media_type = 'application/json'
    body = problem.to_json()
    resp.data = body.encode('utf-8')
    resp.content_type = media_type
    # The body depends on the Accept header, so caches must key on it.
    resp.append_header('Vary', 'Accept')
8,902
https://github.com/grktsh/falcon-oas/blob/380921e82a50b565b3df6e494b06cc9dba961db7/src/falcon_oas/problems.py#L52-L62
[ "def", "_unstack_extension_series", "(", "series", ",", "level", ",", "fill_value", ")", ":", "# Implementation note: the basic idea is to", "# 1. Do a regular unstack on a dummy array of integers", "# 2. Followup with a columnwise take.", "# We use the dummy take to discover newly-created missing values", "# introduced by the reshape.", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "dummy_arr", "=", "np", ".", "arange", "(", "len", "(", "series", ")", ")", "# fill_value=-1, since we will do a series.values.take later", "result", "=", "_Unstacker", "(", "dummy_arr", ",", "series", ".", "index", ",", "level", "=", "level", ",", "fill_value", "=", "-", "1", ")", ".", "get_result", "(", ")", "out", "=", "[", "]", "values", "=", "extract_array", "(", "series", ",", "extract_numpy", "=", "False", ")", "for", "col", ",", "indices", "in", "result", ".", "iteritems", "(", ")", ":", "out", ".", "append", "(", "Series", "(", "values", ".", "take", "(", "indices", ".", "values", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "fill_value", ")", ",", "name", "=", "col", ",", "index", "=", "result", ".", "index", ")", ")", "return", "concat", "(", "out", ",", "axis", "=", "'columns'", ",", "copy", "=", "False", ",", "keys", "=", "result", ".", "columns", ")" ]
Fill function for create_featuredata_map
def add_psms_to_proteindata(proteindata, p_acc, pool, psmdata):
    """Fill function for create_featuredata_map.

    Records one PSM under ``proteindata[p_acc]['pools'][pool]``, creating the
    nested pool record on first use. ``psmdata[2]`` is the peptide sequence,
    ``psmdata[3]`` the PSM id. The protein entry ``proteindata[p_acc]`` must
    already exist.
    """
    seq = psmdata[2]
    psm_id = psmdata[3]
    # Lazily build the nested structure instead of catching KeyErrors.
    pools = proteindata[p_acc].setdefault('pools', {})
    poolinfo = pools.setdefault(
        pool, {'psms': set(), 'peptides': set(), 'unipeps': 0})
    poolinfo['psms'].add(psm_id)
    poolinfo['peptides'].add(seq)
8,903
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/proteindata.py#L27-L39
[ "def", "prep", "(", "config", "=", "None", ",", "path", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "parse", "(", ")", "if", "path", "is", "None", ":", "path", "=", "os", ".", "getcwd", "(", ")", "root", "=", "config", ".", "get", "(", "'root'", ",", "'path'", ")", "root", "=", "os", ".", "path", ".", "join", "(", "path", ",", "root", ")", "root", "=", "os", ".", "path", ".", "realpath", "(", "root", ")", "os", ".", "environ", "[", "'SCIDASH_HOME'", "]", "=", "root", "if", "sys", ".", "path", "[", "0", "]", "!=", "root", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "root", ")" ]
prints current stack
def print_traceback(with_colors=True):
    """Print the current call stack.

    When *with_colors* is true, tries to syntax-highlight the traceback with
    pygments; silently falls back to plain text if pygments is unavailable or
    highlighting fails.
    """
    import traceback
    frames = traceback.extract_stack()
    tbtext = ''.join(traceback.format_list(frames))
    if not with_colors:
        print(tbtext)
        return
    try:
        from pygments import highlight
        from pygments.lexers import get_lexer_by_name
        from pygments.formatters import TerminalFormatter
        lexer = get_lexer_by_name('pytb', stripall=True)
        formatter = TerminalFormatter(bg='dark')
        print(highlight(tbtext, lexer, formatter))
    except Exception:
        # pygments missing or broken: degrade to the uncolored text.
        print(tbtext)
8,904
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L40-L61
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "start", "=", "''", "tsmatch", "=", "compile", "(", "r'/(\\d+)-'", ")", ".", "search", "(", "imageUrl", ")", "if", "tsmatch", ":", "start", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "tsmatch", ".", "group", "(", "1", ")", ")", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "else", ":", "# There were only chapter 1, page 4 and 5 not matching when writing", "# this...", "start", "=", "'2015-04-11x'", "return", "start", "+", "\"-\"", "+", "pageUrl", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]" ]
Checks syntax and validity of a variable name
def is_valid_varname(varname):
    """Checks syntax and validity of a variable name.

    A valid name is a string that matches the module-level ``varname_regex``
    and is not a Python keyword.
    """
    if not isinstance(varname, six.string_types):
        return False
    matches_syntax = re.match(varname_regex, varname) is not None
    not_a_keyword = not keyword.iskeyword(varname)
    return matches_syntax and not_a_keyword
8,905
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L101-L109
[ "def", "transform", "(", "self", ",", "X", ")", ":", "assert", "np", ".", "shape", "(", "X", ")", "[", "0", "]", "==", "len", "(", "self", ".", "_weights", ")", ",", "(", "'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '", "'n_models={}, weights_len={}'", ".", "format", "(", "np", ".", "shape", "(", "X", ")", "[", "0", "]", ",", "len", "(", "self", ".", "_weights", ")", ")", ")", "blended_predictions", "=", "np", ".", "average", "(", "np", ".", "power", "(", "X", ",", "self", ".", "_power", ")", ",", "weights", "=", "self", ".", "_weights", ",", "axis", "=", "0", ")", "**", "(", "1.0", "/", "self", ".", "_power", ")", "return", "{", "'y_pred'", ":", "blended_predictions", "}" ]
returns execable python code that declares variables using keys and values
def execstr_dict(dict_, local_name=None, exclude_list=None, explicit=False):
    """Return executable Python code that declares variables using the keys
    and values of *dict_*.

    Args:
        dict_ (dict): mapping of variable names (strings) to values.
        local_name (str, optional): name of *dict_* in the caller's scope;
            inferred from the parent frame's locals when omitted.
        exclude_list (list, optional): fnmatch patterns for keys to skip
            (the inferred ``local_name`` is always appended to it).
        explicit (bool): when True, emit ``key = <repr of value>`` lines;
            when False, emit ``key = local_name[key]`` lookups instead.

    Returns:
        str: newline-joined assignment statements.
    """
    import utool as ut
    if explicit:
        expr_list = []
        for (key, val) in sorted(dict_.items()):
            assert isinstance(key, six.string_types), 'keys must be strings'
            expr_list.append('%s = %s' % (key, ut.repr2(val),))
        execstr = '\n'.join(expr_list)
        return execstr
    else:
        if local_name is None:
            # Magic way of getting the local name of dict_
            local_name = get_varname_from_locals(dict_, get_parent_frame().f_locals)
        try:
            if exclude_list is None:
                exclude_list = []
            assert isinstance(exclude_list, list)
            # Never emit an assignment that would shadow the dict itself.
            exclude_list.append(local_name)
            expr_list = []
            # NOTE(review): `type(dict)` below is likely a typo for
            # `type(dict_)` -- the message would always show <class 'dict'>.
            assert isinstance(dict_, dict), 'incorrect type type(dict_)=%r, dict_=%r' % (type(dict), dict_)
            for (key, val) in sorted(dict_.items()):
                assert isinstance(key, six.string_types), 'keys must be strings'
                # Skip keys that are not legal Python identifiers.
                if not is_valid_varname(key):
                    continue
                if not any((fnmatch.fnmatch(key, pat) for pat in exclude_list)):
                    expr = '%s = %s[%s]' % (key, local_name, ut.repr2(key))
                    expr_list.append(expr)
            execstr = '\n'.join(expr_list)
            return execstr
        except Exception as ex:
            locals_ = locals()
            ut.printex(ex, key_list=['locals_'])
            raise
8,906
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L112-L203
[ "def", "readAnnotations", "(", "self", ")", ":", "annot", "=", "self", ".", "read_annotation", "(", ")", "annot", "=", "np", ".", "array", "(", "annot", ")", "if", "(", "annot", ".", "shape", "[", "0", "]", "==", "0", ")", ":", "return", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", "ann_time", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "0", "]", ")", "ann_text", "=", "annot", "[", ":", ",", "2", "]", "ann_text_out", "=", "[", "\"\"", "for", "x", "in", "range", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", "]", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", ":", "ann_text_out", "[", "i", "]", "=", "self", ".", "_convert_string", "(", "ann_text", "[", "i", "]", ")", "if", "annot", "[", "i", ",", "1", "]", "==", "''", ":", "annot", "[", "i", ",", "1", "]", "=", "'-1'", "ann_duration", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "1", "]", ")", "return", "ann_time", "/", "10000000", ",", "ann_duration", ",", "np", ".", "array", "(", "ann_text_out", ")" ]
Modified from IPython . terminal . embed . embed so I can mess with stack_depth
def embed2(**kwargs):
    """Modified from IPython.terminal.embed.embed so I can mess with
    stack_depth.

    Opens an embedded IPython shell at the caller's frame (default
    ``stack_depth=2``), then restores any previously-active
    ``InteractiveShell`` singleton and the original ``sys.ps1``/``sys.ps2``
    prompts afterwards. Remaining *kwargs* are forwarded to
    ``InteractiveShellEmbed.instance``.
    """
    config = kwargs.get('config')
    header = kwargs.pop('header', u'')
    stack_depth = kwargs.pop('stack_depth', 2)
    compile_flags = kwargs.pop('compile_flags', None)
    import IPython
    from IPython.core.interactiveshell import InteractiveShell
    from IPython.terminal.embed import InteractiveShellEmbed
    if config is None:
        config = IPython.terminal.ipapp.load_default_config()
        config.InteractiveShellEmbed = config.TerminalInteractiveShell
        kwargs['config'] = config
    # save ps1/ps2 if defined
    ps1 = None
    ps2 = None
    try:
        ps1 = sys.ps1
        ps2 = sys.ps2
    except AttributeError:
        pass
    # save previous instance (InteractiveShell is a singleton; it must be
    # cleared before a new embedded shell can be instantiated)
    saved_shell_instance = InteractiveShell._instance
    if saved_shell_instance is not None:
        cls = type(saved_shell_instance)
        cls.clear_instance()
    shell = InteractiveShellEmbed.instance(**kwargs)
    shell(header=header, stack_depth=stack_depth, compile_flags=compile_flags)
    InteractiveShellEmbed.clear_instance()
    # restore previous instance across the whole MRO so every subclass sees
    # the original singleton again
    if saved_shell_instance is not None:
        cls = type(saved_shell_instance)
        cls.clear_instance()
        for subclass in cls._walk_mro():
            subclass._instance = saved_shell_instance
    if ps1 is not None:
        sys.ps1 = ps1
        sys.ps2 = ps2
8,907
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L559-L598
[ "def", "from_files", "(", "cls", ",", "files_to_sort", ",", "reader", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "satpy", ".", "readers", "import", "group_files", "file_groups", "=", "group_files", "(", "files_to_sort", ",", "reader", "=", "reader", ",", "*", "*", "kwargs", ")", "scenes", "=", "(", "Scene", "(", "filenames", "=", "fg", ")", "for", "fg", "in", "file_groups", ")", "return", "cls", "(", "scenes", ")" ]
Finds a local variable somewhere in the stack and returns the value
def search_stack_for_localvar(varname):
    """Finds a local variable somewhere in the stack and returns its value.

    Walks from the current frame up through its callers and returns the first
    local named *varname*; returns ``None`` (after reporting) when no frame
    defines it.
    """
    frame = inspect.currentframe()
    print(' * Searching parent frames for: ' + six.text_type(varname))
    depth = 0
    while frame.f_back is not None:
        if varname in frame.f_locals:
            print(' * Found in frame: ' + six.text_type(depth))
            return frame.f_locals[varname]
        depth += 1
        frame = frame.f_back
    print('... Found nothing in all ' + six.text_type(depth) + ' frames.')
    return None
8,908
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L684-L704
[ "def", "repair", "(", "self", ",", "volume_id_or_uri", ",", "timeout", "=", "-", "1", ")", ":", "data", "=", "{", "\"type\"", ":", "\"ExtraManagedStorageVolumePaths\"", ",", "\"resourceUri\"", ":", "self", ".", "_client", ".", "build_uri", "(", "volume_id_or_uri", ")", "}", "custom_headers", "=", "{", "'Accept-Language'", ":", "'en_US'", "}", "uri", "=", "self", ".", "URI", "+", "'/repair'", "return", "self", ".", "_client", ".", "create", "(", "data", ",", "uri", "=", "uri", ",", "timeout", "=", "timeout", ",", "custom_headers", "=", "custom_headers", ")" ]
Formats an exception with relevant info
def formatex(ex, msg='[!?] Caught exception', prefix=None, key_list=None,
             locals_=None, iswarning=False, tb=False, N=0, keys=None,
             colored=None):
    """Formats an exception with relevant info.

    Args:
        ex (Exception): the caught exception.
        msg (str): headline message.
        prefix (str, optional): line prefix; derived from the caller when None.
        key_list (list, optional): names of locals to include in the report.
        locals_ (dict, optional): locals mapping; taken from the parent frame
            when None.
        iswarning (bool): tag the report WARNING instead of EXCEPTION.
        tb (bool): include the formatted traceback.
        N (int): how many frames up to look for prefix/locals.
        keys: shorthand alias for *key_list*.
        colored (bool, optional): syntax-highlight the traceback.

    Returns:
        str: the formatted multi-line report.
    """
    # FIX: the default was the mutable `key_list=[]`, shared across calls;
    # use None as the sentinel and build a fresh list per call.
    if key_list is None:
        key_list = []
    # Get error prefix and local info
    if prefix is None:
        prefix = get_caller_prefix(aserror=True, N=N)
    if locals_ is None:
        locals_ = get_parent_frame(N=N).f_locals
    if keys is not None:
        # shorthand for key_list
        key_list = keys
    # build exception message
    errstr_list = []  # list of exception strings
    ex_tag = 'WARNING' if iswarning else 'EXCEPTION'
    errstr_list.append('<!!! %s !!!>' % ex_tag)
    if tb or FORCE_TB:
        tbtext = traceback.format_exc()
        if colored or COLORED_EXCEPTIONS:
            from utool import util_str
            tbtext = util_str.highlight_text(tbtext, lexer_name='pytb',
                                             stripall=True)
        errstr_list.append(tbtext)
    errstr_list.append(prefix + ' ' + six.text_type(msg) +
                       '\n%r: %s' % (type(ex), six.text_type(ex)))
    # Append "name = value" lines for each requested local.
    parse_locals_keylist(locals_, key_list, errstr_list, prefix)
    errstr_list.append('</!!! %s !!!>' % ex_tag)
    return '\n'.join(errstr_list)
8,909
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L1090-L1170
[ "def", "round", "(", "self", ",", "decimals", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "def", "_dict_round", "(", "df", ",", "decimals", ")", ":", "for", "col", ",", "vals", "in", "df", ".", "iteritems", "(", ")", ":", "try", ":", "yield", "_series_round", "(", "vals", ",", "decimals", "[", "col", "]", ")", "except", "KeyError", ":", "yield", "vals", "def", "_series_round", "(", "s", ",", "decimals", ")", ":", "if", "is_integer_dtype", "(", "s", ")", "or", "is_float_dtype", "(", "s", ")", ":", "return", "s", ".", "round", "(", "decimals", ")", "return", "s", "nv", ".", "validate_round", "(", "args", ",", "kwargs", ")", "if", "isinstance", "(", "decimals", ",", "(", "dict", ",", "Series", ")", ")", ":", "if", "isinstance", "(", "decimals", ",", "Series", ")", ":", "if", "not", "decimals", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "\"Index of decimals must be unique\"", ")", "new_cols", "=", "[", "col", "for", "col", "in", "_dict_round", "(", "self", ",", "decimals", ")", "]", "elif", "is_integer", "(", "decimals", ")", ":", "# Dispatch to Series.round", "new_cols", "=", "[", "_series_round", "(", "v", ",", "decimals", ")", "for", "_", ",", "v", "in", "self", ".", "iteritems", "(", ")", "]", "else", ":", "raise", "TypeError", "(", "\"decimals must be an integer, a dict-like or a \"", "\"Series\"", ")", "if", "len", "(", "new_cols", ")", ">", "0", ":", "return", "self", ".", "_constructor", "(", "concat", "(", "new_cols", ",", "axis", "=", "1", ")", ",", "index", "=", "self", ".", "index", ",", "columns", "=", "self", ".", "columns", ")", "else", ":", "return", "self" ]
For each key in keylist puts its value in locals into a stringlist
def parse_locals_keylist(locals_, key_list, strlist_=None, prefix=''):
    """For each key in *key_list*, put its value from *locals_* into a string
    list.

    Each key may be ``None`` (emits a blank line), a ``(func, name)`` tuple
    (emits ``func(name) = func(value)``), a variable name string, or a raw
    value (looked up by identity in *locals_*). Appends to *strlist_* in
    place when given, and also returns it.
    """
    from utool import util_str
    if strlist_ is None:
        strlist_ = []
    for key in key_list:
        try:
            if key is None:
                strlist_.append('')
            elif isinstance(key, tuple):
                # Given a tuple of information
                tup = key
                func, key_ = tup
                val = get_varval_from_locals(key_, locals_)
                funcvalstr = six.text_type(func(val))
                callname = util_str.get_callable_name(func)
                strlist_.append('%s %s(%s) = %s' % (prefix, callname, key_, funcvalstr))
            elif isinstance(key, six.string_types):
                # Try to infer print from variable name
                val = get_varval_from_locals(key, locals_)
                #valstr = util_str.truncate_str(repr(val), maxlen=200)
                valstr = util_str.truncate_str(util_str.repr2(val), maxlen=200)
                strlist_.append('%s %s = %s' % (prefix, key, valstr))
            else:
                # Try to infer print from variable value
                val = key
                typestr = repr(type(val))
                namestr = get_varname_from_locals(val, locals_)
                #valstr = util_str.truncate_str(repr(val), maxlen=200)
                valstr = util_str.truncate_str(util_str.repr2(val), maxlen=200)
                strlist_.append('%s %s %s = %s' % (prefix, typestr, namestr, valstr))
        except AssertionError as ex:
            # A failed lookup is reported inline rather than raised, so one
            # bad key cannot abort the rest of the report.
            strlist_.append(prefix + ' ' + six.text_type(ex) + ' (this likely due to a misformatted printex and is not related to the exception)')
    return strlist_
8,910
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dbg.py#L1265-L1331
[ "def", "permutation_entropy", "(", "x", ",", "n", ",", "tau", ")", ":", "PeSeq", "=", "[", "]", "Em", "=", "embed_seq", "(", "x", ",", "tau", ",", "n", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "Em", ")", ")", ":", "r", "=", "[", "]", "z", "=", "[", "]", "for", "j", "in", "range", "(", "0", ",", "len", "(", "Em", "[", "i", "]", ")", ")", ":", "z", ".", "append", "(", "Em", "[", "i", "]", "[", "j", "]", ")", "for", "j", "in", "range", "(", "0", ",", "len", "(", "Em", "[", "i", "]", ")", ")", ":", "z", ".", "sort", "(", ")", "r", ".", "append", "(", "z", ".", "index", "(", "Em", "[", "i", "]", "[", "j", "]", ")", ")", "z", "[", "z", ".", "index", "(", "Em", "[", "i", "]", "[", "j", "]", ")", "]", "=", "-", "1", "PeSeq", ".", "append", "(", "r", ")", "RankMat", "=", "[", "]", "while", "len", "(", "PeSeq", ")", ">", "0", ":", "RankMat", ".", "append", "(", "PeSeq", ".", "count", "(", "PeSeq", "[", "0", "]", ")", ")", "x", "=", "PeSeq", "[", "0", "]", "for", "j", "in", "range", "(", "0", ",", "PeSeq", ".", "count", "(", "PeSeq", "[", "0", "]", ")", ")", ":", "PeSeq", ".", "pop", "(", "PeSeq", ".", "index", "(", "x", ")", ")", "RankMat", "=", "numpy", ".", "array", "(", "RankMat", ")", "RankMat", "=", "numpy", ".", "true_divide", "(", "RankMat", ",", "RankMat", ".", "sum", "(", ")", ")", "EntropyMat", "=", "numpy", ".", "multiply", "(", "numpy", ".", "log2", "(", "RankMat", ")", ",", "RankMat", ")", "PE", "=", "-", "1", "*", "EntropyMat", ".", "sum", "(", ")", "return", "PE" ]
Determine the RDY value and set it . It can either be a static value , a callback , or None . If it's None , we'll calculate the value based on our limits and connection counts .
def __send_rdy(self, connection, command):
    """Determine the RDY value for *connection* and send it.

    ``original_rdy`` can be a static value, a callback, or None. If None,
    the value is calculated from ``max_in_flight`` and the node count
    (with an "election" giving RDY=1 to a random sleeping connection when
    there are more nodes than max_in_flight). The final value is clamped
    to the consumer and server limits. Returns the RDY value sent (0 means
    this connection goes to sleep).
    """
    if self.__consumer.original_rdy is None:
        node_count = self.__consumer.get_node_count_for_topic(
            connection.context.topic)
        self.__logger_rdy.debug("Calculating RDY: max_in_flight=(%d) "
                                "node_count=(%d)",
                                self.__consumer.max_in_flight, node_count)
        if self.__consumer.max_in_flight >= node_count:
            # Calculate the RDY based on the max_in_flight and total number
            # of servers. We always round up, or else we'd run the risk of
            # not facilitating some servers.
            rdy_this = int(math.ceil(
                           float(self.__consumer.max_in_flight) /
                           float(node_count)))
            self.__logger_rdy.debug("Assigning RDY based on max_in_flight "
                                    "(%d) and node count (%d) (optimal): "
                                    "(%d)",
                                    self.__consumer.max_in_flight,
                                    node_count, rdy_this)
        else:
            # We have two possible scenarios:
            # (1) The client is starting up, and the total RDY count is
            #     already accounted for.
            # (2) The client is already started, and another connection has
            #     a (0) RDY count.
            #
            # In the case of (1), we'll take an RDY of (0). In the case of
            # (2) We'll send an RDY of (1) on their behalf, before we
            # assume a (0) for ourself.

            # Look for existing connections that have a (0) RDY (which
            # would've only been set to (0) intentionally).
            self.__logger_rdy.debug("(max_in_flight > nodes). Doing RDY "
                                    "election.")
            sleeping_connections = [
                c
                for (c, info)
                in self.__consumer.connection_context.items()
                if info['rdy_count'] == 0]
            self.__logger_rdy.debug("Current sleeping_connections: %s",
                                    sleeping_connections)
            if sleeping_connections:
                elected_connection = random.choice(sleeping_connections)
                self.__logger_rdy.debug("Sending RDY of (1) on: [%s]",
                                        elected_connection)
                command_elected = nsq.command.Command(elected_connection)
                command_elected.rdy(1)
            else:
                self.__logger.debug("No sleeping connections. We got the "
                                    "short stick: [%s]", connection)
            rdy_this = 0
    else:
        try:
            # original_rdy may be a callable taking (node, count, consumer)...
            rdy_this = self.__consumer.original_rdy(
                connection.node,
                self.__consumer.connection_count,
                self.__consumer)
            self.__logger_rdy.debug("Using RDY from callback: (%d)", rdy_this)
        except TypeError:
            # ...or a static (non-callable) value.
            rdy_this = self.__consumer.original_rdy
            self.__logger_rdy.debug("Using static RDY: (%d)", rdy_this)
    # Make sure that the aggregate set of RDY counts doesn't exceed the
    # max. This constrains the previous value, above.
    # NOTE(review): adding __get_total_rdy_count() to rdy_this before the
    # min() *increases* this connection's RDY by the aggregate; the stated
    # intent suggests something like max_in_flight - total was meant --
    # confirm against the consumer's accounting.
    rdy_this = min(rdy_this + self.__get_total_rdy_count(),
                   self.__consumer.max_in_flight)
    # Make sure we don't exceed the maximum specified by the server. This
    # only works because we're running greenlets, not threads. At any given
    # time, only one greenlet is running, and we can make sure to
    # distribute the remainder of (max_in_flight / nodes) across a subset
    # of the nodes (they don't all have to have an even slice of
    # max_in_flight).
    server_features = self.__consumer.identify.server_features
    max_rdy_count = server_features['max_rdy_count']
    rdy_this = min(max_rdy_count, rdy_this)
    self.__logger_rdy.debug("Final RDY (max_in_flight=(%d) "
                            "max_rdy_count=(%d)): (%d)",
                            self.__consumer.max_in_flight, max_rdy_count,
                            rdy_this)
    if rdy_this > 0:
        command.rdy(rdy_this)
    else:
        self.__logger_rdy.info("This connection will go to sleep (not "
                               "enough RDY to go around).")
    return rdy_this
8,911
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/consumer.py#L41-L153
[ "def", "concatenate_json", "(", "source_folder", ",", "destination_file", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "source_folder", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*.json'", ")", ":", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "with", "open", "(", "destination_file", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"[\\n\"", ")", "for", "m", "in", "matches", "[", ":", "-", "1", "]", ":", "f", ".", "write", "(", "open", "(", "m", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\",\\n\"", ")", "f", ".", "write", "(", "open", "(", "matches", "[", "-", "1", "]", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\"\\n]\"", ")" ]
Returns a dict map with old to new header fields
def switch_psm_to_peptable_fields(oldheader):
    """Returns a dict mapping old (PSM-table) header fields to new
    (peptide-table) header fields.

    Note: *oldheader* is currently unused; the mapping is fixed.
    """
    psm_fields = [mzidtsvdata.HEADER_PEPTIDE,
                  mzidtsvdata.HEADER_PROTEIN,
                  mzidtsvdata.HEADER_PEPTIDE_Q,
                  mzidtsvdata.HEADER_PEPTIDE_PEP]
    pep_fields = [peptabledata.HEADER_PEPTIDE,
                  peptabledata.HEADER_PROTEINS,
                  peptabledata.HEADER_QVAL,
                  peptabledata.HEADER_PEP]
    return dict(zip(psm_fields, pep_fields))
8,912
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/headers/peptable.py#L11-L20
[ "def", "a_capture_show_configuration_failed", "(", "ctx", ")", ":", "result", "=", "ctx", ".", "device", ".", "send", "(", "\"show configuration failed\"", ")", "ctx", ".", "device", ".", "last_command_result", "=", "result", "index", "=", "result", ".", "find", "(", "\"SEMANTIC ERRORS\"", ")", "ctx", ".", "device", ".", "chain", ".", "connection", ".", "emit_message", "(", "result", ",", "log_level", "=", "logging", ".", "ERROR", ")", "if", "index", ">", "0", ":", "raise", "ConfigurationSemanticErrors", "(", "result", ")", "else", ":", "raise", "ConfigurationErrors", "(", "result", ")" ]
Adds the argument instruction in the list of instructions of this basic block .
def add_instruction(self, instr):
    """Adds the argument instruction to the list of instructions of this
    basic block, tracking defined and used variables.

    The lhs (if a Variable) is recorded as defined; rhs operands (one for
    EqInstruction, two otherwise) are recorded as used. Duplicates are never
    appended.
    """
    assert isinstance(instr, Instruction)
    self.instruction_list.append(instr)
    if isinstance(instr.lhs, Variable) and \
            instr.lhs not in self.defined_variables:
        self.defined_variables.append(instr.lhs)
    # EqInstruction has a single rhs; every other instruction has two.
    if isinstance(instr, EqInstruction):
        operands = (instr.rhs,)
    else:
        operands = (instr.rhs_1, instr.rhs_2)
    for operand in operands:
        if isinstance(operand, Variable) and \
                operand not in self.used_variables:
            self.used_variables.append(operand)
8,913
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L169-L190
[ "def", "normals", "(", "vertices", ",", "indices", ")", ":", "# Compact similar vertices", "vertices", ",", "indices", ",", "mapping", "=", "compact", "(", "vertices", ",", "indices", ")", "T", "=", "vertices", "[", "indices", "]", "N", "=", "np", ".", "cross", "(", "T", "[", ":", ",", "1", "]", "-", "T", "[", ":", ",", "0", "]", ",", "T", "[", ":", ",", "2", "]", "-", "T", "[", ":", ",", "0", "]", ")", "L", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "N", "*", "N", ",", "axis", "=", "1", ")", ")", "L", "[", "L", "==", "0", "]", "=", "1.0", "# prevent divide-by-zero", "N", "/=", "L", "[", ":", ",", "np", ".", "newaxis", "]", "normals", "=", "np", ".", "zeros_like", "(", "vertices", ")", "normals", "[", "indices", "[", ":", ",", "0", "]", "]", "+=", "N", "normals", "[", "indices", "[", ":", ",", "1", "]", "]", "+=", "N", "normals", "[", "indices", "[", ":", ",", "2", "]", "]", "+=", "N", "L", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "normals", "*", "normals", ",", "axis", "=", "1", ")", ")", "L", "[", "L", "==", "0", "]", "=", "1.0", "normals", "/=", "L", "[", ":", ",", "np", ".", "newaxis", "]", "return", "normals", "[", "mapping", "]" ]
Defines the condition which decides how the basic block exits
def set_condition(self, condition, condition_instr=None):
    """Defines the condition which decides how the basic block exits.

    Stores *condition* (a Numeric) and the optional comparison instruction
    that produced it, updating the defined/used variable lists from the
    instruction's operands.
    """
    assert isinstance(condition, Numeric)
    if condition_instr is not None:
        assert isinstance(condition_instr, CmpInstruction)
    self.condition = condition
    self.condition_instr = condition_instr
    if condition_instr is None:
        return
    if isinstance(condition_instr.lhs, Variable) and \
            condition_instr.lhs not in self.defined_variables:
        self.defined_variables.append(condition_instr.lhs)
    for operand in (condition_instr.rhs_1, condition_instr.rhs_2):
        if isinstance(operand, Variable) and \
                operand not in self.used_variables:
            self.used_variables.append(operand)
8,914
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L193-L219
[ "def", "getOverlayTransformTrackedDeviceComponent", "(", "self", ",", "ulOverlayHandle", ",", "pchComponentName", ",", "unComponentNameSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceComponent", "punDeviceIndex", "=", "TrackedDeviceIndex_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "punDeviceIndex", ")", ",", "pchComponentName", ",", "unComponentNameSize", ")", "return", "result", ",", "punDeviceIndex" ]
Adds the given basic block in the function
def add_basic_block(self, basic_block):
    """Adds the given basic block to this function's block list.

    Raises AssertionError if *basic_block* is not a BasicBlock.
    """
    assert isinstance(basic_block, BasicBlock)
    self.basic_block_list.append(basic_block)
8,915
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L256-L259
[ "def", "merge_entities", "(", "self", ",", "from_entity_ids", ",", "to_entity_id", ",", "force", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'from_entity_ids'", ":", "from_entity_ids", ",", "'to_entity_id'", ":", "to_entity_id", ",", "'force'", ":", "force", ",", "}", "api_path", "=", "'/v1/{mount_point}/entity/merge'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
If a variable with the name var_name exists in this function's variable list , then that variable object is returned ; else a new variable is created with the given name , added to the variable list of this function , and returned back
def get_variable ( self , var_name ) : assert ( isinstance ( var_name , str ) ) if isinstance ( var_name , str ) : for var in self . variable_list : if var . name == var_name : return var new_var = Variable ( var_name ) self . variable_list . append ( new_var ) return new_var
8,916
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L268-L285
[ "def", "get_can_status_message", "(", "can_status", ")", ":", "status_msgs", "=", "{", "CanStatus", ".", "CANERR_TXMSGLOST", ":", "\"Transmit message lost\"", ",", "CanStatus", ".", "CANERR_MEMTEST", ":", "\"Memory test failed\"", ",", "CanStatus", ".", "CANERR_REGTEST", ":", "\"Register test failed\"", ",", "CanStatus", ".", "CANERR_QXMTFULL", ":", "\"Transmit queue is full\"", ",", "CanStatus", ".", "CANERR_QOVERRUN", ":", "\"Receive queue overrun\"", ",", "CanStatus", ".", "CANERR_QRCVEMPTY", ":", "\"Receive queue is empty\"", ",", "CanStatus", ".", "CANERR_BUSOFF", ":", "\"Bus Off\"", ",", "CanStatus", ".", "CANERR_BUSHEAVY", ":", "\"Error Passive\"", ",", "CanStatus", ".", "CANERR_BUSLIGHT", ":", "\"Warning Limit\"", ",", "CanStatus", ".", "CANERR_OVERRUN", ":", "\"Rx-buffer is full\"", ",", "CanStatus", ".", "CANERR_XMTFULL", ":", "\"Tx-buffer is full\"", ",", "}", "return", "\"OK\"", "if", "can_status", "==", "CanStatus", ".", "CANERR_OK", "else", "\", \"", ".", "join", "(", "msg", "for", "status", ",", "msg", "in", "status_msgs", ".", "items", "(", ")", "if", "can_status", "&", "status", ")" ]
Adds the argument variable as one of the input variable
def add_input_variable ( self , var ) : assert ( isinstance ( var , Variable ) ) self . input_variable_list . append ( var )
8,917
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L287-L290
[ "def", "adapt", "(", "obj", ",", "to_cls", ")", ":", "if", "obj", "is", "None", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "to_cls", ")", ":", "return", "obj", "errors", "=", "[", "]", "if", "hasattr", "(", "obj", ",", "'__adapt__'", ")", "and", "obj", ".", "__adapt__", ":", "try", ":", "return", "obj", ".", "__adapt__", "(", "to_cls", ")", "except", "(", "AdaptError", ",", "TypeError", ")", "as", "e", ":", "ex_type", ",", "ex", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "errors", ".", "append", "(", "(", "obj", ".", "__adapt__", ",", "ex_type", ",", "ex", ",", "tb", ")", ")", "if", "hasattr", "(", "to_cls", ",", "'__adapt__'", ")", "and", "to_cls", ".", "__adapt__", ":", "try", ":", "return", "to_cls", ".", "__adapt__", "(", "obj", ")", "except", "(", "AdaptError", ",", "TypeError", ")", "as", "e", ":", "ex_type", ",", "ex", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "errors", ".", "append", "(", "(", "to_cls", ".", "__adapt__", ",", "ex_type", ",", "ex", ",", "tb", ")", ")", "for", "k", "in", "get_adapter_path", "(", "obj", ",", "to_cls", ")", ":", "if", "k", "in", "__adapters__", ":", "try", ":", "return", "__adapters__", "[", "k", "]", "(", "obj", ",", "to_cls", ")", "except", "(", "AdaptError", ",", "TypeError", ")", "as", "e", ":", "ex_type", ",", "ex", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "errors", ".", "append", "(", "(", "__adapters__", "[", "k", "]", ",", "ex_type", ",", "ex", ",", "tb", ")", ")", "break", "raise", "AdaptErrors", "(", "'Could not adapt %r to %r'", "%", "(", "obj", ",", "to_cls", ")", ",", "errors", "=", "errors", ")" ]
Adds the argument variable as one of the output variable
def add_output_variable ( self , var ) : assert ( isinstance ( var , Variable ) ) self . output_variable_list . append ( var )
8,918
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L292-L295
[ "def", "open", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "is_opened", "(", ")", "and", "self", ".", "workbook", ".", "file_path", "==", "file_path", ":", "self", ".", "_logger", ".", "logger", ".", "debug", "(", "\"workbook already opened: {}\"", ".", "format", "(", "self", ".", "workbook", ".", "file_path", ")", ")", "return", "self", ".", "close", "(", ")", "self", ".", "_open", "(", "file_path", ")" ]
Tokenizes the string stored in the parser object into a list of tokens .
def tokenize ( self ) : self . token_list = [ ] ps = self . parse_string . strip ( ) i = 0 last_token = None while i < len ( ps ) and ps [ i ] . isspace ( ) : i += 1 while i < len ( ps ) : token = '' if ps [ i ] . isalpha ( ) : while i < len ( ps ) and ( ps [ i ] . isalnum ( ) or ps [ i ] == '_' ) : token += ps [ i ] i += 1 elif ps [ i ] . isdigit ( ) : while i < len ( ps ) and ( ps [ i ] . isdigit ( ) or ps [ i ] == '.' or ps [ i ] == 'e' or ps [ i ] == 'E' or ( ps [ i ] == '+' and ( ps [ i - 1 ] == 'e' or ps [ i - 1 ] == 'E' ) ) or ( ps [ i ] == '-' and ( ps [ i - 1 ] == 'e' or ps [ i - 1 ] == 'E' ) ) ) : token += ps [ i ] i += 1 elif ps [ i ] == '.' : if ps [ i + 1 ] . isdigit ( ) : while i < len ( ps ) and ( ps [ i ] . isdigit ( ) or ps [ i ] == '.' ) : token += ps [ i ] i += 1 else : while i < len ( ps ) and ( ps [ i ] . isalpha ( ) or ps [ i ] == '.' ) : token += ps [ i ] i += 1 else : token += ps [ i ] i += 1 if token == '-' and ( last_token == None or last_token == '(' or self . is_op ( last_token ) ) : token = '~' self . token_list += [ token ] last_token = token while i < len ( ps ) and ps [ i ] . isspace ( ) : i += 1
8,919
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/expr.py#L263-L315
[ "def", "tfidf_weight", "(", "X", ")", ":", "X", "=", "coo_matrix", "(", "X", ")", "# calculate IDF", "N", "=", "float", "(", "X", ".", "shape", "[", "0", "]", ")", "idf", "=", "log", "(", "N", ")", "-", "log1p", "(", "bincount", "(", "X", ".", "col", ")", ")", "# apply TF-IDF adjustment", "X", ".", "data", "=", "sqrt", "(", "X", ".", "data", ")", "*", "idf", "[", "X", ".", "col", "]", "return", "X" ]
Tokenizes and parses an arithmetic expression into a parse tree .
def parse ( self ) : #print("Parsing: %s"%self.parse_string) self . tokenize ( ) if self . debug : print ( "Tokens found: %s" % self . token_list ) try : parse_tree = self . parse2 ( ) except Exception as e : raise e return parse_tree
8,920
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/expr.py#L478-L493
[ "def", "Connect", "(", "self", ")", ":", "device_path", "=", "self", ".", "path", "if", "device_path", "not", "in", "mockobject", ".", "objects", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'No such device.'", ",", "name", "=", "'org.bluez.Error.NoSuchDevice'", ")", "device", "=", "mockobject", ".", "objects", "[", "device_path", "]", "device", ".", "props", "[", "AUDIO_IFACE", "]", "[", "'State'", "]", "=", "dbus", ".", "String", "(", "\"connected\"", ",", "variant_level", "=", "1", ")", "device", ".", "EmitSignal", "(", "AUDIO_IFACE", ",", "'PropertyChanged'", ",", "'sv'", ",", "[", "'State'", ",", "dbus", ".", "String", "(", "\"connected\"", ",", "variant_level", "=", "1", ")", ",", "]", ")", "device", ".", "props", "[", "DEVICE_IFACE", "]", "[", "'Connected'", "]", "=", "dbus", ".", "Boolean", "(", "True", ",", "variant_level", "=", "1", ")", "device", ".", "EmitSignal", "(", "DEVICE_IFACE", ",", "'PropertyChanged'", ",", "'sv'", ",", "[", "'Connected'", ",", "dbus", ".", "Boolean", "(", "True", ",", "variant_level", "=", "1", ")", ",", "]", ")" ]
Insert keys into a table which assigns an ID
def insert_keys ( self , keys ) : start = 0 bulk_insert = self . bulk_insert keys_len = len ( keys ) query = 'INSERT IGNORE INTO gauged_keys (namespace, `key`) VALUES ' execute = self . cursor . execute while start < keys_len : rows = keys [ start : start + bulk_insert ] params = [ param for params in rows for param in params ] insert = '(%s,%s),' * ( len ( rows ) - 1 ) + '(%s,%s)' execute ( query + insert , params ) start += bulk_insert
8,921
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L81-L93
[ "def", "run", "(", "self", ")", ":", "self", ".", "run_plugins", "(", ")", "while", "True", ":", "# Reload plugins and config if either the config file or plugin", "# directory are modified.", "if", "self", ".", "_config_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_config_file_path", ")", "or", "self", ".", "_plugin_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_plugin_path", ")", ":", "self", ".", "thread_manager", ".", "kill_all_threads", "(", ")", "self", ".", "output_dict", ".", "clear", "(", ")", "self", ".", "reload", "(", ")", "self", ".", "run_plugins", "(", ")", "self", ".", "output_to_bar", "(", "json", ".", "dumps", "(", "self", ".", "_remove_empty_output", "(", ")", ")", ")", "time", ".", "sleep", "(", "self", ".", "config", ".", "general", "[", "'interval'", "]", ")" ]
Get the current writer position
def get_writer_position ( self , name ) : cursor = self . cursor cursor . execute ( 'SELECT timestamp FROM gauged_writer_history ' 'WHERE id = %s' , ( name , ) ) result = cursor . fetchone ( ) return result [ 0 ] if result else 0
8,922
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L174-L180
[ "def", "setGroups", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requests", "=", "0", "groups", "=", "[", "]", "try", ":", "for", "gk", "in", "self", "[", "'groupKeys'", "]", ":", "try", ":", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambugroup", "import", "MambuGroup", "self", ".", "mambugroupclass", "=", "MambuGroup", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "requests", "+=", "1", "groups", ".", "append", "(", "g", ")", "except", "KeyError", ":", "pass", "self", "[", "'groups'", "]", "=", "groups", "return", "requests" ]
Get a list of namespaces
def get_namespaces ( self ) : cursor = self . cursor cursor . execute ( 'SELECT DISTINCT namespace FROM gauged_statistics' ) return [ namespace for namespace , in cursor ]
8,923
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L182-L186
[ "def", "malware", "(", "self", ",", "malware", ",", "password", ",", "file_name", ")", ":", "if", "not", "self", ".", "can_update", "(", ")", ":", "self", ".", "_tcex", ".", "handle_error", "(", "910", ",", "[", "self", ".", "type", "]", ")", "self", ".", "_data", "[", "'malware'", "]", "=", "malware", "self", ".", "_data", "[", "'password'", "]", "=", "password", "self", ".", "_data", "[", "'fileName'", "]", "=", "file_name", "request", "=", "{", "'malware'", ":", "malware", ",", "'password'", ":", "password", ",", "'fileName'", ":", "file_name", "}", "return", "self", ".", "tc_requests", ".", "update", "(", "self", ".", "api_type", ",", "self", ".", "api_sub_type", ",", "self", ".", "unique_id", ",", "request", ")" ]
Remove all data associated with the current namespace
def remove_namespace ( self , namespace ) : params = ( namespace , ) execute = self . cursor . execute execute ( 'DELETE FROM gauged_data WHERE namespace = %s' , params ) execute ( 'DELETE FROM gauged_statistics WHERE namespace = %s' , params ) execute ( 'DELETE FROM gauged_keys WHERE namespace = %s' , params ) self . remove_cache ( namespace )
8,924
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L188-L195
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Remove all cached values for the specified namespace optionally specifying a key
def remove_cache ( self , namespace , key = None ) : if key is None : self . cursor . execute ( 'DELETE FROM gauged_cache ' 'WHERE namespace = %s' , ( namespace , ) ) else : self . cursor . execute ( 'DELETE FROM gauged_cache ' 'WHERE namespace = %s and `key` = %s' , ( namespace , key ) )
8,925
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L274-L283
[ "def", "file_is_seekable", "(", "f", ")", ":", "try", ":", "f", ".", "tell", "(", ")", "logger", ".", "info", "(", "\"File is seekable!\"", ")", "except", "IOError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ESPIPE", ":", "return", "False", "else", ":", "raise", "return", "True" ]
Clear all gauged data
def clear_schema ( self ) : execute = self . cursor . execute execute ( 'TRUNCATE TABLE gauged_data' ) execute ( 'TRUNCATE TABLE gauged_keys' ) execute ( 'TRUNCATE TABLE gauged_writer_history' ) execute ( 'TRUNCATE TABLE gauged_cache' ) execute ( 'TRUNCATE TABLE gauged_statistics' ) self . db . commit ( )
8,926
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L355-L363
[ "async", "def", "close_async", "(", "self", ")", ":", "if", "self", ".", "message_handler", ":", "await", "self", ".", "message_handler", ".", "destroy_async", "(", ")", "self", ".", "message_handler", "=", "None", "self", ".", "_shutdown", "=", "True", "if", "self", ".", "_keep_alive_thread", ":", "await", "self", ".", "_keep_alive_thread", "self", ".", "_keep_alive_thread", "=", "None", "if", "not", "self", ".", "_session", ":", "return", "# already closed.", "if", "not", "self", ".", "_connection", ".", "cbs", ":", "_logger", ".", "info", "(", "\"Closing non-CBS session.\"", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_session", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"CBS session pending %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "self", ".", "_session", "=", "None", "if", "not", "self", ".", "_ext_connection", ":", "_logger", ".", "info", "(", "\"Closing exclusive connection %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_connection", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"Shared connection remaining open.\"", ")", "self", ".", "_connection", "=", "None" ]
returns a 32 bit unsigned integer quantum random number
def quantum_random ( ) : import quantumrandom data16 = quantumrandom . uint16 ( array_length = 2 ) assert data16 . flags [ 'C_CONTIGUOUS' ] data32 = data16 . view ( np . dtype ( 'uint32' ) ) [ 0 ] return data32
8,927
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L19-L25
[ "def", "disengage", "(", "self", ")", ":", "if", "self", ".", "_driver", "and", "self", ".", "_driver", ".", "is_connected", "(", ")", ":", "self", ".", "_driver", ".", "home", "(", ")", "self", ".", "_engaged", "=", "False" ]
Convert state of a NumPy RandomState object to a state that can be used by Python s Random .
def _npstate_to_pystate ( npstate ) : PY_VERSION = 3 version , keys , pos , has_gauss , cached_gaussian_ = npstate keys_pos = tuple ( map ( int , keys ) ) + ( int ( pos ) , ) cached_gaussian_ = cached_gaussian_ if has_gauss else None pystate = ( PY_VERSION , keys_pos , cached_gaussian_ ) return pystate
8,928
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L28-L52
[ "def", "kill_workflow", "(", "self", ")", ":", "logger", ".", "info", "(", "'kill workflow of experiment \"%s\"'", ",", "self", ".", "experiment_name", ")", "content", "=", "dict", "(", ")", "url", "=", "self", ".", "_build_api_url", "(", "'/experiments/{experiment_id}/workflow/kill'", ".", "format", "(", "experiment_id", "=", "self", ".", "_experiment_id", ")", ")", "res", "=", "self", ".", "_session", ".", "post", "(", "url", ")", "res", ".", "raise_for_status", "(", ")" ]
Convert state of a Python Random object to state usable by NumPy RandomState .
def _pystate_to_npstate ( pystate ) : NP_VERSION = 'MT19937' version , keys_pos_ , cached_gaussian_ = pystate keys , pos = keys_pos_ [ : - 1 ] , keys_pos_ [ - 1 ] keys = np . array ( keys , dtype = np . uint32 ) has_gauss = cached_gaussian_ is not None cached_gaussian = cached_gaussian_ if has_gauss else 0.0 npstate = ( NP_VERSION , keys , pos , has_gauss , cached_gaussian ) return npstate
8,929
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L55-L81
[ "def", "kill_workflow", "(", "self", ")", ":", "logger", ".", "info", "(", "'kill workflow of experiment \"%s\"'", ",", "self", ".", "experiment_name", ")", "content", "=", "dict", "(", ")", "url", "=", "self", ".", "_build_api_url", "(", "'/experiments/{experiment_id}/workflow/kill'", ".", "format", "(", "experiment_id", "=", "self", ".", "_experiment_id", ")", ")", "res", "=", "self", ".", "_session", ".", "post", "(", "url", ")", "res", ".", "raise_for_status", "(", ")" ]
Returns a random number generator
def ensure_rng ( rng , impl = 'numpy' ) : if impl == 'numpy' : if rng is None : rng = np . random elif isinstance ( rng , int ) : rng = np . random . RandomState ( seed = rng ) elif isinstance ( rng , random . Random ) : # Convert python to numpy random state py_rng = rng pystate = py_rng . getstate ( ) npstate = _pystate_to_npstate ( pystate ) rng = np_rng = np . random . RandomState ( seed = 0 ) np_rng . set_state ( npstate ) elif impl == 'python' : if rng is None : rng = random elif isinstance ( rng , int ) : rng = random . Random ( rng ) elif isinstance ( rng , np . random . RandomState ) : # Convert numpy to python random state np_rng = rng npstate = np_rng . get_state ( ) pystate = _npstate_to_pystate ( npstate ) rng = py_rng = random . Random ( 0 ) py_rng . setstate ( pystate ) else : raise KeyError ( 'unknown rng impl={}' . format ( impl ) ) return rng
8,930
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L84-L139
[ "def", "getOverlayTransformTrackedDeviceComponent", "(", "self", ",", "ulOverlayHandle", ",", "pchComponentName", ",", "unComponentNameSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceComponent", "punDeviceIndex", "=", "TrackedDeviceIndex_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "punDeviceIndex", ")", ",", "pchComponentName", ",", "unComponentNameSize", ")", "return", "result", ",", "punDeviceIndex" ]
random unrepeated indicies
def random_indexes ( max_index , subset_size = None , seed = None , rng = None ) : subst_ = np . arange ( 0 , max_index ) rng = ensure_rng ( seed if rng is None else rng ) rng . shuffle ( subst_ ) if subset_size is None : subst = subst_ else : subst = subst_ [ 0 : min ( subset_size , max_index ) ] return subst
8,931
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L142-L175
[ "def", "latitude", "(", "self", ",", "value", "=", "0.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `latitude`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "-", "90.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal -90.0 '", "'for field `latitude`'", ")", "if", "value", ">", "90.0", ":", "raise", "ValueError", "(", "'value need to be smaller 90.0 '", "'for field `latitude`'", ")", "self", ".", "_latitude", "=", "value" ]
Returns n evenly spaced indexes . Returns as many as possible if trunc is true
def spaced_indexes ( len_ , n , trunc = False ) : if n is None : return np . arange ( len_ ) all_indexes = np . arange ( len_ ) if trunc : n = min ( len_ , n ) if n == 0 : return np . empty ( 0 ) stride = len_ // n try : indexes = all_indexes [ 0 : - 1 : stride ] except ValueError : raise ValueError ( 'cannot slice list of len_=%r into n=%r parts' % ( len_ , n ) ) return indexes
8,932
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L201-L219
[ "def", "vb_clone_vm", "(", "name", "=", "None", ",", "clone_from", "=", "None", ",", "clone_mode", "=", "0", ",", "timeout", "=", "10000", ",", "*", "*", "kwargs", ")", ":", "vbox", "=", "vb_get_box", "(", ")", "log", ".", "info", "(", "'Clone virtualbox machine %s from %s'", ",", "name", ",", "clone_from", ")", "source_machine", "=", "vbox", ".", "findMachine", "(", "clone_from", ")", "groups", "=", "None", "os_type_id", "=", "'Other'", "new_machine", "=", "vbox", ".", "createMachine", "(", "None", ",", "# Settings file", "name", ",", "groups", ",", "os_type_id", ",", "None", "# flags", ")", "progress", "=", "source_machine", ".", "cloneTo", "(", "new_machine", ",", "clone_mode", ",", "# CloneMode", "None", "# CloneOptions : None = Full?", ")", "progress", ".", "waitForCompletion", "(", "timeout", ")", "log", ".", "info", "(", "'Finished cloning %s from %s'", ",", "name", ",", "clone_from", ")", "vbox", ".", "registerMachine", "(", "new_machine", ")", "return", "vb_xpcom_to_attribute_dict", "(", "new_machine", ",", "'IMachine'", ")" ]
Grabs data randomly
def random_sample ( list_ , nSample , strict = False , rng = None , seed = None ) : rng = ensure_rng ( seed if rng is None else rng ) if isinstance ( list_ , list ) : list2_ = list_ [ : ] else : list2_ = np . copy ( list_ ) if len ( list2_ ) == 0 and not strict : return list2_ rng . shuffle ( list2_ ) if nSample is None and strict is False : return list2_ if not strict : nSample = min ( max ( 0 , nSample ) , len ( list2_ ) ) sample_list = list2_ [ : nSample ] return sample_list
8,933
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L323-L365
[ "def", "notebooks_in_git_index", "(", "fmt", ")", ":", "git_status", "=", "system", "(", "'git'", ",", "'status'", ",", "'--porcelain'", ")", "re_modified", "=", "re", ".", "compile", "(", "r'^[AM]+\\s+(?P<name>.*)'", ",", "re", ".", "MULTILINE", ")", "modified_files_in_git_index", "=", "re_modified", ".", "findall", "(", "git_status", ")", "files", "=", "[", "]", "for", "nb_file", "in", "modified_files_in_git_index", ":", "if", "nb_file", ".", "startswith", "(", "'\"'", ")", "and", "nb_file", ".", "endswith", "(", "'\"'", ")", ":", "nb_file", "=", "nb_file", "[", "1", ":", "-", "1", "]", "try", ":", "base_path", "(", "nb_file", ",", "fmt", ")", "files", ".", "append", "(", "nb_file", ")", "except", "InconsistentPath", ":", "continue", "return", "files" ]
Grabs data randomly but in a repeatable way
def deterministic_sample ( list_ , nSample , seed = 0 , rng = None , strict = False ) : rng = ensure_rng ( seed if rng is None else rng ) sample_list = random_sample ( list_ , nSample , strict = strict , rng = rng ) return sample_list
8,934
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L368-L372
[ "def", "which", "(", "exe_name", ")", ":", "def", "is_exe", "(", "file_path_name", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "file_path_name", ")", "and", "os", ".", "access", "(", "file_path_name", ",", "os", ".", "X_OK", ")", "is_platform_windows", "=", "(", "platform", ".", "system", "(", ")", "==", "'Windows'", ")", "fpath", ",", "_fname", "=", "os", ".", "path", ".", "split", "(", "exe_name", ")", "if", "fpath", ":", "if", "is_exe", "(", "exe_name", ")", ":", "return", "exe_name", "else", ":", "for", "path", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "exe_file_path_name", "=", "os", ".", "path", ".", "join", "(", "path", ",", "exe_name", ")", "if", "is_exe", "(", "exe_file_path_name", ")", ":", "return", "exe_file_path_name", "if", "is_platform_windows", ":", "windows_exe_file_path_name", "=", "'%s.exe'", "%", "exe_file_path_name", "if", "is_exe", "(", "windows_exe_file_path_name", ")", ":", "return", "windows_exe_file_path_name", "windows_com_file_path_name", "=", "'%s.exe'", "%", "exe_file_path_name", "if", "is_exe", "(", "windows_com_file_path_name", ")", ":", "return", "windows_com_file_path_name", "return", "None" ]
Returns n evenly spaced items
def spaced_items ( list_ , n , * * kwargs ) : indexes = spaced_indexes ( len ( list_ ) , n , * * kwargs ) items = list_ [ indexes ] return items
8,935
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L375-L379
[ "def", "ParseFileEntry", "(", "self", ",", "parser_mediator", ",", "file_entry", ")", ":", "index_file_parser", "=", "ChromeCacheIndexFileParser", "(", ")", "file_object", "=", "file_entry", ".", "GetFileObject", "(", ")", "try", ":", "index_file_parser", ".", "ParseFileObject", "(", "parser_mediator", ",", "file_object", ")", "except", "(", "IOError", ",", "errors", ".", "ParseError", ")", "as", "exception", ":", "file_object", ".", "close", "(", ")", "display_name", "=", "parser_mediator", ".", "GetDisplayName", "(", ")", "raise", "errors", ".", "UnableToParseFile", "(", "'[{0:s}] unable to parse index file {1:s} with error: {2!s}'", ".", "format", "(", "self", ".", "NAME", ",", "display_name", ",", "exception", ")", ")", "# TODO: create event based on index file creation time.", "try", ":", "file_system", "=", "file_entry", ".", "GetFileSystem", "(", ")", "self", ".", "_ParseIndexTable", "(", "parser_mediator", ",", "file_system", ",", "file_entry", ",", "index_file_parser", ".", "index_table", ")", "finally", ":", "file_object", ".", "close", "(", ")" ]
We re assuming that the static list of servers can serve the given topic since we have to preexisting knowledge about them .
def get_servers ( self , topic ) : return ( nsq . node . ServerNode ( sh ) for sh in self . __server_hosts )
8,936
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/node_collection.py#L14-L19
[ "def", "setOverlayTextureColorSpace", "(", "self", ",", "ulOverlayHandle", ",", "eTextureColorSpace", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTextureColorSpace", "result", "=", "fn", "(", "ulOverlayHandle", ",", "eTextureColorSpace", ")", "return", "result" ]
A lexical analyzer for the CTfile formatted files .
def tokenizer ( text ) : for entry in text . split ( '$$$$\n' ) : if entry . rstrip ( ) : lines_stream = deque ( entry . split ( '\n' ) ) else : continue # yield from _molfile(stream=lines_stream) for token in _molfile ( stream = lines_stream ) : yield token if len ( lines_stream ) : # yield from _sdfile(stream=lines_stream) for token in _sdfile ( stream = lines_stream ) : yield token yield EndOfFile ( )
8,937
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/tokenizer.py#L48-L70
[ "async", "def", "set_room_temperatures_by_name", "(", "self", ",", "room_name", ",", "sleep_temp", "=", "None", ",", "comfort_temp", "=", "None", ",", "away_temp", "=", "None", ")", ":", "if", "sleep_temp", "is", "None", "and", "comfort_temp", "is", "None", "and", "away_temp", "is", "None", ":", "return", "for", "room_id", ",", "_room", "in", "self", ".", "rooms", ".", "items", "(", ")", ":", "if", "_room", ".", "name", "==", "room_name", ":", "await", "self", ".", "set_room_temperatures", "(", "room_id", ",", "sleep_temp", ",", "comfort_temp", ",", "away_temp", ")", "return", "_LOGGER", ".", "error", "(", "\"Could not find a room with name %s\"", ",", "room_name", ")" ]
Process atom and bond blocks of Ctab .
def _ctab_atom_bond_block ( number_of_lines , block_type , stream ) : for _ in range ( int ( number_of_lines ) ) : line = stream . popleft ( ) yield block_type ( * line . split ( ) )
8,938
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/tokenizer.py#L134-L147
[ "def", "update_thumbnail", "(", "api_key", ",", "api_secret", ",", "video_key", ",", "position", "=", "7.0", ",", "*", "*", "kwargs", ")", ":", "jwplatform_client", "=", "jwplatform", ".", "Client", "(", "api_key", ",", "api_secret", ")", "logging", ".", "info", "(", "\"Updating video thumbnail.\"", ")", "try", ":", "response", "=", "jwplatform_client", ".", "videos", ".", "thumbnails", ".", "update", "(", "video_key", "=", "video_key", ",", "position", "=", "position", ",", "# Parameter which specifies seconds into video to extract thumbnail from.", "*", "*", "kwargs", ")", "except", "jwplatform", ".", "errors", ".", "JWPlatformError", "as", "e", ":", "logging", ".", "error", "(", "\"Encountered an error updating thumbnail.\\n{}\"", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "e", ".", "message", ")", "return", "response" ]
Process properties block of Ctab .
def _ctab_property_block ( stream ) : line = stream . popleft ( ) while line != 'M END' : name = line . split ( ) [ 1 ] yield CtabPropertiesBlockLine ( name , line ) line = stream . popleft ( )
8,939
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/tokenizer.py#L150-L162
[ "def", "on_websocket_message", "(", "message", ":", "str", ")", "->", "None", ":", "msgs", "=", "json", ".", "loads", "(", "message", ")", "for", "msg", "in", "msgs", ":", "if", "not", "isinstance", "(", "msg", ",", "dict", ")", ":", "logger", ".", "error", "(", "'Invalid WS message format: {}'", ".", "format", "(", "message", ")", ")", "continue", "_type", "=", "msg", ".", "get", "(", "'type'", ")", "if", "_type", "==", "'log'", ":", "log_handler", "(", "msg", "[", "'level'", "]", ",", "msg", "[", "'message'", "]", ")", "elif", "_type", "==", "'event'", ":", "event_handler", "(", "msg", "[", "'event'", "]", ")", "elif", "_type", "==", "'response'", ":", "response_handler", "(", "msg", ")", "else", ":", "raise", "ValueError", "(", "'Unkown message type: {}'", ".", "format", "(", "message", ")", ")" ]
Creates scorefiles for qvality s target and decoy distributions
def set_features ( self ) : self . scores = { } for t_or_d , feats in zip ( [ 'target' , 'decoy' ] , [ self . target , self . decoy ] ) : self . scores [ t_or_d ] = { } self . scores [ t_or_d ] [ 'scores' ] = self . score_get_fun ( feats , self . featuretype , self . prepare_percolator_output ) self . scores [ t_or_d ] [ 'fn' ] = '{}_qvality_input.txt' . format ( t_or_d ) writers . write_qvality_input ( self . scores [ t_or_d ] [ 'scores' ] , self . scores [ t_or_d ] [ 'fn' ] )
8,940
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/pycolator/qvality.py#L41-L51
[ "def", "pop_no_diff_fields", "(", "latest_config", ",", "current_config", ")", ":", "for", "field", "in", "[", "'userIdentity'", ",", "'principalId'", ",", "'userAgent'", ",", "'sourceIpAddress'", ",", "'requestParameters'", ",", "'eventName'", "]", ":", "latest_config", ".", "pop", "(", "field", ",", "None", ")", "current_config", ".", "pop", "(", "field", ",", "None", ")" ]
This actually runs the qvality program from PATH .
def write ( self ) : outfn = self . create_outfilepath ( self . fn , self . outsuffix ) command = [ 'qvality' ] command . extend ( self . qvalityoptions ) command . extend ( [ self . scores [ 'target' ] [ 'fn' ] , self . scores [ 'decoy' ] [ 'fn' ] , '-o' , outfn ] ) subprocess . call ( command )
8,941
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/pycolator/qvality.py#L53-L60
[ "def", "detect_column_renamings", "(", "self", ",", "table_differences", ")", ":", "rename_candidates", "=", "{", "}", "for", "added_column_name", ",", "added_column", "in", "table_differences", ".", "added_columns", ".", "items", "(", ")", ":", "for", "removed_column", "in", "table_differences", ".", "removed_columns", ".", "values", "(", ")", ":", "if", "len", "(", "self", ".", "diff_column", "(", "added_column", ",", "removed_column", ")", ")", "==", "0", ":", "if", "added_column", ".", "get_name", "(", ")", "not", "in", "rename_candidates", ":", "rename_candidates", "[", "added_column", ".", "get_name", "(", ")", "]", "=", "[", "]", "rename_candidates", "[", "added_column", ".", "get_name", "(", ")", "]", "=", "(", "removed_column", ",", "added_column", ",", "added_column_name", ")", "for", "candidate_columns", "in", "rename_candidates", ".", "values", "(", ")", ":", "if", "len", "(", "candidate_columns", ")", "==", "1", ":", "removed_column", ",", "added_column", ",", "_", "=", "candidate_columns", "[", "0", "]", "removed_column_name", "=", "removed_column", ".", "get_name", "(", ")", ".", "lower", "(", ")", "added_column_name", "=", "added_column", ".", "get_name", "(", ")", ".", "lower", "(", ")", "if", "removed_column_name", "not", "in", "table_differences", ".", "renamed_columns", ":", "table_differences", ".", "renamed_columns", "[", "removed_column_name", "]", "=", "added_column", "del", "table_differences", ".", "added_columns", "[", "added_column_name", "]", "del", "table_differences", ".", "removed_columns", "[", "removed_column_name", "]" ]
r Creates default structure for a new repo
def setup_repo ( ) : print ( '\n [setup_repo]!' ) # import os from functools import partial import utool as ut # import os code_dpath = ut . truepath ( ut . get_argval ( '--code-dir' , default = '~/code' ) ) _code_dpath = ut . unexpanduser ( code_dpath ) repo_fname = ( ut . get_argval ( ( '--repo' , '--repo-name' ) , type_ = str ) ) repo_dpath = join ( code_dpath , repo_fname ) modname = ut . get_argval ( '--modname' , default = repo_fname ) ut . ensuredir ( repo_dpath , verbose = True ) _regencmd = 'python -m utool --tf setup_repo --repo={repo_fname} --codedir={_code_dpath} --modname={modname}' flake8_noqacmd = 'flake8' + ':noqa' regencmd = _regencmd . format ( * * locals ( ) ) with ut . ChdirContext ( repo_dpath ) : # os.chdir(repo_fname) locals_ = locals ( ) force = True _ensure_text = partial ( ensure_text , repo_dpath = '.' , force = None , locals_ = locals_ ) _ensure_text ( fname = 'todo.md' , text = ut . codeblock ( r''' # STARTBLOCK # {modname} TODO File * Add TODOS! # ENDBLOCK ''' ) ) _ensure_text ( fname = 'README.md' , text = ut . codeblock ( r''' # STARTBLOCK # {modname} README FILE # ENDBLOCK ''' ) ) _ensure_text ( fname = 'setup.py' , chmod = '+x' , text = ut . 
codeblock ( r''' # STARTBLOCK #!/usr/bin/env python """ Initially Generated By: {regencmd} --force-{fname} """ from __future__ import absolute_import, division, print_function, unicode_literals from setuptools import setup try: from utool import util_setup except ImportError: print('ERROR: setup requires utool') raise INSTALL_REQUIRES = [ #'cython >= 0.21.1', #'numpy >= 1.9.0', #'scipy >= 0.16.0', ] CLUTTER_PATTERNS = [ # Patterns removed by python setup.py clean ] if __name__ == '__main__': kwargs = util_setup.setuptools_setup( setup_fpath=__file__, name='{modname}', packages=util_setup.find_packages(), version=util_setup.parse_package_for_version('{modname}'), license=util_setup.read_license('LICENSE'), long_description=util_setup.parse_readme('README.md'), ext_modules=util_setup.find_ext_modules(), cmdclass=util_setup.get_cmdclass(), #description='description of module', #url='https://github.com/<username>/{repo_fname}.git', #author='<author>', #author_email='<author_email>', keywords='', install_requires=INSTALL_REQUIRES, clutter_patterns=CLUTTER_PATTERNS, #package_data={{'build': ut.get_dynamic_lib_globstrs()}}, #build_command=lambda: ut.std_build_command(dirname(__file__)), classifiers=[], ) setup(**kwargs) # ENDBLOCK ''' ) ) _ensure_text ( fname = '.gitignore' , text = ut . codeblock ( r''' # STARTBLOCK *.py[cod] # C extensions *.so # Packages *.egg *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 __pycache__ # Installer logs pip-log.txt # Print Logs logs # Unit test / coverage reports .coverage .tox nosetests.xml # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject .DS_Store *.dump.txt *.sqlite3 # profiler *.lprof *.prof *.flann *.npz # utool output _timeings.txt failed.txt *.orig _doc timeings.txt failed_doctests.txt # ENDBLOCK ''' ) ) _ensure_text ( fname = join ( repo_dpath , modname , '__init__.py' ) , text = ut . 
codeblock ( r''' # STARTBLOCK # -*- coding: utf-8 -*- # {flake8_noqacmd} """ Initially Generated By: {regencmd} """ from __future__ import absolute_import, division, print_function, unicode_literals import sys __version__ = '0.0.0' IMPORT_TUPLES = [ # ('<modname>', None), ] __DYNAMIC__ = '--nodyn' not in sys.argv """ python -c "import {modname}" --dump-{modname}-init python -c "import {modname}" --update-{modname}-init """ DOELSE = False if __DYNAMIC__: # Dynamically import listed util libraries and their members. from utool._internal import util_importer ignore_endswith = [] import_execstr = util_importer.dynamic_import( __name__, IMPORT_TUPLES, ignore_endswith=ignore_endswith) exec(import_execstr) DOELSE = False else: DOELSE = True if DOELSE: # <AUTOGEN_INIT> pass # </AUTOGEN_INIT> # ENDBLOCK ''' ) ) _ensure_text ( fname = join ( repo_dpath , modname , '__main__.py' ) , chmod = '+x' , text = ut . codeblock ( r''' # STARTBLOCK #!/usr/bin/env python # -*- coding: utf-8 -*- """ Initially Generated By: {regencmd} """ from __future__ import absolute_import, division, print_function, unicode_literals def {modname}_main(): ignore_prefix = [] ignore_suffix = [] import utool as ut ut.main_function_tester('{modname}', ignore_prefix, ignore_suffix) if __name__ == '__main__': """ Usage: python -m {modname} <funcname> """ print('Running {modname} main') {modname}_main() # ENDBLOCK ''' ) ) _ensure_text ( fname = 'run_tests.py' , chmod = '+x' , text = ut . 
codeblock ( r''' # STARTBLOCK #!/usr/bin/env python """ Initially Generated By: {regencmd} --force-{fname} """ from __future__ import absolute_import, division, print_function import sys import utool as ut def run_tests(): # Build module list and run tests import sys ut.change_term_title('RUN {modname} TESTS') exclude_doctests_fnames = set([ ]) exclude_dirs = [ '_broken', 'old', 'tests', 'timeits', '_scripts', '_timeits', '_doc', 'notebook', ] dpath_list = ['{modname}'] doctest_modname_list = ut.find_doctestable_modnames( dpath_list, exclude_doctests_fnames, exclude_dirs) coverage = ut.get_argflag(('--coverage', '--cov',)) if coverage: import coverage cov = coverage.Coverage(source=doctest_modname_list) cov.start() print('Starting coverage') exclude_lines = [ 'pragma: no cover', 'def __repr__', 'if self.debug:', 'if settings.DEBUG', 'raise AssertionError', 'raise NotImplementedError', 'if 0:', 'if ut.VERBOSE', 'if _debug:', 'if __name__ == .__main__.:', 'print(.*)', ] for line in exclude_lines: cov.exclude(line) for modname in doctest_modname_list: exec('import ' + modname, globals()) module_list = [sys.modules[name] for name in doctest_modname_list] nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list) if coverage: print('Stoping coverage') cov.stop() print('Saving coverage') cov.save() print('Generating coverage html report') cov.html_report() if nPass != nTotal: return 1 else: return 0 if __name__ == '__main__': import multiprocessing multiprocessing.freeze_support() retcode = run_tests() sys.exit(retcode) # ENDBLOCK ''' ) ) ut . ensuredir ( join ( repo_dpath , modname ) , verbose = True )
8,942
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_project.py#L143-L499
[ "def", "readAnnotations", "(", "self", ")", ":", "annot", "=", "self", ".", "read_annotation", "(", ")", "annot", "=", "np", ".", "array", "(", "annot", ")", "if", "(", "annot", ".", "shape", "[", "0", "]", "==", "0", ")", ":", "return", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", "ann_time", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "0", "]", ")", "ann_text", "=", "annot", "[", ":", ",", "2", "]", "ann_text_out", "=", "[", "\"\"", "for", "x", "in", "range", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", "]", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", ":", "ann_text_out", "[", "i", "]", "=", "self", ".", "_convert_string", "(", "ann_text", "[", "i", "]", ")", "if", "annot", "[", "i", ",", "1", "]", "==", "''", ":", "annot", "[", "i", ",", "1", "]", "=", "'-1'", "ann_duration", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "1", "]", ")", "return", "ann_time", "/", "10000000", ",", "ann_duration", ",", "np", ".", "array", "(", "ann_text_out", ")" ]
r Greps the projects defined in the current UserProfile
def grep_projects ( tofind_list , user_profile = None , verbose = True , new = False , * * kwargs ) : import utool as ut user_profile = ensure_user_profile ( user_profile ) print ( 'user_profile = {!r}' . format ( user_profile ) ) kwargs = kwargs . copy ( ) colored = kwargs . pop ( 'colored' , True ) grepkw = { } grepkw [ 'greater_exclude_dirs' ] = user_profile . project_exclude_dirs grepkw [ 'exclude_dirs' ] = user_profile . project_exclude_dirs grepkw [ 'dpath_list' ] = user_profile . project_dpaths grepkw [ 'include_patterns' ] = user_profile . project_include_patterns grepkw [ 'exclude_patterns' ] = user_profile . project_exclude_patterns grepkw . update ( kwargs ) msg_list1 = [ ] msg_list2 = [ ] print_ = msg_list1 . append print_ ( 'Greping Projects' ) print_ ( 'tofind_list = %s' % ( ut . repr4 ( tofind_list , nl = True ) , ) ) #print_('grepkw = %s' % ut.repr4(grepkw, nl=True)) if verbose : print ( '\n' . join ( msg_list1 ) ) #with ut.Timer('greping', verbose=True): grep_result = ut . grep ( tofind_list , * * grepkw ) found_fpath_list , found_lines_list , found_lxs_list = grep_result # HACK, duplicate behavior. TODO: write grep print result function reflags = grepkw . get ( 'reflags' , 0 ) _exprs_flags = [ ut . extend_regex2 ( expr , reflags ) for expr in tofind_list ] extended_regex_list = ut . take_column ( _exprs_flags , 0 ) reflags_list = ut . take_column ( _exprs_flags , 1 ) # HACK # pat = ut.util_regex.regex_or(extended_regex_list) reflags = reflags_list [ 0 ] # from utool import util_regex resultstr = ut . make_grep_resultstr ( grep_result , extended_regex_list , reflags , colored = colored ) msg_list2 . append ( resultstr ) print_ = msg_list2 . 
append #for fpath, lines, lxs in zip(found_fpath_list, found_lines_list, # found_lxs_list): # print_('----------------------') # print_('found %d line(s) in %r: ' % (len(lines), fpath)) # name = split(fpath)[1] # max_line = len(lines) # ndigits = str(len(str(max_line))) # for (lx, line) in zip(lxs, lines): # line = line.replace('\n', '') # print_(('%s : %' + ndigits + 'd |%s') % (name, lx, line)) # iter_ = zip(found_fpath_list, found_lines_list, found_lxs_list) # for fpath, lines, lxs in iter_: # print_('----------------------') # print_('found %d line(s) in %r: ' % (len(lines), fpath)) # name = split(fpath)[1] # max_line = len(lines) # ndigits = str(len(str(max_line))) # for (lx, line) in zip(lxs, lines): # line = line.replace('\n', '') # colored_line = ut.highlight_regex( # line.rstrip('\n'), pat, reflags=reflags) # print_(('%s : %' + ndigits + 'd |%s') % (name, lx, colored_line)) print_ ( '====================' ) print_ ( 'found_fpath_list = ' + ut . repr4 ( found_fpath_list ) ) print_ ( '' ) #print_('gvim -o ' + ' '.join(found_fpath_list)) if verbose : print ( '\n' . join ( msg_list2 ) ) msg_list = msg_list1 + msg_list2 if new : return GrepResult ( found_fpath_list , found_lines_list , found_lxs_list , extended_regex_list , reflags ) else : return msg_list
8,943
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_project.py#L606-L708
[ "def", "delete_attachments", "(", "self", ",", "volumeID", ",", "attachmentsID", ")", ":", "log", ".", "debug", "(", "\"deleting attachments from volume '{}': {}\"", ".", "format", "(", "volumeID", ",", "attachmentsID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "insID", "=", "[", "a", "[", "'id'", "]", "for", "a", "in", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", "]", "# check that all requested file are present", "for", "id", "in", "attachmentsID", ":", "if", "id", "not", "in", "insID", ":", "raise", "NotFoundException", "(", "\"could not found attachment '{}' of the volume '{}'\"", ".", "format", "(", "id", ",", "volumeID", ")", ")", "for", "index", ",", "id", "in", "enumerate", "(", "attachmentsID", ")", ":", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", ".", "pop", "(", "insID", ".", "index", "(", "id", ")", ")", "self", ".", "_db", ".", "modify_book", "(", "volumeID", ",", "rawVolume", "[", "'_source'", "]", ",", "version", "=", "rawVolume", "[", "'_version'", "]", ")" ]
Compile libfaketime .
def run ( self ) : if sys . platform == "linux" or sys . platform == "linux2" : libname = 'libfaketime.so.1' libnamemt = 'libfaketimeMT.so.1' elif sys . platform == "darwin" : libname = 'libfaketime.1.dylib' libnamemt = 'libfaketimeMT.1.dylib' else : sys . stderr . write ( "WARNING : libfaketime does not support platform {}\n" . format ( sys . platform ) ) sys . stderr . flush ( ) return faketime_lib = join ( 'faketime' , libname ) faketime_lib_mt = join ( 'faketime' , libnamemt ) self . my_outputs = [ ] setup_py_directory = dirname ( realpath ( __file__ ) ) faketime_directory = join ( setup_py_directory , "faketime" ) os . chdir ( faketime_directory ) if sys . platform == "linux" or sys . platform == "linux2" : subprocess . check_call ( [ 'make' , ] ) else : os . chdir ( setup_py_directory ) if "10.12" in subprocess . check_output ( [ "sw_vers" , "-productVersion" ] ) . decode ( 'utf8' ) : self . copy_file ( join ( 'faketime' , "libfaketime.c.sierra" ) , join ( 'faketime' , "libfaketime.c" ) ) os . chdir ( faketime_directory ) subprocess . check_call ( [ 'make' , '-f' , 'Makefile.OSX' ] ) os . chdir ( setup_py_directory ) dest = join ( self . install_purelib , dirname ( faketime_lib ) ) dest_mt = join ( self . install_purelib , dirname ( faketime_lib_mt ) ) try : os . makedirs ( dest ) except OSError as e : if e . errno != 17 : raise self . copy_file ( faketime_lib , dest ) if exists ( faketime_lib_mt ) : self . copy_file ( faketime_lib_mt , dest_mt ) self . my_outputs . append ( join ( dest , libname ) ) install . run ( self )
8,944
https://github.com/crdoconnor/faketime/blob/6e81ca070c0e601a52507b945ed45d5d42576b21/setup.py#L14-L62
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
r Interfaces to either multiprocessing or futures . Esentially maps args_gen onto func using pool . imap . However args_gen must be a tuple of args that will be unpacked and send to the function . Thus the function can take multiple args . Also specifing keyword args is supported .
def generate2 ( func , args_gen , kw_gen = None , ntasks = None , ordered = True , force_serial = False , use_pool = False , chunksize = None , nprocs = None , progkw = { } , nTasks = None , verbose = None ) : if verbose is None : verbose = 2 if ntasks is None : ntasks = nTasks if ntasks is None : try : ntasks = len ( args_gen ) except TypeError : # Cast to a list args_gen = list ( args_gen ) ntasks = len ( args_gen ) if ntasks == 1 or ntasks < __MIN_PARALLEL_TASKS__ : force_serial = True if __FORCE_SERIAL__ : force_serial = __FORCE_SERIAL__ if ntasks == 0 : if verbose : print ( '[ut.generate2] submitted 0 tasks' ) raise StopIteration if nprocs is None : nprocs = min ( ntasks , get_default_numprocs ( ) ) if nprocs == 1 : force_serial = True if kw_gen is None : kw_gen = [ { } ] * ntasks if isinstance ( kw_gen , dict ) : # kw_gen can be a single dict applied to everything kw_gen = [ kw_gen ] * ntasks if force_serial : for result in _generate_serial2 ( func , args_gen , kw_gen , ntasks = ntasks , progkw = progkw , verbose = verbose ) : yield result else : if verbose : gentype = 'mp' if use_pool else 'futures' fmtstr = '[generate2] executing {} {} tasks using {} {} procs' print ( fmtstr . format ( ntasks , get_funcname ( func ) , nprocs , gentype ) ) if verbose > 1 : lbl = '(pargen) %s: ' % ( get_funcname ( func ) , ) progkw_ = dict ( freq = None , bs = True , adjust = False , freq_est = 'absolute' ) progkw_ . update ( progkw ) # print('progkw_.update = {!r}'.format(progkw_.update)) progpart = util_progress . ProgPartial ( length = ntasks , lbl = lbl , * * progkw_ ) if use_pool : # Use multiprocessing if chunksize is None : chunksize = max ( min ( 4 , ntasks ) , min ( 8 , ntasks // ( nprocs ** 2 ) ) ) try : pool = multiprocessing . Pool ( nprocs ) if ordered : pmap_func = pool . imap else : pmap_func = pool . 
imap_unordered wrapped_arg_gen = zip ( [ func ] * len ( args_gen ) , args_gen , kw_gen ) res_gen = pmap_func ( _kw_wrap_worker , wrapped_arg_gen , chunksize ) if verbose > 1 : res_gen = progpart ( res_gen ) for res in res_gen : yield res finally : pool . close ( ) pool . join ( ) else : # Use futures executor = futures . ProcessPoolExecutor ( nprocs ) try : fs_list = [ executor . submit ( func , * a , * * k ) for a , k in zip ( args_gen , kw_gen ) ] fs_gen = fs_list if not ordered : fs_gen = futures . as_completed ( fs_gen ) if verbose > 1 : fs_gen = progpart ( fs_gen ) for fs in fs_gen : yield fs . result ( ) finally : executor . shutdown ( wait = True )
8,945
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_parallel.py#L51-L279
[ "def", "GetCacheSize", "(", "self", ")", ":", "if", "not", "self", ".", "_cache_start_offset", "or", "not", "self", ".", "_cache_end_offset", ":", "return", "0", "return", "self", ".", "_cache_end_offset", "-", "self", ".", "_cache_start_offset" ]
internal serial generator
def _generate_serial2 ( func , args_gen , kw_gen = None , ntasks = None , progkw = { } , verbose = None , nTasks = None ) : if verbose is None : verbose = 2 if ntasks is None : ntasks = nTasks if ntasks is None : ntasks = len ( args_gen ) if verbose > 0 : print ( '[ut._generate_serial2] executing %d %s tasks in serial' % ( ntasks , get_funcname ( func ) ) ) # kw_gen can be a single dict applied to everything if kw_gen is None : kw_gen = [ { } ] * ntasks if isinstance ( kw_gen , dict ) : kw_gen = [ kw_gen ] * ntasks # Get iterator with or without progress if verbose > 1 : lbl = '(sergen) %s: ' % ( get_funcname ( func ) , ) progkw_ = dict ( freq = None , bs = True , adjust = False , freq_est = 'between' ) progkw_ . update ( progkw ) args_gen = util_progress . ProgIter ( args_gen , length = ntasks , lbl = lbl , * * progkw_ ) for args , kw in zip ( args_gen , kw_gen ) : result = func ( * args , * * kw ) yield result
8,946
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_parallel.py#L287-L316
[ "def", "load_toml_rest_api_config", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Skipping rest api loading from non-existent config file: %s\"", ",", "filename", ")", "return", "RestApiConfig", "(", ")", "LOGGER", ".", "info", "(", "\"Loading rest api information from config: %s\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "raise", "RestApiConfigurationError", "(", "\"Unable to load rest api configuration file: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "invalid_keys", "=", "set", "(", "toml_config", ".", "keys", "(", ")", ")", ".", "difference", "(", "[", "'bind'", ",", "'connect'", ",", "'timeout'", ",", "'opentsdb_db'", ",", "'opentsdb_url'", ",", "'opentsdb_username'", ",", "'opentsdb_password'", ",", "'client_max_size'", "]", ")", "if", "invalid_keys", ":", "raise", "RestApiConfigurationError", "(", "\"Invalid keys in rest api config: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "list", "(", "invalid_keys", ")", ")", ")", ")", ")", "config", "=", "RestApiConfig", "(", "bind", "=", "toml_config", ".", "get", "(", "\"bind\"", ",", "None", ")", ",", "connect", "=", "toml_config", ".", "get", "(", "'connect'", ",", "None", ")", ",", "timeout", "=", "toml_config", ".", "get", "(", "'timeout'", ",", "None", ")", ",", "opentsdb_url", "=", "toml_config", ".", "get", "(", "'opentsdb_url'", ",", "None", ")", ",", "opentsdb_db", "=", "toml_config", ".", "get", "(", "'opentsdb_db'", ",", "None", ")", ",", "opentsdb_username", "=", "toml_config", ".", "get", "(", "'opentsdb_username'", ",", "None", ")", ",", "opentsdb_password", "=", "toml_config", ".", "get", "(", "'opentsdb_password'", ",", "None", ")", ",", "client_max_size", "=", "toml_config", 
".", "get", "(", "'client_max_size'", ",", "None", ")", ")", "return", "config" ]
r Generator that runs a slow source generator in a separate process .
def buffered_generator ( source_gen , buffer_size = 2 , use_multiprocessing = False ) : if buffer_size < 2 : raise RuntimeError ( "Minimal buffer_ size is 2!" ) if use_multiprocessing : print ( 'WARNING seems to freeze if passed in a generator' ) #assert False, 'dont use this buffered multiprocessing' if False : pool = multiprocessing . Pool ( processes = get_default_numprocs ( ) , initializer = init_worker , maxtasksperchild = None ) Process = pool . Process else : Process = multiprocessing . Process _Queue = multiprocessing . Queue target = _buffered_generation_process else : _Queue = queue . Queue Process = KillableThread target = _buffered_generation_thread # the effective buffer_ size is one less, because the generation process # will generate one extra element and block until there is room in the # buffer_. buffer_ = _Queue ( maxsize = buffer_size - 1 ) # previously None was used as a sentinal, which fails when source_gen # genrates None need to make object that it will not be generated by the # process. A reasonable hack is to use the StopIteration exception instead sentinal = StopIteration process = Process ( target = target , args = ( iter ( source_gen ) , buffer_ , sentinal ) ) #if not use_multiprocessing: process . daemon = True process . start ( ) while True : #output = buffer_.get(timeout=1.0) output = buffer_ . get ( ) if output is sentinal : raise StopIteration yield output
8,947
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_parallel.py#L669-L769
[ "def", "recognize", "(", "self", ",", "node", ":", "yaml", ".", "Node", ",", "expected_type", ":", "Type", ")", "->", "RecResult", ":", "logger", ".", "debug", "(", "'Recognizing {} as a {}'", ".", "format", "(", "node", ",", "expected_type", ")", ")", "recognized_types", "=", "None", "if", "expected_type", "in", "[", "str", ",", "int", ",", "float", ",", "bool", ",", "bool_union_fix", ",", "datetime", ",", "None", ",", "type", "(", "None", ")", "]", ":", "recognized_types", ",", "message", "=", "self", ".", "__recognize_scalar", "(", "node", ",", "expected_type", ")", "elif", "is_generic_union", "(", "expected_type", ")", ":", "recognized_types", ",", "message", "=", "self", ".", "__recognize_union", "(", "node", ",", "expected_type", ")", "elif", "is_generic_list", "(", "expected_type", ")", ":", "recognized_types", ",", "message", "=", "self", ".", "__recognize_list", "(", "node", ",", "expected_type", ")", "elif", "is_generic_dict", "(", "expected_type", ")", ":", "recognized_types", ",", "message", "=", "self", ".", "__recognize_dict", "(", "node", ",", "expected_type", ")", "elif", "expected_type", "in", "self", ".", "__registered_classes", ".", "values", "(", ")", ":", "recognized_types", ",", "message", "=", "self", ".", "__recognize_user_classes", "(", "node", ",", "expected_type", ")", "if", "recognized_types", "is", "None", ":", "raise", "RecognitionError", "(", "(", "'Could not recognize for type {},'", "' is it registered?'", ")", ".", "format", "(", "expected_type", ")", ")", "logger", ".", "debug", "(", "'Recognized types {} matching {}'", ".", "format", "(", "recognized_types", ",", "expected_type", ")", ")", "return", "recognized_types", ",", "message" ]
Orders window ids by most recently used
def sort_window_ids ( winid_list , order = 'mru' ) : import utool as ut winid_order = XCtrl . sorted_window_ids ( order ) sorted_win_ids = ut . isect ( winid_order , winid_list ) return sorted_win_ids
8,948
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_ubuntu.py#L384-L391
[ "def", "Build", "(", "self", ")", ":", "self", ".", "SetupVars", "(", ")", "self", ".", "Clean", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "args", ".", "grr_src", ")", ":", "self", ".", "GitCheckoutGRR", "(", ")", "proto_sdist", "=", "self", ".", "MakeProtoSdist", "(", ")", "core_sdist", "=", "self", ".", "MakeCoreSdist", "(", ")", "client_sdist", "=", "self", ".", "MakeClientSdist", "(", ")", "client_builder_sdist", "=", "self", ".", "MakeClientBuilderSdist", "(", ")", "self", ".", "InstallGRR", "(", "proto_sdist", ")", "self", ".", "InstallGRR", "(", "core_sdist", ")", "self", ".", "InstallGRR", "(", "client_sdist", ")", "self", ".", "InstallGRR", "(", "client_builder_sdist", ")", "self", ".", "BuildTemplates", "(", ")", "if", "args", ".", "test_repack_install", ":", "self", ".", "_RepackTemplates", "(", ")", "self", ".", "_InstallInstallers", "(", ")" ]
sudo apt - get install xautomation apt - get install autokey - gtk
def focus_window ( winhandle , path = None , name = None , sleeptime = .01 ) : import utool as ut import time print ( 'focus: ' + winhandle ) args = [ 'wmctrl' , '-xa' , winhandle ] ut . cmd ( * args , verbose = False , quiet = True ) time . sleep ( sleeptime )
8,949
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_ubuntu.py#L668-L681
[ "def", "num_in_memory", "(", "self", ")", ":", "n", "=", "len", "(", "self", ".", "_data", ")", "-", "1", "while", "n", ">=", "0", ":", "if", "isinstance", "(", "self", ".", "_data", "[", "n", "]", ",", "_TensorValueDiscarded", ")", ":", "break", "n", "-=", "1", "return", "len", "(", "self", ".", "_data", ")", "-", "1", "-", "n" ]
Gives files matching pattern the same chmod flags as setup . py
def setup_chmod ( setup_fpath , setup_dir , chmod_patterns ) : #st_mode = os.stat(setup_fpath).st_mode st_mode = 33277 for pattern in chmod_patterns : for fpath in util_path . glob ( setup_dir , pattern , recursive = True ) : print ( '[setup] chmod fpath=%r' % fpath ) os . chmod ( fpath , st_mode )
8,950
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_setup.py#L103-L110
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Implicitly build kwargs based on standard info
def __infer_setup_kwargs ( module , kwargs ) : # Get project name from the module #if 'name' not in kwargs: # kwargs['name'] = module.__name__ #else: # raise AssertionError('must specify module name!') name = kwargs [ 'name' ] # Our projects depend on utool #if kwargs['name'] != 'utool': # install_requires = kwargs.get('install_requires', []) # if 'utool' not in install_requires: # install_requires.append('utool') # kwargs['install_requires'] = install_requires packages = kwargs . get ( 'packages' , [ ] ) if name not in packages : packages . append ( name ) kwargs [ 'packages' ] = packages if 'version' not in kwargs : version = parse_package_for_version ( name ) kwargs [ 'version' ] = version # Parse version #if 'version' not in kwargs: # if module is None: # version_errmsg = 'You must include a version (preferably one that matches the __version__ variable in your modules init file' # raise AssertionError(version_errmsg) # else: # Parse license if 'license' not in kwargs : try : kwargs [ 'license' ] = read_license ( 'LICENSE' ) except IOError : pass # Parse readme if 'long_description' not in kwargs : kwargs [ 'long_description' ] = parse_readme ( )
8,951
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_setup.py#L605-L643
[ "def", "queries", "(", "self", ",", "request", ")", ":", "queries", "=", "self", ".", "get_queries", "(", "request", ")", "worlds", "=", "[", "]", "with", "self", ".", "mapper", ".", "begin", "(", ")", "as", "session", ":", "for", "_", "in", "range", "(", "queries", ")", ":", "world", "=", "session", ".", "query", "(", "World", ")", ".", "get", "(", "randint", "(", "1", ",", "MAXINT", ")", ")", "worlds", ".", "append", "(", "self", ".", "get_json", "(", "world", ")", ")", "return", "Json", "(", "worlds", ")", ".", "http_response", "(", "request", ")" ]
Replace elements in iterable with values from an alias dict suppressing empty values .
def _replaced ( __values , * * __replacements ) : return tuple ( o for o in ( __replacements . get ( name , name ) for name in __values ) if o )
8,952
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L14-L20
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Get the base name of the admin route for a model or model instance .
def _get_admin_route_name ( model_or_instance ) : model = model_or_instance if isinstance ( model_or_instance , type ) else type ( model_or_instance ) return 'admin:{meta.app_label}_{meta.model_name}' . format ( meta = model . _meta )
8,953
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L23-L31
[ "def", "create_frvect", "(", "timeseries", ")", ":", "# create timing dimension", "dims", "=", "frameCPP", ".", "Dimension", "(", "timeseries", ".", "size", ",", "timeseries", ".", "dx", ".", "value", ",", "str", "(", "timeseries", ".", "dx", ".", "unit", ")", ",", "0", ")", "# create FrVect", "vect", "=", "frameCPP", ".", "FrVect", "(", "timeseries", ".", "name", "or", "''", ",", "FRVECT_TYPE_FROM_NUMPY", "[", "timeseries", ".", "dtype", ".", "type", "]", ",", "1", ",", "dims", ",", "str", "(", "timeseries", ".", "unit", ")", ")", "# populate FrVect and return", "vect", ".", "GetDataArray", "(", ")", "[", ":", "]", "=", "numpy", ".", "require", "(", "timeseries", ".", "value", ",", "requirements", "=", "[", "'C'", "]", ")", "return", "vect" ]
Build a filter URL to an admin changelist of all objects with similar field values .
def _build_admin_filter_url ( model , filters ) : url = reverse ( _get_admin_route_name ( model ) + '_changelist' ) parts = urlsplit ( url ) query = parse_qs ( parts . query ) query . update ( filters ) parts_with_filter = parts . _replace ( query = urlencode ( query ) ) return urlunsplit ( parts_with_filter )
8,954
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L34-L41
[ "def", "start", "(", "self", ",", "timeout_secs", "=", "60", ")", ":", "def", "_handle_timeout", "(", "signum", ",", "frame", ")", ":", "raise", "HerokuTimeoutError", "(", "\"Failed to start after {} seconds.\"", ".", "format", "(", "timeout_secs", ",", "self", ".", "_record", ")", ")", "if", "self", ".", "is_running", ":", "self", ".", "out", ".", "log", "(", "\"Local Heroku is already running.\"", ")", "return", "signal", ".", "signal", "(", "signal", ".", "SIGALRM", ",", "_handle_timeout", ")", "signal", ".", "alarm", "(", "timeout_secs", ")", "self", ".", "_boot", "(", ")", "try", ":", "success", "=", "self", ".", "_verify_startup", "(", ")", "finally", ":", "signal", ".", "alarm", "(", "0", ")", "if", "not", "success", ":", "self", ".", "stop", "(", "signal", ".", "SIGKILL", ")", "raise", "HerokuStartupError", "(", "\"Failed to start for unknown reason: {}\"", ".", "format", "(", "self", ".", "_record", ")", ")", "return", "True" ]
Create a function that links to a changelist of all objects with similar field values .
def _make_admin_link_to_similar ( primary_field , * fields , name = None ) : fields = ( primary_field , ) + fields url_template = '<a href="{url}">{name_or_value}</a>' def field_link ( self , obj ) : value = getattr ( obj , primary_field , None ) name_or_value = name or value filters = { field_name : getattr ( obj , field_name ) for field_name in fields } url = _build_admin_filter_url ( obj , filters ) return format_html ( url_template , * * locals ( ) ) if url else value field_link . allow_tags = True field_link . short_description = primary_field . replace ( '_' , ' ' ) . capitalize ( ) field_link . admin_order_field = primary_field field_link . __name__ = field_link . __name__ . replace ( 'field' , primary_field ) return field_link
8,955
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L44-L60
[ "def", "shutdown", "(", "self", ")", ":", "vm", "=", "self", ".", "get_vm_failfast", "(", "self", ".", "config", "[", "'name'", "]", ")", "if", "vm", ".", "runtime", ".", "powerState", "==", "vim", ".", "VirtualMachinePowerState", ".", "poweredOff", ":", "print", "(", "\"%s already poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "if", "self", ".", "guestToolsRunning", "(", "vm", ")", ":", "timeout_minutes", "=", "10", "print", "(", "\"waiting for %s to shutdown \"", "\"(%s minutes before forced powerOff)\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "vm", ".", "ShutdownGuest", "(", ")", "if", "self", ".", "WaitForVirtualMachineShutdown", "(", "vm", ",", "timeout_minutes", "*", "60", ")", ":", "print", "(", "\"shutdown complete\"", ")", "print", "(", "\"%s poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "print", "(", "\"%s has not shutdown after %s minutes:\"", "\"will powerOff\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "self", ".", "powerOff", "(", ")", "else", ":", "print", "(", "\"GuestTools not running or not installed: will powerOff\"", ")", "self", ".", "powerOff", "(", ")" ]
Try to re - apply a failed trigger log action .
def _retry_failed_log ( failed_trigger_log ) : model = type ( failed_trigger_log ) try : failed_trigger_log = ( model . objects . select_for_update ( ) . get ( id = failed_trigger_log . id , state = TRIGGER_LOG_STATE [ 'FAILED' ] , ) ) except model . DoesNotExist : return False failed_trigger_log . redo ( ) return True
8,956
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L69-L92
[ "def", "stations", "(", "self", ",", "*", ",", "generated", "=", "True", ",", "library", "=", "True", ")", ":", "station_list", "=", "[", "]", "for", "chunk", "in", "self", ".", "stations_iter", "(", "page_size", "=", "49995", ")", ":", "for", "station", "in", "chunk", ":", "if", "(", "(", "generated", "and", "not", "station", ".", "get", "(", "'inLibrary'", ")", ")", "or", "(", "library", "and", "station", ".", "get", "(", "'inLibrary'", ")", ")", ")", ":", "station_list", ".", "append", "(", "station", ")", "return", "station_list" ]
Set FAILED trigger logs in queryset to IGNORED .
def ignore_failed_logs_action ( self , request , queryset ) : count = _ignore_failed_logs ( queryset ) self . message_user ( request , _ ( '{count} failed trigger logs marked as ignored.' ) . format ( count = count ) , )
8,957
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L156-L162
[ "def", "Nu_vertical_cylinder", "(", "Pr", ",", "Gr", ",", "L", "=", "None", ",", "D", "=", "None", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "vertical_cylinder_correlations", ".", "items", "(", ")", ":", "if", "values", "[", "4", "]", "or", "all", "(", "(", "L", ",", "D", ")", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Popiel & Churchill'", "in", "methods", ":", "methods", ".", "remove", "(", "'Popiel & Churchill'", ")", "methods", ".", "insert", "(", "0", ",", "'Popiel & Churchill'", ")", "elif", "'McAdams, Weiss & Saunders'", "in", "methods", ":", "methods", ".", "remove", "(", "'McAdams, Weiss & Saunders'", ")", "methods", ".", "insert", "(", "0", ",", "'McAdams, Weiss & Saunders'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "vertical_cylinder_correlations", ":", "if", "vertical_cylinder_correlations", "[", "Method", "]", "[", "4", "]", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ",", "L", "=", "L", ",", "D", "=", "D", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
Try to re - apply FAILED trigger log actions in the queryset .
def retry_failed_logs_action ( self , request , queryset ) : count = 0 for trigger_log in queryset : retried = _retry_failed_log ( trigger_log ) if retried : count += 1 self . message_user ( request , _ ( '{count} failed trigger logs retried.' ) . format ( count = count ) , )
8,958
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/admin.py#L165-L175
[ "def", "prompt_protocol", "(", ")", ":", "stop", "=", "3", "ans", "=", "\"\"", "while", "True", "and", "stop", ">", "0", ":", "ans", "=", "input", "(", "\"Save as (d)ictionary or (o)bject?\\n\"", "\"* Note:\\n\"", "\"Dictionaries are more basic, and are compatible with Python v2.7+.\\n\"", "\"Objects are more complex, and are only compatible with v3.4+ \"", ")", "if", "ans", "not", "in", "(", "\"d\"", ",", "\"o\"", ")", ":", "print", "(", "\"Invalid response: Please choose 'd' or 'o'\"", ")", "else", ":", "break", "# if a valid answer isn't captured, default to dictionary (safer, broader)", "if", "ans", "==", "\"\"", ":", "ans", "=", "\"d\"", "return", "ans" ]
Reads PSMs from file stores them to a database backend in chunked PSMs .
def create_psm_lookup ( fn , fastafn , mapfn , header , pgdb , unroll = False , specfncol = None , decoy = False , fastadelim = None , genefield = None ) : proteins = store_proteins_descriptions ( pgdb , fastafn , fn , mapfn , header , decoy , fastadelim , genefield ) mzmlmap = pgdb . get_mzmlfile_map ( ) sequences = { } for psm in tsvreader . generate_tsv_psms ( fn , header ) : seq = tsvreader . get_psm_sequence ( psm , unroll ) sequences [ seq ] = 1 pgdb . store_pepseqs ( ( ( seq , ) for seq in sequences ) ) pepseqmap = pgdb . get_peptide_seq_map ( ) psms = [ ] for row , psm in enumerate ( tsvreader . generate_tsv_psms ( fn , header ) ) : specfn , psm_id , scan , seq , score = tsvreader . get_psm ( psm , unroll , specfncol ) if len ( psms ) % DB_STORE_CHUNK == 0 : pgdb . store_psms ( psms ) psms = [ ] psms . append ( { 'rownr' : row , 'psm_id' : psm_id , 'seq' : pepseqmap [ seq ] , 'score' : score , 'specfn' : mzmlmap [ specfn ] , 'scannr' : scan , 'spec_id' : '{}_{}' . format ( mzmlmap [ specfn ] , scan ) , } ) pgdb . store_psms ( psms ) pgdb . index_psms ( ) store_psm_protein_relations ( fn , header , pgdb , proteins )
8,959
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/psms.py#L9-L40
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Reads PSMs from file extracts their proteins and peptides and passes them to a database backend in chunks .
def store_psm_protein_relations ( fn , header , pgdb , proteins ) : # TODO do we need an OrderedDict or is regular dict enough? # Sorting for psm_id useful? allpsms = OrderedDict ( ) last_id , psmids_to_store = None , set ( ) store_soon = False for psm in tsvreader . generate_tsv_psms ( fn , header ) : psm_id , prots = tsvreader . get_pepproteins ( psm ) prots = [ x for x in prots if x in proteins ] try : # In case the PSMs are presented unrolled allpsms [ psm_id ] . extend ( prots ) except KeyError : allpsms [ psm_id ] = prots if len ( psmids_to_store ) % DB_STORE_CHUNK == 0 : store_soon = True if store_soon and last_id != psm_id : pgdb . store_peptides_proteins ( allpsms , psmids_to_store ) store_soon = False psmids_to_store = set ( ) psmids_to_store . add ( psm_id ) last_id = psm_id if len ( psmids_to_store ) > 0 : pgdb . store_peptides_proteins ( allpsms , psmids_to_store ) pgdb . index_protein_peptides ( ) return allpsms
8,960
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/psms.py#L92-L120
[ "def", "flash", "(", "self", ",", "duration", "=", "0.0", ")", ":", "for", "_", "in", "range", "(", "2", ")", ":", "self", ".", "on", "=", "not", "self", ".", "on", "time", ".", "sleep", "(", "duration", ")" ]
If an error is thrown in the scope of this function s stack frame then the decorated function name and the arguments passed to it will be printed to the utool print function .
def on_exception_report_input ( func_ = None , force = False , keys = None ) : def _closure_onexceptreport ( func ) : if not ONEX_REPORT_INPUT and not force : return func @ ignores_exc_tb ( outer_wrapper = False ) #@wraps(func) def wrp_onexceptreport ( * args , * * kwargs ) : try : #import utool #if utool.DEBUG: # print('[IN EXCPRPT] args=%r' % (args,)) # print('[IN EXCPRPT] kwargs=%r' % (kwargs,)) return func ( * args , * * kwargs ) except Exception as ex : from utool import util_str print ( 'ERROR occured! Reporting input to function' ) if keys is not None : from utool import util_inspect from utool import util_list from utool import util_dict argspec = util_inspect . get_func_argspec ( func ) in_kwargs_flags = [ key in kwargs for key in keys ] kwarg_keys = util_list . compress ( keys , in_kwargs_flags ) kwarg_vals = [ kwargs . get ( key ) for key in kwarg_keys ] flags = util_list . not_list ( in_kwargs_flags ) arg_keys = util_list . compress ( keys , flags ) arg_idxs = [ argspec . args . index ( key ) for key in arg_keys ] num_nodefault = len ( argspec . args ) - len ( argspec . defaults ) default_vals = ( ( [ None ] * ( num_nodefault ) ) + list ( argspec . defaults ) ) args_ = list ( args ) + default_vals [ len ( args ) + 1 : ] arg_vals = util_list . take ( args_ , arg_idxs ) requested_dict = dict ( util_list . flatten ( [ zip ( kwarg_keys , kwarg_vals ) , zip ( arg_keys , arg_vals ) ] ) ) print ( 'input dict = ' + util_str . repr4 ( util_dict . dict_subset ( requested_dict , keys ) ) ) # (print out specific keys only) pass arg_strs = ', ' . join ( [ repr ( util_str . truncate_str ( str ( arg ) ) ) for arg in args ] ) kwarg_strs = ', ' . join ( [ util_str . truncate_str ( '%s=%r' % ( key , val ) ) for key , val in six . iteritems ( kwargs ) ] ) msg = ( '\nERROR: funcname=%r,\n * args=%s,\n * kwargs=%r\n' % ( meta_util_six . 
get_funcname ( func ) , arg_strs , kwarg_strs ) ) msg += ' * len(args) = %r\n' % len ( args ) msg += ' * len(kwargs) = %r\n' % len ( kwargs ) util_dbg . printex ( ex , msg , pad_stdout = True ) raise wrp_onexceptreport = preserve_sig ( wrp_onexceptreport , func ) return wrp_onexceptreport if func_ is None : return _closure_onexceptreport else : return _closure_onexceptreport ( func_ )
8,961
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L211-L270
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n 
/ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", "destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
does the actual work of indent_func
def _indent_decor ( lbl ) : def closure_indent ( func ) : if util_arg . TRACE : @ ignores_exc_tb ( outer_wrapper = False ) #@wraps(func) def wrp_indent ( * args , * * kwargs ) : with util_print . Indenter ( lbl ) : print ( ' ...trace[in]' ) ret = func ( * args , * * kwargs ) print ( ' ...trace[out]' ) return ret else : @ ignores_exc_tb ( outer_wrapper = False ) #@wraps(func) def wrp_indent ( * args , * * kwargs ) : with util_print . Indenter ( lbl ) : ret = func ( * args , * * kwargs ) return ret wrp_indent_ = ignores_exc_tb ( wrp_indent ) wrp_indent_ = preserve_sig ( wrp_indent , func ) return wrp_indent_ return closure_indent
8,962
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L305-L329
[ "def", "binary", "(", "self", ")", ":", "def", "_get_binary", "(", ")", ":", "# Retrieve all entries from the remote virtual folder", "parser", "=", "self", ".", "_create_directory_parser", "(", "self", ".", "path", ")", "if", "not", "parser", ".", "entries", ":", "raise", "errors", ".", "NotFoundError", "(", "'No entries found'", ",", "self", ".", "path", ")", "# Download the first matched directory entry", "pattern", "=", "re", ".", "compile", "(", "self", ".", "binary_regex", ",", "re", ".", "IGNORECASE", ")", "for", "entry", "in", "parser", ".", "entries", ":", "try", ":", "self", ".", "_binary", "=", "pattern", ".", "match", "(", "entry", ")", ".", "group", "(", ")", "break", "except", "Exception", ":", "# No match, continue with next entry", "continue", "else", ":", "raise", "errors", ".", "NotFoundError", "(", "\"Binary not found in folder\"", ",", "self", ".", "path", ")", "self", ".", "_retry_check_404", "(", "_get_binary", ")", "return", "self", ".", "_binary" ]
Takes either no arguments or an alias label
def indent_func ( input_ ) : if isinstance ( input_ , six . string_types ) : # A label was specified lbl = input_ return _indent_decor ( lbl ) elif isinstance ( input_ , ( bool , tuple ) ) : # Allow individually turning of of this decorator func = input_ return func else : # Use the function name as the label func = input_ lbl = '[' + meta_util_six . get_funcname ( func ) + ']' return _indent_decor ( lbl ) ( func )
8,963
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L332-L348
[ "def", "socket_monitor_loop", "(", "self", ")", ":", "try", ":", "while", "True", ":", "gevent", ".", "socket", ".", "wait_read", "(", "self", ".", "socket", ".", "fileno", "(", ")", ")", "self", ".", "_handle_log_rotations", "(", ")", "self", ".", "capture_packet", "(", ")", "finally", ":", "self", ".", "clean_up", "(", ")" ]
Causes output of function to be printed in an XML style block
def tracefunc_xml ( func ) : funcname = meta_util_six . get_funcname ( func ) def wrp_tracefunc2 ( * args , * * kwargs ) : verbose = kwargs . get ( 'verbose' , True ) if verbose : print ( '<%s>' % ( funcname , ) ) with util_print . Indenter ( ' ' ) : ret = func ( * args , * * kwargs ) if verbose : print ( '</%s>' % ( funcname , ) ) return ret wrp_tracefunc2_ = ignores_exc_tb ( wrp_tracefunc2 ) wrp_tracefunc2_ = preserve_sig ( wrp_tracefunc2_ , func ) return wrp_tracefunc2_
8,964
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L351-L367
[ "def", "saelgv", "(", "vec1", ",", "vec2", ")", ":", "vec1", "=", "stypes", ".", "toDoubleVector", "(", "vec1", ")", "vec2", "=", "stypes", ".", "toDoubleVector", "(", "vec2", ")", "smajor", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "sminor", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "saelgv_c", "(", "vec1", ",", "vec2", ",", "smajor", ",", "sminor", ")", "return", "stypes", ".", "cVectorToPython", "(", "smajor", ")", ",", "stypes", ".", "cVectorToPython", "(", "sminor", ")" ]
DEPRICATE in favor of accepts_scalar_input2 only accepts one input as vector
def accepts_scalar_input ( func ) : #@on_exception_report_input @ ignores_exc_tb ( outer_wrapper = False ) #@wraps(func) def wrp_asi ( self , input_ , * args , * * kwargs ) : #if HAVE_PANDAS: # if isinstance(input_, (pd.DataFrame, pd.Series)): # input_ = input_.values if util_iter . isiterable ( input_ ) : # If input is already iterable do default behavior return func ( self , input_ , * args , * * kwargs ) else : # If input is scalar, wrap input, execute, and unpack result #ret = func(self, (input_,), *args, **kwargs) ret = func ( self , [ input_ ] , * args , * * kwargs ) if ret is not None : return ret [ 0 ] wrp_asi = preserve_sig ( wrp_asi , func ) return wrp_asi
8,965
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L372-L418
[ "def", "_retrieve_offsets", "(", "self", ",", "timestamps", ",", "timeout_ms", "=", "float", "(", "\"inf\"", ")", ")", ":", "if", "not", "timestamps", ":", "return", "{", "}", "start_time", "=", "time", ".", "time", "(", ")", "remaining_ms", "=", "timeout_ms", "while", "remaining_ms", ">", "0", ":", "future", "=", "self", ".", "_send_offset_requests", "(", "timestamps", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "future", ",", "timeout_ms", "=", "remaining_ms", ")", "if", "future", ".", "succeeded", "(", ")", ":", "return", "future", ".", "value", "if", "not", "future", ".", "retriable", "(", ")", ":", "raise", "future", ".", "exception", "# pylint: disable-msg=raising-bad-type", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining_ms", "<", "0", ":", "break", "if", "future", ".", "exception", ".", "invalid_metadata", ":", "refresh_future", "=", "self", ".", "_client", ".", "cluster", ".", "request_update", "(", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "refresh_future", ",", "timeout_ms", "=", "remaining_ms", ")", "else", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "'retry_backoff_ms'", "]", "/", "1000.0", ")", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "raise", "Errors", ".", "KafkaTimeoutError", "(", "\"Failed to get offsets by timestamps in %s ms\"", "%", "(", "timeout_ms", ",", ")", ")" ]
debugging function for accepts_scalar_input2 checks to make sure all the iterable inputs are of the same length
def __assert_param_consistency ( args , argx_list_ ) : if util_arg . NO_ASSERTS : return if len ( argx_list_ ) == 0 : return True argx_flags = [ util_iter . isiterable ( args [ argx ] ) for argx in argx_list_ ] try : assert all ( [ argx_flags [ 0 ] == flag for flag in argx_flags ] ) , ( 'invalid mixing of iterable and scalar inputs' ) except AssertionError as ex : print ( '!!! ASSERTION ERROR IN UTIL_DECOR !!!' ) for argx in argx_list_ : print ( '[util_decor] args[%d] = %r' % ( argx , args [ argx ] ) ) raise ex
8,966
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L463-L480
[ "def", "throttle", "(", "self", ",", "wait", ")", ":", "ns", "=", "self", ".", "Namespace", "(", ")", "ns", ".", "timeout", "=", "None", "ns", ".", "throttling", "=", "None", "ns", ".", "more", "=", "None", "ns", ".", "result", "=", "None", "def", "done", "(", ")", ":", "ns", ".", "more", "=", "ns", ".", "throttling", "=", "False", "whenDone", "=", "_", ".", "debounce", "(", "done", ",", "wait", ")", "wait", "=", "(", "float", "(", "wait", ")", "/", "float", "(", "1000", ")", ")", "def", "throttled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "later", "(", ")", ":", "ns", ".", "timeout", "=", "None", "if", "ns", ".", "more", ":", "self", ".", "obj", "(", "*", "args", ",", "*", "*", "kwargs", ")", "whenDone", "(", ")", "if", "not", "ns", ".", "timeout", ":", "ns", ".", "timeout", "=", "Timer", "(", "wait", ",", "later", ")", "ns", ".", "timeout", ".", "start", "(", ")", "if", "ns", ".", "throttling", ":", "ns", ".", "more", "=", "True", "else", ":", "ns", ".", "throttling", "=", "True", "ns", ".", "result", "=", "self", ".", "obj", "(", "*", "args", ",", "*", "*", "kwargs", ")", "whenDone", "(", ")", "return", "ns", ".", "result", "return", "self", ".", "_wrap", "(", "throttled", ")" ]
DEPRICATE IN FAVOR OF accepts_scalar_input2
def accepts_scalar_input_vector_output ( func ) : @ ignores_exc_tb ( outer_wrapper = False ) #@wraps(func) def wrp_asivo ( self , input_ , * args , * * kwargs ) : #import utool #if utool.DEBUG: # print('[IN SIVO] args=%r' % (args,)) # print('[IN SIVO] kwargs=%r' % (kwargs,)) if util_iter . isiterable ( input_ ) : # If input is already iterable do default behavior return func ( self , input_ , * args , * * kwargs ) else : # If input is scalar, wrap input, execute, and unpack result result = func ( self , ( input_ , ) , * args , * * kwargs ) # The output length could be 0 on a scalar input if len ( result ) == 0 : return [ ] else : assert len ( result ) == 1 , 'error in asivo' return result [ 0 ] return wrp_asivo
8,967
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L483-L521
[ "def", "_retrieve_offsets", "(", "self", ",", "timestamps", ",", "timeout_ms", "=", "float", "(", "\"inf\"", ")", ")", ":", "if", "not", "timestamps", ":", "return", "{", "}", "start_time", "=", "time", ".", "time", "(", ")", "remaining_ms", "=", "timeout_ms", "while", "remaining_ms", ">", "0", ":", "future", "=", "self", ".", "_send_offset_requests", "(", "timestamps", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "future", ",", "timeout_ms", "=", "remaining_ms", ")", "if", "future", ".", "succeeded", "(", ")", ":", "return", "future", ".", "value", "if", "not", "future", ".", "retriable", "(", ")", ":", "raise", "future", ".", "exception", "# pylint: disable-msg=raising-bad-type", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining_ms", "<", "0", ":", "break", "if", "future", ".", "exception", ".", "invalid_metadata", ":", "refresh_future", "=", "self", ".", "_client", ".", "cluster", ".", "request_update", "(", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "refresh_future", ",", "timeout_ms", "=", "remaining_ms", ")", "else", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "'retry_backoff_ms'", "]", "/", "1000.0", ")", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "raise", "Errors", ".", "KafkaTimeoutError", "(", "\"Failed to get offsets by timestamps in %s ms\"", "%", "(", "timeout_ms", ",", ")", ")" ]
Allows the first input to be a numpy array and get result in numpy form
def accepts_numpy ( func ) : #@ignores_exc_tb #@wraps(func) def wrp_accepts_numpy ( self , input_ , * args , * * kwargs ) : if not ( util_type . HAVE_NUMPY and isinstance ( input_ , np . ndarray ) ) : # If the input is not numpy, just call the function return func ( self , input_ , * args , * * kwargs ) else : # TODO: use a variant of util_list.unflat_unique_rowid_map # If the input is a numpy array, and return the output with the same # shape as the input if UNIQUE_NUMPY : # Remove redundant input (because we are passing it to SQL) input_list , inverse_unique = np . unique ( input_ , return_inverse = True ) else : input_list = input_ . flatten ( ) # Call the function in list format # TODO: is this necessary? input_list = input_list . tolist ( ) output_list = func ( self , input_list , * args , * * kwargs ) # Put the output back into numpy if UNIQUE_NUMPY : # Reconstruct redundant queries output_arr = np . array ( output_list ) [ inverse_unique ] output_shape = tuple ( list ( input_ . shape ) + list ( output_arr . shape [ 1 : ] ) ) return np . array ( output_arr ) . reshape ( output_shape ) else : return np . array ( output_list ) . reshape ( input_ . shape ) wrp_accepts_numpy = preserve_sig ( wrp_accepts_numpy , func ) return wrp_accepts_numpy
8,968
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L529-L559
[ "def", "metis", "(", "hdf5_file_name", ",", "N_clusters_max", ")", ":", "file_name", "=", "wgraph", "(", "hdf5_file_name", ")", "labels", "=", "sgraph", "(", "N_clusters_max", ",", "file_name", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "file_name", "]", ")", "return", "labels" ]
Memoization decorator for functions taking a nonzero number of arguments .
def memoize_nonzero ( func ) : class _memorizer ( dict ) : def __init__ ( self , func ) : self . func = func def __call__ ( self , * args ) : return self [ args ] def __missing__ ( self , key ) : ret = self [ key ] = self . func ( * key ) return ret return _memorizer ( func )
8,969
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L562-L577
[ "def", "merge_entities", "(", "self", ",", "from_entity_ids", ",", "to_entity_id", ",", "force", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'from_entity_ids'", ":", "from_entity_ids", ",", "'to_entity_id'", ":", "to_entity_id", ",", "'force'", ":", "force", ",", "}", "api_path", "=", "'/v1/{mount_point}/entity/merge'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
simple memoization decorator
def memoize ( func ) : cache = func . _util_decor_memoize_cache = { } # @functools.wraps(func) def memoizer ( * args , * * kwargs ) : key = str ( args ) + str ( kwargs ) if key not in cache : cache [ key ] = func ( * args , * * kwargs ) return cache [ key ] memoizer = preserve_sig ( memoizer , func ) memoizer . cache = cache return memoizer
8,970
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L601-L651
[ "def", "concatenate_json", "(", "source_folder", ",", "destination_file", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "source_folder", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*.json'", ")", ":", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "with", "open", "(", "destination_file", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"[\\n\"", ")", "for", "m", "in", "matches", "[", ":", "-", "1", "]", ":", "f", ".", "write", "(", "open", "(", "m", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\",\\n\"", ")", "f", ".", "write", "(", "open", "(", "matches", "[", "-", "1", "]", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\"\\n]\"", ")" ]
Returns a memcached version of a function
def lazyfunc ( func ) : closuremem_ = [ { } ] def wrapper ( * args , * * kwargs ) : mem = closuremem_ [ 0 ] key = ( repr ( args ) , repr ( kwargs ) ) try : return mem [ key ] except KeyError : mem [ key ] = func ( * args , * * kwargs ) return mem [ key ] return wrapper
8,971
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L752-L765
[ "def", "_config_pipe", "(", "self", ")", ":", "self", ".", "_cfg", ".", "enable_device", "(", "self", ".", "id", ")", "# configure the color stream", "self", ".", "_cfg", ".", "enable_stream", "(", "rs", ".", "stream", ".", "color", ",", "RealSenseSensor", ".", "COLOR_IM_WIDTH", ",", "RealSenseSensor", ".", "COLOR_IM_HEIGHT", ",", "rs", ".", "format", ".", "bgr8", ",", "RealSenseSensor", ".", "FPS", ")", "# configure the depth stream", "self", ".", "_cfg", ".", "enable_stream", "(", "rs", ".", "stream", ".", "depth", ",", "RealSenseSensor", ".", "DEPTH_IM_WIDTH", ",", "360", "if", "self", ".", "_depth_align", "else", "RealSenseSensor", ".", "DEPTH_IM_HEIGHT", ",", "rs", ".", "format", ".", "z16", ",", "RealSenseSensor", ".", "FPS", ")" ]
Changes docstr of one functio to that of another
def apply_docstr ( docstr_func ) : def docstr_applier ( func ) : #docstr = meta_util_six.get_funcdoc(docstr_func) #meta_util_six.set_funcdoc(func, docstr) if isinstance ( docstr_func , six . string_types ) : olddoc = meta_util_six . get_funcdoc ( func ) if olddoc is None : olddoc = '' newdoc = olddoc + docstr_func meta_util_six . set_funcdoc ( func , newdoc ) return func else : preserved_func = preserve_sig ( func , docstr_func ) return preserved_func return docstr_applier
8,972
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L768-L785
[ "def", "stations", "(", "self", ",", "*", ",", "generated", "=", "True", ",", "library", "=", "True", ")", ":", "station_list", "=", "[", "]", "for", "chunk", "in", "self", ".", "stations_iter", "(", "page_size", "=", "49995", ")", ":", "for", "station", "in", "chunk", ":", "if", "(", "(", "generated", "and", "not", "station", ".", "get", "(", "'inLibrary'", ")", ")", "or", "(", "library", "and", "station", ".", "get", "(", "'inLibrary'", ")", ")", ")", ":", "station_list", ".", "append", "(", "station", ")", "return", "station_list" ]
Decorates a wrapper function .
def preserve_sig ( wrapper , orig_func , force = False ) : #if True: # import functools # return functools.wraps(orig_func)(wrapper) from utool . _internal import meta_util_six from utool import util_str from utool import util_inspect if wrapper is orig_func : # nothing to do return orig_func orig_docstr = meta_util_six . get_funcdoc ( orig_func ) orig_docstr = '' if orig_docstr is None else orig_docstr orig_argspec = util_inspect . get_func_argspec ( orig_func ) wrap_name = meta_util_six . get_funccode ( wrapper ) . co_name orig_name = meta_util_six . get_funcname ( orig_func ) # At the very least preserve info in a dictionary _utinfo = { } _utinfo [ 'orig_func' ] = orig_func _utinfo [ 'wrap_name' ] = wrap_name _utinfo [ 'orig_name' ] = orig_name _utinfo [ 'orig_argspec' ] = orig_argspec if hasattr ( wrapper , '_utinfo' ) : parent_wrapper_utinfo = wrapper . _utinfo _utinfo [ 'parent_wrapper_utinfo' ] = parent_wrapper_utinfo if hasattr ( orig_func , '_utinfo' ) : parent_orig_utinfo = orig_func . _utinfo _utinfo [ 'parent_orig_utinfo' ] = parent_orig_utinfo # environment variable is set if you are building documentation # preserve sig if building docs building_docs = os . environ . get ( 'UTOOL_AUTOGEN_SPHINX_RUNNING' , 'OFF' ) == 'ON' if force or SIG_PRESERVE or building_docs : # PRESERVES ALL SIGNATURES WITH EXECS src_fmt = r''' def _wrp_preserve{defsig}: """ {orig_docstr} """ try: return wrapper{callsig} except Exception as ex: import utool as ut msg = ('Failure in signature preserving wrapper:\n') ut.printex(ex, msg) raise ''' # Put wrapped function into a scope globals_ = { 'wrapper' : wrapper } locals_ = { } # argspec is :ArgSpec(args=['bar', 'baz'], varargs=None, keywords=None, # defaults=(True,)) # get orig functions argspec # get functions signature # Get function call signature (no defaults) # Define an exec function argspec = inspect . getargspec ( orig_func ) ( args , varargs , varkw , defaults ) = argspec defsig = inspect . 
formatargspec ( * argspec ) callsig = inspect . formatargspec ( * argspec [ 0 : 3 ] ) # TODO: # ut.func_defsig # ut.func_callsig src_fmtdict = dict ( defsig = defsig , callsig = callsig , orig_docstr = orig_docstr ) src = textwrap . dedent ( src_fmt ) . format ( * * src_fmtdict ) # Define the new function on the fly # (I wish there was a non exec / eval way to do this) #print(src) code = compile ( src , '<string>' , 'exec' ) six . exec_ ( code , globals_ , locals_ ) #six.exec_(src, globals_, locals_) # Use functools.update_wapper to complete preservation _wrp_preserve = functools . update_wrapper ( locals_ [ '_wrp_preserve' ] , orig_func ) # Keep debug info _utinfo [ 'src' ] = src # Set an internal sig variable that we may use #_wrp_preserve.__sig__ = defsig else : # PRESERVES SOME SIGNATURES NO EXEC # signature preservation is turned off. just preserve the name. # Does not use any exec or eval statments. _wrp_preserve = functools . update_wrapper ( wrapper , orig_func ) # Just do something to preserve signature DEBUG_WRAPPED_DOCSTRING = False if DEBUG_WRAPPED_DOCSTRING : new_docstr_fmtstr = util_str . codeblock ( ''' Wrapped function {wrap_name}({orig_name}) orig_argspec = {orig_argspec} orig_docstr = {orig_docstr} ''' ) else : new_docstr_fmtstr = util_str . codeblock ( ''' {orig_docstr} ''' ) new_docstr = new_docstr_fmtstr . format ( wrap_name = wrap_name , orig_name = orig_name , orig_docstr = orig_docstr , orig_argspec = orig_argspec ) meta_util_six . set_funcdoc ( _wrp_preserve , new_docstr ) _wrp_preserve . _utinfo = _utinfo return _wrp_preserve
8,973
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L788-L935
[ "def", "merge_offsets_metadata", "(", "topics", ",", "*", "offsets_responses", ")", ":", "result", "=", "dict", "(", ")", "for", "topic", "in", "topics", ":", "partition_offsets", "=", "[", "response", "[", "topic", "]", "for", "response", "in", "offsets_responses", "if", "topic", "in", "response", "]", "result", "[", "topic", "]", "=", "merge_partition_offsets", "(", "*", "partition_offsets", ")", "return", "result" ]
helper function to round a number to significant figures
def _sigfigs ( n , sigfigs = 3 ) : n = float ( n ) if n == 0 or math . isnan ( n ) : # avoid math domain errors return n return round ( n , - int ( math . floor ( math . log10 ( abs ( n ) ) ) - sigfigs + 1 ) )
8,974
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L408-L413
[ "def", "km3h5concat", "(", "input_files", ",", "output_file", ",", "n_events", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "km3pipe", "import", "Pipeline", "# noqa", "from", "km3pipe", ".", "io", "import", "HDF5Pump", ",", "HDF5Sink", "# noqa", "pipe", "=", "Pipeline", "(", ")", "pipe", ".", "attach", "(", "HDF5Pump", ",", "filenames", "=", "input_files", ",", "*", "*", "kwargs", ")", "pipe", ".", "attach", "(", "StatusBar", ",", "every", "=", "250", ")", "pipe", ".", "attach", "(", "HDF5Sink", ",", "filename", "=", "output_file", ",", "*", "*", "kwargs", ")", "pipe", ".", "drain", "(", "n_events", ")" ]
Merge moments of two samples A and B . parameters are m_a ... m_a4 = first through fourth moment of sample A n_a = size of sample A m_b ... m_b4 = first through fourth moment of sample B n_b = size of sample B
def merge_moments ( m_a , m_a2 , m_a3 , m_a4 , n_a , m_b , m_b2 , m_b3 , m_b4 , n_b ) : delta = m_b - m_a delta_2 = delta * delta delta_3 = delta * delta_2 delta_4 = delta * delta_3 n_x = n_a + n_b m_x = m_a + delta * n_b / n_x m_x2 = m_a2 + m_b2 + delta_2 * n_a * n_b / n_x m_x3 = m_a3 + m_b3 + delta_3 * n_a * n_b * ( n_a - n_b ) + 3 * delta * ( n_a * m_2b - n_b * m_2a ) / n_x m_x4 = ( m_a4 + m_b4 + delta_4 * ( n_a * n_b * ( n_a * n_a - n_a * n_b + n_b * n_b ) ) / ( n_x ** 3 ) + 6 * delta_2 * ( n_a * n_a * m_b2 + n_b * n_b * m_a2 ) / ( n_x ** 2 ) + 4 * delta * ( n_a * m_b3 - n_b * m_a3 ) / n_x ) return m_x , m_x2 , m_x3 , m_x4 , n_x
8,975
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L416-L436
[ "def", "RunValidationOutputToConsole", "(", "feed", ",", "options", ")", ":", "accumulator", "=", "CountingConsoleProblemAccumulator", "(", "options", ".", "error_types_ignore_list", ")", "problems", "=", "transitfeed", ".", "ProblemReporter", "(", "accumulator", ")", "_", ",", "exit_code", "=", "RunValidation", "(", "feed", ",", "options", ",", "problems", ")", "return", "exit_code" ]
Register that a transition has taken place . nxt is an identifier for the state being entered . cur is an identifier for the state being left . since is the time at which the previous state was entered .
def _transition ( self , nxt , cur = None , since = None ) : self . transition_intervals [ ( cur , nxt ) ] . tick ( ) if since : self . state_durations [ cur ] . end ( since )
8,976
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L235-L244
[ "def", "density", "(", "self", ",", "value", ")", ":", "self", ".", "_density", "=", "float", "(", "value", ")", "self", ".", "_cache", ".", "delete", "(", "'mass_properties'", ")" ]
cleanup after a transitor weakref fires
def _cleanup ( self , ref ) : self . transitor_states [ self . _weakref_holder [ ref ] ] -= 1 del self . _weakref_holder [ ref ]
8,977
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L252-L255
[ "def", "create_public_ip", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "public_ip_name", ",", "dns_label", ",", "location", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Network/publicIPAddresses/'", ",", "public_ip_name", ",", "'?api-version='", ",", "NETWORK_API", "]", ")", "ip_body", "=", "{", "'location'", ":", "location", "}", "properties", "=", "{", "'publicIPAllocationMethod'", ":", "'Dynamic'", "}", "properties", "[", "'dnsSettings'", "]", "=", "{", "'domainNameLabel'", ":", "dns_label", "}", "ip_body", "[", "'properties'", "]", "=", "properties", "body", "=", "json", ".", "dumps", "(", "ip_body", ")", "return", "do_put", "(", "endpoint", ",", "body", ",", "access_token", ")" ]
commit a walkers data after it is collected
def _commit ( self , ref ) : path_times = self . _weakref_path_map [ ref ] path_times . append ( nanotime ( ) ) del self . _weakref_path_map [ ref ] path = tuple ( path_times [ 1 : : 2 ] ) times = path_times [ : : 2 ] if path not in self . path_stats : # tuple to save a tiny bit of memory self . path_stats [ path ] = tuple ( [ Duration ( interval = False ) for i in range ( len ( path ) ) ] ) path_stats = self . path_stats [ path ] for i in range ( 1 , len ( times ) ) : path_stats [ i - 1 ] . _stats . add ( times [ i ] - times [ i - 1 ] )
8,978
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L310-L323
[ "def", "native_libraries_verify", "(", ")", ":", "with", "open", "(", "BINARY_EXT_TEMPLATE", ",", "\"r\"", ")", "as", "file_obj", ":", "template", "=", "file_obj", ".", "read", "(", ")", "expected", "=", "template", ".", "format", "(", "revision", "=", "REVISION", ")", "with", "open", "(", "BINARY_EXT_FILE", ",", "\"r\"", ")", "as", "file_obj", ":", "contents", "=", "file_obj", ".", "read", "(", ")", "if", "contents", "!=", "expected", ":", "err_msg", "=", "\"\\n\"", "+", "get_diff", "(", "contents", ",", "expected", ",", "\"docs/python/binary-extension.rst.actual\"", ",", "\"docs/python/binary-extension.rst.expected\"", ",", ")", "raise", "ValueError", "(", "err_msg", ")", "else", ":", "print", "(", "\"docs/python/binary-extension.rst contents are as expected.\"", ")" ]
Makes a pretty ASCII format of the data suitable for displaying in a console or saving to a text file . Returns a list of lines .
def pformat ( self , prefix = ( ) ) : nan = float ( "nan" ) def sformat ( segment , stat ) : FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}" line_segs = [ segment ] for s in [ stat ] : p = s . get_percentiles ( ) p50 , p95 = p . get ( 0.50 , nan ) , p . get ( 0.95 , nan ) line_segs . append ( FMT . format ( s . n , s . mean , p50 , p95 , s . max ) ) return '{0}: {1}' . format ( * line_segs ) lines = [ ] for path in sorted ( self . path_stats . keys ( ) ) : lines . append ( '=====================' ) for seg , stat in zip ( path , self . path_stats [ path ] ) : lines . append ( sformat ( seg , stat ) ) return lines
8,979
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L325-L347
[ "def", "until", "(", "coro", ",", "coro_test", ",", "assert_coro", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "@", "asyncio", ".", "coroutine", "def", "assert_coro", "(", "value", ")", ":", "return", "not", "value", "return", "(", "yield", "from", "whilst", "(", "coro", ",", "coro_test", ",", "assert_coro", "=", "assert_coro", ",", "*", "args", ",", "*", "*", "kw", ")", ")" ]
Generates tuples of specfile and quant element for general formats
def specfn_quant_generator ( specfiles , quantfiles , tag , ignore_tags ) : for specfn , qfn in zip ( specfiles , quantfiles ) : for quant_el in basereader . generate_xmltags ( qfn , tag , ignore_tags ) : yield os . path . basename ( specfn ) , quant_el
8,980
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/openms.py#L5-L9
[ "def", "create_session", "(", "self", ",", "lock_type", "=", "library", ".", "LockType", ".", "shared", ",", "session", "=", "None", ")", ":", "if", "session", "is", "None", ":", "session", "=", "library", ".", "ISession", "(", ")", "# NOTE: The following hack handles the issue of unknown machine state.", "# This occurs most frequently when a machine is powered off and", "# in spite waiting for the completion event to end, the state of", "# machine still raises the following Error:", "# virtualbox.library.VBoxErrorVmError: 0x80bb0003 (Failed to \\", "# get a console object from the direct session (Unknown \\", "# Status 0x80BB0002))", "error", "=", "None", "for", "_", "in", "range", "(", "10", ")", ":", "try", ":", "self", ".", "lock_machine", "(", "session", ",", "lock_type", ")", "except", "Exception", "as", "exc", ":", "error", "=", "exc", "time", ".", "sleep", "(", "1", ")", "continue", "else", ":", "break", "else", ":", "if", "error", "is", "not", "None", ":", "raise", "Exception", "(", "\"Failed to create clone - %s\"", "%", "error", ")", "return", "session" ]
Returns a dict with feature information
def get_feature_info ( feature ) : dimensions = feature . findall ( 'position' ) for dim in dimensions : if dim . attrib [ 'dim' ] == '0' : rt = dim . text elif dim . attrib [ 'dim' ] == '1' : mz = dim . text return { 'rt' : float ( rt ) , 'mz' : float ( mz ) , 'charge' : int ( feature . find ( 'charge' ) . text ) , 'intensity' : float ( feature . find ( 'intensity' ) . text ) , }
8,981
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/openms.py#L31-L42
[ "def", "use_comparative_log_view", "(", "self", ")", ":", "self", ".", "_log_view", "=", "COMPARATIVE", "# self._get_provider_session('log_entry_log_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_log_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Merge in undefined map entries from given map .
def merge_maps ( m , base ) : for k in base . keys ( ) : if k not in m : m [ k ] = base [ k ]
8,982
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/base/util.py#L16-L29
[ "def", "_send_register_payload", "(", "self", ",", "websocket", ")", ":", "file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "HANDSHAKE_FILE_NAME", ")", "data", "=", "codecs", ".", "open", "(", "file", ",", "'r'", ",", "'utf-8'", ")", "raw_handshake", "=", "data", ".", "read", "(", ")", "handshake", "=", "json", ".", "loads", "(", "raw_handshake", ")", "handshake", "[", "'payload'", "]", "[", "'client-key'", "]", "=", "self", ".", "client_key", "yield", "from", "websocket", ".", "send", "(", "json", ".", "dumps", "(", "handshake", ")", ")", "raw_response", "=", "yield", "from", "websocket", ".", "recv", "(", ")", "response", "=", "json", ".", "loads", "(", "raw_response", ")", "if", "response", "[", "'type'", "]", "==", "'response'", "and", "response", "[", "'payload'", "]", "[", "'pairingType'", "]", "==", "'PROMPT'", ":", "raw_response", "=", "yield", "from", "websocket", ".", "recv", "(", ")", "response", "=", "json", ".", "loads", "(", "raw_response", ")", "if", "response", "[", "'type'", "]", "==", "'registered'", ":", "self", ".", "client_key", "=", "response", "[", "'payload'", "]", "[", "'client-key'", "]", "self", ".", "save_key_file", "(", ")" ]
Merge in undefined list entries from given list .
def merge_lists ( l , base ) : for i in base : if i not in l : l . append ( i )
8,983
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/base/util.py#L31-L44
[ "def", "kendalltau", "(", "X", ")", ":", "corrs", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "1", "]", ",", "X", ".", "shape", "[", "1", "]", ")", ")", "for", "idx", ",", "cola", "in", "enumerate", "(", "X", ".", "T", ")", ":", "for", "jdx", ",", "colb", "in", "enumerate", "(", "X", ".", "T", ")", ":", "corrs", "[", "idx", ",", "jdx", "]", "=", "sp_kendalltau", "(", "cola", ",", "colb", ")", "[", "0", "]", "return", "corrs" ]
Fed with a psms generator this returns the 3 PSMs with the highest precursor intensities ( or areas or whatever is given in the HEADER_PRECURSOR_QUANT
def generate_top_psms ( psms , protcol ) : top_ms1_psms = { } for psm in psms : protacc = psm [ protcol ] precursor_amount = psm [ mzidtsvdata . HEADER_PRECURSOR_QUANT ] if ';' in protacc or precursor_amount == 'NA' : continue precursor_amount = float ( precursor_amount ) psm_seq = psm [ mzidtsvdata . HEADER_PEPTIDE ] try : peptide_area = top_ms1_psms [ protacc ] [ psm_seq ] except KeyError : try : top_ms1_psms [ protacc ] [ psm_seq ] = precursor_amount except KeyError : top_ms1_psms [ protacc ] = { psm_seq : precursor_amount } else : if precursor_amount > peptide_area : top_ms1_psms [ protacc ] [ psm_seq ] = precursor_amount return top_ms1_psms
8,984
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/precursorarea.py#L5-L27
[ "def", "add", "(", "self", ",", "watch_key", ",", "tensor_value", ")", ":", "if", "watch_key", "not", "in", "self", ".", "_tensor_data", ":", "self", ".", "_tensor_data", "[", "watch_key", "]", "=", "_WatchStore", "(", "watch_key", ",", "mem_bytes_limit", "=", "self", ".", "_watch_mem_bytes_limit", ")", "self", ".", "_tensor_data", "[", "watch_key", "]", ".", "add", "(", "tensor_value", ")" ]
Collects PSMs with the highes precursor quant values adds sum of the top 3 of these to a protein table
def add_ms1_quant_from_top3_mzidtsv ( proteins , psms , headerfields , protcol ) : if not protcol : protcol = mzidtsvdata . HEADER_MASTER_PROT top_ms1_psms = generate_top_psms ( psms , protcol ) for protein in proteins : prot_acc = protein [ prottabledata . HEADER_PROTEIN ] prec_area = calculate_protein_precursor_quant ( top_ms1_psms , prot_acc ) outprotein = { k : v for k , v in protein . items ( ) } outprotein [ headerfields [ 'precursorquant' ] [ prottabledata . HEADER_AREA ] [ None ] ] = str ( prec_area ) yield outprotein
8,985
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/precursorarea.py#L40-L52
[ "def", "deleteAllGroups", "(", "server", ")", ":", "try", ":", "client", ",", "key", "=", "_get_session", "(", "server", ")", "except", "Exception", "as", "exc", ":", "err_msg", "=", "'Exception raised when connecting to spacewalk server ({0}): {1}'", ".", "format", "(", "server", ",", "exc", ")", "log", ".", "error", "(", "err_msg", ")", "return", "{", "'Error'", ":", "err_msg", "}", "groups", "=", "client", ".", "systemgroup", ".", "listAllGroups", "(", "key", ")", "deleted_groups", "=", "[", "]", "failed_groups", "=", "[", "]", "for", "group", "in", "groups", ":", "if", "client", ".", "systemgroup", ".", "delete", "(", "key", ",", "group", "[", "'name'", "]", ")", "==", "1", ":", "deleted_groups", ".", "append", "(", "group", "[", "'name'", "]", ")", "else", ":", "failed_groups", ".", "append", "(", "group", "[", "'name'", "]", ")", "ret", "=", "{", "'deleted'", ":", "deleted_groups", "}", "if", "failed_groups", ":", "ret", "[", "'failed'", "]", "=", "failed_groups", "return", "ret" ]
similar to matlab toc
def toc ( tt , return_msg = False , write_msg = True , verbose = None ) : if verbose is not None : write_msg = verbose ( msg , start_time ) = tt ellapsed = ( default_timer ( ) - start_time ) if ( not return_msg ) and write_msg and msg is not None : sys . stdout . write ( '...toc(%.4fs, ' % ellapsed + '"' + str ( msg ) + '"' + ')\n' ) if return_msg : return msg else : return ellapsed
8,986
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_time.py#L42-L58
[ "def", "users", "(", "self", ",", "start", "=", "1", ",", "num", "=", "10", ",", "sortField", "=", "\"fullName\"", ",", "sortOrder", "=", "\"asc\"", ",", "role", "=", "None", ")", ":", "users", "=", "[", "]", "url", "=", "self", ".", "_url", "+", "\"/users\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"start\"", ":", "start", ",", "\"num\"", ":", "num", "}", "if", "not", "role", "is", "None", ":", "params", "[", "'role'", "]", "=", "role", "if", "not", "sortField", "is", "None", ":", "params", "[", "'sortField'", "]", "=", "sortField", "if", "not", "sortOrder", "is", "None", ":", "params", "[", "'sortOrder'", "]", "=", "sortOrder", "from", ".", "_community", "import", "Community", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "if", "\"users\"", "in", "res", ":", "if", "len", "(", "res", "[", "'users'", "]", ")", ">", "0", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "self", ".", "_url", ")", "if", "parsed", ".", "netloc", ".", "lower", "(", ")", ".", "find", "(", "'arcgis.com'", ")", "==", "-", "1", ":", "cURL", "=", "\"%s://%s/%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "parsed", ".", "path", "[", "1", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", ")", "else", ":", "cURL", "=", "\"%s://%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ")", "com", "=", "Community", "(", "url", "=", "cURL", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "for", "r", "in", "res", "[", "'users'", "]", ":", "users", ".", "append", "(", "com", ".", "users", ".", "user", "(", "r", "[", "\"username\"", "]", ")", ")", "res", "[", 
"'users'", "]", "=", "users", "return", "res" ]
r pip install delorean
def parse_timestamp ( timestamp , zone = 'UTC' , timestamp_format = None ) : if timestamp is None : return None use_delorean = True or six . PY2 if use_delorean : import delorean ## customize delorean string method #def __str__(self): # return str(self.datetime) # #return str(self.datetime) + ' ' + str(self.timezone) #delorean.Delorean.__str__ = __str__ ## method types must be injected into the class ##ut.inject_func_as_method(dn, __str__, '__repr__', override=True) if not isinstance ( timestamp , six . string_types ) : raise NotImplementedError ( 'Unknown format: timestamp=%r' % ( timestamp , ) ) # Normal format, or non-standard year first data if timestamp_format is None : # dont warn because we will take care of utc timefmt = determine_timestamp_format ( timestamp , warn = False ) else : timefmt = timestamp_format if timefmt is None or not isinstance ( timefmt , six . string_types ) : raise AssertionError ( 'unknown timestamp_format=%r' % ( timestamp_format , ) ) # Fixup timestamp utc_offset = None if len ( timestamp ) == 20 and '\x00' in timestamp : timestamp_ = timestamp . replace ( '\x00' , ' ' ) . strip ( ';' ) . strip ( ) elif use_delorean and len ( timestamp ) > 19 : timestamp_ = timestamp [ : 19 ] . strip ( ';' ) . strip ( ) utc_offset = timestamp [ 19 : ] else : timestamp_ = timestamp dt_ = datetime . datetime . strptime ( timestamp_ , timefmt ) if use_delorean : #if utc and utc_offset is not None: #if utc: # dn_ = delorean.Delorean(dt_, 'UTC') #else: if zone is None : zone = time . tzname [ 0 ] if zone == 'local' : zone = time . tzname [ 0 ] dn_ = delorean . Delorean ( dt_ , zone ) else : dn_ = dt_ if utc_offset is not None and zone == 'UTC' : if use_delorean : # Python 2.7 does not account for timezones if ':' in utc_offset : sign = { ' ' : + 1 , '+' : + 1 , '-' : - 1 } [ utc_offset [ 0 ] ] hours , seconds = utc_offset [ 1 : ] . split ( ':' ) delta_ = datetime . 
timedelta ( hours = int ( hours ) , seconds = int ( seconds ) ) delta = sign * delta_ else : import pytz tzname = utc_offset . strip ( ) delta = pytz . timezone ( tzname ) . utcoffset ( dt_ ) # Move back to utc dn = dn_ - delta else : raise AssertionError ( 'python3 should take care of timezone' ) else : dn = dn_ if use_delorean : if not zone != 'UTC' : dn . shift ( zone ) return dn . datetime
8,987
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_time.py#L447-L563
[ "def", "determine_offset", "(", "self", ")", ":", "# In case of regular RTDC, the first contour is", "# missing. In case of fRTDC, it is there, so we", "# might have an offset. We find out if the first", "# contour frame is missing by comparing it to", "# the \"frame\" column of the rtdc dataset.", "fref", "=", "self", ".", "_contour_data", ".", "get_frame", "(", "0", ")", "f0", "=", "self", ".", "frame", "[", "0", "]", "f1", "=", "self", ".", "frame", "[", "1", "]", "# Use allclose to avoid float/integer comparison problems", "if", "np", ".", "allclose", "(", "fref", ",", "f0", ")", ":", "self", ".", "event_offset", "=", "0", "elif", "np", ".", "allclose", "(", "fref", ",", "f1", ")", ":", "self", ".", "event_offset", "=", "1", "else", ":", "msg", "=", "\"Contour data has unknown offset (frame {})!\"", ".", "format", "(", "fref", ")", "raise", "IndexError", "(", "msg", ")", "self", ".", "_initialized", "=", "True" ]
fraction is how much through the day you are . 0 = start of the day 1 = end of the day .
def date_to_datetime ( date , fraction = 0.0 ) : day_seconds = ( 60 * 60 * 24 ) - 1 total_seconds = int ( day_seconds * fraction ) delta = datetime . timedelta ( seconds = total_seconds ) time = datetime . time ( ) dt = datetime . datetime . combine ( date , time ) + delta return dt
8,988
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_time.py#L1115-L1124
[ "def", "render_cvmfs_pvc", "(", "cvmfs_volume", ")", ":", "name", "=", "CVMFS_REPOSITORIES", "[", "cvmfs_volume", "]", "rendered_template", "=", "dict", "(", "REANA_CVMFS_PVC_TEMPLATE", ")", "rendered_template", "[", "'metadata'", "]", "[", "'name'", "]", "=", "'csi-cvmfs-{}-pvc'", ".", "format", "(", "name", ")", "rendered_template", "[", "'spec'", "]", "[", "'storageClassName'", "]", "=", "\"csi-cvmfs-{}\"", ".", "format", "(", "name", ")", "return", "rendered_template" ]
Use the EC2 API to get a list of all machines
def ec2_instances ( ) : region = boto . ec2 . get_region ( REGION ) reservations = region . connect ( ) . get_all_instances ( ) instances = [ ] for reservation in reservations : instances += reservation . instances return instances
8,989
https://github.com/garethr/cloth/blob/b50c7cd6b03f49a931ee55ec94212760c50694a9/src/cloth/utils.py#L12-L19
[ "def", "remove_unused_resources", "(", "issues", ",", "app_dir", ",", "ignore_layouts", ")", ":", "for", "issue", "in", "issues", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "app_dir", ",", "issue", ".", "filepath", ")", "if", "issue", ".", "remove_file", ":", "remove_resource_file", "(", "issue", ",", "filepath", ",", "ignore_layouts", ")", "else", ":", "remove_resource_value", "(", "issue", ",", "filepath", ")" ]
Filter list of machines matching an expression
def instances ( exp = ".*" ) : expression = re . compile ( exp ) instances = [ ] for node in ec2_instances ( ) : if node . tags and ip ( node ) : try : if expression . match ( node . tags . get ( "Name" ) ) : instances . append ( node ) except TypeError : pass return instances
8,990
https://github.com/garethr/cloth/blob/b50c7cd6b03f49a931ee55ec94212760c50694a9/src/cloth/utils.py#L27-L38
[ "def", "index", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "index", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "frgscffile", ",", "=", "args", "gzfile", "=", "frgscffile", "+", "\".gz\"", "cmd", "=", "\"bgzip -c {0}\"", ".", "format", "(", "frgscffile", ")", "if", "not", "op", ".", "exists", "(", "gzfile", ")", ":", "sh", "(", "cmd", ",", "outfile", "=", "gzfile", ")", "tbifile", "=", "gzfile", "+", "\".tbi\"", "# Sequence, begin, end in 2, 3, 4-th column, respectively", "cmd", "=", "\"tabix -s 2 -b 3 -e 4 {0}\"", ".", "format", "(", "gzfile", ")", "if", "not", "op", ".", "exists", "(", "tbifile", ")", ":", "sh", "(", "cmd", ")" ]
Set the fabric environment for the specifed node
def use ( node ) : try : role = node . tags . get ( "Name" ) . split ( '-' ) [ 1 ] env . roledefs [ role ] += [ ip ( node ) ] except IndexError : pass env . nodes += [ node ] env . hosts += [ ip ( node ) ]
8,991
https://github.com/garethr/cloth/blob/b50c7cd6b03f49a931ee55ec94212760c50694a9/src/cloth/utils.py#L40-L48
[ "def", "parse_tophat_log", "(", "self", ",", "raw_data", ")", ":", "if", "'Aligned pairs'", "in", "raw_data", ":", "# Paired end data", "regexes", "=", "{", "'overall_aligned_percent'", ":", "r\"([\\d\\.]+)% overall read mapping rate.\"", ",", "'concordant_aligned_percent'", ":", "r\"([\\d\\.]+)% concordant pair alignment rate.\"", ",", "'aligned_total'", ":", "r\"Aligned pairs:\\s+(\\d+)\"", ",", "'aligned_multimap'", ":", "r\"Aligned pairs:\\s+\\d+\\n\\s+of these:\\s+(\\d+)\"", ",", "'aligned_discordant'", ":", "r\"(\\d+) \\([\\s\\d\\.]+%\\) are discordant alignments\"", ",", "'total_reads'", ":", "r\"[Rr]eads:\\n\\s+Input\\s*:\\s+(\\d+)\"", ",", "}", "else", ":", "# Single end data", "regexes", "=", "{", "'total_reads'", ":", "r\"[Rr]eads:\\n\\s+Input\\s*:\\s+(\\d+)\"", ",", "'aligned_total'", ":", "r\"Mapped\\s*:\\s+(\\d+)\"", ",", "'aligned_multimap'", ":", "r\"of these\\s*:\\s+(\\d+)\"", ",", "'overall_aligned_percent'", ":", "r\"([\\d\\.]+)% overall read mapping rate.\"", ",", "}", "parsed_data", "=", "{", "}", "for", "k", ",", "r", "in", "regexes", ".", "items", "(", ")", ":", "r_search", "=", "re", ".", "search", "(", "r", ",", "raw_data", ",", "re", ".", "MULTILINE", ")", "if", "r_search", ":", "parsed_data", "[", "k", "]", "=", "float", "(", "r_search", ".", "group", "(", "1", ")", ")", "if", "len", "(", "parsed_data", ")", "==", "0", ":", "return", "None", "parsed_data", "[", "'concordant_aligned_percent'", "]", "=", "parsed_data", ".", "get", "(", "'concordant_aligned_percent'", ",", "0", ")", "parsed_data", "[", "'aligned_total'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_total'", ",", "0", ")", "parsed_data", "[", "'aligned_multimap'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_multimap'", ",", "0", ")", "parsed_data", "[", "'aligned_discordant'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_discordant'", ",", "0", ")", "parsed_data", "[", "'unaligned_total'", "]", "=", "parsed_data", "[", "'total_reads'", "]", "-", 
"parsed_data", "[", "'aligned_total'", "]", "parsed_data", "[", "'aligned_not_multimapped_discordant'", "]", "=", "parsed_data", "[", "'aligned_total'", "]", "-", "parsed_data", "[", "'aligned_multimap'", "]", "-", "parsed_data", "[", "'aligned_discordant'", "]", "return", "parsed_data" ]
Constructs explicit mapping . Order of items in regex map matters . Items at top are given preference .
def build_alias_map ( regex_map , tag_vocab ) : import utool as ut import re alias_map = ut . odict ( [ ] ) for pats , new_tag in reversed ( regex_map ) : pats = ut . ensure_iterable ( pats ) for pat in pats : flags = [ re . match ( pat , t ) for t in tag_vocab ] for old_tag in ut . compress ( tag_vocab , flags ) : alias_map [ old_tag ] = new_tag identity_map = ut . take_column ( regex_map , 1 ) for tag in ut . filter_Nones ( identity_map ) : alias_map [ tag ] = tag return alias_map
8,992
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_tags.py#L65-L89
[ "def", "skip_all", "(", "self", ")", ":", "storage", ",", "streaming", "=", "self", ".", "engine", ".", "count", "(", ")", "if", "self", ".", "selector", ".", "output", ":", "self", ".", "offset", "=", "streaming", "else", ":", "self", ".", "offset", "=", "storage", "self", ".", "_count", "=", "0" ]
update tags to new values
def alias_tags ( tags_list , alias_map ) : def _alias_dict ( tags ) : tags_ = [ alias_map . get ( t , t ) for t in tags ] return list ( set ( [ t for t in tags_ if t is not None ] ) ) tags_list_ = [ _alias_dict ( tags ) for tags in tags_list ] return tags_list_
8,993
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_tags.py#L92-L119
[ "def", "get_connection", "(", "self", ",", "command", ",", "args", "=", "(", ")", ")", ":", "# TODO: find a better way to determine if connection is free", "# and not havily used.", "command", "=", "command", ".", "upper", "(", ")", ".", "strip", "(", ")", "is_pubsub", "=", "command", "in", "_PUBSUB_COMMANDS", "if", "is_pubsub", "and", "self", ".", "_pubsub_conn", ":", "if", "not", "self", ".", "_pubsub_conn", ".", "closed", ":", "return", "self", ".", "_pubsub_conn", ",", "self", ".", "_pubsub_conn", ".", "address", "self", ".", "_pubsub_conn", "=", "None", "for", "i", "in", "range", "(", "self", ".", "freesize", ")", ":", "conn", "=", "self", ".", "_pool", "[", "0", "]", "self", ".", "_pool", ".", "rotate", "(", "1", ")", "if", "conn", ".", "closed", ":", "# or conn._waiters: (eg: busy connection)", "continue", "if", "conn", ".", "in_pubsub", ":", "continue", "if", "is_pubsub", ":", "self", ".", "_pubsub_conn", "=", "conn", "self", ".", "_pool", ".", "remove", "(", "conn", ")", "self", ".", "_used", ".", "add", "(", "conn", ")", "return", "conn", ",", "conn", ".", "address", "return", "None", ",", "self", ".", "_address" ]
Conditions that can not be dry_run
def setup ( self ) : self . client = self . _get_client ( ) sg = self . _create_isolation_security_group ( ) if self . exists is not True : acl = self . _create_network_acl ( ) self . _add_network_acl_entries ( acl ) self . _add_security_group_rule ( sg ) self . _add_security_group_to_instance ( sg ) if self . dry_run is not False : self . _add_security_group_rule ( sg ) self . _add_security_group_to_instance ( sg )
8,994
https://github.com/ThreatResponse/aws_ir_plugins/blob/b5128ef5cbd91fc0b5d55615f1c14cb036ae7c73/aws_ir_plugins/examineracl_host.py#L23-L35
[ "def", "serialise", "(", "self", ",", "default_endianness", "=", "None", ")", ":", "# Figure out an endianness.", "endianness", "=", "(", "default_endianness", "or", "DEFAULT_ENDIANNESS", ")", "if", "hasattr", "(", "self", ",", "'_Meta'", ")", ":", "endianness", "=", "self", ".", "_Meta", ".", "get", "(", "'endianness'", ",", "endianness", ")", "inferred_fields", "=", "set", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "inferred_fields", "|=", "{", "x", ".", "_name", "for", "x", "in", "v", ".", "dependent_fields", "(", ")", "}", "for", "field", "in", "inferred_fields", ":", "setattr", "(", "self", ",", "field", ",", "None", ")", "# Some fields want to manipulate other fields that appear before them (e.g. Unions)", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "v", ".", "prepare", "(", "self", ",", "getattr", "(", "self", ",", "k", ")", ")", "message", "=", "b''", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "message", "+=", "v", ".", "value_to_bytes", "(", "self", ",", "getattr", "(", "self", ",", "k", ")", ",", "default_endianness", "=", "endianness", ")", "return", "message" ]
r Ensures that the filename is not too long
def _args2_fpath ( dpath , fname , cfgstr , ext ) : if len ( ext ) > 0 and ext [ 0 ] != '.' : raise ValueError ( 'Please be explicit and use a dot in ext' ) max_len = 128 # should hashlen be larger? cfgstr_hashlen = 16 prefix = fname fname_cfgstr = consensed_cfgstr ( prefix , cfgstr , max_len = max_len , cfgstr_hashlen = cfgstr_hashlen ) fpath = join ( dpath , fname_cfgstr + ext ) fpath = normpath ( fpath ) return fpath
8,995
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L161-L207
[ "def", "percent_initiated_conversations", "(", "records", ")", ":", "interactions", "=", "defaultdict", "(", "list", ")", "for", "r", "in", "records", ":", "interactions", "[", "r", ".", "correspondent_id", "]", ".", "append", "(", "r", ")", "def", "_percent_initiated", "(", "grouped", ")", ":", "mapped", "=", "[", "(", "1", "if", "conv", "[", "0", "]", ".", "direction", "==", "'out'", "else", "0", ",", "1", ")", "for", "conv", "in", "_conversations", "(", "grouped", ")", "]", "return", "mapped", "all_couples", "=", "[", "sublist", "for", "i", "in", "interactions", ".", "values", "(", ")", "for", "sublist", "in", "_percent_initiated", "(", "i", ")", "]", "if", "len", "(", "all_couples", ")", "==", "0", ":", "init", ",", "total", "=", "0", ",", "0", "else", ":", "init", ",", "total", "=", "list", "(", "map", "(", "sum", ",", "list", "(", "zip", "(", "*", "all_couples", ")", ")", ")", ")", "return", "init", "/", "total", "if", "total", "!=", "0", "else", "0" ]
Saves data using util_io but smartly constructs a filename
def save_cache ( dpath , fname , cfgstr , data , ext = '.cPkl' , verbose = None ) : fpath = _args2_fpath ( dpath , fname , cfgstr , ext ) util_io . save_data ( fpath , data , verbose = verbose ) return fpath
8,996
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L210-L216
[ "def", "get_partition_trees", "(", "self", ",", "p", ")", ":", "trees", "=", "[", "]", "for", "grp", "in", "p", ".", "get_membership", "(", ")", ":", "try", ":", "result", "=", "self", ".", "get_group_result", "(", "grp", ")", "trees", ".", "append", "(", "result", "[", "'ml_tree'", "]", ")", "except", "ValueError", ":", "trees", ".", "append", "(", "None", ")", "logger", ".", "error", "(", "'No tree found for group {}'", ".", "format", "(", "grp", ")", ")", "return", "trees" ]
Loads data using util_io but smartly constructs a filename
def load_cache ( dpath , fname , cfgstr , ext = '.cPkl' , verbose = None , enabled = True ) : if verbose is None : verbose = VERBOSE_CACHE if not USE_CACHE or not enabled : if verbose > 1 : print ( '[util_cache] ... cache disabled: dpath=%s cfgstr=%r' % ( basename ( dpath ) , cfgstr , ) ) raise IOError ( 3 , 'Cache Loading Is Disabled' ) fpath = _args2_fpath ( dpath , fname , cfgstr , ext ) if not exists ( fpath ) : if verbose > 0 : print ( '[util_cache] ... cache does not exist: dpath=%r fname=%r cfgstr=%r' % ( basename ( dpath ) , fname , cfgstr , ) ) raise IOError ( 2 , 'No such file or directory: %r' % ( fpath , ) ) else : if verbose > 2 : print ( '[util_cache] ... cache exists: dpath=%r fname=%r cfgstr=%r' % ( basename ( dpath ) , fname , cfgstr , ) ) import utool as ut nbytes = ut . get_file_nBytes ( fpath ) big_verbose = ( nbytes > 1E6 and verbose > 2 ) or verbose > 2 if big_verbose : print ( '[util_cache] About to read file of size %s' % ( ut . byte_str2 ( nbytes ) , ) ) try : with ut . Timer ( fpath , verbose = big_verbose and verbose > 3 ) : data = util_io . load_data ( fpath , verbose = verbose > 2 ) except ( EOFError , IOError , ImportError ) as ex : print ( 'CORRUPTED? fpath = %s' % ( fpath , ) ) if verbose > 1 : print ( '[util_cache] ... cache miss dpath=%s cfgstr=%r' % ( basename ( dpath ) , cfgstr , ) ) raise IOError ( str ( ex ) ) except Exception : print ( 'CORRUPTED? fpath = %s' % ( fpath , ) ) raise else : if verbose > 2 : print ( '[util_cache] ... cache hit' ) return data
8,997
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L219-L260
[ "def", "get_partition_trees", "(", "self", ",", "p", ")", ":", "trees", "=", "[", "]", "for", "grp", "in", "p", ".", "get_membership", "(", ")", ":", "try", ":", "result", "=", "self", ".", "get_group_result", "(", "grp", ")", "trees", ".", "append", "(", "result", "[", "'ml_tree'", "]", ")", "except", "ValueError", ":", "trees", ".", "append", "(", "None", ")", "logger", ".", "error", "(", "'No tree found for group {}'", ".", "format", "(", "grp", ")", ")", "return", "trees" ]
returns None if cache cannot be loaded
def tryload_cache ( dpath , fname , cfgstr , verbose = None ) : try : return load_cache ( dpath , fname , cfgstr , verbose = verbose ) except IOError : return None
8,998
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L263-L270
[ "def", "status", "(", "sec", ")", ":", "if", "_meta_", ".", "prg_bar", "in", "[", "\"on\"", ",", "\"ON\"", "]", ":", "syms", "=", "[", "\"|\"", ",", "\"/\"", ",", "\"-\"", ",", "\"\\\\\"", "]", "for", "sym", "in", "syms", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\b{0}{1}{2}\"", ".", "format", "(", "_meta_", ".", "color", "[", "\"GREY\"", "]", ",", "sym", ",", "_meta_", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "float", "(", "sec", ")", ")" ]
loads a list of similar cached datas . Returns flags that needs to be computed
def tryload_cache_list ( dpath , fname , cfgstr_list , verbose = False ) : data_list = [ tryload_cache ( dpath , fname , cfgstr , verbose ) for cfgstr in cfgstr_list ] ismiss_list = [ data is None for data in data_list ] return data_list , ismiss_list
8,999
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L274-L280
[ "def", "remove_handler", "(", "self", ",", "handler", ")", ":", "super", "(", "Session", ",", "self", ")", ".", "remove_handler", "(", "handler", ")", "self", ".", "promote", "(", ")", "self", ".", "stop_heartbeat", "(", ")" ]