query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
r Uses difflib to return a difference string between two similar texts
def get_textdiff(text1, text2, num_context_lines=0, ignore_whitespace=False):
    r"""
    Uses difflib to return a difference string between two similar texts.

    Args:
        text1 (str): original text
        text2 (str): modified text
        num_context_lines (int): lines of unchanged context kept around each
            diff line; ``None`` keeps the entire ndiff output.
        ignore_whitespace (bool): if True, trailing whitespace is stripped
            and difflib's junk heuristics are enabled.

    Returns:
        str: newline-joined diff lines
    """
    import difflib
    text1 = ensure_unicode(text1)
    text2 = ensure_unicode(text2)
    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()
    if ignore_whitespace:
        text1_lines = [t.rstrip() for t in text1_lines]
        text2_lines = [t.rstrip() for t in text2_lines]
        ndiff_kw = dict(linejunk=difflib.IS_LINE_JUNK,
                        charjunk=difflib.IS_CHARACTER_JUNK)
    else:
        ndiff_kw = {}
    all_diff_lines = list(difflib.ndiff(text1_lines, text2_lines, **ndiff_kw))
    if num_context_lines is None:
        diff_lines = all_diff_lines
    else:
        from utool import util_list
        # boolean for every line if it is marked or not
        ismarked_list = [len(line) > 0 and line[0] in '+-?'
                         for line in all_diff_lines]
        # flag lines that are within num_context_lines away from a diff line
        isvalid_list = ismarked_list[:]
        for i in range(1, num_context_lines + 1):
            isvalid_list[:-i] = util_list.or_lists(isvalid_list[:-i],
                                                   ismarked_list[i:])
            isvalid_list[i:] = util_list.or_lists(isvalid_list[i:],
                                                  ismarked_list[:-i])
        USE_BREAK_LINE = True
        if USE_BREAK_LINE:
            # insert a visual break when there is a break in context
            # NOTE(review): the break insertion is disabled by ``if False``
            # in the original; behavior preserved as-is.
            diff_lines = []
            prev = False
            visual_break = '\n <... FILTERED CONTEXT ...> \n'
            for line, valid in zip(all_diff_lines, isvalid_list):
                if valid:
                    diff_lines.append(line)
                elif prev:
                    if False:
                        diff_lines.append(visual_break)
                prev = valid
        else:
            diff_lines = util_list.compress(all_diff_lines, isvalid_list)
    return '\n'.join(diff_lines)
9,500
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2343-L2433
[ "def", "serve_forever", "(", "self", ",", "poll_interval", "=", "0.5", ")", ":", "self", ".", "serial_port", ".", "timeout", "=", "poll_interval", "while", "not", "self", ".", "_shutdown_request", ":", "try", ":", "self", ".", "serve_once", "(", ")", "except", "(", "CRCError", ",", "struct", ".", "error", ")", "as", "e", ":", "log", ".", "error", "(", "'Can\\'t handle request: {0}'", ".", "format", "(", "e", ")", ")", "except", "(", "SerialTimeoutException", ",", "ValueError", ")", ":", "pass" ]
Joins a list of words using English conjunction rules
def conj_phrase(list_, cond='or'):
    """Joins a list of words using English conjunction rules.

    Args:
        list_ (list): words to join
        cond (str): conjunction word, e.g. ``'or'`` or ``'and'``

    Returns:
        str: e.g. ``['a', 'b', 'c']`` with ``'or'`` -> ``'a, b, or c'``
    """
    n = len(list_)
    if n == 0:
        return ''
    if n == 1:
        return list_[0]
    if n == 2:
        return ' '.join((list_[0], cond, list_[1]))
    # Oxford-comma form: join all but the last two, then attach the tail
    condstr = ''.join((', ' + cond, ' '))
    head = ', '.join(list_[:-2])
    tail = condstr.join(list_[-2:])
    return ', '.join((head, tail))
9,501
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2439-L2477
[ "def", "GetExtractionStatusUpdateCallback", "(", "self", ")", ":", "if", "self", ".", "_mode", "==", "self", ".", "MODE_LINEAR", ":", "return", "self", ".", "_PrintExtractionStatusUpdateLinear", "if", "self", ".", "_mode", "==", "self", ".", "MODE_WINDOW", ":", "return", "self", ".", "_PrintExtractionStatusUpdateWindow", "return", "None" ]
r Uses pyfiglet to create bubble text .
def bubbletext(text, font='cybermedium'):
    r"""Uses pyfiglet to create bubble text.

    Falls back to returning ``text`` unchanged when pyfiglet is not
    importable.
    """
    import utool as ut
    pyfiglet = ut.tryimport('pyfiglet',
                            'git+https://github.com/pwaller/pyfiglet')
    if pyfiglet is None:
        return text
    return pyfiglet.figlet_format(text, font=font)
9,502
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2563-L2586
[ "def", "edit", "(", "self", ",", "data_src", ",", "value", ")", ":", "# check if opening file", "if", "'filename'", "in", "value", ":", "items", "=", "[", "k", "for", "k", ",", "v", "in", "self", ".", "reg", ".", "data_source", ".", "iteritems", "(", ")", "if", "v", "==", "data_src", "]", "self", ".", "reg", ".", "unregister", "(", "items", ")", "# remove items from Registry", "# open file and register new data", "self", ".", "open", "(", "data_src", ",", "value", "[", "'filename'", "]", ",", "value", ".", "get", "(", "'path'", ")", ")", "self", ".", "layer", "[", "data_src", "]", ".", "update", "(", "value", ")" ]
heuristic check if str is url formatted
def is_url(str_):
    """Heuristic check if ``str_`` is url formatted."""
    # str.startswith accepts a tuple of prefixes; the substring checks
    # catch bare domains with a path component
    if str_.startswith(('http://', 'https://', 'www.')):
        return True
    return '.org/' in str_ or '.com/' in str_
9,503
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2685-L2693
[ "def", "select", "(", "read_streams", ",", "write_streams", ",", "timeout", "=", "0", ")", ":", "exception_streams", "=", "[", "]", "try", ":", "return", "builtin_select", ".", "select", "(", "read_streams", ",", "write_streams", ",", "exception_streams", ",", "timeout", ",", ")", "[", "0", ":", "2", "]", "except", "builtin_select", ".", "error", "as", "e", ":", "# POSIX signals interrupt select()", "no", "=", "e", ".", "errno", "if", "six", ".", "PY3", "else", "e", "[", "0", "]", "if", "no", "==", "errno", ".", "EINTR", ":", "return", "(", "[", "]", ",", "[", "]", ")", "else", ":", "raise", "e" ]
r Like range but returns characters
def chr_range(*args, **kw):
    r"""Like ``range`` but returns a list of characters.

    Accepts ``(stop)``, ``(start, stop)``, or ``(start, stop, step)``.
    Endpoints may be ints (offsets from ``base``, default ``'a'``) or
    single characters.
    """
    if len(args) == 1:
        (stop,) = args
        start, step = 0, 1
    elif len(args) == 2:
        start, stop = args
        step = 1
    elif len(args) == 3:
        start, stop, step = args
    else:
        raise ValueError('incorrect args')
    chr_ = six.unichr
    # integer endpoints are interpreted relative to the base character
    base = ord(kw.get('base', 'a'))
    if isinstance(start, int):
        start = base + start
    if isinstance(stop, int):
        stop = base + stop
    if isinstance(start, six.string_types):
        start = ord(start)
    if isinstance(stop, six.string_types):
        stop = ord(stop)
    if step is None:
        step = 1
    return list(map(six.text_type, map(chr_, range(start, stop, step))))
9,504
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2772-L2828
[ "def", "agp", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "agp", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "tpffile", ",", "certificatefile", ",", "agpfile", "=", "args", "orientationguide", "=", "DictFile", "(", "tpffile", ",", "valuepos", "=", "2", ")", "cert", "=", "Certificate", "(", "certificatefile", ")", "cert", ".", "write_AGP", "(", "agpfile", ",", "orientationguide", "=", "orientationguide", ")" ]
FIXME Use pygments instead
def highlight_regex(str_, pat, reflags=0, color='red'):
    """FIXME: Use pygments instead.

    Colorizes every match of ``pat`` in ``str_`` with ``color``.
    """
    matches = list(re.finditer(pat, str_, flags=reflags))
    colored = str_
    # replace from the back so earlier match offsets remain valid
    for match in reversed(matches):
        start = match.start()
        end = match.end()
        colored_part = color_text(colored[start:end], color)
        colored = colored[:start] + colored_part + colored[end:]
    return colored
9,505
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2912-L2938
[ "def", "_create_peephole_variables", "(", "self", ",", "dtype", ")", ":", "self", ".", "_w_f_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_F_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ")", "self", ".", "_w_i_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_I_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ")", "self", ".", "_w_o_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_O_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ")" ]
FIXME Use pygments instead . must be mututally exclusive
def highlight_multi_regex(str_, pat_to_color, reflags=0):
    """FIXME: Use pygments instead. Patterns must be mutually exclusive.

    Colorizes matches of several patterns, each with its own color.
    """
    colored = str_
    to_replace = []
    for pat, color in pat_to_color.items():
        for match in re.finditer(pat, str_, flags=reflags):
            to_replace.append((match.end(), match.start(), color))
    # process from the back so earlier offsets remain valid
    for end, start, color in reversed(sorted(to_replace)):
        colored_part = color_text(colored[start:end], color)
        colored = colored[:start] + colored_part + colored[end:]
    return colored
9,506
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2941-L2967
[ "def", "future_set_exception_unless_cancelled", "(", "future", ":", "\"Union[futures.Future[_T], Future[_T]]\"", ",", "exc", ":", "BaseException", ")", "->", "None", ":", "if", "not", "future", ".", "cancelled", "(", ")", ":", "future", ".", "set_exception", "(", "exc", ")", "else", ":", "app_log", ".", "error", "(", "\"Exception after Future was cancelled\"", ",", "exc_info", "=", "exc", ")" ]
Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs
def find_block_end(row, line_list, sentinal, direction=1):
    """
    Searches up or down from ``row`` until a line matching ``sentinal`` or a
    buffer boundary is found; returns that row index.

    Rectify with find_paragraph_end in pyvim_funcs.
    """
    import re
    row_ = row
    line_ = line_list[row_]
    at_boundary = row_ == 0 or row_ == len(line_list) - 1
    at_sentinal = re.match(sentinal, line_)
    if not (at_boundary or at_sentinal):
        while True:
            if row_ == 0 or row_ == len(line_list) - 1:
                break
            line_ = line_list[row_]
            if re.match(sentinal, line_):
                break
            row_ += direction
    return row_
9,507
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L3492-L3510
[ "def", "to_workspace_value", "(", "self", ",", "result", ",", "assets", ")", ":", "return", "result", ".", "unstack", "(", ")", ".", "fillna", "(", "self", ".", "missing_value", ")", ".", "reindex", "(", "columns", "=", "assets", ",", "fill_value", "=", "self", ".", "missing_value", ",", ")", ".", "values" ]
uses ghostscript to write a pdf
def compress_pdf(pdf_fpath, output_fname=None):
    """Uses ghostscript to write a compressed copy of a pdf.

    Returns:
        str: path of the compressed output pdf
    """
    import utool as ut
    ut.assertpath(pdf_fpath)
    suffix = '_' + ut.get_datestamp(False) + '_compressed'
    print('pdf_fpath = %r' % (pdf_fpath,))
    output_pdf_fpath = ut.augpath(pdf_fpath, suffix, newfname=output_fname)
    print('output_pdf_fpath = %r' % (output_pdf_fpath,))
    gs_exe = find_ghostscript_exe()
    cmd_list = (
        gs_exe,
        '-sDEVICE=pdfwrite',
        '-dCompatibilityLevel=1.4',
        '-dNOPAUSE',
        '-dQUIET',
        '-dBATCH',
        '-sOutputFile=' + output_pdf_fpath,
        pdf_fpath,
    )
    ut.cmd(*cmd_list)
    return output_pdf_fpath
9,508
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L37-L57
[ "def", "resource_type", "(", "self", ",", "resource_id", ")", ":", "match", "=", "re", ".", "search", "(", "r\"repositories/\\d+/(resources|archival_objects)/\\d+\"", ",", "resource_id", ")", "if", "match", "and", "match", ".", "groups", "(", ")", ":", "type_", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "return", "\"resource\"", "if", "type_", "==", "\"resources\"", "else", "\"resource_component\"", "else", ":", "raise", "ArchivesSpaceError", "(", "\"Unable to determine type of provided ID: {}\"", ".", "format", "(", "resource_id", ")", ")" ]
r dummy preamble and document to wrap around latex fragment
def make_full_document(text, title=None, preamp_decl={}, preamb_extra=None):
    r"""Wraps a latex fragment in a dummy preamble and document environment.

    NOTE(review): mutable default ``preamp_decl={}`` kept for interface
    compatibility; callers may observe mutations (``title`` is written
    into it).
    """
    import utool as ut
    doc_preamb = ut.codeblock('''
    %\\documentclass{article}
    \\documentclass[10pt,twocolumn,letterpaper]{article}
    % \\usepackage[utf8]{inputenc}
    \\usepackage[T1]{fontenc}
    \\usepackage{times}
    \\usepackage{epsfig}
    \\usepackage{graphicx}
    \\usepackage{amsmath,amsthm,amssymb}
    \\usepackage[usenames,dvipsnames,svgnames,table]{xcolor}
    \\usepackage{multirow}
    \\usepackage{subcaption}
    \\usepackage{booktabs}
    %\\pagenumbering{gobble}
    ''')
    if preamb_extra is not None:
        if isinstance(preamb_extra, (list, tuple)):
            preamb_extra = '\n'.join(preamb_extra)
        doc_preamb += '\n' + preamb_extra + '\n'
    if title is not None:
        preamp_decl['title'] = title
    decl_lines = [r'\{key}{{{val}}}'.format(key=key, val=val)
                  for key, val in preamp_decl.items()]
    doc_decllines = '\n'.join(decl_lines)
    doc_header = ut.codeblock(r'''
    \begin{document}
    ''')
    if preamp_decl.get('title') is not None:
        doc_header += r'\maketitle'
    doc_footer = ut.codeblock(r'''
    \end{document}
    ''')
    return '\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer))
9,509
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L60-L125
[ "def", "query", "(", "self", ",", "domain", ")", ":", "result", "=", "{", "}", "try", ":", "result", "=", "self", ".", "pdns", ".", "query", "(", "domain", ")", "except", ":", "self", ".", "error", "(", "'Exception while querying passiveDNS. Check the domain format.'", ")", "# Clean the datetime problems in order to correct the json serializability", "clean_result", "=", "[", "]", "for", "ind", ",", "resultset", "in", "enumerate", "(", "result", ")", ":", "if", "resultset", ".", "get", "(", "'time_first'", ",", "None", ")", ":", "resultset", "[", "'time_first'", "]", "=", "resultset", ".", "get", "(", "'time_first'", ")", ".", "isoformat", "(", "' '", ")", "if", "resultset", ".", "get", "(", "'time_last'", ",", "None", ")", ":", "resultset", "[", "'time_last'", "]", "=", "resultset", ".", "get", "(", "'time_last'", ")", ".", "isoformat", "(", "' '", ")", "clean_result", ".", "append", "(", "resultset", ")", "return", "clean_result" ]
compiles latex and shows the result
def render_latex_text(input_text, nest_in_doc=False, preamb_extra=None,
                      appname='utool', verbose=None):
    """Compiles latex and shows the result.

    Returns:
        str: path of the compiled pdf
    """
    import utool as ut
    if verbose is None:
        verbose = ut.VERBOSE
    dpath = ut.ensure_app_resource_dir(appname, 'latex_tmp')
    # put a latex fragment in a full document
    fname = 'temp_render_latex'
    pdf_fpath = ut.compile_latex_text(
        input_text, dpath=dpath, fname=fname,
        preamb_extra=preamb_extra, verbose=verbose)
    ut.startfile(pdf_fpath)
    return pdf_fpath
9,510
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L128-L142
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Renders latex text into a jpeg .
def render_latex(input_text, dpath=None, fname=None, preamb_extra=None,
                 verbose=1, **kwargs):
    """Renders latex text into a jpeg.

    Returns:
        str: path of the clipped output image
    """
    import utool as ut
    import vtool as vt
    # turn off page numbers
    input_text_ = '\pagenumbering{gobble}\n' + input_text
    img_fname = ut.ensure_ext(fname, ['.jpg'] + list(ut.IMG_EXTENSIONS))
    img_fpath = join(dpath, img_fname)
    pdf_fpath = ut.compile_latex_text(
        input_text_, fname=fname, dpath=dpath,
        preamb_extra=preamb_extra, verbose=verbose, move=False)
    ext = splitext(img_fname)[1]
    fpath_in = ut.convert_pdf_to_image(pdf_fpath, ext=ext, verbose=verbose)
    # clip off white boundaries of the rendered pdf image
    vt.clipwhite_ondisk(fpath_in, fpath_out=img_fpath, verbose=verbose > 1)
    return img_fpath
9,511
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L240-L290
[ "def", "listen_fds", "(", "unset_environment", "=", "True", ")", ":", "num", "=", "_listen_fds", "(", "unset_environment", ")", "return", "list", "(", "range", "(", "LISTEN_FDS_START", ",", "LISTEN_FDS_START", "+", "num", ")", ")" ]
hack for candidacy
def get_latex_figure_str2(fpath_list, cmdname, **kwargs):
    """Hack for candidacy: wraps a figure block in a latex newcommand."""
    import utool as ut
    from os.path import relpath
    # Make relative paths
    if kwargs.pop('relpath', True):
        start = ut.truepath('~/latex/crall-candidacy-2015')
        fpath_list = [relpath(fpath, start) for fpath in fpath_list]
    cmdname = ut.latex_sanitize_command_name(cmdname)
    kwargs['caption_str'] = kwargs.get('caption_str', cmdname)
    figure_str = ut.get_latex_figure_str(fpath_list, **kwargs)
    return ut.latex_newcommand(cmdname, figure_str)
9,512
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L711-L724
[ "def", "delete_variable", "(", "self", ",", "key", ")", ":", "key", "=", "str", "(", "key", ")", "if", "self", ".", "is_locked", "(", "key", ")", ":", "raise", "RuntimeError", "(", "\"Global variable is locked\"", ")", "with", "self", ".", "__global_lock", ":", "if", "key", "in", "self", ".", "__global_variable_dictionary", ":", "access_key", "=", "self", ".", "lock_variable", "(", "key", ",", "block", "=", "True", ")", "del", "self", ".", "__global_variable_dictionary", "[", "key", "]", "self", ".", "unlock_variable", "(", "key", ",", "access_key", ")", "del", "self", ".", "__variable_locks", "[", "key", "]", "del", "self", ".", "__variable_references", "[", "key", "]", "else", ":", "raise", "AttributeError", "(", "\"Global variable %s does not exist!\"", "%", "str", "(", "key", ")", ")", "logger", ".", "debug", "(", "\"Global variable %s was deleted!\"", "%", "str", "(", "key", ")", ")" ]
Queue a gauge or gauges to be written
def add(self, data, value=None, timestamp=None, namespace=None, debug=False):
    """Queue a gauge or gauges to be written.

    NOTE(review): reconstructed from a detokenized dump; Python-2-era
    names (``long``, ``unicode``, ``iteritems``) preserved as found.
    """
    # single (key, value) convenience form recurses into the batch form
    if value is not None:
        return self.add(((data, value),), timestamp=timestamp,
                        namespace=namespace, debug=debug)
    writer = self.writer
    if writer is None:
        raise GaugedUseAfterFreeError
    if timestamp is None:
        timestamp = long(time() * 1000)
    config = self.config
    block_size = config.block_size
    this_block = timestamp // block_size
    this_array = (timestamp % block_size) // config.resolution
    if namespace is None:
        namespace = config.namespace
    # reject/rewrite/ignore timestamps that move backwards
    if this_block < self.current_block or \
            (this_block == self.current_block and
             this_array < self.current_array):
        if config.append_only_violation == Writer.ERROR:
            msg = 'Gauged is append-only; timestamps must be increasing'
            raise GaugedAppendOnlyError(msg)
        elif config.append_only_violation == Writer.REWRITE:
            this_block = self.current_block
            this_array = self.current_array
        else:
            return
    if isinstance(data, unicode):
        data = data.encode('utf8')
    if debug:
        return self.debug(timestamp, namespace, data)
    if this_block > self.current_block:
        self.flush_blocks()
        self.current_block = this_block
        self.current_array = this_array
    elif this_array > self.current_array:
        if not Gauged.writer_flush_arrays(writer, self.current_array):
            raise MemoryError
        self.current_array = this_array
    data_points = 0
    namespace_statistics = self.statistics[namespace]
    whitelist = config.key_whitelist
    skip_long_keys = config.key_overflow == Writer.IGNORE
    skip_gauge_nan = config.gauge_nan == Writer.IGNORE
    if isinstance(data, str) and skip_gauge_nan and skip_long_keys \
            and whitelist is None:
        # fast path
        data_points = c_uint32(0)
        if not Gauged.writer_emit_pairs(writer, namespace, data,
                                        byref(data_points)):
            raise MemoryError
        data_points = data_points.value
    else:
        if isinstance(data, dict):
            data = data.iteritems()
        elif isinstance(data, str):
            data = self.parse_query(data)
        emit = Gauged.writer_emit
        for key, value in data:
            key = to_bytes(key)
            if whitelist is not None and key not in whitelist:
                continue
            try:
                value = float(value)
            except ValueError:
                value = float('nan')
            if value != value:  # => NaN?
                if skip_gauge_nan:
                    continue
                raise GaugedNaNError
            success = emit(writer, namespace, key, c_float(value))
            if success != 1:
                if not success:
                    raise MemoryError
                elif success == Writer.KEY_OVERFLOW and not skip_long_keys:
                    msg = 'Key is larger than the driver allows '
                    msg += '(%s)' % key
                    raise GaugedKeyOverflowError(msg)
            data_points += 1
    namespace_statistics.data_points += data_points
    if self.flush_now:
        self.flush()
9,513
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L47-L128
[ "def", "close", "(", "self", ")", ":", "if", "not", "(", "yield", "from", "super", "(", ")", ".", "close", "(", ")", ")", ":", "return", "False", "for", "adapter", "in", "self", ".", "_ethernet_adapters", ".", "values", "(", ")", ":", "if", "adapter", "is", "not", "None", ":", "for", "nio", "in", "adapter", ".", "ports", ".", "values", "(", ")", ":", "if", "nio", "and", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "self", ".", "manager", ".", "port_manager", ".", "release_udp_port", "(", "nio", ".", "lport", ",", "self", ".", "_project", ")", "try", ":", "self", ".", "acpi_shutdown", "=", "False", "yield", "from", "self", ".", "stop", "(", ")", "except", "VMwareError", ":", "pass", "if", "self", ".", "linked_clone", ":", "yield", "from", "self", ".", "manager", ".", "remove_from_vmware_inventory", "(", "self", ".", "_vmx_path", ")" ]
Flush all pending gauges
def flush(self):
    """Flush all pending gauges to the driver and commit."""
    writer = self.writer
    if writer is None:
        raise GaugedUseAfterFreeError
    self.flush_writer_position()
    keys = self.translate_keys()
    blocks = []
    current_block = self.current_block
    statistics = self.statistics
    driver = self.driver
    flags = 0  # for future extensions, e.g. block compression
    for namespace, key, block in self.pending_blocks():
        length = block.byte_length()
        if not length:
            continue
        key_id = keys[(namespace, key)]
        statistics[namespace].byte_count += length
        blocks.append((namespace, current_block, key_id,
                       block.buffer(), flags))
    if self.config.overwrite_blocks:
        driver.replace_blocks(blocks)
    else:
        driver.insert_or_append_blocks(blocks)
    if not Gauged.writer_flush_maps(writer, True):
        raise MemoryError
    update_namespace = driver.add_namespace_statistics
    for namespace, stats in statistics.iteritems():
        update_namespace(namespace, self.current_block,
                         stats.data_points, stats.byte_count)
    statistics.clear()
    driver.commit()
    self.flush_now = False
9,514
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L130-L162
[ "def", "get_connection_details", "(", "session", ",", "vcenter_resource_model", ",", "resource_context", ")", ":", "session", "=", "session", "resource_context", "=", "resource_context", "# get vCenter connection details from vCenter resource", "user", "=", "vcenter_resource_model", ".", "user", "vcenter_url", "=", "resource_context", ".", "address", "password", "=", "session", ".", "DecryptPassword", "(", "vcenter_resource_model", ".", "password", ")", ".", "Value", "return", "VCenterConnectionDetails", "(", "vcenter_url", ",", "user", ",", "password", ")" ]
Get a timestamp representing the position just after the last written gauge
def resume_from(self):
    """Get a timestamp representing the position just after the last
    written gauge, or ``0`` when nothing has been written yet."""
    name = self.config.writer_name
    position = self.driver.get_writer_position(name)
    if not position:
        return 0
    return position + self.config.resolution
9,515
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L164-L168
[ "def", "GetFile", "(", "message", "=", "None", ",", "title", "=", "None", ",", "directory", "=", "None", ",", "fileName", "=", "None", ",", "allowsMultipleSelection", "=", "False", ",", "fileTypes", "=", "None", ")", ":", "return", "dispatcher", "[", "\"GetFile\"", "]", "(", "message", "=", "message", ",", "title", "=", "title", ",", "directory", "=", "directory", ",", "fileName", "=", "fileName", ",", "allowsMultipleSelection", "=", "allowsMultipleSelection", ",", "fileTypes", "=", "fileTypes", ")" ]
Clear all data from timestamp onwards . Note that the timestamp is rounded down to the nearest block boundary
def clear_from(self, timestamp):
    """Clear all data from ``timestamp`` onwards.

    Raises:
        ValueError: if the timestamp is not on a block boundary.
    """
    block_size = self.config.block_size
    offset, remainder = divmod(timestamp, block_size)
    if remainder:
        raise ValueError('Timestamp must be on a block boundary')
    self.driver.clear_from(offset, timestamp)
9,516
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L170-L177
[ "def", "replace_orig_field", "(", "self", ",", "option", ")", ":", "if", "option", ":", "option_new", "=", "list", "(", "option", ")", "for", "opt", "in", "option", ":", "if", "opt", "in", "self", ".", "trans_opts", ".", "fields", ":", "index", "=", "option_new", ".", "index", "(", "opt", ")", "option_new", "[", "index", ":", "index", "+", "1", "]", "=", "get_translation_fields", "(", "opt", ")", "elif", "isinstance", "(", "opt", ",", "(", "tuple", ",", "list", ")", ")", "and", "(", "[", "o", "for", "o", "in", "opt", "if", "o", "in", "self", ".", "trans_opts", ".", "fields", "]", ")", ":", "index", "=", "option_new", ".", "index", "(", "opt", ")", "option_new", "[", "index", ":", "index", "+", "1", "]", "=", "self", ".", "replace_orig_field", "(", "opt", ")", "option", "=", "option_new", "return", "option" ]
Clear all data before timestamp for a given key . Note that the timestamp is rounded down to the nearest block boundary
def clear_key_before(self, key, namespace=None, timestamp=None):
    """Clear all data before ``timestamp`` for a given key.

    When no timestamp is given, all data for the key is cleared.

    Raises:
        ValueError: if the timestamp is not on a block boundary, or
            would delete before offset zero.
    """
    block_size = self.config.block_size
    if namespace is None:
        namespace = self.config.namespace
    if timestamp is None:
        self.driver.clear_key_before(key, namespace)
        return
    offset, remainder = divmod(timestamp, block_size)
    if remainder:
        raise ValueError('timestamp must be on a block boundary')
    if offset == 0:
        raise ValueError('cannot delete before offset zero')
    self.driver.clear_key_before(key, namespace, offset - 1, timestamp)
9,517
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L179-L194
[ "def", "create_decompress", "(", "fmt", ")", ":", "OPENJPEG", ".", "opj_create_decompress", ".", "argtypes", "=", "[", "ctypes", ".", "c_int", "]", "restype", "=", "ctypes", ".", "POINTER", "(", "DecompressionInfoType", ")", "OPENJPEG", ".", "opj_create_decompress", ".", "restype", "=", "restype", "dinfo", "=", "OPENJPEG", ".", "opj_create_decompress", "(", "fmt", ")", "return", "dinfo" ]
Create counts line in CTfile format .
def _to_ctfile_counts_line(self, key):
    """Create counts line in CTfile format.

    Refreshes the atom/bond counts, then right-justifies each field to
    the spacing given by ``counts_line_format``.
    """
    counter = OrderedCounter(self.counts_line_format)
    self[key]['number_of_atoms'] = str(len(self.atoms))
    self[key]['number_of_bonds'] = str(len(self.bonds))
    fields = [str(value).rjust(spacing)
              for value, spacing in zip(self[key].values(),
                                        counter.values())]
    return '{}\n'.format(''.join(fields))
9,518
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L346-L358
[ "def", "_get_db", "(", ")", ":", "from", ".", "settings", "import", "settings", "mongo", "=", "settings", ".", "MONGODB", "if", "'URI'", "in", "mongo", "and", "mongo", "[", "'URI'", "]", ":", "uri", "=", "mongo", "[", "'URI'", "]", "else", ":", "uri", "=", "'mongodb://'", "if", "all", "(", "mongo", ".", "get", "(", "key", ")", "for", "key", "in", "(", "'USERNAME'", ",", "'PASSWORD'", ")", ")", ":", "uri", "+=", "'{0}:{1}@'", ".", "format", "(", "mongo", "[", "'USERNAME'", "]", ",", "mongo", "[", "'PASSWORD'", "]", ")", "if", "'HOSTS'", "in", "mongo", "and", "mongo", "[", "'HOSTS'", "]", ":", "uri", "+=", "','", ".", "join", "(", "'{0}:{1}'", ".", "format", "(", "host", ",", "port", ")", "for", "(", "host", ",", "port", ")", "in", "zip", "(", "mongo", "[", "'HOSTS'", "]", ",", "mongo", "[", "'PORTS'", "]", ")", ",", ")", "else", ":", "uri", "+=", "'{0}:{1}'", ".", "format", "(", "mongo", "[", "'HOST'", "]", ",", "mongo", ".", "get", "(", "'PORT'", ",", "27017", ")", ")", "uri", "+=", "'/'", "+", "mongo", "[", "'DATABASE'", "]", "if", "'OPTIONS'", "in", "mongo", "and", "mongo", "[", "'OPTIONS'", "]", ":", "uri", "+=", "'?{0}'", ".", "format", "(", "'&'", ".", "join", "(", "mongo", "[", "'OPTIONS'", "]", ")", ")", "client", "=", "ConnectionFailureProxy", "(", "MongoClient", "(", "uri", ",", "connect", "=", "False", ")", ")", "database", "=", "client", "[", "parse_uri", "(", "uri", ")", "[", "'database'", "]", "]", "return", "database" ]
Create atom block in CTfile format .
def _to_ctfile_atom_block(self, key):
    """Create atom block in CTfile format.

    One line per atom, each field right-justified per
    ``Atom.atom_block_format``.
    """
    counter = OrderedCounter(Atom.atom_block_format)
    rows = []
    for atom in self[key]:
        row = ''.join(str(value).rjust(spacing)
                      for value, spacing in zip(atom._ctab_data.values(),
                                                counter.values()))
        rows.append(row)
    return '{}\n'.format('\n'.join(rows))
9,519
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L360-L371
[ "def", "quad_gauss_legendre", "(", "order", ",", "lower", "=", "0", ",", "upper", "=", "1", ",", "composite", "=", "None", ")", ":", "order", "=", "numpy", ".", "asarray", "(", "order", ",", "dtype", "=", "int", ")", ".", "flatten", "(", ")", "lower", "=", "numpy", ".", "asarray", "(", "lower", ")", ".", "flatten", "(", ")", "upper", "=", "numpy", ".", "asarray", "(", "upper", ")", ".", "flatten", "(", ")", "dim", "=", "max", "(", "lower", ".", "size", ",", "upper", ".", "size", ",", "order", ".", "size", ")", "order", "=", "numpy", ".", "ones", "(", "dim", ",", "dtype", "=", "int", ")", "*", "order", "lower", "=", "numpy", ".", "ones", "(", "dim", ")", "*", "lower", "upper", "=", "numpy", ".", "ones", "(", "dim", ")", "*", "upper", "if", "composite", "is", "None", ":", "composite", "=", "numpy", ".", "array", "(", "0", ")", "composite", "=", "numpy", ".", "asarray", "(", "composite", ")", "if", "not", "composite", ".", "size", ":", "composite", "=", "numpy", ".", "array", "(", "[", "numpy", ".", "linspace", "(", "0", ",", "1", ",", "composite", "+", "1", ")", "]", "*", "dim", ")", "else", ":", "composite", "=", "numpy", ".", "array", "(", "composite", ")", "if", "len", "(", "composite", ".", "shape", ")", "<=", "1", ":", "composite", "=", "numpy", ".", "transpose", "(", "[", "composite", "]", ")", "composite", "=", "(", "(", "composite", ".", "T", "-", "lower", ")", "/", "(", "upper", "-", "lower", ")", ")", ".", "T", "results", "=", "[", "_gauss_legendre", "(", "order", "[", "i", "]", ",", "composite", "[", "i", "]", ")", "for", "i", "in", "range", "(", "dim", ")", "]", "abscis", "=", "numpy", ".", "array", "(", "[", "_", "[", "0", "]", "for", "_", "in", "results", "]", ")", "weights", "=", "numpy", ".", "array", "(", "[", "_", "[", "1", "]", "for", "_", "in", "results", "]", ")", "abscis", "=", "chaospy", ".", "quad", ".", "combine", "(", "abscis", ")", "weights", "=", "chaospy", ".", "quad", ".", "combine", "(", "weights", ")", "abscis", "=", "(", "upper", 
"-", "lower", ")", "*", "abscis", "+", "lower", "weights", "=", "numpy", ".", "prod", "(", "weights", "*", "(", "upper", "-", "lower", ")", ",", "1", ")", "return", "abscis", ".", "T", ",", "weights" ]
Create bond block in CTfile format .
def _to_ctfile_bond_block ( self , key ) : counter = OrderedCounter ( Bond . bond_block_format ) ctab_bond_block = '\n' . join ( [ '' . join ( [ str ( value ) . rjust ( spacing ) for value , spacing in zip ( bond . _ctab_data . values ( ) , counter . values ( ) ) ] ) for bond in self [ key ] ] ) return '{}\n' . format ( ctab_bond_block )
9,520
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L373-L384
[ "def", "quad_gauss_legendre", "(", "order", ",", "lower", "=", "0", ",", "upper", "=", "1", ",", "composite", "=", "None", ")", ":", "order", "=", "numpy", ".", "asarray", "(", "order", ",", "dtype", "=", "int", ")", ".", "flatten", "(", ")", "lower", "=", "numpy", ".", "asarray", "(", "lower", ")", ".", "flatten", "(", ")", "upper", "=", "numpy", ".", "asarray", "(", "upper", ")", ".", "flatten", "(", ")", "dim", "=", "max", "(", "lower", ".", "size", ",", "upper", ".", "size", ",", "order", ".", "size", ")", "order", "=", "numpy", ".", "ones", "(", "dim", ",", "dtype", "=", "int", ")", "*", "order", "lower", "=", "numpy", ".", "ones", "(", "dim", ")", "*", "lower", "upper", "=", "numpy", ".", "ones", "(", "dim", ")", "*", "upper", "if", "composite", "is", "None", ":", "composite", "=", "numpy", ".", "array", "(", "0", ")", "composite", "=", "numpy", ".", "asarray", "(", "composite", ")", "if", "not", "composite", ".", "size", ":", "composite", "=", "numpy", ".", "array", "(", "[", "numpy", ".", "linspace", "(", "0", ",", "1", ",", "composite", "+", "1", ")", "]", "*", "dim", ")", "else", ":", "composite", "=", "numpy", ".", "array", "(", "composite", ")", "if", "len", "(", "composite", ".", "shape", ")", "<=", "1", ":", "composite", "=", "numpy", ".", "transpose", "(", "[", "composite", "]", ")", "composite", "=", "(", "(", "composite", ".", "T", "-", "lower", ")", "/", "(", "upper", "-", "lower", ")", ")", ".", "T", "results", "=", "[", "_gauss_legendre", "(", "order", "[", "i", "]", ",", "composite", "[", "i", "]", ")", "for", "i", "in", "range", "(", "dim", ")", "]", "abscis", "=", "numpy", ".", "array", "(", "[", "_", "[", "0", "]", "for", "_", "in", "results", "]", ")", "weights", "=", "numpy", ".", "array", "(", "[", "_", "[", "1", "]", "for", "_", "in", "results", "]", ")", "abscis", "=", "chaospy", ".", "quad", ".", "combine", "(", "abscis", ")", "weights", "=", "chaospy", ".", "quad", ".", "combine", "(", "weights", ")", "abscis", "=", "(", "upper", 
"-", "lower", ")", "*", "abscis", "+", "lower", "weights", "=", "numpy", ".", "prod", "(", "weights", "*", "(", "upper", "-", "lower", ")", ",", "1", ")", "return", "abscis", ".", "T", ",", "weights" ]
Create ctab properties block in CTfile format from atom - specific properties .
def _to_ctfile_property_block ( self ) : ctab_properties_data = defaultdict ( list ) for atom in self . atoms : for ctab_property_key , ctab_property_value in atom . _ctab_property_data . items ( ) : ctab_properties_data [ ctab_property_key ] . append ( OrderedDict ( zip ( self . ctab_conf [ self . version ] [ ctab_property_key ] [ 'values' ] , [ atom . atom_number , ctab_property_value ] ) ) ) ctab_property_lines = [ ] for ctab_property_key , ctab_property_value in ctab_properties_data . items ( ) : for entry in ctab_property_value : ctab_property_line = '{} {}{}' . format ( self . ctab_conf [ self . version ] [ ctab_property_key ] [ 'fmt' ] , 1 , '' . join ( [ str ( value ) . rjust ( 4 ) for value in entry . values ( ) ] ) ) ctab_property_lines . append ( ctab_property_line ) if ctab_property_lines : return '{}\n' . format ( '\n' . join ( ctab_property_lines ) ) return ''
9,521
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L386-L408
[ "def", "calc_regenerated", "(", "self", ",", "lastvotetime", ")", ":", "delta", "=", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "strptime", "(", "lastvotetime", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "td", "=", "delta", ".", "days", "ts", "=", "delta", ".", "seconds", "tt", "=", "(", "td", "*", "86400", ")", "+", "ts", "return", "tt", "*", "10000", "/", "86400", "/", "5" ]
Delete atoms by atom number .
def delete_atom ( self , * atom_numbers ) : for atom_number in atom_numbers : deletion_atom = self . atom_by_number ( atom_number = atom_number ) # update atom numbers for atom in self . atoms : if int ( atom . atom_number ) > int ( atom_number ) : atom . atom_number = str ( int ( atom . atom_number ) - 1 ) # find index of a bond to remove and update ctab data dict with new atom numbers for index , bond in enumerate ( self . bonds ) : bond . update_atom_numbers ( ) if atom_number in { bond . first_atom_number , bond . second_atom_number } : self . bonds . remove ( bond ) # remove atom from neighbors list for atom in self . atoms : if deletion_atom in atom . neighbors : atom . neighbors . remove ( deletion_atom ) self . atoms . remove ( deletion_atom )
9,522
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L522-L548
[ "def", "dump_all_handler_stats", "(", "self", ")", ":", "stats", "=", "[", "]", "for", "h", "in", "self", ".", "capture_handlers", ":", "now", "=", "calendar", ".", "timegm", "(", "time", ".", "gmtime", "(", ")", ")", "rot_time", "=", "calendar", ".", "timegm", "(", "h", "[", "'log_rot_time'", "]", ")", "time_delta", "=", "now", "-", "rot_time", "approx_data_rate", "=", "'{} bytes/second'", ".", "format", "(", "h", "[", "'data_read'", "]", "/", "float", "(", "time_delta", ")", ")", "stats", ".", "append", "(", "{", "'name'", ":", "h", "[", "'name'", "]", ",", "'reads'", ":", "h", "[", "'reads'", "]", ",", "'data_read_length'", ":", "'{} bytes'", ".", "format", "(", "h", "[", "'data_read'", "]", ")", ",", "'approx_data_rate'", ":", "approx_data_rate", "}", ")", "return", "stats" ]
Construct new SDfile object from Molfile object .
def from_molfile ( cls , molfile , data = None ) : if not data : data = OrderedDict ( ) if not isinstance ( molfile , Molfile ) : raise ValueError ( 'Not a Molfile type: "{}"' . format ( type ( molfile ) ) ) if not isinstance ( data , dict ) : raise ValueError ( 'Not a dict type: "{}"' . format ( type ( data ) ) ) sdfile = cls ( ) sdfile [ '1' ] = OrderedDict ( ) sdfile [ '1' ] [ 'molfile' ] = molfile sdfile [ '1' ] [ 'data' ] = data return sdfile
9,523
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L768-L789
[ "def", "Heartbeat", "(", "self", ")", ":", "service_key", "=", "_GetServiceKey", "(", ")", "try", ":", "winreg", ".", "SetValueEx", "(", "service_key", ",", "\"Nanny.heartbeat\"", ",", "0", ",", "winreg", ".", "REG_DWORD", ",", "int", "(", "time", ".", "time", "(", ")", ")", ")", "except", "OSError", "as", "e", ":", "logging", ".", "debug", "(", "\"Failed to heartbeat nanny at %s: %s\"", ",", "service_key", ",", "e", ")" ]
Add new data item .
def add_data ( self , id , key , value ) : self [ str ( id ) ] [ 'data' ] . setdefault ( key , [ ] ) self [ str ( id ) ] [ 'data' ] [ key ] . append ( value )
9,524
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L791-L801
[ "def", "start_rekey", "(", "self", ",", "secret_shares", "=", "5", ",", "secret_threshold", "=", "3", ",", "pgp_keys", "=", "None", ",", "backup", "=", "False", ",", "require_verification", "=", "False", ",", "recovery_key", "=", "False", ")", ":", "params", "=", "{", "'secret_shares'", ":", "secret_shares", ",", "'secret_threshold'", ":", "secret_threshold", ",", "'require_verification'", ":", "require_verification", ",", "}", "if", "pgp_keys", ":", "if", "len", "(", "pgp_keys", ")", "!=", "secret_shares", ":", "raise", "ParamValidationError", "(", "'length of pgp_keys argument must equal secret shares value'", ")", "params", "[", "'pgp_keys'", "]", "=", "pgp_keys", "params", "[", "'backup'", "]", "=", "backup", "api_path", "=", "'/v1/sys/rekey/init'", "if", "recovery_key", ":", "api_path", "=", "'/v1/sys/rekey-recovery-key/init'", "response", "=", "self", ".", "_adapter", ".", "put", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Add Molfile and data to SDfile object .
def add_molfile ( self , molfile , data ) : if not isinstance ( molfile , Molfile ) : raise ValueError ( 'Not a Molfile type: "{}"' . format ( type ( molfile ) ) ) if not isinstance ( data , dict ) : raise ValueError ( 'Not a dict type: "{}"' . format ( type ( data ) ) ) entry_ids = sorted ( self . keys ( ) , key = lambda x : int ( x ) ) if entry_ids : last_entry_id = str ( entry_ids [ - 1 ] ) else : last_entry_id = '0' new_entry_id = str ( int ( last_entry_id ) + 1 ) self [ new_entry_id ] = OrderedDict ( ) self [ new_entry_id ] [ 'molfile' ] = molfile self [ new_entry_id ] [ 'data' ] = data
9,525
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L803-L827
[ "def", "on_predicate", "(", "wait_gen", ",", "predicate", "=", "operator", ".", "not_", ",", "max_tries", "=", "None", ",", "max_time", "=", "None", ",", "jitter", "=", "full_jitter", ",", "on_success", "=", "None", ",", "on_backoff", "=", "None", ",", "on_giveup", "=", "None", ",", "logger", "=", "'backoff'", ",", "*", "*", "wait_gen_kwargs", ")", ":", "def", "decorate", "(", "target", ")", ":", "# change names because python 2.x doesn't have nonlocal", "logger_", "=", "logger", "if", "isinstance", "(", "logger_", ",", "basestring", ")", ":", "logger_", "=", "logging", ".", "getLogger", "(", "logger_", ")", "on_success_", "=", "_config_handlers", "(", "on_success", ")", "on_backoff_", "=", "_config_handlers", "(", "on_backoff", ",", "_log_backoff", ",", "logger_", ")", "on_giveup_", "=", "_config_handlers", "(", "on_giveup", ",", "_log_giveup", ",", "logger_", ")", "retry", "=", "None", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "5", ")", ":", "# pragma: python=3.5", "import", "asyncio", "if", "asyncio", ".", "iscoroutinefunction", "(", "target", ")", ":", "import", "backoff", ".", "_async", "retry", "=", "backoff", ".", "_async", ".", "retry_predicate", "elif", "_is_event_loop", "(", ")", "and", "_is_current_task", "(", ")", ":", "# Verify that sync version is not being run from coroutine", "# (that would lead to event loop hiccups).", "raise", "TypeError", "(", "\"backoff.on_predicate applied to a regular function \"", "\"inside coroutine, this will lead to event loop \"", "\"hiccups. Use backoff.on_predicate on coroutines in \"", "\"asynchronous code.\"", ")", "if", "retry", "is", "None", ":", "retry", "=", "_sync", ".", "retry_predicate", "return", "retry", "(", "target", ",", "wait_gen", ",", "predicate", ",", "max_tries", ",", "max_time", ",", "jitter", ",", "on_success_", ",", "on_backoff_", ",", "on_giveup_", ",", "wait_gen_kwargs", ")", "# Return a function which decorates a target with a retry loop.", "return", "decorate" ]
Add new SDfile to current SDfile .
def add_sdfile ( self , sdfile ) : if not isinstance ( sdfile , SDfile ) : raise ValueError ( 'Not a SDfile type: "{}"' . format ( type ( sdfile ) ) ) for entry_id in sdfile : self . add_molfile ( molfile = sdfile [ entry_id ] [ 'molfile' ] , data = sdfile [ entry_id ] [ 'data' ] )
9,526
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L829-L841
[ "def", "set_response_content_type", "(", "self", ",", "response_content_types", "=", "None", ")", ":", "request_content_types", "=", "self", ".", "content_types", "if", "request_content_types", ":", "ct", "=", "request_content_types", ".", "best_match", "(", "response_content_types", ")", "if", "ct", "and", "'*'", "in", "ct", ":", "ct", "=", "None", "if", "not", "ct", "and", "response_content_types", ":", "raise", "HttpException", "(", "status", "=", "415", ",", "msg", "=", "request_content_types", ")", "self", ".", "response", ".", "content_type", "=", "ct" ]
Access neighbor atoms .
def neighbor_atoms ( self , atom_symbol = None ) : if not atom_symbol : return self . neighbors else : return [ atom for atom in self . neighbors if atom [ 'atom_symbol' ] == atom_symbol ]
9,527
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L983-L993
[ "def", "dump_all_handler_stats", "(", "self", ")", ":", "stats", "=", "[", "]", "for", "h", "in", "self", ".", "capture_handlers", ":", "now", "=", "calendar", ".", "timegm", "(", "time", ".", "gmtime", "(", ")", ")", "rot_time", "=", "calendar", ".", "timegm", "(", "h", "[", "'log_rot_time'", "]", ")", "time_delta", "=", "now", "-", "rot_time", "approx_data_rate", "=", "'{} bytes/second'", ".", "format", "(", "h", "[", "'data_read'", "]", "/", "float", "(", "time_delta", ")", ")", "stats", ".", "append", "(", "{", "'name'", ":", "h", "[", "'name'", "]", ",", "'reads'", ":", "h", "[", "'reads'", "]", ",", "'data_read_length'", ":", "'{} bytes'", ".", "format", "(", "h", "[", "'data_read'", "]", ")", ",", "'approx_data_rate'", ":", "approx_data_rate", "}", ")", "return", "stats" ]
Update links first_atom_number - > second_atom_number
def update_atom_numbers ( self ) : self . _ctab_data [ 'first_atom_number' ] = self . first_atom . atom_number self . _ctab_data [ 'second_atom_number' ] = self . second_atom . atom_number
9,528
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L1111-L1118
[ "def", "vb_stop_vm", "(", "name", "=", "None", ",", "timeout", "=", "10000", ",", "*", "*", "kwargs", ")", ":", "vbox", "=", "vb_get_box", "(", ")", "machine", "=", "vbox", ".", "findMachine", "(", "name", ")", "log", ".", "info", "(", "'Stopping machine %s'", ",", "name", ")", "session", "=", "_virtualboxManager", ".", "openMachineSession", "(", "machine", ")", "try", ":", "console", "=", "session", ".", "console", "progress", "=", "console", ".", "powerDown", "(", ")", "progress", ".", "waitForCompletion", "(", "timeout", ")", "finally", ":", "_virtualboxManager", ".", "closeMachineSession", "(", "session", ")", "vb_wait_for_session_state", "(", "session", ")", "log", ".", "info", "(", "'Stopped machine %s is now %s'", ",", "name", ",", "vb_machinestate_to_str", "(", "machine", ".", "state", ")", ")", "return", "vb_xpcom_to_attribute_dict", "(", "machine", ",", "'IMachine'", ")" ]
Default encoder .
def default ( self , o ) : if isinstance ( o , Atom ) or isinstance ( o , Bond ) : return o . _ctab_data else : return o . __dict__
9,529
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L1144-L1155
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Returns ServerConfig instance with configuration given server .
def get ( self , server ) : server_config = self . config . get ( server ) try : while server_config is None : new_config = self . _read_next_config ( ) server_config = new_config . get ( server ) new_config . update ( self . config ) self . config = new_config except StopIteration : return _default_server_configuration ( server ) if CONFIG_URL_KEY_NAME not in server_config : message = "'%s' must be specified in configuration for '%s'" % ( CONFIG_URL_KEY_NAME , server ) raise ServerConfigMissingUrlError ( message ) return ServerConfig ( server_config )
9,530
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/config.py#L94-L121
[ "def", "extract", "(", "src_vector", ":", "str", ",", "burn_attribute", ":", "str", ",", "src_raster", ":", "list", ",", "dst_names", ":", "list", ",", "dst_dir", ":", "str", ",", "src_raster_template", ":", "str", "=", "None", ",", "gdal_dtype", ":", "int", "=", "4", ",", "n_jobs", ":", "int", "=", "1", ")", ":", "if", "src_raster_template", "is", "None", ":", "src_raster_template", "=", "src_raster", "[", "0", "]", "path_rasterized", "=", "os", ".", "path", ".", "join", "(", "dst_dir", ",", "f\"burn_attribute_rasterized_{burn_attribute}.tif\"", ")", "paths_extracted_aux", "=", "{", "ele", ":", "os", ".", "path", ".", "join", "(", "dst_dir", ",", "f\"{ele}.npy\"", ")", "for", "ele", "in", "[", "f\"aux_vector_{burn_attribute}\"", ",", "\"aux_coord_x\"", ",", "\"aux_coord_y\"", "]", "}", "paths_extracted_raster", "=", "{", "}", "for", "path", ",", "name", "in", "zip", "(", "src_raster", ",", "dst_names", ")", ":", "dst", "=", "f\"{os.path.join(dst_dir, name)}.npy\"", "if", "not", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "paths_extracted_raster", "[", "path", "]", "=", "dst", "if", "not", "os", ".", "path", ".", "exists", "(", "dst_dir", ")", ":", "os", ".", "makedirs", "(", "dst_dir", ")", "# if it does not already exist, here we first create the rasterized data", "if", "not", "os", ".", "path", ".", "exists", "(", "path_rasterized", ")", ":", "if", "src_raster_template", "is", "None", ":", "src_raster_template", "=", "src_raster", "[", "0", "]", "# print(\"Rasterizing vector attribute.\")", "rasterize", "(", "src_vector", "=", "src_vector", ",", "burn_attribute", "=", "burn_attribute", ",", "src_raster_template", "=", "src_raster_template", ",", "dst_rasterized", "=", "path_rasterized", ",", "gdal_dtype", "=", "gdal_dtype", ")", "# if any of the destination files do not exist we need the locations of the pixels to be", "# extracted in form of a numpy array bool (mask_arr) that fits the rasters from which we will", "# extract below", "if", "not", 
"(", "all", "(", "[", "os", ".", "path", ".", "exists", "(", "path", ")", "for", "path", "in", "paths_extracted_aux", ".", "values", "(", ")", "]", ")", "and", "all", "(", "[", "os", ".", "path", ".", "exists", "(", "path", ")", "for", "path", "in", "paths_extracted_raster", ".", "values", "(", ")", "]", ")", ")", ":", "# print(\"Creating mask array for pixels to be extracted.\")", "mask_arr", "=", "_get_mask_array", "(", "path_rasterized", ",", "paths_extracted_aux", ",", "burn_attribute", ")", "else", ":", "return", "0", "# create the pixel coordinates if they do not exist", "if", "not", "all", "(", "[", "os", ".", "path", ".", "exists", "(", "paths_extracted_aux", "[", "\"aux_coord_x\"", "]", ")", ",", "os", ".", "path", ".", "exists", "(", "paths_extracted_aux", "[", "\"aux_coord_y\"", "]", ")", "]", ")", ":", "_create_and_save_coords", "(", "path_rasterized", ",", "paths_extracted_aux", ",", "mask_arr", ")", "# lets extract the raster values in case of sequential processing", "# or remove existing raster layers to prepare parallel processing", "if", "n_jobs", "==", "1", ":", "for", "path_src", ",", "path_dst", "in", "tqdm", "(", "paths_extracted_raster", ".", "items", "(", ")", ",", "total", "=", "len", "(", "paths_extracted_raster", ")", ")", ":", "_extract_and_save_one_layer", "(", "path_src", ",", "path_dst", ",", "mask_arr", ")", "else", ":", "import", "multiprocessing", "as", "mp", "if", "n_jobs", "==", "-", "1", ":", "n_jobs", "=", "mp", ".", "cpu_count", "(", ")", "pool", "=", "mp", ".", "Pool", "(", "processes", "=", "n_jobs", ")", "_", "=", "[", "pool", ".", "apply_async", "(", "_extract_and_save_one_layer", ",", "args", "=", "(", "src", ",", "dst", ",", "mask_arr", ")", ")", "for", "src", ",", "dst", "in", "paths_extracted_raster", ".", "items", "(", ")", "]", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "return", "0" ]
Free the underlying C array
def free ( self ) : if self . _ptr is None : return Gauged . array_free ( self . ptr ) FloatArray . ALLOCATIONS -= 1 self . _ptr = None
9,531
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/float_array.py#L59-L65
[ "def", "connection", "(", "cls", ")", ":", "local", "=", "cls", ".", "_threadlocal", "if", "not", "getattr", "(", "local", ",", "'connection'", ",", "None", ")", ":", "# Make sure these variables are no longer affected by other threads.", "local", ".", "user", "=", "cls", ".", "user", "local", ".", "password", "=", "cls", ".", "password", "local", ".", "site", "=", "cls", ".", "site", "local", ".", "timeout", "=", "cls", ".", "timeout", "local", ".", "headers", "=", "cls", ".", "headers", "local", ".", "format", "=", "cls", ".", "format", "local", ".", "version", "=", "cls", ".", "version", "local", ".", "url", "=", "cls", ".", "url", "if", "cls", ".", "site", "is", "None", ":", "raise", "ValueError", "(", "\"No shopify session is active\"", ")", "local", ".", "connection", "=", "ShopifyConnection", "(", "cls", ".", "site", ",", "cls", ".", "user", ",", "cls", ".", "password", ",", "cls", ".", "timeout", ",", "cls", ".", "format", ")", "return", "local", ".", "connection" ]
Takes dbfn and connects gets quants for each line in tsvfn sorts them in line by using keys in quantheader list .
def generate_psms_quanted ( quantdb , tsvfn , isob_header , oldheader , isobaric = False , precursor = False ) : allquants , sqlfields = quantdb . select_all_psm_quants ( isobaric , precursor ) quant = next ( allquants ) for rownr , psm in enumerate ( readers . generate_tsv_psms ( tsvfn , oldheader ) ) : outpsm = { x : y for x , y in psm . items ( ) } if precursor : pquant = quant [ sqlfields [ 'precursor' ] ] if pquant is None : pquant = 'NA' outpsm . update ( { mzidtsvdata . HEADER_PRECURSOR_QUANT : str ( pquant ) } ) if isobaric : isoquants = { } while quant [ 0 ] == rownr : isoquants . update ( { quant [ sqlfields [ 'isochan' ] ] : str ( quant [ sqlfields [ 'isoquant' ] ] ) } ) try : quant = next ( allquants ) except StopIteration : # last PSM, break from while loop or it is not yielded at all break outpsm . update ( get_quant_NAs ( isoquants , isob_header ) ) else : try : quant = next ( allquants ) except StopIteration : # last PSM, needs explicit yield/break or it will not be yielded yield outpsm break yield outpsm
9,532
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/quant.py#L5-L36
[ "def", "get_all_info", "(", "pdb_id", ")", ":", "out", "=", "to_dict", "(", "get_info", "(", "pdb_id", ")", ")", "[", "'molDescription'", "]", "[", "'structureId'", "]", "out", "=", "remove_at_sign", "(", "out", ")", "return", "out" ]
r \ x62
def t_escaped_BACKSPACE_CHAR ( self , t ) : # 'b' t . lexer . pop_state ( ) t . value = unichr ( 0x0008 ) return t
9,533
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L173-L177
[ "def", "density_hub", "(", "self", ",", "weather_df", ")", ":", "if", "self", ".", "density_model", "!=", "'interpolation_extrapolation'", ":", "temperature_hub", "=", "self", ".", "temperature_hub", "(", "weather_df", ")", "# Calculation of density in kg/m³ at hub height", "if", "self", ".", "density_model", "==", "'barometric'", ":", "logging", ".", "debug", "(", "'Calculating density using barometric height '", "'equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "barometric", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'ideal_gas'", ":", "logging", ".", "debug", "(", "'Calculating density using ideal gas equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "ideal_gas", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'interpolation_extrapolation'", ":", "logging", ".", "debug", "(", "'Calculating density using linear inter- or '", "'extrapolation.'", ")", "density_hub", "=", "tools", ".", 
"linear_interpolation_extrapolation", "(", "weather_df", "[", "'density'", "]", ",", "self", ".", "power_plant", ".", "hub_height", ")", "else", ":", "raise", "ValueError", "(", "\"'{0}' is an invalid value. \"", ".", "format", "(", "self", ".", "density_model", ")", "+", "\"`density_model` \"", "+", "\"must be 'barometric', 'ideal_gas' or \"", "+", "\"'interpolation_extrapolation'.\"", ")", "return", "density_hub" ]
r \ x66
def t_escaped_FORM_FEED_CHAR ( self , t ) : # 'f' t . lexer . pop_state ( ) t . value = unichr ( 0x000c ) return t
9,534
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L179-L183
[ "def", "checkIfAvailable", "(", "self", ",", "dateTime", "=", "timezone", ".", "now", "(", ")", ")", ":", "return", "(", "self", ".", "startTime", ">=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__closeBookingDays'", ")", ")", "and", "self", ".", "startTime", "<=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__openBookingDays'", ")", ")", "and", "not", "self", ".", "eventRegistration", "and", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "available", "or", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "tentative", "and", "getattr", "(", "getattr", "(", "self", ".", "temporaryEventRegistration", ",", "'registration'", ",", "None", ")", ",", "'expirationDate'", ",", "timezone", ".", "now", "(", ")", ")", "<=", "timezone", ".", "now", "(", ")", ")", ")", ")" ]
r \ x72
def t_escaped_CARRIAGE_RETURN_CHAR ( self , t ) : # 'r' t . lexer . pop_state ( ) t . value = unichr ( 0x000d ) return t
9,535
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L185-L189
[ "def", "checkIfAvailable", "(", "self", ",", "dateTime", "=", "timezone", ".", "now", "(", ")", ")", ":", "return", "(", "self", ".", "startTime", ">=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__closeBookingDays'", ")", ")", "and", "self", ".", "startTime", "<=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__openBookingDays'", ")", ")", "and", "not", "self", ".", "eventRegistration", "and", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "available", "or", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "tentative", "and", "getattr", "(", "getattr", "(", "self", ".", "temporaryEventRegistration", ",", "'registration'", ",", "None", ")", ",", "'expirationDate'", ",", "timezone", ".", "now", "(", ")", ")", "<=", "timezone", ".", "now", "(", ")", ")", ")", ")" ]
r \ x6E
def t_escaped_LINE_FEED_CHAR ( self , t ) : # 'n' t . lexer . pop_state ( ) t . value = unichr ( 0x000a ) return t
9,536
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L191-L195
[ "def", "numRegisteredForRole", "(", "self", ",", "role", ",", "includeTemporaryRegs", "=", "False", ")", ":", "count", "=", "self", ".", "eventregistration_set", ".", "filter", "(", "cancelled", "=", "False", ",", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "count", "(", ")", "if", "includeTemporaryRegs", ":", "count", "+=", "self", ".", "temporaryeventregistration_set", ".", "filter", "(", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "exclude", "(", "registration__expirationDate__lte", "=", "timezone", ".", "now", "(", ")", ")", ".", "count", "(", ")", "return", "count" ]
r \ x74
def t_escaped_TAB_CHAR ( self , t ) : # 't' t . lexer . pop_state ( ) t . value = unichr ( 0x0009 ) return t
9,537
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L197-L201
[ "def", "numRegisteredForRole", "(", "self", ",", "role", ",", "includeTemporaryRegs", "=", "False", ")", ":", "count", "=", "self", ".", "eventregistration_set", ".", "filter", "(", "cancelled", "=", "False", ",", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "count", "(", ")", "if", "includeTemporaryRegs", ":", "count", "+=", "self", ".", "temporaryeventregistration_set", ".", "filter", "(", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "exclude", "(", "registration__expirationDate__lte", "=", "timezone", ".", "now", "(", ")", ")", ".", "count", "(", ")", "return", "count" ]
Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict . Keys == IDs values == fns
def get_mzid_specfile_ids ( mzidfn , namespace ) : sid_fn = { } for specdata in mzid_specdata_generator ( mzidfn , namespace ) : sid_fn [ specdata . attrib [ 'id' ] ] = specdata . attrib [ 'name' ] return sid_fn
9,538
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/mzidplus.py#L96-L102
[ "def", "communityvisibilitystate", "(", "self", ")", ":", "if", "self", ".", "_communityvisibilitystate", "==", "None", ":", "return", "None", "elif", "self", ".", "_communityvisibilitystate", "in", "self", ".", "VisibilityState", ":", "return", "self", ".", "VisibilityState", "[", "self", ".", "_communityvisibilitystate", "]", "else", ":", "#Invalid State", "return", "None" ]
Loop through SpecIdentificationItem children . Find percolator data by matching to a dict lookup . Return a dict containing percolator data
def get_specidentitem_percolator_data ( item , xmlns ) : percomap = { '{0}userParam' . format ( xmlns ) : PERCO_HEADERMAP , } percodata = { } for child in item : try : percoscore = percomap [ child . tag ] [ child . attrib [ 'name' ] ] except KeyError : continue else : percodata [ percoscore ] = child . attrib [ 'value' ] outkeys = [ y for x in list ( percomap . values ( ) ) for y in list ( x . values ( ) ) ] for key in outkeys : try : percodata [ key ] except KeyError : percodata [ key ] = 'NA' return percodata
9,539
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/mzidplus.py#L115-L134
[ "def", "_has_old_request_ended", "(", "self", ",", "shard_state", ")", ":", "assert", "shard_state", ".", "slice_start_time", "is", "not", "None", "assert", "shard_state", ".", "slice_request_id", "is", "not", "None", "request_ids", "=", "[", "shard_state", ".", "slice_request_id", "]", "logs", "=", "None", "try", ":", "logs", "=", "list", "(", "logservice", ".", "fetch", "(", "request_ids", "=", "request_ids", ")", ")", "except", "(", "apiproxy_errors", ".", "FeatureNotEnabledError", ",", "apiproxy_errors", ".", "CapabilityDisabledError", ")", "as", "e", ":", "# Managed VMs do not have access to the logservice API", "# See https://groups.google.com/forum/#!topic/app-engine-managed-vms/r8i65uiFW0w", "logging", ".", "warning", "(", "\"Ignoring exception: %s\"", ",", "e", ")", "if", "not", "logs", "or", "not", "logs", "[", "0", "]", ".", "finished", ":", "return", "False", "return", "True" ]
Search for a path
def locate_path ( dname , recurse_down = True ) : tried_fpaths = [ ] root_dir = os . getcwd ( ) while root_dir is not None : dpath = join ( root_dir , dname ) if exists ( dpath ) : return dpath else : tried_fpaths . append ( dpath ) _new_root = dirname ( root_dir ) if _new_root == root_dir : root_dir = None break else : root_dir = _new_root if not recurse_down : break msg = 'Cannot locate dname=%r' % ( dname , ) msg = ( '\n[sysreq!] Checked: ' . join ( tried_fpaths ) ) print ( msg ) raise ImportError ( msg )
9,540
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L116-L137
[ "def", "open_pager", "(", "self", ")", ":", "n_rows", ",", "n_cols", "=", "self", ".", "term", ".", "stdscr", ".", "getmaxyx", "(", ")", "if", "self", ".", "config", "[", "'max_pager_cols'", "]", "is", "not", "None", ":", "n_cols", "=", "min", "(", "n_cols", ",", "self", ".", "config", "[", "'max_pager_cols'", "]", ")", "data", "=", "self", ".", "get_selected_item", "(", ")", "if", "data", "[", "'type'", "]", "==", "'Submission'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'text'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "elif", "data", "[", "'type'", "]", "==", "'Comment'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'body'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "else", ":", "self", ".", "term", ".", "flash", "(", ")" ]
r Outputs commands to help purge a repo
def total_purge_developed_repo ( repodir ) : assert repodir is not None import utool as ut import os repo = ut . util_git . Repo ( dpath = repodir ) user = os . environ [ 'USER' ] fmtdict = dict ( user = user , modname = repo . modname , reponame = repo . reponame , dpath = repo . dpath , global_site_pkgs = ut . get_global_dist_packages_dir ( ) , local_site_pkgs = ut . get_local_dist_packages_dir ( ) , venv_site_pkgs = ut . get_site_packages_dir ( ) , ) commands = [ _ . format ( * * fmtdict ) for _ in [ 'pip uninstall {modname}' , 'sudo -H pip uninstall {modname}' , 'sudo pip uninstall {modname}' , 'easy_install -m {modname}' , 'cd {dpath} && python setup.py develop --uninstall' , # If they still exist try chowning to current user 'sudo chown -R {user}:{user} {dpath}' , ] ] print ( 'Normal uninstall commands' ) print ( '\n' . join ( commands ) ) possible_link_paths = [ _ . format ( * * fmtdict ) for _ in [ '{dpath}/{modname}.egg-info' , '{dpath}/build' , '{venv_site_pkgs}/{reponame}.egg-info' , '{local_site_pkgs}/{reponame}.egg-info' , '{venv_site_pkgs}/{reponame}.egg-info' , ] ] from os . path import exists , basename existing_link_paths = [ path for path in possible_link_paths ] print ( '# Delete paths and eggs' ) for path in existing_link_paths : if exists ( path ) : if ut . get_file_info ( path ) [ 'owner' ] != user : print ( 'sudo /bin/rm -rf {path}' . format ( path = path ) ) else : print ( '/bin/rm -rf {path}' . format ( path = path ) ) #ut.delete(path) print ( '# Make sure nothing is in the easy install paths' ) easyinstall_paths = [ _ . format ( * * fmtdict ) for _ in [ '{venv_site_pkgs}/easy-install.pth' , '{local_site_pkgs}/easy-install.pth' , '{venv_site_pkgs}/easy-install.pth' , ] ] for path in easyinstall_paths : if exists ( path ) : easy_install_list = ut . readfrom ( path , verbose = False ) . strip ( ) . split ( '\n' ) easy_install_list_ = [ basename ( p ) for p in easy_install_list ] index1 = ut . listfind ( easy_install_list_ , repo . 
reponame ) index2 = ut . listfind ( easy_install_list_ , repo . modname ) if index1 is not None or index2 is not None : print ( 'Found at index1=%r, index=%r' % ( index1 , index2 ) ) if ut . get_file_info ( path ) [ 'owner' ] != user : print ( 'sudo gvim {path}' . format ( path = path ) ) else : print ( 'gvim {path}' . format ( path = path ) ) checkcmds = [ _ . format ( * * fmtdict ) for _ in [ 'python -c "import {modname}; print({modname}.__file__)"' ] ] import sys assert repo . modname not in sys . modules print ( "# CHECK STATUS" ) for cmd in checkcmds : print ( cmd )
9,541
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L151-L244
[ "async", "def", "load_varint", "(", "reader", ")", ":", "buffer", "=", "_UINT_BUFFER", "await", "reader", ".", "areadinto", "(", "buffer", ")", "width", "=", "int_mark_to_size", "(", "buffer", "[", "0", "]", "&", "PortableRawSizeMark", ".", "MASK", ")", "result", "=", "buffer", "[", "0", "]", "shift", "=", "8", "for", "_", "in", "range", "(", "width", "-", "1", ")", ":", "await", "reader", ".", "areadinto", "(", "buffer", ")", "result", "+=", "buffer", "[", "0", "]", "<<", "shift", "shift", "+=", "8", "return", "result", ">>", "2" ]
Adds a dictionary to the prefs
def add_dict ( self , dyn_dict ) : if not isinstance ( dyn_dict , dict ) : raise Exception ( 'DynStruct.add_dict expects a dictionary.' + 'Recieved: ' + six . text_type ( type ( dyn_dict ) ) ) for ( key , val ) in six . iteritems ( dyn_dict ) : self [ key ] = val
9,542
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L52-L58
[ "def", "set_page_artid", "(", "self", ",", "page_start", "=", "None", ",", "page_end", "=", "None", ",", "artid", "=", "None", ")", ":", "if", "page_end", "and", "not", "page_start", ":", "raise", "ValueError", "(", "'End_page provided without start_page'", ")", "self", ".", "_ensure_reference_field", "(", "'publication_info'", ",", "{", "}", ")", "publication_info", "=", "self", ".", "obj", "[", "'reference'", "]", "[", "'publication_info'", "]", "if", "page_start", ":", "publication_info", "[", "'page_start'", "]", "=", "page_start", "if", "page_end", ":", "publication_info", "[", "'page_end'", "]", "=", "page_end", "if", "artid", ":", "publication_info", "[", "'artid'", "]", "=", "artid" ]
Converts dynstruct to a dictionary .
def to_dict ( self ) : dyn_dict = { } for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : dyn_dict [ key ] = val return dyn_dict
9,543
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L60-L66
[ "def", "_constrain_L2_grad", "(", "op", ",", "grad", ")", ":", "inp", "=", "op", ".", "inputs", "[", "0", "]", "inp_norm", "=", "tf", ".", "norm", "(", "inp", ")", "unit_inp", "=", "inp", "/", "inp_norm", "grad_projection", "=", "dot", "(", "unit_inp", ",", "grad", ")", "parallel_grad", "=", "unit_inp", "*", "grad_projection", "is_in_ball", "=", "tf", ".", "less_equal", "(", "inp_norm", ",", "1", ")", "is_pointed_inward", "=", "tf", ".", "less", "(", "grad_projection", ",", "0", ")", "allow_grad", "=", "tf", ".", "logical_or", "(", "is_in_ball", ",", "is_pointed_inward", ")", "clip_grad", "=", "tf", ".", "logical_not", "(", "allow_grad", ")", "clipped_grad", "=", "tf", ".", "cond", "(", "clip_grad", ",", "lambda", ":", "grad", "-", "parallel_grad", ",", "lambda", ":", "grad", ")", "return", "clipped_grad" ]
returns a string which when evaluated will add the stored variables to the current namespace
def execstr ( self , local_name ) : execstr = '' for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : execstr += key + ' = ' + local_name + '.' + key + '\n' return execstr
9,544
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L88-L99
[ "def", "isSupportedContent", "(", "cls", ",", "fileContent", ")", ":", "magic", "=", "bytearray", "(", "fileContent", ")", "[", ":", "4", "]", "return", "magic", "==", "p", "(", "'>I'", ",", "0xfeedface", ")", "or", "magic", "==", "p", "(", "'>I'", ",", "0xfeedfacf", ")", "or", "magic", "==", "p", "(", "'<I'", ",", "0xfeedface", ")", "or", "magic", "==", "p", "(", "'<I'", ",", "0xfeedfacf", ")" ]
Returns list of proteins for a passed psm_id
def get_proteins_for_peptide ( self , psm_id ) : protsql = self . get_sql_select ( [ 'protein_acc' ] , 'protein_psm' ) protsql = '{0} WHERE psm_id=?' . format ( protsql ) cursor = self . get_cursor ( ) proteins = cursor . execute ( protsql , psm_id ) . fetchall ( ) return [ x [ 0 ] for x in proteins ]
9,545
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/proteingroups.py#L79-L85
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(", "color", ",", "six", ".", "string_types", ")", ":", "rgb", "=", "map", "(", "int", ",", "color", ".", "split", "(", "','", ")", ")", "else", ":", "rgb", "=", "color", ".", "Get", "(", ")", "logging", ".", "disable", "(", "logging", ".", "DEBUG", ")", "distances", "=", "[", "color_diff", "(", "rgb", ",", "x", ")", "for", "x", "in", "self", ".", "xlwt_colors", "]", "logging", ".", "disable", "(", "logging", ".", "NOTSET", ")", "result", "=", "distances", ".", "index", "(", "min", "(", "distances", ")", ")", "self", ".", "unused_colors", ".", "discard", "(", "self", ".", "xlwt_colors", "[", "result", "]", ")", "return", "result" ]
Checks a frame and raises the relevant exception if required .
def raise_if_error ( frame ) : if "status" not in frame or frame [ "status" ] == b"\x00" : return codes_and_exceptions = { b"\x01" : exceptions . ZigBeeUnknownError , b"\x02" : exceptions . ZigBeeInvalidCommand , b"\x03" : exceptions . ZigBeeInvalidParameter , b"\x04" : exceptions . ZigBeeTxFailure } if frame [ "status" ] in codes_and_exceptions : raise codes_and_exceptions [ frame [ "status" ] ] ( ) raise exceptions . ZigBeeUnknownStatus ( )
9,546
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L25-L39
[ "def", "retrieve", "(", "self", ",", "value", ")", ":", "# type: (Any) -> Any", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "for", "key", ",", "this_value", "in", "self", ".", "secrets", ".", "items", "(", ")", ":", "value", "=", "value", ".", "replace", "(", "key", ",", "this_value", ")", "elif", "isinstance", "(", "value", ",", "MutableMapping", ")", ":", "return", "{", "k", ":", "self", ".", "retrieve", "(", "v", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "value", ",", "MutableSequence", ")", ":", "return", "[", "self", ".", "retrieve", "(", "v", ")", "for", "k", ",", "v", "in", "enumerate", "(", "value", ")", "]", "return", "value" ]
Convert hex string like \ x0A \ xE3 to 2787 .
def hex_to_int ( value ) : if version_info . major >= 3 : return int . from_bytes ( value , "big" ) return int ( value . encode ( "hex" ) , 16 )
9,547
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L42-L48
[ "def", "order_replicant_volume", "(", "self", ",", "volume_id", ",", "snapshot_schedule", ",", "location", ",", "tier", "=", "None", ")", ":", "file_mask", "=", "'billingItem[activeChildren,hourlyFlag],'", "'storageTierLevel,osType,staasVersion,'", "'hasEncryptionAtRest,snapshotCapacityGb,schedules,'", "'intervalSchedule,hourlySchedule,dailySchedule,'", "'weeklySchedule,storageType[keyName],provisionedIops'", "file_volume", "=", "self", ".", "get_file_volume_details", "(", "volume_id", ",", "mask", "=", "file_mask", ")", "order", "=", "storage_utils", ".", "prepare_replicant_order_object", "(", "self", ",", "snapshot_schedule", ",", "location", ",", "tier", ",", "file_volume", ",", "'file'", ")", "return", "self", ".", "client", ".", "call", "(", "'Product_Order'", ",", "'placeOrder'", ",", "order", ")" ]
Convert the ADC raw value to a percentage .
def adc_to_percentage ( value , max_volts , clamp = True ) : percentage = ( 100.0 / const . ADC_MAX_VAL ) * value return max ( min ( 100 , percentage ) , 0 ) if clamp else percentage
9,548
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L51-L56
[ "def", "initiate", "(", "self", ",", "*", "*", "kwargs", ")", ":", "run_startup", "=", "kwargs", ".", "pop", "(", "'run_startup'", ",", "True", ")", "setter", "=", "lambda", "value", ",", "name", ":", "setattr", "(", "self", ",", "name", ",", "value", ")", "d", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "agency", ".", "_messaging", ".", "get_connection", ",", "self", ")", "d", ".", "addCallback", "(", "setter", ",", "\"_messaging\"", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "agency", ".", "_database", ".", "get_connection", ")", "d", ".", "addCallback", "(", "setter", ",", "'_database'", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_reload_descriptor", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_subscribe_for_descriptor_changes", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_store_instance_id", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_load_configuration", ")", "d", ".", "addCallback", "(", "setter", ",", "'_configuration'", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "join_shard", ",", "self", ".", "_descriptor", ".", "shard", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "journal_agent_created", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_call_initiate", ",", "*", "*", "kwargs", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "call_next", ",", "self", ".", "_call_startup", ",", "call_startup", "=", "run_startup", ")", "d", ".", "addCallback", "(", "defer", ".", "override_result", ",", "self", ")", "d", ".", "addErrback", "(", "self", ".", "_startup_error", ")", "# Ensure the execution chain is broken", "self", ".", "call_next", "(", "d", ".", "callback", 
",", "None", ")", "return", "d" ]
Converts the output from the ADC into the desired type .
def convert_adc ( value , output_type , max_volts ) : return { const . ADC_RAW : lambda x : x , const . ADC_PERCENTAGE : adc_to_percentage , const . ADC_VOLTS : adc_to_volts , const . ADC_MILLIVOLTS : adc_to_millivolts } [ output_type ] ( value , max_volts )
9,549
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L73-L82
[ "def", "build_synchronize_decorator", "(", ")", ":", "lock", "=", "threading", ".", "Lock", "(", ")", "def", "lock_decorator", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "lock_decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "lock", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "lock_decorated", "return", "lock_decorator" ]
Put the frame into the _rx_frames dict with a key of the frame_id .
def _frame_received ( self , frame ) : try : self . _rx_frames [ frame [ "frame_id" ] ] = frame except KeyError : # Has no frame_id, ignore? pass _LOGGER . debug ( "Frame received: %s" , frame ) # Give the frame to any interested functions for handler in self . _rx_handlers : handler ( frame )
9,550
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L122-L134
[ "def", "update", "(", "self", ",", "instance", ",", "validated_data", ")", ":", "admin", "=", "validated_data", ".", "pop", "(", "'is_superuser'", ",", "None", ")", "password", "=", "validated_data", ".", "pop", "(", "'password'", ",", "None", ")", "if", "validated_data", ".", "get", "(", "'email'", ")", "is", "not", "None", ":", "validated_data", "[", "'username'", "]", "=", "validated_data", "[", "'email'", "]", "for", "attr", ",", "value", "in", "validated_data", ".", "items", "(", ")", ":", "setattr", "(", "instance", ",", "attr", ",", "value", ")", "if", "admin", "is", "not", "None", ":", "instance", ".", "is_staff", "=", "admin", "instance", ".", "is_superuser", "=", "admin", "if", "password", "is", "not", "None", ":", "instance", ".", "set_password", "(", "password", ")", "instance", ".", "save", "(", ")", "return", "instance" ]
Send a frame to either the local ZigBee or a remote device .
def _send ( self , * * kwargs ) : if kwargs . get ( "dest_addr_long" ) is not None : self . zb . remote_at ( * * kwargs ) else : self . zb . at ( * * kwargs )
9,551
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L136-L143
[ "def", "__get_mapping", "(", "self", ",", "structures", ")", ":", "for", "c", "in", "permutations", "(", "structures", ",", "len", "(", "self", ".", "__patterns", ")", ")", ":", "for", "m", "in", "product", "(", "*", "(", "x", ".", "get_substructure_mapping", "(", "y", ",", "limit", "=", "0", ")", "for", "x", ",", "y", "in", "zip", "(", "self", ".", "__patterns", ",", "c", ")", ")", ")", ":", "mapping", "=", "{", "}", "for", "i", "in", "m", ":", "mapping", ".", "update", "(", "i", ")", "if", "mapping", ":", "yield", "mapping" ]
Send a frame to either the local ZigBee or a remote device and wait for a pre - defined amount of time for its response .
def _send_and_wait ( self , * * kwargs ) : frame_id = self . next_frame_id kwargs . update ( dict ( frame_id = frame_id ) ) self . _send ( * * kwargs ) timeout = datetime . now ( ) + const . RX_TIMEOUT while datetime . now ( ) < timeout : try : frame = self . _rx_frames . pop ( frame_id ) raise_if_error ( frame ) return frame except KeyError : sleep ( 0.1 ) continue _LOGGER . exception ( "Did not receive response within configured timeout period." ) raise exceptions . ZigBeeResponseTimeout ( )
9,552
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L145-L164
[ "def", "_clean_characters", "(", "x", ")", ":", "if", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ":", "x", "=", "str", "(", "x", ")", "else", ":", "if", "not", "all", "(", "ord", "(", "char", ")", "<", "128", "for", "char", "in", "x", ")", ":", "msg", "=", "\"Found unicode character in input YAML (%s)\"", "%", "(", "x", ")", "raise", "ValueError", "(", "repr", "(", "msg", ")", ")", "for", "problem", "in", "[", "\" \"", ",", "\".\"", ",", "\"/\"", ",", "\"\\\\\"", ",", "\"[\"", ",", "\"]\"", ",", "\"&\"", ",", "\";\"", ",", "\"#\"", ",", "\"+\"", ",", "\":\"", ",", "\")\"", ",", "\"(\"", "]", ":", "x", "=", "x", ".", "replace", "(", "problem", ",", "\"_\"", ")", "return", "x" ]
Fetches and returns the value of the specified parameter .
def _get_parameter ( self , parameter , dest_addr_long = None ) : frame = self . _send_and_wait ( command = parameter , dest_addr_long = dest_addr_long ) return frame [ "parameter" ]
9,553
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L166-L172
[ "def", "_check_for_exact_apache", "(", "start", ",", "lines", ")", ":", "APACHE2", "=", "\"\"\"\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\nnot use this file except in compliance with the License. You may obtain\na copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\nLicense for the specific language governing permissions and limitations\nunder the License.\"\"\"", "# out of all the formatting I've seen, a 12 line version seems to be the", "# longest in the source tree. So just take the 12 lines starting with where", "# the Apache starting words were found, strip all the '#' and collapse the", "# spaces.", "content", "=", "''", ".", "join", "(", "lines", "[", "start", ":", "(", "start", "+", "12", ")", "]", ")", "content", "=", "re", ".", "sub", "(", "'\\#'", ",", "''", ",", "content", ")", "content", "=", "re", ".", "sub", "(", "'\\s+'", ",", "' '", ",", "content", ")", ".", "strip", "(", ")", "stripped_apache2", "=", "re", ".", "sub", "(", "'\\s+'", ",", "' '", ",", "APACHE2", ")", ".", "strip", "(", ")", "if", "stripped_apache2", "in", "content", ":", "return", "True", "else", ":", "print", "(", "\"<license>!=<apache2>:\\n'%s' !=\\n'%s'\"", "%", "(", "content", ",", "stripped_apache2", ")", ")", "return", "False" ]
Initiate a sample and return its data .
def get_sample ( self , dest_addr_long = None ) : frame = self . _send_and_wait ( command = b"IS" , dest_addr_long = dest_addr_long ) if "parameter" in frame : # @TODO: Is there always one value? Is it always a list? return frame [ "parameter" ] [ 0 ] return { }
9,554
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L188-L197
[ "def", "publish_server_opened", "(", "self", ",", "server_address", ",", "topology_id", ")", ":", "event", "=", "ServerOpeningEvent", "(", "server_address", ",", "topology_id", ")", "for", "subscriber", "in", "self", ".", "__server_listeners", ":", "try", ":", "subscriber", ".", "opened", "(", "event", ")", "except", "Exception", ":", "_handle_exception", "(", ")" ]
Fetches a sample and returns the boolean value of the requested digital pin .
def read_digital_pin ( self , pin_number , dest_addr_long = None ) : sample = self . get_sample ( dest_addr_long = dest_addr_long ) try : return sample [ const . DIGITAL_PINS [ pin_number ] ] except KeyError : raise exceptions . ZigBeePinNotConfigured ( "Pin %s (%s) is not configured as a digital input or output." % ( pin_number , const . IO_PIN_COMMANDS [ pin_number ] ) )
9,555
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L199-L210
[ "def", "merge_ownership_periods", "(", "mappings", ")", ":", "return", "valmap", "(", "lambda", "v", ":", "tuple", "(", "OwnershipPeriod", "(", "a", ".", "start", ",", "b", ".", "start", ",", "a", ".", "sid", ",", "a", ".", "value", ",", ")", "for", "a", ",", "b", "in", "sliding_window", "(", "2", ",", "concatv", "(", "sorted", "(", "v", ")", ",", "# concat with a fake ownership object to make the last", "# end date be max timestamp", "[", "OwnershipPeriod", "(", "pd", ".", "Timestamp", ".", "max", ".", "tz_localize", "(", "'utc'", ")", ",", "None", ",", "None", ",", "None", ",", ")", "]", ",", ")", ",", ")", ")", ",", "mappings", ",", ")" ]
Set a gpio pin setting .
def set_gpio_pin ( self , pin_number , setting , dest_addr_long = None ) : assert setting in const . GPIO_SETTINGS . values ( ) self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , parameter = setting . value , dest_addr_long = dest_addr_long )
9,556
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L237-L245
[ "def", "DeleteNotifications", "(", "self", ",", "session_ids", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "not", "session_ids", ":", "return", "for", "session_id", "in", "session_ids", ":", "if", "not", "isinstance", "(", "session_id", ",", "rdfvalue", ".", "SessionID", ")", ":", "raise", "RuntimeError", "(", "\"Can only delete notifications for rdfvalue.SessionIDs.\"", ")", "if", "start", "is", "None", ":", "start", "=", "0", "else", ":", "start", "=", "int", "(", "start", ")", "if", "end", "is", "None", ":", "end", "=", "self", ".", "frozen_timestamp", "or", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "for", "queue", ",", "ids", "in", "iteritems", "(", "collection", ".", "Group", "(", "session_ids", ",", "lambda", "session_id", ":", "session_id", ".", "Queue", "(", ")", ")", ")", ":", "queue_shards", "=", "self", ".", "GetAllNotificationShards", "(", "queue", ")", "self", ".", "data_store", ".", "DeleteNotifications", "(", "queue_shards", ",", "ids", ",", "start", ",", "end", ")" ]
Get a gpio pin setting .
def get_gpio_pin ( self , pin_number , dest_addr_long = None ) : frame = self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , dest_addr_long = dest_addr_long ) value = frame [ "parameter" ] return const . GPIO_SETTINGS [ value ]
9,557
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L247-L256
[ "def", "DeleteNotifications", "(", "self", ",", "session_ids", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "not", "session_ids", ":", "return", "for", "session_id", "in", "session_ids", ":", "if", "not", "isinstance", "(", "session_id", ",", "rdfvalue", ".", "SessionID", ")", ":", "raise", "RuntimeError", "(", "\"Can only delete notifications for rdfvalue.SessionIDs.\"", ")", "if", "start", "is", "None", ":", "start", "=", "0", "else", ":", "start", "=", "int", "(", "start", ")", "if", "end", "is", "None", ":", "end", "=", "self", ".", "frozen_timestamp", "or", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "for", "queue", ",", "ids", "in", "iteritems", "(", "collection", ".", "Group", "(", "session_ids", ",", "lambda", "session_id", ":", "session_id", ".", "Queue", "(", ")", ")", ")", ":", "queue_shards", "=", "self", ".", "GetAllNotificationShards", "(", "queue", ")", "self", ".", "data_store", ".", "DeleteNotifications", "(", "queue_shards", ",", "ids", ",", "start", ",", "end", ")" ]
Fetches the value of %V and returns it as volts .
def get_supply_voltage ( self , dest_addr_long = None ) : value = self . _get_parameter ( b"%V" , dest_addr_long = dest_addr_long ) return ( hex_to_int ( value ) * ( 1200 / 1024.0 ) ) / 1000
9,558
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L258-L263
[ "def", "_stream", "(", "self", ")", ":", "# pragma: no cover", "self", ".", "factory", ".", "protocol", "=", "LiveStream", "self", ".", "factory", ".", "datasift", "=", "{", "'on_open'", ":", "self", ".", "_on_open", ",", "'on_close'", ":", "self", ".", "_on_close", ",", "'on_message'", ":", "self", ".", "_on_message", ",", "'send_message'", ":", "None", "}", "if", "self", ".", "config", ".", "ssl", ":", "from", "twisted", ".", "internet", "import", "ssl", "options", "=", "ssl", ".", "optionsForClientTLS", "(", "hostname", "=", "WEBSOCKET_HOST", ")", "connectWS", "(", "self", ".", "factory", ",", "options", ")", "else", ":", "connectWS", "(", "self", ".", "factory", ")", "reactor", ".", "run", "(", ")" ]
Store new key in a new link at the end of the linked list
def add ( self , key ) : if key not in self . _map : self . _map [ key ] = link = _Link ( ) root = self . _root last = root . prev link . prev , link . next , link . key = last , root , key last . next = root . prev = weakref . proxy ( link )
9,559
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_set.py#L43-L50
[ "def", "from_binary", "(", "cls", ",", "pst", ",", "filename", ")", ":", "m", "=", "Matrix", ".", "from_binary", "(", "filename", ")", ".", "to_dataframe", "(", ")", "return", "ParameterEnsemble", ".", "from_dataframe", "(", "df", "=", "m", ",", "pst", "=", "pst", ")" ]
Find the index of item in the OrderedSet
def index ( self , item ) : for count , other in enumerate ( self ) : if item == other : return count raise ValueError ( '%r is not in OrderedSet' % ( item , ) )
9,560
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_set.py#L138-L154
[ "def", "get_changed_devices", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "payload", "=", "{", "}", "else", ":", "payload", "=", "{", "'timeout'", ":", "SUBSCRIPTION_WAIT", ",", "'minimumdelay'", ":", "SUBSCRIPTION_MIN_WAIT", "}", "payload", ".", "update", "(", "timestamp", ")", "# double the timeout here so requests doesn't timeout before vera", "payload", ".", "update", "(", "{", "'id'", ":", "'lu_sdata'", ",", "}", ")", "logger", ".", "debug", "(", "\"get_changed_devices() requesting payload %s\"", ",", "str", "(", "payload", ")", ")", "r", "=", "self", ".", "data_request", "(", "payload", ",", "TIMEOUT", "*", "2", ")", "r", ".", "raise_for_status", "(", ")", "# If the Vera disconnects before writing a full response (as lu_sdata", "# will do when interrupted by a Luup reload), the requests module will", "# happily return 200 with an empty string. So, test for empty response,", "# so we don't rely on the JSON parser to throw an exception.", "if", "r", ".", "text", "==", "\"\"", ":", "raise", "PyveraError", "(", "\"Empty response from Vera\"", ")", "# Catch a wide swath of what the JSON parser might throw, within", "# reason. Unfortunately, some parsers don't specifically return", "# json.decode.JSONDecodeError, but so far most seem to derive what", "# they do throw from ValueError, so that's helpful.", "try", ":", "result", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "ex", ":", "raise", "PyveraError", "(", "\"JSON decode error: \"", "+", "str", "(", "ex", ")", ")", "if", "not", "(", "type", "(", "result", ")", "is", "dict", "and", "'loadtime'", "in", "result", "and", "'dataversion'", "in", "result", ")", ":", "raise", "PyveraError", "(", "\"Unexpected/garbled response from Vera\"", ")", "# At this point, all good. 
Update timestamp and return change data.", "device_data", "=", "result", ".", "get", "(", "'devices'", ")", "timestamp", "=", "{", "'loadtime'", ":", "result", ".", "get", "(", "'loadtime'", ")", ",", "'dataversion'", ":", "result", ".", "get", "(", "'dataversion'", ")", "}", "return", "[", "device_data", ",", "timestamp", "]" ]
Get the value of a gauge at the specified time
def value ( self , key , timestamp = None , namespace = None ) : return self . make_context ( key = key , end = timestamp , namespace = namespace ) . value ( )
9,561
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L68-L71
[ "def", "removeAllEntitlements", "(", "self", ",", "appId", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"appId\"", ":", "appId", "}", "url", "=", "self", ".", "_url", "+", "\"/licenses/removeAllEntitlements\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Get an aggregate of all gauge data stored in the specified date range
def aggregate ( self , key , aggregate , start = None , end = None , namespace = None , percentile = None ) : return self . make_context ( key = key , aggregate = aggregate , start = start , end = end , namespace = namespace , percentile = percentile ) . aggregate ( )
9,562
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L73-L79
[ "def", "CloseCHM", "(", "self", ")", ":", "if", "self", ".", "filename", "is", "not", "None", ":", "chmlib", ".", "chm_close", "(", "self", ".", "file", ")", "self", ".", "file", "=", "None", "self", ".", "filename", "=", "''", "self", ".", "title", "=", "\"\"", "self", ".", "home", "=", "\"/\"", "self", ".", "index", "=", "None", "self", ".", "topics", "=", "None", "self", ".", "encoding", "=", "None" ]
Get a time series of gauge values
def value_series ( self , key , start = None , end = None , interval = None , namespace = None , cache = None ) : return self . make_context ( key = key , start = start , end = end , interval = interval , namespace = namespace , cache = cache ) . value_series ( )
9,563
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L81-L86
[ "def", "upload_cbn_dir", "(", "dir_path", ",", "manager", ")", ":", "t", "=", "time", ".", "time", "(", ")", "for", "jfg_path", "in", "os", ".", "listdir", "(", "dir_path", ")", ":", "if", "not", "jfg_path", ".", "endswith", "(", "'.jgf'", ")", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "jfg_path", ")", "log", ".", "info", "(", "'opening %s'", ",", "path", ")", "with", "open", "(", "path", ")", "as", "f", ":", "cbn_jgif_dict", "=", "json", ".", "load", "(", "f", ")", "graph", "=", "pybel", ".", "from_cbn_jgif", "(", "cbn_jgif_dict", ")", "out_path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "jfg_path", ".", "replace", "(", "'.jgf'", ",", "'.bel'", ")", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "o", ":", "pybel", ".", "to_bel", "(", "graph", ",", "o", ")", "strip_annotations", "(", "graph", ")", "enrich_pubmed_citations", "(", "manager", "=", "manager", ",", "graph", "=", "graph", ")", "pybel", ".", "to_database", "(", "graph", ",", "manager", "=", "manager", ")", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "'done in %.2f'", ",", "time", ".", "time", "(", ")", "-", "t", ")" ]
Get a time series of gauge aggregates
def aggregate_series ( self , key , aggregate , start = None , end = None , interval = None , namespace = None , cache = None , percentile = None ) : return self . make_context ( key = key , aggregate = aggregate , start = start , end = end , interval = interval , namespace = namespace , cache = cache , percentile = percentile ) . aggregate_series ( )
9,564
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L88-L95
[ "def", "_create_pure_shape", "(", "self", ",", "primitive_type", ",", "options", ",", "sizes", ",", "mass", ",", "precision", ")", ":", "lua_code", "=", "\"simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})\"", ".", "format", "(", "primitive_type", ",", "options", ",", "sizes", "[", "0", "]", ",", "sizes", "[", "1", "]", ",", "sizes", "[", "2", "]", ",", "mass", ",", "precision", "[", "0", "]", ",", "precision", "[", "1", "]", ")", "self", ".", "_inject_lua_code", "(", "lua_code", ")" ]
Get gauge keys
def keys ( self , prefix = None , limit = None , offset = None , namespace = None ) : return self . make_context ( prefix = prefix , limit = limit , offset = offset , namespace = namespace ) . keys ( )
9,565
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L97-L100
[ "def", "fi_ssn", "(", "ssn", ",", "allow_temporal_ssn", "=", "True", ")", ":", "if", "not", "ssn", ":", "return", "False", "result", "=", "re", ".", "match", "(", "ssn_pattern", ",", "ssn", ")", "if", "not", "result", ":", "return", "False", "gd", "=", "result", ".", "groupdict", "(", ")", "checksum", "=", "int", "(", "gd", "[", "'date'", "]", "+", "gd", "[", "'serial'", "]", ")", "return", "(", "int", "(", "gd", "[", "'serial'", "]", ")", ">=", "2", "and", "(", "allow_temporal_ssn", "or", "int", "(", "gd", "[", "'serial'", "]", ")", "<=", "899", ")", "and", "ssn_checkmarks", "[", "checksum", "%", "len", "(", "ssn_checkmarks", ")", "]", "==", "gd", "[", "'checksum'", "]", ")" ]
Get write statistics for the specified namespace and date range
def statistics ( self , start = None , end = None , namespace = None ) : return self . make_context ( start = start , end = end , namespace = namespace ) . statistics ( )
9,566
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L106-L109
[ "def", "reassignItem", "(", "self", ",", "targetUsername", ",", "targetFoldername", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"targetUsername\"", ":", "targetUsername", ",", "\"targetFoldername\"", ":", "targetFoldername", "}", "url", "=", "\"%s/reassign\"", "%", "self", ".", "root", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Create the necessary schema
def sync ( self ) : self . driver . create_schema ( ) self . driver . set_metadata ( { 'current_version' : Gauged . VERSION , 'initial_version' : Gauged . VERSION , 'block_size' : self . config . block_size , 'resolution' : self . config . resolution , 'created_at' : long ( time ( ) * 1000 ) } , replace = False )
9,567
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L111-L120
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
Create a new context for reading data
def make_context ( self , * * kwargs ) : self . check_schema ( ) return Context ( self . driver , self . config , * * kwargs )
9,568
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L130-L133
[ "def", "join", "(", "chord_root", ",", "quality", "=", "''", ",", "extensions", "=", "None", ",", "bass", "=", "''", ")", ":", "chord_label", "=", "chord_root", "if", "quality", "or", "extensions", ":", "chord_label", "+=", "\":%s\"", "%", "quality", "if", "extensions", ":", "chord_label", "+=", "\"(%s)\"", "%", "\",\"", ".", "join", "(", "extensions", ")", "if", "bass", "and", "bass", "!=", "'1'", ":", "chord_label", "+=", "\"/%s\"", "%", "bass", "validate_chord_label", "(", "chord_label", ")", "return", "chord_label" ]
Check the schema exists and matches configuration
def check_schema ( self ) : if self . valid_schema : return config = self . config metadata = self . metadata ( ) if 'current_version' not in metadata : raise GaugedSchemaError ( 'Gauged schema not found, ' 'try a gauged.sync()' ) if metadata [ 'current_version' ] != Gauged . VERSION : msg = 'The schema is version %s while this Gauged is version %s. ' msg += 'Try upgrading Gauged and/or running gauged_migrate.py' msg = msg % ( metadata [ 'current_version' ] , Gauged . VERSION ) raise GaugedVersionMismatchError ( msg ) expected_block_size = '%s/%s' % ( config . block_size , config . resolution ) block_size = '%s/%s' % ( metadata [ 'block_size' ] , metadata [ 'resolution' ] ) if block_size != expected_block_size : msg = 'Expected %s and got %s' % ( expected_block_size , block_size ) warn ( msg , GaugedBlockSizeMismatch ) self . valid_schema = True
9,569
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L135-L154
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
Returns rank of nodes that define the level each node is on in a topological sort . This is the same as the Graphviz dot rank .
def nx_dag_node_rank ( graph , nodes = None ) : import utool as ut source = list ( ut . nx_source_nodes ( graph ) ) [ 0 ] longest_paths = dict ( [ ( target , dag_longest_path ( graph , source , target ) ) for target in graph . nodes ( ) ] ) node_to_rank = ut . map_dict_vals ( len , longest_paths ) if nodes is None : return node_to_rank else : ranks = ut . dict_take ( node_to_rank , nodes ) return ranks
9,570
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L243-L278
[ "def", "_GenerateConvertedValues", "(", "self", ",", "converter", ",", "grr_messages", ")", ":", "for", "batch", "in", "collection", ".", "Batch", "(", "grr_messages", ",", "self", ".", "BATCH_SIZE", ")", ":", "metadata_items", "=", "self", ".", "_GetMetadataForClients", "(", "[", "gm", ".", "source", "for", "gm", "in", "batch", "]", ")", "batch_with_metadata", "=", "zip", "(", "metadata_items", ",", "[", "gm", ".", "payload", "for", "gm", "in", "batch", "]", ")", "for", "result", "in", "converter", ".", "BatchConvert", "(", "batch_with_metadata", ",", "token", "=", "self", ".", "token", ")", ":", "yield", "result" ]
Find all nodes with on paths between source and target .
def nx_all_nodes_between ( graph , source , target , data = False ) : import utool as ut if source is None : # assume there is a single source sources = list ( ut . nx_source_nodes ( graph ) ) assert len ( sources ) == 1 , ( 'specify source if there is not only one' ) source = sources [ 0 ] if target is None : # assume there is a single source sinks = list ( ut . nx_sink_nodes ( graph ) ) assert len ( sinks ) == 1 , ( 'specify sink if there is not only one' ) target = sinks [ 0 ] all_simple_paths = list ( nx . all_simple_paths ( graph , source , target ) ) nodes = sorted ( set . union ( * map ( set , all_simple_paths ) ) ) return nodes
9,571
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L281-L300
[ "def", "denormalize_volume", "(", "volume", ")", ":", "id", "=", "volume", ".", "get", "(", "'id'", ",", "None", ")", "res", "=", "dict", "(", ")", "res", ".", "update", "(", "volume", "[", "'metadata'", "]", ")", "denorm_attachments", "=", "list", "(", ")", "for", "a", "in", "volume", "[", "'attachments'", "]", ":", "denorm_attachments", ".", "append", "(", "Archivant", ".", "denormalize_attachment", "(", "a", ")", ")", "res", "[", "'_attachments'", "]", "=", "denorm_attachments", "return", "id", ",", "res" ]
Returns each path from source to target as a list of edges .
def nx_all_simple_edge_paths ( G , source , target , cutoff = None , keys = False , data = False ) : if cutoff is None : cutoff = len ( G ) - 1 if cutoff < 1 : return import utool as ut import six visited_nodes = [ source ] visited_edges = [ ] if G . is_multigraph ( ) : get_neighbs = ut . partial ( G . edges , keys = keys , data = data ) else : get_neighbs = ut . partial ( G . edges , data = data ) edge_stack = [ iter ( get_neighbs ( source ) ) ] while edge_stack : children_edges = edge_stack [ - 1 ] child_edge = six . next ( children_edges , None ) if child_edge is None : edge_stack . pop ( ) visited_nodes . pop ( ) if len ( visited_edges ) > 0 : visited_edges . pop ( ) elif len ( visited_nodes ) < cutoff : child_node = child_edge [ 1 ] if child_node == target : yield visited_edges + [ child_edge ] elif child_node not in visited_nodes : visited_nodes . append ( child_node ) visited_edges . append ( child_edge ) edge_stack . append ( iter ( get_neighbs ( child_node ) ) ) else : for edge in [ child_edge ] + list ( children_edges ) : if edge [ 1 ] == target : yield visited_edges + [ edge ] edge_stack . pop ( ) visited_nodes . pop ( ) if len ( visited_edges ) > 0 : visited_edges . pop ( )
9,572
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L303-L351
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Removes node attributes
def nx_delete_node_attr ( graph , name , nodes = None ) : if nodes is None : nodes = list ( graph . nodes ( ) ) removed = 0 # names = [name] if not isinstance(name, list) else name node_dict = nx_node_dict ( graph ) if isinstance ( name , list ) : for node in nodes : for name_ in name : try : del node_dict [ node ] [ name_ ] removed += 1 except KeyError : pass else : for node in nodes : try : del node_dict [ node ] [ name ] removed += 1 except KeyError : pass return removed
9,573
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L561-L601
[ "def", "merge", "(", "self", ",", "workdir", ",", "gswfk_file", ",", "dfpt_files", ",", "gkk_files", ",", "out_gkk", ",", "binascii", "=", "0", ")", ":", "raise", "NotImplementedError", "(", "\"This method should be tested\"", ")", "#out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)", "# We work with absolute paths.", "gswfk_file", "=", "os", ".", "path", ".", "absath", "(", "gswfk_file", ")", "dfpt_files", "=", "[", "os", ".", "path", ".", "abspath", "(", "s", ")", "for", "s", "in", "list_strings", "(", "dfpt_files", ")", "]", "gkk_files", "=", "[", "os", ".", "path", ".", "abspath", "(", "s", ")", "for", "s", "in", "list_strings", "(", "gkk_files", ")", "]", "print", "(", "\"Will merge %d 1WF files, %d GKK file in output %s\"", "%", "(", "len", "(", "dfpt_files", ")", ",", "len", "(", "gkk_files", ")", ",", "out_gkk", ")", ")", "if", "self", ".", "verbose", ":", "for", "i", ",", "f", "in", "enumerate", "(", "dfpt_files", ")", ":", "print", "(", "\" [%d] 1WF %s\"", "%", "(", "i", ",", "f", ")", ")", "for", "i", ",", "f", "in", "enumerate", "(", "gkk_files", ")", ":", "print", "(", "\" [%d] GKK %s\"", "%", "(", "i", ",", "f", ")", ")", "self", ".", "stdin_fname", ",", "self", ".", "stdout_fname", ",", "self", ".", "stderr_fname", "=", "map", "(", "os", ".", "path", ".", "join", ",", "3", "*", "[", "workdir", "]", ",", "[", "\"mrggkk.stdin\"", ",", "\"mrggkk.stdout\"", ",", "\"mrggkk.stderr\"", "]", ")", "inp", "=", "StringIO", "(", ")", "inp", ".", "write", "(", "out_gkk", "+", "\"\\n\"", ")", "# Name of the output file", "inp", ".", "write", "(", "str", "(", "binascii", ")", "+", "\"\\n\"", ")", "# Integer flag: 0 --> binary output, 1 --> ascii formatted output", "inp", ".", "write", "(", "gswfk_file", "+", "\"\\n\"", ")", "# Name of the groud state wavefunction file WF", "#dims = len(dfpt_files, gkk_files, ?)", "dims", "=", "\" \"", ".", "join", "(", "[", "str", "(", "d", ")", "for", "d", "in", "dims", "]", ")", "inp", ".", 
"write", "(", "dims", "+", "\"\\n\"", ")", "# Number of 1WF, of GKK files, and number of 1WF files in all the GKK files", "# Names of the 1WF files...", "for", "fname", "in", "dfpt_files", ":", "inp", ".", "write", "(", "fname", "+", "\"\\n\"", ")", "# Names of the GKK files...", "for", "fname", "in", "gkk_files", ":", "inp", ".", "write", "(", "fname", "+", "\"\\n\"", ")", "self", ".", "stdin_data", "=", "[", "s", "for", "s", "in", "inp", ".", "getvalue", "(", ")", "]", "with", "open", "(", "self", ".", "stdin_fname", ",", "\"w\"", ")", "as", "fh", ":", "fh", ".", "writelines", "(", "self", ".", "stdin_data", ")", "# Force OS to write data to disk.", "fh", ".", "flush", "(", ")", "os", ".", "fsync", "(", "fh", ".", "fileno", "(", ")", ")", "self", ".", "execute", "(", "workdir", ")", "return", "out_gkk" ]
Removes an attributes from specific edges in the graph
def nx_delete_edge_attr ( graph , name , edges = None ) : removed = 0 keys = [ name ] if not isinstance ( name , ( list , tuple ) ) else name if edges is None : if graph . is_multigraph ( ) : edges = graph . edges ( keys = True ) else : edges = graph . edges ( ) if graph . is_multigraph ( ) : for u , v , k in edges : for key_ in keys : try : del graph [ u ] [ v ] [ k ] [ key_ ] removed += 1 except KeyError : pass else : for u , v in edges : for key_ in keys : try : del graph [ u ] [ v ] [ key_ ] removed += 1 except KeyError : pass return removed
9,574
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L605-L663
[ "def", "get_stats_display_width", "(", "self", ",", "curse_msg", ",", "without_option", "=", "False", ")", ":", "try", ":", "if", "without_option", ":", "# Size without options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "(", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "if", "not", "i", "[", "'optional'", "]", "else", "\"\"", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "else", ":", "# Size with all options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'ERROR: Can not compute plugin width ({})'", ".", "format", "(", "e", ")", ")", "return", "0", "else", ":", "return", "c" ]
Generates attributes values of specific nodes
def nx_gen_node_values ( G , key , nodes , default = util_const . NoParam ) : node_dict = nx_node_dict ( G ) if default is util_const . NoParam : return ( node_dict [ n ] [ key ] for n in nodes ) else : return ( node_dict [ n ] . get ( key , default ) for n in nodes )
9,575
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L748-L756
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Improved generator version of nx . get_node_attributes
def nx_gen_node_attrs ( G , key , nodes = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if on_missing is None : on_missing = 'error' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' if nodes is None : nodes = G . nodes ( ) # Generate `node_data` nodes and data dictionary node_dict = nx_node_dict ( G ) if on_missing == 'error' : node_data = ( ( n , node_dict [ n ] ) for n in nodes ) elif on_missing == 'filter' : node_data = ( ( n , node_dict [ n ] ) for n in nodes if n in G ) elif on_missing == 'default' : node_data = ( ( n , node_dict . get ( n , { } ) ) for n in nodes ) else : raise KeyError ( 'on_missing={} must be error, filter or default' . format ( on_missing ) ) # Get `node_attrs` desired value out of dictionary if on_keyerr == 'error' : node_attrs = ( ( n , d [ key ] ) for n , d in node_data ) elif on_keyerr == 'filter' : node_attrs = ( ( n , d [ key ] ) for n , d in node_data if key in d ) elif on_keyerr == 'default' : node_attrs = ( ( n , d . get ( key , default ) ) for n , d in node_data ) else : raise KeyError ( 'on_keyerr={} must be error filter or default' . format ( on_keyerr ) ) return node_attrs
9,576
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L759-L877
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Generates attributes values of specific edges
def nx_gen_edge_values ( G , key , edges = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if edges is None : edges = G . edges ( ) if on_missing is None : on_missing = 'error' if on_keyerr is None : on_keyerr = 'default' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' # Generate `data_iter` edges and data dictionary if on_missing == 'error' : data_iter = ( G . adj [ u ] [ v ] for u , v in edges ) elif on_missing == 'default' : data_iter = ( G . adj [ u ] [ v ] if G . has_edge ( u , v ) else { } for u , v in edges ) else : raise KeyError ( 'on_missing={} must be error, filter or default' . format ( on_missing ) ) # Get `value_iter` desired value out of dictionary if on_keyerr == 'error' : value_iter = ( d [ key ] for d in data_iter ) elif on_keyerr == 'default' : value_iter = ( d . get ( key , default ) for d in data_iter ) else : raise KeyError ( 'on_keyerr={} must be error or default' . format ( on_keyerr ) ) return value_iter
9,577
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L880-L916
[ "def", "_index_audio_cmu", "(", "self", ",", "basename", "=", "None", ",", "replace_already_indexed", "=", "False", ")", ":", "self", ".", "_prepare_audio", "(", "basename", "=", "basename", ",", "replace_already_indexed", "=", "replace_already_indexed", ")", "for", "staging_audio_basename", "in", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", ":", "original_audio_name", "=", "''", ".", "join", "(", "staging_audio_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "[", ":", "-", "3", "]", "pocketsphinx_command", "=", "''", ".", "join", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ")", "try", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Now indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "output", "=", "subprocess", ".", "check_output", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ",", "universal_newlines", "=", "True", ")", ".", "split", "(", "'\\n'", ")", "str_timestamps_with_sil_conf", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "\" \"", ")", ",", "filter", "(", "None", ",", "output", "[", "1", ":", "]", ")", ")", ")", "# Timestamps are putted in a list of a single element. 
To match", "# Watson's output.", "self", ".", "__timestamps_unregulated", "[", "original_audio_name", "+", "\".wav\"", "]", "=", "[", "(", "self", ".", "_timestamp_extractor_cmu", "(", "staging_audio_basename", ",", "str_timestamps_with_sil_conf", ")", ")", "]", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Done indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "except", "OSError", "as", "e", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "e", ",", "\"The command was: {}\"", ".", "format", "(", "pocketsphinx_command", ")", ")", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "staging_audio_basename", ")", "]", "=", "e", "self", ".", "_timestamp_regulator", "(", ")", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Finished indexing procedure\"", ")" ]
Improved generator version of nx . get_edge_attributes
def nx_gen_edge_attrs ( G , key , edges = None , default = util_const . NoParam , on_missing = 'error' , on_keyerr = 'default' ) : if on_missing is None : on_missing = 'error' if default is util_const . NoParam and on_keyerr == 'default' : on_keyerr = 'error' if edges is None : if G . is_multigraph ( ) : raise NotImplementedError ( '' ) # uvk_iter = G.edges(keys=True) else : edges = G . edges ( ) # Generate `edge_data` edges and data dictionary if on_missing == 'error' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) for u , v in edges ) elif on_missing == 'filter' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) for u , v in edges if G . has_edge ( u , v ) ) elif on_missing == 'default' : edge_data = ( ( ( u , v ) , G . adj [ u ] [ v ] ) if G . has_edge ( u , v ) else ( ( u , v ) , { } ) for u , v in edges ) else : raise KeyError ( 'on_missing={}' . format ( on_missing ) ) # Get `edge_attrs` desired value out of dictionary if on_keyerr == 'error' : edge_attrs = ( ( e , d [ key ] ) for e , d in edge_data ) elif on_keyerr == 'filter' : edge_attrs = ( ( e , d [ key ] ) for e , d in edge_data if key in d ) elif on_keyerr == 'default' : edge_attrs = ( ( e , d . get ( key , default ) ) for e , d in edge_data ) else : raise KeyError ( 'on_keyerr={}' . format ( on_keyerr ) ) return edge_attrs
9,578
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L923-L989
[ "def", "_return_parsed_timezone_results", "(", "result", ",", "timezones", ",", "box", ",", "tz", ",", "name", ")", ":", "if", "tz", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass a tz argument when \"", "\"parsing strings with timezone \"", "\"information.\"", ")", "tz_results", "=", "np", ".", "array", "(", "[", "Timestamp", "(", "res", ")", ".", "tz_localize", "(", "zone", ")", "for", "res", ",", "zone", "in", "zip", "(", "result", ",", "timezones", ")", "]", ")", "if", "box", ":", "from", "pandas", "import", "Index", "return", "Index", "(", "tz_results", ",", "name", "=", "name", ")", "return", "tz_results" ]
A minimum weight component is an MST + all negative edges
def nx_minimum_weight_component ( graph , weight = 'weight' ) : mwc = nx . minimum_spanning_tree ( graph , weight = weight ) # negative edges only reduce the total weight neg_edges = ( e for e , w in nx_gen_edge_attrs ( graph , weight ) if w < 0 ) mwc . add_edges_from ( neg_edges ) return mwc
9,579
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1037-L1043
[ "def", "extract_secrets_from_android_rooted", "(", "adb_path", "=", "'adb'", ")", ":", "data", "=", "subprocess", ".", "check_output", "(", "[", "adb_path", ",", "'shell'", ",", "'su'", ",", "'-c'", ",", "\"'cat /data/data/com.valvesoftware.android.steam.community/files/Steamguard*'\"", "]", ")", "# When adb daemon is not running, `adb` will print a couple of lines before our data.", "# The data doesn't have new lines and its always on the last line.", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", "[", "-", "1", "]", "if", "data", "[", "0", "]", "!=", "\"{\"", ":", "raise", "RuntimeError", "(", "\"Got invalid data: %s\"", "%", "repr", "(", "data", ")", ")", "return", "{", "int", "(", "x", "[", "'steamid'", "]", ")", ":", "x", "for", "x", "in", "map", "(", "json", ".", "loads", ",", "data", ".", "replace", "(", "\"}{\"", ",", "'}|||||{'", ")", ".", "split", "(", "'|||||'", ")", ")", "}" ]
changes colors to hex strings on graph attrs
def nx_ensure_agraph_color ( graph ) : from plottool import color_funcs import plottool as pt #import six def _fix_agraph_color ( data ) : try : orig_color = data . get ( 'color' , None ) alpha = data . get ( 'alpha' , None ) color = orig_color if color is None and alpha is not None : color = [ 0 , 0 , 0 ] if color is not None : color = pt . ensure_nonhex_color ( color ) #if isinstance(color, np.ndarray): # color = color.tolist() color = list ( color_funcs . ensure_base255 ( color ) ) if alpha is not None : if len ( color ) == 3 : color += [ int ( alpha * 255 ) ] else : color [ 3 ] = int ( alpha * 255 ) color = tuple ( color ) if len ( color ) == 3 : data [ 'color' ] = '#%02x%02x%02x' % color else : data [ 'color' ] = '#%02x%02x%02x%02x' % color except Exception as ex : import utool as ut ut . printex ( ex , keys = [ 'color' , 'orig_color' , 'data' ] ) raise for node , node_data in graph . nodes ( data = True ) : data = node_data _fix_agraph_color ( data ) for u , v , edge_data in graph . edges ( data = True ) : data = edge_data _fix_agraph_color ( data )
9,580
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1075-L1113
[ "def", "checkIfAvailable", "(", "self", ",", "dateTime", "=", "timezone", ".", "now", "(", ")", ")", ":", "return", "(", "self", ".", "startTime", ">=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__closeBookingDays'", ")", ")", "and", "self", ".", "startTime", "<=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__openBookingDays'", ")", ")", "and", "not", "self", ".", "eventRegistration", "and", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "available", "or", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "tentative", "and", "getattr", "(", "getattr", "(", "self", ".", "temporaryEventRegistration", ",", "'registration'", ",", "None", ")", ",", "'expirationDate'", ",", "timezone", ".", "now", "(", ")", ")", "<=", "timezone", ".", "now", "(", ")", ")", ")", ")" ]
Finds the longest path in a dag between two nodes
def dag_longest_path ( graph , source , target ) : if source == target : return [ source ] allpaths = nx . all_simple_paths ( graph , source , target ) longest_path = [ ] for l in allpaths : if len ( l ) > len ( longest_path ) : longest_path = l return longest_path
9,581
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1126-L1137
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
strips out everything but connectivity
def simplify_graph ( graph ) : import utool as ut nodes = sorted ( list ( graph . nodes ( ) ) ) node_lookup = ut . make_index_lookup ( nodes ) if graph . is_multigraph ( ) : edges = list ( graph . edges ( keys = True ) ) else : edges = list ( graph . edges ( ) ) new_nodes = ut . take ( node_lookup , nodes ) if graph . is_multigraph ( ) : new_edges = [ ( node_lookup [ e [ 0 ] ] , node_lookup [ e [ 1 ] ] , e [ 2 ] , { } ) for e in edges ] else : new_edges = [ ( node_lookup [ e [ 0 ] ] , node_lookup [ e [ 1 ] ] ) for e in edges ] cls = graph . __class__ new_graph = cls ( ) new_graph . add_nodes_from ( new_nodes ) new_graph . add_edges_from ( new_edges ) return new_graph
9,582
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1515-L1565
[ "def", "parse_options", "(", "args", "=", "None", ",", "config", "=", "True", ",", "rootdir", "=", "CURDIR", ",", "*", "*", "overrides", ")", ":", "# noqa", "args", "=", "args", "or", "[", "]", "# Parse args from command string", "options", "=", "PARSER", ".", "parse_args", "(", "args", ")", "options", ".", "file_params", "=", "dict", "(", ")", "options", ".", "linters_params", "=", "dict", "(", ")", "# Compile options from ini", "if", "config", ":", "cfg", "=", "get_config", "(", "str", "(", "options", ".", "options", ")", ",", "rootdir", "=", "rootdir", ")", "for", "opt", ",", "val", "in", "cfg", ".", "default", ".", "items", "(", ")", ":", "LOGGER", ".", "info", "(", "'Find option %s (%s)'", ",", "opt", ",", "val", ")", "passed_value", "=", "getattr", "(", "options", ",", "opt", ",", "_Default", "(", ")", ")", "if", "isinstance", "(", "passed_value", ",", "_Default", ")", ":", "if", "opt", "==", "'paths'", ":", "val", "=", "val", ".", "split", "(", ")", "if", "opt", "==", "'skip'", ":", "val", "=", "fix_pathname_sep", "(", "val", ")", "setattr", "(", "options", ",", "opt", ",", "_Default", "(", "val", ")", ")", "# Parse file related options", "for", "name", ",", "opts", "in", "cfg", ".", "sections", ".", "items", "(", ")", ":", "if", "name", "==", "cfg", ".", "default_section", ":", "continue", "if", "name", ".", "startswith", "(", "'pylama'", ")", ":", "name", "=", "name", "[", "7", ":", "]", "if", "name", "in", "LINTERS", ":", "options", ".", "linters_params", "[", "name", "]", "=", "dict", "(", "opts", ")", "continue", "mask", "=", "re", ".", "compile", "(", "fnmatch", ".", "translate", "(", "fix_pathname_sep", "(", "name", ")", ")", ")", "options", ".", "file_params", "[", "mask", "]", "=", "dict", "(", "opts", ")", "# Override options", "_override_options", "(", "options", ",", "*", "*", "overrides", ")", "# Postprocess options", "for", "name", "in", "options", ".", "__dict__", ":", "value", "=", "getattr", "(", "options", ",", "name", ")", "if", 
"isinstance", "(", "value", ",", "_Default", ")", ":", "setattr", "(", "options", ",", "name", ",", "process_value", "(", "name", ",", "value", ".", "value", ")", ")", "if", "options", ".", "concurrent", "and", "'pylint'", "in", "options", ".", "linters", ":", "LOGGER", ".", "warning", "(", "'Can\\'t parse code asynchronously with pylint enabled.'", ")", "options", ".", "concurrent", "=", "False", "return", "options" ]
Creates a networkx graph that is a subgraph of G defined by the list of edges in edge_list .
def subgraph_from_edges ( G , edge_list , ref_back = True ) : # TODO: support multi-di-graph sub_nodes = list ( { y for x in edge_list for y in x [ 0 : 2 ] } ) #edge_list_no_data = [edge[0:2] for edge in edge_list] multi_edge_list = [ edge [ 0 : 3 ] for edge in edge_list ] if ref_back : G_sub = G . subgraph ( sub_nodes ) for edge in G_sub . edges ( keys = True ) : if edge not in multi_edge_list : G_sub . remove_edge ( * edge ) else : G_sub = G . subgraph ( sub_nodes ) . copy ( ) for edge in G_sub . edges ( keys = True ) : if edge not in multi_edge_list : G_sub . remove_edge ( * edge ) return G_sub
9,583
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1568-L1603
[ "def", "filename_unsafe", "(", "self", ")", ":", "if", "'filename*'", "in", "self", ".", "assocs", ":", "return", "self", ".", "assocs", "[", "'filename*'", "]", ".", "string", "elif", "'filename'", "in", "self", ".", "assocs", ":", "# XXX Reject non-ascii (parsed via qdtext) here?", "return", "self", ".", "assocs", "[", "'filename'", "]", "elif", "self", ".", "location", "is", "not", "None", ":", "return", "posixpath", ".", "basename", "(", "self", ".", "location_path", ".", "rstrip", "(", "'/'", ")", ")" ]
r Returns specific paths along multi - edges from the source to this table . Multipaths are identified by edge keys .
def all_multi_paths ( graph , source , target , data = False ) : path_multiedges = list ( nx_all_simple_edge_paths ( graph , source , target , keys = True , data = data ) ) return path_multiedges
9,584
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1613-L1666
[ "def", "main", "(", "args", ")", ":", "print_in_box", "(", "'Validating submission '", "+", "args", ".", "submission_filename", ")", "random", ".", "seed", "(", ")", "temp_dir", "=", "args", ".", "temp_dir", "delete_temp_dir", "=", "False", "if", "not", "temp_dir", ":", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logging", ".", "info", "(", "'Created temporary directory: %s'", ",", "temp_dir", ")", "delete_temp_dir", "=", "True", "validator", "=", "submission_validator_lib", ".", "SubmissionValidator", "(", "temp_dir", ",", "args", ".", "use_gpu", ")", "if", "validator", ".", "validate_submission", "(", "args", ".", "submission_filename", ",", "args", ".", "submission_type", ")", ":", "print_in_box", "(", "'Submission is VALID!'", ")", "else", ":", "print_in_box", "(", "'Submission is INVALID, see log messages for details'", ")", "if", "delete_temp_dir", ":", "logging", ".", "info", "(", "'Deleting temporary directory: %s'", ",", "temp_dir", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "'-rf'", ",", "temp_dir", "]", ")" ]
Produce edges in a breadth - first - search starting at source but only return nodes that satisfiy a condition and only iterate past a node if it satisfies a different condition .
def bfs_conditional ( G , source , reverse = False , keys = True , data = False , yield_nodes = True , yield_if = None , continue_if = None , visited_nodes = None , yield_source = False ) : if reverse and hasattr ( G , 'reverse' ) : G = G . reverse ( ) if isinstance ( G , nx . Graph ) : neighbors = functools . partial ( G . edges , data = data ) else : neighbors = functools . partial ( G . edges , keys = keys , data = data ) queue = collections . deque ( [ ] ) if visited_nodes is None : visited_nodes = set ( [ ] ) else : visited_nodes = set ( visited_nodes ) if source not in visited_nodes : if yield_nodes and yield_source : yield source visited_nodes . add ( source ) new_edges = neighbors ( source ) if isinstance ( new_edges , list ) : new_edges = iter ( new_edges ) queue . append ( ( source , new_edges ) ) while queue : parent , edges = queue [ 0 ] for edge in edges : child = edge [ 1 ] if yield_nodes : if child not in visited_nodes : if yield_if is None or yield_if ( G , child , edge ) : yield child else : if yield_if is None or yield_if ( G , child , edge ) : yield edge if child not in visited_nodes : visited_nodes . add ( child ) # Add new children to queue if the condition is satisfied if continue_if is None or continue_if ( G , child , edge ) : new_edges = neighbors ( child ) if isinstance ( new_edges , list ) : new_edges = iter ( new_edges ) queue . append ( ( child , new_edges ) ) queue . popleft ( )
9,585
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1744-L1826
[ "def", "from_raw_message", "(", "cls", ",", "rawmessage", ")", ":", "empty", "=", "cls", ".", "create_empty", "(", "0x00", ")", "userdata_dict", "=", "cls", ".", "normalize", "(", "empty", ",", "rawmessage", ")", "return", "Userdata", "(", "userdata_dict", ")" ]
Colors edges and nodes by nid
def color_nodes ( graph , labelattr = 'label' , brightness = .878 , outof = None , sat_adjust = None ) : import plottool as pt import utool as ut node_to_lbl = nx . get_node_attributes ( graph , labelattr ) unique_lbls = sorted ( set ( node_to_lbl . values ( ) ) ) ncolors = len ( unique_lbls ) if outof is None : if ( ncolors ) == 1 : unique_colors = [ pt . LIGHT_BLUE ] elif ( ncolors ) == 2 : # https://matplotlib.org/examples/color/named_colors.html unique_colors = [ 'royalblue' , 'orange' ] unique_colors = list ( map ( pt . color_funcs . ensure_base01 , unique_colors ) ) else : unique_colors = pt . distinct_colors ( ncolors , brightness = brightness ) else : unique_colors = pt . distinct_colors ( outof , brightness = brightness ) if sat_adjust : unique_colors = [ pt . color_funcs . adjust_hsv_of_rgb ( c , sat_adjust = sat_adjust ) for c in unique_colors ] # Find edges and aids strictly between two nids if outof is None : lbl_to_color = ut . dzip ( unique_lbls , unique_colors ) else : gray = pt . color_funcs . ensure_base01 ( 'lightgray' ) unique_colors = [ gray ] + unique_colors offset = max ( 1 , min ( unique_lbls ) ) - 1 node_to_lbl = ut . map_vals ( lambda nid : max ( 0 , nid - offset ) , node_to_lbl ) lbl_to_color = ut . dzip ( range ( outof + 1 ) , unique_colors ) node_to_color = ut . map_vals ( lbl_to_color , node_to_lbl ) nx . set_node_attributes ( graph , name = 'color' , values = node_to_color ) ut . nx_ensure_agraph_color ( graph )
9,586
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1829-L1865
[ "def", "configure_db", "(", "app", ")", ":", "models", ".", "db", ".", "init_app", "(", "app", ")", "log", "=", "logging", ".", "getLogger", "(", "'ara.webapp.configure_db'", ")", "log", ".", "debug", "(", "'Setting up database...'", ")", "if", "app", ".", "config", ".", "get", "(", "'ARA_AUTOCREATE_DATABASE'", ")", ":", "with", "app", ".", "app_context", "(", ")", ":", "migrations", "=", "app", ".", "config", "[", "'DB_MIGRATIONS'", "]", "flask_migrate", ".", "Migrate", "(", "app", ",", "models", ".", "db", ",", "directory", "=", "migrations", ")", "config", "=", "app", ".", "extensions", "[", "'migrate'", "]", ".", "migrate", ".", "get_config", "(", "migrations", ")", "# Verify if the database tables have been created at all", "inspector", "=", "Inspector", ".", "from_engine", "(", "models", ".", "db", ".", "engine", ")", "if", "len", "(", "inspector", ".", "get_table_names", "(", ")", ")", "==", "0", ":", "log", ".", "info", "(", "'Initializing new DB from scratch'", ")", "flask_migrate", ".", "upgrade", "(", "directory", "=", "migrations", ")", "# Get current alembic head revision", "script", "=", "ScriptDirectory", ".", "from_config", "(", "config", ")", "head", "=", "script", ".", "get_current_head", "(", ")", "# Get current revision, if available", "connection", "=", "models", ".", "db", ".", "engine", ".", "connect", "(", ")", "context", "=", "MigrationContext", ".", "configure", "(", "connection", ")", "current", "=", "context", ".", "get_current_revision", "(", ")", "if", "not", "current", ":", "log", ".", "info", "(", "'Unstable DB schema, stamping original revision'", ")", "flask_migrate", ".", "stamp", "(", "directory", "=", "migrations", ",", "revision", "=", "'da9459a1f71c'", ")", "if", "head", "!=", "current", ":", "log", ".", "info", "(", "'DB schema out of date, upgrading'", ")", "flask_migrate", ".", "upgrade", "(", "directory", "=", "migrations", ")" ]
Find approximate minimum number of connected components possible Each edge represents that two nodes must be separated
def approx_min_num_components ( nodes , negative_edges ) : import utool as ut num = 0 g_neg = nx . Graph ( ) g_neg . add_nodes_from ( nodes ) g_neg . add_edges_from ( negative_edges ) # Collapse all nodes with degree 0 if nx . __version__ . startswith ( '2' ) : deg0_nodes = [ n for n , d in g_neg . degree ( ) if d == 0 ] else : deg0_nodes = [ n for n , d in g_neg . degree_iter ( ) if d == 0 ] for u , v in ut . itertwo ( deg0_nodes ) : nx_contracted_nodes ( g_neg , v , u , inplace = True ) # g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False) # Initialize unused nodes to be everything unused = list ( g_neg . nodes ( ) ) # complement of the graph contains all possible positive edges g_pos = nx . complement ( g_neg ) if False : from networkx . algorithms . approximation import clique maxiset , cliques = clique . clique_removal ( g_pos ) num = len ( cliques ) return num # Iterate until we have used all nodes while len ( unused ) > 0 : # Seed a new "minimum component" num += 1 # Grab a random unused node n1 #idx1 = np.random.randint(0, len(unused)) idx1 = 0 n1 = unused [ idx1 ] unused . remove ( n1 ) neigbs = list ( g_pos . neighbors ( n1 ) ) neigbs = ut . isect ( neigbs , unused ) while len ( neigbs ) > 0 : # Find node n2, that n1 could be connected to #idx2 = np.random.randint(0, len(neigbs)) idx2 = 0 n2 = neigbs [ idx2 ] unused . remove ( n2 ) # Collapse negative information of n1 and n2 g_neg = nx . contracted_nodes ( g_neg , n1 , n2 ) # Compute new possible positive edges g_pos = nx . complement ( g_neg ) # Iterate until n1 has no more possible connections neigbs = list ( g_pos . neighbors ( n1 ) ) neigbs = ut . isect ( neigbs , unused ) print ( 'num = %r' % ( num , ) ) return num
9,587
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L2034-L2122
[ "def", "get_game_logs", "(", "self", ")", ":", "logs", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'rowSet'", "]", "headers", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'headers'", "]", "df", "=", "pd", ".", "DataFrame", "(", "logs", ",", "columns", "=", "headers", ")", "df", ".", "GAME_DATE", "=", "pd", ".", "to_datetime", "(", "df", ".", "GAME_DATE", ")", "return", "df" ]
Given a function initial conditions step size and end value this will calculate an unforced system . The default start time is t = 0 . 0 but this can be changed .
def solve ( self , y , h , t_end ) : ts = [ ] ys = [ ] yi = y ti = 0.0 while ti < t_end : ts . append ( ti ) yi = self . step ( yi , None , ti , h ) ys . append ( yi ) ti += h return ts , ys
9,588
https://github.com/walchko/pyrk/blob/f75dce843e795343d37cfe20d780989f56f0c418/pyrk/pyrk.py#L23-L42
[ "def", "_get_ref_lengths", "(", "self", ")", ":", "sam_reader", "=", "pysam", ".", "Samfile", "(", "self", ".", "bam", ",", "\"rb\"", ")", "return", "dict", "(", "zip", "(", "sam_reader", ".", "references", ",", "sam_reader", ".", "lengths", ")", ")" ]
This is called by solve but can be called by the user who wants to run through an integration with a control force .
def step ( self , y , u , t , h ) : k1 = h * self . func ( t , y , u ) k2 = h * self . func ( t + .5 * h , y + .5 * h * k1 , u ) k3 = h * self . func ( t + .5 * h , y + .5 * h * k2 , u ) k4 = h * self . func ( t + h , y + h * k3 , u ) return y + ( k1 + 2 * k2 + 2 * k3 + k4 ) / 6.0
9,589
https://github.com/walchko/pyrk/blob/f75dce843e795343d37cfe20d780989f56f0c418/pyrk/pyrk.py#L44-L58
[ "def", "_update_metadata_for_video", "(", "self", ",", "metadata_href", ",", "video", ")", ":", "current_metadata", "=", "self", ".", "clarify_client", ".", "get_metadata", "(", "metadata_href", ")", "cur_data", "=", "current_metadata", ".", "get", "(", "'data'", ")", "if", "cur_data", ".", "get", "(", "'updated_at'", ")", "!=", "video", "[", "'updated_at'", "]", ":", "self", ".", "log", "(", "'Updating metadata for video {0}'", ".", "format", "(", "video", "[", "'id'", "]", ")", ")", "if", "not", "self", ".", "dry_run", ":", "metadata", "=", "self", ".", "_metadata_from_video", "(", "video", ")", "self", ".", "clarify_client", ".", "update_metadata", "(", "metadata_href", ",", "metadata", "=", "metadata", ")", "self", ".", "sync_stats", "[", "'updated'", "]", "+=", "1" ]
Best peptide for each protein in a table
def generate_proteins ( pepfn , proteins , pepheader , scorecol , minlog , higherbetter = True , protcol = False ) : protein_peptides = { } if minlog : higherbetter = False if not protcol : protcol = peptabledata . HEADER_MASTERPROTEINS for psm in reader . generate_tsv_psms ( pepfn , pepheader ) : p_acc = psm [ protcol ] if ';' in p_acc : continue protein_peptides = evaluate_peptide ( protein_peptides , psm , p_acc , higherbetter , scorecol , fncol = False ) if minlog : try : nextbestscore = min ( [ pep [ 'score' ] for pep in protein_peptides . values ( ) if pep [ 'score' ] > 0 ] ) except ValueError : import sys sys . stderr . write ( 'Cannot find score of type {} which is above 0. ' 'Only scores above zero can have a -log value. ' 'Exiting.' . format ( scorecol ) ) sys . exit ( 1 ) nextbestscore = - log ( nextbestscore , 10 ) for protein in proteins : try : peptide = protein_peptides [ protein [ prottabledata . HEADER_PROTEIN ] ] except KeyError : print ( 'WARNING - protein {} not found in peptide ' 'table' . format ( protein [ prottabledata . HEADER_PROTEIN ] ) ) peptide = { 'score' : 'NA' } if minlog and peptide [ 'score' ] != 'NA' : peptide [ 'score' ] = log_score ( peptide [ 'score' ] , nextbestscore ) protein [ prottabledata . HEADER_QSCORE ] = str ( peptide [ 'score' ] ) yield protein
9,590
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/bestpeptide.py#L8-L46
[ "def", "initialize_communities_bucket", "(", ")", ":", "bucket_id", "=", "UUID", "(", "current_app", ".", "config", "[", "'COMMUNITIES_BUCKET_UUID'", "]", ")", "if", "Bucket", ".", "query", ".", "get", "(", "bucket_id", ")", ":", "raise", "FilesException", "(", "\"Bucket with UUID {} already exists.\"", ".", "format", "(", "bucket_id", ")", ")", "else", ":", "storage_class", "=", "current_app", ".", "config", "[", "'FILES_REST_DEFAULT_STORAGE_CLASS'", "]", "location", "=", "Location", ".", "get_default", "(", ")", "bucket", "=", "Bucket", "(", "id", "=", "bucket_id", ",", "location", "=", "location", ",", "default_storage_class", "=", "storage_class", ")", "db", ".", "session", ".", "add", "(", "bucket", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Adds a typed child object to the simulation spec .
def add ( self , child ) : if isinstance ( child , Run ) : self . add_run ( child ) elif isinstance ( child , Record ) : self . add_record ( child ) elif isinstance ( child , EventRecord ) : self . add_event_record ( child ) elif isinstance ( child , DataDisplay ) : self . add_data_display ( child ) elif isinstance ( child , DataWriter ) : self . add_data_writer ( child ) elif isinstance ( child , EventWriter ) : self . add_event_writer ( child ) else : raise ModelError ( 'Unsupported child element' )
9,591
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/simulation.py#L345-L365
[ "def", "list_container_instance_groups", "(", "access_token", ",", "subscription_id", ",", "resource_group", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerInstance/ContainerGroups'", ",", "'?api-version='", ",", "CONTAINER_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
Wrapper for fetching details of game by ID
def fetch ( self , id_ , return_fields = None ) : game_params = { "id" : id_ } if return_fields is not None : self . _validate_return_fields ( return_fields ) field_list = "," . join ( return_fields ) game_params [ "field_list" ] = field_list response = self . _query ( game_params , direct = True ) return response
9,592
https://github.com/steveYeah/PyBomb/blob/54045d74e642f8a1c4366c24bd6a330ae3da6257/pybomb/clients/game_client.py#L57-L76
[ "def", "compile_resource", "(", "resource", ")", ":", "return", "re", ".", "compile", "(", "\"^\"", "+", "trim_resource", "(", "re", ".", "sub", "(", "r\":(\\w+)\"", ",", "r\"(?P<\\1>[\\w-]+?)\"", ",", "resource", ")", ")", "+", "r\"(\\?(?P<querystring>.*))?$\"", ")" ]
Given a list of option names this returns a list of dicts defined in all_options and self . shared_options . These can then be used to populate the argparser with
def define_options ( self , names , parser_options = None ) : def copy_option ( options , name ) : return { k : v for k , v in options [ name ] . items ( ) } if parser_options is None : parser_options = { } options = { } for name in names : try : option = copy_option ( parser_options , name ) except KeyError : option = copy_option ( shared_options , name ) try : options . update ( { option [ 'clarg' ] : option } ) except TypeError : options . update ( { option [ 'clarg' ] [ 0 ] : option } ) return options
9,593
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/base.py#L30-L48
[ "def", "_at", "(", "self", ",", "t", ")", ":", "rITRF", ",", "vITRF", ",", "error", "=", "self", ".", "ITRF_position_velocity_error", "(", "t", ")", "rGCRS", ",", "vGCRS", "=", "ITRF_to_GCRS2", "(", "t", ",", "rITRF", ",", "vITRF", ")", "return", "rGCRS", ",", "vGCRS", ",", "rGCRS", ",", "error" ]
Returns this programs current memory usage in bytes
def current_memory_usage ( ) : import psutil proc = psutil . Process ( os . getpid ( ) ) #meminfo = proc.get_memory_info() meminfo = proc . memory_info ( ) rss = meminfo [ 0 ] # Resident Set Size / Mem Usage vms = meminfo [ 1 ] # Virtual Memory Size / VM Size # NOQA return rss
9,594
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_resources.py#L120-L130
[ "def", "_read_columns_file", "(", "f", ")", ":", "try", ":", "columns", "=", "json", ".", "loads", "(", "open", "(", "f", ",", "'r'", ")", ".", "read", "(", ")", ",", "object_pairs_hook", "=", "collections", ".", "OrderedDict", ")", "except", "Exception", "as", "err", ":", "raise", "InvalidColumnsFileError", "(", "\"There was an error while reading {0}: {1}\"", ".", "format", "(", "f", ",", "err", ")", ")", "# Options are not supported yet:", "if", "'__options'", "in", "columns", ":", "del", "columns", "[", "'__options'", "]", "return", "columns" ]
Returns the number of cpus with utilization less than thresh percent
def num_unused_cpus ( thresh = 10 ) : import psutil cpu_usage = psutil . cpu_percent ( percpu = True ) return sum ( [ p < thresh for p in cpu_usage ] )
9,595
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_resources.py#L185-L191
[ "def", "get_api_v1_info", "(", "api_prefix", ")", ":", "websocket_root", "=", "base_ws_uri", "(", ")", "+", "EVENTS_ENDPOINT", "docs_url", "=", "[", "'https://docs.bigchaindb.com/projects/server/en/v'", ",", "version", ".", "__version__", ",", "'/http-client-server-api.html'", ",", "]", "return", "{", "'docs'", ":", "''", ".", "join", "(", "docs_url", ")", ",", "'transactions'", ":", "'{}transactions/'", ".", "format", "(", "api_prefix", ")", ",", "'blocks'", ":", "'{}blocks/'", ".", "format", "(", "api_prefix", ")", ",", "'assets'", ":", "'{}assets/'", ".", "format", "(", "api_prefix", ")", ",", "'outputs'", ":", "'{}outputs/'", ".", "format", "(", "api_prefix", ")", ",", "'streams'", ":", "websocket_root", ",", "'metadata'", ":", "'{}metadata/'", ".", "format", "(", "api_prefix", ")", ",", "'validators'", ":", "'{}validators'", ".", "format", "(", "api_prefix", ")", ",", "}" ]
For each master protein we generate the protein group proteins complete with sequences psm_ids and scores . Master proteins are included in this group .
def get_protein_group_content ( pgmap , master ) : # first item (0) is only a placeholder so the lookup.INDEX things get the # correct number. Would be nice with a solution, but the INDEXes were # originally made for mzidtsv protein group adding. pg_content = [ [ 0 , master , protein , len ( peptides ) , len ( [ psm for pgpsms in peptides . values ( ) for psm in pgpsms ] ) , sum ( [ psm [ 1 ] for pgpsms in peptides . values ( ) for psm in pgpsms ] ) , # score next ( iter ( next ( iter ( peptides . values ( ) ) ) ) ) [ 3 ] , # coverage next ( iter ( next ( iter ( peptides . values ( ) ) ) ) ) [ 2 ] , # evid level ] for protein , peptides in pgmap . items ( ) ] return pg_content
9,596
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/proteingrouping.py#L180-L200
[ "def", "transfer_list", "(", "request", ",", "detailed", "=", "True", ",", "search_opts", "=", "None", ")", ":", "c_client", "=", "cinderclient", "(", "request", ")", "try", ":", "return", "[", "VolumeTransfer", "(", "v", ")", "for", "v", "in", "c_client", ".", "transfers", ".", "list", "(", "detailed", "=", "detailed", ",", "search_opts", "=", "search_opts", ")", "]", "except", "cinder_exception", ".", "Forbidden", "as", "error", ":", "LOG", ".", "error", "(", "error", ")", "return", "[", "]" ]
These fields are currently not pool dependent so headerfields is ignored
def get_protein_data ( peptide , pdata , headerfields , accfield ) : report = get_proteins ( peptide , pdata , headerfields ) return get_cov_descriptions ( peptide , pdata , report )
9,597
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/merge.py#L81-L85
[ "def", "start_video_recording", "(", "self", ",", "local_video_file_path", ",", "video_filename", ")", ":", "self", ".", "runner", ".", "info_log", "(", "\"Starting video recording...\"", ")", "self", ".", "local_video_recording_file_path", "=", "local_video_file_path", "self", ".", "remote_video_recording_file_path", "=", "video_filename", "self", ".", "execute_command", "(", "\"./start_recording.sh '%s'\"", "%", "self", ".", "remote_video_recording_file_path", ")" ]
r Returns the number of chunks that a list will be split into given a chunksize .
def get_num_chunks ( length , chunksize ) : n_chunks = int ( math . ceil ( length / chunksize ) ) return n_chunks
9,598
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L116-L142
[ "def", "find_parameter", "(", ")", ":", "p1", "=", "mdb", ".", "get_parameter", "(", "'/YSS/SIMULATOR/BatteryVoltage2'", ")", "print", "(", "'Via qualified name:'", ",", "p1", ")", "p2", "=", "mdb", ".", "get_parameter", "(", "'MDB:OPS Name/SIMULATOR_BatteryVoltage2'", ")", "print", "(", "'Via domain-specific alias:'", ",", "p2", ")" ]
Yeilds an iterator in chunks and computes progress Progress version of ut . ichunks
def ProgChunks ( list_ , chunksize , nInput = None , * * kwargs ) : if nInput is None : nInput = len ( list_ ) n_chunks = get_num_chunks ( nInput , chunksize ) kwargs [ 'length' ] = n_chunks if 'freq' not in kwargs : kwargs [ 'freq' ] = 1 chunk_iter = util_iter . ichunks ( list_ , chunksize ) progiter_ = ProgressIter ( chunk_iter , * * kwargs ) return progiter_
9,599
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L145-L186
[ "def", "valid", "(", "self", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "return", "self", ".", "filter", "(", "revoked", "=", "False", ",", "expires__gt", "=", "now", ",", "valid_from__lt", "=", "now", ")" ]