idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
9,000 | def list_images ( img_dpath_ , ignore_list = [ ] , recursive = False , fullpath = False , full = None , sort = True ) : #if not QUIET: # print(ignore_list) if full is not None : fullpath = fullpath or full img_dpath_ = util_str . ensure_unicode ( img_dpath_ ) img_dpath = realpath ( img_dpath_ ) ignore_set = set ( ignore_list ) gname_list_ = [ ] assertpath ( img_dpath ) # Get all the files in a directory recursively true_imgpath = truepath ( img_dpath ) for root , dlist , flist in os . walk ( true_imgpath ) : root = util_str . ensure_unicode ( root ) rel_dpath = relpath ( root , img_dpath ) # Ignore directories if any ( [ dname in ignore_set for dname in dirsplit ( rel_dpath ) ] ) : continue for fname in iter ( flist ) : fname = util_str . ensure_unicode ( fname ) gname = join ( rel_dpath , fname ) . replace ( '\\' , '/' ) if gname . startswith ( './' ) : gname = gname [ 2 : ] if fpath_has_imgext ( gname ) : # Ignore Files if gname in ignore_set : continue if fullpath : gpath = join ( img_dpath , gname ) gname_list_ . append ( gpath ) else : gname_list_ . append ( gname ) if not recursive : break if sort : gname_list = sorted ( gname_list_ ) return gname_list | r Returns a list of images in a directory . By default returns relative paths . | 389 | 16 |
9,001 | def assertpath ( path_ , msg = '' , * * kwargs ) : if NO_ASSERTS : return if path_ is None : raise AssertionError ( 'path is None! %s' % ( path_ , msg ) ) if path_ == '' : raise AssertionError ( 'path=%r is the empty string! %s' % ( path_ , msg ) ) if not checkpath ( path_ , * * kwargs ) : raise AssertionError ( 'path=%r does not exist! %s' % ( path_ , msg ) ) | Asserts that a patha exists | 128 | 8 |
9,002 | def matching_fpaths ( dpath_list , include_patterns , exclude_dirs = [ ] , greater_exclude_dirs = [ ] , exclude_patterns = [ ] , recursive = True ) : if isinstance ( dpath_list , six . string_types ) : dpath_list = [ dpath_list ] for dpath in dpath_list : for root , dname_list , fname_list in os . walk ( dpath ) : # Look at all subdirs subdirs = pathsplit_full ( relpath ( root , dpath ) ) # HACK: if any ( [ dir_ in greater_exclude_dirs for dir_ in subdirs ] ) : continue # Look at one subdir if basename ( root ) in exclude_dirs : continue _match = fnmatch . fnmatch for name in fname_list : # yeild filepaths that are included if any ( _match ( name , pat ) for pat in include_patterns ) : # ... and not excluded if not any ( _match ( name , pat ) for pat in exclude_patterns ) : fpath = join ( root , name ) yield fpath if not recursive : break | r walks dpath lists returning all directories that match the requested pattern . | 266 | 14 |
9,003 | def sed ( regexpr , repl , force = False , recursive = False , dpath_list = None , fpath_list = None , verbose = None , include_patterns = None , exclude_patterns = [ ] ) : #_grep(r, [repl], dpath_list=dpath_list, recursive=recursive) if include_patterns is None : include_patterns = [ '*.py' , '*.pyx' , '*.pxi' , '*.cxx' , '*.cpp' , '*.hxx' , '*.hpp' , '*.c' , '*.h' , '*.html' , '*.tex' ] if dpath_list is None : dpath_list = [ os . getcwd ( ) ] if verbose is None : verbose = ut . NOT_QUIET if fpath_list is None : greater_exclude_dirs = get_standard_exclude_dnames ( ) exclude_dirs = [ ] fpath_generator = matching_fpaths ( dpath_list , include_patterns , exclude_dirs , greater_exclude_dirs = greater_exclude_dirs , recursive = recursive , exclude_patterns = exclude_patterns ) else : fpath_generator = fpath_list if verbose : print ( 'sed-ing %r' % ( dpath_list , ) ) print ( ' * regular expression : %r' % ( regexpr , ) ) print ( ' * replacement : %r' % ( repl , ) ) print ( ' * include_patterns : %r' % ( include_patterns , ) ) print ( ' * recursive: %r' % ( recursive , ) ) print ( ' * force: %r' % ( force , ) ) from utool import util_str print ( ' * fpath_list: %s' % ( util_str . repr3 ( fpath_list ) , ) ) regexpr = extend_regex ( regexpr ) #if '\x08' in regexpr: # print('Remember \\x08 != \\b') # print('subsituting for you for you') # regexpr = regexpr.replace('\x08', '\\b') # print(' * regular expression : %r' % (regexpr,)) # Walk through each directory recursively num_changed = 0 num_files_checked = 0 fpaths_changed = [ ] for fpath in fpath_generator : num_files_checked += 1 changed_lines = sedfile ( fpath , regexpr , repl , force , verbose = verbose ) if changed_lines is not None : fpaths_changed . append ( fpath ) num_changed += len ( changed_lines ) import utool as ut print ( 'num_files_checked = %r' % ( num_files_checked , ) ) print ( 'fpaths_changed = %s' % ( ut . 
repr3 ( sorted ( fpaths_changed ) ) , ) ) print ( 'total lines changed = %r' % ( num_changed , ) ) | Python implementation of sed . NOT FINISHED | 682 | 9 |
9,004 | def get_win32_short_path_name ( long_name ) : import ctypes from ctypes import wintypes _GetShortPathNameW = ctypes . windll . kernel32 . GetShortPathNameW _GetShortPathNameW . argtypes = [ wintypes . LPCWSTR , wintypes . LPWSTR , wintypes . DWORD ] _GetShortPathNameW . restype = wintypes . DWORD output_buf_size = 0 while True : output_buf = ctypes . create_unicode_buffer ( output_buf_size ) needed = _GetShortPathNameW ( long_name , output_buf , output_buf_size ) if output_buf_size >= needed : short_name = output_buf . value break else : output_buf_size = needed return short_name | Gets the short path name of a given long path . | 184 | 12 |
9,005 | def platform_path ( path ) : try : if path == '' : raise ValueError ( 'path cannot be the empty string' ) # get path relative to cwd path1 = truepath_relative ( path ) if sys . platform . startswith ( 'win32' ) : path2 = expand_win32_shortname ( path1 ) else : path2 = path1 except Exception as ex : util_dbg . printex ( ex , keys = [ 'path' , 'path1' , 'path2' ] ) raise return path2 | r Returns platform specific path for pyinstaller usage | 118 | 10 |
9,006 | def find_lib_fpath ( libname , root_dir , recurse_down = True , verbose = False , debug = False ) : def get_lib_fname_list ( libname ) : """ input <libname>: library name (e.g. 'hesaff', not 'libhesaff') returns <libnames>: list of plausible library file names """ if sys . platform . startswith ( 'win32' ) : libnames = [ 'lib' + libname + '.dll' , libname + '.dll' ] elif sys . platform . startswith ( 'darwin' ) : libnames = [ 'lib' + libname + '.dylib' ] elif sys . platform . startswith ( 'linux' ) : libnames = [ 'lib' + libname + '.so' ] else : raise Exception ( 'Unknown operating system: %s' % sys . platform ) return libnames def get_lib_dpath_list ( root_dir ) : """ input <root_dir>: deepest directory to look for a library (dll, so, dylib) returns <libnames>: list of plausible directories to look. """ 'returns possible lib locations' get_lib_dpath_list = [ root_dir , join ( root_dir , 'lib' ) , join ( root_dir , 'build' ) , join ( root_dir , 'build' , 'lib' ) ] return get_lib_dpath_list lib_fname_list = get_lib_fname_list ( libname ) tried_fpaths = [ ] while root_dir is not None : for lib_fname in lib_fname_list : for lib_dpath in get_lib_dpath_list ( root_dir ) : lib_fpath = normpath ( join ( lib_dpath , lib_fname ) ) if exists ( lib_fpath ) : if verbose : print ( '\n[c] Checked: ' . join ( tried_fpaths ) ) if debug : print ( 'using: %r' % lib_fpath ) return lib_fpath else : # Remember which candiate library fpaths did not exist tried_fpaths . append ( lib_fpath ) _new_root = dirname ( root_dir ) if _new_root == root_dir : root_dir = None break else : root_dir = _new_root if not recurse_down : break msg = ( '\n[C!] load_clib(libname=%r root_dir=%r, recurse_down=%r, verbose=%r)' % ( libname , root_dir , recurse_down , verbose ) + '\n[c!] Cannot FIND dynamic library' ) print ( msg ) print ( '\n[c!] Checked: ' . join ( tried_fpaths ) ) raise ImportError ( msg ) | Search for the library | 640 | 4 |
9,007 | def ensure_mingw_drive ( win32_path ) : win32_drive , _path = splitdrive ( win32_path ) mingw_drive = '/' + win32_drive [ : - 1 ] . lower ( ) mingw_path = mingw_drive + _path return mingw_path | r replaces windows drives with mingw style drives | 68 | 9 |
9,008 | def ancestor_paths ( start = None , limit = { } ) : import utool as ut limit = ut . ensure_iterable ( limit ) limit = { expanduser ( p ) for p in limit } . union ( set ( limit ) ) if start is None : start = os . getcwd ( ) path = start prev = None while path != prev and prev not in limit : yield path prev = path path = dirname ( path ) | All paths above you | 95 | 4 |
9,009 | def search_candidate_paths ( candidate_path_list , candidate_name_list = None , priority_paths = None , required_subpaths = [ ] , verbose = None ) : import utool as ut if verbose is None : verbose = 0 if QUIET else 1 if verbose >= 1 : print ( '[search_candidate_paths] Searching for candidate paths' ) if candidate_name_list is not None : candidate_path_list_ = [ join ( dpath , fname ) for dpath , fname in itertools . product ( candidate_path_list , candidate_name_list ) ] else : candidate_path_list_ = candidate_path_list if priority_paths is not None : candidate_path_list_ = priority_paths + candidate_path_list_ return_path = None for path in candidate_path_list_ : if path is not None and exists ( path ) : if verbose >= 2 : print ( '[search_candidate_paths] Found candidate directory %r' % ( path , ) ) print ( '[search_candidate_paths] ... checking for approprate structure' ) # tomcat directory exists. Make sure it also contains a webapps dir subpath_list = [ join ( path , subpath ) for subpath in required_subpaths ] if all ( ut . checkpath ( path_ , verbose = verbose ) for path_ in subpath_list ) : return_path = path if verbose >= 2 : print ( '[search_candidate_paths] Found acceptable path' ) return return_path break if verbose >= 1 : print ( '[search_candidate_paths] Failed to find acceptable path' ) return return_path | searches for existing paths that meed a requirement | 383 | 11 |
9,010 | def symlink ( real_path , link_path , overwrite = False , on_error = 'raise' , verbose = 2 ) : path = normpath ( real_path ) link = normpath ( link_path ) if verbose : print ( '[util_path] Creating symlink: path={} link={}' . format ( path , link ) ) if os . path . islink ( link ) : if verbose : print ( '[util_path] symlink already exists' ) os_readlink = getattr ( os , "readlink" , None ) if callable ( os_readlink ) : if os_readlink ( link ) == path : if verbose > 1 : print ( '[path] ... and points to the right place' ) return link else : print ( '[util_path] Warning, symlinks are not implemented on windows' ) if verbose > 1 : print ( '[util_path] ... but it points somewhere else' ) if overwrite : delete ( link , verbose > 1 ) elif on_error == 'ignore' : return False try : os_symlink = getattr ( os , "symlink" , None ) if callable ( os_symlink ) : os_symlink ( path , link ) else : win_shortcut ( path , link ) except Exception as ex : import utool as ut checkpath ( link , verbose = True ) checkpath ( path , verbose = True ) do_raise = ( on_error == 'raise' ) ut . printex ( ex , '[util_path] error making symlink' , iswarning = not do_raise ) if do_raise : raise return link | Attempt to create a symbolic link . | 365 | 7 |
9,011 | def remove_broken_links ( dpath , verbose = True ) : fname_list = [ join ( dpath , fname ) for fname in os . listdir ( dpath ) ] broken_links = list ( filterfalse ( exists , filter ( islink , fname_list ) ) ) num_broken = len ( broken_links ) if verbose : if verbose > 1 or num_broken > 0 : print ( '[util_path] Removing %d broken links in %r' % ( num_broken , dpath , ) ) for link in broken_links : os . unlink ( link ) return num_broken | Removes all broken links in a directory | 138 | 8 |
9,012 | def non_existing_path ( path_ , dpath = None , offset = 0 , suffix = None , force_fmt = False ) : import utool as ut from os . path import basename , dirname if dpath is None : dpath = dirname ( path_ ) base_fmtstr = basename ( path_ ) if suffix is not None : base_fmtstr = ut . augpath ( base_fmtstr , suffix ) if '%' not in base_fmtstr : if not force_fmt : # If we have don't have to format, # then try to use the first choice first_choice = join ( dpath , base_fmtstr ) if not exists ( first_choice ) : return first_choice # otherwise we ensure we can format and we continue base_fmtstr = ut . augpath ( base_fmtstr , '%d' ) dname_list = ut . glob ( dpath , pattern = '*' , recursive = False , with_files = True , with_dirs = True ) conflict_set = set ( basename ( dname ) for dname in dname_list ) newname = ut . get_nonconflicting_string ( base_fmtstr , conflict_set , offset = offset ) newpath = join ( dpath , newname ) return newpath | r Searches for and finds a path garuenteed to not exist . | 293 | 16 |
9,013 | def create_isobaric_quant_lookup ( quantdb , specfn_consensus_els , channelmap ) : # store quantchannels in lookup and generate a db_id vs channel map channels_store = ( ( name , ) for name , c_id in sorted ( channelmap . items ( ) , key = lambda x : x [ 1 ] ) ) quantdb . store_channelmap ( channels_store ) channelmap_dbid = { channelmap [ ch_name ] : ch_id for ch_id , ch_name in quantdb . get_channelmap ( ) } quants = [ ] mzmlmap = quantdb . get_mzmlfile_map ( ) for specfn , consensus_el in specfn_consensus_els : rt = openmsreader . get_consxml_rt ( consensus_el ) rt = round ( float ( Decimal ( rt ) / 60 ) , 12 ) qdata = get_quant_data ( consensus_el ) spectra_id = quantdb . get_spectra_id ( mzmlmap [ specfn ] , retention_time = rt ) for channel_no in sorted ( qdata . keys ( ) ) : quants . append ( ( spectra_id , channelmap_dbid [ channel_no ] , qdata [ channel_no ] ) ) if len ( quants ) == DB_STORE_CHUNK : quantdb . store_isobaric_quants ( quants ) quantdb . store_isobaric_quants ( quants ) quantdb . index_isobaric_quants ( ) | Creates an sqlite lookup table of scannrs with quant data . | 354 | 15 |
9,014 | def get_precursors_from_window ( quantdb , minmz ) : featmap = { } mz = False features = quantdb . get_precursor_quant_window ( FEATURE_ALIGN_WINDOW_AMOUNT , minmz ) for feat_id , fn_id , charge , mz , rt in features : try : featmap [ fn_id ] [ charge ] . append ( ( mz , rt , feat_id ) ) except KeyError : try : featmap [ fn_id ] [ charge ] = [ ( mz , rt , feat_id ) ] except KeyError : featmap [ fn_id ] = { charge : [ ( mz , rt , feat_id ) ] } return featmap , mz | Returns a dict of a specified amount of features from the ms1 quant database and the highest mz of those features | 171 | 23 |
9,015 | def get_quant_data ( cons_el ) : quant_out = { } for reporter in cons_el . findall ( './/element' ) : quant_out [ reporter . attrib [ 'map' ] ] = reporter . attrib [ 'it' ] return quant_out | Gets quant data from consensusXML element | 62 | 9 |
9,016 | def get_plat_specifier ( ) : import setuptools # NOQA import distutils plat_name = distutils . util . get_platform ( ) plat_specifier = ".%s-%s" % ( plat_name , sys . version [ 0 : 3 ] ) if hasattr ( sys , 'gettotalrefcount' ) : plat_specifier += '-pydebug' return plat_specifier | Standard platform specifier used by distutils | 94 | 8 |
9,017 | def get_system_python_library ( ) : import os import utool as ut from os . path import basename , realpath pyname = basename ( realpath ( sys . executable ) ) ld_library_path = os . environ [ 'LD_LIBRARY_PATH' ] libdirs = [ x for x in ld_library_path . split ( os . pathsep ) if x ] + [ '/usr/lib' ] libfiles = ut . flatten ( [ ut . glob ( d , '*' + ut . get_lib_ext ( ) , recursive = True ) for d in libdirs ] ) python_libs = [ realpath ( f ) for f in libfiles if 'lib' + pyname in basename ( f ) ] python_libs = ut . unique_ordered ( python_libs ) assert len ( python_libs ) == 1 , str ( python_libs ) return python_libs [ 0 ] | FIXME ; hacky way of finding python library . Not cross platform yet . | 214 | 16 |
9,018 | def get_dynlib_dependencies ( lib_path ) : if LINUX : ldd_fpath = '/usr/bin/ldd' depend_out , depend_err , ret = cmd ( ldd_fpath , lib_path , verbose = False ) elif DARWIN : otool_fpath = '/opt/local/bin/otool' depend_out , depend_err , ret = cmd ( otool_fpath , '-L' , lib_path , verbose = False ) elif WIN32 : depend_out , depend_err , ret = cmd ( 'objdump' , '-p' , lib_path , verbose = False ) #fnmatch.filter(depend_out.split('\n'), '*DLL*') relevant_lines = [ line for line in depend_out . splitlines ( ) if 'DLL Name:' in line ] depend_out = '\n' . join ( relevant_lines ) assert ret == 0 , 'bad dependency check' return depend_out | Executes tools for inspecting dynamic library dependencies depending on the current platform . | 225 | 14 |
9,019 | def startfile ( fpath , detatch = True , quote = False , verbose = False , quiet = True ) : print ( '[cplat] startfile(%r)' % fpath ) fpath = normpath ( fpath ) # print('[cplat] fpath=%s' % fpath) if not exists ( fpath ) : raise Exception ( 'Cannot start nonexistant file: %r' % fpath ) #if quote: # fpath = '"%s"' % (fpath,) if not WIN32 : fpath = pipes . quote ( fpath ) if LINUX : #out, err, ret = cmd(['xdg-open', fpath], detatch=True) outtup = cmd ( ( 'xdg-open' , fpath ) , detatch = detatch , verbose = verbose , quiet = quiet ) #outtup = cmd('xdg-open', fpath, detatch=detatch) elif DARWIN : outtup = cmd ( ( 'open' , fpath ) , detatch = detatch , verbose = verbose , quiet = quiet ) elif WIN32 : os . startfile ( fpath ) else : raise RuntimeError ( 'Unknown Platform' ) if outtup is not None : out , err , ret = outtup if not ret : raise Exception ( out + ' -- ' + err ) pass | Uses default program defined by the system to open a file . | 304 | 13 |
9,020 | def view_directory ( dname = None , fname = None , verbose = True ) : from utool . util_arg import STRICT from utool . util_path import checkpath # from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE if HAVE_PATHLIB and isinstance ( dname , pathlib . Path ) : dname = str ( dname ) if verbose : print ( '[cplat] view_directory(%r) ' % dname ) dname = os . getcwd ( ) if dname is None else dname open_prog = { 'win32' : 'explorer.exe' , 'linux' : 'nautilus' , 'darwin' : 'open' } [ OS_TYPE ] dname = normpath ( dname ) if STRICT : assert checkpath ( dname , verbose = verbose ) , 'directory doesnt exit' if fname is not None and OS_TYPE == 'linux' : arg = join ( dname , fname ) else : arg = dname # if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)): # # Ensure quotations # dname = '"%s"' % dname # if not WIN32: # arg = dname # # arg = subprocess.list2cmdline([dname]) # # arg = pipes.quote(dname) # else: # arg = dname # spawn and detatch process args = ( open_prog , arg ) print ( subprocess . list2cmdline ( args ) ) subprocess . Popen ( args ) | View a directory in the operating system file browser . Currently supports windows explorer mac open and linux nautlius . | 366 | 23 |
9,021 | def platform_cache_dir ( ) : if WIN32 : # nocover dpath_ = '~/AppData/Local' elif LINUX : # nocover dpath_ = '~/.cache' elif DARWIN : # nocover dpath_ = '~/Library/Caches' else : # nocover raise NotImplementedError ( 'Unknown Platform %r' % ( sys . platform , ) ) dpath = normpath ( expanduser ( dpath_ ) ) return dpath | Returns a directory which should be writable for any application This should be used for temporary deletable data . | 111 | 21 |
9,022 | def __parse_cmd_args ( args , sudo , shell ) : # Case where tuple is passed in as only argument if isinstance ( args , tuple ) and len ( args ) == 1 and isinstance ( args [ 0 ] , tuple ) : args = args [ 0 ] if shell : # When shell is True, ensure args is a string if isinstance ( args , six . string_types ) : pass elif isinstance ( args , ( list , tuple ) ) and len ( args ) > 1 : args = ' ' . join ( args ) elif isinstance ( args , ( list , tuple ) ) and len ( args ) == 1 : if isinstance ( args [ 0 ] , ( tuple , list ) ) : args = ' ' . join ( args ) elif isinstance ( args [ 0 ] , six . string_types ) : args = args [ 0 ] else : # When shell is False, ensure args is a tuple if isinstance ( args , six . string_types ) : args = shlex . split ( args , posix = not WIN32 ) elif isinstance ( args , ( list , tuple ) ) : if len ( args ) > 1 : args = tuple ( args ) elif len ( args ) == 1 : if isinstance ( args [ 0 ] , ( tuple , list ) ) : args = tuple ( args [ 0 ] ) elif isinstance ( args [ 0 ] , six . string_types ) : args = shlex . split ( args [ 0 ] , posix = not WIN32 ) if sudo is True : if not WIN32 : if shell : args = 'sudo ' + args else : args = tuple ( [ 'sudo' ] ) + tuple ( args ) #if isinstance(args, six.string_types): # args = shlex.split(args) #args = ['sudo'] + args ## using sudo means we need to use a single string I believe #args = ' '.join(args) else : # TODO: strip out sudos pass # HACK FOR WINDOWS AGAIN # makes this command work: # python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe ' + ut.grab_test_imgpath('star.png'))" # and this should still work # python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe', ut.grab_test_imgpath('star.png'))" if WIN32 : if len ( args ) == 1 and isinstance ( args [ 0 ] , six . string_types ) : args = shlex . split ( args [ 0 ] , posix = not WIN32 ) return args | When shell is True Popen will only accept strings . No tuples Shell really should not be true . | 572 | 21 |
9,023 | def cmd2 ( command , shell = False , detatch = False , verbose = False , verbout = None ) : import shlex if isinstance ( command , ( list , tuple ) ) : raise ValueError ( 'command tuple not supported yet' ) args = shlex . split ( command , posix = not WIN32 ) if verbose is True : verbose = 2 if verbout is None : verbout = verbose >= 1 if verbose >= 2 : print ( '+=== START CMD2 ===' ) print ( 'Command:' ) print ( command ) if verbout : print ( '----' ) print ( 'Stdout:' ) proc = subprocess . Popen ( args , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , shell = shell , universal_newlines = True ) if detatch : info = { 'proc' : proc } else : write_fn = sys . stdout . write flush_fn = sys . stdout . flush logged_out = [ ] for line in _run_process ( proc ) : #line_ = line if six.PY2 else line.decode('utf-8') line_ = line if six . PY2 else line if len ( line_ ) > 0 : if verbout : write_fn ( line_ ) flush_fn ( ) logged_out . append ( line ) try : from utool import util_str # NOQA # out = '\n'.join(logged_out) out = '' . join ( logged_out ) except UnicodeDecodeError : from utool import util_str # NOQA logged_out = util_str . ensure_unicode_strlist ( logged_out ) # out = '\n'.join(logged_out) out = '' . join ( logged_out ) # print('logged_out = %r' % (logged_out,)) # raise ( out_ , err ) = proc . communicate ( ) ret = proc . wait ( ) info = { 'out' : out , 'err' : err , 'ret' : ret , } if verbose >= 2 : print ( 'L___ END CMD2 ___' ) return info | Trying to clean up cmd | 478 | 6 |
9,024 | def search_env_paths ( fname , key_list = None , verbose = None ) : import utool as ut # from os.path import join if key_list is None : key_list = [ key for key in os . environ if key . find ( 'PATH' ) > - 1 ] print ( 'key_list = %r' % ( key_list , ) ) found = ut . ddict ( list ) for key in key_list : dpath_list = os . environ [ key ] . split ( os . pathsep ) for dpath in dpath_list : #if verbose: # print('dpath = %r' % (dpath,)) # testname = join(dpath, fname) matches = ut . glob ( dpath , fname ) found [ key ] . extend ( matches ) #import fnmatch #import utool #utool.embed() #if ut.checkpath(testname, verbose=False): # if verbose: # print('Found in key=%r' % (key,)) # ut.checkpath(testname, verbose=True, info=True) # found += [testname] return dict ( found ) | r Searches your PATH to see if fname exists | 264 | 11 |
9,025 | def change_term_title ( title ) : if True : # Disabled return if not WIN32 : #print("CHANGE TERM TITLE to %r" % (title,)) if title : #os.environ['PS1'] = os.environ['PS1'] + '''"\e]2;\"''' + title + '''\"\a"''' cmd_str = r'''echo -en "\033]0;''' + title + '''\a"''' os . system ( cmd_str ) | only works on unix systems only tested on Ubuntu GNOME changes text on terminal title for identifying debugging tasks . | 118 | 21 |
9,026 | def unload_module ( modname ) : import sys import gc if modname in sys . modules : referrer_list = gc . get_referrers ( sys . modules [ modname ] ) #module = sys.modules[modname] for referer in referrer_list : if referer is not sys . modules : referer [ modname ] = None #del referer[modname] #sys.modules[modname] = module #del module refcount = sys . getrefcount ( sys . modules [ modname ] ) print ( '%s refcount=%r' % ( modname , refcount ) ) del sys . modules [ modname ] | WARNING POTENTIALLY DANGEROUS AND MAY NOT WORK | 147 | 14 |
9,027 | def base_add_isoquant_data ( features , quantfeatures , acc_col , quantacc_col , quantfields ) : quant_map = get_quantmap ( quantfeatures , quantacc_col , quantfields ) for feature in features : feat_acc = feature [ acc_col ] outfeat = { k : v for k , v in feature . items ( ) } try : outfeat . update ( quant_map [ feat_acc ] ) except KeyError : outfeat . update ( { field : 'NA' for field in quantfields } ) yield outfeat | Generic function that takes a peptide or protein table and adds quant data from ANOTHER such table . | 123 | 20 |
9,028 | def get_quantmap ( features , acc_col , quantfields ) : qmap = { } for feature in features : feat_acc = feature . pop ( acc_col ) qmap [ feat_acc ] = { qf : feature [ qf ] for qf in quantfields } return qmap | Runs through proteins that are in a quanted protein table extracts and maps their information based on the quantfields list input . Map is a dict with protein_accessions as keys . | 65 | 37 |
9,029 | def partition_varied_cfg_list ( cfg_list , default_cfg = None , recursive = False ) : import utool as ut if default_cfg is None : nonvaried_cfg = reduce ( ut . dict_intersection , cfg_list ) else : nonvaried_cfg = reduce ( ut . dict_intersection , [ default_cfg ] + cfg_list ) nonvaried_keys = list ( nonvaried_cfg . keys ( ) ) varied_cfg_list = [ ut . delete_dict_keys ( cfg . copy ( ) , nonvaried_keys ) for cfg in cfg_list ] if recursive : # Find which varied keys have dict values varied_keys = list ( set ( [ key for cfg in varied_cfg_list for key in cfg ] ) ) varied_vals_list = [ [ cfg [ key ] for cfg in varied_cfg_list if key in cfg ] for key in varied_keys ] for key , varied_vals in zip ( varied_keys , varied_vals_list ) : if len ( varied_vals ) == len ( cfg_list ) : if all ( [ isinstance ( val , dict ) for val in varied_vals ] ) : nonvaried_subdict , varied_subdicts = partition_varied_cfg_list ( varied_vals , recursive = recursive ) nonvaried_cfg [ key ] = nonvaried_subdict for cfg , subdict in zip ( varied_cfg_list , varied_subdicts ) : cfg [ key ] = subdict return nonvaried_cfg , varied_cfg_list | r Separates varied from non - varied parameters in a list of configs | 357 | 16 |
9,030 | def get_cfg_lbl ( cfg , name = None , nonlbl_keys = INTERNAL_CFGKEYS , key_order = None , with_name = True , default_cfg = None , sep = '' ) : import utool as ut if name is None : name = cfg . get ( '_cfgname' , '' ) if default_cfg is not None : # Remove defaulted labels cfg = ut . partition_varied_cfg_list ( [ cfg ] , default_cfg ) [ 1 ] [ 0 ] # remove keys that should not belong to the label _clean_cfg = ut . delete_keys ( cfg . copy ( ) , nonlbl_keys ) _lbl = ut . repr4 ( _clean_cfg , explicit = True , nl = False , strvals = True , key_order = key_order , itemsep = sep ) # _search = ['dict(', ')', ' '] _search = [ 'dict(' , ')' ] _repl = [ '' ] * len ( _search ) _lbl = ut . multi_replace ( _lbl , _search , _repl ) . rstrip ( ',' ) if not with_name : return _lbl if NAMEVARSEP in name : # hack for when name contains a little bit of the _lbl # VERY HACKY TO PARSE OUT PARTS OF THE GIVEN NAME. hacked_name , _cfgstr , _ = parse_cfgstr_name_options ( name ) _cfgstr_options_list = re . split ( r',\s*' + ut . negative_lookahead ( r'[^\[\]]*\]' ) , _cfgstr ) #cfgstr_options_list = cfgopt_strs.split(',') _cfg_options = ut . parse_cfgstr_list ( _cfgstr_options_list , smartcast = False , oldmode = False ) # ut . delete_keys ( _cfg_options , cfg . keys ( ) ) _preflbl = ut . repr4 ( _cfg_options , explicit = True , nl = False , strvals = True ) _preflbl = ut . multi_replace ( _preflbl , _search , _repl ) . rstrip ( ',' ) hacked_name += NAMEVARSEP + _preflbl ### cfg_lbl = hacked_name + _lbl else : cfg_lbl = name + NAMEVARSEP + _lbl return cfg_lbl | r Formats a flat configuration dict into a short string label . This is useful for re - creating command line strings . | 556 | 24 |
9,031 | def grid_search_generator ( grid_basis = [ ] , * args , * * kwargs ) : grid_basis_ = grid_basis + list ( args ) + list ( kwargs . items ( ) ) grid_basis_dict = OrderedDict ( grid_basis_ ) grid_point_iter = util_dict . iter_all_dict_combinations_ordered ( grid_basis_dict ) for grid_point in grid_point_iter : yield grid_point | r Iteratively yeilds individual configuration points inside a defined basis . | 113 | 14 |
9,032 | def get_cfgdict_list_subset ( cfgdict_list , keys ) : import utool as ut cfgdict_sublist_ = [ ut . dict_subset ( cfgdict , keys ) for cfgdict in cfgdict_list ] cfgtups_sublist_ = [ tuple ( ut . dict_to_keyvals ( cfgdict ) ) for cfgdict in cfgdict_sublist_ ] cfgtups_sublist = ut . unique_ordered ( cfgtups_sublist_ ) cfgdict_sublist = list ( map ( dict , cfgtups_sublist ) ) return cfgdict_sublist | r returns list of unique dictionaries only with keys specified in keys | 145 | 13 |
9,033 | def constrain_cfgdict_list ( cfgdict_list_ , constraint_func ) : cfgdict_list = [ ] for cfg_ in cfgdict_list_ : cfg = cfg_ . copy ( ) if constraint_func ( cfg ) is not False and len ( cfg ) > 0 : if cfg not in cfgdict_list : cfgdict_list . append ( cfg ) return cfgdict_list | constrains configurations and removes duplicates | 99 | 8 |
9,034 | def make_cfglbls ( cfgdict_list , varied_dict ) : import textwrap wrapper = textwrap . TextWrapper ( width = 50 ) cfglbl_list = [ ] for cfgdict_ in cfgdict_list : cfgdict = cfgdict_ . copy ( ) for key in six . iterkeys ( cfgdict_ ) : try : vals = varied_dict [ key ] # Dont print label if not varied if len ( vals ) == 1 : del cfgdict [ key ] else : # Dont print label if it is None (irrelevant) if cfgdict [ key ] is None : del cfgdict [ key ] except KeyError : # Don't print keys not in varydict del cfgdict [ key ] cfglbl = six . text_type ( cfgdict ) search_repl_list = [ ( '\'' , '' ) , ( '}' , '' ) , ( '{' , '' ) , ( ': ' , '=' ) ] for search , repl in search_repl_list : cfglbl = cfglbl . replace ( search , repl ) #cfglbl = str(cfgdict).replace('\'', '').replace('}', '').replace('{', '').replace(': ', '=') cfglbl = ( '\n' . join ( wrapper . wrap ( cfglbl ) ) ) cfglbl_list . append ( cfglbl ) return cfglbl_list | Show only the text in labels that mater from the cfgdict | 322 | 13 |
9,035 | def gridsearch_timer ( func_list , args_list , niters = None , * * searchkw ) : import utool as ut timings = ut . ddict ( list ) if niters is None : niters = len ( args_list ) if ut . is_funclike ( args_list ) : get_args = args_list else : get_args = args_list . __getitem__ #func_labels = searchkw.get('func_labels', list(range(len(func_list)))) func_labels = searchkw . get ( 'func_labels' , [ ut . get_funcname ( func ) for func in func_list ] ) use_cache = searchkw . get ( 'use_cache' , not ut . get_argflag ( ( '--nocache' , '--nocache-time' ) ) ) assert_eq = searchkw . get ( 'assert_eq' , True ) count_list = list ( range ( niters ) ) xlabel_list = [ ] cache = ut . ShelfCacher ( 'timeings.shelf' , enabled = use_cache ) for count in ut . ProgressIter ( count_list , lbl = 'Testing Timeings' ) : args_ = get_args ( count ) xlabel_list . append ( args_ ) if True : # HACK # There is an unhandled corner case that will fail if the function expects a tuple. if not isinstance ( args_ , tuple ) : args_ = ( args_ , ) assert isinstance ( args_ , tuple ) , 'args_ should be a tuple so it can be unpacked' ret_list = [ ] for func_ in func_list : try : kwargs_ = { } func_cachekey = ut . get_func_result_cachekey ( func_ , args_ , kwargs_ ) ellapsed = cache . load ( func_cachekey ) except ut . CacheMissException : with ut . Timer ( verbose = False ) as t : ret = func_ ( * args_ ) ret_list . append ( ret ) ellapsed = t . ellapsed cache . save ( func_cachekey , ellapsed ) timings [ func_ ] . append ( ellapsed ) if assert_eq : # Hacky, not guarenteed to work if cache is one ut . assert_all_eq ( list ( map ( ut . cachestr_repr , ret_list ) ) ) cache . close ( ) count_to_xtick = searchkw . get ( 'count_to_xtick' , lambda x , y : x ) xtick_list = [ count_to_xtick ( count , get_args ( count ) ) for count in count_list ] def plot_timings ( ) : import plottool as pt ydata_list = ut . dict_take ( timings , func_list ) xdata = xtick_list ylabel = 'seconds' xlabel = 'input size' pt . 
multi_plot ( xdata , ydata_list , label_list = func_labels , ylabel = ylabel , xlabel = xlabel , * * searchkw ) time_result = { 'plot_timings' : plot_timings , 'timings' : timings , } return time_result | Times a series of functions on a series of inputs | 722 | 10 |
9,036 | def get_mapping ( version = 1 , exported_at = None , app_name = None ) : if exported_at is None : exported_at = timezone . now ( ) app_name = app_name or settings . HEROKU_CONNECT_APP_NAME return { 'version' : version , 'connection' : { 'organization_id' : settings . HEROKU_CONNECT_ORGANIZATION_ID , 'app_name' : app_name , 'exported_at' : exported_at . isoformat ( ) , } , 'mappings' : [ model . get_heroku_connect_mapping ( ) for model in get_heroku_connect_models ( ) ] } | Return Heroku Connect mapping for the entire project . | 158 | 10 |
9,037 | def get_heroku_connect_models ( ) : from django . apps import apps apps . check_models_ready ( ) from heroku_connect . db . models import HerokuConnectModel return ( model for models in apps . all_models . values ( ) for model in models . values ( ) if issubclass ( model , HerokuConnectModel ) and not model . _meta . managed ) | Return all registered Heroku Connect Models . | 86 | 8 |
9,038 | def create_heroku_connect_schema ( using = DEFAULT_DB_ALIAS ) : connection = connections [ using ] with connection . cursor ( ) as cursor : cursor . execute ( _SCHEMA_EXISTS_QUERY , [ settings . HEROKU_CONNECT_SCHEMA ] ) schema_exists = cursor . fetchone ( ) [ 0 ] if schema_exists : return False cursor . execute ( "CREATE SCHEMA %s;" , [ AsIs ( settings . HEROKU_CONNECT_SCHEMA ) ] ) with connection . schema_editor ( ) as editor : for model in get_heroku_connect_models ( ) : editor . create_model ( model ) # Needs PostgreSQL and database superuser privileges (which is the case on Heroku): editor . execute ( 'CREATE EXTENSION IF NOT EXISTS "hstore";' ) from heroku_connect . models import ( TriggerLog , TriggerLogArchive ) for cls in [ TriggerLog , TriggerLogArchive ] : editor . create_model ( cls ) return True | Create Heroku Connect schema . | 236 | 6 |
9,039 | def get_connections ( app ) : payload = { 'app' : app } url = os . path . join ( settings . HEROKU_CONNECT_API_ENDPOINT , 'connections' ) response = requests . get ( url , params = payload , headers = _get_authorization_headers ( ) ) response . raise_for_status ( ) return response . json ( ) [ 'results' ] | Return all Heroku Connect connections setup with the given application . | 91 | 12 |
9,040 | def get_connection ( connection_id , deep = False ) : url = os . path . join ( settings . HEROKU_CONNECT_API_ENDPOINT , 'connections' , connection_id ) payload = { 'deep' : deep } response = requests . get ( url , params = payload , headers = _get_authorization_headers ( ) ) response . raise_for_status ( ) return response . json ( ) | Get Heroku Connection connection information . | 95 | 7 |
9,041 | def import_mapping ( connection_id , mapping ) : url = os . path . join ( settings . HEROKU_CONNECT_API_ENDPOINT , 'connections' , connection_id , 'actions' , 'import' ) response = requests . post ( url = url , json = mapping , headers = _get_authorization_headers ( ) ) response . raise_for_status ( ) | Import Heroku Connection mapping for given connection . | 89 | 9 |
9,042 | def link_connection_to_account ( app ) : url = os . path . join ( settings . HEROKU_CONNECT_API_ENDPOINT , 'users' , 'me' , 'apps' , app , 'auth' ) response = requests . post ( url = url , headers = _get_authorization_headers ( ) ) response . raise_for_status ( ) | Link the connection to your Heroku user account . | 85 | 10 |
9,043 | def fetch_cvparams_values_from_subel ( base , subelname , paramnames , ns ) : sub_el = basereader . find_element_xpath ( base , subelname , ns ) cvparams = get_all_cvparams ( sub_el , ns ) output = [ ] for param in paramnames : output . append ( fetch_cvparam_value_by_name ( cvparams , param ) ) return output | Searches a base element for subelement by name then takes the cvParams of that subelement and returns the values as a list for the paramnames that match . Value order in list equals input paramnames order . | 99 | 46 |
9,044 | def create_tables ( self , tables ) : cursor = self . get_cursor ( ) for table in tables : columns = mslookup_tables [ table ] try : cursor . execute ( 'CREATE TABLE {0}({1})' . format ( table , ', ' . join ( columns ) ) ) except sqlite3 . OperationalError as error : print ( error ) print ( 'Warning: Table {} already exists in database, will ' 'add to existing tables instead of creating ' 'new.' . format ( table ) ) else : self . conn . commit ( ) | Creates database tables in sqlite lookup db | 124 | 9 |
9,045 | def connect ( self , fn ) : self . conn = sqlite3 . connect ( fn ) cur = self . get_cursor ( ) cur . execute ( 'PRAGMA page_size=4096' ) cur . execute ( 'PRAGMA FOREIGN_KEYS=ON' ) cur . execute ( 'PRAGMA cache_size=10000' ) cur . execute ( 'PRAGMA journal_mode=MEMORY' ) | SQLite connect method initialize db | 96 | 6 |
9,046 | def index_column ( self , index_name , table , column ) : cursor = self . get_cursor ( ) try : cursor . execute ( 'CREATE INDEX {0} on {1}({2})' . format ( index_name , table , column ) ) except sqlite3 . OperationalError as error : print ( error ) print ( 'Skipping index creation and assuming it exists already' ) else : self . conn . commit ( ) | Called by interfaces to index specific column in table | 99 | 10 |
9,047 | def get_sql_select ( self , columns , table , distinct = False ) : sql = 'SELECT {0} {1} FROM {2}' dist = { True : 'DISTINCT' , False : '' } [ distinct ] return sql . format ( dist , ', ' . join ( columns ) , table ) | Creates and returns an SQL SELECT statement | 69 | 8 |
9,048 | def store_many ( self , sql , values ) : cursor = self . get_cursor ( ) cursor . executemany ( sql , values ) self . conn . commit ( ) | Abstraction over executemany method | 39 | 8 |
9,049 | def execute_sql ( self , sql ) : cursor = self . get_cursor ( ) cursor . execute ( sql ) return cursor | Executes SQL and returns cursor for it | 28 | 8 |
9,050 | def get_mzmlfile_map ( self ) : cursor = self . get_cursor ( ) cursor . execute ( 'SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles' ) return { fn : fnid for fnid , fn in cursor . fetchall ( ) } | Returns dict of mzmlfilenames and their db ids | 71 | 15 |
9,051 | def get_spectra_id ( self , fn_id , retention_time = None , scan_nr = None ) : cursor = self . get_cursor ( ) sql = 'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? ' values = [ fn_id ] if retention_time is not None : sql = '{0} AND retention_time=?' . format ( sql ) values . append ( retention_time ) if scan_nr is not None : sql = '{0} AND scan_nr=?' . format ( sql ) values . append ( scan_nr ) cursor . execute ( sql , tuple ( values ) ) return cursor . fetchone ( ) [ 0 ] | Returns spectra id for spectra filename and retention time | 154 | 11 |
9,052 | def to_string_monkey ( df , highlight_cols = None , latex = False ) : try : import pandas as pd import utool as ut import numpy as np import six if isinstance ( highlight_cols , six . string_types ) and highlight_cols == 'all' : highlight_cols = np . arange ( len ( df . columns ) ) # kwds = dict(buf=None, columns=None, col_space=None, header=True, # index=True, na_rep='NaN', formatters=None, # float_format=None, sparsify=None, index_names=True, # justify=None, line_width=None, max_rows=None, # max_cols=None, show_dimensions=False) # self = pd.formats.format.DataFrameFormatter(df, **kwds) try : self = pd . formats . format . DataFrameFormatter ( df ) except AttributeError : self = pd . io . formats . format . DataFrameFormatter ( df ) self . highlight_cols = highlight_cols def monkey ( self ) : return monkey_to_str_columns ( self , latex = latex ) ut . inject_func_as_method ( self , monkey , '_to_str_columns' , override = True , force = True ) def strip_ansi ( text ) : import re ansi_escape = re . compile ( r'\x1b[^m]*m' ) return ansi_escape . sub ( '' , text ) def justify_ansi ( self , texts , max_len , mode = 'right' ) : if mode == 'left' : return [ x . ljust ( max_len + ( len ( x ) - len ( strip_ansi ( x ) ) ) ) for x in texts ] elif mode == 'center' : return [ x . center ( max_len + ( len ( x ) - len ( strip_ansi ( x ) ) ) ) for x in texts ] else : return [ x . rjust ( max_len + ( len ( x ) - len ( strip_ansi ( x ) ) ) ) for x in texts ] ut . inject_func_as_method ( self . adj , justify_ansi , 'justify' , override = True , force = True ) def strlen_ansii ( self , text ) : return pd . compat . strlen ( strip_ansi ( text ) , encoding = self . encoding ) ut . inject_func_as_method ( self . adj , strlen_ansii , 'len' , override = True , force = True ) if False : strlen = ut . partial ( strlen_ansii , self . adj ) # NOQA justfunc = ut . partial ( justify_ansi , self . 
adj ) # NOQA # Essentially what to_string does strcols = monkey_to_str_columns ( self ) # texts = strcols[2] space = 1 lists = strcols str_ = self . adj . adjoin ( space , * lists ) print ( str_ ) print ( strip_ansi ( str_ ) ) self . to_string ( ) result = self . buf . getvalue ( ) # hack because adjoin is not working correctly with injected strlen result = '\n' . join ( [ x . rstrip ( ) for x in result . split ( '\n' ) ] ) return result except Exception as ex : ut . printex ( 'pandas monkey-patch is broken: {}' . format ( str ( ex ) ) , tb = True , iswarning = True ) return str ( df ) | monkey patch to pandas to highlight the maximum value in specified cols of a row | 810 | 17 |
9,053 | def translate ( value ) : if isinstance ( value , BaseValidator ) : return value if value is None : return Anything ( ) if isinstance ( value , type ) : return IsA ( value ) if type ( value ) in compat . func_types : real_value = value ( ) return IsA ( type ( real_value ) , default = real_value ) if isinstance ( value , list ) : if value == [ ] : # no inner spec, just an empty list as the default value return IsA ( list ) elif len ( value ) == 1 : # the only item as spec for each item of the collection return ListOf ( translate ( value [ 0 ] ) ) else : raise StructureSpecificationError ( 'Expected a list containing exactly 1 item; ' 'got {cnt}: {spec}' . format ( cnt = len ( value ) , spec = value ) ) if isinstance ( value , dict ) : if not value : return IsA ( dict ) items = [ ] for k , v in value . items ( ) : if isinstance ( k , BaseValidator ) : k_validator = k else : k_validator = translate ( k ) default = k_validator . get_default_for ( None ) if default is not None : k_validator = Equals ( default ) v_validator = translate ( v ) items . append ( ( k_validator , v_validator ) ) return DictOf ( items ) return IsA ( type ( value ) , default = value ) | Translates given schema from pythonic syntax to a validator . | 329 | 14 |
9,054 | def _merge ( self , value ) : if value is not None and not isinstance ( value , dict ) : # bogus value; will not pass validation but should be preserved return value if not self . _pairs : return { } collected = { } # collected.update(value) for k_validator , v_validator in self . _pairs : k_default = k_validator . get_default_for ( None ) if k_default is None : continue # even None is ok if value : v_for_this_k = value . get ( k_default ) else : v_for_this_k = None v_default = v_validator . get_default_for ( v_for_this_k ) collected . update ( { k_default : v_default } ) if value : for k , v in value . items ( ) : if k not in collected : collected [ k ] = v return collected | Returns a dictionary based on value with each value recursively merged with spec . | 203 | 16 |
9,055 | def handle_code ( code ) : code_keys = [ ] # it is a known code (e.g. {DOWN}, {ENTER}, etc) if code in CODES : code_keys . append ( VirtualKeyAction ( CODES [ code ] ) ) # it is an escaped modifier e.g. {%}, {^}, {+} elif len ( code ) == 1 : code_keys . append ( KeyAction ( code ) ) # it is a repetition or a pause {DOWN 5}, {PAUSE 1.3} elif ' ' in code : to_repeat , count = code . rsplit ( None , 1 ) if to_repeat == "PAUSE" : try : pause_time = float ( count ) except ValueError : raise KeySequenceError ( 'invalid pause time %s' % count ) code_keys . append ( PauseAction ( pause_time ) ) else : try : count = int ( count ) except ValueError : raise KeySequenceError ( 'invalid repetition count %s' % count ) # If the value in to_repeat is a VK e.g. DOWN # we need to add the code repeated if to_repeat in CODES : code_keys . extend ( [ VirtualKeyAction ( CODES [ to_repeat ] ) ] * count ) # otherwise parse the keys and we get back a KeyAction else : to_repeat = parse_keys ( to_repeat ) if isinstance ( to_repeat , list ) : keys = to_repeat * count else : keys = [ to_repeat ] * count code_keys . extend ( keys ) else : raise RuntimeError ( "Unknown code: %s" % code ) return code_keys | Handle a key or sequence of keys in braces | 367 | 9 |
9,056 | def parse_keys ( string , with_spaces = False , with_tabs = False , with_newlines = False , modifiers = None ) : keys = [ ] if not modifiers : modifiers = [ ] index = 0 while index < len ( string ) : c = string [ index ] index += 1 # check if one of CTRL, SHIFT, ALT has been pressed if c in MODIFIERS . keys ( ) : modifier = MODIFIERS [ c ] # remember that we are currently modified modifiers . append ( modifier ) # hold down the modifier key keys . append ( VirtualKeyAction ( modifier , up = False ) ) if DEBUG : print ( "MODS+" , modifiers ) continue # Apply modifiers over a bunch of characters (not just one!) elif c == "(" : # find the end of the bracketed text end_pos = string . find ( ")" , index ) if end_pos == - 1 : raise KeySequenceError ( '`)` not found' ) keys . extend ( parse_keys ( string [ index : end_pos ] , modifiers = modifiers ) ) index = end_pos + 1 # Escape or named key elif c == "{" : # We start searching from index + 1 to account for the case {}} end_pos = string . find ( "}" , index + 1 ) if end_pos == - 1 : raise KeySequenceError ( '`}` not found' ) code = string [ index : end_pos ] index = end_pos + 1 keys . extend ( handle_code ( code ) ) # unmatched ")" elif c == ')' : raise KeySequenceError ( '`)` should be preceeded by `(`' ) # unmatched "}" elif c == '}' : raise KeySequenceError ( '`}` should be preceeded by `{`' ) # so it is a normal character else : # don't output white space unless flags to output have been set if ( c == ' ' and not with_spaces or c == '\t' and not with_tabs or c == '\n' and not with_newlines ) : continue # output nuewline if c in ( '~' , '\n' ) : keys . append ( VirtualKeyAction ( CODES [ "ENTER" ] ) ) # safest are the virtual keys - so if our key is a virtual key # use a VirtualKeyAction #if ord(c) in CODE_NAMES: # keys.append(VirtualKeyAction(ord(c))) elif modifiers : keys . append ( EscapedKeyAction ( c ) ) else : keys . 
append ( KeyAction ( c ) ) # as we have handled the text - release the modifiers while modifiers : if DEBUG : print ( "MODS-" , modifiers ) keys . append ( VirtualKeyAction ( modifiers . pop ( ) , down = False ) ) # just in case there were any modifiers left pressed - release them while modifiers : keys . append ( VirtualKeyAction ( modifiers . pop ( ) , down = False ) ) return keys | Return the parsed keys | 656 | 4 |
9,057 | def SendKeys ( keys , pause = 0.05 , with_spaces = False , with_tabs = False , with_newlines = False , turn_off_numlock = True ) : keys = parse_keys ( keys , with_spaces , with_tabs , with_newlines ) for k in keys : k . Run ( ) time . sleep ( pause ) | Parse the keys and type them | 82 | 7 |
9,058 | def main ( ) : actions = """ {LWIN} {PAUSE .25} r {PAUSE .25} Notepad.exe{ENTER} {PAUSE 1} Hello{SPACE}World! {PAUSE 1} %{F4} {PAUSE .25} n """ SendKeys ( actions , pause = .1 ) keys = parse_keys ( actions ) for k in keys : print ( k ) k . Run ( ) time . sleep ( .1 ) test_strings = [ "\n" "(aa)some text\n" , "(a)some{ }text\n" , "(b)some{{}text\n" , "(c)some{+}text\n" , "(d)so%me{ab 4}text" , "(e)so%me{LEFT 4}text" , "(f)so%me{ENTER 4}text" , "(g)so%me{^aa 4}text" , "(h)some +(asdf)text" , "(i)some %^+(asdf)text" , "(j)some %^+a text+" , "(k)some %^+a tex+{&}" , "(l)some %^+a tex+(dsf)" , "" , ] for s in test_strings : print ( repr ( s ) ) keys = parse_keys ( s , with_newlines = True ) print ( keys ) for k in keys : k . Run ( ) time . sleep ( .1 ) print ( ) | Send some test strings | 329 | 4 |
9,059 | def GetInput ( self ) : actions = 1 # if both up and down if self . up and self . down : actions = 2 inputs = ( INPUT * actions ) ( ) vk , scan , flags = self . _get_key_info ( ) for inp in inputs : inp . type = INPUT_KEYBOARD inp . _ . ki . wVk = vk inp . _ . ki . wScan = scan inp . _ . ki . dwFlags |= flags # if we are releasing - then let it up if self . up : inputs [ - 1 ] . _ . ki . dwFlags |= KEYEVENTF_KEYUP return inputs | Build the INPUT structure for the action | 146 | 8 |
9,060 | def Run ( self ) : inputs = self . GetInput ( ) return SendInput ( len ( inputs ) , ctypes . byref ( inputs ) , ctypes . sizeof ( INPUT ) ) | Execute the action | 41 | 4 |
9,061 | def _get_down_up_string ( self ) : down_up = "" if not ( self . down and self . up ) : if self . down : down_up = "down" elif self . up : down_up = "up" return down_up | Return a string that will show whether the string is up or down | 59 | 13 |
9,062 | def key_description ( self ) : vk , scan , flags = self . _get_key_info ( ) desc = '' if vk : if vk in CODE_NAMES : desc = CODE_NAMES [ vk ] else : desc = "VK %d" % vk else : desc = "%s" % self . key return desc | Return a description of the key | 76 | 6 |
9,063 | def _get_key_info ( self ) : # copied more or less verbatim from # http://www.pinvoke.net/default.aspx/user32.sendinput if ( ( self . key >= 33 and self . key <= 46 ) or ( self . key >= 91 and self . key <= 93 ) ) : flags = KEYEVENTF_EXTENDEDKEY else : flags = 0 # This works for %{F4} - ALT + F4 #return self.key, 0, 0 # this works for Tic Tac Toe i.e. +{RIGHT} SHIFT + RIGHT return self . key , MapVirtualKey ( self . key , 0 ) , flags | Virtual keys have extended flag set | 149 | 6 |
9,064 | def _get_key_info ( self ) : vkey_scan = LoByte ( VkKeyScan ( self . key ) ) return ( vkey_scan , MapVirtualKey ( vkey_scan , 0 ) , 0 ) | EscapedKeyAction doesn t send it as Unicode and the vk and scan code are generated differently | 50 | 20 |
9,065 | def setup ( self ) : self . template = self . _generate_inline_policy ( ) if self . dry_run is not True : self . client = self . _get_client ( ) username = self . _get_username_for_key ( ) policy_document = self . _generate_inline_policy ( ) self . _attach_inline_policy ( username , policy_document ) pass | Method runs the plugin attaching policies to the user in question | 88 | 11 |
9,066 | def _get_policies ( self ) : username = self . _get_username_for_key ( ) policies = self . client . list_user_policies ( UserName = username ) return policies | Returns all the policy names for a given user | 46 | 9 |
9,067 | def _get_username_for_key ( self ) : response = self . client . get_access_key_last_used ( AccessKeyId = self . compromised_resource [ 'access_key_id' ] ) username = response [ 'UserName' ] return username | Find the user for a given access key | 59 | 8 |
9,068 | def _generate_inline_policy ( self ) : template_name = self . _locate_file ( 'deny-sts-before-time.json.j2' ) template_file = open ( template_name ) template_contents = template_file . read ( ) template_file . close ( ) jinja_template = Template ( template_contents ) policy_document = jinja_template . render ( before_date = self . _get_date ( ) ) return policy_document | Renders a policy from a jinja template | 111 | 10 |
9,069 | def _attach_inline_policy ( self , username , policy_document ) : response = self . client . put_user_policy ( UserName = username , PolicyName = "threatresponse-temporal-key-revocation" , PolicyDocument = policy_document ) logger . info ( 'An inline policy has been attached for' ' {u} revoking sts tokens.' . format ( u = username ) ) return response | Attaches the policy to the user | 89 | 7 |
9,070 | def _locate_file ( self , pattern , root = os . path . dirname ( 'revokests_key.py' ) ) : for path , dirs , files in os . walk ( os . path . abspath ( root ) ) : for filename in fnmatch . filter ( files , pattern ) : return os . path . join ( path , filename ) | Locate all files matching supplied filename pattern in and below | 79 | 11 |
9,071 | def generate_tsv_pep_protein_quants ( fns ) : for fn in fns : header = get_tsv_header ( fn ) for pquant in generate_split_tsv_lines ( fn , header ) : yield os . path . basename ( fn ) , header , pquant | Unlike generate_tsv_lines_multifile this generates tsv lines from multiple files that may have different headers . Yields fn header as well as quant data for each protein quant | 68 | 39 |
9,072 | def mzmlfn_kronikfeature_generator ( mzmlfns , kronikfns ) : for mzmlfn , kronikfn in zip ( mzmlfns , kronikfns ) : for quant_el in generate_kronik_feats ( kronikfn ) : yield os . path . basename ( mzmlfn ) , quant_el | Generates tuples of spectra filename and corresponding output features from kronik | 93 | 16 |
9,073 | def generate_split_tsv_lines ( fn , header ) : for line in generate_tsv_psms_line ( fn ) : yield { x : y for ( x , y ) in zip ( header , line . strip ( ) . split ( '\t' ) ) } | Returns dicts with header - keys and psm statistic values | 62 | 12 |
9,074 | def get_proteins_from_psm ( line ) : proteins = line [ mzidtsvdata . HEADER_PROTEIN ] . split ( ';' ) outproteins = [ ] for protein in proteins : prepost_protein = re . sub ( '\(pre=.*post=.*\)' , '' , protein ) . strip ( ) outproteins . append ( prepost_protein ) return outproteins | From a line return list of proteins reported by Mzid2TSV . When unrolled lines are given this returns the single protein from the line . | 98 | 31 |
9,075 | def aug_sysargv ( cmdstr ) : import shlex argv = shlex . split ( cmdstr ) sys . argv . extend ( argv ) | DEBUG FUNC modify argv to look like you ran a command | 35 | 13 |
9,076 | def get_module_verbosity_flags ( * labels ) : verbose_prefix_list = [ '--verbose-' , '--verb' , '--verb-' ] veryverbose_prefix_list = [ '--veryverbose-' , '--veryverb' , '--veryverb-' ] verbose_flags = tuple ( [ prefix + lbl for prefix , lbl in itertools . product ( verbose_prefix_list , labels ) ] ) veryverbose_flags = tuple ( [ prefix + lbl for prefix , lbl in itertools . product ( veryverbose_prefix_list , labels ) ] ) veryverbose_module = get_argflag ( veryverbose_flags ) or VERYVERBOSE verbose_module = ( get_argflag ( verbose_flags ) or veryverbose_module or VERBOSE ) if veryverbose_module : verbose_module = 2 return verbose_module , veryverbose_module | checks for standard flags for enableing module specific verbosity | 211 | 11 |
9,077 | def get_argflag ( argstr_ , default = False , help_ = '' , return_specified = None , need_prefix = True , return_was_specified = False , argv = None , debug = None , * * kwargs ) : if argv is None : argv = sys . argv assert isinstance ( default , bool ) , 'default must be boolean' argstr_list = meta_util_iter . ensure_iterable ( argstr_ ) #if VERYVERBOSE: # print('[util_arg] checking argstr_list=%r' % (argstr_list,)) # arg registration _register_arg ( argstr_list , bool , default , help_ ) parsed_val = default was_specified = False if debug is None : debug = DEBUG # Check environment variables for default as well as argv import os #""" #set UTOOL_NOCNN=True #export UTOOL_NOCNN True #""" #argv_orig = argv[:] # HACK: make this not happen very time you loop for key , val in os . environ . items ( ) : key = key . upper ( ) sentinal = 'UTOOL_' if key . startswith ( sentinal ) : flag = '--' + key [ len ( sentinal ) : ] . lower ( ) . replace ( '_' , '-' ) if val . upper ( ) in [ 'TRUE' , 'ON' ] : pass elif val . upper ( ) in [ 'FALSE' , 'OFF' ] : continue else : continue #flag += '=False' new_argv = [ flag ] argv = argv [ : ] + new_argv if debug : print ( 'ENV SPECIFIED COMMAND LINE' ) print ( 'argv.extend(new_argv=%r)' % ( new_argv , ) ) for argstr in argstr_list : #if VERYVERBOSE: # print('[util_arg] * checking argstr=%r' % (argstr,)) if not ( argstr . find ( '--' ) == 0 or ( argstr . find ( '-' ) == 0 and len ( argstr ) == 2 ) ) : raise AssertionError ( 'Invalid argstr: %r' % ( argstr , ) ) if not need_prefix : noprefix = argstr . replace ( '--' , '' ) if noprefix in argv : parsed_val = True was_specified = True break #if argstr.find('--no') == 0: #argstr = argstr.replace('--no', '--') noarg = argstr . replace ( '--' , '--no' ) if argstr in argv : parsed_val = True was_specified = True #if VERYVERBOSE: # print('[util_arg] * ...WAS_SPECIFIED. 
AND PARSED') break elif noarg in argv : parsed_val = False was_specified = True #if VERYVERBOSE: # print('[util_arg] * ...WAS_SPECIFIED. AND NOT PARSED') break elif argstr + '=True' in argv : parsed_val = True was_specified = True break elif argstr + '=False' in argv : parsed_val = False was_specified = True break if return_specified is None : return_specified = return_was_specified if return_specified : return parsed_val , was_specified else : return parsed_val | Checks if the commandline has a flag or a corresponding noflag | 773 | 14 |
9,078 | def get_arg_dict ( argv = None , prefix_list = [ '--' ] , type_hints = { } ) : if argv is None : argv = sys . argv arg_dict = { } def startswith_prefix ( arg ) : return any ( [ arg . startswith ( prefix ) for prefix in prefix_list ] ) def argx_has_value ( argv , argx ) : # Check if has a value if argv [ argx ] . find ( '=' ) > - 1 : return True if argx + 1 < len ( argv ) and not startswith_prefix ( argv [ argx + 1 ] ) : return True return False def get_arg_value ( argv , argx , argname ) : if argv [ argx ] . find ( '=' ) > - 1 : return '=' . join ( argv [ argx ] . split ( '=' ) [ 1 : ] ) else : type_ = type_hints . get ( argname , None ) if type_ is None : return argv [ argx + 1 ] else : return parse_arglist_hack ( argx , argv = argv ) for argx in range ( len ( argv ) ) : arg = argv [ argx ] for prefix in prefix_list : if arg . startswith ( prefix ) : argname = arg [ len ( prefix ) : ] if argx_has_value ( argv , argx ) : if arg . find ( '=' ) > - 1 : argname = arg [ len ( prefix ) : arg . find ( '=' ) ] argvalue = get_arg_value ( argv , argx , argname ) arg_dict [ argname ] = argvalue else : arg_dict [ argname ] = True break return arg_dict | r Yet another way for parsing args | 399 | 7 |
9,079 | def argv_flag_dec ( * argin , * * kwargs ) : kwargs = kwargs . copy ( ) kwargs [ 'default' ] = kwargs . get ( 'default' , False ) from utool import util_decor @ util_decor . ignores_exc_tb ( outer_wrapper = False ) def wrap_argv_flag_dec ( func ) : return __argv_flag_dec ( func , * * kwargs ) assert len ( argin ) < 2 , 'specify 0 or 1 args' if len ( argin ) == 1 and util_type . is_funclike ( argin [ 0 ] ) : func = argin [ 0 ] return wrap_argv_flag_dec ( func ) else : return wrap_argv_flag_dec | Decorators which control program flow based on sys . argv the decorated function does not execute without its corresponding flag | 181 | 23 |
9,080 | def __argv_flag_dec ( func , default = False , quiet = QUIET , indent = False ) : from utool import util_decor flagname = meta_util_six . get_funcname ( func ) if flagname . find ( 'no' ) == 0 : flagname = flagname [ 2 : ] flags = ( '--' + flagname . replace ( '_' , '-' ) , '--' + flagname , ) @ util_decor . ignores_exc_tb ( outer_wrapper = False ) def GaurdWrapper ( * args , * * kwargs ) : from utool import util_print # FIXME: the --print-all is a hack default_ = kwargs . pop ( 'default' , default ) alias_flags = kwargs . pop ( 'alias_flags' , [ ] ) is_flagged = ( get_argflag ( flags , default_ ) or get_argflag ( '--print-all' ) or any ( [ get_argflag ( _ ) for _ in alias_flags ] ) ) if flagname in kwargs : is_flagged = kwargs . pop ( flagname ) if is_flagged : func_label = flags [ 0 ] . replace ( '--' , '' ) . replace ( 'print-' , '' ) # print('') print ( '\n+ --- ' + func_label + ' ___' ) use_indent = indent is not False if indent is True : indent_ = '[%s]' % func_label else : indent_ = indent with util_print . Indenter ( indent_ , enabled = use_indent ) : ret = func ( * args , * * kwargs ) print ( 'L ___ ' + func_label + '___\n' ) return ret else : PRINT_DISABLED_FLAGDEC = not get_argflag ( '--noinform' , help_ = 'does not print disabled flag decorators' ) if not quiet and PRINT_DISABLED_FLAGDEC : #print('\n~~~ %s ~~~' % flag) print ( '~~~ %s ~~~' % flags [ 0 ] ) meta_util_six . set_funcname ( GaurdWrapper , meta_util_six . get_funcname ( func ) ) return GaurdWrapper | Logic for controlling if a function gets called based on command line | 515 | 13 |
9,081 | def get_argv_tail ( scriptname , prefer_main = None , argv = None ) : if argv is None : argv = sys . argv import utool as ut modname = ut . get_argval ( '-m' , help_ = 'specify module name to profile' , argv = argv ) if modname is not None : # hack to account for -m scripts modpath = ut . get_modpath ( modname , prefer_main = prefer_main ) argvx = argv . index ( modname ) + 1 argv_tail = [ modpath ] + argv [ argvx : ] else : try : argvx = argv . index ( scriptname ) except ValueError : for argvx , arg in enumerate ( argv ) : # HACK if scriptname in arg : break argv_tail = argv [ ( argvx + 1 ) : ] return argv_tail | r gets the rest of the arguments after a script has been invoked hack . accounts for python - m scripts . | 208 | 22 |
9,082 | def get_cmdline_varargs ( argv = None ) : if argv is None : argv = sys . argv scriptname = argv [ 0 ] if scriptname == '' : # python invoked by iteself pos_start = 0 pos_end = 0 else : pos_start = pos_end = 1 for idx in range ( pos_start , len ( argv ) ) : if argv [ idx ] . startswith ( '-' ) : pos_end = idx break else : pos_end = len ( argv ) cmdline_varargs = argv [ pos_start : pos_end ] return cmdline_varargs | Returns positional args specified directly after the scriptname and before any args starting with - on the commandline . | 144 | 21 |
9,083 | def argval ( key , default = None , type = None , smartcast = True , return_exists = False , argv = None ) : defaultable_types = ( tuple , list , int , float ) if type is None and isinstance ( default , defaultable_types ) : type = builtins . type ( default ) return get_argval ( key , type_ = type , default = default , return_was_specified = return_exists , smartcast = smartcast , argv = argv ) | alias for get_argval | 111 | 6 |
9,084 | def plot_real_feature ( df , feature_name , bins = 50 , figsize = ( 15 , 15 ) ) : ix_negative_target = df [ df . target == 0 ] . index ix_positive_target = df [ df . target == 1 ] . index plt . figure ( figsize = figsize ) ax_overall_dist = plt . subplot2grid ( ( 3 , 2 ) , ( 0 , 0 ) , colspan = 2 ) ax_target_conditional_dist = plt . subplot2grid ( ( 3 , 2 ) , ( 1 , 0 ) , colspan = 2 ) ax_botplot = plt . subplot2grid ( ( 3 , 2 ) , ( 2 , 0 ) ) ax_violin_plot = plt . subplot2grid ( ( 3 , 2 ) , ( 2 , 1 ) ) ax_overall_dist . set_title ( 'Distribution of {}' . format ( feature_name ) , fontsize = 16 ) sns . distplot ( df [ feature_name ] , bins = 50 , ax = ax_overall_dist ) sns . distplot ( df . loc [ ix_positive_target ] [ feature_name ] , bins = bins , ax = ax_target_conditional_dist , label = 'Positive Target' ) sns . distplot ( df . loc [ ix_negative_target ] [ feature_name ] , bins = bins , ax = ax_target_conditional_dist , label = 'Negative Target' ) ax_target_conditional_dist . legend ( loc = 'upper right' , prop = { 'size' : 14 } ) sns . boxplot ( y = feature_name , x = 'target' , data = df , ax = ax_botplot ) sns . violinplot ( y = feature_name , x = 'target' , data = df , ax = ax_violin_plot ) plt . show ( ) | Plot the distribution of a real - valued feature conditioned by the target . | 432 | 14 |
9,085 | def plot_pair ( df , feature_name_1 , feature_name_2 , kind = 'scatter' , alpha = 0.01 , * * kwargs ) : plt . figure ( ) sns . jointplot ( feature_name_1 , feature_name_2 , df , alpha = alpha , kind = kind , * * kwargs ) plt . show ( ) | Plot a scatterplot of two features against one another and calculate Pearson correlation coefficient . | 85 | 16 |
9,086 | def plot_feature_correlation_heatmap ( df , features , font_size = 9 , figsize = ( 15 , 15 ) , save_filename = None ) : features = features [ : ] features += [ 'target' ] mcorr = df [ features ] . corr ( ) mask = np . zeros_like ( mcorr , dtype = np . bool ) mask [ np . triu_indices_from ( mask ) ] = True cmap = sns . diverging_palette ( 220 , 10 , as_cmap = True ) fig = plt . figure ( figsize = figsize ) heatmap = sns . heatmap ( mcorr , mask = mask , cmap = cmap , square = True , annot = True , fmt = '0.2f' , annot_kws = { 'size' : font_size } , ) heatmap . tick_params ( axis = 'both' , which = 'major' , labelsize = font_size ) heatmap . tick_params ( axis = 'both' , which = 'minor' , labelsize = font_size ) heatmap . set_xticklabels ( features , rotation = 90 ) heatmap . set_yticklabels ( reversed ( features ) ) plt . show ( ) if save_filename is not None : fig . savefig ( save_filename , dpi = 300 ) | Plot a correlation heatmap between every feature pair . | 304 | 10 |
def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)):
    """Plot a scatterplot matrix for a list of features colored by target value.

    :param df: DataFrame containing the features and a ``target`` column.
    :param features: Sequence of feature column names (not mutated).
    :param downsample_frac: Optional fraction in (0, 1]; when given, the
        frame is randomly downsampled before plotting to keep it fast.
    :param figsize: Size of the figure.
    """
    if downsample_frac:
        df = df.sample(frac=downsample_frac)
    # BUG FIX: pairplot needs the hue column inside the selected frame;
    # selecting df[features] alone raised when 'target' was not listed
    # in ``features``.
    columns = list(features)
    if 'target' not in columns:
        columns.append('target')
    plt.figure(figsize=figsize)
    sns.pairplot(df[columns], hue='target')
    plt.show()
def process_nested_tags(self, node, tag=''):
    """Process the child tags of *node*.

    Each child is pushed onto ``xml_node_stack`` (used for error
    reporting) while it is handled, then popped again.  Children whose
    tag has a registered parser and is valid under the current context
    tag are dispatched through ``tag_parse_table``; everything else is
    treated as a component definition.

    :param node: LEMSXMLNode whose children are processed.
    :param tag: Optional context tag overriding ``node.ltag``.
    """
    context_tag = node.ltag if tag == '' else tag.lower()
    for child in node.children:
        # Push the child onto the node stack for error context.
        self.xml_node_stack = [child] + self.xml_node_stack
        child_tag = child.ltag
        is_known = (child_tag in self.tag_parse_table
                    and child_tag in self.valid_children[context_tag])
        if is_known:
            self.tag_parse_table[child_tag](child)
        else:
            self.parse_component_by_typename(child, child.tag)
        # Pop the child again now that it has been handled.
        self.xml_node_stack = self.xml_node_stack[1:]
def parse(self, xmltext):
    """Parse a string containing LEMS XML text.

    :param xmltext: XML document as a string.
    :raises ParseError: If the root element is neither ``<Lems>`` nor
        ``<neuroml>``.
    """
    root = LEMSXMLNode(xe.XML(xmltext))
    if root.ltag not in ('lems', 'neuroml'):
        raise ParseError('<Lems> expected as root element (or even <neuroml>), found: {0}'.format(root.ltag))
    self.process_nested_tags(root)
def raise_error(self, message, *params, **key_params):
    """Raise a ParseError whose message is prefixed with the current
    XML element context (outermost element first).

    :param message: Error description appended after the context path.

    Remaining positional and keyword arguments are forwarded to
    ``ParseError``.

    :raises ParseError: Always.
    """
    def _describe(node):
        # Render one stack entry as '<tag name="..." id="...">'.
        part = '<{0}'.format(node.tag)
        if 'name' in node.lattrib:
            part += ' name=\"{0}\"'.format(node.lattrib['name'])
        if 'id' in node.lattrib:
            part += ' id=\"{0}\"'.format(node.lattrib['id'])
        return part + '>'

    # BUG FIX: the original reversed self.xml_node_stack in place and the
    # restoring reverse() sat unreachably after the raise, leaving the
    # parser's stack permanently reversed.  Work on a reversed copy.
    stack = list(reversed(self.xml_node_stack))
    s = 'Parser error in '
    if len(stack) > 1:
        s += _describe(stack[0])
        for node in stack[1:]:
            s += '.' + _describe(node)
    s += ':\n ' + message
    raise ParseError(s, *params, **key_params)
def parse_component_by_typename(self, node, type_):
    """Parses components defined directly by component name.

    Builds a Component from *node*, attaches it to the current component
    (or to the model when at top level), copies all attributes except
    ``id``/``type`` as parameters, then recurses into the node's children
    with this component as the new context.

    :param node: LEMSXMLNode describing the component.
    :param type_: Suggested type name (always superseded by the node's
        own ``type`` attribute or tag — kept for interface stability).
    """
    comp_id = node.lattrib['id'] if 'id' in node.lattrib else node.tag
    comp_type = node.lattrib['type'] if 'type' in node.lattrib else node.tag
    component = Component(comp_id, comp_type)
    if self.current_component:
        component.set_parent_id(self.current_component.id)
        self.current_component.add_child(component)
    else:
        self.model.add_component(component)
    # Every attribute other than id/type becomes a parameter.
    for key in node.attrib:
        if key.lower() not in ['id', 'type']:
            component.set_parameter(key, node.attrib[key])
    saved_component = self.current_component
    self.current_component = component
    self.process_nested_tags(node, 'component')
    self.current_component = saved_component
def generate_tags_multiple_files(input_files, tag, ignore_tags, ns=None):
    """Calls the xmltag generator for multiple files.

    :param input_files: Iterable of file names to parse in order.
    :param tag: Tag (without namespace) whose elements are yielded.
    :param ignore_tags: Tags that are cleared when encountered.
    :param ns: Optional XML namespace.
    :returns: A single iterator chaining the elements of all files.
    """
    per_file = (generate_xmltags(fn, tag, ignore_tags, ns)
                for fn in input_files)
    return itertools.chain.from_iterable(per_file)
def generate_tags_multiple_files_strings(input_files, ns, tag, ignore_tags):
    """Creates stringified xml output of elements with a certain tag.

    :param input_files: Iterable of file names to parse in order.
    :param ns: XML namespace.
    :param tag: Tag whose elements are stringified and yielded.
    :param ignore_tags: Tags cleared when encountered by the parser.
    """
    elements = generate_tags_multiple_files(input_files, tag, ignore_tags, ns)
    for element in elements:
        yield formatting.string_and_clear(element, ns)
def generate_xmltags(fn, returntag, ignore_tags, ns=None):
    """Base generator for percolator xml psm, peptide, protein output,
    as well as for mzML and mzIdentML.

    Yields every element whose tag matches *returntag*; elements whose
    tag is in *ignore_tags* are cleared as the parser meets them to keep
    memory bounded.

    :param fn: File name to parse incrementally.
    :param returntag: Tag (without namespace) of elements to yield.
    :param ignore_tags: Tags (without namespace) to clear when met.
    :param ns: Optional XML namespace.
    """
    xmlns = create_namespace(ns)
    wanted = '{0}{1}'.format(xmlns, returntag)
    to_clear = set('{0}{1}'.format(xmlns, x) for x in ignore_tags)
    for _, element in etree.iterparse(fn):
        if element.tag == wanted:
            yield element
        elif element.tag in to_clear:
            formatting.clear_el(element)
def add_component_type(self, component_type):
    """Adds a component type to the model.

    Colons in the name (allowed by LEMS) are replaced with underscores,
    and the sanitized name is written back onto the component type
    before it is registered.

    :param component_type: ComponentType to register under its
        (sanitized) name.
    """
    sanitized = component_type.name
    if ':' in sanitized:
        # LEMS permits colons in names; normalize them for lookup.
        sanitized = sanitized.replace(':', '_')
        component_type.name = sanitized
    self.component_types[sanitized] = component_type
def add(self, child):
    """Adds a typed child object to the model.

    Dispatches on the child's type to the matching ``add_*`` method.

    :param child: Include, Dimension, Unit, ComponentType, Component,
        FatComponent or Constant instance.
    :raises ModelError: If the child is of an unsupported type.
    """
    # Order matters: it mirrors the original isinstance chain, so any
    # subclass relationships between these types resolve identically.
    handlers = (
        (Include, self.add_include),
        (Dimension, self.add_dimension),
        (Unit, self.add_unit),
        (ComponentType, self.add_component_type),
        (Component, self.add_component),
        (FatComponent, self.add_fat_component),
        (Constant, self.add_constant),
    )
    for klass, handler in handlers:
        if isinstance(child, klass):
            handler(child)
            return
    raise ModelError('Unsupported child element')
def include_file(self, path, include_dirs=None):
    """Includes a file into the current model.

    The file is looked up at ``path`` itself first, then inside each
    include directory, in order.  A file that was already included is
    skipped silently, so mutually-including files cannot recurse
    forever.  Does nothing when ``include_includes`` is off.

    :param path: Path (possibly relative) of the file to include.
    :param include_dirs: Optional list of directories to search; when
        empty/None the model's own ``include_dirs`` are used.  (Default
        changed from a mutable ``[]`` — behavior is identical.)
    :raises Exception: If the file cannot be found anywhere and
        ``fail_on_missing_includes`` is set.
    """
    if not self.include_includes:
        return
    if self.debug:
        print("------------------ Including a file: %s" % path)
    inc_dirs = include_dirs if include_dirs else self.include_dirs
    parser = LEMSFileParser(self, inc_dirs, self.include_includes)
    # Candidate locations in search order: the bare path first, then
    # the path resolved against each include directory.
    candidates = [path] + [inc_dir + '/' + path for inc_dir in inc_dirs]
    for candidate in candidates:
        if os.access(candidate, os.F_OK):
            if candidate in self.included_files:
                if self.debug:
                    print("Already included: %s" % path)
            else:
                # BUG FIX: use a context manager so the file handle is
                # closed (the original leaked open(path).read()).
                with open(candidate) as f:
                    parser.parse(f.read())
                self.included_files.append(candidate)
            return
    msg = 'Unable to open ' + path
    if self.fail_on_missing_includes:
        raise Exception(msg)
    elif self.debug:
        print(msg)
def import_from_file(self, filepath):
    """Import a model from a file.

    The file's own directory is appended to the configured include
    directories so relative includes resolve next to the file.

    :param filepath: Path of the file to import.
    """
    search_dirs = list(self.include_directories)
    search_dirs.append(dirname(filepath))
    parser = LEMSFileParser(self, search_dirs, self.include_includes)
    with open(filepath) as handle:
        parser.parse(handle.read())
def export_to_dom(self):
    """Exports this model to a DOM.

    Serializes the model (includes, targets, dimensions, units,
    constants, component types and components) to a ``<Lems>`` XML
    string and parses it back into a minidom document.

    :returns: ``xml.dom.minidom`` Document for the whole model.
    """
    ns_template = ('xmlns="http://www.neuroml.org/lems/%s" '
                   'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
                   'xsi:schemaLocation="http://www.neuroml.org/lems/%s %s"')
    namespaces = ns_template % (self.target_lems_version,
                                self.target_lems_version,
                                self.schema_location)
    # Collect fragments and join once instead of repeated concatenation.
    parts = ['<Lems %s>' % namespaces]
    for include in self.includes:
        parts.append(include.toxml())
    for target in self.targets:
        parts.append('<Target component="{0}"/>'.format(target))
    for dimension in self.dimensions:
        parts.append(dimension.toxml())
    for unit in self.units:
        parts.append(unit.toxml())
    for constant in self.constants:
        parts.append(constant.toxml())
    for component_type in self.component_types:
        parts.append(component_type.toxml())
    for component in self.components:
        parts.append(component.toxml())
    parts.append('</Lems>')
    return minidom.parseString(''.join(parts))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.