query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
splits a node from its kids, clearing the kids' parent pointers
def avl_release_kids(node):
    """Detach ``node`` from its two children.

    Clears each child's parent pointer, clears the node's child
    pointers, and resets the node's balance to 0.

    Returns:
        tuple: (node, left, right) -- the detached node and its former
        left and right children (either child may be None).
    """
    left = node.left
    right = node.right
    for child in (left, right):
        if child is not None:
            # child.parent is expected to be ``node`` here
            child.parent = None
    node.balance = 0
    node.left = None
    node.right = None
    return node, left, right
9,200
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L500-L514
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
removes the parent of a child
def avl_release_parent(node):
    """Detach ``node`` from its parent.

    Clears whichever child pointer on the parent references the node,
    resets the node's parent pointer, and recomputes the parent's
    balance from its children's heights.

    Returns:
        tuple: (node, parent) -- parent is None when the node had none.

    Raises:
        AssertionError: if the parent does not actually reference node.
    """
    parent = node.parent
    if parent is None:
        return node, None
    if parent.right is node:
        parent.right = None
    elif parent.left is node:
        parent.left = None
    else:
        raise AssertionError('impossible state')
    node.parent = None
    parent.balance = max(height(parent.right), height(parent.left)) + 1
    return node, parent
9,201
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L517-L531
[ "def", "send_string", "(", "self", ",", "string", ":", "str", ")", ":", "if", "not", "string", ":", "return", "string", "=", "string", ".", "replace", "(", "'\\n'", ",", "\"<enter>\"", ")", "string", "=", "string", ".", "replace", "(", "'\\t'", ",", "\"<tab>\"", ")", "_logger", ".", "debug", "(", "\"Send via event interface\"", ")", "self", ".", "__clearModifiers", "(", ")", "modifiers", "=", "[", "]", "for", "section", "in", "KEY_SPLIT_RE", ".", "split", "(", "string", ")", ":", "if", "len", "(", "section", ")", ">", "0", ":", "if", "Key", ".", "is_key", "(", "section", "[", ":", "-", "1", "]", ")", "and", "section", "[", "-", "1", "]", "==", "'+'", "and", "section", "[", ":", "-", "1", "]", "in", "MODIFIERS", ":", "# Section is a modifier application (modifier followed by '+')", "modifiers", ".", "append", "(", "section", "[", ":", "-", "1", "]", ")", "else", ":", "if", "len", "(", "modifiers", ")", ">", "0", ":", "# Modifiers ready for application - send modified key", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", ",", "modifiers", ")", "modifiers", "=", "[", "]", "else", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", "[", "0", "]", ",", "modifiers", ")", "if", "len", "(", "section", ")", ">", "1", ":", "self", ".", "interface", ".", "send_string", "(", "section", "[", "1", ":", "]", ")", "modifiers", "=", "[", "]", "else", ":", "# Normal string/key operation", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_key", "(", "section", ")", "else", ":", "self", ".", "interface", ".", "send_string", "(", "section", ")", "self", ".", "__reapplyModifiers", "(", ")" ]
Joins two trees t1 and t2 with an intermediate key-value pair
def avl_join(t1, t2, node):
    """Join trees ``t1`` and ``t2`` using ``node`` as the connecting element.

    Dispatches on which inputs are empty and on the height difference
    between the two trees; the taller tree absorbs the join via
    avl_join_dir_recursive, otherwise ``node`` becomes the new top.

    NOTE(review): this is a re-indentation of a flattened one-liner; the
    grouping of the debug-only ascii_tree calls under DEBUG_JOIN is
    inferred -- confirm against the upstream file.
    """
    if DEBUG_JOIN:
        print('-- JOIN node=%r' % (node,))
    if t1 is None and t2 is None:
        # Case 1: both sides empty -- the node alone is the tree.
        if DEBUG_JOIN:
            print('Join Case 1')
        top = node
    elif t1 is None:
        # FIXME keep track of count if possible
        if DEBUG_JOIN:
            print('Join Case 2')
        top = avl_insert_dir(t2, node, 0)
    elif t2 is None:
        if DEBUG_JOIN:
            print('Join Case 3')
        top = avl_insert_dir(t1, node, 1)
    else:
        h1 = height(t1)
        h2 = height(t2)
        if h1 > h2 + 1:
            # Case 4: t1 is strictly taller -- descend along t1's right spine.
            if DEBUG_JOIN:
                print('Join Case 4')
            top = avl_join_dir_recursive(t1, t2, node, 1)
            if DEBUG_JOIN:
                ascii_tree(t1, 'top')
        elif h2 > h1 + 1:
            # Case 5: t2 is strictly taller -- descend along t2's left spine.
            if DEBUG_JOIN:
                print('Join Case 5')
                ascii_tree(t1)
                ascii_tree(t2)
            top = avl_join_dir_recursive(t1, t2, node, 0)
            if DEBUG_JOIN:
                ascii_tree(top)
        else:
            # Case 6: heights within 1 of each other.
            if DEBUG_JOIN:
                print('Join Case 6')
            # Insert at the top of the tree
            top = avl_new_top(t1, t2, node, 0)
    return top
9,202
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L667-L746
[ "def", "write_options_to_YAML", "(", "self", ",", "filename", ")", ":", "fd", "=", "open", "(", "filename", ",", "\"w\"", ")", "yaml", ".", "dump", "(", "_options_to_dict", "(", "self", ".", "gc", ")", ",", "fd", ",", "default_flow_style", "=", "False", ")", "fd", ".", "close", "(", ")" ]
Removes the maximum element from the tree
def avl_split_last(root):
    """Remove and return the maximum (right-most) node of the tree.

    Returns:
        tuple: (new_root, last_node)

    Raises:
        IndexError: if the tree is empty.
    """
    if root is None:
        raise IndexError('Empty tree has no maximum element')
    root, left, right = avl_release_kids(root)
    if right is None:
        # The root itself holds the maximum; the left subtree remains.
        return (left, root)
    new_right, last_node = avl_split_last(right)
    return (avl_join(left, new_right, root), last_node)
9,203
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L749-L766
[ "def", "_save_files", "(", "self", ",", "data", ",", "dtype_out_time", ")", ":", "path", "=", "self", ".", "path_out", "[", "dtype_out_time", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "dir_out", ")", ":", "os", ".", "makedirs", "(", "self", ".", "dir_out", ")", "if", "'reg'", "in", "dtype_out_time", ":", "try", ":", "reg_data", "=", "xr", ".", "open_dataset", "(", "path", ")", "except", "(", "EOFError", ",", "RuntimeError", ",", "IOError", ")", ":", "reg_data", "=", "xr", ".", "Dataset", "(", ")", "reg_data", ".", "update", "(", "data", ")", "data_out", "=", "reg_data", "else", ":", "data_out", "=", "data", "if", "isinstance", "(", "data_out", ",", "xr", ".", "DataArray", ")", ":", "data_out", "=", "xr", ".", "Dataset", "(", "{", "self", ".", "name", ":", "data_out", "}", ")", "data_out", ".", "to_netcdf", "(", "path", ",", "engine", "=", "'netcdf4'", ",", "format", "=", "'NETCDF3_64BIT'", ")" ]
Removes the minimum element from the tree
def avl_split_first(root):
    """Remove and return the minimum (left-most) node of the tree.

    Returns:
        tuple: (new_root, first_node)

    Raises:
        IndexError: if the tree is empty.
    """
    if root is None:
        # BUGFIX: the message previously said "maximum" (copy-pasted from
        # avl_split_last); this function removes the minimum element.
        raise IndexError('Empty tree has no minimum element')
    root, left, right = avl_release_kids(root)
    if left is None:
        # The root itself holds the minimum; the right subtree remains.
        new_root, first_node = right, root
    else:
        new_left, first_node = avl_split_first(left)
        new_root = avl_join(new_left, right, root)
    return (new_root, first_node)
9,204
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L769-L786
[ "def", "_save_files", "(", "self", ",", "data", ",", "dtype_out_time", ")", ":", "path", "=", "self", ".", "path_out", "[", "dtype_out_time", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "dir_out", ")", ":", "os", ".", "makedirs", "(", "self", ".", "dir_out", ")", "if", "'reg'", "in", "dtype_out_time", ":", "try", ":", "reg_data", "=", "xr", ".", "open_dataset", "(", "path", ")", "except", "(", "EOFError", ",", "RuntimeError", ",", "IOError", ")", ":", "reg_data", "=", "xr", ".", "Dataset", "(", ")", "reg_data", ".", "update", "(", "data", ")", "data_out", "=", "reg_data", "else", ":", "data_out", "=", "data", "if", "isinstance", "(", "data_out", ",", "xr", ".", "DataArray", ")", ":", "data_out", "=", "xr", ".", "Dataset", "(", "{", "self", ".", "name", ":", "data_out", "}", ")", "data_out", ".", "to_netcdf", "(", "path", ",", "engine", "=", "'netcdf4'", ",", "format", "=", "'NETCDF3_64BIT'", ")" ]
join two trees without any intermediate key
def avl_join2(t1, t2):
    """Join two trees without an intermediate key.

    The maximum node of ``t1`` is split off and reused as the connecting
    node, so every element of ``t1`` still precedes every element of ``t2``.

    Removed: a debug block guarded by a hard-coded ``debug = 0`` -- it was
    unreachable dead code.

    Returns:
        the root of the joined tree, or None when both inputs are None.
    """
    if t1 is None and t2 is None:
        new_root = None
    elif t2 is None:
        new_root = t1
    elif t1 is None:
        new_root = t2
    else:
        # Use the last (maximum) node of t1 as the connecting node.
        new_left, last_node = avl_split_last(t1)
        new_root = avl_join(new_left, t2, last_node)
    return new_root
9,205
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L789-L831
[ "def", "send_audio_packet", "(", "self", ",", "data", ",", "*", ",", "encode", "=", "True", ")", ":", "self", ".", "checked_add", "(", "'sequence'", ",", "1", ",", "65535", ")", "if", "encode", ":", "encoded_data", "=", "self", ".", "encoder", ".", "encode", "(", "data", ",", "self", ".", "encoder", ".", "SAMPLES_PER_FRAME", ")", "else", ":", "encoded_data", "=", "data", "packet", "=", "self", ".", "_get_voice_packet", "(", "encoded_data", ")", "try", ":", "self", ".", "socket", ".", "sendto", "(", "packet", ",", "(", "self", ".", "endpoint_ip", ",", "self", ".", "voice_port", ")", ")", "except", "BlockingIOError", ":", "log", ".", "warning", "(", "'A packet has been dropped (seq: %s, timestamp: %s)'", ",", "self", ".", "sequence", ",", "self", ".", "timestamp", ")", "self", ".", "checked_add", "(", "'timestamp'", ",", "self", ".", "encoder", ".", "SAMPLES_PER_FRAME", ",", "4294967295", ")" ]
Get a networkx representation of the binary search tree .
def to_networkx(self, labels=None, edge_labels=False):
    """Get a networkx representation of the binary search tree.

    Args:
        labels (list, optional): node attribute names whose string values
            are joined (comma-separated) into a 'label' node attribute.
        edge_labels (bool): if True, tag each edge with 'L' or 'R'.

    Returns:
        networkx.DiGraph: graph keyed by node key; each graph node carries
        a 'value' attribute (and optionally 'label').
    """
    import networkx as nx
    graph = nx.DiGraph()
    for node in self._traverse_nodes():
        u = node.key
        graph.add_node(u)  # Minor redundancy
        # Set node properties
        graph.nodes[u]['value'] = node.value
        if labels is not None:
            label = ','.join([str(getattr(node, k)) for k in labels])
            graph.nodes[u]['label'] = label
        if node.left is not None:
            v = node.left.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                # BUGFIX: ``graph.edge`` was removed in networkx 2.0;
                # use the edges view to set the attribute instead.
                graph.edges[u, v]['label'] = 'L'
        if node.right is not None:
            v = node.right.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                graph.edges[u, v]['label'] = 'R'
    return graph
9,206
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L395-L419
[ "def", "update_classroom", "(", "self", ",", "course", ",", "classroomid", ",", "new_data", ")", ":", "student_list", ",", "tutor_list", ",", "other_students", ",", "_", "=", "self", ".", "get_user_lists", "(", "course", ",", "classroomid", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"tutors\"", "]", ")", "if", "tutor", "in", "tutor_list", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "new_data", "[", "\"students\"", "]", "=", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"students\"", "]", ")", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "students", ".", "append", "(", "student", ")", "else", ":", "if", "student", "in", "other_students", ":", "# Remove user from the other classroom", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "tutor_list", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", 
"student_list", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "map", "(", "str", ".", "strip", ",", "group", "[", "\"students\"", "]", ")", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "classroom", "=", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "classroomid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", ":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "classroom", ",", "errored_students" ]
reconstruct represented tree as a DiGraph to preserve the current rootedness
def repr_tree(self):
    """Reconstruct the represented tree as a DiGraph.

    Builds edges from consecutive values of the euler tour, skipping an
    edge whenever its reverse is already present, which preserves the
    current rootedness.
    """
    import utool as ut
    import networkx as nx
    graph = nx.DiGraph()
    for a, b in ut.itertwo(self.values()):
        if not graph.has_edge(b, a):
            graph.add_edge(a, b)
    return graph
9,207
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L422-L433
[ "def", "get_dbs", "(", ")", ":", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "'DBS'", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "dbs", "=", "response", ".", "content", ".", "decode", "(", "'ascii'", ")", ".", "splitlines", "(", ")", "dbs", "=", "[", "re", ".", "sub", "(", "'\\t{2,}'", ",", "'\\t'", ",", "line", ")", ".", "split", "(", "'\\t'", ")", "for", "line", "in", "dbs", "]", "return", "dbs" ]
Like os . path . join but uses forward slashes on win32
def unixjoin(*args):
    """Like os.path.join but uses forward slashes on win32.

    Mirrors os.path.join semantics for absolute components: everything
    before the last absolute component is discarded.
    """
    abs_positions = [idx for idx, part in enumerate(args) if isabs(part)]
    if abs_positions:
        # Join starting from the last absolute component.
        return '/'.join(args[abs_positions[-1]:])
    return '/'.join(args)
9,208
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/meta_util_path.py#L25-L35
[ "def", "update", "(", "self", ",", "other", ")", ":", "self", ".", "update_ttl", "(", "other", ".", "ttl", ")", "super", "(", "Rdataset", ",", "self", ")", ".", "update", "(", "other", ")" ]
Loops through peptides, storing each sequence mapped to its PSM ids.
def create_merge_psm_map(peptides, ns):
    """Loop through peptides, storing sequences mapped to PSM ids.

    Args:
        peptides: iterable of peptide XML elements.
        ns: XML namespace mapping forwarded to the reader helpers.

    Returns:
        dict: peptide sequence -> list of PSM id strings (deduplicated,
        insertion order preserved).
    """
    psmmap = {}
    for peptide in peptides:
        seq = reader.get_peptide_seq(peptide, ns)
        psm_ids = reader.get_psm_ids_from_peptide(peptide, ns)
        # A dict is used as an ordered set of psm ids; the original code
        # stored inconsistent marker values (1 vs 2) that were never read.
        seq_ids = psmmap.setdefault(seq, {})
        for psm_id in psm_ids:
            seq_ids[psm_id.text] = 1
    return {seq: list(ids) for seq, ids in psmmap.items()}
9,209
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/pycolator/splitmerge.py#L8-L21
[ "def", "weighted_expiration", "(", "weights", ",", "contract_dates", ")", ":", "# NOQA", "cols", "=", "weights", ".", "columns", "weights", "=", "weights", ".", "reset_index", "(", "level", "=", "-", "1", ")", "expiries", "=", "contract_dates", ".", "to_dict", "(", ")", "weights", ".", "loc", "[", ":", ",", "\"expiry\"", "]", "=", "weights", ".", "iloc", "[", ":", ",", "0", "]", ".", "apply", "(", "lambda", "x", ":", "expiries", "[", "x", "]", ")", "diffs", "=", "(", "pd", ".", "DatetimeIndex", "(", "weights", ".", "expiry", ")", "-", "pd", ".", "Series", "(", "weights", ".", "index", ",", "weights", ".", "index", ")", ")", ".", "apply", "(", "lambda", "x", ":", "x", ".", "days", ")", "weights", "=", "weights", ".", "loc", "[", ":", ",", "cols", "]", "wexp", "=", "weights", ".", "mul", "(", "diffs", ",", "axis", "=", "0", ")", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", "return", "wexp" ]
Create a connection pool .
def create_pool_b(
    dsn=None,
    *,
    min_size=10,
    max_size=10,
    max_queries=50000,
    max_inactive_connection_lifetime=300.0,
    setup=None,
    init=None,
    loop=None,
    connection_class=BuildPgConnection,
    **connect_kwargs,
):
    """Create a connection pool.

    Thin wrapper that constructs a :class:`BuildPgPool`, forwarding every
    argument unchanged; ``connection_class`` defaults to
    ``BuildPgConnection``.  Extra keyword arguments are passed through to
    the pool (presumably down to the underlying connect call -- TODO
    confirm against BuildPgPool).
    """
    return BuildPgPool(
        dsn,
        connection_class=connection_class,
        min_size=min_size,
        max_size=max_size,
        max_queries=max_queries,
        loop=loop,
        setup=setup,
        init=init,
        max_inactive_connection_lifetime=max_inactive_connection_lifetime,
        **connect_kwargs,
    )
9,210
https://github.com/samuelcolvin/buildpg/blob/33cccff45279834d02ec7e97d8417da8fd2a875d/buildpg/asyncpg.py#L85-L119
[ "def", "setOverlayTransformTrackedDeviceRelative", "(", "self", ",", "ulOverlayHandle", ",", "unTrackedDevice", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTransformTrackedDeviceRelative", "pmatTrackedDeviceToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "unTrackedDevice", ",", "byref", "(", "pmatTrackedDeviceToOverlayTransform", ")", ")", "return", "result", ",", "pmatTrackedDeviceToOverlayTransform" ]
Adds a runnable component to the list of runnable components in this simulation .
def add_runnable(self, runnable):
    """Register a runnable component in this simulation.

    Args:
        runnable: component exposing a unique ``id`` attribute.

    Raises:
        SimError: if a component with the same id is already registered.
    """
    if runnable.id in self.runnables:
        raise SimError('Duplicate runnable component {0}'.format(runnable.id))
    self.runnables[runnable.id] = runnable
9,211
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/sim/sim.py#L37-L49
[ "def", "decode_iter", "(", "data", ",", "codec_options", "=", "DEFAULT_CODEC_OPTIONS", ")", ":", "if", "not", "isinstance", "(", "codec_options", ",", "CodecOptions", ")", ":", "raise", "_CODEC_OPTIONS_TYPE_ERROR", "position", "=", "0", "end", "=", "len", "(", "data", ")", "-", "1", "while", "position", "<", "end", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", "position", ":", "position", "+", "4", "]", ")", "[", "0", "]", "elements", "=", "data", "[", "position", ":", "position", "+", "obj_size", "]", "position", "+=", "obj_size", "yield", "_bson_to_dict", "(", "elements", ",", "codec_options", ")" ]
Runs the simulation .
def run(self):
    """Run the simulation: initialize, then step until step() returns falsy."""
    self.init_run()
    if self.debug:
        self.dump("AfterInit: ")
    # Each step advances the simulation; the loop body is intentionally empty.
    while self.step():
        pass
9,212
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/sim/sim.py#L86-L97
[ "def", "permission_request_delete_link", "(", "context", ",", "perm", ")", ":", "user", "=", "context", "[", "'request'", "]", ".", "user", "if", "user", ".", "is_authenticated", "(", ")", ":", "link_kwargs", "=", "base_link", "(", "context", ",", "perm", ",", "'authority-delete-permission-request'", ")", "if", "user", ".", "has_perm", "(", "'authority.delete_permission'", ")", ":", "link_kwargs", "[", "'is_requestor'", "]", "=", "False", "return", "link_kwargs", "if", "not", "perm", ".", "approved", "and", "perm", ".", "user", "==", "user", ":", "link_kwargs", "[", "'is_requestor'", "]", "=", "True", "return", "link_kwargs", "return", "{", "'url'", ":", "None", "}" ]
Name of the controller file to be created
def controller_creatr(filename):
    """Create a controller stub file ``<filename>.py`` under ./controller.

    Aborts with an error message when not run inside a bast app, or when
    the controller file already exists.
    """
    if not check():
        click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:controller command')
        return
    path = os.path.abspath('.') + '/controller'
    if not os.path.exists(path):
        os.makedirs(path)
    file_name = str(filename + '.py')
    if os.path.isfile(path + "/" + file_name):
        click.echo(Fore.WHITE + Back.RED + "ERROR: Controller file exists")
        return
    compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass"
    # BUGFIX: use a context manager so the handle is closed even if the
    # write raises (the original left the file open on error).
    with open(path + "/" + file_name, 'w+') as controller_file:
        controller_file.write(compose)
    click.echo(Fore.GREEN + "Controller " + filename + " created successfully")
9,213
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L41-L61
[ "def", "set_info_handler", "(", "codec", ",", "handler", ",", "data", "=", "None", ")", ":", "OPENJP2", ".", "opj_set_info_handler", ".", "argtypes", "=", "[", "CODEC_TYPE", ",", "ctypes", ".", "c_void_p", ",", "ctypes", ".", "c_void_p", "]", "OPENJP2", ".", "opj_set_info_handler", ".", "restype", "=", "check_error", "OPENJP2", ".", "opj_set_info_handler", "(", "codec", ",", "handler", ",", "data", ")" ]
Name of the View File to be created
def view_creatr(filename):
    """Create an empty view template ``<filename>.html`` under public/templates.

    Aborts with an error message when not run inside a bast app.
    """
    if not check():
        click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
        return
    path = os.path.abspath('.') + '/public/templates'
    if not os.path.exists(path):
        os.makedirs(path)
    filename_ = str(filename + ".html").lower()
    # Context manager ensures the handle is closed even if the write fails.
    with open(path + "/" + filename_, 'w+') as view_file:
        view_file.write("")
    # BUGFIX: the original message lacked a space after the filename,
    # printing e.g. "View file foo.htmlcreated in ...".
    click.echo(Fore.GREEN + "View file " + filename_ + " created in public/template folder")
9,214
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L85-L99
[ "def", "indication", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"indication %r\"", ",", "apdu", ")", "if", "self", ".", "state", "==", "IDLE", ":", "self", ".", "idle", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request", "(", "apdu", ")", "elif", "self", ".", "state", "==", "AWAIT_RESPONSE", ":", "self", ".", "await_response", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_RESPONSE", ":", "self", ".", "segmented_response", "(", "apdu", ")", "else", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\" - invalid state\"", ")" ]
Name of the migration file
def migration_creatr(migration_file, create, table):
    """Create a migration file via CreateMigration.

    ``table`` defaults to the snake_cased migration name when not given;
    aborts with an error message when not run inside a bast app.
    """
    if not check():
        click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command')
        return
    creator = CreateMigration()
    if table is None:
        table = snake_case(migration_file)
    created_path = creator.create_file(snake_case(migration_file), table=table, create=create)
    click.echo(Fore.GREEN + 'Migration file created at %s' % created_path)
9,215
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L175-L185
[ "def", "_get_port_speed_price_id", "(", "items", ",", "port_speed", ",", "no_public", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'port_speed'", ":", "continue", "# Check for correct capacity and if the item matches private only", "if", "any", "(", "[", "int", "(", "utils", ".", "lookup", "(", "item", ",", "'capacity'", ")", ")", "!=", "port_speed", ",", "_is_private_port_speed_item", "(", "item", ")", "!=", "no_public", ",", "not", "_is_bonded", "(", "item", ")", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for port speed: '%s'\"", "%", "port_speed", ")" ]
Quit socket server
def quit(self):
    """Quit socket server.

    Sets the quit event when one exists, then waits for the server
    thread to finish.

    NOTE(review): re-indented from a flattened one-liner; whether
    ``self.join()`` was inside the ``if`` in the original is inferred --
    confirm upstream.
    """
    logging.info("quiting sock server")
    quit_event = self.__quit
    if quit_event is not None:
        quit_event.set()
    self.join()
    return
9,216
https://github.com/mbunse/socket_client_server/blob/8e884925cf887d386554c1859f626d8f01bd0036/socket_client_server/socket_client_server.py#L146-L154
[ "def", "get_magnet", "(", "self", ",", "url", ")", ":", "content_most_rated", "=", "requests", ".", "get", "(", "url", ")", "rated_soup", "=", "BeautifulSoup", "(", "content_most_rated", ".", "content", ",", "'lxml'", ")", "if", "self", ".", "page", "==", "'torrent_project'", ":", "self", ".", "magnet", "=", "rated_soup", ".", "find", "(", "'a'", ",", "href", "=", "True", ",", "text", "=", "re", ".", "compile", "(", "'Download'", ")", ")", "[", "'href'", "]", "elif", "self", ".", "page", "==", "'the_pirate_bay'", ":", "self", ".", "magnet", "=", "rated_soup", ".", "find", "(", "'a'", ",", "href", "=", "True", ",", "text", "=", "re", ".", "compile", "(", "'Get this torrent'", ")", ")", "[", "'href'", "]", "elif", "self", ".", "page", "==", "'1337x'", ":", "div1337", "=", "rated_soup", ".", "find", "(", "'div'", ",", "{", "'class'", ":", "'torrent-category-detail'", "}", ")", "self", ".", "magnet", "=", "div1337", ".", "find", "(", "'a'", ",", "href", "=", "re", ".", "compile", "(", "'magnet'", ")", ")", "[", "'href'", "]", "elif", "self", ".", "page", "==", "'isohunt'", ":", "self", ".", "magnet", "=", "rated_soup", ".", "find", "(", "'a'", ",", "href", "=", "re", ".", "compile", "(", "'magnet'", ")", ")", "[", "'href'", "]", "else", ":", "print", "(", "'Wrong page to get magnet!'", ")", "sys", ".", "exit", "(", "1", ")" ]
Searches for quantification columns using pattern and header list . Calls reader function to do regexp . Returns a single column for precursor quant .
def get_quantcols(pattern, oldheader, coltype):
    """Search for quantification columns matching ``pattern`` in ``oldheader``.

    Returns False when no pattern is given.  For coltype 'precur' a single
    matching column is returned via the reader helper; any other coltype
    yields None.
    """
    if pattern is None:
        return False
    result = None
    if coltype == 'precur':
        result = reader.get_cols_in_file(pattern, oldheader, single_col=True)
    return result
9,217
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/psmtopeptable.py#L10-L17
[ "def", "create", "(", "self", ")", ":", "self", ".", "_storage_broker", ".", "create_structure", "(", ")", "self", ".", "_storage_broker", ".", "put_admin_metadata", "(", "self", ".", "_admin_metadata", ")" ]
Parses lists of quantdata and returns maxvalue from them . Strips NA
def get_peptide_quant(quantdata, quanttype):
    """Reduce a list of quant strings to a single value, stripping NA.

    Entries that cannot be parsed as floats (including None) are dropped.
    For quanttype 'precur' the maximum is taken.

    Returns:
        str: the reduced value, or 'NA' when nothing parses.
    """
    reducers = {'precur': max}
    values = []
    for item in quantdata:
        try:
            values.append(float(item))
        except (TypeError, ValueError):
            pass
    if not values:
        return 'NA'
    return str(reducers[quanttype](values))
9,218
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/psmtopeptable.py#L50-L61
[ "def", "synchronize_switch", "(", "self", ",", "switch_ip", ",", "expected_acls", ",", "expected_bindings", ")", ":", "# Get ACL rules and interface mappings from the switch", "switch_acls", ",", "switch_bindings", "=", "self", ".", "_get_dynamic_acl_info", "(", "switch_ip", ")", "# Adjust expected bindings for switch LAG config", "expected_bindings", "=", "self", ".", "adjust_bindings_for_lag", "(", "switch_ip", ",", "expected_bindings", ")", "# Get synchronization commands", "switch_cmds", "=", "list", "(", ")", "switch_cmds", ".", "extend", "(", "self", ".", "get_sync_acl_cmds", "(", "switch_acls", ",", "expected_acls", ")", ")", "switch_cmds", ".", "extend", "(", "self", ".", "get_sync_binding_cmds", "(", "switch_bindings", ",", "expected_bindings", ")", ")", "# Update switch config", "self", ".", "run_openstack_sg_cmds", "(", "switch_cmds", ",", "self", ".", "_switches", ".", "get", "(", "switch_ip", ")", ")" ]
reads csv in unicode
def read_csv(fpath):
    """Read a csv file into a list of unicode rows.

    Args:
        fpath (str): path to the csv file.

    Returns:
        list: rows, each a list of unicode cell strings.
    """
    import csv
    import utool as ut
    # BUGFIX: Python 3's csv module requires text mode (the original
    # opened in 'rb', which raises on py3); newline='' per the csv docs.
    with open(fpath, 'r', newline='') as csvfile:
        row_iter = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))
        row_list = [ut.lmap(ut.ensure_unicode, row) for row in row_iter]
    return row_list
9,219
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_csv.py#L151-L159
[ "def", "newest", "(", "cls", ",", "session", ")", ":", "media_type", "=", "cls", ".", "__name__", ".", "lower", "(", ")", "p", "=", "session", ".", "session", ".", "get", "(", "u'http://myanimelist.net/'", "+", "media_type", "+", "'.php?o=9&c[]=a&c[]=d&cv=2&w=1'", ")", ".", "text", "soup", "=", "utilities", ".", "get_clean_dom", "(", "p", ")", "latest_entry", "=", "soup", ".", "find", "(", "u\"div\"", ",", "{", "u\"class\"", ":", "u\"hoverinfo\"", "}", ")", "if", "not", "latest_entry", ":", "raise", "MalformedMediaPageError", "(", "0", ",", "p", ",", "u\"No media entries found on recently-added page\"", ")", "latest_id", "=", "int", "(", "latest_entry", "[", "u'rel'", "]", "[", "1", ":", "]", ")", "return", "getattr", "(", "session", ",", "media_type", ")", "(", "latest_id", ")" ]
Standalone version of get_caller_name
def get_caller_name(N=0, strict=True):
    """Standalone version of get_caller_name.

    Returns the name of the code object ``N`` frames above the caller.
    When ``N`` is a list/tuple, returns a bracketed chain of names, with
    'X' substituted where frame lookup raises AssertionError.

    Module-level frames ('<module>') are reported by file stem, and
    '__init__' frames are qualified with the package directory name.
    """
    if isinstance(N, (list, tuple)):
        name_list = []
        for N_ in N:
            try:
                name_list.append(get_caller_name(N_))
            except AssertionError:
                name_list.append('X')
        return '[' + ']['.join(name_list) + ']'
    # <get_parent_frame>
    # +2 skips this function's own frame and the caller's frame.
    parent_frame = get_stack_frame(N=N + 2, strict=strict)
    # </get_parent_frame>
    caller_name = parent_frame.f_code.co_name
    if caller_name == '<module>':
        # Use the module file's stem instead of the generic '<module>'.
        co_filename = parent_frame.f_code.co_filename
        caller_name = splitext(split(co_filename)[1])[0]
    if caller_name == '__init__':
        # Qualify __init__ with its package directory name.
        co_filename = parent_frame.f_code.co_filename
        caller_name = basename(dirname(co_filename)) + '.' + caller_name
    return caller_name
9,220
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/meta_util_dbg.py#L27-L47
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Responds to pings from registry_client only if the node_ids present in the ping payload are registered
def _handle_ping ( self , packet , protocol ) : if 'payload' in packet : is_valid_node = True node_ids = list ( packet [ 'payload' ] . values ( ) ) for node_id in node_ids : if self . _repository . get_node ( node_id ) is None : is_valid_node = False break if is_valid_node : self . _pong ( packet , protocol ) else : self . _pong ( packet , protocol )
9,221
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/registry.py#L379-L395
[ "def", "Initialize", "(", "self", ")", ":", "super", "(", "AFF4MemoryStreamBase", ",", "self", ")", ".", "Initialize", "(", ")", "contents", "=", "b\"\"", "if", "\"r\"", "in", "self", ".", "mode", ":", "contents", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "CONTENT", ")", ".", "AsBytes", "(", ")", "try", ":", "if", "contents", "is", "not", "None", ":", "contents", "=", "zlib", ".", "decompress", "(", "contents", ")", "except", "zlib", ".", "error", ":", "pass", "self", ".", "fd", "=", "io", ".", "BytesIO", "(", "contents", ")", "self", ".", "size", "=", "len", "(", "contents", ")", "self", ".", "offset", "=", "0" ]
Merge all psms and peptides
def set_features ( self ) : allpsms_str = readers . generate_psms_multiple_fractions_strings ( self . mergefiles , self . ns ) allpeps = preparation . merge_peptides ( self . mergefiles , self . ns ) self . features = { 'psm' : allpsms_str , 'peptide' : allpeps }
9,222
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/pycolator/splitmerge.py#L94-L99
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(", "color", ",", "six", ".", "string_types", ")", ":", "rgb", "=", "map", "(", "int", ",", "color", ".", "split", "(", "','", ")", ")", "else", ":", "rgb", "=", "color", ".", "Get", "(", ")", "logging", ".", "disable", "(", "logging", ".", "DEBUG", ")", "distances", "=", "[", "color_diff", "(", "rgb", ",", "x", ")", "for", "x", "in", "self", ".", "xlwt_colors", "]", "logging", ".", "disable", "(", "logging", ".", "NOTSET", ")", "result", "=", "distances", ".", "index", "(", "min", "(", "distances", ")", ")", "self", ".", "unused_colors", ".", "discard", "(", "self", ".", "xlwt_colors", "[", "result", "]", ")", "return", "result" ]
r squashes wip messages
def git_sequence_editor_squash ( fpath ) : # print(sys.argv) import utool as ut text = ut . read_from ( fpath ) # print('fpath = %r' % (fpath,)) print ( text ) # Doesnt work because of fixed witdth requirement # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\n') + 'pick ' + # ut.reponamed_field('hash', '[a-z0-9]*') + ' wip') # repl = ('squash ' + ut.bref_field('hash') + ' wip') # import re # new_text = re.sub(search, repl, text, flags=re.MULTILINE) # print(new_text) prev_msg = None prev_dt = None new_lines = [ ] def get_commit_date ( hashid ) : out , err , ret = ut . cmd ( 'git show -s --format=%ci ' + hashid , verbose = False , quiet = True , pad_stdout = False ) # from datetime import datetime from dateutil import parser # print('out = %r' % (out,)) stamp = out . strip ( '\n' ) # print('stamp = %r' % (stamp,)) dt = parser . parse ( stamp ) # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z') # print('dt = %r' % (dt,)) return dt for line in text . split ( '\n' ) : commit_line = line . split ( ' ' ) if len ( commit_line ) < 3 : prev_msg = None prev_dt = None new_lines += [ line ] continue action = commit_line [ 0 ] hashid = commit_line [ 1 ] msg = ' ' . join ( commit_line [ 2 : ] ) try : dt = get_commit_date ( hashid ) except ValueError : prev_msg = None prev_dt = None new_lines += [ line ] continue orig_msg = msg can_squash = action == 'pick' and msg == 'wip' and prev_msg == 'wip' if prev_dt is not None and prev_msg == 'wip' : tdelta = dt - prev_dt # Only squash closely consecutive commits threshold_minutes = 45 td_min = ( tdelta . total_seconds ( ) / 60. ) # print(tdelta) can_squash &= td_min < threshold_minutes msg = msg + ' -- tdelta=%r' % ( ut . get_timedelta_str ( tdelta ) , ) if can_squash : new_line = ' ' . join ( [ 'squash' , hashid , msg ] ) new_lines += [ new_line ] else : new_lines += [ line ] prev_msg = orig_msg prev_dt = dt new_text = '\n' . join ( new_lines ) def get_commit_date ( hashid ) : out = ut . 
cmd ( 'git show -s --format=%ci ' + hashid , verbose = False ) print ( 'out = %r' % ( out , ) ) # print('Dry run') # ut.dump_autogen_code(fpath, new_text) print ( new_text ) ut . write_to ( fpath , new_text , n = None )
9,223
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_git.py#L876-L1016
[ "def", "format_cert_name", "(", "env", "=", "''", ",", "account", "=", "''", ",", "region", "=", "''", ",", "certificate", "=", "None", ")", ":", "cert_name", "=", "None", "if", "certificate", ":", "if", "certificate", ".", "startswith", "(", "'arn'", ")", ":", "LOG", ".", "info", "(", "\"Full ARN provided...skipping lookup.\"", ")", "cert_name", "=", "certificate", "else", ":", "generated_cert_name", "=", "generate_custom_cert_name", "(", "env", ",", "region", ",", "account", ",", "certificate", ")", "if", "generated_cert_name", ":", "LOG", ".", "info", "(", "\"Found generated certificate %s from template\"", ",", "generated_cert_name", ")", "cert_name", "=", "generated_cert_name", "else", ":", "LOG", ".", "info", "(", "\"Using default certificate name logic\"", ")", "cert_name", "=", "(", "'arn:aws:iam::{account}:server-certificate/{name}'", ".", "format", "(", "account", "=", "account", ",", "name", "=", "certificate", ")", ")", "LOG", ".", "debug", "(", "'Certificate name: %s'", ",", "cert_name", ")", "return", "cert_name" ]
DEPRICATE My standard build script names .
def std_build_command ( repo = '.' ) : import utool as ut print ( '+**** stdbuild *******' ) print ( 'repo = %r' % ( repo , ) ) if sys . platform . startswith ( 'win32' ) : # vtool --rebuild-sver didnt work with this line #scriptname = './mingw_build.bat' scriptname = 'mingw_build.bat' else : scriptname = './unix_build.sh' if repo == '' : # default to cwd repo = '.' else : os . chdir ( repo ) ut . assert_exists ( scriptname ) normbuild_flag = '--no-rmbuild' if ut . get_argflag ( normbuild_flag ) : scriptname += ' ' + normbuild_flag # Execute build ut . cmd ( scriptname ) #os.system(scriptname) print ( 'L**** stdbuild *******' )
9,224
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_git.py#L1019-L1047
[ "def", "consumer_commit_for_times", "(", "consumer", ",", "partition_to_offset", ",", "atomic", "=", "False", ")", ":", "no_offsets", "=", "set", "(", ")", "for", "tp", ",", "offset", "in", "six", ".", "iteritems", "(", "partition_to_offset", ")", ":", "if", "offset", "is", "None", ":", "logging", ".", "error", "(", "\"No offsets found for topic-partition {tp}. Either timestamps not supported\"", "\" for the topic {tp}, or no offsets found after timestamp specified, or there is no\"", "\" data in the topic-partition.\"", ".", "format", "(", "tp", "=", "tp", ")", ",", ")", "no_offsets", ".", "add", "(", "tp", ")", "if", "atomic", "and", "len", "(", "no_offsets", ")", ">", "0", ":", "logging", ".", "error", "(", "\"Commit aborted; offsets were not found for timestamps in\"", "\" topics {}\"", ".", "format", "(", "\",\"", ".", "join", "(", "[", "str", "(", "tp", ")", "for", "tp", "in", "no_offsets", "]", ")", ")", ",", ")", "return", "offsets_metadata", "=", "{", "tp", ":", "OffsetAndMetadata", "(", "partition_to_offset", "[", "tp", "]", ".", "offset", ",", "metadata", "=", "None", ")", "for", "tp", "in", "six", ".", "iterkeys", "(", "partition_to_offset", ")", "if", "tp", "not", "in", "no_offsets", "}", "if", "len", "(", "offsets_metadata", ")", "!=", "0", ":", "consumer", ".", "commit", "(", "offsets_metadata", ")" ]
Wait until connection state is no longer IMPORT_CONFIGURATION .
def wait_for_import ( self , connection_id , wait_interval ) : self . stdout . write ( self . style . NOTICE ( 'Waiting for import' ) , ending = '' ) state = utils . ConnectionStates . IMPORT_CONFIGURATION while state == utils . ConnectionStates . IMPORT_CONFIGURATION : # before you get the first state, the API can be a bit behind self . stdout . write ( self . style . NOTICE ( '.' ) , ending = '' ) time . sleep ( wait_interval ) # take a breath try : connection = utils . get_connection ( connection_id ) except requests . HTTPError as e : raise CommandError ( "Failed to fetch connection information." ) from e else : state = connection [ 'state' ] self . stdout . write ( self . style . NOTICE ( ' Done!' ) )
9,225
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/management/commands/import_mappings.py#L76-L100
[ "def", "_CalculateHashesFileEntry", "(", "self", ",", "file_system", ",", "file_entry", ",", "parent_full_path", ",", "output_writer", ")", ":", "# Since every file system implementation can have their own path", "# segment separator we are using JoinPath to be platform and file system", "# type independent.", "full_path", "=", "file_system", ".", "JoinPath", "(", "[", "parent_full_path", ",", "file_entry", ".", "name", "]", ")", "for", "data_stream", "in", "file_entry", ".", "data_streams", ":", "hash_value", "=", "self", ".", "_CalculateHashDataStream", "(", "file_entry", ",", "data_stream", ".", "name", ")", "display_path", "=", "self", ".", "_GetDisplayPath", "(", "file_entry", ".", "path_spec", ",", "full_path", ",", "data_stream", ".", "name", ")", "output_writer", ".", "WriteFileHash", "(", "display_path", ",", "hash_value", "or", "'N/A'", ")", "for", "sub_file_entry", "in", "file_entry", ".", "sub_file_entries", ":", "self", ".", "_CalculateHashesFileEntry", "(", "file_system", ",", "sub_file_entry", ",", "full_path", ",", "output_writer", ")" ]
Method runs the plugin
def setup ( self ) : if self . dry_run is not True : self . client = self . _get_client ( ) self . _disable_access_key ( )
9,226
https://github.com/ThreatResponse/aws_ir_plugins/blob/b5128ef5cbd91fc0b5d55615f1c14cb036ae7c73/aws_ir_plugins/disableaccess_key.py#L29-L33
[ "def", "from_dict", "(", "cls", ",", "json_dict", ")", ":", "json_macaroon", "=", "json_dict", ".", "get", "(", "'m'", ")", "if", "json_macaroon", "is", "None", ":", "# Try the v1 format if we don't have a macaroon field.", "m", "=", "pymacaroons", ".", "Macaroon", ".", "deserialize", "(", "json", ".", "dumps", "(", "json_dict", ")", ",", "json_serializer", ".", "JsonSerializer", "(", ")", ")", "macaroon", "=", "Macaroon", "(", "root_key", "=", "None", ",", "id", "=", "None", ",", "namespace", "=", "legacy_namespace", "(", ")", ",", "version", "=", "_bakery_version", "(", "m", ".", "version", ")", ")", "macaroon", ".", "_macaroon", "=", "m", "return", "macaroon", "version", "=", "json_dict", ".", "get", "(", "'v'", ",", "None", ")", "if", "version", "is", "None", ":", "raise", "ValueError", "(", "'no version specified'", ")", "if", "(", "version", "<", "VERSION_3", "or", "version", ">", "LATEST_VERSION", ")", ":", "raise", "ValueError", "(", "'unknown bakery version {}'", ".", "format", "(", "version", ")", ")", "m", "=", "pymacaroons", ".", "Macaroon", ".", "deserialize", "(", "json", ".", "dumps", "(", "json_macaroon", ")", ",", "json_serializer", ".", "JsonSerializer", "(", ")", ")", "if", "m", ".", "version", "!=", "macaroon_version", "(", "version", ")", ":", "raise", "ValueError", "(", "'underlying macaroon has inconsistent version; '", "'got {} want {}'", ".", "format", "(", "m", ".", "version", ",", "macaroon_version", "(", "version", ")", ")", ")", "namespace", "=", "checkers", ".", "deserialize_namespace", "(", "json_dict", ".", "get", "(", "'ns'", ")", ")", "cdata", "=", "json_dict", ".", "get", "(", "'cdata'", ",", "{", "}", ")", "caveat_data", "=", "{", "}", "for", "id64", "in", "cdata", ":", "id", "=", "b64decode", "(", "id64", ")", "data", "=", "b64decode", "(", "cdata", "[", "id64", "]", ")", "caveat_data", "[", "id", "]", "=", "data", "macaroon", "=", "Macaroon", "(", "root_key", "=", "None", ",", "id", "=", "None", ",", "namespace", "=", 
"namespace", ",", "version", "=", "version", ")", "macaroon", ".", "_caveat_data", "=", "caveat_data", "macaroon", ".", "_macaroon", "=", "m", "return", "macaroon" ]
Returns whether this plugin does what it claims to have done
def validate ( self ) : try : response = self . client . get_access_key_last_used ( AccessKeyId = self . access_key_id ) username = response [ 'UserName' ] access_keys = self . client . list_access_keys ( UserName = username ) for key in access_keys [ 'AccessKeyMetadata' ] : if ( key [ 'AccessKeyId' ] == self . access_key_id ) and ( key [ 'Status' ] == 'Inactive' ) : return True return False except Exception as e : logger . info ( "Failed to validate key disable for " "key {id} due to: {e}." . format ( e = e , id = self . access_key_id ) ) return False
9,227
https://github.com/ThreatResponse/aws_ir_plugins/blob/b5128ef5cbd91fc0b5d55615f1c14cb036ae7c73/aws_ir_plugins/disableaccess_key.py#L35-L61
[ "def", "compose", "(", "list_of_files", ",", "destination_file", ",", "files_metadata", "=", "None", ",", "content_type", "=", "None", ",", "retry_params", "=", "None", ",", "_account_id", "=", "None", ")", ":", "api", "=", "storage_api", ".", "_get_storage_api", "(", "retry_params", "=", "retry_params", ",", "account_id", "=", "_account_id", ")", "if", "os", ".", "getenv", "(", "'SERVER_SOFTWARE'", ")", ".", "startswith", "(", "'Dev'", ")", ":", "def", "_temp_func", "(", "file_list", ",", "destination_file", ",", "content_type", ")", ":", "bucket", "=", "'/'", "+", "destination_file", ".", "split", "(", "'/'", ")", "[", "1", "]", "+", "'/'", "with", "open", "(", "destination_file", ",", "'w'", ",", "content_type", "=", "content_type", ")", "as", "gcs_merge", ":", "for", "source_file", "in", "file_list", ":", "with", "open", "(", "bucket", "+", "source_file", "[", "'Name'", "]", ",", "'r'", ")", "as", "gcs_source", ":", "gcs_merge", ".", "write", "(", "gcs_source", ".", "read", "(", ")", ")", "compose_object", "=", "_temp_func", "else", ":", "compose_object", "=", "api", ".", "compose_object", "file_list", ",", "_", "=", "_validate_compose_list", "(", "destination_file", ",", "list_of_files", ",", "files_metadata", ",", "32", ")", "compose_object", "(", "file_list", ",", "destination_file", ",", "content_type", ")" ]
This function first checks to see if the key is already disabled \
def _disable_access_key ( self , force_disable_self = False ) : client = self . client if self . validate is True : return else : try : client . update_access_key ( UserName = self . _search_user_for_key ( ) , AccessKeyId = self . access_key_id , Status = 'Inactive' ) logger . info ( "Access key {id} has " "been disabled." . format ( id = self . access_key_id ) ) except Exception as e : logger . info ( "Access key {id} could not " "be disabled due to: {e}." . format ( e = e , id = self . access_key_id ) )
9,228
https://github.com/ThreatResponse/aws_ir_plugins/blob/b5128ef5cbd91fc0b5d55615f1c14cb036ae7c73/aws_ir_plugins/disableaccess_key.py#L90-L115
[ "def", "_integerValue_to_int", "(", "value_str", ")", ":", "m", "=", "BINARY_VALUE", ".", "match", "(", "value_str", ")", "if", "m", ":", "value", "=", "int", "(", "m", ".", "group", "(", "1", ")", ",", "2", ")", "elif", "OCTAL_VALUE", ".", "match", "(", "value_str", ")", ":", "value", "=", "int", "(", "value_str", ",", "8", ")", "elif", "DECIMAL_VALUE", ".", "match", "(", "value_str", ")", ":", "value", "=", "int", "(", "value_str", ")", "elif", "HEX_VALUE", ".", "match", "(", "value_str", ")", ":", "value", "=", "int", "(", "value_str", ",", "16", ")", "else", ":", "value", "=", "None", "return", "value" ]
Fed with a psms generator this returns the master proteins present in the PSM table . PSMs with multiple master proteins are excluded .
def generate_master_proteins ( psms , protcol ) : master_proteins = { } if not protcol : protcol = mzidtsvdata . HEADER_MASTER_PROT for psm in psms : protacc = psm [ protcol ] if ';' in protacc : continue master_proteins [ protacc ] = 1 if 'NA' in master_proteins : master_proteins . pop ( 'NA' ) if '' in master_proteins : master_proteins . pop ( '' ) for protacc in master_proteins : yield { prottabledata . HEADER_PROTEIN : protacc }
9,229
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/create_empty.py#L5-L21
[ "def", "generate_timeline", "(", "usnjrnl", ",", "filesystem_content", ")", ":", "journal_content", "=", "defaultdict", "(", "list", ")", "for", "event", "in", "usnjrnl", ":", "journal_content", "[", "event", ".", "inode", "]", ".", "append", "(", "event", ")", "for", "event", "in", "usnjrnl", ":", "try", ":", "dirent", "=", "lookup_dirent", "(", "event", ",", "filesystem_content", ",", "journal_content", ")", "yield", "UsnJrnlEvent", "(", "dirent", ".", "inode", ",", "dirent", ".", "path", ",", "dirent", ".", "size", ",", "dirent", ".", "allocated", ",", "event", ".", "timestamp", ",", "event", ".", "changes", ",", "event", ".", "attributes", ")", "except", "LookupError", "as", "error", ":", "LOGGER", ".", "debug", "(", "error", ")" ]
Returns namespace and static xml from percolator output file
def prepare_percolator_output ( self , fn ) : ns = xml . get_namespace ( fn ) static = readers . get_percolator_static_xml ( fn , ns ) return ns , static
9,230
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/pycolator/base.py#L13-L17
[ "def", "get_reqs", "(", "which", "=", "\"main\"", ")", ":", "reqs", "=", "[", "]", "for", "req", "in", "all_reqs", "[", "which", "]", ":", "req_str", "=", "req", "+", "\">=\"", "+", "ver_tuple_to_str", "(", "min_versions", "[", "req", "]", ")", "if", "req", "in", "version_strictly", ":", "req_str", "+=", "\",<\"", "+", "ver_tuple_to_str", "(", "min_versions", "[", "req", "]", "[", ":", "-", "1", "]", ")", "+", "\".\"", "+", "str", "(", "min_versions", "[", "req", "]", "[", "-", "1", "]", "+", "1", ")", "reqs", ".", "append", "(", "req_str", ")", "return", "reqs" ]
Check if a git repository exists in the given folder .
def git_available ( func ) : def inner ( * args ) : os . chdir ( APISettings . GIT_DIR ) if call ( [ 'git' , 'rev-parse' ] ) == 0 : return func ( * args ) Shell . fail ( 'There is no git repository!' ) return exit ( 1 ) return inner
9,231
https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/decorators.py#L8-L21
[ "def", "MAKE_WPARAM", "(", "wParam", ")", ":", "wParam", "=", "ctypes", ".", "cast", "(", "wParam", ",", "LPVOID", ")", ".", "value", "if", "wParam", "is", "None", ":", "wParam", "=", "0", "return", "wParam" ]
Build a GPU id string to be used for CUDA_VISIBLE_DEVICES .
def _cuda_get_gpu_spec_string ( gpu_ids = None ) : if gpu_ids is None : return '' if isinstance ( gpu_ids , list ) : return ',' . join ( str ( gpu_id ) for gpu_id in gpu_ids ) if isinstance ( gpu_ids , int ) : return str ( gpu_ids ) return gpu_ids
9,232
https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/gpu.py#L4-L18
[ "def", "delete_file", "(", "self", ",", "fmfile", ")", ":", "if", "not", "isinstance", "(", "fmfile", ",", "dict", ")", ":", "raise", "FMFileError", "(", "'fmfile must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'file_delete'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", ",", "'fileid'", ":", "fmfile", ".", "get", "(", "'fileid'", ")", "}", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "self", ".", "_complete", "=", "True", "return", "True", "hellraiser", "(", "res", ")" ]
Handle Exceptions from the server . Formats the HTML into readable form
def write_error ( self , status_code , * * kwargs ) : reason = self . _reason if self . settings . get ( "serve_traceback" ) and "exc_info" in kwargs : error = [ ] for line in traceback . format_exception ( * kwargs [ "exc_info" ] ) : error . append ( line ) else : error = None data = { '_traceback' : error , 'message' : reason , 'code' : status_code } content = self . render_exception ( * * data ) self . write ( content )
9,233
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L37-L51
[ "def", "move_where_clause_to_column", "(", "self", ",", "column", "=", "'condition'", ",", "key", "=", "None", ")", ":", "if", "self", ".", "conditions", ":", "expr", "=", "\" AND \"", ".", "join", "(", "self", ".", "conditions", ")", "params", "=", "self", ".", "params", "self", ".", "params", "=", "[", "]", "self", ".", "conditions", "=", "[", "]", "else", ":", "expr", "=", "'1'", "params", "=", "[", "]", "self", ".", "add_column", "(", "'({0}) AS {1}'", ".", "format", "(", "expr", ",", "column", ")", ",", "key", "or", "column", ",", "params", ")" ]
Used to render template to view
def view ( self , template_name , kwargs = None ) : if kwargs is None : kwargs = dict ( ) self . add_ ( 'session' , self . session ) content = self . render_template ( template_name , * * kwargs ) self . write ( content )
9,234
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L53-L73
[ "def", "bandwidth", "(", "self", ")", ":", "return", "np", ".", "abs", "(", "np", ".", "diff", "(", "self", ".", "pairs", "(", ")", ",", "axis", "=", "1", ")", ")", ".", "max", "(", ")" ]
Overridden initialize method from Tornado . Assigns the controller method and middleware attached to the route being executed to global variables to be used
def initialize ( self , method , middleware , request_type ) : self . method = method self . middleware = middleware self . request_type = request_type
9,235
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L98-L105
[ "def", "clean_text", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "normalize_url", "(", "url", ")", "except", "UnicodeDecodeError", ":", "log", ".", "warning", "(", "\"Invalid URL: %r\"", ",", "url", ")" ]
returns the key value pair of the arguments passed as a dict object
def only ( self , arguments ) : data = { } if not isinstance ( arguments , list ) : arguments = list ( arguments ) for i in arguments : data [ i ] = self . get_argument ( i ) return data
9,236
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L107-L129
[ "def", "read_stb", "(", "library", ",", "session", ")", ":", "status", "=", "ViUInt16", "(", ")", "ret", "=", "library", ".", "viReadSTB", "(", "session", ",", "byref", "(", "status", ")", ")", "return", "status", ".", "value", ",", "ret" ]
Returns all the arguments passed with the request
def all ( self ) : data = { } args = self . request . arguments for key , value in args . items ( ) : data [ key ] = self . get_argument ( key ) return data
9,237
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L131-L153
[ "def", "_get_partition_info", "(", "storage_system", ",", "device_path", ")", ":", "try", ":", "partition_infos", "=", "storage_system", ".", "RetrieveDiskPartitionInfo", "(", "devicePath", "=", "[", "device_path", "]", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "log", ".", "trace", "(", "'partition_info = %s'", ",", "partition_infos", "[", "0", "]", ")", "return", "partition_infos", "[", "0", "]" ]
returns the arguments passed to the route except that set by user
def except_ ( self , arguments ) : if not isinstance ( arguments , list ) : arguments = list ( arguments ) args = self . request . arguments data = { } for key , value in args . items ( ) : if key not in arguments : data [ key ] = self . get_argument ( key ) return data
9,238
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L155-L180
[ "def", "head", "(", "self", ")", ":", "summary", "=", "list", "(", ")", "summary", ".", "append", "(", "\"%s %s\"", "%", "(", "self", ".", "geotype", ",", "self", ".", "name", ")", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\" - Metadata:\"", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\"\\n\"", ".", "join", "(", "self", ".", "_get_metadata_as_string", "(", ")", ".", "split", "(", "\"\\n\"", ")", "[", ":", "5", "]", ")", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\"\\n\"", ")", "summary", ".", "append", "(", "\" - Columns:\"", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "self", ".", "columns", ".", "to_string", "(", ")", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\"\\n\"", ")", "summary", ".", "append", "(", "\" - Table:\"", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\"\\t\"", ".", "join", "(", "[", "\"Index\"", "]", "+", "self", ".", "table", ".", "columns", ".", "tolist", "(", ")", ")", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "self", ".", "table", ".", "head", "(", ")", ".", "to_string", "(", "header", "=", "None", ")", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\" \"", "*", "40", "+", "\"...\"", "+", "\" \"", "*", "40", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\" \"", "*", "40", "+", "\"...\"", "+", "\" \"", "*", "40", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "\" \"", "*", "40", "+", "\"...\"", "+", "\" \"", "*", "40", "+", "\"\\n\"", ")", "summary", ".", "append", "(", "self", ".", "table", ".", "tail", "(", ")", ".", "to_string", "(", "header", "=", "None", ")", "+", "\"\\n\"", ")", "return", "\"\\n\"", ".", "join", "(", "[", "str", "(", "s", ")", "for", "s", "in", "summary", "]", ")" ]
Returns an object that corresponds to given query or None .
def get_one ( cls , db , * args , * * kwargs ) : data = db [ cls . collection ] . find_one ( * args , * * kwargs ) if data : return cls . wrap_incoming ( data , db ) else : return None
9,239
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L186-L199
[ "def", "write_FORCE_CONSTANTS", "(", "force_constants", ",", "filename", "=", "'FORCE_CONSTANTS'", ",", "p2s_map", "=", "None", ")", ":", "lines", "=", "get_FORCE_CONSTANTS_lines", "(", "force_constants", ",", "p2s_map", "=", "p2s_map", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "w", ":", "w", ".", "write", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ")" ]
Returns object id or None .
def get_id ( self ) : import warnings warnings . warn ( '{0}.get_id() is deprecated, ' 'use {0}.id instead' . format ( type ( self ) . __name__ ) , DeprecationWarning ) return self . get ( '_id' )
9,240
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L232-L239
[ "def", "mv_connect_generators", "(", "mv_grid_district", ",", "graph", ",", "debug", "=", "False", ")", ":", "generator_buffer_radius", "=", "cfg_ding0", ".", "get", "(", "'mv_connect'", ",", "'generator_buffer_radius'", ")", "generator_buffer_radius_inc", "=", "cfg_ding0", ".", "get", "(", "'mv_connect'", ",", "'generator_buffer_radius_inc'", ")", "# WGS84 (conformal) to ETRS (equidistant) projection", "proj1", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:4326'", ")", ",", "# source coordinate system", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:3035'", ")", ")", "# destination coordinate system", "# ETRS (equidistant) to WGS84 (conformal) projection", "proj2", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:3035'", ")", ",", "# source coordinate system", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:4326'", ")", ")", "# destination coordinate system", "for", "generator", "in", "sorted", "(", "mv_grid_district", ".", "mv_grid", ".", "generators", "(", ")", ",", "key", "=", "lambda", "x", ":", "repr", "(", "x", ")", ")", ":", "# ===== voltage level 4: generator has to be connected to MV station =====", "if", "generator", ".", "v_level", "==", "4", ":", "mv_station", "=", "mv_grid_district", ".", "mv_grid", ".", "station", "(", ")", "branch_length", "=", "calc_geo_dist_vincenty", "(", "generator", ",", "mv_station", ")", "# TODO: set branch type to something reasonable (to be calculated)", "branch_kind", "=", "mv_grid_district", ".", "mv_grid", ".", "default_branch_kind", "branch_type", "=", "mv_grid_district", ".", "mv_grid", ".", "default_branch_type", "branch", "=", "BranchDing0", "(", "length", "=", "branch_length", ",", "kind", "=", "branch_kind", ",", "type", "=", "branch_type", ",", "ring", "=", "None", ")", "graph", ".", "add_edge", "(", "generator", ",", "mv_station", ",", "branch", "=", "branch", ")", "if", "debug", ":", "logger", ".", 
"debug", "(", "'Generator {0} was connected to {1}'", ".", "format", "(", "generator", ",", "mv_station", ")", ")", "# ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====", "elif", "generator", ".", "v_level", "==", "5", ":", "generator_shp", "=", "transform", "(", "proj1", ",", "generator", ".", "geo_data", ")", "# get branches within a the predefined radius `generator_buffer_radius`", "branches", "=", "calc_geo_branches_in_buffer", "(", "generator", ",", "mv_grid_district", ".", "mv_grid", ",", "generator_buffer_radius", ",", "generator_buffer_radius_inc", ",", "proj1", ")", "# calc distance between generator and grid's lines -> find nearest line", "conn_objects_min_stack", "=", "find_nearest_conn_objects", "(", "generator_shp", ",", "branches", ",", "proj1", ",", "conn_dist_weight", "=", "1", ",", "debug", "=", "debug", ",", "branches_only", "=", "False", ")", "# connect!", "# go through the stack (from nearest to most far connection target object)", "generator_connected", "=", "False", "for", "dist_min_obj", "in", "conn_objects_min_stack", ":", "# Note 1: conn_dist_ring_mod=0 to avoid re-routing of existent lines", "# Note 2: In connect_node(), the default cable/line type of grid is used. This is reasonable since", "# the max. allowed power of the smallest possible cable/line type (3.64 MVA for overhead", "# line of type 48-AL1/8-ST1A) exceeds the max. 
allowed power of a generator (4.5 MVA (dena))", "# (if connected separately!)", "target_obj_result", "=", "connect_node", "(", "generator", ",", "generator_shp", ",", "mv_grid_district", ".", "mv_grid", ",", "dist_min_obj", ",", "proj2", ",", "graph", ",", "conn_dist_ring_mod", "=", "0", ",", "debug", "=", "debug", ")", "if", "target_obj_result", "is", "not", "None", ":", "if", "debug", ":", "logger", ".", "debug", "(", "'Generator {0} was connected to {1}'", ".", "format", "(", "generator", ",", "target_obj_result", ")", ")", "generator_connected", "=", "True", "break", "if", "not", "generator_connected", "and", "debug", ":", "logger", ".", "debug", "(", "'Generator {0} could not be connected, try to '", "'increase the parameter `generator_buffer_radius` in '", "'config file `config_calc.cfg` to gain more possible '", "'connection points.'", ".", "format", "(", "generator", ")", ")", "return", "graph" ]
Returns a DBRef for this object or None .
def get_ref ( self ) : _id = self . id if _id is None : return None else : return DBRef ( self . collection , _id )
9,241
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L241-L248
[ "def", "stop_experiment", "(", "args", ")", ":", "experiment_id_list", "=", "parse_ids", "(", "args", ")", "if", "experiment_id_list", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "for", "experiment_id", "in", "experiment_id_list", ":", "print_normal", "(", "'Stoping experiment %s'", "%", "experiment_id", ")", "nni_config", "=", "Config", "(", "experiment_dict", "[", "experiment_id", "]", "[", "'fileName'", "]", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "rest_pid", ":", "kill_command", "(", "rest_pid", ")", "tensorboard_pid_list", "=", "nni_config", ".", "get_config", "(", "'tensorboardPidList'", ")", "if", "tensorboard_pid_list", ":", "for", "tensorboard_pid", "in", "tensorboard_pid_list", ":", "try", ":", "kill_command", "(", "tensorboard_pid", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "nni_config", ".", "set_config", "(", "'tensorboardPidList'", ",", "[", "]", ")", "print_normal", "(", "'Stop experiment success!'", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'status'", ",", "'STOPPED'", ")", "time_now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'endTime'", ",", "str", "(", "time_now", ")", ")" ]
Overrides formatTime method to use datetime module instead of time module to display time in microseconds . Time module by default does not resolve time to microseconds .
def formatTime ( self , record , datefmt = None ) : # noqa if datefmt : s = datetime . datetime . now ( ) . strftime ( datefmt ) else : t = datetime . datetime . now ( ) . strftime ( self . default_time_format ) s = self . default_msec_format % ( t , record . msecs ) return s
9,242
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/utils/log.py#L21-L32
[ "def", "bind_and_save", "(", "self", ",", "lxc", ")", ":", "bound_meta", "=", "self", ".", "bind", "(", "lxc", ")", "bound_meta", ".", "save", "(", ")", "return", "bound_meta" ]
Filter for all log objects of the same connected model as the given instance .
def related_to ( self , instance ) : return self . filter ( table_name = instance . table_name , record_id = instance . record_id )
9,243
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L18-L20
[ "def", "validate", "(", "self", ",", "vat_deets", ")", ":", "request", "=", "self", ".", "_get", "(", "'validation'", ",", "vat_deets", ")", "return", "self", ".", "responder", "(", "request", ")" ]
Create a fresh insert record from the current model state in the database .
def capture_insert_from_model ( cls , table_name , record_id , * , exclude_fields = ( ) ) : exclude_cols = ( ) if exclude_fields : model_cls = get_connected_model_for_table_name ( table_name ) exclude_cols = cls . _fieldnames_to_colnames ( model_cls , exclude_fields ) raw_query = sql . SQL ( """ SELECT {schema}.hc_capture_insert_from_row( hstore({schema}.{table_name}.*), %(table_name)s, ARRAY[{exclude_cols}]::text[] -- cast to type expected by stored procedure ) AS id FROM {schema}.{table_name} WHERE id = %(record_id)s """ ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , exclude_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in exclude_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs )
9,244
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L130-L170
[ "def", "_getBusVoltageLambdaSensor", "(", "self", ")", ":", "muVmin", "=", "array", "(", "[", "b", ".", "mu_vmin", "for", "b", "in", "self", ".", "market", ".", "case", ".", "connected_buses", "]", ")", "muVmax", "=", "array", "(", "[", "b", ".", "mu_vmax", "for", "b", "in", "self", ".", "market", ".", "case", ".", "connected_buses", "]", ")", "muVmin", "=", "-", "1.0", "*", "muVmin", "diff", "=", "muVmin", "+", "muVmax", "return", "diff" ]
Create a fresh update record from the current model state in the database .
def capture_update_from_model ( cls , table_name , record_id , * , update_fields = ( ) ) : include_cols = ( ) if update_fields : model_cls = get_connected_model_for_table_name ( table_name ) include_cols = cls . _fieldnames_to_colnames ( model_cls , update_fields ) raw_query = sql . SQL ( """ SELECT {schema}.hc_capture_update_from_row( hstore({schema}.{table_name}.*), %(table_name)s, ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure ) AS id FROM {schema}.{table_name} WHERE id = %(record_id)s """ ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , include_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in include_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs )
9,245
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L173-L212
[ "def", "get", "(", "self", ",", "accountID", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'GET'", ",", "'/v3/accounts/{accountID}/pricing'", ")", "request", ".", "set_path_param", "(", "'accountID'", ",", "accountID", ")", "request", ".", "set_param", "(", "'instruments'", ",", "kwargs", ".", "get", "(", "'instruments'", ")", ")", "request", ".", "set_param", "(", "'since'", ",", "kwargs", ".", "get", "(", "'since'", ")", ")", "request", ".", "set_param", "(", "'includeUnitsAvailable'", ",", "kwargs", ".", "get", "(", "'includeUnitsAvailable'", ")", ")", "request", ".", "set_param", "(", "'includeHomeConversions'", ",", "kwargs", ".", "get", "(", "'includeHomeConversions'", ")", ")", "response", "=", "self", ".", "ctx", ".", "request", "(", "request", ")", "if", "response", ".", "content_type", "is", "None", ":", "return", "response", "if", "not", "response", ".", "content_type", ".", "startswith", "(", "\"application/json\"", ")", ":", "return", "response", "jbody", "=", "json", ".", "loads", "(", "response", ".", "raw_body", ")", "parsed_body", "=", "{", "}", "#", "# Parse responses as defined by the API specification", "#", "if", "str", "(", "response", ".", "status", ")", "==", "\"200\"", ":", "if", "jbody", ".", "get", "(", "'prices'", ")", "is", "not", "None", ":", "parsed_body", "[", "'prices'", "]", "=", "[", "self", ".", "ctx", ".", "pricing", ".", "ClientPrice", ".", "from_dict", "(", "d", ",", "self", ".", "ctx", ")", "for", "d", "in", "jbody", ".", "get", "(", "'prices'", ")", "]", "if", "jbody", ".", "get", "(", "'homeConversions'", ")", "is", "not", "None", ":", "parsed_body", "[", "'homeConversions'", "]", "=", "[", "self", ".", "ctx", ".", "pricing", ".", "HomeConversions", ".", "from_dict", "(", "d", ",", "self", ".", "ctx", ")", "for", "d", "in", "jbody", ".", "get", "(", "'homeConversions'", ")", "]", "if", "jbody", ".", "get", "(", "'time'", ")", "is", "not", "None", ":", "parsed_body", "[", "'time'", "]", "=", "jbody", 
".", "get", "(", "'time'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"400\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"401\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"404\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"405\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "#", "# Unexpected response status", "#", "else", ":", "parsed_body", "=", "jbody", "response", ".", "body", "=", "parsed_body", "return", "response" ]
Fetch the instance of the connected model referenced by this log record .
def get_model ( self ) : model_cls = get_connected_model_for_table_name ( self . table_name ) return model_cls . _default_manager . filter ( id = self . record_id ) . first ( )
9,246
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L223-L232
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Get a QuerySet for all trigger log objects for the same connected model .
def related ( self , * , exclude_self = False ) : manager = type ( self ) . _default_manager queryset = manager . related_to ( self ) if exclude_self : queryset = queryset . exclude ( id = self . id ) return queryset
9,247
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L234-L245
[ "def", "mol_supplier", "(", "lines", ",", "no_halt", ",", "assign_descriptors", ")", ":", "def", "sdf_block", "(", "lns", ")", ":", "mol", "=", "[", "]", "opt", "=", "[", "]", "is_mol", "=", "True", "for", "line", "in", "lns", ":", "if", "line", ".", "startswith", "(", "\"$$$$\"", ")", ":", "yield", "mol", "[", ":", "]", ",", "opt", "[", ":", "]", "is_mol", "=", "True", "mol", ".", "clear", "(", ")", "opt", ".", "clear", "(", ")", "elif", "line", ".", "startswith", "(", "\"M END\"", ")", ":", "is_mol", "=", "False", "elif", "is_mol", ":", "mol", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "opt", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "if", "mol", ":", "yield", "mol", ",", "opt", "for", "i", ",", "(", "mol", ",", "opt", ")", "in", "enumerate", "(", "sdf_block", "(", "lines", ")", ")", ":", "try", ":", "c", "=", "molecule", "(", "mol", ")", "if", "assign_descriptors", ":", "molutil", ".", "assign_descriptors", "(", "c", ")", "except", "ValueError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Unsupported symbol: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported symbol: {}\"", ".", "format", "(", "err", ")", ")", "except", "RuntimeError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Failed to minimize ring: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Failed to minimize ring: {}\"", ".", "format", "(", "err", ")", ")", "except", ":", "if", "no_halt", ":", "print", "(", "\"Unexpected error (#{} in v2000reader)\"", ".", "format", "(", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c", "continue", "else", ":", "print", "(", 
"traceback", ".", "format_exc", "(", ")", ")", "raise", "Exception", "(", "\"Unsupported Error\"", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c" ]
Get the names of columns referenced by the given model fields .
def _fieldnames_to_colnames ( model_cls , fieldnames ) : get_field = model_cls . _meta . get_field fields = map ( get_field , fieldnames ) return { f . column for f in fields }
9,248
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L258-L262
[ "def", "communityvisibilitystate", "(", "self", ")", ":", "if", "self", ".", "_communityvisibilitystate", "==", "None", ":", "return", "None", "elif", "self", ".", "_communityvisibilitystate", "in", "self", ".", "VisibilityState", ":", "return", "self", ".", "VisibilityState", "[", "self", ".", "_communityvisibilitystate", "]", "else", ":", "#Invalid State", "return", "None" ]
Re - sync the change recorded in this trigger log .
def redo ( self ) : trigger_log = self . _to_live_trigger_log ( state = TRIGGER_LOG_STATE [ 'NEW' ] ) trigger_log . save ( force_insert = True ) # make sure we get a fresh row self . state = TRIGGER_LOG_STATE [ 'REQUEUED' ] self . save ( update_fields = [ 'state' ] ) return trigger_log
9,249
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L308-L325
[ "def", "is_address_reserved", "(", "self", ",", "address", ")", ":", "try", ":", "mbi", "=", "self", ".", "mquery", "(", "address", ")", "except", "WindowsError", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "e", ".", "winerror", "==", "win32", ".", "ERROR_INVALID_PARAMETER", ":", "return", "False", "raise", "return", "mbi", ".", "is_reserved", "(", ")" ]
Runs through a protein table and adds quant data from ANOTHER protein table that contains that data .
def add_isoquant_data ( proteins , quantproteins , quantacc , quantfields ) : for protein in base_add_isoquant_data ( proteins , quantproteins , prottabledata . HEADER_PROTEIN , quantacc , quantfields ) : yield protein
9,250
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/isoquant.py#L15-L21
[ "def", "pull_session", "(", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "io_loop", "=", "None", ",", "arguments", "=", "None", ")", ":", "coords", "=", "_SessionCoordinates", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ")", "session", "=", "ClientSession", "(", "session_id", "=", "session_id", ",", "websocket_url", "=", "websocket_url_for_server_url", "(", "coords", ".", "url", ")", ",", "io_loop", "=", "io_loop", ",", "arguments", "=", "arguments", ")", "session", ".", "pull", "(", ")", "return", "session" ]
Runs through a peptide table and adds quant data from ANOTHER peptide table that contains that data .
def add_isoquant_data ( peptides , quantpeptides , quantacc , quantfields ) : for peptide in base_add_isoquant_data ( peptides , quantpeptides , peptabledata . HEADER_PEPTIDE , quantacc , quantfields ) : yield peptide
9,251
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/isoquant.py#L5-L11
[ "def", "folder_shared_message", "(", "self", ",", "request", ",", "user", ",", "folder", ")", ":", "messages", ".", "success", "(", "request", ",", "_", "(", "\"Folder {} is now shared with {}\"", ".", "format", "(", "folder", ",", "user", ")", ")", ")" ]
Run a map function across all y points in the series
def map ( self , fn ) : return TimeSeries ( [ ( x , fn ( y ) ) for x , y in self . points ] )
9,252
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/results/time_series.py#L43-L45
[ "def", "cublasZgemm", "(", "handle", ",", "transa", ",", "transb", ",", "m", ",", "n", ",", "k", ",", "alpha", ",", "A", ",", "lda", ",", "B", ",", "ldb", ",", "beta", ",", "C", ",", "ldc", ")", ":", "status", "=", "_libcublas", ".", "cublasZgemm_v2", "(", "handle", ",", "_CUBLAS_OP", "[", "transa", "]", ",", "_CUBLAS_OP", "[", "transb", "]", ",", "m", ",", "n", ",", "k", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "alpha", ".", "real", ",", "alpha", ".", "imag", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "B", ")", ",", "ldb", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "beta", ".", "real", ",", "beta", ".", "imag", ")", ")", ",", "int", "(", "C", ")", ",", "ldc", ")", "cublasCheckStatus", "(", "status", ")" ]
Fetches proteins and quants from joined lookup table loops through them and when all of a protein s quants have been collected yields the protein quant information .
def build_proteintable ( pqdb , headerfields , mergecutoff , isobaric = False , precursor = False , probability = False , fdr = False , pep = False , genecentric = False ) : pdmap = create_featuredata_map ( pqdb , genecentric = genecentric , psm_fill_fun = pinfo . add_psms_to_proteindata , pgene_fill_fun = pinfo . add_protgene_to_protdata , count_fun = pinfo . count_peps_psms , get_uniques = True ) empty_return = lambda x , y , z : { } iso_fun = { True : get_isobaric_quant , False : empty_return } [ isobaric ] ms1_fun = { True : get_precursor_quant , False : empty_return } [ precursor ] prob_fun = { True : get_prot_probability , False : empty_return } [ probability ] fdr_fun = { True : get_prot_fdr , False : empty_return } [ fdr ] pep_fun = { True : get_prot_pep , False : empty_return } [ pep ] pdata_fun = { True : get_protein_data_genecentric , False : get_protein_data } [ genecentric is not False ] protein_sql , sqlfieldmap = pqdb . prepare_mergetable_sql ( precursor , isobaric , probability , fdr , pep ) accession_field = prottabledata . ACCESSIONS [ genecentric ] proteins = pqdb . get_merged_features ( protein_sql ) protein = next ( proteins ) outprotein = { accession_field : protein [ sqlfieldmap [ 'p_acc' ] ] } check_prot = { k : v for k , v in outprotein . items ( ) } if not mergecutoff or protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) for protein in proteins : if mergecutoff and not protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : continue p_acc = protein [ sqlfieldmap [ 'p_acc' ] ] if p_acc != outprotein [ accession_field ] : # check if protein has been filled, otherwise do not output # sometimes proteins have NA in all fields if outprotein != check_prot : yield outprotein outprotein = { accession_field : p_acc } check_prot = { k : v for k , v in outprotein . 
items ( ) } fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) if outprotein != check_prot : yield outprotein
9,253
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/merge.py#L9-L60
[ "def", "validate", "(", "self", ")", ":", "for", "client", "in", "self", ".", "clients", ":", "for", "key", "in", "REQUIRED_KEYS", ":", "if", "key", "not", "in", "client", ":", "raise", "MissingConfigValue", "(", "key", ")", "if", "'revision_file'", "not", "in", "client", ":", "client", ".", "revision_file", "=", "DEFAULT_REVISION_FILEPATH", ".", "format", "(", "client", ".", "key", ")" ]
Takes a list of protein accessions and a list of protein groups content from DB . Counts for each group in list how many proteins are found in lineproteins . Returns list of str amounts .
def count_protein_group_hits ( lineproteins , groups ) : hits = [ ] for group in groups : hits . append ( 0 ) for protein in lineproteins : if protein in group : hits [ - 1 ] += 1 return [ str ( x ) for x in hits ]
9,254
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingrouping.py#L57-L68
[ "def", "asrgb", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_keyframe", "is", "None", ":", "raise", "RuntimeError", "(", "'keyframe not set'", ")", "kwargs", "[", "'validate'", "]", "=", "False", "return", "TiffPage", ".", "asrgb", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
The default log dir is in the system resource directory But the utool global cache allows for the user to override where the logs for a specific app should be stored .
def get_logging_dir ( appname = 'default' ) : from utool . _internal import meta_util_cache from utool . _internal import meta_util_cplat from utool import util_cache if appname is None or appname == 'default' : appname = util_cache . get_default_appname ( ) resource_dpath = meta_util_cplat . get_resource_dir ( ) default = join ( resource_dpath , appname , 'logs' ) # Check global cache for a custom logging dir otherwise # use the default. log_dir = meta_util_cache . global_cache_read ( logdir_cacheid , appname = appname , default = default ) log_dir_realpath = realpath ( log_dir ) return log_dir_realpath
9,255
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_logging.py#L186-L208
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_progressing", ":", "self", ".", "_progressing", "=", "False", "self", ".", "_thread", ".", "join", "(", ")" ]
mostly for util_logging internals
def add_logging_handler ( handler , format_ = 'file' ) : global __UTOOL_ROOT_LOGGER__ if __UTOOL_ROOT_LOGGER__ is None : builtins . print ( '[WARNING] logger not started, cannot add handler' ) return # create formatter and add it to the handlers #logformat = '%Y-%m-%d %H:%M:%S' #logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' timeformat = '%H:%M:%S' if format_ == 'file' : logformat = '[%(asctime)s]%(message)s' elif format_ == 'stdout' : logformat = '%(message)s' else : raise AssertionError ( 'unknown logging format_: %r' % format_ ) # Create formatter for handlers formatter = logging . Formatter ( logformat , timeformat ) handler . setLevel ( logging . DEBUG ) handler . setFormatter ( formatter ) __UTOOL_ROOT_LOGGER__ . addHandler ( handler )
9,256
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_logging.py#L280-L302
[ "def", "_process_pair", "(", "first_fn", ",", "second_fn", ",", "error_protocol", ")", ":", "ebook", "=", "None", "metadata", "=", "None", "if", "_is_meta", "(", "first_fn", ")", "and", "not", "_is_meta", "(", "second_fn", ")", ":", "# 1st meta, 2nd data", "logger", ".", "debug", "(", "\"Parsed: '%s' as meta, '%s' as data.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "metadata", ",", "ebook", "=", "first_fn", ",", "second_fn", "elif", "not", "_is_meta", "(", "first_fn", ")", "and", "_is_meta", "(", "second_fn", ")", ":", "# 1st data, 2nd meta", "logger", ".", "debug", "(", "\"Parsed: '%s' as meta, '%s' as data.\"", "%", "(", "second_fn", ",", "first_fn", ")", ")", "metadata", ",", "ebook", "=", "second_fn", ",", "first_fn", "elif", "_is_meta", "(", "first_fn", ")", "and", "_is_meta", "(", "second_fn", ")", ":", "# both metadata", "logger", ".", "debug", "(", "\"Parsed: both '%s' and '%s' as meta.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "return", "[", "_safe_read_meta_file", "(", "first_fn", ",", "error_protocol", ")", ",", "_safe_read_meta_file", "(", "second_fn", ",", "error_protocol", ")", "]", "else", ":", "# both data", "logger", ".", "debug", "(", "\"Parsed: both '%s' and '%s' as data.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "return", "[", "EbookFile", "(", "first_fn", ")", ",", "EbookFile", "(", "second_fn", ")", "]", "# process pairs, which were created in first two branches of the if", "# statement above", "pair", "=", "DataPair", "(", "metadata_file", "=", "_safe_read_meta_file", "(", "metadata", ",", "error_protocol", ")", ",", "ebook_file", "=", "EbookFile", "(", "ebook", ")", ")", "if", "not", "pair", ".", "metadata_file", ":", "logger", ".", "error", "(", "\"Can't parse MetadataFile '%s'. Continuing with data file '%s'.\"", "%", "(", "metadata", ",", "ebook", ")", ")", "return", "[", "pair", ".", "ebook_file", "]", "return", "[", "pair", "]" ]
r Overwrites utool print functions to use a logger
def start_logging ( log_fpath = None , mode = 'a' , appname = 'default' , log_dir = None ) : global __UTOOL_ROOT_LOGGER__ global __UTOOL_PRINT__ global __UTOOL_WRITE__ global __UTOOL_FLUSH__ global __CURRENT_LOG_FPATH__ if LOGGING_VERBOSE : print ( '[utool] start_logging()' ) # FIXME: The test for doctest may not work if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and not __inside_doctest ( ) : if LOGGING_VERBOSE : print ( '[utool] start_logging()... rootcheck OK' ) #logging.config.dictConfig(LOGGING) if log_fpath is None : log_fpath = get_log_fpath ( num = 'next' , appname = appname , log_dir = log_dir ) __CURRENT_LOG_FPATH__ = log_fpath # Print what is about to happen if VERBOSE or LOGGING_VERBOSE : startmsg = ( 'logging to log_fpath=%r' % log_fpath ) _utool_print ( ) ( startmsg ) # Create root logger __UTOOL_ROOT_LOGGER__ = logging . getLogger ( 'root' ) __UTOOL_ROOT_LOGGER__ . setLevel ( 'DEBUG' ) # create file handler which logs even debug messages #fh = logging.handlers.WatchedFileHandler(log_fpath) logfile_handler = logging . FileHandler ( log_fpath , mode = mode ) #stdout_handler = logging.StreamHandler(__UTOOL_STDOUT__) stdout_handler = CustomStreamHandler ( __UTOOL_STDOUT__ ) stdout_handler . terminator = '' # http://stackoverflow.com/questions/7168790/suppress-newline-in-python-logging-module #stdout_handler.terminator = '' add_logging_handler ( logfile_handler , format_ = 'file' ) add_logging_handler ( stdout_handler , format_ = 'stdout' ) __UTOOL_ROOT_LOGGER__ . propagate = False __UTOOL_ROOT_LOGGER__ . setLevel ( logging . DEBUG ) # Overwrite utool functions with the logging functions def utool_flush ( * args ) : """ flushes whatever is in the current utool write buffer """ # Flushes only the stdout handler stdout_handler . 
flush ( ) #__UTOOL_ROOT_LOGGER__.flush() #global __UTOOL_WRITE_BUFFER__ #if len(__UTOOL_WRITE_BUFFER__) > 0: # msg = ''.join(__UTOOL_WRITE_BUFFER__) # #sys.stdout.write('FLUSHING %r\n' % (len(__UTOOL_WRITE_BUFFER__))) # __UTOOL_WRITE_BUFFER__ = [] # return __UTOOL_ROOT_LOGGER__.info(msg) #__PYTHON_FLUSH__() def utool_write ( * args ) : """ writes to current utool logs and to sys.stdout.write """ #global __UTOOL_WRITE_BUFFER__ #sys.stdout.write('WRITEING\n') msg = ', ' . join ( map ( six . text_type , args ) ) #__UTOOL_WRITE_BUFFER__.append(msg) __UTOOL_ROOT_LOGGER__ . info ( msg ) #if msg.endswith('\n'): # # Flush on newline, and remove newline # __UTOOL_WRITE_BUFFER__[-1] = __UTOOL_WRITE_BUFFER__[-1][:-1] # utool_flush() #elif len(__UTOOL_WRITE_BUFFER__) > 32: # # Flush if buffer is too large # utool_flush() if not PRINT_ALL_CALLERS : def utool_print ( * args ) : """ standard utool print function """ #sys.stdout.write('PRINT\n') endline = '\n' try : msg = ', ' . join ( map ( six . text_type , args ) ) return __UTOOL_ROOT_LOGGER__ . info ( msg + endline ) except UnicodeDecodeError : new_msg = ', ' . join ( map ( meta_util_six . ensure_unicode , args ) ) #print(new_msg) return __UTOOL_ROOT_LOGGER__ . info ( new_msg + endline ) else : def utool_print ( * args ) : """ debugging utool print function """ import utool as ut utool_flush ( ) endline = '\n' __UTOOL_ROOT_LOGGER__ . info ( '\n\n----------' ) __UTOOL_ROOT_LOGGER__ . info ( ut . get_caller_name ( range ( 0 , 20 ) ) ) return __UTOOL_ROOT_LOGGER__ . info ( ', ' . join ( map ( six . text_type , args ) ) + endline ) def utool_printdbg ( * args ) : """ DRPRICATE standard utool print debug function """ return __UTOOL_ROOT_LOGGER__ . debug ( ', ' . join ( map ( six . 
text_type , args ) ) ) # overwrite the utool printers __UTOOL_WRITE__ = utool_write __UTOOL_FLUSH__ = utool_flush __UTOOL_PRINT__ = utool_print # Test out our shiney new logger if VERBOSE or LOGGING_VERBOSE : __UTOOL_PRINT__ ( '<__LOG_START__>' ) __UTOOL_PRINT__ ( startmsg ) else : if LOGGING_VERBOSE : print ( '[utool] start_logging()... FAILED TO START' ) print ( 'DEBUG INFO' ) print ( '__inside_doctest() = %r' % ( __inside_doctest ( ) , ) ) print ( '__IN_MAIN_PROCESS__ = %r' % ( __IN_MAIN_PROCESS__ , ) ) print ( '__UTOOL_ROOT_LOGGER__ = %r' % ( __UTOOL_ROOT_LOGGER__ , ) )
9,257
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_logging.py#L375-L520
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Restores utool print functions to python defaults
def stop_logging ( ) : global __UTOOL_ROOT_LOGGER__ global __UTOOL_PRINT__ global __UTOOL_WRITE__ global __UTOOL_FLUSH__ if __UTOOL_ROOT_LOGGER__ is not None : # Flush remaining buffer if VERBOSE or LOGGING_VERBOSE : _utool_print ( ) ( ) ( '<__LOG_STOP__>' ) _utool_flush ( ) ( ) # Remove handlers for h in __UTOOL_ROOT_LOGGER__ . handlers [ : ] : __UTOOL_ROOT_LOGGER__ . removeHandler ( h ) # Reset objects __UTOOL_ROOT_LOGGER__ = None __UTOOL_PRINT__ = None __UTOOL_WRITE__ = None __UTOOL_FLUSH__ = None
9,258
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_logging.py#L523-L543
[ "def", "surviors_are_inconsistent", "(", "survivor_mapping", ":", "Mapping", "[", "BaseEntity", ",", "Set", "[", "BaseEntity", "]", "]", ")", "->", "Set", "[", "BaseEntity", "]", ":", "victim_mapping", "=", "set", "(", ")", "for", "victim", "in", "itt", ".", "chain", ".", "from_iterable", "(", "survivor_mapping", ".", "values", "(", ")", ")", ":", "if", "victim", "in", "survivor_mapping", ":", "victim_mapping", ".", "add", "(", "victim", ")", "return", "victim_mapping" ]
r Recursively removes Nones in all lists and sublists and replaces them with the repl variable
def replace_nones ( list_ , repl = - 1 ) : repl_list = [ repl if item is None else ( replace_nones ( item , repl ) if isinstance ( item , list ) else item ) for item in list_ ] return repl_list
9,259
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L78-L113
[ "def", "get_issue_classes", "(", "self", ",", "backend", "=", "None", ",", "enabled", "=", "True", ",", "sort", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "backend", "is", "None", ":", "backend", "=", "self", ".", "backend", "query", "=", "{", "'project_issue_classes.project'", ":", "self", "}", "if", "enabled", "is", "not", "None", ":", "query", "[", "'project_issue_classes.enabled'", "]", "=", "enabled", "issue_classes", "=", "backend", ".", "filter", "(", "self", ".", "IssueClass", ",", "query", ",", "*", "*", "kwargs", ")", "if", "sort", "is", "not", "None", ":", "issue_classes", "=", "issue_classes", ".", "sort", "(", "sort", ")", "return", "issue_classes" ]
r Recursively removes target in all lists and sublists and replaces them with the repl variable
def recursive_replace ( list_ , target , repl = - 1 ) : repl_list = [ recursive_replace ( item , target , repl ) if isinstance ( item , ( list , np . ndarray ) ) else ( repl if item == target else item ) for item in list_ ] return repl_list
9,260
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L116-L126
[ "def", "vgcsUplinkGrant", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x6", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x9", ")", "# 00001001", "c", "=", "RrCause", "(", ")", "d", "=", "RequestReference", "(", ")", "e", "=", "TimingAdvance", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "/", "d", "/", "e", "return", "packet" ]
Allocates more space if needbe .
def ensure_list_size ( list_ , size_ ) : lendiff = ( size_ ) - len ( list_ ) if lendiff > 0 : extension = [ None for _ in range ( lendiff ) ] list_ . extend ( extension )
9,261
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L150-L162
[ "def", "list_streams", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/streams'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "rest_pb2", ".", "ListStreamsResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "streams", "=", "getattr", "(", "message", ",", "'stream'", ")", "return", "iter", "(", "[", "Stream", "(", "stream", ")", "for", "stream", "in", "streams", "]", ")" ]
Does a string replace with a list of search and replacements
def multi_replace ( instr , search_list = [ ] , repl_list = None ) : repl_list = [ '' ] * len ( search_list ) if repl_list is None else repl_list for ser , repl in zip ( search_list , repl_list ) : instr = instr . replace ( ser , repl ) return instr
9,262
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L387-L396
[ "def", "translateprotocolmask", "(", "protocol", ")", ":", "pcscprotocol", "=", "0", "if", "None", "!=", "protocol", ":", "if", "CardConnection", ".", "T0_protocol", "&", "protocol", ":", "pcscprotocol", "|=", "SCARD_PROTOCOL_T0", "if", "CardConnection", ".", "T1_protocol", "&", "protocol", ":", "pcscprotocol", "|=", "SCARD_PROTOCOL_T1", "if", "CardConnection", ".", "RAW_protocol", "&", "protocol", ":", "pcscprotocol", "|=", "SCARD_PROTOCOL_RAW", "if", "CardConnection", ".", "T15_protocol", "&", "protocol", ":", "pcscprotocol", "|=", "SCARD_PROTOCOL_T15", "return", "pcscprotocol" ]
r Flattens unflat_list but remember how to reconstruct the unflat_list Returns flat_list and the reverse_list with indexes into the flat_list
def invertible_flatten1 ( unflat_list ) : nextnum = functools . partial ( six . next , itertools . count ( 0 ) ) # Build an unflat list of flat indexes reverse_list = [ [ nextnum ( ) for _ in tup ] for tup in unflat_list ] flat_list = flatten ( unflat_list ) return flat_list , reverse_list
9,263
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L423-L454
[ "def", "_attempt_to_raise_license_error", "(", "data_dir", ")", ":", "if", "isinstance", "(", "data_dir", ",", "bytes", ")", ":", "data_dir", "=", "_decode", "(", "data_dir", ")", "data_dir", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "'Data'", ")", "current_date", "=", "dt", ".", "date", ".", "today", "(", ")", ".", "strftime", "(", "'%Y%m%d'", ")", "timestamp", "=", "dt", ".", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "'[%Y-%m-%d %H:%M:%S]'", ")", "data_files", "=", "os", ".", "listdir", "(", "data_dir", ")", "for", "f", "in", "data_files", ":", "if", "f", "==", "(", "current_date", "+", "'.err'", ")", ":", "file_name", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "f", ")", "with", "fopen", "(", "file_name", ")", "as", "error_file", ":", "for", "line", "in", "error_file", ":", "if", "not", "line", ".", "startswith", "(", "timestamp", ")", ":", "continue", "if", "'Not valid license'", "in", "line", ":", "raise", "LicenseError", "(", "'Your license appears to have '", "'expired. Try running \"pynlpir '", "'update\".'", ")", "elif", "'Can not open License file'", "in", "line", ":", "raise", "LicenseError", "(", "'Your license appears to be '", "'missing. Try running \"pynlpir '", "'update\".'", ")" ]
An alternative to invertible_flatten1 which uses cumsum
def invertible_flatten2 ( unflat_list ) : sublen_list = list ( map ( len , unflat_list ) ) if not util_type . HAVE_NUMPY : cumlen_list = np . cumsum ( sublen_list ) # Build an unflat list of flat indexes else : cumlen_list = list ( accumulate ( sublen_list ) ) flat_list = flatten ( unflat_list ) return flat_list , cumlen_list
9,264
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L594-L665
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
more numpy version
def invertible_flatten2_numpy ( unflat_arrs , axis = 0 ) : cumlen_list = np . cumsum ( [ arr . shape [ axis ] for arr in unflat_arrs ] ) flat_list = np . concatenate ( unflat_arrs , axis = axis ) return flat_list , cumlen_list
9,265
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L668-L693
[ "def", "_timestamp_regulator", "(", "self", ")", ":", "unified_timestamps", "=", "_PrettyDefaultDict", "(", "list", ")", "staged_files", "=", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", "for", "timestamp_basename", "in", "self", ".", "__timestamps_unregulated", ":", "if", "len", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ">", "1", ":", "# File has been splitted", "timestamp_name", "=", "''", ".", "join", "(", "timestamp_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "staged_splitted_files_of_timestamp", "=", "list", "(", "filter", "(", "lambda", "staged_file", ":", "(", "timestamp_name", "==", "staged_file", "[", ":", "-", "3", "]", "and", "all", "(", "[", "(", "x", "in", "set", "(", "map", "(", "str", ",", "range", "(", "10", ")", ")", ")", ")", "for", "x", "in", "staged_file", "[", "-", "3", ":", "]", "]", ")", ")", ",", "staged_files", ")", ")", "if", "len", "(", "staged_splitted_files_of_timestamp", ")", "==", "0", ":", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "timestamp_basename", ")", "]", "=", "{", "\"reason\"", ":", "\"Missing staged file\"", ",", "\"current_staged_files\"", ":", "staged_files", "}", "continue", "staged_splitted_files_of_timestamp", ".", "sort", "(", ")", "unified_timestamp", "=", "list", "(", ")", "for", "staging_digits", ",", "splitted_file", "in", "enumerate", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ":", "prev_splits_sec", "=", "0", "if", "int", "(", "staging_digits", ")", "!=", "0", ":", "prev_splits_sec", "=", "self", ".", "_get_audio_duration_seconds", "(", "\"{}/staging/{}{:03d}\"", ".", "format", "(", "self", ".", "src_dir", ",", "timestamp_name", ",", "staging_digits", "-", "1", ")", ")", "for", "word_block", "in", "splitted_file", ":", "unified_timestamp", ".", "append", "(", "_WordBlock", "(", "word", "=", "word_block", ".", "word", ",", "start", "=", "round", "(", 
"word_block", ".", "start", "+", "prev_splits_sec", ",", "2", ")", ",", "end", "=", "round", "(", "word_block", ".", "end", "+", "prev_splits_sec", ",", "2", ")", ")", ")", "unified_timestamps", "[", "str", "(", "timestamp_basename", ")", "]", "+=", "unified_timestamp", "else", ":", "unified_timestamps", "[", "timestamp_basename", "]", "+=", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", "[", "0", "]", "self", ".", "__timestamps", ".", "update", "(", "unified_timestamps", ")", "self", ".", "__timestamps_unregulated", "=", "_PrettyDefaultDict", "(", "list", ")" ]
performs only one call to the underlying func with unique rowids the func must be some lookup function
def unflat_unique_rowid_map ( func , unflat_rowids , * * kwargs ) : import utool as ut # First flatten the list, and remember the original dimensions flat_rowids , reverse_list = ut . invertible_flatten2 ( unflat_rowids ) # Then make the input unique flat_rowids_arr = np . array ( flat_rowids ) unique_flat_rowids , inverse_unique = np . unique ( flat_rowids_arr , return_inverse = True ) # Then preform the lookup / implicit mapping unique_flat_vals = func ( unique_flat_rowids , * * kwargs ) # Then broadcast unique values back to original flat positions flat_vals_ = np . array ( unique_flat_vals ) [ inverse_unique ] #flat_vals_ = np.array(unique_flat_vals).take(inverse_unique, axis=0) output_shape = tuple ( list ( flat_rowids_arr . shape ) + list ( flat_vals_ . shape [ 1 : ] ) ) flat_vals = np . array ( flat_vals_ ) . reshape ( output_shape ) # Then _unflatten the results to the original input dimensions unflat_vals = ut . unflatten2 ( flat_vals , reverse_list ) return unflat_vals
9,266
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L728-L796
[ "def", "sections", "(", "self", ")", ":", "secs", "=", "OrderedDict", "(", ")", "secs", "[", "'Overview'", "]", "=", "self", ".", "sec_overview", "secs", "[", "'Communication Channels'", "]", "=", "self", ".", "sec_com_channels", "secs", "[", "'Detailed Activity by Project'", "]", "=", "self", ".", "sec_projects", "return", "secs" ]
checks to see if list is equal everywhere
def allsame ( list_ , strict = True ) : if len ( list_ ) == 0 : return True first_item = list_ [ 0 ] return list_all_eq_to ( list_ , first_item , strict )
9,267
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L832-L845
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
checks to see if list is equal everywhere to a value
def list_all_eq_to ( list_ , val , strict = True ) : if util_type . HAVE_NUMPY and isinstance ( val , np . ndarray ) : return all ( [ np . all ( item == val ) for item in list_ ] ) try : # FUTURE WARNING # FutureWarning: comparison to `None` will result in an elementwise object comparison in the future. with warnings . catch_warnings ( ) : warnings . filterwarnings ( 'ignore' , category = FutureWarning ) flags = [ item == val for item in list_ ] return all ( [ np . all ( flag ) if hasattr ( flag , '__array__' ) else flag for flag in flags ] ) #return all([item == val for item in list_]) except ValueError : if not strict : return all ( [ repr ( item ) == repr ( val ) for item in list_ ] ) else : raise
9,268
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L848-L874
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
Returns each item in item_list where not flag in flag_list
def get_dirty_items ( item_list , flag_list ) : assert len ( item_list ) == len ( flag_list ) dirty_items = [ item for ( item , flag ) in zip ( item_list , flag_list ) if not flag ] #print('num_dirty_items = %r' % len(dirty_items)) #print('item_list = %r' % (item_list,)) #print('flag_list = %r' % (flag_list,)) return dirty_items
9,269
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L877-L895
[ "def", "_sys_mgr", "(", ")", ":", "thrift_port", "=", "six", ".", "text_type", "(", "__salt__", "[", "'config.option'", "]", "(", "'cassandra.THRIFT_PORT'", ")", ")", "host", "=", "__salt__", "[", "'config.option'", "]", "(", "'cassandra.host'", ")", "return", "SystemManager", "(", "'{0}:{1}'", ".", "format", "(", "host", ",", "thrift_port", ")", ")" ]
Returns items in item list where the corresponding item in flag list is true
def filterfalse_items ( item_list , flag_list ) : assert len ( item_list ) == len ( flag_list ) filtered_items = list ( util_iter . ifilterfalse_items ( item_list , flag_list ) ) return filtered_items
9,270
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L966-L982
[ "def", "_safe_unicode", "(", "o", ")", ":", "def", "clean", "(", "s", ")", ":", "return", "u''", ".", "join", "(", "[", "c", "if", "c", "in", "ASCII_PRINTABLE", "else", "'?'", "for", "c", "in", "s", "]", ")", "if", "USING_PYTHON2", ":", "try", ":", "return", "unicode", "(", "o", ")", "except", ":", "try", ":", "s", "=", "str", "(", "o", ")", "try", ":", "return", "s", ".", "decode", "(", "\"utf-8\"", ")", "except", ":", "return", "clean", "(", "s", "[", ":", "2048", "]", ")", "+", "u\" [Raw error message: \"", "+", "unicode", "(", "s", ".", "encode", "(", "\"hex\"", ")", ",", "'utf-8'", ")", "+", "u\"]\"", "except", ":", "return", "u\"(Unable to decode Python exception message)\"", "else", ":", "return", "str", "(", "o", ")" ]
r returns list1 elements that are also in list2 . preserves order of list1
def isect ( list1 , list2 ) : set2 = set ( list2 ) return [ item for item in list1 if item in set2 ]
9,271
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1001-L1088
[ "def", "_stop_vibration_win", "(", "self", ")", ":", "xinput_set_state", "=", "self", ".", "manager", ".", "xinput", ".", "XInputSetState", "xinput_set_state", ".", "argtypes", "=", "[", "ctypes", ".", "c_uint", ",", "ctypes", ".", "POINTER", "(", "XinputVibration", ")", "]", "xinput_set_state", ".", "restype", "=", "ctypes", ".", "c_uint", "stop_vibration", "=", "ctypes", ".", "byref", "(", "XinputVibration", "(", "0", ",", "0", ")", ")", "xinput_set_state", "(", "self", ".", "__device_number", ",", "stop_vibration", ")" ]
returns True if set_ is a subset of any set in other_sets
def is_subset_of_any ( set_ , other_sets ) : set_ = set ( set_ ) other_sets = map ( set , other_sets ) return any ( [ set_ . issubset ( other_set ) for other_set in other_sets ] )
9,272
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1157-L1197
[ "def", "websocktunnelToken", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"websocktunnelToken\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns unique items in list_ in the order they were seen .
def unique_ordered ( list_ ) : list_ = list ( list_ ) flag_list = flag_unique_items ( list_ ) unique_list = compress ( list_ , flag_list ) return unique_list
9,273
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1352-L1377
[ "def", "_stop_vibration_win", "(", "self", ")", ":", "xinput_set_state", "=", "self", ".", "manager", ".", "xinput", ".", "XInputSetState", "xinput_set_state", ".", "argtypes", "=", "[", "ctypes", ".", "c_uint", ",", "ctypes", ".", "POINTER", "(", "XinputVibration", ")", "]", "xinput_set_state", ".", "restype", "=", "ctypes", ".", "c_uint", "stop_vibration", "=", "ctypes", ".", "byref", "(", "XinputVibration", "(", "0", ",", "0", ")", ")", "xinput_set_state", "(", "self", ".", "__device_number", ",", "stop_vibration", ")" ]
returns list1 elements that are not in list2 . preserves order of list1
def setdiff ( list1 , list2 ) : set2 = set ( list2 ) return [ item for item in list1 if item not in set2 ]
9,274
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1407-L1430
[ "def", "_start_vibration_win", "(", "self", ",", "left_motor", ",", "right_motor", ")", ":", "xinput_set_state", "=", "self", ".", "manager", ".", "xinput", ".", "XInputSetState", "xinput_set_state", ".", "argtypes", "=", "[", "ctypes", ".", "c_uint", ",", "ctypes", ".", "POINTER", "(", "XinputVibration", ")", "]", "xinput_set_state", ".", "restype", "=", "ctypes", ".", "c_uint", "vibration", "=", "XinputVibration", "(", "int", "(", "left_motor", "*", "65535", ")", ",", "int", "(", "right_motor", "*", "65535", ")", ")", "xinput_set_state", "(", "self", ".", "__device_number", ",", "ctypes", ".", "byref", "(", "vibration", ")", ")" ]
move to util_iter
def isetdiff_flags ( list1 , list2 ) : set2 = set ( list2 ) return ( item not in set2 for item in list1 )
9,275
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1437-L1442
[ "def", "touch", "(", "self", ",", "message_id", ",", "reservation_id", ",", "timeout", "=", "None", ")", ":", "url", "=", "\"queues/%s/messages/%s/touch\"", "%", "(", "self", ".", "name", ",", "message_id", ")", "qitems", "=", "{", "'reservation_id'", ":", "reservation_id", "}", "if", "timeout", "is", "not", "None", ":", "qitems", "[", "'timeout'", "]", "=", "timeout", "body", "=", "json", ".", "dumps", "(", "qitems", ")", "response", "=", "self", ".", "client", ".", "post", "(", "url", ",", "body", "=", "body", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")", "return", "response", "[", "'body'", "]" ]
r Returns nested subset of items_list
def unflat_take ( items_list , unflat_index_list ) : return [ unflat_take ( items_list , xs ) if isinstance ( xs , list ) else take ( items_list , xs ) for xs in unflat_index_list ]
9,276
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1568-L1594
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
like np . argsort but for lists
def argsort ( * args , * * kwargs ) : if len ( args ) == 1 and isinstance ( args [ 0 ] , dict ) : dict_ = args [ 0 ] index_list = list ( dict_ . keys ( ) ) value_list = list ( dict_ . values ( ) ) return sortedby2 ( index_list , value_list ) else : index_list = list ( range ( len ( args [ 0 ] ) ) ) return sortedby2 ( index_list , * args , * * kwargs )
9,277
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1597-L1622
[ "def", "scrape_links", "(", "html", ",", "engine", ")", ":", "soup", "=", "BeautifulSoup", "(", "html", ",", "'lxml'", ")", "links", "=", "[", "]", "if", "engine", "==", "'d'", ":", "results", "=", "soup", ".", "findAll", "(", "'a'", ",", "{", "'class'", ":", "'result__a'", "}", ")", "for", "result", "in", "results", ":", "link", "=", "result", ".", "get", "(", "'href'", ")", "[", "15", ":", "]", "link", "=", "link", ".", "replace", "(", "'/blob/'", ",", "'/raw/'", ")", "links", ".", "append", "(", "link", ")", "elif", "engine", "==", "'g'", ":", "results", "=", "soup", ".", "findAll", "(", "'h3'", ",", "{", "'class'", ":", "'r'", "}", ")", "for", "result", "in", "results", ":", "link", "=", "result", ".", "a", "[", "'href'", "]", "[", "7", ":", "]", ".", "split", "(", "'&'", ")", "[", "0", "]", "link", "=", "link", ".", "replace", "(", "'/blob/'", ",", "'/raw/'", ")", "links", ".", "append", "(", "link", ")", "return", "links" ]
Returns the indices that would sort a indexable object .
def argsort2 ( indexable , key = None , reverse = False ) : # Create an iterator of value/key pairs if isinstance ( indexable , dict ) : vk_iter = ( ( v , k ) for k , v in indexable . items ( ) ) else : vk_iter = ( ( v , k ) for k , v in enumerate ( indexable ) ) # Sort by values and extract the keys if key is None : indices = [ k for v , k in sorted ( vk_iter , reverse = reverse ) ] else : indices = [ k for v , k in sorted ( vk_iter , key = lambda vk : key ( vk [ 0 ] ) , reverse = reverse ) ] return indices
9,278
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1625-L1665
[ "def", "unregister_callback", "(", "self", ",", "callback_url", ",", "*", "*", "kwargs", ")", ":", "if", "callback_url", "is", "None", ":", "raise", "ValueError", "(", "'callback_url must be provided'", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'speech_to_text'", ",", "'V1'", ",", "'unregister_callback'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'callback_url'", ":", "callback_url", "}", "url", "=", "'/v1/unregister_callback'", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "accept_json", "=", "False", ")", "return", "response" ]
Returns the other indicies in a list of length len_
def index_complement ( index_list , len_ = None ) : mask1 = index_to_boolmask ( index_list , len_ ) mask2 = not_list ( mask1 ) index_list_bar = list_where ( mask2 ) return index_list_bar
9,279
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1746-L1753
[ "def", "unindex_layers_with_issues", "(", "self", ",", "use_cache", "=", "False", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Issue", ",", "Layer", ",", "Service", "from", "django", ".", "contrib", ".", "contenttypes", ".", "models", "import", "ContentType", "layer_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Layer", ")", "service_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Service", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "layer_type", ".", "id", ")", ":", "unindex_layer", "(", "issue", ".", "content_object", ".", "id", ",", "use_cache", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "service_type", ".", "id", ")", ":", "for", "layer", "in", "issue", ".", "content_object", ".", "layer_set", ".", "all", "(", ")", ":", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", ")" ]
Returns items in list_ not indexed by index_list
def take_complement ( list_ , index_list ) : mask = not_list ( index_to_boolmask ( index_list , len ( list_ ) ) ) return compress ( list_ , mask )
9,280
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1756-L1759
[ "def", "WriteFD", "(", "self", ",", "Channel", ",", "MessageBuffer", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_WriteFD", "(", "Channel", ",", "byref", "(", "MessageBuffer", ")", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.WriteFD\"", ")", "raise" ]
Selects a subset of a list based on a list of indices . This is similar to np . take but pure python .
def take ( list_ , index_list ) : try : return [ list_ [ index ] for index in index_list ] except TypeError : return list_ [ index_list ]
9,281
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1772-L1823
[ "def", "api_headers_tween_factory", "(", "handler", ",", "registry", ")", ":", "def", "api_headers_tween", "(", "request", ")", ":", "response", "=", "handler", "(", "request", ")", "set_version", "(", "request", ",", "response", ")", "set_req_guid", "(", "request", ",", "response", ")", "return", "response", "return", "api_headers_tween" ]
take the top percent items in a list rounding up
def take_percentile ( arr , percent ) : size = len ( arr ) stop = min ( int ( size * percent ) , len ( arr ) ) return arr [ 0 : stop ]
9,282
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1834-L1838
[ "def", "serial_login", "(", "self", ")", ":", "if", "not", "self", ".", "_serial_connected", ":", "raise", "termineter", ".", "errors", ".", "FrameworkRuntimeError", "(", "'the serial interface is disconnected'", ")", "username", "=", "self", ".", "options", "[", "'USERNAME'", "]", "user_id", "=", "self", ".", "options", "[", "'USER_ID'", "]", "password", "=", "self", ".", "options", "[", "'PASSWORD'", "]", "if", "self", ".", "options", "[", "'PASSWORD_HEX'", "]", ":", "hex_regex", "=", "re", ".", "compile", "(", "'^([0-9a-fA-F]{2})+$'", ")", "if", "hex_regex", ".", "match", "(", "password", ")", "is", "None", ":", "self", ".", "print_error", "(", "'Invalid characters in password'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'invalid characters in password'", ")", "password", "=", "binascii", ".", "a2b_hex", "(", "password", ")", "if", "len", "(", "username", ")", ">", "10", ":", "self", ".", "print_error", "(", "'Username cannot be longer than 10 characters'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'username cannot be longer than 10 characters'", ")", "if", "not", "(", "0", "<=", "user_id", "<=", "0xffff", ")", ":", "self", ".", "print_error", "(", "'User id must be between 0 and 0xffff'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'user id must be between 0 and 0xffff'", ")", "if", "len", "(", "password", ")", ">", "20", ":", "self", ".", "print_error", "(", "'Password cannot be longer than 20 characters'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'password cannot be longer than 20 characters'", ")", "if", "not", "self", ".", "serial_connection", ".", "login", "(", "username", ",", "user_id", ",", "password", ")", ":", "return", "False", "return", "True" ]
r Creates a slice spanning n items in a list of length size at position frac .
def snapped_slice ( size , frac , n ) : if size < n : n = size start = int ( size * frac - ceil ( n / 2 ) ) + 1 stop = int ( size * frac + floor ( n / 2 ) ) + 1 # slide to the front or the back buf = 0 if stop >= size : buf = ( size - stop ) elif start < 0 : buf = 0 - start stop += buf start += buf assert stop <= size , 'out of bounds [%r, %r]' % ( stop , start ) sl = slice ( start , stop ) return sl
9,283
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1841-L1884
[ "def", "backend_calibration", "(", "self", ",", "backend", "=", "'ibmqx4'", ",", "hub", "=", "None", ",", "access_token", "=", "None", ",", "user_id", "=", "None", ")", ":", "if", "access_token", ":", "self", ".", "req", ".", "credential", ".", "set_token", "(", "access_token", ")", "if", "user_id", ":", "self", ".", "req", ".", "credential", ".", "set_user_id", "(", "user_id", ")", "if", "not", "self", ".", "check_credentials", "(", ")", ":", "raise", "CredentialsError", "(", "'credentials invalid'", ")", "backend_type", "=", "self", ".", "_check_backend", "(", "backend", ",", "'calibration'", ")", "if", "not", "backend_type", ":", "raise", "BadBackendError", "(", "backend", ")", "if", "backend_type", "in", "self", ".", "__names_backend_simulator", ":", "ret", "=", "{", "}", "return", "ret", "url", "=", "get_backend_stats_url", "(", "self", ".", "config", ",", "hub", ",", "backend_type", ")", "ret", "=", "self", ".", "req", ".", "get", "(", "url", "+", "'/calibration'", ")", "if", "not", "bool", "(", "ret", ")", ":", "ret", "=", "{", "}", "else", ":", "ret", "[", "\"backend\"", "]", "=", "backend_type", "return", "ret" ]
r Take parts from front back or middle of a list
def take_percentile_parts ( arr , front = None , mid = None , back = None ) : slices = [ ] if front : slices += [ snapped_slice ( len ( arr ) , 0.0 , front ) ] if mid : slices += [ snapped_slice ( len ( arr ) , 0.5 , mid ) ] if back : slices += [ snapped_slice ( len ( arr ) , 1.0 , back ) ] parts = flatten ( [ arr [ sl ] for sl in slices ] ) return parts
9,284
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1907-L1931
[ "def", "get_expr_summ_id", "(", "self", ",", "experiment_id", ",", "time_slide_id", ",", "veto_def_name", ",", "datatype", ",", "sim_proc_id", "=", "None", ")", ":", "# look for the ID", "for", "row", "in", "self", ":", "if", "(", "row", ".", "experiment_id", ",", "row", ".", "time_slide_id", ",", "row", ".", "veto_def_name", ",", "row", ".", "datatype", ",", "row", ".", "sim_proc_id", ")", "==", "(", "experiment_id", ",", "time_slide_id", ",", "veto_def_name", ",", "datatype", ",", "sim_proc_id", ")", ":", "# found it", "return", "row", ".", "experiment_summ_id", "# if get to here, experiment not found in table", "return", "None" ]
r Zips elementwise pairs between list1 and list2 . Broadcasts the first dimension if a single list is of length 1 .
def broadcast_zip ( list1 , list2 ) : try : len ( list1 ) except TypeError : list1 = list ( list1 ) try : len ( list2 ) except TypeError : list2 = list ( list2 ) # if len(list1) == 0 or len(list2) == 0: # # Corner case where either list is empty # return [] if len ( list1 ) == 1 and len ( list2 ) > 1 : list1 = list1 * len ( list2 ) elif len ( list1 ) > 1 and len ( list2 ) == 1 : list2 = list2 * len ( list1 ) elif len ( list1 ) != len ( list2 ) : raise ValueError ( 'out of alignment len(list1)=%r, len(list2)=%r' % ( len ( list1 ) , len ( list2 ) ) ) # return list(zip(list1, list2)) return zip ( list1 , list2 )
9,285
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1979-L2031
[ "def", "is_", "(", "self", ",", "state", ")", ":", "translator", "=", "self", ".", "_meta", "[", "'translator'", "]", "state", "=", "translator", ".", "translate", "(", "state", ")", "return", "self", ".", "actual_state", "==", "state" ]
takes flags returns indexes of True values
def equal ( list1 , list2 ) : return [ item1 == item2 for item1 , item2 in broadcast_zip ( list1 , list2 ) ]
9,286
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2039-L2041
[ "def", "public_broadcaster", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PUBLIC_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "WebSocketHandler", ".", "broadcast", "(", "line", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PUBLIC_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
Map like function
def scalar_input_map ( func , input_ ) : if util_iter . isiterable ( input_ ) : return list ( map ( func , input_ ) ) else : return func ( input_ )
9,287
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2061-L2076
[ "def", "rate_limit", "(", "f", ")", ":", "def", "new_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "0", "while", "True", ":", "resp", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "status_code", "==", "200", ":", "errors", "=", "0", "return", "resp", "elif", "resp", ".", "status_code", "==", "401", ":", "# Hack to retain the original exception, but augment it with", "# additional context for the user to interpret it. In a Python", "# 3 only future we can raise a new exception of the same type", "# with a new message from the old error.", "try", ":", "resp", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", "as", "e", ":", "message", "=", "\"\\nThis is a protected or locked account, or\"", "+", "\" the credentials provided are no longer valid.\"", "e", ".", "args", "=", "(", "e", ".", "args", "[", "0", "]", "+", "message", ",", ")", "+", "e", ".", "args", "[", "1", ":", "]", "log", ".", "warning", "(", "\"401 Authentication required for %s\"", ",", "resp", ".", "url", ")", "raise", "elif", "resp", ".", "status_code", "==", "429", ":", "reset", "=", "int", "(", "resp", ".", "headers", "[", "'x-rate-limit-reset'", "]", ")", "now", "=", "time", ".", "time", "(", ")", "seconds", "=", "reset", "-", "now", "+", "10", "if", "seconds", "<", "1", ":", "seconds", "=", "10", "log", ".", "warning", "(", "\"rate limit exceeded: sleeping %s secs\"", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "elif", "resp", ".", "status_code", ">=", "500", ":", "errors", "+=", "1", "if", "errors", ">", "30", ":", "log", ".", "warning", "(", "\"too many errors from Twitter, giving up\"", ")", "resp", ".", "raise_for_status", "(", ")", "seconds", "=", "60", "*", "errors", "log", ".", "warning", "(", "\"%s from Twitter API, sleeping %s\"", ",", "resp", ".", "status_code", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "else", ":", "resp", ".", "raise_for_status", "(", ")", 
"return", "new_f" ]
a bit messy
def partial_imap_1to1 ( func , si_func ) : @ functools . wraps ( si_func ) def wrapper ( input_ ) : if not util_iter . isiterable ( input_ ) : return func ( si_func ( input_ ) ) else : return list ( map ( func , si_func ( input_ ) ) ) set_funcname ( wrapper , util_str . get_callable_name ( func ) + '_mapper_' + get_funcname ( si_func ) ) return wrapper
9,288
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2079-L2091
[ "def", "read_struct", "(", "fstream", ")", ":", "line", "=", "fstream", ".", "readline", "(", ")", ".", "strip", "(", ")", "fragments", "=", "line", ".", "split", "(", "\",\"", ")", "fragments", "=", "[", "x", "for", "x", "in", "fragments", "if", "x", "is", "not", "None", "]", "partition", "=", "dict", "(", ")", "if", "not", "len", "(", "fragments", ")", ">=", "3", ":", "return", "None", "partition", "[", "\"struct\"", "]", "=", "fragments", "[", "0", "]", "partition", "[", "\"info\"", "]", "=", "fragments", "[", "1", "]", "partition", "[", "\"num_lines\"", "]", "=", "fragments", "[", "2", "]", "struct", "=", "None", "if", "partition", "is", "not", "None", "and", "partition", "[", "\"struct\"", "]", "==", "\"STRUCT\"", ":", "num_lines", "=", "int", "(", "partition", "[", "\"num_lines\"", "]", ".", "strip", "(", ")", ")", "struct", "=", "{", "}", "for", "_", "in", "range", "(", "num_lines", ")", ":", "cols", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "cols", "[", "0", "]", ":", "cols", "[", "1", ":", "]", "}", ")", "return", "struct" ]
Helper for sampling
def sample_zip ( items_list , num_samples , allow_overflow = False , per_bin = 1 ) : # Prealloc a list of lists samples_list = [ [ ] for _ in range ( num_samples ) ] # Sample the ix-th value from every list samples_iter = zip_longest ( * items_list ) sx = 0 for ix , samples_ in zip ( range ( num_samples ) , samples_iter ) : samples = filter_Nones ( samples_ ) samples_list [ sx ] . extend ( samples ) # Put per_bin from each sublist into a sample if ( ix + 1 ) % per_bin == 0 : sx += 1 # Check for overflow if allow_overflow : overflow_samples = flatten ( [ filter_Nones ( samples_ ) for samples_ in samples_iter ] ) return samples_list , overflow_samples else : try : samples_iter . next ( ) except StopIteration : pass else : raise AssertionError ( 'Overflow occured' ) return samples_list
9,289
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2094-L2152
[ "def", "serverinfo", "(", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "data", "=", "_wget", "(", "'serverinfo'", ",", "{", "}", ",", "url", ",", "timeout", "=", "timeout", ")", "if", "data", "[", "'res'", "]", "is", "False", ":", "return", "{", "'error'", ":", "data", "[", "'msg'", "]", "}", "ret", "=", "{", "}", "data", "[", "'msg'", "]", ".", "pop", "(", "0", ")", "for", "line", "in", "data", "[", "'msg'", "]", ":", "tmp", "=", "line", ".", "split", "(", "':'", ")", "ret", "[", "tmp", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "tmp", "[", "1", "]", ".", "strip", "(", ")", "return", "ret" ]
Determines if a list is sorted
def issorted ( list_ , op = operator . le ) : return all ( op ( list_ [ ix ] , list_ [ ix + 1 ] ) for ix in range ( len ( list_ ) - 1 ) )
9,290
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2213-L2224
[ "def", "parse_target", "(", "target_expression", ")", ":", "match", "=", "TARGET_REX", ".", "match", "(", "target_expression", ")", "if", "not", "match", ":", "log", ".", "warning", "(", "'Unable to parse target \"%s\"'", ",", "target_expression", ")", "ret", "=", "{", "'engine'", ":", "None", ",", "'delimiter'", ":", "None", ",", "'pattern'", ":", "target_expression", ",", "}", "else", ":", "ret", "=", "match", ".", "groupdict", "(", ")", "return", "ret" ]
Returns the deepest level of nesting within a list of lists
def list_depth ( list_ , func = max , _depth = 0 ) : depth_list = [ list_depth ( item , func = func , _depth = _depth + 1 ) for item in list_ if util_type . is_listlike ( item ) ] if len ( depth_list ) > 0 : return func ( depth_list ) else : return _depth
9,291
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2434-L2456
[ "def", "kernelDriverActive", "(", "self", ",", "interface", ")", ":", "result", "=", "libusb1", ".", "libusb_kernel_driver_active", "(", "self", ".", "__handle", ",", "interface", ")", "if", "result", "==", "0", ":", "return", "False", "elif", "result", "==", "1", ":", "return", "True", "raiseUSBError", "(", "result", ")" ]
Find the nesting depth of a nested sequence
def depth ( sequence , func = max , _depth = 0 ) : if isinstance ( sequence , dict ) : sequence = list ( sequence . values ( ) ) depth_list = [ depth ( item , func = func , _depth = _depth + 1 ) for item in sequence if ( isinstance ( item , dict ) or util_type . is_listlike ( item ) ) ] if len ( depth_list ) > 0 : return func ( depth_list ) else : return _depth
9,292
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2459-L2470
[ "def", "is_client_ip_address_whitelisted", "(", "request", ":", "AxesHttpRequest", ")", ":", "if", "settings", ".", "AXES_NEVER_LOCKOUT_WHITELIST", "and", "is_ip_address_in_whitelist", "(", "request", ".", "axes_ip_address", ")", ":", "return", "True", "if", "settings", ".", "AXES_ONLY_WHITELIST", "and", "is_ip_address_in_whitelist", "(", "request", ".", "axes_ip_address", ")", ":", "return", "True", "return", "False" ]
Returns all types in a deep list
def list_deep_types ( list_ ) : type_list = [ ] for item in list_ : if util_type . is_listlike ( item ) : type_list . extend ( list_deep_types ( item ) ) else : type_list . append ( type ( item ) ) return type_list
9,293
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2473-L2483
[ "def", "post", "(", "self", ",", "command", ",", "data", "=", "None", ")", ":", "now", "=", "calendar", ".", "timegm", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "if", "now", ">", "self", ".", "expiration", ":", "auth", "=", "self", ".", "__open", "(", "\"/oauth/token\"", ",", "data", "=", "self", ".", "oauth", ")", "self", ".", "__sethead", "(", "auth", "[", "'access_token'", "]", ")", "return", "self", ".", "__open", "(", "\"%s%s\"", "%", "(", "self", ".", "api", ",", "command", ")", ",", "headers", "=", "self", ".", "head", ",", "data", "=", "data", ")" ]
r Returns a nested list corresponding the shape of the nested structures lists represent depth tuples represent shape . The values of the items do not matter . only the lengths .
def depth_profile ( list_ , max_depth = None , compress_homogenous = True , compress_consecutive = False , new_depth = False ) : if isinstance ( list_ , dict ) : list_ = list ( list_ . values ( ) ) # handle dict level_shape_list = [ ] # For a pure bottom level list return the length if not any ( map ( util_type . is_listlike , list_ ) ) : return len ( list_ ) if False and new_depth : pass # max_depth_ = None if max_depth is None else max_depth - 1 # if max_depth_ is None or max_depth_ > 0: # pass # else: # for item in list_: # if isinstance(item, dict): # item = list(item.values()) # handle dict # if util_type.is_listlike(item): # if max_depth is None: # level_shape_list.append(depth_profile(item, None)) # else: # if max_depth >= 0: # level_shape_list.append(depth_profile(item, max_depth - 1)) # else: # level_shape_list.append(str(len(item))) # else: # level_shape_list.append(1) else : for item in list_ : if isinstance ( item , dict ) : item = list ( item . values ( ) ) # handle dict if util_type . is_listlike ( item ) : if max_depth is None : level_shape_list . append ( depth_profile ( item , None ) ) else : if max_depth >= 0 : level_shape_list . append ( depth_profile ( item , max_depth - 1 ) ) else : level_shape_list . append ( str ( len ( item ) ) ) else : level_shape_list . append ( 1 ) if compress_homogenous : # removes redudant information by returning a shape duple if allsame ( level_shape_list ) : dim_ = level_shape_list [ 0 ] len_ = len ( level_shape_list ) if isinstance ( dim_ , tuple ) : level_shape_list = tuple ( [ len_ ] + list ( dim_ ) ) else : level_shape_list = tuple ( [ len_ , dim_ ] ) if compress_consecutive : hash_list = list ( map ( hash , map ( str , level_shape_list ) ) ) consec_list = group_consecutives ( hash_list , 0 ) if len ( consec_list ) != len ( level_shape_list ) : len_list = list ( map ( len , consec_list ) ) cumsum_list = np . 
cumsum ( len_list ) consec_str = '[' thresh = 1 for len_ , cumsum in zip ( len_list , cumsum_list ) : value = level_shape_list [ cumsum - 1 ] if len_ > thresh : consec_str += str ( value ) + '] * ' + str ( len_ ) consec_str += ' + [' else : consec_str += str ( value ) + ', ' if consec_str . endswith ( ', ' ) : consec_str = consec_str [ : - 2 ] #consec_str += ']' #consec_str = consec_str.rstrip(', ').rstrip(']') #consec_str = consec_str.rstrip(', ') #if consec_str.endswith(']'): # consec_str = consec_str[:-1] consec_str += ']' level_shape_list = consec_str return level_shape_list
9,294
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2486-L2665
[ "def", "user_agent", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "indicator_obj", "=", "UserAgent", "(", "text", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_indicator", "(", "indicator_obj", ")" ]
r returns boolean for each position in list1 if it is in list2
def list_cover ( list1 , list2 ) : set2 = set ( list2 ) incover_list = [ item1 in set2 for item1 in list1 ] return incover_list
9,295
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2846-L2875
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Assumes list items are unique
def list_alignment ( list1 , list2 , missing = False ) : import utool as ut item1_to_idx = make_index_lookup ( list1 ) if missing : sortx = ut . dict_take ( item1_to_idx , list2 , None ) else : sortx = ut . take ( item1_to_idx , list2 ) return sortx
9,296
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3008-L3052
[ "def", "write", "(", "self", ",", "data", ")", ":", "begin", ",", "end", ",", "size", "=", "0", ",", "0", ",", "len", "(", "data", ")", "bytes_sent", "=", "0", "raw_write", "=", "super", "(", "USBRawDevice", ",", "self", ")", ".", "write", "while", "not", "end", ">", "size", ":", "begin", "=", "end", "end", "=", "begin", "+", "self", ".", "RECV_CHUNK", "bytes_sent", "+=", "raw_write", "(", "data", "[", "begin", ":", "end", "]", ")", "return", "bytes_sent" ]
r Swaps rows and columns . nCols should be specified if the initial list is empty .
def list_transpose ( list_ , shape = None ) : num_cols_set = unique ( [ len ( x ) for x in list_ ] ) if shape is None : if len ( num_cols_set ) == 0 : raise ValueError ( 'listT does not support empty transpose without shapes' ) else : assert len ( shape ) == 2 , 'shape must be a 2-tuple' if len ( num_cols_set ) == 0 : return [ [ ] for _ in range ( shape [ 1 ] ) ] elif num_cols_set [ 0 ] == 0 : return [ ] if len ( num_cols_set ) != 1 : raise ValueError ( 'inconsistent column lengths=%r' % ( num_cols_set , ) ) return list ( zip ( * list_ ) )
9,297
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3065-L3122
[ "def", "get_current_program_progress", "(", "program", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "program_duration", "=", "get_program_duration", "(", "program", ")", "if", "not", "program_duration", ":", "return", "progress", "=", "now", "-", "program", ".", "get", "(", "'start_time'", ")", "return", "progress", ".", "seconds", "*", "100", "/", "program_duration" ]
Remove items from list_ at positions specified in index_list The original list_ is preserved if copy is True
def delete_items_by_index ( list_ , index_list , copy = False ) : if copy : list_ = list_ [ : ] # Rectify negative indicies index_list_ = [ ( len ( list_ ) + x if x < 0 else x ) for x in index_list ] # Remove largest indicies first index_list_ = sorted ( index_list_ , reverse = True ) for index in index_list_ : del list_ [ index ] return list_
9,298
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3128-L3155
[ "def", "handle_error", "(", "self", ",", "error", ",", "download_request", ")", ":", "if", "hasattr", "(", "error", ",", "\"errno\"", ")", "and", "error", ".", "errno", "==", "errno", ".", "EACCES", ":", "self", ".", "handle_certificate_problem", "(", "str", "(", "error", ")", ")", "else", ":", "self", ".", "handle_general_download_error", "(", "str", "(", "error", ")", ",", "download_request", ")" ]
r Remove items in item_list from list_ . The original list_ is preserved if copy is True
def delete_list_items ( list_ , item_list , copy = False ) : if copy : list_ = list_ [ : ] for item in item_list : list_ . remove ( item ) return list_
9,299
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3158-L3167
[ "def", "_build_trial_meta", "(", "cls", ",", "expr_dir", ")", ":", "meta_file", "=", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_META_FILE", ")", "meta", "=", "parse_json", "(", "meta_file", ")", "if", "not", "meta", ":", "job_id", "=", "expr_dir", ".", "split", "(", "\"/\"", ")", "[", "-", "2", "]", "trial_id", "=", "expr_dir", "[", "-", "8", ":", "]", "params", "=", "parse_json", "(", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_PARARM_FILE", ")", ")", "meta", "=", "{", "\"trial_id\"", ":", "trial_id", ",", "\"job_id\"", ":", "job_id", ",", "\"status\"", ":", "\"RUNNING\"", ",", "\"type\"", ":", "\"TUNE\"", ",", "\"start_time\"", ":", "os", ".", "path", ".", "getctime", "(", "expr_dir", ")", ",", "\"end_time\"", ":", "None", ",", "\"progress_offset\"", ":", "0", ",", "\"result_offset\"", ":", "0", ",", "\"params\"", ":", "params", "}", "if", "not", "meta", ".", "get", "(", "\"start_time\"", ",", "None", ")", ":", "meta", "[", "\"start_time\"", "]", "=", "os", ".", "path", ".", "getctime", "(", "expr_dir", ")", "if", "isinstance", "(", "meta", "[", "\"start_time\"", "]", ",", "float", ")", ":", "meta", "[", "\"start_time\"", "]", "=", "timestamp2date", "(", "meta", "[", "\"start_time\"", "]", ")", "if", "meta", ".", "get", "(", "\"end_time\"", ",", "None", ")", ":", "meta", "[", "\"end_time\"", "]", "=", "timestamp2date", "(", "meta", "[", "\"end_time\"", "]", ")", "meta", "[", "\"params\"", "]", "=", "parse_json", "(", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_PARARM_FILE", ")", ")", "return", "meta" ]