query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Parse entire file and return relevant object .
def parse(self, file_name):
    """Parse an entire file and return the resulting parsed object.

    :param file_name: path of the file to read (opened as UTF-8 text).
    :return: the object created via ``self.parsed_class`` and populated
        by ``self.parse_str``.
    """
    # Fresh result object for every parse run.
    self.object = self.parsed_class()
    with open(file_name, encoding='utf-8') as f:
        self.parse_str(f.read())
    return self.object
2,400
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/parsers.py#L95-L106
[ "def", "cmd_oreoled", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "4", ":", "print", "(", "\"Usage: oreoled LEDNUM RED GREEN BLUE <RATE>\"", ")", "return", "lednum", "=", "int", "(", "args", "[", "0", "]", ")", "pattern", "=", "[", "0", "]", "*", "24", "pattern", "[", "0", "]", "=", "ord", "(", "'R'", ")", "pattern", "[", "1", "]", "=", "ord", "(", "'G'", ")", "pattern", "[", "2", "]", "=", "ord", "(", "'B'", ")", "pattern", "[", "3", "]", "=", "ord", "(", "'0'", ")", "pattern", "[", "4", "]", "=", "0", "pattern", "[", "5", "]", "=", "int", "(", "args", "[", "1", "]", ")", "pattern", "[", "6", "]", "=", "int", "(", "args", "[", "2", "]", ")", "pattern", "[", "7", "]", "=", "int", "(", "args", "[", "3", "]", ")", "self", ".", "master", ".", "mav", ".", "led_control_send", "(", "self", ".", "settings", ".", "target_system", ",", "self", ".", "settings", ".", "target_component", ",", "lednum", ",", "255", ",", "8", ",", "pattern", ")" ]
Checks for one more item than last on this page .
def has_next(self):
    """Check whether there is at least one more item than the last on this page."""
    paginator = self.paginator
    try:
        # Probe one element past the current page size; an IndexError
        # means nothing exists beyond this page.
        paginator.object_list[paginator.per_page]
    except IndexError:
        return False
    return True
2,401
https://github.com/staticdev/django-pagination-bootstrap/blob/b4bf8352a364b223babbc5f33e14ecabd82c0886/pagination_bootstrap/paginator.py#L157-L165
[ "def", "_ProcessRegistryKeySource", "(", "self", ",", "source", ")", ":", "keys", "=", "source", ".", "base_source", ".", "attributes", ".", "get", "(", "\"keys\"", ",", "[", "]", ")", "if", "not", "keys", ":", "return", "interpolated_paths", "=", "artifact_utils", ".", "InterpolateListKbAttributes", "(", "input_list", "=", "keys", ",", "knowledge_base", "=", "self", ".", "knowledge_base", ",", "ignore_errors", "=", "self", ".", "ignore_interpolation_errors", ")", "glob_expressions", "=", "map", "(", "rdf_paths", ".", "GlobExpression", ",", "interpolated_paths", ")", "patterns", "=", "[", "]", "for", "pattern", "in", "glob_expressions", ":", "patterns", ".", "extend", "(", "pattern", ".", "Interpolate", "(", "knowledge_base", "=", "self", ".", "knowledge_base", ")", ")", "patterns", ".", "sort", "(", "key", "=", "len", ",", "reverse", "=", "True", ")", "file_finder_action", "=", "rdf_file_finder", ".", "FileFinderAction", ".", "Stat", "(", ")", "request", "=", "rdf_file_finder", ".", "FileFinderArgs", "(", "paths", "=", "patterns", ",", "action", "=", "file_finder_action", ",", "follow_links", "=", "True", ",", "pathtype", "=", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", "action", "=", "vfs_file_finder", ".", "RegistryKeyFromClient", "yield", "action", ",", "request" ]
extracts names from the node to get counts of miss + cann on both sides
def parse_miss_cann(node, m, c):
    """Extract missionary and cannibal counts on both banks from *node*.

    node[0]/node[1] are the counts on the boat side indicated by node[2]
    (truthy = left bank); m and c are the totals, so the other bank gets
    the remainder.
    """
    miss, cann, boat_on_left = node[0], node[1], node[2]
    if boat_on_left:
        left = (miss, cann)
        right = (m - miss, c - cann)
    else:
        left = (m - miss, c - cann)
        right = (miss, cann)
    return left[0], left[1], right[0], right[1]
2,402
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/puzzle_missions_canninballs.py#L167-L183
[ "def", "VerifyStructure", "(", "self", ",", "parser_mediator", ",", "lines", ")", ":", "try", ":", "structure", "=", "self", ".", "_GDS_LINE", ".", "parseString", "(", "lines", ")", "except", "pyparsing", ".", "ParseException", "as", "exception", ":", "logger", ".", "debug", "(", "'Not a Google Drive Sync log file: {0!s}'", ".", "format", "(", "exception", ")", ")", "return", "False", "date_time", "=", "dfdatetime_time_elements", ".", "TimeElementsInMilliseconds", "(", ")", "try", ":", "datetime_iso8601", "=", "self", ".", "_GetISO8601String", "(", "structure", ".", "date_time", ")", "date_time", ".", "CopyFromStringISO8601", "(", "datetime_iso8601", ")", "except", "ValueError", "as", "exception", ":", "logger", ".", "debug", "(", "(", "'Not a Google Drive Sync log file, invalid date/time: {0!s} '", "'with error: {1!s}'", ")", ".", "format", "(", "structure", ".", "date_time", ",", "exception", ")", ")", "return", "False", "return", "True" ]
run the algorithm to find the path list
def solve(m, c):
    """Run the search algorithm and return the path list.

    Builds an undirected graph G of reachable states, then delegates to
    mod_plan.find_path_BFS to extract the path from the start state
    (everyone on the left bank) to the goal state (0, 0, 0).

    :param m: total number of missionaries.
    :param c: total number of cannibals.
    """
    G = {(m, c, 1): []}
    frontier = [(m, c, 1)]  # 1 as boat starts on left bank
    while len(frontier) > 0:
        hold = list(frontier)
        for node in hold:
            newnode = []
            frontier.remove(node)
            # presumably pick_next_boat_trip yields the valid successor
            # states of `node` — TODO confirm against its definition
            newnode.extend(pick_next_boat_trip(node, m, c, frontier))
            for neighbor in newnode:
                if neighbor not in G:
                    # record the edge in both directions and keep exploring
                    G[node].append(neighbor)
                    G[neighbor] = [node]
                    frontier.append(neighbor)
    return mod_plan.find_path_BFS(G, (m, c, 1), (0, 0, 0))
2,403
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/puzzle_missions_canninballs.py#L225-L242
[ "def", "_bqschema_to_nullsafe_dtypes", "(", "schema_fields", ")", ":", "# If you update this mapping, also update the table at", "# `docs/source/reading.rst`.", "dtype_map", "=", "{", "\"FLOAT\"", ":", "np", ".", "dtype", "(", "float", ")", ",", "# pandas doesn't support timezone-aware dtype in DataFrame/Series", "# constructors. It's more idiomatic to localize after construction.", "# https://github.com/pandas-dev/pandas/issues/25843", "\"TIMESTAMP\"", ":", "\"datetime64[ns]\"", ",", "\"TIME\"", ":", "\"datetime64[ns]\"", ",", "\"DATE\"", ":", "\"datetime64[ns]\"", ",", "\"DATETIME\"", ":", "\"datetime64[ns]\"", ",", "}", "dtypes", "=", "{", "}", "for", "field", "in", "schema_fields", ":", "name", "=", "str", "(", "field", "[", "\"name\"", "]", ")", "if", "field", "[", "\"mode\"", "]", ".", "upper", "(", ")", "==", "\"REPEATED\"", ":", "continue", "dtype", "=", "dtype_map", ".", "get", "(", "field", "[", "\"type\"", "]", ".", "upper", "(", ")", ")", "if", "dtype", ":", "dtypes", "[", "name", "]", "=", "dtype", "return", "dtypes" ]
appends the CREATE TABLE index etc to self . ddl_text
def create_script_fact(self):
    """Append the CREATE TABLE DDL for the fact table to ``self.ddl_text``.

    Emits a banner, a DROP TABLE guard, then the CREATE TABLE statement
    with every column as VARCHAR2(200) plus the date-updated column.
    """
    banner = '---------------------------------------------\n'
    columns = ' '.join(col + ' VARCHAR2(200), \n' for col in self.col_list)
    self.ddl_text += ''.join([
        banner,
        '-- CREATE Fact Table - ' + self.fact_table + '\n',
        banner,
        'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n',
        'CREATE TABLE ' + self.fact_table + ' (\n',
        columns,
        ' ' + self.date_updated_col + ' DATE \n',
        ');\n',
    ])
2,404
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L60-L71
[ "async", "def", "handle_request", "(", "self", ",", "request", ")", ":", "service_name", "=", "request", ".", "rel_url", ".", "query", "[", "'servicename'", "]", "received_code", "=", "request", ".", "rel_url", ".", "query", "[", "'pairingcode'", "]", ".", "lower", "(", ")", "_LOGGER", ".", "info", "(", "'Got pairing request from %s with code %s'", ",", "service_name", ",", "received_code", ")", "if", "self", ".", "_verify_pin", "(", "received_code", ")", ":", "cmpg", "=", "tags", ".", "uint64_tag", "(", "'cmpg'", ",", "int", "(", "self", ".", "_pairing_guid", ",", "16", ")", ")", "cmnm", "=", "tags", ".", "string_tag", "(", "'cmnm'", ",", "self", ".", "_name", ")", "cmty", "=", "tags", ".", "string_tag", "(", "'cmty'", ",", "'iPhone'", ")", "response", "=", "tags", ".", "container_tag", "(", "'cmpa'", ",", "cmpg", "+", "cmnm", "+", "cmty", ")", "self", ".", "_has_paired", "=", "True", "return", "web", ".", "Response", "(", "body", "=", "response", ")", "# Code did not match, generate an error", "return", "web", ".", "Response", "(", "status", "=", "500", ")" ]
appends the CREATE TABLE index etc to another table
def create_script_staging_table(self, output_table, col_list):
    """Append the CREATE TABLE DDL for a staging table to ``self.ddl_text``.

    :param output_table: name of the staging table to create.
    :param col_list: column names, all typed VARCHAR2(200).
    """
    banner = '---------------------------------------------\n'
    pieces = [
        banner,
        '-- CREATE Staging Table - ' + output_table + '\n',
        banner,
        'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n',
        'CREATE TABLE ' + output_table + ' (\n ',
        ' '.join(col + ' VARCHAR2(200), \n' for col in col_list),
        ' ' + self.date_updated_col + ' DATE \n',
        ');\n',
    ]
    self.ddl_text += ''.join(pieces)
2,405
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L73-L84
[ "def", "extract_secrets_from_android_rooted", "(", "adb_path", "=", "'adb'", ")", ":", "data", "=", "subprocess", ".", "check_output", "(", "[", "adb_path", ",", "'shell'", ",", "'su'", ",", "'-c'", ",", "\"'cat /data/data/com.valvesoftware.android.steam.community/files/Steamguard*'\"", "]", ")", "# When adb daemon is not running, `adb` will print a couple of lines before our data.", "# The data doesn't have new lines and its always on the last line.", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", "[", "-", "1", "]", "if", "data", "[", "0", "]", "!=", "\"{\"", ":", "raise", "RuntimeError", "(", "\"Got invalid data: %s\"", "%", "repr", "(", "data", ")", ")", "return", "{", "int", "(", "x", "[", "'steamid'", "]", ")", ":", "x", "for", "x", "in", "map", "(", "json", ".", "loads", ",", "data", ".", "replace", "(", "\"}{\"", ",", "'}|||||{'", ")", ".", "split", "(", "'|||||'", ")", ")", "}" ]
for all columns check which values are not in the other table
def distinct_values(t_old, t_new):
    """For every column present in both tables report values absent from the other.

    :param t_old: table object exposing .header and
        .get_distinct_values_from_cols([col]).
    :param t_new: table object with the same interface.
    :return: sorted list of result rows; first conceptual row is a header
        marker, then ['Not in New'|'Not in Old', column, missing_values].
    """
    results = [[' -- NOT IN check -- ']]
    for col in t_new.header:
        new_vals = t_new.get_distinct_values_from_cols([col])
        for candidate in t_old.header:
            if candidate != col:
                continue
            old_vals = t_old.get_distinct_values_from_cols([candidate])
            # old values that disappeared in the new table
            missing_from_new = [v for v in old_vals[0] if v not in new_vals[0]]
            if missing_from_new:
                results.append(['Not in New', candidate, missing_from_new])
            # new values that never existed in the old table
            missing_from_old = [v for v in new_vals[0] if v not in old_vals[0]]
            if missing_from_old:
                results.append(['Not in Old', col, missing_from_old])
    return sorted(results)
2,406
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/table_compare.py#L123-L151
[ "def", "setup_environment", "(", ")", ":", "osinter", "=", "ostool", ".", "get_interface", "(", ")", "pypath", "=", "osinter", ".", "get_maya_envpath", "(", ")", "for", "p", "in", "sys", ".", "path", ":", "pypath", "=", "os", ".", "pathsep", ".", "join", "(", "(", "pypath", ",", "p", ")", ")", "os", ".", "environ", "[", "'PYTHONPATH'", "]", "=", "pypath" ]
returns the web page header containing standard AIKIF top level web menu
def aikif_web_menu(cur=''):
    """Return the web page header containing the standard AIKIF top level menu.

    :param cur: name of the currently selected menu entry; empty string
        defaults to 'Home'.
    :return: HTML string with banner image, menu list and page headings.
    """
    pgeHdg = ''
    pgeBlurb = ''
    if cur == '':
        cur = 'Home'
    txt = get_header(cur)  # "<div id=top_menu>"
    txt += '<div id = "container">\n'
    txt += ' <div id = "header">\n'
    txt += ' <!-- Banner -->\n'
    txt += ' <img src = "' + os.path.join('/static', 'aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n'
    txt += ' <ul id = "menu_list">\n'
    # `menu` is a module-level list; each entry looks like
    # [url, caption, optional blurb] — blurb access is guarded below.
    for m in menu:
        if m[1] == cur:
            # highlight the selected entry and remember its heading/blurb
            txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
            pgeHdg = m[1]
            try:
                pgeBlurb = m[2]
            except Exception:
                pass
        else:
            txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
    txt += " </ul>\n </div>\n\n"
    txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n'
    txt += '<H4>' + pgeBlurb + '</H4>\n'
    return txt
2,407
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_aikif.py#L183-L208
[ "def", "uploadByParts", "(", "self", ",", "registerID", ",", "filePath", ",", "commit", "=", "True", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/%s/uploadPart\"", "%", "registerID", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "with", "open", "(", "filePath", ",", "'rb'", ")", "as", "f", ":", "mm", "=", "mmap", ".", "mmap", "(", "f", ".", "fileno", "(", ")", ",", "0", ",", "access", "=", "mmap", ".", "ACCESS_READ", ")", "size", "=", "1000000", "steps", "=", "int", "(", "os", ".", "fstat", "(", "f", ".", "fileno", "(", ")", ")", ".", "st_size", "/", "size", ")", "if", "os", ".", "fstat", "(", "f", ".", "fileno", "(", ")", ")", ".", "st_size", "%", "size", ">", "0", ":", "steps", "+=", "1", "for", "i", "in", "range", "(", "steps", ")", ":", "files", "=", "{", "}", "tempFile", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'TEMP'", "]", ",", "\"split.part%s\"", "%", "i", ")", "if", "os", ".", "path", ".", "isfile", "(", "tempFile", ")", ":", "os", ".", "remove", "(", "tempFile", ")", "with", "open", "(", "tempFile", ",", "'wb'", ")", "as", "writer", ":", "writer", ".", "write", "(", "mm", ".", "read", "(", "size", ")", ")", "writer", ".", "flush", "(", ")", "writer", ".", "close", "(", ")", "del", "writer", "files", "[", "'file'", "]", "=", "tempFile", "params", "[", "'partNum'", "]", "=", "i", "+", "1", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "files", "=", "files", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "os", ".", "remove", "(", "tempFile", ")", "del", "files", "del", "mm", "return", "self", ".", "commit", "(", "registerID", ")" ]
This generates the research document based on the results of the various programs and includes RST imports for introduction and summary
def main():
    """Generate the research document from the results of the various
    programs, including RST imports for the introduction and summary.

    Relies on module-level names: ``fname`` (output file), ``append_rst``,
    ``data_files`` and the two method modules.
    """
    print("Generating research notes...")
    # start from a clean output file
    if os.path.exists(fname):
        os.remove(fname)
    append_rst('================================================\n')
    append_rst('Comparison of Information Aggregation Techniques\n')
    append_rst('================================================\n\n')
    append_rst('.. contents::\n\n')
    # import header
    append_rst(open('res_core_data_HEADER.rst', 'r').read())
    append_rst(res_core_data_mthd1.get_method())
    append_rst(res_core_data_mthd2.get_method())
    # call programs
    append_rst('Results\n')
    append_rst('=====================================\n')
    for dat in data_files:
        append_rst('\nData File : ' + dat + '\n---------------------------------------\n\n')
        res_core_data_mthd1.get_results(fname, dat)
        res_core_data_mthd2.get_results(fname, dat)
    # import footer
    append_rst(open('res_core_data_FOOTER.rst', 'r').read())
    print("Done!")
2,408
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/res_core_data_NOTES.py#L20-L50
[ "def", "not_storable", "(", "_type", ")", ":", "return", "Storable", "(", "_type", ",", "handlers", "=", "StorableHandler", "(", "poke", "=", "fake_poke", ",", "peek", "=", "fail_peek", "(", "_type", ")", ")", ")" ]
returns a list of records containing text
def find(self, txt):
    """Return the list of records in ``self.data`` that contain *txt*."""
    return [record for record in self.data if txt in record]
2,409
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/knowledge.py#L56-L64
[ "def", "build_socket", "(", "self", ")", ":", "try", ":", "self", ".", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "sock", ".", "settimeout", "(", "self", ".", "SOCKET_TIMEOUT", ")", "self", ".", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "# Check if TLS", "if", "self", ".", "request_object", ".", "protocol", "==", "'https'", ":", "self", ".", "sock", "=", "ssl", ".", "wrap_socket", "(", "self", ".", "sock", ",", "ciphers", "=", "self", ".", "CIPHERS", ")", "self", ".", "sock", ".", "connect", "(", "(", "self", ".", "request_object", ".", "dest_addr", ",", "self", ".", "request_object", ".", "port", ")", ")", "except", "socket", ".", "error", "as", "msg", ":", "raise", "errors", ".", "TestError", "(", "'Failed to connect to server'", ",", "{", "'host'", ":", "self", ".", "request_object", ".", "dest_addr", ",", "'port'", ":", "self", ".", "request_object", ".", "port", ",", "'proto'", ":", "self", ".", "request_object", ".", "protocol", ",", "'message'", ":", "msg", ",", "'function'", ":", "'http.HttpUA.build_socket'", "}", ")" ]
Schema for data in CollectorUpdate .
def schema_complete():
    """Schema for data in CollectorUpdate.

    Requires stage (non-empty str), timestamp (int) and a status from the
    fixed set; matrix and information are optional with defaults.
    """
    return Schema({
        'stage': And(str, len),
        'timestamp': int,
        'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']),
        # optional matrix
        Optional('matrix', default='default'): And(str, len),
        # optional information
        Optional('information', default={}): {
            Optional(Regex(r'([a-z][_a-z]*)')): object
        }
    })
2,410
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L56-L68
[ "def", "_is_valid_relpath", "(", "relpath", ",", "maxdepth", "=", "None", ")", ":", "# Check relpath surrounded by slashes, so that `..` can be caught as", "# a path component at the start, end, and in the middle of the path.", "sep", ",", "pardir", "=", "posixpath", ".", "sep", ",", "posixpath", ".", "pardir", "if", "sep", "+", "pardir", "+", "sep", "in", "sep", "+", "relpath", "+", "sep", ":", "return", "False", "# Check that the relative path's depth does not exceed maxdepth", "if", "maxdepth", "is", "not", "None", ":", "path_depth", "=", "relpath", ".", "strip", "(", "sep", ")", ".", "count", "(", "sep", ")", "if", "path_depth", ">", "maxdepth", ":", "return", "False", "return", "True" ]
Schema for event items .
def schema_event_items():
    """Schema for event items: a positive timestamp plus optional
    information dict with lowercase/underscore keys."""
    return {
        'timestamp': And(int, lambda n: n > 0),
        Optional('information', default={}): {
            Optional(Regex(r'([a-z][_a-z]*)')): object
        }
    }
2,411
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L115-L122
[ "def", "write", "(", "self", ",", "df", ",", "table_name", ",", "temp_dir", "=", "CACHE_DIR", ",", "overwrite", "=", "False", ",", "lnglat", "=", "None", ",", "encode_geom", "=", "False", ",", "geom_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# noqa", "tqdm", ".", "write", "(", "'Params: encode_geom, geom_col and everything in kwargs are deprecated and not being used any more'", ")", "dataset", "=", "Dataset", "(", "self", ",", "table_name", ",", "df", "=", "df", ")", "if_exists", "=", "Dataset", ".", "FAIL", "if", "overwrite", ":", "if_exists", "=", "Dataset", ".", "REPLACE", "dataset", "=", "dataset", ".", "upload", "(", "with_lonlat", "=", "lnglat", ",", "if_exists", "=", "if_exists", ")", "tqdm", ".", "write", "(", "'Table successfully written to CARTO: {table_url}'", ".", "format", "(", "table_url", "=", "utils", ".", "join_url", "(", "self", ".", "creds", ".", "base_url", "(", ")", ",", "'dataset'", ",", "dataset", ".", "table_name", ")", ")", ")", "return", "dataset" ]
Schema for data in CollectorStage .
def schema_complete():
    """Schema for data in CollectorStage.

    Requires stage (non-empty str) and a status from the fixed set;
    events is optional and, when given, must be a non-empty list of
    valid event items.
    """
    return Schema({
        'stage': And(str, len),
        'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']),
        Optional('events', default=[]): And(len, [CollectorStage.schema_event_items()])
    })
2,412
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L125-L131
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", "=", "None", ",", "spatialReferenceID", "=", "None", ",", "replaceParamFile", "=", "None", ")", ":", "yml_events", "=", "[", "]", "with", "open", "(", "path", ")", "as", "fo", ":", "yml_events", "=", "yaml", ".", "load", "(", "fo", ")", "for", "yml_event", "in", "yml_events", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "yml_event", ".", "subfolder", ")", ")", ":", "orm_event", "=", "yml_event", ".", "as_orm", "(", ")", "if", "not", "self", ".", "_similar_event_exists", "(", "orm_event", ".", "subfolder", ")", ":", "session", ".", "add", "(", "orm_event", ")", "self", ".", "events", ".", "append", "(", "orm_event", ")", "session", ".", "commit", "(", ")" ]
Add event information .
def add(self, timestamp, information):
    """Add event information to this stage.

    :param timestamp: event time as an integer Unix timestamp (> 0).
    :param information: dict with additional event details.
    :raises RuntimeError: when the given data does not match the
        event-item schema; the original SchemaError is chained as cause.
    """
    try:
        item = Schema(CollectorStage.schema_event_items()).validate({
            'timestamp': timestamp,
            'information': information
        })
    except SchemaError as exception:
        Logger.get_logger(__name__).error(exception)
        # chain the schema error so the validation detail survives
        raise RuntimeError(str(exception)) from exception
    else:
        # only append after successful validation
        self.events.append(item)
2,413
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L152-L170
[ "def", "validate_arguments", "(", "log", ",", "whitelisted_args", ",", "args", ")", ":", "valid_patterns", "=", "{", "re", ".", "compile", "(", "p", ")", ":", "v", "for", "p", ",", "v", "in", "whitelisted_args", ".", "items", "(", ")", "}", "def", "validate", "(", "idx", ")", ":", "arg", "=", "args", "[", "idx", "]", "for", "pattern", ",", "has_argument", "in", "valid_patterns", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "arg", ")", ":", "return", "2", "if", "has_argument", "else", "1", "log", ".", "warn", "(", "\"Zinc argument '{}' is not supported, and is subject to change/removal!\"", ".", "format", "(", "arg", ")", ")", "return", "1", "arg_index", "=", "0", "while", "arg_index", "<", "len", "(", "args", ")", ":", "arg_index", "+=", "validate", "(", "arg_index", ")" ]
Calculate how long the stage took .
def duration(self):
    """Calculate how long the stage took, in seconds.

    Uses the first and last recorded event timestamps; an empty event
    list yields 0.0.
    """
    if not self.events:
        return 0.0
    started = datetime.fromtimestamp(self.events[0]['timestamp'])
    finished = datetime.fromtimestamp(self.events[-1]['timestamp'])
    return (finished - started).total_seconds()
2,414
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L172-L184
[ "def", "join_formatted", "(", "text", ",", "new_text", ",", "glue_format_if_true", "=", "u'%s%s'", ",", "glue_format_if_false", "=", "u'%s%s'", ",", "condition", "=", "None", ",", "format", "=", "u'%s'", ",", "escape", "=", "False", ")", ":", "if", "condition", "is", "None", ":", "condition", "=", "text", "and", "new_text", "add_text", "=", "new_text", "if", "escape", ":", "add_text", "=", "conditional_escape", "(", "add_text", ")", "if", "add_text", ":", "add_text", "=", "format", "%", "add_text", "glue_format", "=", "glue_format_if_true", "if", "condition", "else", "glue_format_if_false", "return", "glue_format", "%", "(", "text", ",", "add_text", ")" ]
Number of registered stages for given matrix name .
def count_stages(self, matrix_name):
    """Number of registered stages for the given matrix name (0 if unknown)."""
    return len(self.data.get(matrix_name, []))
2,415
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L229-L239
[ "def", "RestrictFeedItemToAdGroup", "(", "client", ",", "feed_item", ",", "adgroup_id", ")", ":", "# Get the FeedItemTargetService", "feed_item_target_service", "=", "client", ".", "GetService", "(", "'FeedItemTargetService'", ",", "'v201809'", ")", "# Optional: Restrict the first feed item to only serve with ads for the", "# specified ad group ID.", "ad_group_target", "=", "{", "'xsi_type'", ":", "'FeedItemAdGroupTarget'", ",", "'feedId'", ":", "feed_item", "[", "'feedId'", "]", ",", "'feedItemId'", ":", "feed_item", "[", "'feedItemId'", "]", ",", "'adGroupId'", ":", "adgroup_id", "}", "operation", "=", "{", "'operator'", ":", "'ADD'", ",", "'operand'", ":", "ad_group_target", "}", "response", "=", "feed_item_target_service", ".", "mutate", "(", "[", "operation", "]", ")", "new_ad_group_target", "=", "response", "[", "'value'", "]", "[", "0", "]", "print", "(", "'Feed item target for feed ID %s and feed item ID %s was created to '", "'restrict serving to ad group ID %s'", "%", "(", "new_ad_group_target", "[", "'feedId'", "]", ",", "new_ad_group_target", "[", "'feedItemId'", "]", ",", "new_ad_group_target", "[", "'adGroupId'", "]", ")", ")" ]
Get Stage of a concrete matrix .
def get_stage(self, matrix_name, stage_name):
    """Get the stage of a concrete matrix.

    :param matrix_name: name of the matrix to look in.
    :param stage_name: name of the stage to find.
    :return: the first matching stage object, or None when the matrix or
        stage is unknown.
    """
    found_stage = None
    if matrix_name in self.data:
        # Select(...) is a project query helper — filters the stage list
        # by stage name.
        result = Select(self.data[matrix_name]).where(
            lambda entry: entry.stage == stage_name).build()
        found_stage = result[0] if len(result) > 0 else None
    return found_stage
2,416
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L245-L261
[ "def", "delete_datapoints_in_time_range", "(", "self", ",", "start_dt", "=", "None", ",", "end_dt", "=", "None", ")", ":", "start_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "start_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "end_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "end_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "params", "=", "{", "}", "if", "start_dt", "is", "not", "None", ":", "params", "[", "'startTime'", "]", "=", "isoformat", "(", "start_dt", ")", "if", "end_dt", "is", "not", "None", ":", "params", "[", "'endTime'", "]", "=", "isoformat", "(", "end_dt", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}{querystring}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "querystring", "=", "\"?\"", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "if", "params", "else", "\"\"", ",", ")", ")" ]
Get duration for a concrete matrix .
def get_duration(self, matrix_name):
    """Get the total duration for a concrete matrix.

    :param matrix_name: name of the matrix.
    :return: sum of all stage durations in seconds; 0.0 for an unknown
        matrix.
    """
    if matrix_name not in self.data:
        return 0.0
    return sum(stage.duration() for stage in self.data[matrix_name])
2,417
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L263-L276
[ "def", "delete_datapoints_in_time_range", "(", "self", ",", "start_dt", "=", "None", ",", "end_dt", "=", "None", ")", ":", "start_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "start_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "end_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "end_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "params", "=", "{", "}", "if", "start_dt", "is", "not", "None", ":", "params", "[", "'startTime'", "]", "=", "isoformat", "(", "start_dt", ")", "if", "end_dt", "is", "not", "None", ":", "params", "[", "'endTime'", "]", "=", "isoformat", "(", "end_dt", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}{querystring}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "querystring", "=", "\"?\"", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "if", "params", "else", "\"\"", ",", ")", ")" ]
Add a collector item .
def update(self, item):
    """Add a collector item.

    :param item: an update object exposing matrix, stage, status,
        timestamp and information attributes.

    Updates an existing stage (matched by name via the Select helper) or
    creates and registers a new CollectorStage.
    """
    if item.matrix not in self.data:
        self.data[item.matrix] = []
    result = Select(self.data[item.matrix]).where(
        lambda entry: entry.stage == item.stage).build()
    if len(result) > 0:
        # stage already registered: refresh status and record the event
        stage = result[0]
        stage.status = item.status
        stage.add(item.timestamp, item.information)
    else:
        # first event for this stage: create and register it
        stage = CollectorStage(stage=item.stage, status=item.status)
        stage.add(item.timestamp, item.information)
        self.data[item.matrix].append(stage)
2,418
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L278-L298
[ "def", "load_stl_ascii", "(", "file_obj", ")", ":", "# the first line is the header", "header", "=", "file_obj", ".", "readline", "(", ")", "# make sure header is a string, not bytes", "if", "hasattr", "(", "header", ",", "'decode'", ")", ":", "try", ":", "header", "=", "header", ".", "decode", "(", "'utf-8'", ")", "except", "BaseException", ":", "header", "=", "''", "# save header to metadata", "metadata", "=", "{", "'header'", ":", "header", "}", "# read all text into one string", "text", "=", "file_obj", ".", "read", "(", ")", "# convert bytes to string", "if", "hasattr", "(", "text", ",", "'decode'", ")", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "# split by endsolid keyword", "text", "=", "text", ".", "lower", "(", ")", ".", "split", "(", "'endsolid'", ")", "[", "0", "]", "# create array of splits", "blob", "=", "np", ".", "array", "(", "text", ".", "strip", "(", ")", ".", "split", "(", ")", ")", "# there are 21 'words' in each face", "face_len", "=", "21", "# length of blob should be multiple of face_len", "if", "(", "len", "(", "blob", ")", "%", "face_len", ")", "!=", "0", ":", "raise", "HeaderError", "(", "'Incorrect length STL file!'", ")", "face_count", "=", "int", "(", "len", "(", "blob", ")", "/", "face_len", ")", "# this offset is to be added to a fixed set of tiled indices", "offset", "=", "face_len", "*", "np", ".", "arange", "(", "face_count", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "normal_index", "=", "np", ".", "tile", "(", "[", "2", ",", "3", ",", "4", "]", ",", "(", "face_count", ",", "1", ")", ")", "+", "offset", "vertex_index", "=", "np", ".", "tile", "(", "[", "8", ",", "9", ",", "10", ",", "12", ",", "13", ",", "14", ",", "16", ",", "17", ",", "18", "]", ",", "(", "face_count", ",", "1", ")", ")", "+", "offset", "# faces are groups of three sequential vertices", "faces", "=", "np", ".", "arange", "(", "face_count", "*", "3", ")", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", 
"face_normals", "=", "blob", "[", "normal_index", "]", ".", "astype", "(", "'<f8'", ")", "vertices", "=", "blob", "[", "vertex_index", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "]", ".", "astype", "(", "'<f8'", ")", "return", "{", "'vertices'", ":", "vertices", ",", "'faces'", ":", "faces", ",", "'metadata'", ":", "metadata", ",", "'face_normals'", ":", "face_normals", "}" ]
Collector main loop .
def run(self):
    """Collector main loop.

    Blocks on ``self.queue``; a None item is the shutdown sentinel.
    Every other item updates the report store and regenerates the HTML
    report in the current working directory.
    """
    while True:
        data = self.queue.get()
        if data is None:
            Logger.get_logger(__name__).info("Stopping collector process ...")
            break
        # updating the report data
        self.store.update(data)
        # writing the report
        generate(self.store, 'html', os.getcwd())
2,419
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L310-L321
[ "def", "price_and_currency", "(", "self", ")", ":", "price", "=", "self", ".", "_safe_get_element_text", "(", "'Offers.Offer.OfferListing.SalePrice.Amount'", ")", "if", "price", ":", "currency", "=", "self", ".", "_safe_get_element_text", "(", "'Offers.Offer.OfferListing.SalePrice.CurrencyCode'", ")", "else", ":", "price", "=", "self", ".", "_safe_get_element_text", "(", "'Offers.Offer.OfferListing.Price.Amount'", ")", "if", "price", ":", "currency", "=", "self", ".", "_safe_get_element_text", "(", "'Offers.Offer.OfferListing.Price.CurrencyCode'", ")", "else", ":", "price", "=", "self", ".", "_safe_get_element_text", "(", "'OfferSummary.LowestNewPrice.Amount'", ")", "currency", "=", "self", ".", "_safe_get_element_text", "(", "'OfferSummary.LowestNewPrice.CurrencyCode'", ")", "if", "price", ":", "dprice", "=", "Decimal", "(", "price", ")", "/", "100", "if", "'JP'", "not", "in", "self", ".", "region", "else", "Decimal", "(", "price", ")", "return", "dprice", ",", "currency", "else", ":", "return", "None", ",", "None" ]
reads a saved text file to list
def read_map ( fname ) : lst = [ ] with open ( fname , "r" ) as f : for line in f : lst . append ( line ) return lst
2,420
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L39-L47
[ "def", "_validate_checksum", "(", "self", ",", "buffer", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Validating the buffer\"", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer was empty\"", ")", "if", "self", ".", "_conn", ".", "isOpen", "(", ")", ":", "self", ".", "_log", ".", "debug", "(", "'Closing connection'", ")", "self", ".", "_conn", ".", "close", "(", ")", "return", "False", "p0", "=", "hex2int", "(", "buffer", "[", "0", "]", ")", "p1", "=", "hex2int", "(", "buffer", "[", "1", "]", ")", "checksum", "=", "sum", "(", "[", "hex2int", "(", "c", ")", "for", "c", "in", "buffer", "[", ":", "35", "]", "]", ")", "&", "0xFF", "p35", "=", "hex2int", "(", "buffer", "[", "35", "]", ")", "if", "p0", "!=", "165", "or", "p1", "!=", "150", "or", "p35", "!=", "checksum", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer checksum was not valid\"", ")", "return", "False", "return", "True" ]
reads a saved grid file and paints it on the canvas
def show_grid_from_file ( self , fname ) : with open ( fname , "r" ) as f : for y , row in enumerate ( f ) : for x , val in enumerate ( row ) : self . draw_cell ( y , x , val )
2,421
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L98-L105
[ "def", "_get_domain_event_detail", "(", "event", ",", "detail", ")", ":", "event_name", "=", "_get_libvirt_enum_string", "(", "'VIR_DOMAIN_EVENT_'", ",", "event", ")", "if", "event_name", "==", "'unknown'", ":", "return", "event_name", ",", "'unknown'", "prefix", "=", "'VIR_DOMAIN_EVENT_{0}_'", ".", "format", "(", "event_name", ".", "upper", "(", ")", ")", "detail_name", "=", "_get_libvirt_enum_string", "(", "prefix", ",", "detail", ")", "return", "event_name", ",", "detail_name" ]
draw a cell as position row col containing val
def draw_cell ( self , row , col , val ) : if val == 'T' : self . paint_target ( row , col ) elif val == '#' : self . paint_block ( row , col ) elif val == 'X' : self . paint_hill ( row , col ) elif val == '.' : self . paint_land ( row , col ) elif val in [ 'A' ] : self . paint_agent_location ( row , col ) elif val in [ '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9' ] : self . paint_agent_trail ( row , col , val )
2,422
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L108-L123
[ "def", "to_hdf5", "(", "input", ")", ":", "with", "performance", ".", "Monitor", "(", "'to_hdf5'", ")", "as", "mon", ":", "for", "input_file", "in", "input", ":", "if", "input_file", ".", "endswith", "(", "'.npz'", ")", ":", "output", "=", "convert_npz_hdf5", "(", "input_file", ",", "input_file", "[", ":", "-", "3", "]", "+", "'hdf5'", ")", "elif", "input_file", ".", "endswith", "(", "'.xml'", ")", ":", "# for source model files", "output", "=", "convert_xml_hdf5", "(", "input_file", ",", "input_file", "[", ":", "-", "3", "]", "+", "'hdf5'", ")", "else", ":", "continue", "print", "(", "'Generated %s'", "%", "output", ")", "print", "(", "mon", ")" ]
paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell
def paint_agent_trail ( self , y , x , val ) : for j in range ( 1 , self . cell_height - 1 ) : for i in range ( 1 , self . cell_width - 1 ) : self . img . put ( self . agent_color ( val ) , ( x * self . cell_width + i , y * self . cell_height + j ) )
2,423
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L150-L157
[ "def", "unshare", "(", "flags", ")", ":", "res", "=", "lib", ".", "unshare", "(", "flags", ")", "if", "res", "!=", "0", ":", "_check_error", "(", "ffi", ".", "errno", ")" ]
gets a colour for agent 0 - 9
def agent_color ( self , val ) : if val == '0' : colour = 'blue' elif val == '1' : colour = 'navy' elif val == '2' : colour = 'firebrick' elif val == '3' : colour = 'blue' elif val == '4' : colour = 'blue2' elif val == '5' : colour = 'blue4' elif val == '6' : colour = 'gray22' elif val == '7' : colour = 'gray57' elif val == '8' : colour = 'red4' elif val == '9' : colour = 'red3' return colour
2,424
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L167-L194
[ "def", "rename_file", "(", "self", ",", "fmfile", ",", "newname", ")", ":", "if", "not", "isinstance", "(", "fmfile", ",", "dict", ")", ":", "raise", "FMBaseError", "(", "'fmfile must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'file_rename'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", ",", "'fileid'", ":", "fmfile", ".", "get", "(", "'fileid'", ")", ",", "'filename'", ":", "newname", "}", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "self", ".", "_complete", "=", "True", "return", "True", "hellraiser", "(", "res", ")" ]
create a list of people with randomly generated names and stats
def create_random_population ( num = 100 ) : people = [ ] for _ in range ( num ) : nme = 'blah' tax_min = random . randint ( 1 , 40 ) / 100 tax_max = tax_min + random . randint ( 1 , 40 ) / 100 tradition = random . randint ( 1 , 100 ) / 100 equity = random . randint ( 1 , 100 ) / 100 pers = mod_hap_env . Person ( nme , { 'tax_min' : tax_min , 'tax_max' : tax_max , 'tradition' : tradition , 'equity' : equity } ) people . append ( pers ) print ( pers ) return people
2,425
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/happiness_solver.py#L29-L44
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "!=", "3", ":", "print", "(", "'Usage: {} <file1> <file2>'", ".", "format", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "exit", "(", "-", "1", ")", "file1", "=", "sys", ".", "argv", "[", "1", "]", "file2", "=", "sys", ".", "argv", "[", "2", "]", "with", "open", "(", "file1", ")", "as", "f1", ",", "open", "(", "file2", ")", "as", "f2", ":", "for", "line1", ",", "line2", "in", "zip", "(", "f1", ",", "f2", ")", ":", "print", "(", "\"Line 1: {}\"", ".", "format", "(", "line1", ".", "strip", "(", ")", ")", ")", "print", "(", "\"Line 2: {}\"", ".", "format", "(", "line2", ".", "strip", "(", ")", ")", ")", "dist", ",", "_", ",", "_", "=", "edit_distance_backpointer", "(", "line1", ".", "split", "(", ")", ",", "line2", ".", "split", "(", ")", ")", "print", "(", "'Distance: {}'", ".", "format", "(", "dist", ")", ")", "print", "(", "'='", "*", "80", ")" ]
Run cleanup script of pipeline when hook is configured .
def cleanup ( self ) : if self . data . hooks and len ( self . data . hooks . cleanup ) > 0 : env = self . data . env_list [ 0 ] . copy ( ) env . update ( { 'PIPELINE_RESULT' : 'SUCCESS' , 'PIPELINE_SHELL_EXIT_CODE' : '0' } ) config = ShellConfig ( script = self . data . hooks . cleanup , model = self . model , env = env , dry_run = self . options . dry_run , debug = self . options . debug , strict = self . options . strict , temporary_scripts_path = self . options . temporary_scripts_path ) cleanup_shell = Bash ( config ) for line in cleanup_shell . process ( ) : yield line
2,426
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/pipeline.py#L68-L79
[ "def", "delete_group", "(", "group_id", ",", "purge_data", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "try", ":", "group_i", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceGroup", ")", ".", "filter", "(", "ResourceGroup", ".", "id", "==", "group_id", ")", ".", "one", "(", ")", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Group %s not found\"", "%", "(", "group_id", ")", ")", "group_items", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceGroupItem", ")", ".", "filter", "(", "ResourceGroupItem", ".", "group_id", "==", "group_id", ")", ".", "all", "(", ")", "for", "gi", "in", "group_items", ":", "db", ".", "DBSession", ".", "delete", "(", "gi", ")", "if", "purge_data", "==", "'Y'", ":", "_purge_datasets_unique_to_resource", "(", "'GROUP'", ",", "group_id", ")", "log", ".", "info", "(", "\"Deleting group %s, id=%s\"", ",", "group_i", ".", "name", ",", "group_id", ")", "group_i", ".", "network", ".", "check_write_permission", "(", "user_id", ")", "db", ".", "DBSession", ".", "delete", "(", "group_i", ")", "db", ".", "DBSession", ".", "flush", "(", ")" ]
Processing the whole pipeline definition .
def process ( self , pipeline ) : output = [ ] for entry in pipeline : key = list ( entry . keys ( ) ) [ 0 ] # an environment block can be repeated if key == "env" : self . data . env_list [ 0 ] . update ( entry [ key ] ) self . logger . debug ( "Updating environment at level 0 with %s" , self . data . env_list [ 0 ] ) continue # after validation it can't be anything else but a stage # and the title is inside the round brackets: stage = Stage ( self , re . match ( r"stage\((?P<title>.*)\)" , key ) . group ( "title" ) ) result = stage . process ( entry [ key ] ) output += result [ 'output' ] if not result [ 'success' ] : return { 'success' : False , 'output' : output } # logging the output of the cleanup shell when registered for line in self . cleanup ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) self . event . succeeded ( ) return { 'success' : True , 'output' : output }
2,427
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/pipeline.py#L81-L107
[ "def", "register_dataframe_method", "(", "method", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "class", "AccessorMethod", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "pandas_obj", ")", ":", "self", ".", "_obj", "=", "pandas_obj", "@", "wraps", "(", "method", ")", "def", "__call__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "method", "(", "self", ".", "_obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "register_dataframe_accessor", "(", "method", ".", "__name__", ")", "(", "AccessorMethod", ")", "return", "method", "return", "inner", "(", ")" ]
Top level function to process the command mainly depending on mode . This should work by using the function name defined in all_commamnds
def process ( self , txt , mode ) : result = '' if mode == 'ADD' : # already in add mode, so add data if txt in self . all_commands [ 'cmd' ] [ 0 ] : self . show_output ( 'Returning to Command mode' ) mode = 'COMMAND' self . prompt = '> ' else : self . show_output ( 'Adding Text : ' , txt ) result = self . cmd_add ( txt ) elif mode == 'QUERY' : if txt in self . all_commands [ 'cmd' ] [ 0 ] : self . show_output ( 'Returning to Command mode' ) mode = 'COMMAND' self . prompt = '> ' else : self . show_output ( 'Query : ' , txt ) result = self . cmd_query ( txt ) else : if txt in self . all_commands [ 'exit' ] [ 0 ] : self . cmd_exit ( ) elif txt in self . all_commands [ 'help' ] [ 0 ] : self . cmd_help ( ) elif txt in self . all_commands [ 'cmd' ] [ 0 ] : result = 'Returning to Command mode' mode = 'COMMAND' self . prompt = '> ' elif txt in self . all_commands [ 'add' ] [ 0 ] : result = 'Entering Add mode' mode = 'ADD' self . prompt = 'ADD > ' elif txt in self . all_commands [ 'query' ] [ 0 ] : result = 'Entering Query mode' mode = 'QUERY' self . prompt = '?? > ' else : result = 'Unknown command - type help for list of commands' return result , mode
2,428
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L62-L110
[ "def", "to_pb", "(", "self", ")", ":", "max_age", "=", "_helpers", ".", "_timedelta_to_duration_pb", "(", "self", ".", "max_age", ")", "return", "table_v2_pb2", ".", "GcRule", "(", "max_age", "=", "max_age", ")" ]
Enter add mode - all text entered now will be processed as adding information until cancelled
def cmd_add ( self , txt ) : self . show_output ( 'Adding ' , txt ) self . raw . add ( txt ) print ( self . raw ) return 'Added ' + txt
2,429
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L135-L143
[ "def", "weld_variance", "(", "array", ",", "weld_type", ")", ":", "weld_obj_mean", "=", "weld_mean", "(", "array", ",", "weld_type", ")", "obj_id", ",", "weld_obj", "=", "create_weld_object", "(", "array", ")", "weld_obj_mean_id", "=", "get_weld_obj_id", "(", "weld_obj", ",", "weld_obj_mean", ")", "weld_template", "=", "_weld_variance_code", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "array", "=", "obj_id", ",", "type", "=", "weld_type", ",", "mean", "=", "weld_obj_mean_id", ")", "return", "weld_obj" ]
search and query the AIKIF
def cmd_query ( self , txt ) : self . show_output ( 'Searching for ' , txt ) res = self . raw . find ( txt ) for d in res : self . show_output ( d ) return str ( len ( res ) ) + ' results for ' + txt
2,430
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L146-L154
[ "def", "thread_partition_array", "(", "x", ")", ":", "n_threads", "=", "get_threadpool_size", "(", ")", "if", "len", "(", "x", ".", "shape", ")", ">", "1", ":", "maxind", "=", "x", ".", "shape", "[", "1", "]", "else", ":", "maxind", "=", "x", ".", "shape", "[", "0", "]", "bounds", "=", "np", ".", "array", "(", "np", ".", "linspace", "(", "0", ",", "maxind", ",", "n_threads", "+", "1", ")", ",", "dtype", "=", "'int'", ")", "cmin", "=", "bounds", "[", ":", "-", "1", "]", "cmax", "=", "bounds", "[", "1", ":", "]", "return", "cmin", ",", "cmax" ]
Verifies that all required functions been injected .
def verify_integrity ( self ) : if not self . __integrity_check : if not self . __appid : raise Exception ( 'U2F_APPID was not defined! Please define it in configuration file.' ) if self . __facets_enabled and not len ( self . __facets_list ) : raise Exception ( """U2F facets been enabled, but U2F facet list is empty. Please either disable facets by setting U2F_FACETS_ENABLED to False. Or add facets list using, by assigning it to U2F_FACETS_LIST. """ ) # Injection undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!' if not self . __get_u2f_devices : raise Exception ( undefined_message . format ( name = 'Read' , method = '@u2f.read' ) ) if not self . __save_u2f_devices : raise Exception ( undefined_message . format ( name = 'Save' , method = '@u2f.save' ) ) if not self . __call_success_enroll : raise Exception ( undefined_message . format ( name = 'enroll onSuccess' , method = '@u2f.enroll_on_success' ) ) if not self . __call_success_sign : raise Exception ( undefined_message . format ( name = 'sign onSuccess' , method = '@u2f.sign_on_success' ) ) self . __integrity_check = True return True
2,431
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L132-L163
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Manages users enrolled u2f devices
def devices ( self ) : self . verify_integrity ( ) if session . get ( 'u2f_device_management_authorized' , False ) : if request . method == 'GET' : return jsonify ( self . get_devices ( ) ) , 200 elif request . method == 'DELETE' : response = self . remove_device ( request . json ) if response [ 'status' ] == 'ok' : return jsonify ( response ) , 200 else : return jsonify ( response ) , 404 return jsonify ( { 'status' : 'failed' , 'error' : 'Unauthorized!' } ) , 401
2,432
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L208-L224
[ "def", "is_citeable", "(", "publication_info", ")", ":", "def", "_item_has_pub_info", "(", "item", ")", ":", "return", "all", "(", "key", "in", "item", "for", "key", "in", "(", "'journal_title'", ",", "'journal_volume'", ")", ")", "def", "_item_has_page_or_artid", "(", "item", ")", ":", "return", "any", "(", "key", "in", "item", "for", "key", "in", "(", "'page_start'", ",", "'artid'", ")", ")", "has_pub_info", "=", "any", "(", "_item_has_pub_info", "(", "item", ")", "for", "item", "in", "publication_info", ")", "has_page_or_artid", "=", "any", "(", "_item_has_page_or_artid", "(", "item", ")", "for", "item", "in", "publication_info", ")", "return", "has_pub_info", "and", "has_page_or_artid" ]
Provides facets support . REQUIRES VALID HTTPS!
def facets ( self ) : self . verify_integrity ( ) if self . __facets_enabled : data = json . dumps ( { 'trustedFacets' : [ { 'version' : { 'major' : 1 , 'minor' : 0 } , 'ids' : self . __facets_list } ] } , sort_keys = True , indent = 2 , separators = ( ',' , ': ' ) ) mime = 'application/fido.trusted-apps+json' resp = Response ( data , mimetype = mime ) return resp , 200 else : return jsonify ( { } ) , 404
2,433
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L226-L243
[ "def", "write_burn_in", "(", "self", ",", "burn_in", ")", ":", "group", "=", "self", "[", "self", ".", "sampler_group", "]", "group", ".", "attrs", "[", "'burn_in_test'", "]", "=", "burn_in", ".", "burn_in_test", "group", ".", "attrs", "[", "'is_burned_in'", "]", "=", "burn_in", ".", "is_burned_in", "group", ".", "attrs", "[", "'burn_in_iteration'", "]", "=", "burn_in", ".", "burn_in_iteration", "# set the defaut thin_start to be the burn_in_index", "self", ".", "thin_start", "=", "burn_in", ".", "burn_in_index", "# write individual test data", "for", "tst", "in", "burn_in", ".", "burn_in_data", ":", "key", "=", "'burn_in_tests/{}'", ".", "format", "(", "tst", ")", "try", ":", "attrs", "=", "group", "[", "key", "]", ".", "attrs", "except", "KeyError", ":", "group", ".", "create_group", "(", "key", ")", "attrs", "=", "group", "[", "key", "]", ".", "attrs", "self", ".", "write_kwargs_to_attrs", "(", "attrs", ",", "*", "*", "burn_in", ".", "burn_in_data", "[", "tst", "]", ")" ]
Returns new enroll seed
def get_enroll ( self ) : devices = [ DeviceRegistration . wrap ( device ) for device in self . __get_u2f_devices ( ) ] enroll = start_register ( self . __appid , devices ) enroll [ 'status' ] = 'ok' session [ '_u2f_enroll_' ] = enroll . json return enroll
2,434
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L247-L255
[ "def", "_closed_cb", "(", "self", ",", "final_frame", "=", "None", ")", ":", "# delete all pending data and send final frame if thre is one. note", "# that it bypasses send_frame so that even if the closed state is set,", "# the frame is published.", "if", "final_frame", ":", "self", ".", "_connection", ".", "send_frame", "(", "final_frame", ")", "try", ":", "self", ".", "_notify_close_listeners", "(", ")", "finally", ":", "self", ".", "_pending_events", "=", "deque", "(", ")", "self", ".", "_frame_buffer", "=", "deque", "(", ")", "# clear out other references for faster cleanup", "for", "protocol_class", "in", "self", ".", "_class_map", ".", "values", "(", ")", ":", "protocol_class", ".", "_cleanup", "(", ")", "delattr", "(", "self", ",", "protocol_class", ".", "name", ")", "self", ".", "_connection", "=", "None", "self", ".", "_class_map", "=", "None", "self", ".", "_close_listeners", "=", "set", "(", ")" ]
Verifies and saves U2F enroll
def verify_enroll ( self , response ) : seed = session . pop ( '_u2f_enroll_' ) try : new_device , cert = complete_register ( seed , response , self . __facets_list ) except Exception as e : if self . __call_fail_enroll : self . __call_fail_enroll ( e ) return { 'status' : 'failed' , 'error' : 'Invalid key handle!' } finally : pass devices = self . __get_u2f_devices ( ) # Setting new device counter to 0 new_device [ 'counter' ] = 0 new_device [ 'index' ] = 0 for device in devices : if new_device [ 'index' ] <= device [ 'index' ] : new_device [ 'index' ] = device [ 'index' ] + 1 devices . append ( new_device ) self . __save_u2f_devices ( devices ) self . __call_success_enroll ( ) return { 'status' : 'ok' , 'message' : 'Successfully enrolled new U2F device!' }
2,435
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L257-L293
[ "def", "delete_attachments", "(", "self", ",", "volumeID", ",", "attachmentsID", ")", ":", "log", ".", "debug", "(", "\"deleting attachments from volume '{}': {}\"", ".", "format", "(", "volumeID", ",", "attachmentsID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "insID", "=", "[", "a", "[", "'id'", "]", "for", "a", "in", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", "]", "# check that all requested file are present", "for", "id", "in", "attachmentsID", ":", "if", "id", "not", "in", "insID", ":", "raise", "NotFoundException", "(", "\"could not found attachment '{}' of the volume '{}'\"", ".", "format", "(", "id", ",", "volumeID", ")", ")", "for", "index", ",", "id", "in", "enumerate", "(", "attachmentsID", ")", ":", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", ".", "pop", "(", "insID", ".", "index", "(", "id", ")", ")", "self", ".", "_db", ".", "modify_book", "(", "volumeID", ",", "rawVolume", "[", "'_source'", "]", ",", "version", "=", "rawVolume", "[", "'_version'", "]", ")" ]
Returns new signature challenge
def get_signature_challenge ( self ) : devices = [ DeviceRegistration . wrap ( device ) for device in self . __get_u2f_devices ( ) ] if devices == [ ] : return { 'status' : 'failed' , 'error' : 'No devices been associated with the account!' } challenge = start_authenticate ( devices ) challenge [ 'status' ] = 'ok' session [ '_u2f_challenge_' ] = challenge . json return challenge
2,436
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L296-L312
[ "def", "setup", "(", "self", ")", ":", "# get start and end of the mask and set a start_limit", "if", "not", "self", ".", "mask_source", ".", "start", ":", "raise", "SystemExit", "(", "\"Can't parse format of %s. Is this a log file or \"", "\"system.profile collection?\"", "%", "self", ".", "mlogfilter", ".", "args", "[", "'mask'", "]", ")", "self", ".", "mask_half_td", "=", "timedelta", "(", "seconds", "=", "self", ".", "mlogfilter", ".", "args", "[", "'mask_size'", "]", "/", "2", ")", "# load filter mask file", "logevent_list", "=", "list", "(", "self", ".", "mask_source", ")", "# define start and end of total mask", "self", ".", "mask_start", "=", "self", ".", "mask_source", ".", "start", "-", "self", ".", "mask_half_td", "self", ".", "mask_end", "=", "self", ".", "mask_source", ".", "end", "+", "self", ".", "mask_half_td", "# consider --mask-center", "if", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "in", "[", "'start'", ",", "'both'", "]", ":", "if", "logevent_list", "[", "0", "]", ".", "duration", ":", "self", ".", "mask_start", "-=", "timedelta", "(", "milliseconds", "=", "logevent_list", "[", "0", "]", ".", "duration", ")", "if", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "==", "'start'", ":", "if", "logevent_list", "[", "-", "1", "]", ".", "duration", ":", "self", ".", "mask_end", "-=", "timedelta", "(", "milliseconds", "=", "logevent_list", "[", "-", "1", "]", ".", "duration", ")", "self", ".", "start_limit", "=", "self", ".", "mask_start", "# different center points", "if", "'mask_center'", "in", "self", ".", "mlogfilter", ".", "args", ":", "if", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "in", "[", "'start'", ",", "'both'", "]", ":", "starts", "=", "(", "[", "(", "le", ".", "datetime", "-", "timedelta", "(", "milliseconds", "=", "le", ".", "duration", ")", ")", "if", "le", ".", "duration", "is", "not", "None", "else", "le", ".", "datetime", "for", "le", "in", "logevent_list", "if", 
"le", ".", "datetime", "]", ")", "if", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "in", "[", "'end'", ",", "'both'", "]", ":", "ends", "=", "[", "le", ".", "datetime", "for", "le", "in", "logevent_list", "if", "le", ".", "datetime", "]", "if", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "==", "'start'", ":", "event_list", "=", "sorted", "(", "starts", ")", "elif", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "==", "'end'", ":", "event_list", "=", "sorted", "(", "ends", ")", "elif", "self", ".", "mlogfilter", ".", "args", "[", "'mask_center'", "]", "==", "'both'", ":", "event_list", "=", "sorted", "(", "zip", "(", "starts", ",", "ends", ")", ")", "mask_list", "=", "[", "]", "if", "len", "(", "event_list", ")", "==", "0", ":", "return", "start_point", "=", "end_point", "=", "None", "for", "e", "in", "event_list", ":", "if", "start_point", "is", "None", ":", "start_point", ",", "end_point", "=", "self", ".", "_pad_event", "(", "e", ")", "continue", "next_start", "=", "(", "e", "[", "0", "]", "if", "type", "(", "e", ")", "==", "tuple", "else", "e", ")", "-", "self", ".", "mask_half_td", "if", "next_start", "<=", "end_point", ":", "end_point", "=", "(", "(", "e", "[", "1", "]", "if", "type", "(", "e", ")", "==", "tuple", "else", "e", ")", "+", "self", ".", "mask_half_td", ")", "else", ":", "mask_list", ".", "append", "(", "(", "start_point", ",", "end_point", ")", ")", "start_point", ",", "end_point", "=", "self", ".", "_pad_event", "(", "e", ")", "if", "start_point", ":", "mask_list", ".", "append", "(", "(", "start_point", ",", "end_point", ")", ")", "self", ".", "mask_list", "=", "mask_list" ]
Removes device specified by id
def remove_device ( self , request ) : devices = self . __get_u2f_devices ( ) for i in range ( len ( devices ) ) : if devices [ i ] [ 'keyHandle' ] == request [ 'id' ] : del devices [ i ] self . __save_u2f_devices ( devices ) return { 'status' : 'ok' , 'message' : 'Successfully deleted your device!' } return { 'status' : 'failed' , 'error' : 'No device with such an id been found!' }
2,437
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L367-L385
[ "def", "_get_data", "(", "self", ",", "time", ",", "site_id", ",", "pressure", "=", "None", ")", ":", "json_data", "=", "self", ".", "_get_data_raw", "(", "time", ",", "site_id", ",", "pressure", ")", "data", "=", "{", "}", "for", "profile", "in", "json_data", "[", "'profiles'", "]", ":", "for", "pt", "in", "profile", "[", "'profile'", "]", ":", "for", "field", "in", "(", "'drct'", ",", "'dwpc'", ",", "'hght'", ",", "'pres'", ",", "'sknt'", ",", "'tmpc'", ")", ":", "data", ".", "setdefault", "(", "field", ",", "[", "]", ")", ".", "append", "(", "np", ".", "nan", "if", "pt", "[", "field", "]", "is", "None", "else", "pt", "[", "field", "]", ")", "for", "field", "in", "(", "'station'", ",", "'valid'", ")", ":", "data", ".", "setdefault", "(", "field", ",", "[", "]", ")", ".", "append", "(", "np", ".", "nan", "if", "profile", "[", "field", "]", "is", "None", "else", "profile", "[", "field", "]", ")", "# Make sure that the first entry has a valid temperature and dewpoint", "idx", "=", "np", ".", "argmax", "(", "~", "(", "np", ".", "isnan", "(", "data", "[", "'tmpc'", "]", ")", "|", "np", ".", "isnan", "(", "data", "[", "'dwpc'", "]", ")", ")", ")", "# Stuff data into a pandas dataframe", "df", "=", "pd", ".", "DataFrame", "(", ")", "df", "[", "'pressure'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'pres'", "]", "[", "idx", ":", "]", ")", "df", "[", "'height'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'hght'", "]", "[", "idx", ":", "]", ")", "df", "[", "'temperature'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'tmpc'", "]", "[", "idx", ":", "]", ")", "df", "[", "'dewpoint'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'dwpc'", "]", "[", "idx", ":", "]", ")", "df", "[", "'direction'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'drct'", "]", "[", "idx", ":", "]", ")", "df", "[", "'speed'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'sknt'", "]", "[", "idx", ":", "]", ")", "df", 
"[", "'station'", "]", "=", "data", "[", "'station'", "]", "[", "idx", ":", "]", "df", "[", "'time'", "]", "=", "[", "datetime", ".", "strptime", "(", "valid", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "for", "valid", "in", "data", "[", "'valid'", "]", "[", "idx", ":", "]", "]", "# Calculate the u and v winds", "df", "[", "'u_wind'", "]", ",", "df", "[", "'v_wind'", "]", "=", "get_wind_components", "(", "df", "[", "'speed'", "]", ",", "np", ".", "deg2rad", "(", "df", "[", "'direction'", "]", ")", ")", "# Drop any rows with all NaN values for T, Td, winds", "df", "=", "df", ".", "dropna", "(", "subset", "=", "(", "'temperature'", ",", "'dewpoint'", ",", "'direction'", ",", "'speed'", ",", "'u_wind'", ",", "'v_wind'", ")", ",", "how", "=", "'all'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Add unit dictionary", "df", ".", "units", "=", "{", "'pressure'", ":", "'hPa'", ",", "'height'", ":", "'meter'", ",", "'temperature'", ":", "'degC'", ",", "'dewpoint'", ":", "'degC'", ",", "'direction'", ":", "'degrees'", ",", "'speed'", ":", "'knot'", ",", "'u_wind'", ":", "'knot'", ",", "'v_wind'", ":", "'knot'", ",", "'station'", ":", "None", ",", "'time'", ":", "None", "}", "return", "df" ]
Verifies that counter value is greater than previous signature
def verify_counter ( self , signature , counter ) : devices = self . __get_u2f_devices ( ) for device in devices : # Searching for specific keyhandle if device [ 'keyHandle' ] == signature [ 'keyHandle' ] : if counter > device [ 'counter' ] : # Updating counter record device [ 'counter' ] = counter self . __save_u2f_devices ( devices ) return True else : return False
2,438
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L393-L409
[ "def", "purge", "(", "self", ")", ":", "while", "not", "self", ".", "stopped", ".", "isSet", "(", ")", ":", "self", ".", "stopped", ".", "wait", "(", "timeout", "=", "defines", ".", "EXCHANGE_LIFETIME", ")", "self", ".", "_messageLayer", ".", "purge", "(", ")" ]
Validate data against the schema .
def validate ( data ) : try : return Schema ( Validator . SCHEMA ) . validate ( data ) except SchemaError as exception : logging . getLogger ( __name__ ) . error ( exception ) return None
2,439
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/validation.py#L159-L173
[ "def", "_parse_guild_members", "(", "self", ",", "parsed_content", ")", ":", "member_rows", "=", "parsed_content", ".", "find_all", "(", "\"tr\"", ",", "{", "'bgcolor'", ":", "[", "\"#D4C0A1\"", ",", "\"#F1E0C6\"", "]", "}", ")", "previous_rank", "=", "{", "}", "for", "row", "in", "member_rows", ":", "columns", "=", "row", ".", "find_all", "(", "'td'", ")", "values", "=", "tuple", "(", "c", ".", "text", ".", "replace", "(", "\"\\u00a0\"", ",", "\" \"", ")", "for", "c", "in", "columns", ")", "if", "len", "(", "columns", ")", "==", "COLS_GUILD_MEMBER", ":", "self", ".", "_parse_current_member", "(", "previous_rank", ",", "values", ")", "if", "len", "(", "columns", ")", "==", "COLS_INVITED_MEMBER", ":", "self", ".", "_parse_invited_member", "(", "values", ")" ]
Include the defined yaml file .
def include ( self , node ) : result = None if isinstance ( node , ScalarNode ) : result = Loader . include_file ( self . construct_scalar ( node ) ) else : raise RuntimeError ( "Not supported !include on type %s" % type ( node ) ) return result
2,440
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loader.py#L32-L39
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Load yaml file with specific include loader .
def load ( filename ) : if os . path . isfile ( filename ) : with open ( filename ) as handle : return yaml_load ( handle , Loader = Loader ) # nosec raise RuntimeError ( "File %s doesn't exist!" % filename )
2,441
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loader.py#L50-L55
[ "def", "interevent_time_recharges", "(", "recharges", ")", ":", "time_pairs", "=", "pairwise", "(", "r", ".", "datetime", "for", "r", "in", "recharges", ")", "times", "=", "[", "(", "new", "-", "old", ")", ".", "total_seconds", "(", ")", "for", "old", ",", "new", "in", "time_pairs", "]", "return", "summary_stats", "(", "times", ")" ]
transposes rows and columns
def pivot ( self ) : self . op_data = [ list ( i ) for i in zip ( * self . ip_data ) ]
2,442
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L20-L24
[ "def", "parse_invocation", "(", "self", ",", "invocation", ",", "controller_tag", ")", ":", "if", "invocation", ".", "endswith", "(", "'/'", ")", ":", "invocation", "=", "invocation", "[", ":", "-", "1", "]", "if", "not", "invocation", ".", "startswith", "(", "'/'", ")", ":", "invocation", "=", "'/'", "+", "invocation", "if", "invocation", "==", "''", ":", "invocation", "=", "'/'", "all_programs", "=", "self", ".", "get_urls", "(", "controllers", "=", "[", "controller_tag", "]", ")", "matching_paths", "=", "set", "(", ")", "for", "program_path", "in", "sorted", "(", "all_programs", ")", ":", "if", "invocation", ".", "startswith", "(", "program_path", ")", ":", "matching_paths", ".", "add", "(", "program_path", ")", "longest", "=", "\"\"", "for", "path", "in", "matching_paths", ":", "longest", "=", "path", "if", "len", "(", "path", ")", ">", "len", "(", "longest", ")", "else", "longest", "matching_path", "=", "longest", "program", "=", "self", ".", "get_program", "(", "matching_path", ",", "controller", "=", "controller_tag", ")", "if", "not", "matching_path", ":", "raise", "ProgramNotFound", "(", "\"Can't find %s\"", "%", "invocation", ")", "program_name", "=", "matching_path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "path", "=", "\"/\"", ".", "join", "(", "matching_path", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "+", "'/'", "args_fragment", "=", "invocation", "[", "len", "(", "matching_path", ")", ":", "]", "superformat", "=", "None", "if", "args_fragment", ".", "startswith", "(", "'.'", ")", ":", "# args_fragment will be something like \".html/arg1/arg2\" or just \".html\"", "superformat", "=", "args_fragment", ".", "split", "(", "'/'", ")", "[", "0", "]", "[", "1", ":", "]", "args", "=", "args_fragment", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", "args_fragment", "=", "'/'", ".", "join", "(", "args", ")", "else", ":", "args", "=", "args_fragment", ".", "split", "(", "\"/\"", ")", "[", "1", ":", "]", "if", "args_fragment", 
"else", "[", "]", "args_fragment", "=", "args_fragment", "[", "1", ":", "]", "if", "(", "args_fragment", "and", "args_fragment", "[", "0", "]", "==", "'/'", ")", "else", "args_fragment", "return", "{", "'program'", ":", "program", ",", "'program_name'", ":", "program_name", ",", "'superformat'", ":", "superformat", ",", "'superformat_mime'", ":", "super_accept_to_mimetype", "(", "superformat", ")", ",", "'args'", ":", "args", ",", "'raw_args'", ":", "args_fragment", ",", "'path'", ":", "path", ",", "'invocation'", ":", "invocation", ",", "}" ]
convert list to key value pairs This should also create unique id s to allow for any dataset to be transposed and then later manipulated r1c1 r1c2 r1c3 r2c1 r2c2 r2c3 should be converted to ID COLNUM VAL r1c1
def key_value_pairs ( self ) : self . op_data = [ ] hdrs = self . ip_data [ 0 ] for row in self . ip_data [ 1 : ] : id_col = row [ 0 ] for col_num , col in enumerate ( row ) : self . op_data . append ( [ id_col , hdrs [ col_num ] , col ] )
2,443
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L26-L44
[ "def", "_on_timeout", "(", "self", ",", "info", ":", "str", "=", "None", ")", "->", "None", ":", "self", ".", "_timeout", "=", "None", "error_message", "=", "\"Timeout {0}\"", ".", "format", "(", "info", ")", "if", "info", "else", "\"Timeout\"", "if", "self", ".", "final_callback", "is", "not", "None", ":", "self", ".", "_handle_exception", "(", "HTTPTimeoutError", ",", "HTTPTimeoutError", "(", "error_message", ")", ",", "None", ")" ]
This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name CAT_val Person_a person_b NAME Location Location Perth John Fred John Perth Location Perth John Cindy Cindy Perth Location Perth Fred Cindy Fred Perth
def links_to_data ( self , col_name_col_num , col_val_col_num , id_a_col_num , id_b_col_num ) : print ( 'Converting links to data' ) self . op_data unique_ids = [ ] unique_vals = [ ] self . op_data . append ( [ 'Name' , self . ip_data [ 1 ] [ col_name_col_num ] ] ) for r in self . ip_data [ 1 : ] : if r [ id_a_col_num ] not in unique_ids : unique_ids . append ( r [ id_a_col_num ] ) self . op_data . append ( [ r [ id_a_col_num ] , r [ col_val_col_num ] ] ) if r [ id_b_col_num ] not in unique_ids : unique_ids . append ( r [ id_b_col_num ] ) if r [ col_val_col_num ] not in unique_vals : unique_vals . append ( r [ col_val_col_num ] ) #for id in unique_ids: # self.op_data.append([id, '']) print ( 'unique_ids = ' , unique_ids ) print ( 'unique_vals= ' , unique_vals ) print ( 'op_data = ' , self . op_data ) return self . op_data
2,444
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L94-L129
[ "def", "acquire_writer", "(", "self", ")", ":", "with", "self", ".", "mutex", ":", "while", "self", ".", "rwlock", "!=", "0", ":", "self", ".", "_writer_wait", "(", ")", "self", ".", "rwlock", "=", "-", "1" ]
try each strategy with different amounts
def find_best_plan ( self ) : for plan in self . plans : for strat in self . strategy : self . run_plan ( plan , strat )
2,445
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_goal_friendly.py#L37-L43
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
loads previously exported CSV file to redis database
def load_data ( fname ) : print ( 'Loading ' + fname + ' to redis' ) r = redis . StrictRedis ( host = '127.0.0.1' , port = 6379 , db = 0 ) with open ( fname , 'r' ) as f : for line_num , row in enumerate ( f ) : if row . strip ( '' ) != '' : if line_num < 100000000 : l_key , l_val = parse_n3 ( row , 'csv' ) if line_num % 1000 == 0 : print ( 'loading line #' , line_num , 'key=' , l_key , ' = ' , l_val ) if l_key != '' : r . set ( l_key , l_val )
2,446
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L28-L40
[ "def", "prior_sediment_memory", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# \"plot the prior for the memory (= accumulation rate varibility between neighbouring depths)\"", "# PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141", "# w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)", "# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.", "mem_shape", "=", "kwargs", "[", "'mem_strength'", "]", "# aka. `mem_shape`", "mem_mean", "=", "kwargs", "[", "'mem_mean'", "]", "x", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "100", ")", "y", "=", "stats", ".", "beta", ".", "pdf", "(", "x", ",", "a", "=", "mem_shape", "*", "mem_mean", ",", "b", "=", "mem_shape", "*", "(", "1", "-", "mem_mean", ")", ")", "return", "y", ",", "x" ]
takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract . py
def parse_n3 ( row , src = 'csv' ) : if row . strip ( ) == '' : return '' , '' l_root = 'opencyc' key = '' val = '' if src == 'csv' : cols = row . split ( ',' ) if len ( cols ) < 3 : #print('PARSE ISSUE : ', row) return '' , '' key = '' val = '' key = l_root + ':' + cols [ 1 ] . strip ( '"' ) . strip ( ) + ':' + cols [ 2 ] . strip ( '"' ) . strip ( ) try : val = cols [ 3 ] . strip ( '"' ) . strip ( ) except Exception : val = "Error parsing " + row elif src == 'n3' : pass return key , val
2,447
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L42-L67
[ "def", "set_ram", "(", "self", ",", "ram", ")", ":", "if", "ram", "==", "0", ":", "return", "yield", "from", "self", ".", "_modify_vm", "(", "'--memory {}'", ".", "format", "(", "ram", ")", ")", "log", ".", "info", "(", "\"VirtualBox VM '{name}' [{id}] has set amount of RAM to {ram}\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "ram", "=", "ram", ")", ")", "self", ".", "_ram", "=", "ram" ]
takes a large data file and produces a HTML summary as html
def summarise_file_as_html ( fname ) : txt = '<H1>' + fname + '</H1>' num_lines = 0 print ( 'Reading OpenCyc file - ' , fname ) with open ( ip_folder + os . sep + fname , 'r' ) as f : txt += '<PRE>' for line in f : if line . strip ( ) != '' : num_lines += 1 if num_lines < 80 : txt += str ( num_lines ) + ': ' + escape_html ( line ) + '' txt += '</PRE>' txt += 'Total lines = ' + str ( num_lines ) + '<BR><BR>' return txt
2,448
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L88-L105
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
Example to show AIKIF logging of results . Generates a sequence of random grids and runs the Game of Life saving results
def main ( ) : iterations = 9 # how many simulations to run years = 3 # how many times to run each simulation width = 22 # grid height height = 78 # grid width time_delay = 0.03 # delay when printing on screen lg = mod_log . Log ( 'test' ) lg . record_process ( 'Game of Life' , 'game_of_life_console.py' ) for _ in range ( iterations ) : s , e = run_game_of_life ( years , width , height , time_delay , 'N' ) lg . record_result ( "Started with " + str ( s ) + " cells and ended with " + str ( e ) + " cells" )
2,449
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L28-L43
[ "def", "data", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "self", ".", "_enforceDataType", "(", "data", ")", "# Enforce self._data to be a QFont", "self", ".", "familyCti", ".", "data", "=", "fontFamilyIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "familyCti", ".", "iterConfigValues", ")", ")", "self", ".", "pointSizeCti", ".", "data", "=", "self", ".", "data", ".", "pointSize", "(", ")", "self", ".", "weightCti", ".", "data", "=", "fontWeightIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "weightCti", ".", "iterConfigValues", ")", ")", "self", ".", "italicCti", ".", "data", "=", "self", ".", "data", ".", "italic", "(", ")" ]
run a single game of life for years and log start and end living cells to aikif
def run_game_of_life ( years , width , height , time_delay , silent = "N" ) : lfe = mod_grid . GameOfLife ( width , height , [ '.' , 'x' ] , 1 ) set_random_starting_grid ( lfe ) lg . record_source ( lfe , 'game_of_life_console.py' ) print ( lfe ) start_cells = lfe . count_filled_positions ( ) for ndx , dummy_idx in enumerate ( range ( years ) ) : lfe . update_gol ( ) if silent == "N" : print_there ( 1 , 1 , "Game of Life - Iteration # " + str ( ndx ) ) print_there ( 1 , 2 , lfe ) time . sleep ( time_delay ) end_cells = lfe . count_filled_positions ( ) return start_cells , end_cells
2,450
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L45-L62
[ "def", "get_share_metadata", "(", "self", ",", "share_name", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ")", "request", ".", "query", "=", "[", "(", "'restype'", ",", "'share'", ")", ",", "(", "'comp'", ",", "'metadata'", ")", ",", "(", "'timeout'", ",", "_int_to_str", "(", "timeout", ")", ")", ",", "]", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_parse_metadata", "(", "response", ")" ]
allows display of a game of life on a console via resetting cursor position to a set point - looks ok for testing but not production quality .
def print_there ( x , y , text ) : sys . stdout . write ( "\x1b7\x1b[%d;%df%s\x1b8" % ( x , y , text ) ) sys . stdout . flush ( )
2,451
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L79-L86
[ "def", "get_relation_cnt", "(", "self", ")", ":", "ctr", "=", "cx", ".", "Counter", "(", ")", "for", "ntgpad", "in", "self", ".", "associations", ":", "if", "ntgpad", ".", "Extension", "is", "not", "None", ":", "ctr", "+=", "ntgpad", ".", "Extension", ".", "get_relations_cnt", "(", ")", "return", "ctr" ]
assume no delimiter in this file so guess the best fixed column widths to split by
def identify_col_pos ( txt ) : res = [ ] #res.append(0) lines = txt . split ( '\n' ) prev_ch = '' for col_pos , ch in enumerate ( lines [ 0 ] ) : if _is_white_space ( ch ) is False and _is_white_space ( prev_ch ) is True : res . append ( col_pos ) prev_ch = ch res . append ( col_pos ) return res
2,452
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L27-L41
[ "def", "image_to_texture", "(", "image", ")", ":", "vtex", "=", "vtk", ".", "vtkTexture", "(", ")", "vtex", ".", "SetInputDataObject", "(", "image", ")", "vtex", ".", "Update", "(", ")", "return", "vtex" ]
read a CSV file to list without worrying about odd characters
def load_tbl_from_csv ( fname ) : import csv rows_to_load = [ ] with open ( fname , 'r' , encoding = 'cp1252' , errors = 'ignore' ) as csvfile : csvreader = csv . reader ( csvfile , delimiter = ',' ) reader = csv . reader ( csvfile ) rows_to_load = list ( reader ) return rows_to_load
2,453
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L51-L66
[ "def", "display", "(", "self", ")", ":", "lg", ".", "debug", "(", "'GraphicsScene is between {}s and {}s'", ".", "format", "(", "self", ".", "minimum", ",", "self", ".", "maximum", ")", ")", "x_scale", "=", "1", "/", "self", ".", "parent", ".", "value", "(", "'overview_scale'", ")", "lg", ".", "debug", "(", "'Set scene x-scaling to {}'", ".", "format", "(", "x_scale", ")", ")", "self", ".", "scale", "(", "1", "/", "self", ".", "transform", "(", ")", ".", "m11", "(", ")", ",", "1", ")", "# reset to 1", "self", ".", "scale", "(", "x_scale", ",", "1", ")", "self", ".", "scene", "=", "QGraphicsScene", "(", "self", ".", "minimum", ",", "0", ",", "self", ".", "maximum", ",", "TOTAL_HEIGHT", ")", "self", ".", "setScene", "(", "self", ".", "scene", ")", "# reset annotations", "self", ".", "idx_markers", "=", "[", "]", "self", ".", "idx_annot", "=", "[", "]", "self", ".", "display_current", "(", ")", "for", "name", ",", "pos", "in", "BARS", ".", "items", "(", ")", ":", "item", "=", "QGraphicsRectItem", "(", "self", ".", "minimum", ",", "pos", "[", "'pos0'", "]", ",", "self", ".", "maximum", ",", "pos", "[", "'pos1'", "]", ")", "item", ".", "setToolTip", "(", "pos", "[", "'tip'", "]", ")", "self", ".", "scene", ".", "addItem", "(", "item", ")", "self", ".", "add_timestamps", "(", ")" ]
reads the characters in txt and returns a dictionary of all letters
def _get_dict_char_count ( txt ) : dct = { } for letter in txt : if letter in dct : dct [ letter ] += 1 else : dct [ letter ] = 1 return dct
2,454
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L100-L111
[ "async", "def", "open_session", "(", "self", ",", "request", ":", "BaseRequestWebsocket", ")", "->", "Session", ":", "return", "await", "ensure_coroutine", "(", "self", ".", "session_interface", ".", "open_session", ")", "(", "self", ",", "request", ")" ]
Creator function for creating an instance of a Bash .
def creator ( entry , config ) : template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/docker-container.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) # all fields are re-rendered via the Bash script wrapped_script = render ( template , container = { 'image' : 'centos:7' if 'image' not in entry else entry [ 'image' ] , 'remove' : True if 'remove' not in entry else str ( entry [ 'remove' ] ) . lower ( ) , 'background' : False if 'background' not in entry else str ( entry [ 'background' ] ) . lower ( ) , 'mount' : False if 'mount' not in entry else str ( entry [ 'mount' ] ) . lower ( ) , 'network' : '' if 'network' not in entry else entry [ 'network' ] , 'labels' : { } if 'labels' not in entry else entry [ 'labels' ] , 'script' : config . script } ) config . script = wrapped_script return Container ( config )
2,455
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/docker.py#L51-L70
[ "def", "set_table_data", "(", "self", ",", "tableid", ",", "data", ",", "offset", "=", "None", ")", ":", "self", ".", "send", "(", "C1218WriteRequest", "(", "tableid", ",", "data", ",", "offset", ")", ")", "data", "=", "self", ".", "recv", "(", ")", "if", "data", "[", "0", "]", "!=", "0x00", ":", "status", "=", "data", "[", "0", "]", "details", "=", "(", "C1218_RESPONSE_CODES", ".", "get", "(", "status", ")", "or", "'unknown response code'", ")", "self", ".", "logger", ".", "error", "(", "'could not write data to the table, error: '", "+", "details", ")", "raise", "C1218WriteTableError", "(", "'could not write data to the table, error: '", "+", "details", ",", "status", ")", "return" ]
Creator function for creating an instance of a Docker image script .
def creator ( entry , config ) : # writing Dockerfile dockerfile = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) filename = "dockerfile.dry.run.see.comment" if not config . dry_run : temp = tempfile . NamedTemporaryFile ( prefix = "dockerfile-" , mode = 'w+t' , delete = False ) temp . writelines ( dockerfile ) temp . close ( ) filename = temp . name dockerfile = '' # rendering the Bash script for generating the Docker image name = entry [ 'name' ] + "-%s" % os . getpid ( ) if entry [ 'unique' ] else entry [ 'name' ] tag = render ( entry [ 'tag' ] , model = config . model , env = config . env , item = config . item ) template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/docker-image.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , name = name , tag = tag , dockerfile_content = dockerfile , dockerfile_filename = filename ) return Image ( config )
2,456
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/docker.py#L85-L111
[ "def", "console_wait_for_keypress", "(", "flush", ":", "bool", ")", "->", "Key", ":", "key", "=", "Key", "(", ")", "lib", ".", "TCOD_console_wait_for_keypress_wrapper", "(", "key", ".", "key_p", ",", "flush", ")", "return", "key" ]
Simplify redirect of stdout .
def stdout_redirector ( ) : old_stdout = sys . stdout sys . stdout = Stream ( ) try : yield sys . stdout finally : sys . stdout . close ( ) sys . stdout = old_stdout
2,457
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/stream.py#L34-L46
[ "def", "coordination_geometry_symmetry_measures_fallback_random", "(", "self", ",", "coordination_geometry", ",", "NRANDOM", "=", "10", ",", "points_perfect", "=", "None", ")", ":", "permutations_symmetry_measures", "=", "[", "None", "]", "*", "NRANDOM", "permutations", "=", "list", "(", ")", "algos", "=", "list", "(", ")", "perfect2local_maps", "=", "list", "(", ")", "local2perfect_maps", "=", "list", "(", ")", "for", "iperm", "in", "range", "(", "NRANDOM", ")", ":", "perm", "=", "np", ".", "random", ".", "permutation", "(", "coordination_geometry", ".", "coordination_number", ")", "permutations", ".", "append", "(", "perm", ")", "p2l", "=", "{", "}", "l2p", "=", "{", "}", "for", "i_p", ",", "pp", "in", "enumerate", "(", "perm", ")", ":", "p2l", "[", "i_p", "]", "=", "pp", "l2p", "[", "pp", "]", "=", "i_p", "perfect2local_maps", ".", "append", "(", "p2l", ")", "local2perfect_maps", ".", "append", "(", "l2p", ")", "points_distorted", "=", "self", ".", "local_geometry", ".", "points_wcs_ctwcc", "(", "permutation", "=", "perm", ")", "sm_info", "=", "symmetry_measure", "(", "points_distorted", "=", "points_distorted", ",", "points_perfect", "=", "points_perfect", ")", "sm_info", "[", "'translation_vector'", "]", "=", "self", ".", "local_geometry", ".", "centroid_with_centre", "permutations_symmetry_measures", "[", "iperm", "]", "=", "sm_info", "algos", ".", "append", "(", "'APPROXIMATE_FALLBACK'", ")", "return", "permutations_symmetry_measures", ",", "permutations", ",", "algos", ",", "local2perfect_maps", ",", "perfect2local_maps" ]
Generating a temporary file with content .
def write_temporary_file ( content , prefix = '' , suffix = '' ) : temp = tempfile . NamedTemporaryFile ( prefix = prefix , suffix = suffix , mode = 'w+t' , delete = False ) temp . writelines ( content ) temp . close ( ) return temp . name
2,458
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/stream.py#L49-L67
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Prints filename of a new migration
def print_new ( ctx , name , migration_type ) : click . echo ( ctx . obj . repository . generate_migration_name ( name , migration_type ) )
2,459
https://github.com/aartur/mschematool/blob/57ec9541f80b44890294126eab92ce243c8833c4/mschematool/cli.py#L81-L83
[ "def", "translate_github_exception", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "UnknownObjectException", "as", "e", ":", "logger", ".", "exception", "(", "'GitHub API 404 Exception'", ")", "raise", "NotFoundError", "(", "str", "(", "e", ")", ")", "except", "GithubException", "as", "e", ":", "logger", ".", "exception", "(", "'GitHub API Exception'", ")", "raise", "GitClientError", "(", "str", "(", "e", ")", ")", "return", "_wrapper" ]
Starts an agent with standard logging
def start ( self ) : self . running = True self . status = 'RUNNING' self . mylog . record_process ( 'agent' , self . name + ' - starting' )
2,460
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent.py#L96-L102
[ "def", "match_template", "(", "template", ",", "image", ",", "options", "=", "None", ")", ":", "# If the input has max of 3 channels, use the faster OpenCV matching", "if", "len", "(", "image", ".", "shape", ")", "<=", "3", "and", "image", ".", "shape", "[", "2", "]", "<=", "3", ":", "return", "match_template_opencv", "(", "template", ",", "image", ",", "options", ")", "op", "=", "_DEF_TM_OPT", ".", "copy", "(", ")", "if", "options", "is", "not", "None", ":", "op", ".", "update", "(", "options", ")", "template", "=", "img_utils", ".", "gray3", "(", "template", ")", "image", "=", "img_utils", ".", "gray3", "(", "image", ")", "h", ",", "w", ",", "d", "=", "template", ".", "shape", "im_h", ",", "im_w", "=", "image", ".", "shape", "[", ":", "2", "]", "template_v", "=", "template", ".", "flatten", "(", ")", "heatmap", "=", "np", ".", "zeros", "(", "(", "im_h", "-", "h", ",", "im_w", "-", "w", ")", ")", "for", "col", "in", "range", "(", "0", ",", "im_w", "-", "w", ")", ":", "for", "row", "in", "range", "(", "0", ",", "im_h", "-", "h", ")", ":", "cropped_im", "=", "image", "[", "row", ":", "row", "+", "h", ",", "col", ":", "col", "+", "w", ",", ":", "]", "cropped_v", "=", "cropped_im", ".", "flatten", "(", ")", "if", "op", "[", "'distance'", "]", "==", "'euclidean'", ":", "heatmap", "[", "row", ",", "col", "]", "=", "scipy", ".", "spatial", ".", "distance", ".", "euclidean", "(", "template_v", ",", "cropped_v", ")", "elif", "op", "[", "'distance'", "]", "==", "'correlation'", ":", "heatmap", "[", "row", ",", "col", "]", "=", "scipy", ".", "spatial", ".", "distance", ".", "correlation", "(", "template_v", ",", "cropped_v", ")", "# normalize", "if", "op", "[", "'normalize'", "]", ":", "heatmap", "/=", "heatmap", ".", "max", "(", ")", "# size", "if", "op", "[", "'retain_size'", "]", ":", "hmap", "=", "np", ".", "ones", "(", "image", ".", "shape", "[", ":", "2", "]", ")", "*", "heatmap", ".", "max", "(", ")", "h", ",", "w", "=", "heatmap", ".", "shape", "hmap", "[", ":", 
"h", ",", ":", "w", "]", "=", "heatmap", "heatmap", "=", "hmap", "return", "heatmap" ]
set coords of agent in an arbitrary world
def set_coords ( self , x = 0 , y = 0 , z = 0 , t = 0 ) : self . coords = { } self . coords [ 'x' ] = x self . coords [ 'y' ] = y self . coords [ 'z' ] = z self . coords [ 't' ] = t
2,461
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent.py#L104-L112
[ "def", "_get_to_many_relationship_value", "(", "self", ",", "obj", ",", "column", ")", ":", "related_key", "=", "column", ".", "get", "(", "'related_key'", ",", "None", ")", "related", "=", "getattr", "(", "obj", ",", "column", "[", "'__col__'", "]", ".", "key", ")", "value", "=", "{", "}", "if", "related", ":", "total", "=", "len", "(", "related", ")", "for", "index", ",", "rel_obj", "in", "enumerate", "(", "related", ")", ":", "if", "related_key", ":", "compiled_res", "=", "self", ".", "_get_formatted_val", "(", "rel_obj", ",", "related_key", ",", "column", ")", "else", ":", "compiled_res", "=", "column", "[", "'__prop__'", "]", ".", "compile_obj", "(", "rel_obj", ")", "value", "[", "'item_%d'", "%", "index", "]", "=", "compiled_res", "value", "[", "str", "(", "index", ")", "]", "=", "compiled_res", "value", "[", "\"_\"", "+", "str", "(", "index", ")", "]", "=", "compiled_res", "if", "index", "==", "0", ":", "value", "[", "'first'", "]", "=", "compiled_res", "if", "index", "==", "total", "-", "1", ":", "value", "[", "'last'", "]", "=", "compiled_res", "return", "value" ]
Load catchment object from a . CD3 or . xml file .
def from_file ( file_path , incl_pot = True ) : filename , ext = os . path . splitext ( file_path ) am_file_path = filename + '.AM' pot_file_path = filename + '.PT' parser_by_ext = { '.cd3' : parsers . Cd3Parser , '.xml' : parsers . XmlCatchmentParser } catchment = parser_by_ext [ ext . lower ( ) ] ( ) . parse ( file_path ) # AMAX records try : catchment . amax_records = parsers . AmaxParser ( ) . parse ( am_file_path ) except FileNotFoundError : catchment . amax_records = [ ] # POT records if incl_pot : try : catchment . pot_dataset = parsers . PotParser ( ) . parse ( pot_file_path ) except FileNotFoundError : pass return catchment
2,462
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L30-L66
[ "async", "def", "set_room_temperatures_by_name", "(", "self", ",", "room_name", ",", "sleep_temp", "=", "None", ",", "comfort_temp", "=", "None", ",", "away_temp", "=", "None", ")", ":", "if", "sleep_temp", "is", "None", "and", "comfort_temp", "is", "None", "and", "away_temp", "is", "None", ":", "return", "for", "room_id", ",", "_room", "in", "self", ".", "rooms", ".", "items", "(", ")", ":", "if", "_room", ".", "name", "==", "room_name", ":", "await", "self", ".", "set_room_temperatures", "(", "room_id", ",", "sleep_temp", ",", "comfort_temp", ",", "away_temp", ")", "return", "_LOGGER", ".", "error", "(", "\"Could not find a room with name %s\"", ",", "room_name", ")" ]
Load catchment object into the database .
def to_db ( catchment , session , method = 'create' , autocommit = False ) : if not catchment . id : raise ValueError ( "Catchment/station number (`catchment.id`) must be set." ) if method == 'create' : session . add ( catchment ) elif method == 'update' : session . merge ( catchment ) else : raise ValueError ( "Method `{}` invalid. Use either `create` or `update`." ) if autocommit : session . commit ( )
2,463
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L69-L96
[ "def", "_getBusVoltageLambdaSensor", "(", "self", ")", ":", "muVmin", "=", "array", "(", "[", "b", ".", "mu_vmin", "for", "b", "in", "self", ".", "market", ".", "case", ".", "connected_buses", "]", ")", "muVmax", "=", "array", "(", "[", "b", ".", "mu_vmax", "for", "b", "in", "self", ".", "market", ".", "case", ".", "connected_buses", "]", ")", "muVmin", "=", "-", "1.0", "*", "muVmin", "diff", "=", "muVmin", "+", "muVmax", "return", "diff" ]
Add catchments from a user folder to the database .
def userdata_to_db ( session , method = 'update' , autocommit = False ) : try : folder = config [ 'import' ] [ 'folder' ] except KeyError : return if folder : folder_to_db ( folder , session , method = method , autocommit = autocommit )
2,464
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L151-L176
[ "def", "_hcsi_null_range", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "HCSINullField", "(", "'Reserved{:02d}'", ".", "format", "(", "x", ")", ")", "for", "x", "in", "range", "(", "*", "args", ",", "*", "*", "kwargs", ")", "]" ]
sends the text txt to the window handle hwnd using SendMessage
def send_text ( hwnd , txt ) : try : for c in txt : if c == '\n' : win32api . SendMessage ( hwnd , win32con . WM_KEYDOWN , win32con . VK_RETURN , 0 ) win32api . SendMessage ( hwnd , win32con . WM_KEYUP , win32con . VK_RETURN , 0 ) else : win32api . SendMessage ( hwnd , win32con . WM_CHAR , ord ( c ) , 0 ) except Exception as ex : print ( 'error calling SendMessage ' + str ( ex ) )
2,465
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L43-L55
[ "def", "tag_supplementary_material_sibling_ordinal", "(", "tag", ")", ":", "if", "hasattr", "(", "tag", ",", "'name'", ")", "and", "tag", ".", "name", "!=", "'supplementary-material'", ":", "return", "None", "nodenames", "=", "[", "'fig'", ",", "'media'", ",", "'sub-article'", "]", "first_parent_tag", "=", "first_parent", "(", "tag", ",", "nodenames", ")", "sibling_ordinal", "=", "1", "if", "first_parent_tag", ":", "# Within the parent tag of interest, count the tags", "# having the same asset value", "for", "supp_tag", "in", "first_parent_tag", ".", "find_all", "(", "tag", ".", "name", ")", ":", "if", "tag", "==", "supp_tag", ":", "# Stop once we reach the same tag we are checking", "break", "if", "supp_asset", "(", "supp_tag", ")", "==", "supp_asset", "(", "tag", ")", ":", "sibling_ordinal", "+=", "1", "else", ":", "# Look in all previous elements that do not have a parent", "# and count the tags having the same asset value", "for", "prev_tag", "in", "tag", ".", "find_all_previous", "(", "tag", ".", "name", ")", ":", "if", "not", "first_parent", "(", "prev_tag", ",", "nodenames", ")", ":", "if", "supp_asset", "(", "prev_tag", ")", "==", "supp_asset", "(", "tag", ")", ":", "sibling_ordinal", "+=", "1", "return", "sibling_ordinal" ]
start an app
def launch_app ( app_path , params = [ ] , time_before_kill_app = 15 ) : import subprocess try : res = subprocess . call ( [ app_path , params ] , timeout = time_before_kill_app , shell = True ) print ( 'res = ' , res ) if res == 0 : return True else : return False except Exception as ex : print ( 'error launching app ' + str ( app_path ) + ' with params ' + str ( params ) + '\n' + str ( ex ) ) return False
2,466
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L57-L71
[ "def", "authenticate", "(", "username", ",", "password", ",", "permission", "=", "None", ")", ":", "try", ":", "author", "=", "Author", ".", "objects", ".", "get", "(", "*", "*", "{", "'%s__exact'", "%", "Author", ".", "USERNAME_FIELD", ":", "username", "}", ")", "except", "Author", ".", "DoesNotExist", ":", "raise", "Fault", "(", "LOGIN_ERROR", ",", "_", "(", "'Username is incorrect.'", ")", ")", "if", "not", "author", ".", "check_password", "(", "password", ")", ":", "raise", "Fault", "(", "LOGIN_ERROR", ",", "_", "(", "'Password is invalid.'", ")", ")", "if", "not", "author", ".", "is_staff", "or", "not", "author", ".", "is_active", ":", "raise", "Fault", "(", "PERMISSION_DENIED", ",", "_", "(", "'User account unavailable.'", ")", ")", "if", "permission", ":", "if", "not", "author", ".", "has_perm", "(", "permission", ")", ":", "raise", "Fault", "(", "PERMISSION_DENIED", ",", "_", "(", "'User cannot %s.'", ")", "%", "permission", ")", "return", "author" ]
use shell to bring the application with caption to front
def app_activate ( caption ) : try : shell = win32com . client . Dispatch ( "WScript.Shell" ) shell . AppActivate ( caption ) except Exception as ex : print ( 'error calling win32com.client.Dispatch (AppActivate)' )
2,467
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L74-L82
[ "def", "parse_bismark_report", "(", "self", ",", "report", ",", "regexes", ")", ":", "parsed_data", "=", "{", "}", "for", "k", ",", "r", "in", "regexes", ".", "items", "(", ")", ":", "r_search", "=", "re", ".", "search", "(", "r", ",", "report", ",", "re", ".", "MULTILINE", ")", "if", "r_search", ":", "try", ":", "parsed_data", "[", "k", "]", "=", "float", "(", "r_search", ".", "group", "(", "1", ")", ")", "except", "ValueError", ":", "parsed_data", "[", "k", "]", "=", "r_search", ".", "group", "(", "1", ")", "# NaN", "if", "len", "(", "parsed_data", ")", "==", "0", ":", "return", "None", "return", "parsed_data" ]
Return a list of catchments sorted by hydrological similarity defined by similarity_distance_function
def most_similar_catchments ( self , subject_catchment , similarity_dist_function , records_limit = 500 , include_subject_catchment = 'auto' ) : if include_subject_catchment not in [ 'auto' , 'force' , 'exclude' ] : raise ValueError ( "Parameter `include_subject_catchment={}` invalid." . format ( include_subject_catchment ) + "Must be one of `auto`, `force` or `exclude`." ) query = ( self . db_session . query ( Catchment ) . join ( Catchment . descriptors ) . join ( Catchment . amax_records ) . filter ( Catchment . id != subject_catchment . id , Catchment . is_suitable_for_pooling , or_ ( Descriptors . urbext2000 < 0.03 , Descriptors . urbext2000 == None ) , AmaxRecord . flag == 0 ) . group_by ( Catchment ) . having ( func . count ( AmaxRecord . catchment_id ) >= 10 ) ) # At least 10 AMAX records catchments = query . all ( ) # Add subject catchment if required (may not exist in database, so add after querying db if include_subject_catchment == 'force' : if len ( subject_catchment . amax_records ) >= 10 : # Never include short-record catchments catchments . append ( subject_catchment ) elif include_subject_catchment == 'auto' : if len ( subject_catchment . amax_records ) >= 10 and subject_catchment . is_suitable_for_pooling and ( subject_catchment . descriptors . urbext2000 < 0.03 or subject_catchment . descriptors . urbext2000 is None ) : catchments . append ( subject_catchment ) # Store the similarity distance as an additional attribute for each catchment for catchment in catchments : catchment . similarity_dist = similarity_dist_function ( subject_catchment , catchment ) # Then simply sort by this attribute catchments . sort ( key = attrgetter ( 'similarity_dist' ) ) # Limit catchments until total amax_records counts is at least `records_limit`, default 500 amax_records_count = 0 catchments_limited = [ ] for catchment in catchments : catchments_limited . append ( catchment ) amax_records_count += catchment . 
record_length if amax_records_count >= records_limit : break return catchments_limited
2,468
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/collections.py#L118-L173
[ "def", "calc_regenerated", "(", "self", ",", "lastvotetime", ")", ":", "delta", "=", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "strptime", "(", "lastvotetime", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "td", "=", "delta", ".", "days", "ts", "=", "delta", ".", "seconds", "tt", "=", "(", "td", "*", "86400", ")", "+", "ts", "return", "tt", "*", "10000", "/", "86400", "/", "5" ]
Reads and parses a sam file .
def readSAM ( SAMfile , header = False ) : if header == True : f = open ( SAMfile , "r+" ) head = [ ] for line in f . readlines ( ) : if line [ 0 ] == "@" : head . append ( line ) else : continue f . close ( ) sam = pd . read_table ( SAMfile , sep = "this_gives_one_column" , comment = "@" , header = None ) sam = pd . DataFrame ( sam [ 0 ] . str . split ( "\t" ) . tolist ( ) ) acols = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] sam_ = sam [ acols ] samcols = sam . columns . tolist ( ) bcols = [ s for s in samcols if s not in acols ] sam_ [ 10 ] = sam [ bcols [ 0 ] ] if len ( bcols ) > 1 : for c in bcols [ 1 : ] : sam_ [ 10 ] = sam_ [ 10 ] . astype ( str ) sam [ c ] = sam [ c ] . astype ( str ) sam_ [ 10 ] = sam_ [ 10 ] + "\t" + sam [ c ] sam_ . columns = [ 'QNAME' , 'FLAG' , 'RNAME' , 'POS' , 'MAPQ' , 'CIGAR' , 'RNEXT' , 'PNEXT' , 'TLEN' , 'SEQ' , 'QUAL' ] if header == True : return sam_ , head else : return sam_
2,469
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/sam.py#L4-L42
[ "def", "saturation", "(", "self", ",", "value", ")", ":", "value", "=", "clean_float", "(", "value", ")", "if", "value", "is", "None", ":", "return", "try", ":", "unit_moisture_weight", "=", "self", ".", "unit_moist_weight", "-", "self", ".", "unit_dry_weight", "unit_moisture_volume", "=", "unit_moisture_weight", "/", "self", ".", "_pw", "saturation", "=", "unit_moisture_volume", "/", "self", ".", "_calc_unit_void_volume", "(", ")", "if", "saturation", "is", "not", "None", "and", "not", "ct", ".", "isclose", "(", "saturation", ",", "value", ",", "rel_tol", "=", "self", ".", "_tolerance", ")", ":", "raise", "ModelError", "(", "\"New saturation (%.3f) is inconsistent \"", "\"with calculated value (%.3f)\"", "%", "(", "value", ",", "saturation", ")", ")", "except", "TypeError", ":", "pass", "old_value", "=", "self", ".", "saturation", "self", ".", "_saturation", "=", "value", "try", ":", "self", ".", "recompute_all_weights_and_void", "(", ")", "self", ".", "_add_to_stack", "(", "\"saturation\"", ",", "value", ")", "except", "ModelError", "as", "e", ":", "self", ".", "_saturation", "=", "old_value", "raise", "ModelError", "(", "e", ")" ]
Explains a SAM flag .
def SAMflags ( x ) : flags = [ ] if x & 1 : l = "1: Read paired" else : l = "0: Read unpaired" flags . append ( l ) if x & 2 : l = "1: Read mapped in proper pair" else : l = "0: Read not mapped in proper pair" flags . append ( l ) if x & 4 : l = "1: Read unmapped" else : l = "0: Read mapped" flags . append ( l ) if x & 8 : l = "1: Mate unmapped" else : l = "0: Mate mapped" flags . append ( l ) if x & 16 : l = "1: Read reverse strand" else : l = "0: Read direct strand" flags . append ( l ) if x & 32 : l = "1: Mate reverse strand" else : l = "0: Mate direct strand" flags . append ( l ) if x & 64 : l = "1: First in pair" else : l = "0: Second in pair" flags . append ( l ) if x & 128 : l = "1: Second in pair" else : l = "0: First in pair" flags . append ( l ) if x & 256 : l = "1: Not primary alignment" else : l = "0: Primary alignment" flags . append ( l ) if x & 512 : l = "1: Read fails platform/vendor quality checks" else : l = "0: Read passes platform/vendor quality checks" flags . append ( l ) if x & 1024 : l = "1: Read is PCR or optical duplicate" else : l = "0: Read is not PCR or optical duplicate" flags . append ( l ) if x & 2048 : l = "1: Supplementary alignment" else : l = "0: Not supplementary alignment" flags . append ( l ) return flags
2,470
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/sam.py#L83-L165
[ "def", "_merge_pool_kwargs", "(", "self", ",", "override", ")", ":", "base_pool_kwargs", "=", "self", ".", "connection_pool_kw", ".", "copy", "(", ")", "if", "override", ":", "for", "key", ",", "value", "in", "override", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "try", ":", "del", "base_pool_kwargs", "[", "key", "]", "except", "KeyError", ":", "pass", "else", ":", "base_pool_kwargs", "[", "key", "]", "=", "value", "return", "base_pool_kwargs" ]
returns a string representation of the bias details
def get_bias_details ( self ) : res = 'Bias File Details\n' for b in self . bias_details : if len ( b ) > 2 : res += b [ 0 ] . ljust ( 35 ) res += b [ 1 ] . ljust ( 35 ) res += b [ 2 ] . ljust ( 9 ) res += '\n' return res
2,471
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/bias.py#L85-L96
[ "def", "_dataset_merge_filestore_newresource", "(", "self", ",", "new_resource", ",", "ignore_fields", ",", "filestore_resources", ")", ":", "# type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None", "new_resource", ".", "check_required_fields", "(", "ignore_fields", "=", "ignore_fields", ")", "self", ".", "resources", ".", "append", "(", "new_resource", ")", "if", "new_resource", ".", "get_file_to_upload", "(", ")", ":", "filestore_resources", ".", "append", "(", "new_resource", ")", "new_resource", "[", "'url'", "]", "=", "Dataset", ".", "temporary_url" ]
read the bias file based on the short_filename and return as a dictionary
def _read_bias_rating ( self , short_filename ) : res = { } full_name = os . path . join ( root_fldr , 'aikif' , 'data' , 'ref' , short_filename ) lg . record_process ( 'bias.py' , 'reading ' + full_name ) with open ( full_name , 'r' ) as f : for line in f : if line . strip ( '' ) == '' : break bias_line = [ ] cols = line . split ( ',' ) bias_line . extend ( [ short_filename ] ) for col in cols : bias_line . extend ( [ col . strip ( '"' ) . strip ( '\n' ) ] ) self . bias_details . append ( bias_line )
2,472
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/bias.py#L119-L137
[ "def", "bounding_sphere", "(", "self", ")", ":", "from", ".", "import", "primitives", ",", "nsphere", "center", ",", "radius", "=", "nsphere", ".", "minimum_nsphere", "(", "self", ")", "minball", "=", "primitives", ".", "Sphere", "(", "center", "=", "center", ",", "radius", "=", "radius", ",", "mutable", "=", "False", ")", "return", "minball" ]
returns the home folder and program root depending on OS
def get_root_folder ( ) : locations = { 'linux' : { 'hme' : '/home/duncan/' , 'core_folder' : '/home/duncan/dev/src/python/AIKIF' } , 'win32' : { 'hme' : 'T:\\user\\' , 'core_folder' : 'T:\\user\\dev\\src\\python\\AIKIF' } , 'cygwin' : { 'hme' : os . getcwd ( ) + os . sep , 'core_folder' : os . getcwd ( ) } , 'darwin' : { 'hme' : os . getcwd ( ) + os . sep , 'core_folder' : os . getcwd ( ) } } hme = locations [ sys . platform ] [ 'hme' ] core_folder = locations [ sys . platform ] [ 'core_folder' ] if not os . path . exists ( core_folder ) : hme = os . getcwd ( ) core_folder = os . getcwd ( ) print ( 'config.py : running on CI build (or you need to modify the paths in config.py)' ) return hme , core_folder
2,473
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L28-L45
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
read a simple text file from a private location to get username and password
def read_credentials ( fname ) : with open ( fname , 'r' ) as f : username = f . readline ( ) . strip ( '\n' ) password = f . readline ( ) . strip ( '\n' ) return username , password
2,474
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L84-L92
[ "def", "_DeleteClientActionRequest", "(", "self", ",", "to_delete", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"DELETE FROM client_action_requests WHERE \"", "conditions", "=", "[", "]", "args", "=", "[", "]", "for", "client_id", ",", "flow_id", ",", "request_id", "in", "to_delete", ":", "conditions", ".", "append", "(", "\"(client_id=%s AND flow_id=%s AND request_id=%s)\"", ")", "args", ".", "append", "(", "db_utils", ".", "ClientIDToInt", "(", "client_id", ")", ")", "args", ".", "append", "(", "db_utils", ".", "FlowIDToInt", "(", "flow_id", ")", ")", "args", ".", "append", "(", "request_id", ")", "query", "+=", "\" OR \"", ".", "join", "(", "conditions", ")", "cursor", ".", "execute", "(", "query", ",", "args", ")" ]
module intended to be imported in most AIKIF utils to manage folder paths user settings etc . Modify the parameters at the top of this file to suit
def show_config ( ) : res = '' res += '\n---------- Folder Locations ---------\n' for k , v in fldrs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Logfiles ---------\n' for k , v in logs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Parameters ---------\n' for k , v in params . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' print ( "\nusage from other programs - returns " + fldr_root ( ) ) return res
2,475
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L95-L114
[ "def", "_lock_renewer", "(", "lockref", ",", "interval", ",", "stop", ")", ":", "log", "=", "getLogger", "(", "\"%s.lock_refresher\"", "%", "__name__", ")", "while", "not", "stop", ".", "wait", "(", "timeout", "=", "interval", ")", ":", "log", ".", "debug", "(", "\"Refreshing lock\"", ")", "lock", "=", "lockref", "(", ")", "if", "lock", "is", "None", ":", "log", ".", "debug", "(", "\"The lock no longer exists, \"", "\"stopping lock refreshing\"", ")", "break", "lock", ".", "extend", "(", "expire", "=", "lock", ".", "_expire", ")", "del", "lock", "log", ".", "debug", "(", "\"Exit requested, stopping lock refreshing\"", ")" ]
Selectes motifs from a meme file based on the number of sites .
def filterMotifs ( memeFile , outFile , minSites ) : with open ( memeFile , "r" ) as mF : oldMEME = mF . readlines ( ) newMEME = oldMEME [ : 7 ] i = 7 while i < len ( oldMEME ) : if oldMEME [ i ] . split ( " " ) [ 0 ] == "MOTIF" : print ( oldMEME [ i ] . split ( "\n" ) [ 0 ] , int ( oldMEME [ i + 2 ] . split ( "nsites= " ) [ 1 ] . split ( " " ) [ 0 ] ) ) sys . stdout . flush ( ) if int ( oldMEME [ i + 2 ] . split ( "nsites= " ) [ 1 ] . split ( " " ) [ 0 ] ) > minSites : newMEME . append ( oldMEME [ i ] ) f = i + 1 while oldMEME [ f ] . split ( " " ) [ 0 ] != "MOTIF" : newMEME . append ( oldMEME [ f ] ) f = f + 1 i = i + 1 else : i = i + 1 else : i = i + 1 with open ( outFile , "w+" ) as out : out . write ( "" . join ( newMEME ) ) return newMEME
2,476
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/meme.py#L2-L35
[ "def", "_patch_for_tf1_13", "(", "tf", ")", ":", "if", "not", "hasattr", "(", "tf", ".", "io", ".", "gfile", ",", "\"GFile\"", ")", ":", "tf", ".", "io", ".", "gfile", ".", "GFile", "=", "tf", ".", "gfile", ".", "GFile", "if", "not", "hasattr", "(", "tf", ",", "\"nest\"", ")", ":", "tf", ".", "nest", "=", "tf", ".", "contrib", ".", "framework", ".", "nest", "if", "not", "hasattr", "(", "tf", ".", "compat", ",", "\"v2\"", ")", ":", "tf", ".", "compat", ".", "v2", "=", "types", ".", "ModuleType", "(", "\"tf.compat.v2\"", ")", "tf", ".", "compat", ".", "v2", ".", "data", "=", "types", ".", "ModuleType", "(", "\"tf.compat.v2.data\"", ")", "from", "tensorflow", ".", "python", ".", "data", ".", "ops", "import", "dataset_ops", "tf", ".", "compat", ".", "v2", ".", "data", ".", "Dataset", "=", "dataset_ops", ".", "DatasetV2", "if", "not", "hasattr", "(", "tf", ".", "compat", ".", "v2", ".", "data", ".", "Dataset", ",", "\"output_shapes\"", ")", ":", "from", "tensorflow", ".", "python", ".", "data", ".", "ops", "import", "dataset_ops", "if", "hasattr", "(", "dataset_ops", ",", "\"get_legacy_output_shapes\"", ")", ":", "tf", ".", "compat", ".", "v2", ".", "data", ".", "Dataset", ".", "output_shapes", "=", "property", "(", "dataset_ops", ".", "get_legacy_output_shapes", ")", "tf", ".", "compat", ".", "v2", ".", "data", ".", "Dataset", ".", "output_types", "=", "property", "(", "dataset_ops", ".", "get_legacy_output_types", ")" ]
reads the file and cleans into standard text ready for parsing
def _read_file ( self ) : self . raw = [ ] with open ( self . fname , 'r' ) as f : for line in f : #print(line) if line . startswith ( '#' ) : pass # comment elif line . strip ( '\n' ) == '' : pass # space else : self . raw . append ( line . strip ( '\n' ) )
2,477
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/parse_desc.py#L46-L59
[ "def", "sortProperties", "(", "self", ",", "properties", ")", ":", "# modified to sort objects using their global rank", "# Sort object lists", "for", "prop", ",", "objects", "in", "properties", ".", "items", "(", ")", ":", "objects", ".", "sort", "(", "key", "=", "self", ".", "_globalSortKey", ")", "# Make sorted list of properties", "return", "sorted", "(", "properties", ",", "key", "=", "lambda", "p", ":", "self", ".", "predicate_rank", "[", "p", "]", ")" ]
Restore the default configuration and remove the user s config file .
def reset ( self ) : # Delete user config file try : os . remove ( self . _user_config_file ) except FileNotFoundError : pass # Empty and refill the config object for section_name in self . sections ( ) : self . remove_section ( section_name ) self . read_defaults ( )
2,478
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/settings.py#L49-L63
[ "async", "def", "update_lease_async", "(", "self", ",", "lease", ")", ":", "if", "lease", "is", "None", ":", "return", "False", "if", "not", "lease", ".", "token", ":", "return", "False", "_logger", ".", "debug", "(", "\"Updating lease %r %r\"", ",", "self", ".", "host", ".", "guid", ",", "lease", ".", "partition_id", ")", "# First, renew the lease to make sure the update will go through.", "if", "await", "self", ".", "renew_lease_async", "(", "lease", ")", ":", "try", ":", "await", "self", ".", "host", ".", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "create_blob_from_text", ",", "self", ".", "lease_container_name", ",", "lease", ".", "partition_id", ",", "json", ".", "dumps", "(", "lease", ".", "serializable", "(", ")", ")", ",", "lease_id", "=", "lease", ".", "token", ")", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to update lease %r %r %r\"", ",", "self", ".", "host", ".", "guid", ",", "lease", ".", "partition_id", ",", "err", ")", "raise", "err", "else", ":", "return", "False", "return", "True" ]
Write data to user config file .
def save ( self ) : with open ( self . _user_config_file , 'w' , encoding = 'utf-8' ) as f : self . write ( f )
2,479
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/settings.py#L87-L92
[ "def", "Nu_vertical_cylinder", "(", "Pr", ",", "Gr", ",", "L", "=", "None", ",", "D", "=", "None", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "vertical_cylinder_correlations", ".", "items", "(", ")", ":", "if", "values", "[", "4", "]", "or", "all", "(", "(", "L", ",", "D", ")", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Popiel & Churchill'", "in", "methods", ":", "methods", ".", "remove", "(", "'Popiel & Churchill'", ")", "methods", ".", "insert", "(", "0", ",", "'Popiel & Churchill'", ")", "elif", "'McAdams, Weiss & Saunders'", "in", "methods", ":", "methods", ".", "remove", "(", "'McAdams, Weiss & Saunders'", ")", "methods", ".", "insert", "(", "0", ",", "'McAdams, Weiss & Saunders'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "vertical_cylinder_correlations", ":", "if", "vertical_cylinder_correlations", "[", "Method", "]", "[", "4", "]", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ",", "L", "=", "L", ",", "D", "=", "D", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
Read the magic file
def _magic_data ( filename = os . path . join ( here , 'magic_data.json' ) ) : with open ( filename ) as f : data = json . load ( f ) headers = [ _create_puremagic ( x ) for x in data [ 'headers' ] ] footers = [ _create_puremagic ( x ) for x in data [ 'footers' ] ] return headers , footers
2,480
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L39-L45
[ "def", "reset", "(", "self", ")", ":", "# Reset Union Temporal Pooler fields", "self", ".", "_poolingActivation", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", "self", ".", "_unionSDR", "=", "numpy", ".", "array", "(", "[", "]", ",", "dtype", "=", "UINT_DTYPE", ")", "self", ".", "_poolingTimer", "=", "numpy", ".", "ones", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", "*", "1000", "self", ".", "_poolingActivationInitLevel", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", "self", ".", "_preActiveInput", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumInputs", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", "self", ".", "_prePredictedActiveInput", "=", "numpy", ".", "zeros", "(", "(", "self", ".", "getNumInputs", "(", ")", ",", "self", ".", "_historyLength", ")", ",", "dtype", "=", "REAL_DTYPE", ")", "# Reset Spatial Pooler fields", "self", ".", "setOverlapDutyCycles", "(", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", ")", "self", ".", "setActiveDutyCycles", "(", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", ")", "self", ".", "setMinOverlapDutyCycles", "(", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", ")", "self", ".", "setBoostFactors", "(", "numpy", ".", "ones", "(", "self", ".", "getNumColumns", "(", ")", ",", "dtype", "=", "REAL_DTYPE", ")", ")" ]
The length of the largest magic string + its offset
def _max_lengths ( ) : max_header_length = max ( [ len ( x . byte_match ) + x . offset for x in magic_header_array ] ) max_footer_length = max ( [ len ( x . byte_match ) + abs ( x . offset ) for x in magic_footer_array ] ) return max_header_length , max_footer_length
2,481
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L59-L65
[ "def", "failing", "(", "self", ")", ":", "log_levels", "=", "{", "key", ":", "conn", ".", "log_level", "for", "key", ",", "conn", "in", "self", ".", "conns", ".", "items", "(", ")", "if", "hasattr", "(", "conn", ",", "'log_level'", ")", "}", "for", "key", "in", "log_levels", ":", "self", ".", "conns", "[", "key", "]", ".", "log_level", "=", "logging", ".", "CRITICAL", "failing_conns", "=", "{", "key", ":", "conn", "for", "key", ",", "conn", "in", "self", ".", "active", ".", "items", "(", ")", "if", "not", "conn", ".", "check_status", "}", "for", "key", ",", "level", "in", "log_levels", ".", "items", "(", ")", ":", "self", ".", "conns", "[", "key", "]", ".", "log_level", "=", "level", "return", "failing_conns" ]
Rough confidence based on string length and file extension
def _confidence ( matches , ext = None ) : results = [ ] for match in matches : con = ( 0.8 if len ( match . extension ) > 9 else float ( "0.{0}" . format ( len ( match . extension ) ) ) ) if ext == match . extension : con = 0.9 results . append ( PureMagicWithConfidence ( confidence = con , * * match . _asdict ( ) ) ) return sorted ( results , key = lambda x : x . confidence , reverse = True )
2,482
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L68-L78
[ "def", "invalidate", "(", "self", ")", ":", "for", "row", "in", "self", ".", "rows", ":", "for", "key", "in", "row", ".", "keys", ":", "key", ".", "state", "=", "0" ]
Attempt to identify data by its magic numbers
def _identify_all ( header , footer , ext = None ) : # Capture the length of the data # That way we do not try to identify bytes that don't exist matches = list ( ) for magic_row in magic_header_array : start = magic_row . offset end = magic_row . offset + len ( magic_row . byte_match ) if end > len ( header ) : continue if header [ start : end ] == magic_row . byte_match : matches . append ( magic_row ) for magic_row in magic_footer_array : start = magic_row . offset if footer [ start : ] == magic_row . byte_match : matches . append ( magic_row ) if not matches : raise PureError ( "Could not identify file" ) return _confidence ( matches , ext )
2,483
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L81-L102
[ "def", "write_backup_state_to_json_file", "(", "self", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "state_file_path", "=", "self", ".", "config", "[", "\"json_state_file_path\"", "]", "self", ".", "state", "[", "\"walreceivers\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", ",", "\"last_flushed_lsn\"", ":", "value", ".", "last_flushed_lsn", "}", "for", "key", ",", "value", "in", "self", ".", "walreceivers", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"pg_receivexlogs\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", "}", "for", "key", ",", "value", "in", "self", ".", "receivexlogs", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"pg_basebackups\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", "}", "for", "key", ",", "value", "in", "self", ".", "basebackups", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"compressors\"", "]", "=", "[", "compressor", ".", "state", "for", "compressor", "in", "self", ".", "compressors", "]", "self", ".", "state", "[", "\"transfer_agents\"", "]", "=", "[", "ta", ".", "state", "for", "ta", "in", "self", ".", "transfer_agents", "]", "self", ".", "state", "[", "\"queues\"", "]", "=", "{", "\"compression_queue\"", ":", "self", ".", "compression_queue", ".", "qsize", "(", ")", ",", "\"transfer_queue\"", ":", "self", ".", "transfer_queue", ".", "qsize", "(", ")", ",", "}", "self", ".", "log", ".", "debug", "(", "\"Writing JSON state file to %r\"", ",", "state_file_path", ")", "write_json_file", "(", "state_file_path", ",", "self", ".", "state", ")", "self", ".", "log", ".", "debug", "(", "\"Wrote JSON state file to disk, took %.4fs\"", ",", "time", ".", "time", "(", ")", "-", "start_time", ")" ]
Discover what type of file it is based on the incoming string
def _magic ( header , footer , mime , ext = None ) : if not header : raise ValueError ( "Input was empty" ) info = _identify_all ( header , footer , ext ) [ 0 ] if mime : return info . mime_type return info . extension if not isinstance ( info . extension , list ) else info [ 0 ] . extension
2,484
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L105-L113
[ "def", "get_enroll", "(", "self", ")", ":", "devices", "=", "[", "DeviceRegistration", ".", "wrap", "(", "device", ")", "for", "device", "in", "self", ".", "__get_u2f_devices", "(", ")", "]", "enroll", "=", "start_register", "(", "self", ".", "__appid", ",", "devices", ")", "enroll", "[", "'status'", "]", "=", "'ok'", "session", "[", "'_u2f_enroll_'", "]", "=", "enroll", ".", "json", "return", "enroll" ]
Grab the start and end of the file
def _file_details ( filename ) : max_head , max_foot = _max_lengths ( ) with open ( filename , "rb" ) as fin : head = fin . read ( max_head ) try : fin . seek ( - max_foot , os . SEEK_END ) except IOError : fin . seek ( 0 ) foot = fin . read ( ) return head , foot
2,485
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L116-L126
[ "def", "forget", "(", "empowered", ",", "powerupClass", ",", "interface", ")", ":", "className", "=", "fullyQualifiedName", "(", "powerupClass", ")", "withThisName", "=", "_StoredByName", ".", "className", "==", "className", "items", "=", "empowered", ".", "store", ".", "query", "(", "_StoredByName", ",", "withThisName", ")", "if", "items", ".", "count", "(", ")", "==", "0", ":", "template", "=", "\"No named powerups for {} (interface: {})\"", ".", "format", "raise", "ValueError", "(", "template", "(", "powerupClass", ",", "interface", ")", ")", "for", "stored", "in", "items", ":", "empowered", ".", "powerDown", "(", "stored", ",", "interface", ")", "stored", ".", "deleteFromStore", "(", ")" ]
Scan a filename for it s extension .
def ext_from_filename ( filename ) : try : base , ext = filename . lower ( ) . rsplit ( "." , 1 ) except ValueError : return '' ext = ".{0}" . format ( ext ) all_exts = [ x . extension for x in chain ( magic_header_array , magic_footer_array ) ] if base [ - 4 : ] . startswith ( "." ) : # For double extensions like like .tar.gz long_ext = base [ - 4 : ] + ext if long_ext in all_exts : return long_ext return ext
2,486
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L135-L154
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Opens file attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead .
def from_file ( filename , mime = False ) : head , foot = _file_details ( filename ) return _magic ( head , foot , mime , ext_from_filename ( filename ) )
2,487
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L157-L168
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Reads in string attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead . If filename is provided it will be used in the computation .
def from_string ( string , mime = False , filename = None ) : head , foot = _string_details ( string ) ext = ext_from_filename ( filename ) if filename else None return _magic ( head , foot , mime , ext )
2,488
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L171-L184
[ "def", "setRandomSeed", "(", "self", ",", "seed", ")", ":", "self", ".", "seed", "=", "seed", "self", ".", "_random", "=", "Random", "(", ")", "self", ".", "_random", ".", "setSeed", "(", "seed", ")" ]
Returns a field of choice from the attribute column of the GTF
def retrieve_GTF_field ( field , gtf ) : inGTF = gtf . copy ( ) def splits ( x ) : l = x . split ( ";" ) l = [ s . split ( " " ) for s in l ] res = np . nan for s in l : if field in s : if '"' in s [ - 1 ] : res = s [ - 1 ] [ 1 : - 1 ] else : res = s [ - 1 ] return res inGTF [ field ] = inGTF [ 'attribute' ] . apply ( lambda x : splits ( x ) ) return inGTF [ [ field ] ]
2,489
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L18-L40
[ "def", "driver", "(", "self", ")", ":", "if", "not", "self", ".", "_driver", ":", "raise", "AttributeError", "(", "\"`driver` is not bound on this agent implementation({}). \"", "\"Do you forget to call `super().on_bind_driver` when you override the method \"", "\"`on_bind_driver` in your sub class?\"", ".", "format", "(", "repr", "(", "self", ")", ")", ")", "return", "self", ".", "_driver" ]
List the type of attributes in a the attribute section of a GTF file
def attributesGTF ( inGTF ) : df = pd . DataFrame ( inGTF [ 'attribute' ] . str . split ( ";" ) . tolist ( ) ) desc = [ ] for i in df . columns . tolist ( ) : val = df [ [ i ] ] . dropna ( ) val = pd . DataFrame ( val [ i ] . str . split ( ' "' ) . tolist ( ) ) [ 0 ] val = list ( set ( val ) ) for v in val : if len ( v ) > 0 : l = v . split ( " " ) if len ( l ) > 1 : l = l [ 1 ] else : l = l [ 0 ] desc . append ( l ) desc = list ( set ( desc ) ) finaldesc = [ ] for d in desc : if len ( d ) > 0 : finaldesc . append ( d ) return finaldesc
2,490
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L42-L69
[ "def", "hide", "(", "self", ",", "selections", ")", ":", "if", "'atoms'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'atoms'", "]", "=", "selections", "[", "'atoms'", "]", "self", ".", "on_atom_hidden_changed", "(", ")", "if", "'bonds'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'bonds'", "]", "=", "selections", "[", "'bonds'", "]", "self", ".", "on_bond_hidden_changed", "(", ")", "if", "'box'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'box'", "]", "=", "box_s", "=", "selections", "[", "'box'", "]", "if", "box_s", ".", "mask", "[", "0", "]", ":", "if", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "remove_renderer", "(", "self", ".", "box_renderer", ")", "else", ":", "if", "not", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "add_renderer", "(", "self", ".", "box_renderer", ")", "return", "self", ".", "hidden_state" ]
Reads an extracts all attributes in the attributes section of a GTF and constructs a new dataframe wiht one collumn per attribute instead of the attributes column
def parseGTF ( inGTF ) : desc = attributesGTF ( inGTF ) ref = inGTF . copy ( ) ref . reset_index ( inplace = True , drop = True ) df = ref . drop ( [ 'attribute' ] , axis = 1 ) . copy ( ) for d in desc : field = retrieve_GTF_field ( d , ref ) df = pd . concat ( [ df , field ] , axis = 1 ) return df
2,491
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L71-L87
[ "def", "ubridge", "(", "self", ")", ":", "if", "self", ".", "_ubridge_hypervisor", "and", "not", "self", ".", "_ubridge_hypervisor", ".", "is_running", "(", ")", ":", "self", ".", "_ubridge_hypervisor", "=", "None", "return", "self", ".", "_ubridge_hypervisor" ]
Write a GTF dataframe into a file
def writeGTF ( inGTF , file_path ) : cols = inGTF . columns . tolist ( ) if len ( cols ) == 9 : if 'attribute' in cols : df = inGTF else : df = inGTF [ cols [ : 8 ] ] df [ 'attribute' ] = "" for c in cols [ 8 : ] : if c == cols [ len ( cols ) - 1 ] : df [ 'attribute' ] = df [ 'attribute' ] + c + ' "' + inGTF [ c ] . astype ( str ) + '";' else : df [ 'attribute' ] = df [ 'attribute' ] + c + ' "' + inGTF [ c ] . astype ( str ) + '"; ' df . to_csv ( file_path , sep = "\t" , header = None , index = None , quoting = csv . QUOTE_NONE )
2,492
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L89-L109
[ "def", "status", "(", "config", "=", "'root'", ",", "num_pre", "=", "None", ",", "num_post", "=", "None", ")", ":", "try", ":", "pre", ",", "post", "=", "_get_num_interval", "(", "config", ",", "num_pre", ",", "num_post", ")", "snapper", ".", "CreateComparison", "(", "config", ",", "int", "(", "pre", ")", ",", "int", "(", "post", ")", ")", "files", "=", "snapper", ".", "GetFiles", "(", "config", ",", "int", "(", "pre", ")", ",", "int", "(", "post", ")", ")", "status_ret", "=", "{", "}", "SUBVOLUME", "=", "list_configs", "(", ")", "[", "config", "]", "[", "'SUBVOLUME'", "]", "for", "file", "in", "files", ":", "# In case of SUBVOLUME is included in filepath we remove it", "# to prevent from filepath starting with double '/'", "_filepath", "=", "file", "[", "0", "]", "[", "len", "(", "SUBVOLUME", ")", ":", "]", "if", "file", "[", "0", "]", ".", "startswith", "(", "SUBVOLUME", ")", "else", "file", "[", "0", "]", "status_ret", "[", "os", ".", "path", ".", "normpath", "(", "SUBVOLUME", "+", "_filepath", ")", "]", "=", "{", "'status'", ":", "status_to_string", "(", "file", "[", "1", "]", ")", "}", "return", "status_ret", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing changed files: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
Transform a GTF dataframe into a bed dataframe
def GTFtoBED ( inGTF , name ) : bed = inGTF . copy ( ) bed . reset_index ( inplace = True , drop = True ) if name not in bed . columns . tolist ( ) : field = retrieve_GTF_field ( name , bed ) bed = pd . concat ( [ bed , field ] , axis = 1 ) bed = bed [ [ 'seqname' , 'start' , 'end' , name , 'score' , 'strand' ] ] bed . columns = [ 'chrom' , 'chromStart' , 'chromEnd' , 'name' , 'score' , 'strand' ] bed . drop_duplicates ( inplace = True ) bed . reset_index ( inplace = True , drop = True ) return bed
2,493
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L111-L130
[ "def", "delete_all_volumes", "(", "self", ")", ":", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Volumes can only be deleted '", "'on swarm manager nodes'", ")", "volume_list", "=", "self", ".", "get_volume_list", "(", ")", "for", "volumes", "in", "volume_list", ":", "# Remove all the services", "self", ".", "_api_client", ".", "remove_volume", "(", "volumes", ",", "force", "=", "True", ")" ]
Gets all positions of all bases in an exon
def MAPGenoToTrans ( parsedGTF , feature ) : GenTransMap = parsedGTF [ parsedGTF [ "feature" ] == feature ] def getExonsPositions ( df ) : start = int ( df [ "start" ] ) stop = int ( df [ "end" ] ) strand = df [ "strand" ] r = range ( start , stop + 1 ) if strand == "-" : r . sort ( reverse = True ) r = [ str ( s ) for s in r ] return "," . join ( r ) GenTransMap [ "feature_bases" ] = GenTransMap . apply ( getExonsPositions , axis = 1 ) GenTransMap = GenTransMap . sort_values ( by = [ "transcript_id" , "exon_number" ] , ascending = True ) def CombineExons ( df ) : return pd . Series ( dict ( feature_bases = ',' . join ( df [ 'feature_bases' ] ) ) ) GenTransMap = GenTransMap . groupby ( "transcript_id" ) . apply ( CombineExons ) GenTransMap = GenTransMap . to_dict ( ) . get ( "feature_bases" ) return GenTransMap
2,494
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L132-L161
[ "def", "handleServerEvents", "(", "self", ",", "msg", ")", ":", "self", ".", "log", ".", "debug", "(", "'MSG %s'", ",", "msg", ")", "self", ".", "handleConnectionState", "(", "msg", ")", "if", "msg", ".", "typeName", "==", "\"error\"", ":", "self", ".", "handleErrorEvents", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CURRENT_TIME\"", "]", ":", "if", "self", ".", "time", "<", "msg", ".", "time", ":", "self", ".", "time", "=", "msg", ".", "time", "elif", "(", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_MKT_DEPTH\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_MKT_DEPTH_L2\"", "]", ")", ":", "self", ".", "handleMarketDepth", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_STRING\"", "]", ":", "self", ".", "handleTickString", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_PRICE\"", "]", ":", "self", ".", "handleTickPrice", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_GENERIC\"", "]", ":", "self", ".", "handleTickGeneric", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_SIZE\"", "]", ":", "self", ".", "handleTickSize", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_OPTION\"", "]", ":", "self", ".", "handleTickOptionComputation", "(", "msg", ")", "elif", "(", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_OPEN_ORDER\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_OPEN_ORDER_END\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_ORDER_STATUS\"", "]", ")", ":", "self", ".", "handleOrders", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_HISTORICAL_DATA\"", "]", ":", "self", ".", "handleHistoricalData", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", 
"\"MSG_TYPE_ACCOUNT_UPDATES\"", "]", ":", "self", ".", "handleAccount", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_PORTFOLIO_UPDATES\"", "]", ":", "self", ".", "handlePortfolio", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_POSITION\"", "]", ":", "self", ".", "handlePosition", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_NEXT_ORDER_ID\"", "]", ":", "self", ".", "handleNextValidId", "(", "msg", ".", "orderId", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONNECTION_CLOSED\"", "]", ":", "self", ".", "handleConnectionClosed", "(", "msg", ")", "# elif msg.typeName == dataTypes[\"MSG_TYPE_MANAGED_ACCOUNTS\"]:", "# self.accountCode = msg.accountsList", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_COMMISSION_REPORT\"", "]", ":", "self", ".", "commission", "=", "msg", ".", "commissionReport", ".", "m_commission", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONTRACT_DETAILS\"", "]", ":", "self", ".", "handleContractDetails", "(", "msg", ",", "end", "=", "False", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONTRACT_DETAILS_END\"", "]", ":", "self", ".", "handleContractDetails", "(", "msg", ",", "end", "=", "True", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TICK_SNAPSHOT_END\"", "]", ":", "self", ".", "ibCallback", "(", "caller", "=", "\"handleTickSnapshotEnd\"", ",", "msg", "=", "msg", ")", "else", ":", "# log handler msg", "self", ".", "log_msg", "(", "\"server\"", ",", "msg", ")" ]
Maps a genome position to transcript positon
def GetTransPosition ( df , field , dic , refCol = "transcript_id" ) : try : gen = str ( int ( df [ field ] ) ) transid = df [ refCol ] bases = dic . get ( transid ) . split ( "," ) bases = bases . index ( str ( gen ) ) + 1 except : bases = np . nan return bases
2,495
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L163-L181
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
having problems with urllib on a specific site so trying requests
def get_protected_page ( url , user , pwd , filename ) : import requests r = requests . get ( url , auth = ( user , pwd ) ) print ( r . status_code ) if r . status_code == 200 : print ( 'success' ) with open ( filename , 'wb' ) as fd : for chunk in r . iter_content ( 4096 ) : fd . write ( chunk ) lg . record_result ( "Success - downloaded " + url ) else : lg . record_result ( 'network_tools.get_protected_page:Failed to downloaded ' + url + ' (status code = ' + str ( r . status_code ) + ')' )
2,496
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/network_tools.py#L67-L81
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
parse the rawFilesTable . txt file into a pandas dataframe
def read_rawFilesTable ( filename ) : exp = pd . read_table ( filename ) expected_columns = { 'File' , 'Exists' , 'Size' , 'Data format' , 'Parameter group' , 'Experiment' , 'Fraction' } found_columns = set ( exp . columns ) if len ( expected_columns - found_columns ) > 0 : message = '\n' . join ( [ 'The raw files table has the wrong format!' , 'It should contain columns:' , ', ' . join ( sorted ( expected_columns ) ) , 'Found columns:' , ', ' . join ( sorted ( found_columns ) ) ] ) raise ValueError ( message ) exp [ 'Raw file' ] = exp [ 'File' ] . apply ( path . basename ) . apply ( path . splitext ) . str . get ( 0 ) exp [ 'Experiment' ] = exp [ 'Experiment' ] . astype ( str ) return exp
2,497
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/maxquant.py#L8-L22
[ "def", "accept_record", "(", "self", ",", "record", ")", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "req", "=", "InclusionRequest", ".", "get", "(", "self", ".", "id", ",", "record", ".", "id", ")", "if", "req", "is", "None", ":", "raise", "InclusionRequestMissingError", "(", "community", "=", "self", ",", "record", "=", "record", ")", "req", ".", "delete", "(", ")", "self", ".", "add_record", "(", "record", ")", "self", ".", "last_record_accepted", "=", "datetime", ".", "utcnow", "(", ")" ]
Add an instance method or function
def add_method ( self , m , * * kwargs ) : if isinstance ( m , types . FunctionType ) : self [ 'function' , id ( m ) ] = m else : f , obj = get_method_vars ( m ) wrkey = ( f , id ( obj ) ) self [ wrkey ] = obj
2,498
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L49-L60
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "return", "{", "vol", ".", "path", "(", ")", ":", "[", "path", ".", "text", "for", "path", "in", "ElementTree", ".", "fromstring", "(", "vol", ".", "XMLDesc", "(", ")", ")", ".", "findall", "(", "'.//backingStore/path'", ")", "]", "for", "vol", "in", "volumes", "if", "_is_valid_volume", "(", "vol", ")", "}" ]
Remove an instance method or function if it exists
def del_method ( self , m ) : if isinstance ( m , types . FunctionType ) and not iscoroutinefunction ( m ) : wrkey = ( 'function' , id ( m ) ) else : f , obj = get_method_vars ( m ) wrkey = ( f , id ( obj ) ) if wrkey in self : del self [ wrkey ]
2,499
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L61-L73
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "return", "{", "vol", ".", "path", "(", ")", ":", "[", "path", ".", "text", "for", "path", "in", "ElementTree", ".", "fromstring", "(", "vol", ".", "XMLDesc", "(", ")", ")", ".", "findall", "(", "'.//backingStore/path'", ")", "]", "for", "vol", "in", "volumes", "if", "_is_valid_volume", "(", "vol", ")", "}" ]