query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Remove any stored instance methods that belong to an object
def del_instance(self, obj):
    """Remove every stored entry whose referenced object is *obj*."""
    stale_keys = {key for key, stored in self.iter_instances() if stored is obj}
    for key in stale_keys:
        del self[key]
2,500
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L74-L85
[ "def", "price", "(", "self", ",", "instrument", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'GET'", ",", "'/v3/instruments/{instrument}/price'", ")", "request", ".", "set_path_param", "(", "'instrument'", ",", "instrument", ")", "request", ".", "set_param", "(", "'time'", ",", "kwargs", ".", "get", "(", "'time'", ")", ")", "response", "=", "self", ".", "ctx", ".", "request", "(", "request", ")", "if", "response", ".", "content_type", "is", "None", ":", "return", "response", "if", "not", "response", ".", "content_type", ".", "startswith", "(", "\"application/json\"", ")", ":", "return", "response", "jbody", "=", "json", ".", "loads", "(", "response", ".", "raw_body", ")", "parsed_body", "=", "{", "}", "#", "# Parse responses as defined by the API specification", "#", "if", "str", "(", "response", ".", "status", ")", "==", "\"200\"", ":", "if", "jbody", ".", "get", "(", "'price'", ")", "is", "not", "None", ":", "parsed_body", "[", "'price'", "]", "=", "self", ".", "ctx", ".", "pricing_common", ".", "Price", ".", "from_dict", "(", "jbody", "[", "'price'", "]", ",", "self", ".", "ctx", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"400\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"401\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", 
"response", ".", "status", ")", "==", "\"404\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "elif", "str", "(", "response", ".", "status", ")", "==", "\"405\"", ":", "if", "jbody", ".", "get", "(", "'errorCode'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorCode'", "]", "=", "jbody", ".", "get", "(", "'errorCode'", ")", "if", "jbody", ".", "get", "(", "'errorMessage'", ")", "is", "not", "None", ":", "parsed_body", "[", "'errorMessage'", "]", "=", "jbody", ".", "get", "(", "'errorMessage'", ")", "#", "# Unexpected response status", "#", "else", ":", "parsed_body", "=", "jbody", "response", ".", "body", "=", "parsed_body", "return", "response" ]
Iterate over the stored objects
def iter_instances(self):
    """Yield ``(key, object)`` pairs for every entry whose object is not None."""
    for key in set(self.keys()):
        value = self.get(key)
        if value is not None:
            yield key, value
2,501
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L86-L97
[ "def", "RetryOnUnavailable", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "Wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "try", ":", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "httpclient", ".", "HTTPException", ",", "socket", ".", "error", ",", "urlerror", ".", "URLError", ")", "as", "e", ":", "time", ".", "sleep", "(", "5", ")", "if", "(", "isinstance", "(", "e", ",", "urlerror", ".", "HTTPError", ")", "and", "e", ".", "getcode", "(", ")", "==", "httpclient", ".", "SERVICE_UNAVAILABLE", ")", ":", "continue", "elif", "isinstance", "(", "e", ",", "socket", ".", "timeout", ")", ":", "continue", "raise", "else", ":", "if", "response", ".", "getcode", "(", ")", "==", "httpclient", ".", "OK", ":", "return", "response", "else", ":", "raise", "StatusException", "(", "response", ")", "return", "Wrapper" ]
Iterate over stored functions and instance methods
def iter_methods(self):
    """Yield each stored callable: plain functions directly, methods re-bound.

    Keys are ``(func_or_marker, obj_id)`` tuples; the marker string
    ``'function'`` flags a plain function stored as the value itself.
    """
    for key, holder in self.iter_instances():
        func_part, _obj_id = key
        if func_part == 'function':
            yield self[key]
        else:
            # rebind the unbound function to its instance by name
            yield getattr(holder, func_part.__name__)
2,502
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L98-L109
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
reads the subject file to a list to confirm config is setup
def load_data_subject_areas(subject_file):
    """Read *subject_file* into a list of stripped lines.

    Returns an empty list (after printing a warning) when the file is
    missing, which signals that config is not set up.
    """
    if not os.path.exists(subject_file):
        print('MISSING DATA FILE (subject_file) ', subject_file)
        print('update your config.py or config.txt')
        return []
    with open(subject_file, 'r') as f:
        return [line.strip() for line in f]
2,503
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L27-L39
[ "def", "delete_datapoint", "(", "self", ",", "datapoint", ")", ":", "datapoint", "=", "validate_type", "(", "datapoint", ",", "DataPoint", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}/{datapoint_id}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "datapoint_id", "=", "datapoint", ".", "get_id", "(", ")", ",", ")", ")" ]
reads the ontology yaml file and does basic verification
def check_ontology(fname):
    """Read the ontology YAML file, pretty-print it and return the parsed data.

    Args:
        fname: path to the ontology YAML file.

    Returns:
        The deserialized YAML document.  The original implementation
        discarded the loaded data and returned ``None``; returning it is
        backward compatible and lets callers actually verify the ontology.
    """
    import pprint
    with open(fname, 'r') as stream:
        data = yaml.safe_load(stream)
    pprint.pprint(data)
    return data
2,504
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L135-L142
[ "def", "return_port", "(", "port", ")", ":", "if", "port", "in", "_random_ports", ":", "_random_ports", ".", "remove", "(", "port", ")", "elif", "port", "in", "_owned_ports", ":", "_owned_ports", ".", "remove", "(", "port", ")", "_free_ports", ".", "add", "(", "port", ")", "elif", "port", "in", "_free_ports", ":", "logging", ".", "info", "(", "\"Returning a port that was already returned: %s\"", ",", "port", ")", "else", ":", "logging", ".", "info", "(", "\"Returning a port that wasn't given by portpicker: %s\"", ",", "port", ")" ]
top level function used to simply return the ONE ACTUAL string used for data types
def find_type(self, txt):
    """Return the ONE actual type string from ``self.lst_type`` matching *txt*.

    The search is case-insensitive; when several entries contain the
    upper-cased text, the last one in the list wins (as before).
    Returns ``'Unknown'`` when nothing matches.
    """
    needle = txt.upper()
    # scanning the reversed list and taking the first hit is equivalent
    # to scanning forward and keeping the last hit
    return next((candidate for candidate in reversed(self.lst_type)
                 if needle in candidate), 'Unknown')
2,505
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L105-L115
[ "def", "send", "(", "self", ",", "request", ")", ":", "if", "not", "self", ".", "_initialized", ":", "self", ".", "_initialize", "(", "request", ")", "return", "try", ":", "self", ".", "_rpc", ".", "send", "(", "request", ")", "except", "grpc", ".", "RpcError", "as", "e", ":", "logging", ".", "info", "(", "'Found rpc error %s'", ",", "e", ",", "exc_info", "=", "True", ")", "# If stream has closed due to error, attempt to reopen with the", "# incoming request.", "self", ".", "_initialize", "(", "request", ")" ]
returns the file based on dataType and subjectArea
def get_full_filename(self, dataType, subjectArea):
    """Return the path of the CSV file for *dataType* / *subjectArea*.

    Files live under the ``core`` folder of the module-level ``dataPath``.
    """
    leaf = dataType + '_' + subjectArea + '.CSV'
    return os.sep.join([dataPath, 'core', leaf])
2,506
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L117-L121
[ "def", "unwatch", "(", "self", ")", ":", "if", "self", ".", "watchers", "is", "not", "None", ":", "unwatched", "=", "[", "]", "for", "watcher", "in", "self", ".", "watchers", ":", "watcher", ".", "inst", ".", "param", ".", "unwatch", "(", "watcher", ")", "unwatched", ".", "append", "(", "watcher", ")", "self", ".", "watchers", "=", "[", "w", "for", "w", "in", "self", ".", "watchers", "if", "w", "not", "in", "unwatched", "]" ]
read the list of thoughts from a text file
def load_plan(self, fname):
    """Populate the plan from the thoughts text file *fname*.

    Each parsed (type, text) pair updates the matching attribute:
    name/version are assigned, beliefs/desires/intentions are added.
    """
    with open(fname, "r") as f:
        for raw_line in f:
            if raw_line == '':
                continue
            tpe, txt = self.parse_plan_from_string(raw_line)
            if tpe == 'name':
                self.name = txt
            elif tpe == 'version':
                self.plan_version = txt
            elif tpe == 'belief':
                self.beliefs.add(txt)
            elif tpe == 'desire':
                self.desires.add(txt)
            elif tpe == 'intention':
                self.intentions.add(txt)
2,507
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_plan_BDI.py#L48-L64
[ "def", "communityvisibilitystate", "(", "self", ")", ":", "if", "self", ".", "_communityvisibilitystate", "==", "None", ":", "return", "None", "elif", "self", ".", "_communityvisibilitystate", "in", "self", ".", "VisibilityState", ":", "return", "self", ".", "VisibilityState", "[", "self", ".", "_communityvisibilitystate", "]", "else", ":", "#Invalid State", "return", "None" ]
adds a constraint for the plan
def add_constraint(self, name, tpe, val):
    """Record a constraint for the plan as a ``[name, tpe, val]`` triple."""
    record = [name, tpe, val]
    self.constraint.append(record)
2,508
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_plan_BDI.py#L100-L104
[ "def", "to_json_serializable", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Entity", ")", ":", "return", "obj", ".", "to_json_dict", "(", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "{", "k", ":", "to_json_serializable", "(", "v", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "to_json_serializable", "(", "v", ")", "for", "v", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "datetime", ")", ":", "return", "obj", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "elif", "isinstance", "(", "obj", ",", "date", ")", ":", "return", "obj", ".", "strftime", "(", "'%Y-%m-%d'", ")", "return", "obj" ]
calculates basic stats on the MapRule elements of the maps to give a quick overview .
def get_maps_stats(self):
    """Count MapRule types across ``self.maps`` for a quick overview.

    Returns:
        dict: maps each rule ``tpe`` to the number of rules having it.
    """
    tpes = {}
    for m in self.maps:
        # dict.get with a default replaces the if/else counting branch
        tpes[m.tpe] = tpes.get(m.tpe, 0) + 1
    return tpes
2,509
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L40-L51
[ "def", "start", "(", "self", ",", "device_uuid", ")", ":", "status_code", ",", "_", ",", "session", "=", "self", ".", "http_client", ".", "post", "(", "'/sync/start'", ",", "body", "=", "None", ",", "headers", "=", "self", ".", "build_headers", "(", "device_uuid", ")", ")", "return", "None", "if", "status_code", "==", "204", "else", "session" ]
save the rules to file after web updates or program changes
def save_rules(self, op_file):
    """Write every mapping rule to *op_file* after web or program updates."""
    with open(op_file, 'w') as f:
        f.writelines(rule.format_for_file_output() for rule in self.maps)
2,510
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L65-L71
[ "def", "not_storable", "(", "_type", ")", ":", "return", "Storable", "(", "_type", ",", "handlers", "=", "StorableHandler", "(", "poke", "=", "fake_poke", ",", "peek", "=", "fail_peek", "(", "_type", ")", ")", ")" ]
uses the MapRule m to run through the dict and extract data based on the rule
def process_rule(self, m, dct, tpe):
    """Placeholder: will apply MapRule *m* over *dct*; currently only logs."""
    rule_repr = str(m).replace('\n', '')
    print('TODO - ' + tpe + ' + applying rule ' + rule_repr)
2,511
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L123-L128
[ "def", "isrchi", "(", "value", ",", "ndim", ",", "array", ")", ":", "value", "=", "ctypes", ".", "c_int", "(", "value", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "array", "=", "stypes", ".", "toIntVector", "(", "array", ")", "return", "libspice", ".", "isrchi_c", "(", "value", ",", "ndim", ",", "array", ")" ]
uses type to format the raw information to a dictionary usable by the mapper
def format_raw_data(self, tpe, raw_data):
    """Convert *raw_data* into a mapper-ready dict according to *tpe*.

    'text' and 'file' dispatch to the matching parser; any other type
    yields an error dict wrapping the raw data.
    """
    if tpe == 'text':
        return self.parse_text_to_dict(raw_data)
    if tpe == 'file':
        return self.parse_file_to_dict(raw_data)
    return {'ERROR': 'unknown data type', 'data': [raw_data]}
2,512
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L131-L143
[ "async", "def", "delete_lease_async", "(", "self", ",", "lease", ")", ":", "await", "self", ".", "host", ".", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "delete_blob", ",", "self", ".", "lease_container_name", ",", "lease", ".", "partition_id", ",", "lease_id", "=", "lease", ".", "token", ")", ")" ]
takes a string and parses via NLP ready for mapping
def parse_text_to_dict(self, txt):
    """Stub NLP parse: currently returns *txt* under both noun and verb keys."""
    print('TODO - import NLP, split into verbs / nouns')
    return {'nouns': txt, 'verbs': txt}
2,513
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L145-L154
[ "def", "exclude_time", "(", "self", ",", "start", ",", "end", ",", "days", ")", ":", "self", ".", "_excluded_times", ".", "append", "(", "TimeRange", "(", "start", ",", "end", ",", "days", ")", ")", "return", "self" ]
process the file according to the mapping rules . The cols list must match the columns in the filename
def parse_file_to_dict(self, fname):
    """Stub: walk the file-type mapping rules for *fname* (columns TODO).

    The cols list must match the columns in the filename; actual column
    reading is not implemented yet.
    """
    print('TODO - parse_file_to_dict' + fname)
    for rule in self.maps:
        if rule.tpe == 'file' and rule.key[0:3] == 'col':
            print('reading column..')
2,514
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L156-L165
[ "def", "main", "(", "dimension", ",", "iterations", ")", ":", "optimizer", "=", "PSOOptimizer", "(", ")", "solution", "=", "optimizer", ".", "minimize", "(", "sphere", ",", "-", "5.12", ",", "5.12", ",", "dimension", ",", "max_iterations", "(", "iterations", ")", ")", "return", "solution", ",", "optimizer" ]
reads the data_filename into a matrix and calls the main function to generate a .rule file based on the data in the map. For all datafiles mapped there exists a .rule file to define it
def create_map_from_file(self, data_filename):
    """Generate a ``.rule`` mapping file describing *data_filename*.

    Loads the data file into a DataTable, derives a map from the dataset
    and writes the map (with a small provenance header) to
    ``data_filename + '.rule'`` — so every mapped datafile has a matching
    rule file defining it.
    """
    op_filename = data_filename + '.rule'
    table = mod_datatable.DataTable(data_filename, ',')
    table.load_to_array()
    generated = self.generate_map_from_dataset(table)
    header = ('# rules file autogenerated by mapper.py v0.1\n'
              'filename:source=' + data_filename + '\n'
              'filename:rule=' + op_filename + '\n\n')
    with open(op_filename, 'w') as f:
        f.write(header)
        for entry in generated:
            if type(entry) is str:
                f.write(entry + '\n')
            else:
                # non-string entries are iterables of string fragments
                f.writelines(entry)
2,515
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/mapper.py#L198-L222
[ "def", "ideal_gas", "(", "target", ",", "pressure", "=", "'pore.pressure'", ",", "temperature", "=", "'pore.temperature'", ")", ":", "R", "=", "8.31447", "P", "=", "target", "[", "pressure", "]", "T", "=", "target", "[", "temperature", "]", "value", "=", "P", "/", "(", "R", "*", "T", ")", "return", "value" ]
loops until exit command given
def run(self):
    """Main REPL loop: answer inputs until status becomes 'EXIT'."""
    while True:
        if self.status == 'EXIT':
            break
        answer = self.process_input(self.get_input())
        print(answer)
    print('Bye')
2,516
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/aggie/aggie.py#L45-L52
[ "def", "array", "(", "source_array", ",", "ctx", "=", "None", ",", "dtype", "=", "None", ")", ":", "ctx", "=", "current_context", "(", ")", "if", "ctx", "is", "None", "else", "ctx", "if", "isinstance", "(", "source_array", ",", "NDArray", ")", ":", "assert", "(", "source_array", ".", "stype", "!=", "'default'", ")", ",", "\"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray\"", "# prepare dtype and ctx based on source_array, if not provided", "dtype", "=", "_prepare_default_dtype", "(", "source_array", ",", "dtype", ")", "# if both dtype and ctx are different from source_array, we cannot copy directly", "if", "source_array", ".", "dtype", "!=", "dtype", "and", "source_array", ".", "context", "!=", "ctx", ":", "arr", "=", "empty", "(", "source_array", ".", "stype", ",", "source_array", ".", "shape", ",", "dtype", "=", "dtype", ")", "arr", "[", ":", "]", "=", "source_array", "arr", "=", "arr", ".", "as_in_context", "(", "ctx", ")", "else", ":", "arr", "=", "empty", "(", "source_array", ".", "stype", ",", "source_array", ".", "shape", ",", "dtype", "=", "dtype", ",", "ctx", "=", "ctx", ")", "arr", "[", ":", "]", "=", "source_array", "return", "arr", "elif", "spsp", "and", "isinstance", "(", "source_array", ",", "spsp", ".", "csr", ".", "csr_matrix", ")", ":", "# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy", "# preprocess scipy csr to canonical form", "csr", "=", "source_array", ".", "sorted_indices", "(", ")", "csr", ".", "sum_duplicates", "(", ")", "dtype", "=", "_prepare_default_dtype", "(", "source_array", ",", "dtype", ")", "return", "csr_matrix", "(", "(", "csr", ".", "data", ",", "csr", ".", "indices", ",", "csr", ".", "indptr", ")", ",", "shape", "=", "csr", ".", "shape", ",", "dtype", "=", "dtype", ",", "ctx", "=", "ctx", ")", "elif", "isinstance", "(", "source_array", ",", "(", "np", ".", "ndarray", ",", "np", ".", "generic", ")", ")", ":", "raise", "ValueError", "(", "\"Please use mx.nd.array 
to create an NDArray with source_array of type \"", ",", "type", "(", "source_array", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected source_array type: \"", ",", "type", "(", "source_array", ")", ")" ]
takes a question and returns the best answer based on known skills
def process_input(self, question):
    """Return the best answer for *question* based on known skills.

    '?' questions are answered from the info store; ':LIST' dumps raw
    input; anything else is stored as new information.  Question and
    answer are both logged.  Exits the process when status is 'EXIT'.
    """
    if self.status == 'EXIT':
        print('bye')
        sys.exit()
    if '?' in question:
        ans = self.info.find_answer(question)
    elif question.startswith(':LIST'):
        listing = ['List of Raw Input'] + [str(i) for i in self.info.raw_input]
        ans = '\n'.join(listing) + '\n'
    else:
        ans = 'Adding info..'
        self.info.raw_input.append(question)
    self.lg.record_process('aggie.py', 'Question > ' + question)
    self.lg.record_process('aggie.py', 'Answer > ' + ans)
    return ans
2,517
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/aggie/aggie.py#L60-L83
[ "def", "_create_storage_directories", "(", ")", ":", "# Create configuration directory", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "CONFIG_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "CONFIG_DIR", ")", "# Create data directory (for log file)", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "DATA_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "DATA_DIR", ")", "# Create run directory (for lock file)", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "RUN_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "RUN_DIR", ")" ]
shows a data file in CSV format - all files live in CORE folder
def show_data_file(fname):
    """Render CSV data file *fname* (from the CORE folder) as HTML."""
    print(fname)
    pieces = ['<H2>' + fname + '</H2>']
    # actual table data, so an HTML table is the right rendering
    pieces.append(web.read_csv_to_html_table(fname, 'Y'))
    pieces.append('</div>\n')
    return ''.join(pieces)
2,518
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_data.py#L28-L39
[ "def", "_max_weight_operator", "(", "ops", ":", "Iterable", "[", "PauliTerm", "]", ")", "->", "Union", "[", "None", ",", "PauliTerm", "]", ":", "mapping", "=", "dict", "(", ")", "# type: Dict[int, str]", "for", "op", "in", "ops", ":", "for", "idx", ",", "op_str", "in", "op", ":", "if", "idx", "in", "mapping", ":", "if", "mapping", "[", "idx", "]", "!=", "op_str", ":", "return", "None", "else", ":", "mapping", "[", "idx", "]", "=", "op_str", "op", "=", "functools", ".", "reduce", "(", "mul", ",", "(", "PauliTerm", "(", "op", ",", "q", ")", "for", "q", ",", "op", "in", "mapping", ".", "items", "(", ")", ")", ",", "sI", "(", ")", ")", "return", "op" ]
Wrapper for subprocess.Popen to work across various Python versions when using the with syntax.
def managed_process(process):
    """Yield *process*; on exit close its stdio streams and wait for it.

    Wrapper for subprocess.Popen so the ``with`` syntax works across
    Python versions.
    """
    try:
        yield process
    finally:
        streams = (process.stdout, process.stdin, process.stderr)
        for stream in filter(None, streams):
            stream.close()
        process.wait()
2,519
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L33-L41
[ "def", "cancelOrder", "(", "self", ",", "orderId", ")", ":", "self", ".", "ibConn", ".", "cancelOrder", "(", "orderId", ")", "# update order id for next time", "self", ".", "requestOrderIds", "(", ")", "return", "orderId" ]
Get path for temporary scripts .
def get_temporary_scripts_path(self):
    """Return the configured temporary-scripts directory, or None.

    Only a non-empty path naming an existing directory is returned.
    """
    path = self.config.temporary_scripts_path
    if len(path) > 0 and os.path.isdir(path):
        return path
    return None
2,520
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L84-L95
[ "def", "render_category_averages", "(", "obj", ",", "normalize_to", "=", "100", ")", ":", "context", "=", "{", "'reviewed_item'", ":", "obj", "}", "ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", "reviews", "=", "models", ".", "Review", ".", "objects", ".", "filter", "(", "content_type", "=", "ctype", ",", "object_id", "=", "obj", ".", "id", ")", "category_averages", "=", "{", "}", "for", "review", "in", "reviews", ":", "review_category_averages", "=", "review", ".", "get_category_averages", "(", "normalize_to", ")", "if", "review_category_averages", ":", "for", "category", ",", "average", "in", "review_category_averages", ".", "items", "(", ")", ":", "if", "category", "not", "in", "category_averages", ":", "category_averages", "[", "category", "]", "=", "review_category_averages", "[", "category", "]", "else", ":", "category_averages", "[", "category", "]", "+=", "review_category_averages", "[", "category", "]", "if", "reviews", "and", "category_averages", ":", "for", "category", ",", "average", "in", "category_averages", ".", "items", "(", ")", ":", "category_averages", "[", "category", "]", "=", "category_averages", "[", "category", "]", "/", "models", ".", "Rating", ".", "objects", ".", "filter", "(", "category", "=", "category", ",", "value__isnull", "=", "False", ",", "review__content_type", "=", "ctype", ",", "review__object_id", "=", "obj", ".", "id", ")", ".", "exclude", "(", "value", "=", "''", ")", ".", "count", "(", ")", "else", ":", "category_averages", "=", "{", "}", "for", "category", "in", "models", ".", "RatingCategory", ".", "objects", ".", "filter", "(", "counts_for_average", "=", "True", ")", ":", "category_averages", "[", "category", "]", "=", "0.0", "context", ".", "update", "(", "{", "'category_averages'", ":", "category_averages", "}", ")", "return", "context" ]
Create a temporary executable bash file .
def create_file_for(self, script):
    """Create a temporary executable Bash file for *script*.

    Renders *script* through the template engine; on render failure the
    temp file is removed, ``self.success`` is cleared and ``None`` is
    returned.  Otherwise the rendered content (or, if the rendered text
    names an existing file, that file's content) is written, the file is
    made executable and its path is returned.
    """
    temp = tempfile.NamedTemporaryFile(prefix="pipeline-script-", mode='w+t',
                                       suffix=".sh", delete=False,
                                       dir=self.get_temporary_scripts_path())
    self.update_environment_variables(temp.name)
    rendered_script = render(script, model=self.config.model, env=self.env,
                             item=self.config.item,
                             variables=self.config.variables)
    if rendered_script is None:
        # rendering failed: flag failure and clean up the temp file
        self.success = False
        temp.close()
        os.remove(temp.name)
        return None
    # Python 2 requires bytes on write; Python 3 takes the text as-is
    to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
    # NOTE(review): the ASCII check presumably guards os.path.isfile against
    # non-path rendered text — confirm; if the rendered result names an
    # existing file, that file's content is copied instead of the text.
    if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
        with open(rendered_script) as handle:
            content = str(handle.read())
            temp.writelines(content)
    else:
        temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
        temp.write(to_file_map[sys.version_info.major](rendered_script))
    temp.close()
    # make Bash script executable
    os.chmod(temp.name, 0o700)
    return temp.name
2,521
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L97-L136
[ "def", "_check_rest_version", "(", "self", ",", "version", ")", ":", "version", "=", "str", "(", "version", ")", "if", "version", "not", "in", "self", ".", "supported_rest_versions", ":", "msg", "=", "\"Library is incompatible with REST API version {0}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "version", ")", ")", "array_rest_versions", "=", "self", ".", "_list_available_rest_versions", "(", ")", "if", "version", "not", "in", "array_rest_versions", ":", "msg", "=", "\"Array is incompatible with REST API version {0}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "version", ")", ")", "return", "LooseVersion", "(", "version", ")" ]
Rendering Bash options .
def render_bash_options(self):
    """Return Bash ``set`` lines for the configured debug/strict flags."""
    parts = []
    if self.config.debug:
        parts.append("set -x\n")
    if self.config.strict:
        parts.append("set -euo pipefail\n")
    return ''.join(parts)
2,522
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L138-L145
[ "def", "_cb_inform_interface_change", "(", "self", ",", "msg", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'cb_inform_interface_change(%s)'", ",", "msg", ")", "self", ".", "_interface_changed", ".", "set", "(", ")" ]
Processing one file .
def process_file(self, filename):
    """Yield the output lines produced for *filename*.

    In dry-run mode the script file's own lines are yielded (trailing
    newline stripped); otherwise the script is executed via
    ``process_script`` and its output lines are yielded.
    """
    if self.config.dry_run:
        if not self.config.internal:
            self.logger.info("Dry run mode for script %s", filename)
        with open(filename) as handle:
            for raw in handle:
                yield raw[:-1] if raw.endswith('\n') else raw
    else:
        if not self.config.internal:
            self.logger.info("Running script %s", filename)
        for out_line in self.process_script(filename):
            yield out_line
2,523
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L170-L182
[ "def", "_generate_noise_system", "(", "dimensions_tr", ",", "spatial_sd", ",", "temporal_sd", ",", "spatial_noise_type", "=", "'gaussian'", ",", "temporal_noise_type", "=", "'gaussian'", ",", ")", ":", "def", "noise_volume", "(", "dimensions", ",", "noise_type", ",", ")", ":", "if", "noise_type", "==", "'rician'", ":", "# Generate the Rician noise (has an SD of 1)", "noise", "=", "stats", ".", "rice", ".", "rvs", "(", "b", "=", "0", ",", "loc", "=", "0", ",", "scale", "=", "1.527", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'exponential'", ":", "# Make an exponential distribution (has an SD of 1)", "noise", "=", "stats", ".", "expon", ".", "rvs", "(", "0", ",", "scale", "=", "1", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'gaussian'", ":", "noise", "=", "np", ".", "random", ".", "randn", "(", "np", ".", "prod", "(", "dimensions", ")", ")", ".", "reshape", "(", "dimensions", ")", "# Return the noise", "return", "noise", "# Get just the xyz coordinates", "dimensions", "=", "np", ".", "asarray", "(", "[", "dimensions_tr", "[", "0", "]", ",", "dimensions_tr", "[", "1", "]", ",", "dimensions_tr", "[", "2", "]", ",", "1", "]", ")", "# Generate noise", "spatial_noise", "=", "noise_volume", "(", "dimensions", ",", "spatial_noise_type", ")", "temporal_noise", "=", "noise_volume", "(", "dimensions_tr", ",", "temporal_noise_type", ")", "# Make the system noise have a specific spatial variability", "spatial_noise", "*=", "spatial_sd", "# Set the size of the noise", "temporal_noise", "*=", "temporal_sd", "# The mean in time of system noise needs to be zero, so subtract the", "# means of the temporal noise in time", "temporal_noise_mean", "=", "np", ".", "mean", "(", "temporal_noise", ",", "3", ")", ".", "reshape", "(", "dimensions", "[", "0", "]", ",", "dimensions", "[", "1", "]", ",", "dimensions", "[", "2", "]", ",", "1", ")", "temporal_noise", "=", "temporal_noise", "-", "temporal_noise_mean", "# Save the combination", 
"system_noise", "=", "spatial_noise", "+", "temporal_noise", "return", "system_noise" ]
Unregister a file object from being monitored .
def unregister(self, fileobj):
    """Unregister a file object from being monitored.

    Returns the key that was associated with *fileobj*; raises KeyError
    if it was never registered.
    """
    try:
        key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
    except KeyError:
        raise KeyError("{0!r} is not registered".format(fileobj))
    # Getting the fileno of a closed socket on Windows errors with EBADF.
    except socket.error as err:
        if err.errno != errno.EBADF:
            raise
        else:
            # fileno() lookup failed on a closed socket: fall back to a
            # linear scan of registered keys for the same file object.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    self._fd_to_key.pop(key.fd)
                    break
            else:
                # scan found nothing — the object was never registered
                raise KeyError("{0!r} is not registered".format(fileobj))
    return key
2,524
https://github.com/sethmlarson/selectors2/blob/9bdf3d86578d1a84738cac6eb4127281b75bd669/selectors2.py#L161-L179
[ "def", "authors_titles_validator", "(", "record", ",", "result", ")", ":", "record_authors", "=", "get_value", "(", "record", ",", "'authors'", ",", "[", "]", ")", "result_authors", "=", "get_value", "(", "result", ",", "'_source.authors'", ",", "[", "]", ")", "author_score", "=", "compute_author_match_score", "(", "record_authors", ",", "result_authors", ")", "title_max_score", "=", "0.0", "record_titles", "=", "get_value", "(", "record", ",", "'titles.title'", ",", "[", "]", ")", "result_titles", "=", "get_value", "(", "result", ",", "'_source.titles.title'", ",", "[", "]", ")", "for", "cartesian_pair", "in", "product", "(", "record_titles", ",", "result_titles", ")", ":", "record_title_tokens", "=", "get_tokenized_title", "(", "cartesian_pair", "[", "0", "]", ")", "result_title_tokens", "=", "get_tokenized_title", "(", "cartesian_pair", "[", "1", "]", ")", "current_title_jaccard", "=", "compute_jaccard_index", "(", "record_title_tokens", ",", "result_title_tokens", ")", "if", "current_title_jaccard", ">", "title_max_score", "and", "current_title_jaccard", ">=", "0.5", ":", "title_max_score", "=", "current_title_jaccard", "return", "(", "author_score", "+", "title_max_score", ")", "/", "2", ">", "0.5" ]
does some basic validation
def prepare ( self ) : try : assert ( type ( self . sender ) is Channel ) assert ( type ( self . receiver ) is Channel ) return True except : return False
2,525
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/comms.py#L97-L106
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", 
"False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
this handles the message transmission
def send ( self ) : #print('sending message to ' + self.receiver) if self . prepare ( ) : ## TODO - send message via library print ( 'sending message' ) lg . record_process ( 'comms.py' , 'Sending message ' + self . title ) return True else : return False
2,526
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/comms.py#L109-L121
[ "def", "list_benchmarks", "(", "root", ",", "fp", ")", ":", "update_sys_path", "(", "root", ")", "# Streaming of JSON back out to the master process", "fp", ".", "write", "(", "'['", ")", "first", "=", "True", "for", "benchmark", "in", "disc_benchmarks", "(", "root", ")", ":", "if", "not", "first", ":", "fp", ".", "write", "(", "', '", ")", "clean", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "benchmark", ".", "__dict__", ".", "items", "(", ")", "if", "isinstance", "(", "v", ",", "(", "str", ",", "int", ",", "float", ",", "list", ",", "dict", ",", "bool", ")", ")", "and", "not", "k", ".", "startswith", "(", "'_'", ")", ")", "json", ".", "dump", "(", "clean", ",", "fp", ",", "skipkeys", "=", "True", ")", "first", "=", "False", "fp", ".", "write", "(", "']'", ")" ]
Creates an index of a text file specifically for use in AIKIF. It separates the ontology descriptions first, followed by values, and lastly makes a final pass to get all delimited word parts .
def buildIndex ( ipFile , ndxFile , append = 'Y' , silent = 'N' , useShortFileName = 'Y' ) : if silent == 'N' : pass if append == 'N' : try : os . remove ( ndxFile ) except Exception as ex : print ( 'file already deleted - ignore' + str ( ex ) ) delims = [ ',' , chr ( 31 ) , '' , '$' , '&' , '"' , '%' , '/' , '\\' , '.' , ';' , ':' , '!' , '?' , '-' , '_' , ' ' , '\n' , '*' , '\'' , '(' , ')' , '[' , ']' , '{' , '}' ] # 1st pass - index the ontologies, including 2 depths up (later - TODO) #buildIndex(ipFile, ndxFile, ' ', 1, 'Y') # 2nd pass - use ALL delims to catch each word as part of hyphenated - eg AI Build py totWords , totLines , uniqueWords = getWordList ( ipFile , delims ) AppendIndexDictionaryToFile ( uniqueWords , ndxFile , ipFile , useShortFileName ) if silent == 'N' : print ( format_op_row ( ipFile , totLines , totWords , uniqueWords ) ) show ( 'uniqueWords' , uniqueWords , 5 ) DisplayIndexAsDictionary ( uniqueWords )
2,527
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L61-L87
[ "def", "tostype", "(", "self", ",", "stype", ")", ":", "# pylint: disable= no-member, protected-access", "if", "stype", "==", "'csr'", ":", "raise", "ValueError", "(", "\"cast_storage from row_sparse to csr is not supported\"", ")", "return", "op", ".", "cast_storage", "(", "self", ",", "stype", "=", "stype", ")" ]
Format the output row with stats
def format_op_row ( ipFile , totLines , totWords , uniqueWords ) : txt = os . path . basename ( ipFile ) . ljust ( 36 ) + ' ' txt += str ( totLines ) . rjust ( 7 ) + ' ' txt += str ( totWords ) . rjust ( 7 ) + ' ' txt += str ( len ( uniqueWords ) ) . rjust ( 7 ) + ' ' return txt
2,528
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L89-L97
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Build the header
def format_op_hdr ( ) : txt = 'Base Filename' . ljust ( 36 ) + ' ' txt += 'Lines' . rjust ( 7 ) + ' ' txt += 'Words' . rjust ( 7 ) + ' ' txt += 'Unique' . ljust ( 8 ) + '' return txt
2,529
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L99-L107
[ "def", "_timestamp_regulator", "(", "self", ")", ":", "unified_timestamps", "=", "_PrettyDefaultDict", "(", "list", ")", "staged_files", "=", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", "for", "timestamp_basename", "in", "self", ".", "__timestamps_unregulated", ":", "if", "len", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ">", "1", ":", "# File has been splitted", "timestamp_name", "=", "''", ".", "join", "(", "timestamp_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "staged_splitted_files_of_timestamp", "=", "list", "(", "filter", "(", "lambda", "staged_file", ":", "(", "timestamp_name", "==", "staged_file", "[", ":", "-", "3", "]", "and", "all", "(", "[", "(", "x", "in", "set", "(", "map", "(", "str", ",", "range", "(", "10", ")", ")", ")", ")", "for", "x", "in", "staged_file", "[", "-", "3", ":", "]", "]", ")", ")", ",", "staged_files", ")", ")", "if", "len", "(", "staged_splitted_files_of_timestamp", ")", "==", "0", ":", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "timestamp_basename", ")", "]", "=", "{", "\"reason\"", ":", "\"Missing staged file\"", ",", "\"current_staged_files\"", ":", "staged_files", "}", "continue", "staged_splitted_files_of_timestamp", ".", "sort", "(", ")", "unified_timestamp", "=", "list", "(", ")", "for", "staging_digits", ",", "splitted_file", "in", "enumerate", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ":", "prev_splits_sec", "=", "0", "if", "int", "(", "staging_digits", ")", "!=", "0", ":", "prev_splits_sec", "=", "self", ".", "_get_audio_duration_seconds", "(", "\"{}/staging/{}{:03d}\"", ".", "format", "(", "self", ".", "src_dir", ",", "timestamp_name", ",", "staging_digits", "-", "1", ")", ")", "for", "word_block", "in", "splitted_file", ":", "unified_timestamp", ".", "append", "(", "_WordBlock", "(", "word", "=", "word_block", ".", "word", ",", "start", "=", "round", "(", 
"word_block", ".", "start", "+", "prev_splits_sec", ",", "2", ")", ",", "end", "=", "round", "(", "word_block", ".", "end", "+", "prev_splits_sec", ",", "2", ")", ")", ")", "unified_timestamps", "[", "str", "(", "timestamp_basename", ")", "]", "+=", "unified_timestamp", "else", ":", "unified_timestamps", "[", "timestamp_basename", "]", "+=", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", "[", "0", "]", "self", ".", "__timestamps", ".", "update", "(", "unified_timestamps", ")", "self", ".", "__timestamps_unregulated", "=", "_PrettyDefaultDict", "(", "list", ")" ]
Save the list of unique words to the master list
def AppendIndexDictionaryToFile ( uniqueWords , ndxFile , ipFile , useShortFileName = 'Y' ) : if useShortFileName == 'Y' : f = os . path . basename ( ipFile ) else : f = ipFile with open ( ndxFile , "a" , encoding = 'utf-8' , errors = 'replace' ) as ndx : word_keys = uniqueWords . keys ( ) #uniqueWords.sort() for word in sorted ( word_keys ) : if word != '' : line_nums = uniqueWords [ word ] ndx . write ( f + ', ' + word + ', ' ) for line_num in line_nums : ndx . write ( str ( line_num ) ) ndx . write ( '\n' )
2,530
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L110-L127
[ "def", "log", "(", "self", ",", "subscription_id", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "order_by", "=", "None", ",", "order_dir", "=", "None", ")", ":", "params", "=", "{", "}", "if", "subscription_id", ":", "params", "[", "'id'", "]", "=", "subscription_id", "if", "page", ":", "params", "[", "'page'", "]", "=", "page", "if", "per_page", ":", "params", "[", "'per_page'", "]", "=", "per_page", "if", "order_by", ":", "params", "[", "'order_by'", "]", "=", "order_by", "if", "order_dir", ":", "params", "[", "'order_dir'", "]", "=", "order_dir", "return", "self", ".", "request", ".", "get", "(", "'log'", ",", "params", "=", "params", ")" ]
print the index as a dict
def DisplayIndexAsDictionary ( word_occurrences ) : word_keys = word_occurrences . keys ( ) for num , word in enumerate ( word_keys ) : line_nums = word_occurrences [ word ] print ( word + " " ) if num > 3 : break
2,531
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L129-L138
[ "def", "get_license_assignment_manager", "(", "service_instance", ")", ":", "log", ".", "debug", "(", "'Retrieving license assignment manager'", ")", "try", ":", "lic_assignment_manager", "=", "service_instance", ".", "content", ".", "licenseManager", ".", "licenseAssignmentManager", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "if", "not", "lic_assignment_manager", ":", "raise", "salt", ".", "exceptions", ".", "VMwareObjectRetrievalError", "(", "'License assignment manager was not retrieved'", ")", "return", "lic_assignment_manager" ]
for testing, simply shows the details of a list .
def show ( title , lst , full = - 1 ) : txt = title + ' (' + str ( len ( lst ) ) + ') items :\n ' num = 0 for i in lst : if full == - 1 or num < full : if type ( i ) is str : txt = txt + i + ',\n ' else : txt = txt + i + ', [' for j in i : txt = txt + j + ', ' txt = txt + ']\n' num = num + 1 try : print ( txt ) except Exception as ex : print ( 'index.show() - cant print line, error ' + str ( ex ) )
2,532
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L140-L159
[ "def", "create_api_call", "(", "func", ",", "settings", ")", ":", "def", "base_caller", "(", "api_call", ",", "_", ",", "*", "args", ")", ":", "\"\"\"Simply call api_call and ignore settings.\"\"\"", "return", "api_call", "(", "*", "args", ")", "def", "inner", "(", "request", ",", "options", "=", "None", ")", ":", "\"\"\"Invoke with the actual settings.\"\"\"", "this_options", "=", "_merge_options_metadata", "(", "options", ",", "settings", ")", "this_settings", "=", "settings", ".", "merge", "(", "this_options", ")", "if", "this_settings", ".", "retry", "and", "this_settings", ".", "retry", ".", "retry_codes", ":", "api_call", "=", "gax", ".", "retry", ".", "retryable", "(", "func", ",", "this_settings", ".", "retry", ",", "*", "*", "this_settings", ".", "kwargs", ")", "else", ":", "api_call", "=", "gax", ".", "retry", ".", "add_timeout_arg", "(", "func", ",", "this_settings", ".", "timeout", ",", "*", "*", "this_settings", ".", "kwargs", ")", "api_call", "=", "_catch_errors", "(", "api_call", ",", "gax", ".", "config", ".", "API_ERRORS", ")", "return", "api_caller", "(", "api_call", ",", "this_settings", ",", "request", ")", "if", "settings", ".", "page_descriptor", ":", "if", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "raise", "ValueError", "(", "'The API call has incompatible settings: '", "'bundling and page streaming'", ")", "api_caller", "=", "_page_streamable", "(", "settings", ".", "page_descriptor", ")", "elif", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "api_caller", "=", "_bundleable", "(", "settings", ".", "bundle_descriptor", ")", "else", ":", "api_caller", "=", "base_caller", "return", "inner" ]
extract a unique list of words, along with the line numbers on which each word appears
def getWordList ( ipFile , delim ) : indexedWords = { } totWords = 0 totLines = 0 with codecs . open ( ipFile , "r" , encoding = 'utf-8' , errors = 'replace' ) as f : for line in f : totLines = totLines + 1 words = multi_split ( line , delim ) totWords = totWords + len ( words ) for word in words : cleanedWord = word . lower ( ) . strip ( ) if cleanedWord not in indexedWords : indexedWords [ cleanedWord ] = str ( totLines ) else : indexedWords [ cleanedWord ] = indexedWords [ cleanedWord ] + ' ' + str ( totLines ) return totWords , totLines , indexedWords
2,533
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L161-L179
[ "def", "base_mortality_rate", "(", "self", ",", "index", ":", "pd", ".", "Index", ")", "->", "pd", ".", "Series", ":", "return", "pd", ".", "Series", "(", "self", ".", "config", ".", "mortality_rate", ",", "index", "=", "index", ")" ]
split by multiple delimiters
def multi_split ( txt , delims ) : res = [ txt ] for delimChar in delims : txt , res = res , [ ] for word in txt : if len ( word ) > 1 : res += word . split ( delimChar ) return res
2,534
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L181-L191
[ "def", "to_super_model", "(", "self", ",", "thing", ")", ":", "if", "api", ".", "is_uid", "(", "thing", ")", ":", "return", "SuperModel", "(", "thing", ")", "if", "not", "api", ".", "is_object", "(", "thing", ")", ":", "raise", "TypeError", "(", "\"Expected a portal object, got '{}'\"", ".", "format", "(", "type", "(", "thing", ")", ")", ")", "return", "thing" ]
Preparing and creating script .
def creator ( entry , config ) : script = render ( config . script , model = config . model , env = config . env , item = config . item ) temp = tempfile . NamedTemporaryFile ( prefix = "script-" , suffix = ".py" , mode = 'w+t' , delete = False ) temp . writelines ( script ) temp . close ( ) language = 'python' if 'type' not in entry else entry [ 'type' ] template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/%s-script.sh.j2' % language ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , script = temp . name ) return Script ( config )
2,535
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/script.py#L43-L58
[ "def", "clean_previous_run", "(", "self", ")", ":", "# Execute the base class treatment...", "super", "(", "Alignak", ",", "self", ")", ".", "clean_previous_run", "(", ")", "# Clean all lists", "self", ".", "pollers", ".", "clear", "(", ")", "self", ".", "reactionners", ".", "clear", "(", ")", "self", ".", "brokers", ".", "clear", "(", ")" ]
converts an unknown type to a string for display purposes .
def force_to_string ( unknown ) : result = '' if type ( unknown ) is str : result = unknown if type ( unknown ) is int : result = str ( unknown ) if type ( unknown ) is float : result = str ( unknown ) if type ( unknown ) is dict : result = Dict2String ( unknown ) if type ( unknown ) is list : result = List2String ( unknown ) return result
2,536
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L311-L327
[ "def", "lock", "(", "ctx", ",", "state", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "core", "import", "ensure_project", ",", "do_init", ",", "do_lock", "# Ensure that virtualenv is available.", "ensure_project", "(", "three", "=", "state", ".", "three", ",", "python", "=", "state", ".", "python", ",", "pypi_mirror", "=", "state", ".", "pypi_mirror", ")", "if", "state", ".", "installstate", ".", "requirementstxt", ":", "do_init", "(", "dev", "=", "state", ".", "installstate", ".", "dev", ",", "requirements", "=", "state", ".", "installstate", ".", "requirementstxt", ",", "pypi_mirror", "=", "state", ".", "pypi_mirror", ",", "pre", "=", "state", ".", "installstate", ".", "pre", ",", ")", "do_lock", "(", "ctx", "=", "ctx", ",", "clear", "=", "state", ".", "clear", ",", "pre", "=", "state", ".", "installstate", ".", "pre", ",", "keep_outdated", "=", "state", ".", "installstate", ".", "keep_outdated", ",", "pypi_mirror", "=", "state", ".", "pypi_mirror", ",", ")" ]
For a log session you can add as many watch points as you like, which are used in the aggregation and extraction of key things that happen . Each watch point has a rating ( up to you; it can range from success to total failure ) and an importance for finer control of display
def add_watch_point ( self , string , rating , importance = 5 ) : d = { } d [ 'string' ] = string d [ 'rating' ] = rating d [ 'importance' ] = importance self . watch_points . append ( d )
2,537
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L46-L59
[ "def", "load_fixture", "(", "self", ",", "body", ",", "attachment_bodies", "=", "{", "}", ")", ":", "doc", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_documents", "[", "doc", "[", "'_id'", "]", "]", "=", "doc", "self", ".", "_attachments", "[", "doc", "[", "'_id'", "]", "]", "=", "dict", "(", ")", "for", "name", "in", "doc", ".", "get", "(", "'_attachments'", ",", "list", "(", ")", ")", ":", "attachment_body", "=", "attachment_bodies", ".", "get", "(", "name", ",", "'stub'", ")", "self", ".", "_attachments", "[", "doc", "[", "'_id'", "]", "]", "[", "name", "]", "=", "attachment_body" ]
calculates a rough guess of runtime based on product of parameters
def estimate_complexity ( self , x , y , z , n ) : num_calculations = x * y * z * n run_time = num_calculations / 100000 # a 2014 PC does about 100k calcs in a second (guess based on prior logs) return self . show_time_as_short_string ( run_time )
2,538
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L89-L95
[ "def", "unpack", "(", "cls", ",", "msg", ",", "client", ",", "server", ",", "request_id", ")", ":", "flags", ",", "=", "_UNPACK_INT", "(", "msg", "[", ":", "4", "]", ")", "namespace", ",", "pos", "=", "_get_c_string", "(", "msg", ",", "4", ")", "docs", "=", "bson", ".", "decode_all", "(", "msg", "[", "pos", ":", "]", ",", "CODEC_OPTIONS", ")", "return", "cls", "(", "*", "docs", ",", "namespace", "=", "namespace", ",", "flags", "=", "flags", ",", "_client", "=", "client", ",", "request_id", "=", "request_id", ",", "_server", "=", "server", ")" ]
converts seconds to a string in terms ranging from seconds to years, to show the complexity of an algorithm
def show_time_as_short_string ( self , seconds ) : if seconds < 60 : return str ( seconds ) + ' seconds' elif seconds < 3600 : return str ( round ( seconds / 60 , 1 ) ) + ' minutes' elif seconds < 3600 * 24 : return str ( round ( seconds / ( 60 * 24 ) , 1 ) ) + ' hours' elif seconds < 3600 * 24 * 365 : return str ( round ( seconds / ( 3600 * 24 ) , 1 ) ) + ' days' else : print ( 'WARNING - this will take ' + str ( seconds / ( 60 * 24 * 365 ) ) + ' YEARS to run' ) return str ( round ( seconds / ( 60 * 24 * 365 ) , 1 ) ) + ' years'
2,539
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L98-L113
[ "def", "load_bookmarks_without_file", "(", "filename", ")", ":", "bookmarks", "=", "_load_all_bookmarks", "(", ")", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "bookmarks", ".", "items", "(", ")", "if", "v", "[", "0", "]", "!=", "filename", "}" ]
logs an entry to fname along with standard date and user details
def _log ( self , fname , txt , prg = '' ) : if os . sep not in fname : fname = self . log_folder + os . sep + fname delim = ',' q = '"' dte = TodayAsString ( ) usr = GetUserName ( ) hst = GetHostName ( ) i = self . session_id if prg == '' : prg = 'cls_log.log' logEntry = q + dte + q + delim + q + i + q + delim + q + usr + q + delim + q + hst + q + delim + q + prg + q + delim + q + txt + q + delim + '\n' with open ( fname , "a" , encoding = 'utf-8' , errors = 'replace' ) as myfile : myfile . write ( logEntry )
2,540
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L115-L132
[ "def", "cycle_file", "(", "source_plaintext_filename", ")", ":", "# Create a static random master key provider", "key_id", "=", "os", ".", "urandom", "(", "8", ")", "master_key_provider", "=", "StaticRandomMasterKeyProvider", "(", ")", "master_key_provider", ".", "add_master_key", "(", "key_id", ")", "ciphertext_filename", "=", "source_plaintext_filename", "+", "\".encrypted\"", "cycled_plaintext_filename", "=", "source_plaintext_filename", "+", "\".decrypted\"", "# Encrypt the plaintext source data", "with", "open", "(", "source_plaintext_filename", ",", "\"rb\"", ")", "as", "plaintext", ",", "open", "(", "ciphertext_filename", ",", "\"wb\"", ")", "as", "ciphertext", ":", "with", "aws_encryption_sdk", ".", "stream", "(", "mode", "=", "\"e\"", ",", "source", "=", "plaintext", ",", "key_provider", "=", "master_key_provider", ")", "as", "encryptor", ":", "for", "chunk", "in", "encryptor", ":", "ciphertext", ".", "write", "(", "chunk", ")", "# Decrypt the ciphertext", "with", "open", "(", "ciphertext_filename", ",", "\"rb\"", ")", "as", "ciphertext", ",", "open", "(", "cycled_plaintext_filename", ",", "\"wb\"", ")", "as", "plaintext", ":", "with", "aws_encryption_sdk", ".", "stream", "(", "mode", "=", "\"d\"", ",", "source", "=", "ciphertext", ",", "key_provider", "=", "master_key_provider", ")", "as", "decryptor", ":", "for", "chunk", "in", "decryptor", ":", "plaintext", ".", "write", "(", "chunk", ")", "# Verify that the \"cycled\" (encrypted, then decrypted) plaintext is identical to the source", "# plaintext", "assert", "filecmp", ".", "cmp", "(", "source_plaintext_filename", ",", "cycled_plaintext_filename", ")", "# Verify that the encryption context used in the decrypt operation includes all key pairs from", "# the encrypt operation", "#", "# In production, always use a meaningful encryption context. 
In this sample, we omit the", "# encryption context (no key pairs).", "assert", "all", "(", "pair", "in", "decryptor", ".", "header", ".", "encryption_context", ".", "items", "(", ")", "for", "pair", "in", "encryptor", ".", "header", ".", "encryption_context", ".", "items", "(", ")", ")", "return", "ciphertext_filename", ",", "cycled_plaintext_filename" ]
function to collect raw data from the web and hard drive . Examples : a new source file for ontologies, an email contacts list, a folder of xmas photos
def record_source ( self , src , prg = '' ) : self . _log ( self . logFileSource , force_to_string ( src ) , prg )
2,541
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L135-L140
[ "def", "_summarize_accessible_fields", "(", "field_descriptions", ",", "width", "=", "40", ",", "section_title", "=", "'Accessible fields'", ")", ":", "key_str", "=", "\"{:<{}}: {}\"", "items", "=", "[", "]", "items", ".", "append", "(", "section_title", ")", "items", ".", "append", "(", "\"-\"", "*", "len", "(", "section_title", ")", ")", "for", "field_name", ",", "field_desc", "in", "field_descriptions", ".", "items", "(", ")", ":", "items", ".", "append", "(", "key_str", ".", "format", "(", "field_name", ",", "width", ",", "field_desc", ")", ")", "return", "\"\\n\"", ".", "join", "(", "items", ")" ]
record the command passed - this is usually the name of the program being run or task being run
def record_command ( self , cmd , prg = '' ) : self . _log ( self . logFileCommand , force_to_string ( cmd ) , prg )
2,542
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L148-L153
[ "def", "ensure_compatible_admin", "(", "view", ")", ":", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "user_roles", "=", "request", ".", "user", ".", "user_data", ".", "get", "(", "'roles'", ",", "[", "]", ")", "if", "len", "(", "user_roles", ")", "!=", "1", ":", "context", "=", "{", "'message'", ":", "'I need to be able to manage user accounts. '", "'My username is %s'", "%", "request", ".", "user", ".", "username", "}", "return", "render", "(", "request", ",", "'mtp_common/user_admin/incompatible-admin.html'", ",", "context", "=", "context", ")", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
record the output of the command . A result can have multiple entries, so a consistent way to aggregate this will need to be worked out
def record_result ( self , res , prg = '' ) : self . _log ( self . logFileResult , force_to_string ( res ) , prg )
2,543
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L155-L160
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
read a logfile and return entries for a program
def extract_logs ( self , fname , prg ) : op = [ ] with open ( fname , 'r' ) as f : for line in f : if prg in line : op . append ( line ) return op
2,544
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L216-L225
[ "def", "ensure_compatible_admin", "(", "view", ")", ":", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "user_roles", "=", "request", ".", "user", ".", "user_data", ".", "get", "(", "'roles'", ",", "[", "]", ")", "if", "len", "(", "user_roles", ")", "!=", "1", ":", "context", "=", "{", "'message'", ":", "'I need to be able to manage user accounts. '", "'My username is %s'", "%", "request", ".", "user", ".", "username", "}", "return", "render", "(", "request", ",", "'mtp_common/user_admin/incompatible-admin.html'", ",", "context", "=", "context", ")", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
takes the logfiles and produces an event summary matrix date command result process source 20140421 9 40 178 9 20140423 0 0 6 0 20140424 19 1 47 19 20140425 24 0 117 24 20140426 16 0 83 16 20140427 1 0 6 1 20140429 0 0 0 4
def summarise_events ( self ) : all_dates = [ ] d_command = self . _count_by_date ( self . command_file , all_dates ) d_result = self . _count_by_date ( self . result_file , all_dates ) d_process = self . _count_by_date ( self . process_file , all_dates ) d_source = self . _count_by_date ( self . source_file , all_dates ) with open ( self . log_sum , "w" ) as sum_file : sum_file . write ( 'date,command,result,process,source\n' ) for dte in sorted ( set ( all_dates ) ) : sum_file . write ( dte + ',' ) if dte in d_command : sum_file . write ( str ( d_command [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_result : sum_file . write ( str ( d_result [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_process : sum_file . write ( str ( d_process [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_source : sum_file . write ( str ( d_source [ dte ] ) + '\n' ) else : sum_file . write ( '0\n' )
2,545
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L227-L265
[ "def", "remove_all_timers", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "rtimer", "is", "not", "None", ":", "self", ".", "rtimer", ".", "cancel", "(", ")", "self", ".", "timers", "=", "{", "}", "self", ".", "heap", "=", "[", "]", "self", ".", "rtimer", "=", "None", "self", ".", "expiring", "=", "False" ]
reads a logfile and returns a dictionary by date showing the count of log entries
def _count_by_date ( self , fname , all_dates ) : if not os . path . isfile ( fname ) : return { } d_log_sum = { } with open ( fname , "r" ) as raw_log : for line in raw_log : cols = line . split ( ',' ) dte = cols [ 0 ] . strip ( '"' ) [ 0 : 10 ] . replace ( '-' , '' ) all_dates . append ( dte ) if dte in d_log_sum : d_log_sum [ dte ] += 1 else : d_log_sum [ dte ] = 1 return d_log_sum
2,546
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L267-L284
[ "def", "get_vlan_from_associate_reply", "(", "self", ",", "reply", ",", "vsiid", ",", "mac", ")", ":", "try", ":", "verify_flag", ",", "fail_reason", "=", "self", ".", "crosscheck_reply_vsiid_mac", "(", "reply", ",", "vsiid", ",", "mac", ")", "if", "not", "verify_flag", ":", "return", "constants", ".", "INVALID_VLAN", ",", "fail_reason", "mode_str", "=", "reply", ".", "partition", "(", "\"mode = \"", ")", "[", "2", "]", ".", "split", "(", ")", "[", "0", "]", "if", "mode_str", "!=", "\"assoc\"", ":", "fail_reason", "=", "self", ".", "get_vdp_failure_reason", "(", "reply", ")", "return", "constants", ".", "INVALID_VLAN", ",", "fail_reason", "except", "Exception", ":", "fail_reason", "=", "vdp_const", ".", "mode_failure_reason", "%", "(", "reply", ")", "LOG", ".", "error", "(", "\"%s\"", ",", "fail_reason", ")", "return", "constants", ".", "INVALID_VLAN", ",", "fail_reason", "check_filter", ",", "fail_reason", "=", "self", ".", "check_filter_validity", "(", "reply", ",", "\"filter = \"", ")", "if", "not", "check_filter", ":", "return", "constants", ".", "INVALID_VLAN", ",", "fail_reason", "try", ":", "vlan_val", "=", "reply", ".", "partition", "(", "\"filter = \"", ")", "[", "2", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", "vlan", "=", "int", "(", "vlan_val", ")", "except", "ValueError", ":", "fail_reason", "=", "vdp_const", ".", "format_failure_reason", "%", "(", "reply", ")", "LOG", ".", "error", "(", "\"%s\"", ",", "fail_reason", ")", "return", "constants", ".", "INVALID_VLAN", ",", "fail_reason", "return", "vlan", ",", "None" ]
provides a mapping from the CSV file to the aikif data structures .
def map_data ( self ) : with open ( self . src_file , "r" ) as f : for line in f : cols = line . split ( ',' ) print ( cols )
2,547
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent_map_data.py#L29-L37
[ "def", "IsHuntStarted", "(", "self", ")", ":", "state", "=", "self", ".", "hunt_obj", ".", "Get", "(", "self", ".", "hunt_obj", ".", "Schema", ".", "STATE", ")", "if", "state", "!=", "\"STARTED\"", ":", "return", "False", "# Stop the hunt due to expiry.", "if", "self", ".", "CheckExpiry", "(", ")", ":", "return", "False", "return", "True" ]
Returns the string of a variable name .
def variablename ( var ) : s = [ tpl [ 0 ] for tpl in itertools . ifilter ( lambda x : var is x [ 1 ] , globals ( ) . items ( ) ) ] s = s [ 0 ] . upper ( ) return s
2,548
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L6-L12
[ "def", "url_to_resource", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "get_current_request", "(", ")", "# cnv = request.registry.getAdapter(request, IResourceUrlConverter)", "reg", "=", "get_current_registry", "(", ")", "cnv", "=", "reg", ".", "getAdapter", "(", "request", ",", "IResourceUrlConverter", ")", "return", "cnv", ".", "url_to_resource", "(", "url", ")" ]
Performs a blast query online .
def BLASTquery ( query , database , program , filter = None , format_type = None , expect = None , nucl_reward = None , nucl_penalty = None , gapcosts = None , matrix = None , hitlist_size = None , descriptions = None , alignments = None , ncbi_gi = None , threshold = None , word_size = None , composition_based_statistics = None , organism = None , others = None , num_threads = None , baseURL = "http://blast.ncbi.nlm.nih.gov" , verbose = False ) : if organism : organism = organism . replace ( " " , "%20" ) . replace ( "(" , "%28" ) . replace ( ")" , "%29" ) . replace ( ":" , "%3A" ) EQ_MENU = organism else : EQ_MENU = None URL = baseURL + "/Blast.cgi?" URL = URL + "QUERY=" + str ( query ) + "&DATABASE=" + str ( database ) + "&PROGRAM=" + str ( program ) for o , varname in zip ( [ filter , format_type , expect , nucl_reward , nucl_penalty , gapcosts , matrix , hitlist_size , descriptions , alignments , ncbi_gi , threshold , word_size , composition_based_statistics , EQ_MENU , num_threads ] , [ 'FILTER' , 'FORMAT_TYPE' , 'EXPECT' , 'NUCL_REWARD' , 'NUCL_PENALTY' , 'GAPCOSTS' , 'MATRIX' , 'HITLIST_SIZE' , 'DESCRIPTIONS' , 'ALIGNMENTS' , 'NCBI_GI' , 'THRESHOLD' , 'WORD_SIZE' , 'COMPOSITION_BASED_STATISTICS' , 'EQ_MENU' , 'NUM_THREADS' ] ) : if o : URL = URL + "&" + varname + "=" + str ( o ) if others : URL = URL + "&" + others URL = URL + "&CMD=Put" if verbose : print ( URL ) sys . stdout . flush ( ) response = requests . get ( url = URL ) r = response . content . split ( "\n" ) RID = [ s for s in r if "RID = " in s ] if len ( RID ) > 0 : RID = RID [ 0 ] . split ( " " ) [ - 1 ] else : print ( "Could not return an RID for this query." ) RID = None return RID
2,549
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L14-L94
[ "def", "set_content_permissions", "(", "self", ",", "user", ",", "obj", ",", "payload", ")", ":", "for", "entity", "in", "obj", ".", "entity_set", ".", "all", "(", ")", ":", "if", "user", ".", "has_perm", "(", "'share_entity'", ",", "entity", ")", ":", "update_permission", "(", "entity", ",", "payload", ")", "# Data doesn't have \"ADD\" permission, so it has to be removed", "payload", "=", "remove_permission", "(", "payload", ",", "'add'", ")", "for", "data", "in", "obj", ".", "data", ".", "all", "(", ")", ":", "if", "user", ".", "has_perm", "(", "'share_data'", ",", "data", ")", ":", "update_permission", "(", "data", ",", "payload", ")" ]
Checks the status of a query .
def BLASTcheck ( rid , baseURL = "http://blast.ncbi.nlm.nih.gov" ) : URL = baseURL + "/Blast.cgi?" URL = URL + "FORMAT_OBJECT=SearchInfo&RID=" + rid + "&CMD=Get" response = requests . get ( url = URL ) r = response . content . split ( "\n" ) try : status = [ s for s in r if "Status=" in s ] [ 0 ] . split ( "=" ) [ - 1 ] ThereAreHits = [ s for s in r if "ThereAreHits=" in s ] [ 0 ] . split ( "=" ) [ - 1 ] except : status = None ThereAreHits = None print ( rid , status , ThereAreHits ) sys . stdout . flush ( ) return status , ThereAreHits
2,550
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L96-L121
[ "def", "removefromreadergroup", "(", "self", ",", "groupname", ")", ":", "hresult", ",", "hcontext", "=", "SCardEstablishContext", "(", "SCARD_SCOPE_USER", ")", "if", "0", "!=", "hresult", ":", "raise", "EstablishContextException", "(", "hresult", ")", "try", ":", "hresult", "=", "SCardRemoveReaderFromGroup", "(", "hcontext", ",", "self", ".", "name", ",", "groupname", ")", "if", "0", "!=", "hresult", ":", "raise", "RemoveReaderFromGroupException", "(", "hresult", ",", "self", ".", "name", ",", "groupname", ")", "finally", ":", "hresult", "=", "SCardReleaseContext", "(", "hcontext", ")", "if", "0", "!=", "hresult", ":", "raise", "ReleaseContextException", "(", "hresult", ")" ]
Retrieves results for an RID .
def BLASTresults ( rid , format_type = "Tabular" , hitlist_size = None , alignments = None , ncbi_gi = None , format_object = None , baseURL = "http://blast.ncbi.nlm.nih.gov" ) : URL = baseURL + "/Blast.cgi?" URL = URL + "RID=" + str ( rid ) + "&FORMAT_TYPE=" + str ( format_type ) for o in [ hitlist_size , alignments , ncbi_gi , format_object ] : if o : URL = URL + "&" + variablename ( var ) + "=" + str ( o ) URL = URL + "&CMD=Get" response = requests . get ( url = URL ) response = response . content if format_type == "Tabular" : result = response . split ( "\n" ) result = [ s . split ( "\t" ) for s in result ] [ 6 : ] header = result [ : 7 ] content = result [ 7 : ] fields = header [ 5 ] [ 0 ] . strip ( "# Fields: " ) . split ( ", " ) result = pd . DataFrame ( content , columns = fields ) response = result [ : int ( header [ - 1 ] [ 0 ] . split ( " " ) [ 1 ] ) ] return response
2,551
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/blast.py#L123-L160
[ "def", "handle_current_state", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'_current_state_hydrated_changed'", ",", "False", ")", "and", "self", ".", "save_on_change", ":", "new_base_state", "=", "json", ".", "dumps", "(", "getattr", "(", "self", ",", "'_current_state_hydrated'", ",", "{", "}", ")", ")", "if", "new_base_state", "!=", "self", ".", "base_state", ":", "self", ".", "base_state", "=", "new_base_state", "self", ".", "save", "(", ")" ]
Generating HTML report .
def generate_html ( store ) : spline = { 'version' : VERSION , 'url' : 'https://github.com/Nachtfeuer/pipeline' , 'generated' : datetime . now ( ) . strftime ( "%A, %d. %B %Y - %I:%M:%S %p" ) } html_template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/report.html.j2' ) with open ( html_template_file ) as handle : html_template = handle . read ( ) return render ( html_template , spline = spline , store = store )
2,552
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/generator.py#L26-L45
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Handle begin of a list .
def __begin_of_list ( self , ast_token ) : self . list_level += 1 if self . list_level == 1 : self . final_ast_tokens . append ( ast_token )
2,553
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L41-L45
[ "def", "delete_entity", "(", "self", ",", "entity_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/entity/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", "=", "api_path", ",", ")" ]
Handle end of a list .
def __end_of_list ( self , ast_token ) : self . list_level -= 1 if self . list_level == 0 : if self . list_entry is not None : self . final_ast_tokens . append ( self . list_entry ) self . list_entry = None self . final_ast_tokens . append ( ast_token )
2,554
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L47-L54
[ "def", "delete_entity", "(", "self", ",", "entity_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/entity/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", "=", "api_path", ",", ")" ]
Handle tokens inside the list or outside the list .
def __default ( self , ast_token ) : if self . list_level == 1 : if self . list_entry is None : self . list_entry = ast_token elif not isinstance ( ast_token , type ( self . list_entry ) ) : self . final_ast_tokens . append ( ast_token ) elif self . list_level == 0 : self . final_ast_tokens . append ( ast_token )
2,555
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L56-L64
[ "def", "calculate_md5", "(", "fileobject", ",", "size", "=", "2", "**", "16", ")", ":", "fileobject", ".", "seek", "(", "0", ")", "md5", "=", "hashlib", ".", "md5", "(", ")", "for", "data", "in", "iter", "(", "lambda", ":", "fileobject", ".", "read", "(", "size", ")", ",", "b''", ")", ":", "if", "not", "data", ":", "break", "if", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf-8'", ")", "# md5 needs a byte string", "md5", ".", "update", "(", "data", ")", "fileobject", ".", "seek", "(", "0", ")", "# rewind read head", "return", "md5", ".", "hexdigest", "(", ")" ]
Main function of compression .
def compress ( self ) : for ast_token in self . ast_tokens : if type ( ast_token ) in self . dispatcher : # pylint: disable=unidiomatic-typecheck self . dispatcher [ type ( ast_token ) ] ( ast_token ) else : self . dispatcher [ 'default' ] ( ast_token )
2,556
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L66-L72
[ "def", "describe_topic", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "topics", "=", "list_topics", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "=", "{", "}", "for", "topic", ",", "arn", "in", "topics", ".", "items", "(", ")", ":", "if", "name", "in", "(", "topic", ",", "arn", ")", ":", "ret", "=", "{", "'TopicArn'", ":", "arn", "}", "ret", "[", "'Attributes'", "]", "=", "get_topic_attributes", "(", "arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "[", "'Subscriptions'", "]", "=", "list_subscriptions_by_topic", "(", "arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "# Grab extended attributes for the above subscriptions", "for", "sub", "in", "range", "(", "len", "(", "ret", "[", "'Subscriptions'", "]", ")", ")", ":", "sub_arn", "=", "ret", "[", "'Subscriptions'", "]", "[", "sub", "]", "[", "'SubscriptionArn'", "]", "if", "not", "sub_arn", ".", "startswith", "(", "'arn:aws:sns:'", ")", ":", "# Sometimes a sub is in e.g. PendingAccept or other", "# wierd states and doesn't have an ARN yet", "log", ".", "debug", "(", "'Subscription with invalid ARN %s skipped...'", ",", "sub_arn", ")", "continue", "deets", "=", "get_subscription_attributes", "(", "SubscriptionArn", "=", "sub_arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "[", "'Subscriptions'", "]", "[", "sub", "]", ".", "update", "(", "deets", ")", "return", "ret" ]
Get AST tokens for Python condition .
def get_tokens ( condition ) : try : ast_tokens = list ( ast . walk ( ast . parse ( condition . strip ( ) ) ) ) except SyntaxError as exception : Logger . get_logger ( __name__ ) . error ( "Syntax error: %s" , exception ) ast_tokens = [ ] return ast_tokens
2,557
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L129-L141
[ "def", "cache_affected_objects_review_history", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Caching review_history ...\"", ")", "query", "=", "dict", "(", "portal_type", "=", "NEW_SENAITE_WORKFLOW_BINDINGS", ")", "brains", "=", "api", ".", "search", "(", "query", ",", "UID_CATALOG", ")", "total", "=", "len", "(", "brains", ")", "for", "num", ",", "brain", "in", "enumerate", "(", "brains", ")", ":", "if", "num", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "\"Caching review_history: {}/{}\"", ".", "format", "(", "num", ",", "total", ")", ")", "review_history", "=", "get_review_history_for", "(", "brain", ")", "review_history_cache", "[", "api", ".", "get_uid", "(", "brain", ")", "]", "=", "review_history" ]
Verify that each token in order does match the expected types .
def match_tokens ( ast_tokens , ast_types ) : ast_final_types = [ ast . Module , ast . Expr ] + ast_types return all ( isinstance ( ast_token , ast_type ) for ast_token , ast_type in zip ( ast_tokens , ast_final_types ) )
2,558
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L144-L167
[ "def", "_decompress", "(", "self", ",", "fp", ")", ":", "decompressor", "=", "zlib", ".", "decompressobj", "(", ")", "if", "self", ".", "stream", ":", "return", "Proxy", "(", "decompressor", ".", "decompress", ",", "fp", ")", "else", ":", "out", "=", "io", ".", "BytesIO", "(", "decompressor", ".", "decompress", "(", "fp", ".", "read", "(", ")", ")", ")", "out", ".", "write", "(", "decompressor", ".", "flush", "(", ")", ")", "out", ".", "seek", "(", "0", ")", "return", "out" ]
Find rule for given condition .
def find_rule ( condition ) : final_condition = re . sub ( '{{.*}}' , '42' , condition ) ast_tokens = Condition . get_tokens ( final_condition ) ast_compressed_tokens = Condition . compress_tokens ( ast_tokens ) name = 'undefined' function = lambda tokens : False if len ( ast_compressed_tokens ) > 0 : for rule in Condition . RULES : if Condition . match_tokens ( ast_compressed_tokens , rule [ 'types' ] ) : name = rule [ 'name' ] function = rule [ 'evaluate' ] break return name , ast_tokens , function
2,559
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L229-L253
[ "def", "create_pgroup_snapshot", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "# In REST 1.4, support was added for snapshotting multiple pgroups. As a", "# result, the endpoint response changed from an object to an array of", "# objects. To keep the response type consistent between REST versions,", "# we unbox the response when creating a single snapshot.", "result", "=", "self", ".", "create_pgroup_snapshots", "(", "[", "source", "]", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.4\"", ")", ":", "headers", "=", "result", ".", "headers", "result", "=", "ResponseDict", "(", "result", "[", "0", "]", ")", "result", ".", "headers", "=", "headers", "return", "result" ]
Evaluate simple condition .
def evaluate ( condition ) : success = False if len ( condition ) > 0 : try : rule_name , ast_tokens , evaluate_function = Condition . find_rule ( condition ) if not rule_name == 'undefined' : success = evaluate_function ( ast_tokens ) except AttributeError as exception : Logger . get_logger ( __name__ ) . error ( "Attribute error: %s" , exception ) else : success = True return success
2,560
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L256-L293
[ "def", "create_pgroup_snapshot", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "# In REST 1.4, support was added for snapshotting multiple pgroups. As a", "# result, the endpoint response changed from an object to an array of", "# objects. To keep the response type consistent between REST versions,", "# we unbox the response when creating a single snapshot.", "result", "=", "self", ".", "create_pgroup_snapshots", "(", "[", "source", "]", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.4\"", ")", ":", "headers", "=", "result", ".", "headers", "result", "=", "ResponseDict", "(", "result", "[", "0", "]", ")", "result", ".", "headers", "=", "headers", "return", "result" ]
starts the web interface and possibly other processes
def start_aikif ( ) : if sys . platform [ 0 : 3 ] == 'win' : os . system ( "start go_web_aikif.bat" ) else : os . system ( "../aikif/web_app/web_aikif.py" ) import webbrowser import time time . sleep ( 1 ) webbrowser . open ( 'http://127.0.0.1:5000' )
2,561
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/run.py#L49-L60
[ "def", "change_column", "(", "self", ",", "name", ",", "options", ")", ":", "column", "=", "self", ".", "get_column", "(", "name", ")", "column", ".", "set_options", "(", "options", ")", "return", "self" ]
Get creator function by name .
def get_creator_by_name ( name ) : return { 'docker(container)' : Container . creator , 'shell' : Bash . creator , 'docker(image)' : Image . creator , 'python' : Script . creator , 'packer' : Packer . creator , 'ansible(simple)' : Ansible . creator } [ name ]
2,562
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L36-L49
[ "def", "merge_entities", "(", "self", ",", "from_entity_ids", ",", "to_entity_id", ",", "force", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'from_entity_ids'", ":", "from_entity_ids", ",", "'to_entity_id'", ":", "to_entity_id", ",", "'force'", ":", "force", ",", "}", "api_path", "=", "'/v1/{mount_point}/entity/merge'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Running on shell via multiprocessing .
def worker ( data ) : creator = get_creator_by_name ( data [ 'creator' ] ) shell = creator ( data [ 'entry' ] , ShellConfig ( script = data [ 'entry' ] [ 'script' ] , title = data [ 'entry' ] [ 'title' ] if 'title' in data [ 'entry' ] else '' , model = data [ 'model' ] , env = data [ 'env' ] , item = data [ 'item' ] , dry_run = data [ 'dry_run' ] , debug = data [ 'debug' ] , strict = data [ 'strict' ] , variables = data [ 'variables' ] , temporary_scripts_path = data [ 'temporary_scripts_path' ] ) ) output = [ ] for line in shell . process ( ) : output . append ( line ) Logger . get_logger ( __name__ + '.worker' ) . info ( " | %s" , line ) return { 'id' : data [ 'id' ] , 'success' : shell . success , 'output' : output }
2,563
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L52-L66
[ "def", "from_dataframe", "(", "cls", ",", "name", ",", "df", ",", "indices", ",", "primary_key", "=", "None", ")", ":", "# ordered list (column_name, column_type) pairs", "column_types", "=", "[", "]", "# which columns have nullable values", "nullable", "=", "set", "(", ")", "# tag cached database by dataframe's number of rows and columns", "for", "column_name", "in", "df", ".", "columns", ":", "values", "=", "df", "[", "column_name", "]", "if", "values", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "nullable", ".", "add", "(", "column_name", ")", "column_db_type", "=", "db_type", "(", "values", ".", "dtype", ")", "column_types", ".", "append", "(", "(", "column_name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ",", "column_db_type", ")", ")", "def", "make_rows", "(", ")", ":", "return", "list", "(", "tuple", "(", "row", ")", "for", "row", "in", "df", ".", "values", ")", "return", "cls", "(", "name", "=", "name", ",", "column_types", "=", "column_types", ",", "make_rows", "=", "make_rows", ",", "indices", "=", "indices", ",", "nullable", "=", "nullable", ",", "primary_key", "=", "primary_key", ")" ]
Copying and merging environment variables .
def get_merged_env ( self , include_os = False ) : env = { } if include_os : env . update ( os . environ . copy ( ) ) for level in range ( 3 ) : env . update ( self . pipeline . data . env_list [ level ] . copy ( ) ) return env
2,564
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L80-L96
[ "def", "cache_train", "(", "self", ")", ":", "filename", "=", "self", ".", "get_cache_location", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "False", "categories", "=", "pickle", ".", "load", "(", "open", "(", "filename", ",", "'rb'", ")", ")", "assert", "isinstance", "(", "categories", ",", "BayesCategories", ")", ",", "\"Cache data is either corrupt or invalid\"", "self", ".", "categories", "=", "categories", "# Updating our per-category overall probabilities", "self", ".", "calculate_category_probability", "(", ")", "return", "True" ]
Prepare one shell or docker task .
def prepare_shell_data ( self , shells , key , entry ) : if self . can_process_shell ( entry ) : if key in [ 'python' ] : entry [ 'type' ] = key if 'with' in entry and isinstance ( entry [ 'with' ] , str ) : rendered_with = ast . literal_eval ( render ( entry [ 'with' ] , variables = self . pipeline . variables , model = self . pipeline . model , env = self . get_merged_env ( include_os = True ) ) ) elif 'with' in entry : rendered_with = entry [ 'with' ] else : rendered_with = [ '' ] for item in rendered_with : shells . append ( { 'id' : self . next_task_id , 'creator' : key , 'entry' : entry , 'model' : self . pipeline . model , 'env' : self . get_merged_env ( ) , 'item' : item , 'dry_run' : self . pipeline . options . dry_run , 'debug' : self . pipeline . options . debug , 'strict' : self . pipeline . options . strict , 'variables' : self . pipeline . variables , 'temporary_scripts_path' : self . pipeline . options . temporary_scripts_path } ) self . next_task_id += 1
2,565
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L98-L127
[ "def", "read_frames", "(", "cls", ",", "reader", ")", ":", "rval", "=", "deque", "(", ")", "while", "True", ":", "frame_start_pos", "=", "reader", ".", "tell", "(", ")", "try", ":", "frame", "=", "Frame", ".", "_read_frame", "(", "reader", ")", "except", "Reader", ".", "BufferUnderflow", ":", "# No more data in the stream", "frame", "=", "None", "except", "Reader", ".", "ReaderError", "as", "e", ":", "# Some other format error", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "except", "struct", ".", "error", "as", "e", ":", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "if", "frame", "is", "None", ":", "reader", ".", "seek", "(", "frame_start_pos", ")", "break", "rval", ".", "append", "(", "frame", ")", "return", "rval" ]
Processing a group of tasks .
def process ( self , document ) : self . logger . info ( "Processing group of tasks (parallel=%s)" , self . get_parallel_mode ( ) ) self . pipeline . data . env_list [ 2 ] = { } output , shells = [ ] , [ ] result = Adapter ( { 'success' : True , 'output' : [ ] } ) for task_entry in document : key , entry = list ( task_entry . items ( ) ) [ 0 ] if ( not self . parallel or key == 'env' ) and len ( shells ) > 0 : result = Adapter ( self . process_shells ( shells ) ) output += result . output shells = [ ] if not result . success : break if key == 'env' : self . pipeline . data . env_list [ 2 ] . update ( entry ) elif key in [ 'shell' , 'docker(container)' , 'docker(image)' , 'python' , 'packer' , 'ansible(simple)' ] : self . prepare_shell_data ( shells , key , entry ) if result . success : result = Adapter ( self . process_shells ( shells ) ) output += result . output self . event . delegate ( result . success ) return { 'success' : result . success , 'output' : output }
2,566
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L135-L164
[ "def", "save_spectral_lines_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "spectral_lines", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'arc'", ",", "'arc'", ",", "'oh'", ",", "'oh'", "]", ",", "[", "False", ",", "True", ",", "False", ",", "True", "]", ",", "[", "'rawimage'", ",", "'rectified'", ",", "'rawimage'", ",", "'rectified'", "]", ")", ":", "output", "=", "spectral_lines_to_ds9", "(", "rectwv_coeff", "=", "rectwv_coeff", ",", "spectral_lines", "=", "spectral_lines", ",", "rectified", "=", "rectified", ")", "filename", "=", "'ds9_'", "+", "spectral_lines", "+", "'_'", "+", "suffix", "+", "'.reg'", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> Saving: '", ",", "filename", ")", "save_ds9", "(", "output", ",", "filename", ")" ]
Processing a list of shells parallel .
def process_shells_parallel ( self , shells ) : output = [ ] success = True with closing ( multiprocessing . Pool ( multiprocessing . cpu_count ( ) ) ) as pool : for result in [ Adapter ( entry ) for entry in pool . map ( worker , [ shell for shell in shells ] ) ] : output += result . output the_shell = [ shell for shell in shells if shell [ 'id' ] == result . id ] [ 0 ] self . __handle_variable ( the_shell [ 'entry' ] , result . output ) if not result . success : success = False if success : self . logger . info ( "Parallel Processing Bash code: finished" ) return { 'success' : True , 'output' : output } for line in self . run_cleanup ( shells [ 0 ] [ 'env' ] , 99 ) : output . append ( line ) self . logger . error ( "Pipeline has failed: immediately leaving!" ) self . event . failed ( ) return { 'success' : False , 'output' : output }
2,567
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L166-L185
[ "def", "LogHttpAdminUIAccess", "(", "self", ",", "request", ",", "response", ")", ":", "# TODO(user): generate event_id elsewhere and use it for all the log", "# messages that have to do with handling corresponding request.", "event_id", "=", "self", ".", "GetNewEventId", "(", ")", "api_method", "=", "response", ".", "headers", ".", "get", "(", "\"X-API-Method\"", ",", "\"unknown\"", ")", "api_reason", "=", "response", ".", "headers", ".", "get", "(", "\"X-GRR-Reason\"", ",", "\"none\"", ")", "log_msg", "=", "\"%s API call [%s] by %s (reason: %s): %s [%d]\"", "%", "(", "event_id", ",", "api_method", ",", "request", ".", "user", ",", "api_reason", ",", "request", ".", "full_path", ",", "response", ".", "status_code", ")", "logging", ".", "info", "(", "log_msg", ")", "if", "response", ".", "headers", ".", "get", "(", "\"X-No-Log\"", ")", "!=", "\"True\"", ":", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "entry", "=", "rdf_objects", ".", "APIAuditEntry", ".", "FromHttpRequestResponse", "(", "request", ",", "response", ")", "data_store", ".", "REL_DB", ".", "WriteAPIAuditEntry", "(", "entry", ")" ]
Processing a list of shells one after the other .
def process_shells_ordered ( self , shells ) : output = [ ] for shell in shells : entry = shell [ 'entry' ] config = ShellConfig ( script = entry [ 'script' ] , title = entry [ 'title' ] if 'title' in entry else '' , model = shell [ 'model' ] , env = shell [ 'env' ] , item = shell [ 'item' ] , dry_run = shell [ 'dry_run' ] , debug = shell [ 'debug' ] , strict = shell [ 'strict' ] , variables = shell [ 'variables' ] , temporary_scripts_path = shell [ 'temporary_scripts_path' ] ) result = Adapter ( self . process_shell ( get_creator_by_name ( shell [ 'creator' ] ) , entry , config ) ) output += result . output self . __handle_variable ( entry , result . output ) if not result . success : return { 'success' : False , 'output' : output } return { 'success' : True , 'output' : output }
2,568
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L187-L202
[ "def", "url", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "indicator_obj", "=", "URL", "(", "text", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_indicator", "(", "indicator_obj", ")" ]
Processing a list of shells .
def process_shells ( self , shells ) : result = { 'success' : True , 'output' : [ ] } if self . parallel and len ( shells ) > 1 : result = self . process_shells_parallel ( shells ) elif len ( shells ) > 0 : result = self . process_shells_ordered ( shells ) return result
2,569
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L204-L211
[ "def", "load", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "reader", "=", "T7Reader", "(", "f", ",", "*", "*", "kwargs", ")", "return", "reader", ".", "read_obj", "(", ")" ]
Processing a shell entry .
def process_shell ( self , creator , entry , config ) : self . logger . info ( "Processing Bash code: start" ) output = [ ] shell = creator ( entry , config ) for line in shell . process ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) if shell . success : self . logger . info ( "Processing Bash code: finished" ) return { 'success' : True , 'output' : output } for line in self . run_cleanup ( config . env , shell . exit_code ) : output . append ( line ) self . logger . error ( "Pipeline has failed: leaving as soon as possible!" ) self . event . failed ( ) return { 'success' : False , 'output' : output }
2,570
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L230-L249
[ "def", "fetch_additional_posts", "(", "self", ",", "uid", ")", ":", "cats", "=", "MPost2Catalog", ".", "query_by_entity_uid", "(", "uid", ",", "kind", "=", "self", ".", "kind", ")", "cat_uid_arr", "=", "[", "]", "for", "cat_rec", "in", "cats", ":", "cat_uid", "=", "cat_rec", ".", "tag_id", "cat_uid_arr", ".", "append", "(", "cat_uid", ")", "logger", ".", "info", "(", "'info category: {0}'", ".", "format", "(", "cat_uid_arr", ")", ")", "rel_recs", "=", "MRelation", ".", "get_app_relations", "(", "uid", ",", "8", ",", "kind", "=", "self", ".", "kind", ")", ".", "objects", "(", ")", "logger", ".", "info", "(", "'rel_recs count: {0}'", ".", "format", "(", "rel_recs", ".", "count", "(", ")", ")", ")", "if", "cat_uid_arr", ":", "rand_recs", "=", "MPost", ".", "query_cat_random", "(", "cat_uid_arr", "[", "0", "]", ",", "limit", "=", "4", "-", "rel_recs", ".", "count", "(", ")", "+", "4", ")", "else", ":", "rand_recs", "=", "MPost", ".", "query_random", "(", "num", "=", "4", "-", "rel_recs", ".", "count", "(", ")", "+", "4", ",", "kind", "=", "self", ".", "kind", ")", "return", "rand_recs", ",", "rel_recs" ]
Run cleanup hook when configured .
def run_cleanup ( self , env , exit_code ) : output = [ ] if self . pipeline . data . hooks and len ( self . pipeline . data . hooks . cleanup ) > 0 : env . update ( { 'PIPELINE_RESULT' : 'FAILURE' } ) env . update ( { 'PIPELINE_SHELL_EXIT_CODE' : str ( exit_code ) } ) config = ShellConfig ( script = self . pipeline . data . hooks . cleanup , model = self . pipeline . model , env = env , dry_run = self . pipeline . options . dry_run , debug = self . pipeline . options . debug , strict = self . pipeline . options . strict , temporary_scripts_path = self . pipeline . options . temporary_scripts_path ) cleanup_shell = Bash ( config ) for line in cleanup_shell . process ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) return output
2,571
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L251-L267
[ "def", "_exchange_refresh_tokens", "(", "self", ")", ":", "if", "self", ".", "token_cache", "is", "not", "None", "and", "'refresh'", "in", "self", ".", "token_cache", ":", "# Attempt to use the refresh token to get a new access token.", "refresh_form", "=", "{", "'grant_type'", ":", "'refresh_token'", ",", "'refresh_token'", ":", "self", ".", "token_cache", "[", "'refresh'", "]", ",", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", ",", "}", "try", ":", "tokens", "=", "self", ".", "_request_tokens_from_token_endpoint", "(", "refresh_form", ")", "tokens", "[", "'refresh'", "]", "=", "self", ".", "token_cache", "[", "'refresh'", "]", "return", "tokens", "except", "OAuth2Exception", ":", "logging", ".", "exception", "(", "'Encountered an exception during refresh token flow.'", ")", "return", "None" ]
Saving output for configured variable name .
def __handle_variable ( self , shell_entry , output ) : if 'variable' in shell_entry : variable_name = shell_entry [ 'variable' ] self . pipeline . variables [ variable_name ] = "\n" . join ( output )
2,572
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L269-L279
[ "def", "_resample", "(", "self", ",", "arrays", ",", "ji_windows", ")", ":", "# get a destination array template", "win_dst", "=", "ji_windows", "[", "self", ".", "dst_res", "]", "aff_dst", "=", "self", ".", "_layer_meta", "[", "self", ".", "_res_indices", "[", "self", ".", "dst_res", "]", "[", "0", "]", "]", "[", "\"transform\"", "]", "arrays_dst", "=", "list", "(", ")", "for", "i", ",", "array", "in", "enumerate", "(", "arrays", ")", ":", "arr_dst", "=", "np", ".", "zeros", "(", "(", "int", "(", "win_dst", ".", "height", ")", ",", "int", "(", "win_dst", ".", "width", ")", ")", ")", "if", "self", ".", "_layer_resolution", "[", "i", "]", ">", "self", ".", "dst_res", ":", "resampling", "=", "getattr", "(", "Resampling", ",", "self", ".", "upsampler", ")", "elif", "self", ".", "_layer_resolution", "[", "i", "]", "<", "self", ".", "dst_res", ":", "resampling", "=", "getattr", "(", "Resampling", ",", "self", ".", "downsampler", ")", "else", ":", "arrays_dst", ".", "append", "(", "array", ".", "copy", "(", ")", ")", "continue", "reproject", "(", "array", ",", "arr_dst", ",", "# arr_dst[0, :, :, i],", "src_transform", "=", "self", ".", "_layer_meta", "[", "i", "]", "[", "\"transform\"", "]", ",", "dst_transform", "=", "aff_dst", ",", "src_crs", "=", "self", ".", "_layer_meta", "[", "0", "]", "[", "\"crs\"", "]", ",", "dst_crs", "=", "self", ".", "_layer_meta", "[", "0", "]", "[", "\"crs\"", "]", ",", "resampling", "=", "resampling", ")", "arrays_dst", ".", "append", "(", "arr_dst", ".", "copy", "(", ")", ")", "arrays_dst", "=", "np", ".", "stack", "(", "arrays_dst", ",", "axis", "=", "2", ")", "# n_images x n x m x 10 would be the synergise format", "return", "arrays_dst" ]
This is the main body of the process that does the work .
def main ( ) : print ( 'AIKIF example: Processing Finance data\n' ) data = read_bank_statements ( 'your_statement.csv' ) print ( data ) maps = load_column_maps ( ) rules = load_rules ( ) for m in maps : print ( 'AIKIF mapping : ' + m [ 0 ] + ' => ' + m [ 1 ] ) for rule in rules : #print(rule) if rule [ 0 ] == 'agg' : print ( 'summing : ' + rule [ 1 ] + ' into ' + rule [ 2 ] ) elif rule [ 0 ] == 'derive' : print ( 'New column : ' + rule [ 1 ] + ' = ' + rule [ 2 ] + ' WHERE ' + rule [ 1 ] + ' ELSE ' + rule [ 3 ] ) print ( 'Done\n' )
2,573
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/finance_example.py#L25-L61
[ "def", "merge_date_ranges", "(", "dates", ")", ":", "if", "not", "dates", ":", "return", "sorted_dates", "=", "sorted", "(", "[", "sorted", "(", "t", ")", "for", "t", "in", "dates", "]", ")", "saved", "=", "list", "(", "sorted_dates", "[", "0", "]", ")", "for", "st", ",", "en", "in", "sorted_dates", ":", "if", "st", "<", "MIN_PERIOD_DATE", "or", "st", ">", "MAX_PERIOD_DATE", ":", "raise", "ValueError", "(", "\"start date %s is out of bounds\"", "%", "str", "(", "st", ")", ")", "if", "en", "<", "MIN_PERIOD_DATE", "or", "en", ">", "MAX_PERIOD_DATE", ":", "raise", "ValueError", "(", "\"end date %s is out of bounds\"", "%", "str", "(", "en", ")", ")", "if", "st", "<=", "saved", "[", "1", "]", ":", "if", "saved", "[", "0", "]", "==", "MIN_PERIOD_DATE", ":", "saved", "[", "0", "]", "=", "st", "if", "MAX_PERIOD_DATE", "in", "(", "en", ",", "saved", "[", "1", "]", ")", ":", "saved", "[", "1", "]", "=", "min", "(", "saved", "[", "1", "]", ",", "en", ")", "else", ":", "saved", "[", "1", "]", "=", "max", "(", "saved", "[", "1", "]", ",", "en", ")", "else", ":", "yield", "tuple", "(", "saved", ")", "saved", "[", "0", "]", "=", "st", "saved", "[", "1", "]", "=", "en", "yield", "tuple", "(", "saved", ")" ]
Removes parameters whose values are set to None .
def _clean_params ( self , params ) : clean_params = { } for key , value in params . iteritems ( ) : if value is not None : clean_params [ key ] = value return clean_params
2,574
https://github.com/HackerEarth/he-sdk-python/blob/ca718afaf70a4239af1adf09ee248a076864b5fe/hackerearth/parameters.py#L48-L56
[ "def", "load_internal_cache", "(", "cls", ",", "pex", ",", "pex_info", ")", ":", "internal_cache", "=", "os", ".", "path", ".", "join", "(", "pex", ",", "pex_info", ".", "internal_cache", ")", "with", "TRACER", ".", "timed", "(", "'Searching dependency cache: %s'", "%", "internal_cache", ",", "V", "=", "2", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "pex", ")", ":", "for", "dist", "in", "find_distributions", "(", "internal_cache", ")", ":", "yield", "dist", "else", ":", "for", "dist", "in", "itertools", ".", "chain", "(", "*", "cls", ".", "write_zipped_internal_cache", "(", "pex", ",", "pex_info", ")", ")", ":", "yield", "dist" ]
Returns the distance between the centroids of two catchments in kilometers .
def distance_to ( self , other_catchment ) : try : if self . country == other_catchment . country : try : return 0.001 * hypot ( self . descriptors . centroid_ngr . x - other_catchment . descriptors . centroid_ngr . x , self . descriptors . centroid_ngr . y - other_catchment . descriptors . centroid_ngr . y ) except TypeError : # In case no centroid available, just return infinity which is helpful in most cases return float ( '+inf' ) else : # If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity. return float ( '+inf' ) except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
2,575
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L137-L158
[ "def", "add", "(", "name", ",", "keystore", ",", "passphrase", ",", "certificate", ",", "private_key", "=", "None", ")", ":", "ASN1", "=", "OpenSSL", ".", "crypto", ".", "FILETYPE_ASN1", "PEM", "=", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", "certs_list", "=", "[", "]", "if", "os", ".", "path", ".", "isfile", "(", "keystore", ")", ":", "keystore_object", "=", "jks", ".", "KeyStore", ".", "load", "(", "keystore", ",", "passphrase", ")", "for", "alias", ",", "loaded_cert", "in", "keystore_object", ".", "entries", ".", "items", "(", ")", ":", "certs_list", ".", "append", "(", "loaded_cert", ")", "try", ":", "cert_string", "=", "__salt__", "[", "'x509.get_pem_entry'", "]", "(", "certificate", ")", "except", "SaltInvocationError", ":", "raise", "SaltInvocationError", "(", "'Invalid certificate file or string: {0}'", ".", "format", "(", "certificate", ")", ")", "if", "private_key", ":", "# Accept PEM input format, but convert to DES for loading into new keystore", "key_string", "=", "__salt__", "[", "'x509.get_pem_entry'", "]", "(", "private_key", ")", "loaded_cert", "=", "OpenSSL", ".", "crypto", ".", "load_certificate", "(", "PEM", ",", "cert_string", ")", "loaded_key", "=", "OpenSSL", ".", "crypto", ".", "load_privatekey", "(", "PEM", ",", "key_string", ")", "dumped_cert", "=", "OpenSSL", ".", "crypto", ".", "dump_certificate", "(", "ASN1", ",", "loaded_cert", ")", "dumped_key", "=", "OpenSSL", ".", "crypto", ".", "dump_privatekey", "(", "ASN1", ",", "loaded_key", ")", "new_entry", "=", "jks", ".", "PrivateKeyEntry", ".", "new", "(", "name", ",", "[", "dumped_cert", "]", ",", "dumped_key", ",", "'rsa_raw'", ")", "else", ":", "new_entry", "=", "jks", ".", "TrustedCertEntry", ".", "new", "(", "name", ",", "cert_string", ")", "certs_list", ".", "append", "(", "new_entry", ")", "keystore_object", "=", "jks", ".", "KeyStore", ".", "new", "(", "'jks'", ",", "certs_list", ")", "keystore_object", ".", "save", "(", "keystore", ",", "passphrase", ")", "return", "True" ]
Estimate the urbext2000 parameter for a given year assuming a nation - wide urbanisation curve .
def urbext ( self , year ) : # Decimal places increased to ensure year 2000 corresponds with 1 urban_expansion = 0.7851 + 0.2124 * atan ( ( year - 1967.5 ) / 20.331792998 ) try : return self . catchment . descriptors . urbext2000 * urban_expansion except TypeError : # Sometimes urbext2000 is not set, assume zero return 0
2,576
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L273-L291
[ "def", "ack", "(", "self", ",", "msg", ")", ":", "message_id", "=", "msg", "[", "'headers'", "]", "[", "'message-id'", "]", "subscription", "=", "msg", "[", "'headers'", "]", "[", "'subscription'", "]", "transaction_id", "=", "None", "if", "'transaction-id'", "in", "msg", "[", "'headers'", "]", ":", "transaction_id", "=", "msg", "[", "'headers'", "]", "[", "'transaction-id'", "]", "# print \"acknowledging message id <%s>.\" % message_id", "return", "ack", "(", "message_id", ",", "subscription", ",", "transaction_id", ")" ]
Return a list of continuous data periods by removing the data gaps from the overall record .
def continuous_periods ( self ) : result = [ ] # For the first period start_date = self . start_date for gap in self . pot_data_gaps : end_date = gap . start_date - timedelta ( days = 1 ) result . append ( PotPeriod ( start_date , end_date ) ) # For the next period start_date = gap . end_date + timedelta ( days = 1 ) # For the last period end_date = self . end_date result . append ( PotPeriod ( start_date , end_date ) ) return result
2,577
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/entities.py#L401-L418
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Adding all files from given path to the object .
def add_path ( self , path , path_filter = None ) : for root , _ , files in os . walk ( path ) : for filename in files : full_path_and_filename = os . path . join ( root , filename ) if path_filter is None or path_filter ( full_path_and_filename ) : relative_path_and_filename = full_path_and_filename . replace ( path + '/' , '' ) with open ( full_path_and_filename , 'rb' ) as handle : self . files [ relative_path_and_filename ] = b64encode ( handle . read ( ) ) . decode ( 'utf-8' )
2,578
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L42-L55
[ "def", "supervisor_command", "(", "parser_args", ")", ":", "import", "logging", "from", "synergy", ".", "supervisor", ".", "supervisor_configurator", "import", "SupervisorConfigurator", ",", "set_box_id", "if", "parser_args", ".", "boxid", ":", "set_box_id", "(", "logging", ",", "parser_args", ".", "argument", ")", "return", "sc", "=", "SupervisorConfigurator", "(", ")", "if", "parser_args", ".", "reset", ":", "sc", ".", "reset_db", "(", ")", "elif", "parser_args", ".", "start", ":", "sc", ".", "mark_for_start", "(", "parser_args", ".", "argument", ")", "elif", "parser_args", ".", "stop", ":", "sc", ".", "mark_for_stop", "(", "parser_args", ".", "argument", ")", "elif", "parser_args", ".", "query", ":", "sc", ".", "query", "(", ")" ]
Convert JSON into a in memory file storage .
def from_json ( data ) : memfiles = InMemoryFiles ( ) memfiles . files = json . loads ( data ) return memfiles
2,579
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/memfiles.py#L84-L97
[ "async", "def", "_body_callback", "(", "self", ",", "h11_connection", ")", ":", "# pylint: disable=not-callable", "while", "True", ":", "next_event", "=", "await", "self", ".", "_recv_event", "(", "h11_connection", ")", "if", "isinstance", "(", "next_event", ",", "h11", ".", "Data", ")", ":", "await", "self", ".", "callback", "(", "next_event", ".", "data", ")", "else", ":", "return", "next_event" ]
delete a single file
def delete_file ( f , ignore_errors = False ) : try : os . remove ( f ) except Exception as ex : if ignore_errors : return print ( 'ERROR deleting file ' + str ( ex ) )
2,580
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L36-L45
[ "def", "auto_slug", "(", "self", ")", ":", "slug", "=", "self", ".", "name", "if", "slug", "is", "not", "None", ":", "slug", "=", "slugify", "(", "slug", ",", "separator", "=", "self", ".", "SLUG_SEPARATOR", ")", "session", "=", "sa", ".", "orm", ".", "object_session", "(", "self", ")", "if", "not", "session", ":", "return", "None", "query", "=", "session", ".", "query", "(", "Entity", ".", "slug", ")", ".", "filter", "(", "Entity", ".", "_entity_type", "==", "self", ".", "object_type", ")", "if", "self", ".", "id", "is", "not", "None", ":", "query", "=", "query", ".", "filter", "(", "Entity", ".", "id", "!=", "self", ".", "id", ")", "slug_re", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "slug", ")", "+", "r\"-?(-\\d+)?\"", ")", "results", "=", "[", "int", "(", "m", ".", "group", "(", "1", ")", "or", "0", ")", "# 0: for the unnumbered slug", "for", "m", "in", "(", "slug_re", ".", "match", "(", "s", ".", "slug", ")", "for", "s", "in", "query", ".", "all", "(", ")", "if", "s", ".", "slug", ")", "if", "m", "]", "max_id", "=", "max", "(", "-", "1", ",", "-", "1", ",", "*", "results", ")", "+", "1", "if", "max_id", ":", "slug", "=", "f\"{slug}-{max_id}\"", "return", "slug" ]
delete all files in folder fldr
def delete_files_in_folder ( fldr ) : fl = glob . glob ( fldr + os . sep + '*.*' ) for f in fl : delete_file ( f , True )
2,581
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L47-L53
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
copy single file
def copy_file ( src , dest ) : try : shutil . copy2 ( src , dest ) except Exception as ex : print ( 'ERROR copying file' + str ( ex ) )
2,582
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L55-L62
[ "def", "cmd_oreoled", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "4", ":", "print", "(", "\"Usage: oreoled LEDNUM RED GREEN BLUE <RATE>\"", ")", "return", "lednum", "=", "int", "(", "args", "[", "0", "]", ")", "pattern", "=", "[", "0", "]", "*", "24", "pattern", "[", "0", "]", "=", "ord", "(", "'R'", ")", "pattern", "[", "1", "]", "=", "ord", "(", "'G'", ")", "pattern", "[", "2", "]", "=", "ord", "(", "'B'", ")", "pattern", "[", "3", "]", "=", "ord", "(", "'0'", ")", "pattern", "[", "4", "]", "=", "0", "pattern", "[", "5", "]", "=", "int", "(", "args", "[", "1", "]", ")", "pattern", "[", "6", "]", "=", "int", "(", "args", "[", "2", "]", ")", "pattern", "[", "7", "]", "=", "int", "(", "args", "[", "3", "]", ")", "self", ".", "master", ".", "mav", ".", "led_control_send", "(", "self", ".", "settings", ".", "target_system", ",", "self", ".", "settings", ".", "target_component", ",", "lednum", ",", "255", ",", "8", ",", "pattern", ")" ]
copies all the files from src to dest folder
def copy_files_to_folder ( src , dest , xtn = '*.txt' ) : try : all_files = glob . glob ( os . path . join ( src , xtn ) ) for f in all_files : copy_file ( f , dest ) except Exception as ex : print ( 'ERROR copy_files_to_folder - ' + str ( ex ) )
2,583
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/file_tools.py#L68-L78
[ "def", "create_alarm_subscription", "(", "self", ",", "on_data", "=", "None", ",", "timeout", "=", "60", ")", ":", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'alarms'", ")", "# Represent subscription as a future", "subscription", "=", "AlarmSubscription", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_alarm_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ",", "processor", "=", "self", ".", "_processor", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
script to setup folder structures for AIKIF and prepare data tables .
def main ( ) : print ( '\n\n /------- AIKIF Installation --------\\' ) print ( ' | s. show current setup |' ) print ( ' | f. setup folder structures |' ) print ( ' | c. create sample data |' ) # not yet - wait for beta release print(' w. wipe data and install everything from scratch') print ( ' | q. quit |' ) print ( ' \\-----------------------------------/' ) cmd = input ( '?' ) if cmd == 's' : show_setup ( ) elif cmd == 'f' : setup_folders ( ) elif cmd == 'c' : create_sample_data ( ) #elif cmd == 'w': # wipe_and_rebuild_all() elif cmd == 'q' : exit ( 0 ) main ( )
2,584
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/install_data.py#L8-L31
[ "def", "make_update_loop", "(", "thread", ",", "update_func", ")", ":", "while", "not", "thread", ".", "should_stop", "(", ")", ":", "if", "thread", ".", "should_pause", "(", ")", ":", "thread", ".", "wait_to_resume", "(", ")", "start", "=", "time", ".", "time", "(", ")", "if", "hasattr", "(", "thread", ",", "'_updated'", ")", ":", "thread", ".", "_updated", ".", "clear", "(", ")", "update_func", "(", ")", "if", "hasattr", "(", "thread", ",", "'_updated'", ")", ":", "thread", ".", "_updated", ".", "set", "(", ")", "end", "=", "time", ".", "time", "(", ")", "dt", "=", "thread", ".", "period", "-", "(", "end", "-", "start", ")", "if", "dt", ">", "0", ":", "time", ".", "sleep", "(", "dt", ")" ]
reads an RDF file into a graph
def load_graph_from_rdf ( fname ) : print ( "reading RDF from " + fname + "...." ) store = Graph ( ) store . parse ( fname , format = "n3" ) print ( "Loaded " + str ( len ( store ) ) + " tuples" ) return store
2,585
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L20-L26
[ "def", "get_stats_display_width", "(", "self", ",", "curse_msg", ",", "without_option", "=", "False", ")", ":", "try", ":", "if", "without_option", ":", "# Size without options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "(", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "if", "not", "i", "[", "'optional'", "]", "else", "\"\"", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "else", ":", "# Size with all options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'ERROR: Can not compute plugin width ({})'", ".", "format", "(", "e", ")", ")", "return", "0", "else", ":", "return", "c" ]
display sample data from a graph
def show_graph_summary ( g ) : sample_data = [ ] print ( "list(g[RDFS.Class]) = " + str ( len ( list ( g [ RDFS . Class ] ) ) ) ) # Get Subject Lists num_subj = 0 for subj in g . subjects ( RDF . type ) : num_subj += 1 if num_subj < 5 : sample_data . append ( "subjects.subject: " + get_string_from_rdf ( subj ) ) print ( "g.subjects(RDF.type) = " + str ( num_subj ) ) # Get Sample of Subjects, Predicates, Objects num_subj = 0 for subj , pred , obj in g : num_subj += 1 if num_subj < 5 : sample_data . append ( "g.subject : " + get_string_from_rdf ( pred ) ) sample_data . append ( "g.predicate : " + get_string_from_rdf ( subj ) ) sample_data . append ( "g.object : " + get_string_from_rdf ( obj ) ) print ( "g.obj(RDF.type) = " + str ( num_subj ) ) print ( "------ Sample Data ------" ) for line in sample_data : print ( line )
2,586
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L28-L54
[ "def", "ekpsel", "(", "query", ",", "msglen", ",", "tablen", ",", "collen", ")", ":", "query", "=", "stypes", ".", "stringToCharP", "(", "query", ")", "msglen", "=", "ctypes", ".", "c_int", "(", "msglen", ")", "tablen", "=", "ctypes", ".", "c_int", "(", "tablen", ")", "collen", "=", "ctypes", ".", "c_int", "(", "collen", ")", "n", "=", "ctypes", ".", "c_int", "(", ")", "xbegs", "=", "stypes", ".", "emptyIntVector", "(", "_SPICE_EK_MAXQSEL", ")", "xends", "=", "stypes", ".", "emptyIntVector", "(", "_SPICE_EK_MAXQSEL", ")", "xtypes", "=", "stypes", ".", "emptyIntVector", "(", "_SPICE_EK_MAXQSEL", ")", "xclass", "=", "stypes", ".", "emptyIntVector", "(", "_SPICE_EK_MAXQSEL", ")", "tabs", "=", "stypes", ".", "emptyCharArray", "(", "yLen", "=", "_SPICE_EK_MAXQSEL", ",", "xLen", "=", "tablen", ")", "cols", "=", "stypes", ".", "emptyCharArray", "(", "yLen", "=", "_SPICE_EK_MAXQSEL", ",", "xLen", "=", "collen", ")", "error", "=", "ctypes", ".", "c_int", "(", ")", "errmsg", "=", "stypes", ".", "stringToCharP", "(", "msglen", ")", "libspice", ".", "ekpsel_c", "(", "query", ",", "msglen", ",", "tablen", ",", "collen", ",", "ctypes", ".", "byref", "(", "n", ")", ",", "xbegs", ",", "xends", ",", "xtypes", ",", "xclass", ",", "ctypes", ".", "byref", "(", "tabs", ")", ",", "ctypes", ".", "byref", "(", "cols", ")", ",", "ctypes", ".", "byref", "(", "error", ")", ",", "errmsg", ")", "return", "(", "n", ".", "value", ",", "stypes", ".", "cVectorToPython", "(", "xbegs", ")", "[", ":", "n", ".", "value", "]", ",", "stypes", ".", "cVectorToPython", "(", "xends", ")", "[", ":", "n", ".", "value", "]", ",", "stypes", ".", "cVectorToPython", "(", "xtypes", ")", "[", ":", "n", ".", "value", "]", ",", "stypes", ".", "cVectorToPython", "(", "xclass", ")", "[", ":", "n", ".", "value", "]", ",", "stypes", ".", "cVectorToPython", "(", "tabs", ")", "[", ":", "n", ".", "value", "]", ",", "stypes", ".", "cVectorToPython", "(", "cols", ")", "[", ":", "n", ".", "value", "]", ",", "error", ".", 
"value", ",", "stypes", ".", "toPythonString", "(", "errmsg", ")", ")" ]
export a graph to CSV for simpler viewing
def export ( g , csv_fname ) : with open ( csv_fname , "w" ) as f : num_tuples = 0 f . write ( '"num","subject","predicate","object"\n' ) for subj , pred , obj in g : num_tuples += 1 f . write ( '"' + str ( num_tuples ) + '",' ) f . write ( '"' + get_string_from_rdf ( subj ) + '",' ) f . write ( '"' + get_string_from_rdf ( pred ) + '",' ) f . write ( '"' + get_string_from_rdf ( obj ) + '"\n' ) print ( "Finished exporting " , num_tuples , " tuples" )
2,587
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L56-L67
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
extracts the real content from an RDF info object
def get_string_from_rdf ( src ) : res = src . split ( "/" ) #[:-1] return "" . join ( [ l . replace ( '"' , '""' ) for l in res [ len ( res ) - 1 ] ] )
2,588
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L69-L72
[ "def", "run", "(", "self", ")", ":", "self", ".", "run_plugins", "(", ")", "while", "True", ":", "# Reload plugins and config if either the config file or plugin", "# directory are modified.", "if", "self", ".", "_config_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_config_file_path", ")", "or", "self", ".", "_plugin_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_plugin_path", ")", ":", "self", ".", "thread_manager", ".", "kill_all_threads", "(", ")", "self", ".", "output_dict", ".", "clear", "(", ")", "self", ".", "reload", "(", ")", "self", ".", "run_plugins", "(", ")", "self", ".", "output_to_bar", "(", "json", ".", "dumps", "(", "self", ".", "_remove_empty_output", "(", ")", ")", ")", "time", ".", "sleep", "(", "self", ".", "config", ".", "general", "[", "'interval'", "]", ")" ]
make a short version of an RDF file
def create_sample_file ( ip , op , num_lines ) : with open ( ip , "rb" ) as f : with open ( op , "wb" ) as fout : for _ in range ( num_lines ) : fout . write ( f . readline ( ) )
2,589
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L75-L80
[ "def", "from_job_desc", "(", "cls", ",", "warm_start_config", ")", ":", "if", "not", "warm_start_config", "or", "WARM_START_TYPE", "not", "in", "warm_start_config", "or", "PARENT_HYPERPARAMETER_TUNING_JOBS", "not", "in", "warm_start_config", ":", "return", "None", "parents", "=", "[", "]", "for", "parent", "in", "warm_start_config", "[", "PARENT_HYPERPARAMETER_TUNING_JOBS", "]", ":", "parents", ".", "append", "(", "parent", "[", "HYPERPARAMETER_TUNING_JOB_NAME", "]", ")", "return", "cls", "(", "warm_start_type", "=", "WarmStartTypes", "(", "warm_start_config", "[", "WARM_START_TYPE", "]", ")", ",", "parents", "=", "parents", ")" ]
Flatten nested sequences into one .
def flatten ( * sequence ) : result = [ ] for entry in sequence : if isinstance ( entry , list ) : result += Select . flatten ( * entry ) elif isinstance ( entry , tuple ) : result += Select . flatten ( * entry ) else : result . append ( entry ) return result
2,590
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/query.py#L42-L52
[ "def", "set_energy_range", "(", "self", ",", "logemin", ",", "logemax", ")", ":", "if", "logemin", "is", "None", ":", "logemin", "=", "self", ".", "log_energies", "[", "0", "]", "if", "logemax", "is", "None", ":", "logemax", "=", "self", ".", "log_energies", "[", "-", "1", "]", "imin", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemin", ")", "[", "0", "]", ")", "imax", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemax", ")", "[", "0", "]", ")", "if", "imin", "-", "imax", "==", "0", ":", "imin", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "imax", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "klims", "=", "self", ".", "like", ".", "logLike", ".", "klims", "(", ")", "if", "imin", "!=", "klims", "[", "0", "]", "or", "imax", "!=", "klims", "[", "1", "]", ":", "self", ".", "like", ".", "selectEbounds", "(", "imin", ",", "imax", ")", "return", "np", ".", "array", "(", "[", "self", ".", "log_energies", "[", "imin", "]", ",", "self", ".", "log_energies", "[", "imax", "]", "]", ")" ]
Do the query .
def build ( self ) : result = [ ] for entry in self . sequence : ignore = False for filter_function in self . filter_functions : if not filter_function ( entry ) : ignore = True break if not ignore : value = entry for transform_function in self . transform_functions : value = transform_function ( value ) result . append ( value ) return result
2,591
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/query.py#L64-L78
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
reads the zip file determines compression and unzips recursively until source files are extracted
def extract_all ( zipfile , dest_folder ) : z = ZipFile ( zipfile ) print ( z ) z . extract ( dest_folder )
2,592
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L11-L19
[ "def", "classes", "(", "request", ")", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", "or", "not", "hasattr", "(", "request", ".", "user", ",", "\"userprofile\"", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'User is not logged in'", ")", ",", "'error_type'", ":", "'user_unauthorized'", "}", ",", "template", "=", "'user_json.html'", ",", "status", "=", "401", ")", "clss", "=", "[", "c", ".", "to_json", "(", ")", "for", "c", "in", "Class", ".", "objects", ".", "filter", "(", "owner", "=", "request", ".", "user", ".", "userprofile", ")", "]", "return", "render_json", "(", "request", ",", "clss", ",", "status", "=", "200", ",", "template", "=", "'user_json.html'", ",", "help_text", "=", "classes", ".", "__doc__", ")" ]
add a file to the archive
def create_zip_from_file ( zip_file , fname ) : with zipfile . ZipFile ( zip_file , 'w' ) as myzip : myzip . write ( fname )
2,593
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L21-L26
[ "def", "load_toml_rest_api_config", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Skipping rest api loading from non-existent config file: %s\"", ",", "filename", ")", "return", "RestApiConfig", "(", ")", "LOGGER", ".", "info", "(", "\"Loading rest api information from config: %s\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "raise", "RestApiConfigurationError", "(", "\"Unable to load rest api configuration file: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "invalid_keys", "=", "set", "(", "toml_config", ".", "keys", "(", ")", ")", ".", "difference", "(", "[", "'bind'", ",", "'connect'", ",", "'timeout'", ",", "'opentsdb_db'", ",", "'opentsdb_url'", ",", "'opentsdb_username'", ",", "'opentsdb_password'", ",", "'client_max_size'", "]", ")", "if", "invalid_keys", ":", "raise", "RestApiConfigurationError", "(", "\"Invalid keys in rest api config: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "list", "(", "invalid_keys", ")", ")", ")", ")", ")", "config", "=", "RestApiConfig", "(", "bind", "=", "toml_config", ".", "get", "(", "\"bind\"", ",", "None", ")", ",", "connect", "=", "toml_config", ".", "get", "(", "'connect'", ",", "None", ")", ",", "timeout", "=", "toml_config", ".", "get", "(", "'timeout'", ",", "None", ")", ",", "opentsdb_url", "=", "toml_config", ".", "get", "(", "'opentsdb_url'", ",", "None", ")", ",", "opentsdb_db", "=", "toml_config", ".", "get", "(", "'opentsdb_db'", ",", "None", ")", ",", "opentsdb_username", "=", "toml_config", ".", "get", "(", "'opentsdb_username'", ",", "None", ")", ",", "opentsdb_password", "=", "toml_config", ".", "get", "(", "'opentsdb_password'", ",", "None", ")", ",", "client_max_size", "=", "toml_config", 
".", "get", "(", "'client_max_size'", ",", "None", ")", ")", "return", "config" ]
add all the files from the folder fldr to the archive
def create_zip_from_folder ( zip_file , fldr , mode = "r" ) : #print('zip from folder - adding folder : ', fldr) zipf = zipfile . ZipFile ( zip_file , 'w' ) for root , dirs , files in os . walk ( fldr ) : for file in files : fullname = os . path . join ( root , file ) #print('zip - adding file : ', fullname) zipf . write ( fullname ) zipf . close ( )
2,594
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/zip_tools.py#L38-L52
[ "def", "_connect", "(", "self", ",", "config", ")", ":", "if", "'connection_timeout'", "not", "in", "self", ".", "_config", ":", "self", ".", "_config", "[", "'connection_timeout'", "]", "=", "480", "try", ":", "self", ".", "_cnx", "=", "connect", "(", "*", "*", "config", ")", "self", ".", "_cursor", "=", "self", ".", "_cnx", ".", "cursor", "(", ")", "self", ".", "_printer", "(", "'\\tMySQL DB connection established with db'", ",", "config", "[", "'database'", "]", ")", "except", "Error", "as", "err", ":", "if", "err", ".", "errno", "==", "errorcode", ".", "ER_ACCESS_DENIED_ERROR", ":", "print", "(", "\"Something is wrong with your user name or password\"", ")", "elif", "err", ".", "errno", "==", "errorcode", ".", "ER_BAD_DB_ERROR", ":", "print", "(", "\"Database does not exist\"", ")", "raise", "err" ]
Add a coroutine function
def add_method ( self , loop , callback ) : f , obj = get_method_vars ( callback ) wrkey = ( f , id ( obj ) ) self [ wrkey ] = obj self . event_loop_map [ wrkey ] = loop
2,595
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L229-L240
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Iterate over stored coroutine functions
def iter_methods ( self ) : for wrkey , obj in self . iter_instances ( ) : f , obj_id = wrkey loop = self . event_loop_map [ wrkey ] m = getattr ( obj , f . __name__ ) yield loop , m
2,596
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L248-L260
[ "def", "_parse_parallel_sentences", "(", "f1", ",", "f2", ")", ":", "def", "_parse_text", "(", "path", ")", ":", "\"\"\"Returns the sentences from a single text file, which may be gzipped.\"\"\"", "split_path", "=", "path", ".", "split", "(", "\".\"", ")", "if", "split_path", "[", "-", "1", "]", "==", "\"gz\"", ":", "lang", "=", "split_path", "[", "-", "2", "]", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "path", ")", "as", "f", ",", "gzip", ".", "GzipFile", "(", "fileobj", "=", "f", ")", "as", "g", ":", "return", "g", ".", "read", "(", ")", ".", "split", "(", "\"\\n\"", ")", ",", "lang", "if", "split_path", "[", "-", "1", "]", "==", "\"txt\"", ":", "# CWMT", "lang", "=", "split_path", "[", "-", "2", "]", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "lang", "=", "\"zh\"", "if", "lang", "in", "(", "\"ch\"", ",", "\"cn\"", ")", "else", "lang", "else", ":", "lang", "=", "split_path", "[", "-", "1", "]", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "path", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "split", "(", "\"\\n\"", ")", ",", "lang", "def", "_parse_sgm", "(", "path", ")", ":", "\"\"\"Returns sentences from a single SGML file.\"\"\"", "lang", "=", "path", ".", "split", "(", "\".\"", ")", "[", "-", "2", "]", "sentences", "=", "[", "]", "# Note: We can't use the XML parser since some of the files are badly", "# formatted.", "seg_re", "=", "re", ".", "compile", "(", "r\"<seg id=\\\"\\d+\\\">(.*)</seg>\"", ")", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "seg_match", "=", "re", ".", "match", "(", "seg_re", ",", "line", ")", "if", "seg_match", ":", "assert", "len", "(", "seg_match", ".", "groups", "(", ")", ")", "==", "1", "sentences", ".", "append", "(", "seg_match", ".", "groups", "(", ")", "[", "0", "]", ")", "return", "sentences", ",", "lang", "parse_file", "=", "_parse_sgm", "if", "f1", ".", "endswith", "(", "\".sgm\"", ")", "else", 
"_parse_text", "# Some datasets (e.g., CWMT) contain multiple parallel files specified with", "# a wildcard. We sort both sets to align them and parse them one by one.", "f1_files", "=", "tf", ".", "io", ".", "gfile", ".", "glob", "(", "f1", ")", "f2_files", "=", "tf", ".", "io", ".", "gfile", ".", "glob", "(", "f2", ")", "assert", "f1_files", "and", "f2_files", ",", "\"No matching files found: %s, %s.\"", "%", "(", "f1", ",", "f2", ")", "assert", "len", "(", "f1_files", ")", "==", "len", "(", "f2_files", ")", ",", "(", "\"Number of files do not match: %d vs %d for %s vs %s.\"", "%", "(", "len", "(", "f1_files", ")", ",", "len", "(", "f2_files", ")", ",", "f1", ",", "f2", ")", ")", "for", "f1_i", ",", "f2_i", "in", "zip", "(", "sorted", "(", "f1_files", ")", ",", "sorted", "(", "f2_files", ")", ")", ":", "l1_sentences", ",", "l1", "=", "parse_file", "(", "f1_i", ")", "l2_sentences", ",", "l2", "=", "parse_file", "(", "f2_i", ")", "assert", "len", "(", "l1_sentences", ")", "==", "len", "(", "l2_sentences", ")", ",", "(", "\"Sizes do not match: %d vs %d for %s vs %s.\"", "%", "(", "len", "(", "l1_sentences", ")", ",", "len", "(", "l2_sentences", ")", ",", "f1_i", ",", "f2_i", ")", ")", "for", "s1", ",", "s2", "in", "zip", "(", "l1_sentences", ",", "l2_sentences", ")", ":", "yield", "{", "l1", ":", "s1", ",", "l2", ":", "s2", "}" ]
Schedule and await a coroutine on the specified loop
def submit_coroutine ( self , coro , loop ) : async def _do_call ( _coro ) : with _IterationGuard ( self ) : await _coro asyncio . run_coroutine_threadsafe ( _do_call ( coro ) , loop = loop )
2,597
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L264-L283
[ "def", "per_file_type_data", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "cache_date", "in", "self", ".", "cache_dates", ":", "data", "=", "self", ".", "_cache_get", "(", "cache_date", ")", "if", "len", "(", "data", "[", "'by_file_type'", "]", ")", "==", "0", ":", "data", "[", "'by_file_type'", "]", "=", "{", "'other'", ":", "0", "}", "ret", "[", "cache_date", "]", "=", "data", "[", "'by_file_type'", "]", "return", "ret" ]
launch a file - used for starting html pages
def launch ( self ) : #os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x import subprocess try : retcode = subprocess . call ( self . fullname , shell = True ) if retcode < 0 : print ( "Child was terminated by signal" , - retcode , file = sys . stderr ) return False else : print ( "Child returned" , retcode , file = sys . stderr ) return True except OSError as e : print ( "Execution failed:" , e , file = sys . stderr ) return False
2,598
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L57-L71
[ "def", "setGroups", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requests", "=", "0", "groups", "=", "[", "]", "try", ":", "for", "gk", "in", "self", "[", "'groupKeys'", "]", ":", "try", ":", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambugroup", "import", "MambuGroup", "self", ".", "mambugroupclass", "=", "MambuGroup", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "requests", "+=", "1", "groups", ".", "append", "(", "g", ")", "except", "KeyError", ":", "pass", "self", "[", "'groups'", "]", "=", "groups", "return", "requests" ]
delete a file don t really care if it doesn t exist
def delete ( self ) : if self . fullname != "" : try : os . remove ( self . fullname ) except IOError : print ( "Cant delete " , self . fullname )
2,599
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L76-L82
[ "def", "_get_api_id", "(", "self", ",", "event_properties", ")", ":", "api_id", "=", "event_properties", ".", "get", "(", "\"RestApiId\"", ")", "if", "isinstance", "(", "api_id", ",", "dict", ")", "and", "\"Ref\"", "in", "api_id", ":", "api_id", "=", "api_id", "[", "\"Ref\"", "]", "return", "api_id" ]