query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Create a blob generator .
def blob_generator(self):
    """Create a blob generator.

    Opens the aanet ROOT file at ``self.filename`` and yields one ``Blob``
    per event.  In bare mode the raw aanet event is passed through; otherwise
    the event is converted via ``self._read_event`` and enriched with header
    and metadata tables.

    Yields:
        Blob: either ``{'evt': ..., 'event_file': ...}`` (bare mode) or a
        converted blob carrying ``RawHeader``, ``Header`` and optionally
        ``Meta`` entries.
    """
    # pylint: disable:F0401,W0612
    import aa  # noqa -- pylint: disable=F0401 (import registers aanet's ROOT bindings)
    from ROOT import EventFile  # pylint: disable=F0401
    filename = self.filename
    log.info("Reading from file: {0}".format(filename))
    if not os.path.exists(filename):
        # Only a warning here: EventFile below is still attempted and will
        # trigger the SystemExit if the file truly cannot be opened.
        log.warning(filename + " not available: continue without it")
    try:
        event_file = EventFile(filename)
    except Exception:
        raise SystemExit("Could not open file")
    log.info("Generating blobs through new aanet API...")
    # NOTE(review): mixes `log`, `self.log` and `self.print` — presumably all
    # provided by the framework/module base class; confirm they exist.
    self.print("Reading metadata using 'JPrintMeta'")
    meta_parser = MetaParser(filename=filename)
    meta = meta_parser.get_table()
    if meta is None:
        self.log.warning("No metadata found, this means no data provenance!")
    if self.bare:
        # Bare mode: no conversion, hand out the raw aanet event objects.
        log.info("Skipping data conversion, only passing bare aanet data")
        for event in event_file:
            yield Blob({'evt': event, 'event_file': event_file})
    else:
        log.info("Unpacking aanet header into dictionary...")
        hdr = self._parse_header(event_file.header)
        if not hdr:
            log.info("Empty header dict found, skipping...")
            # NOTE(review): only raw_header is reset here; self.header is not
            # assigned on this path — verify it is initialised elsewhere,
            # since the loop below reads it unconditionally.
            self.raw_header = None
        else:
            log.info("Converting Header dict to Table...")
            self.raw_header = self._convert_header_dict_to_table(hdr)
            log.info("Creating HDF5Header")
            self.header = HDF5Header.from_table(self.raw_header)
        for event in event_file:
            log.debug('Reading event...')
            blob = self._read_event(event, filename)
            log.debug('Reading header...')
            # Header tables are attached to every blob.
            blob["RawHeader"] = self.raw_header
            blob["Header"] = self.header
            if meta is not None:
                blob['Meta'] = meta
            self.group_id += 1
            yield blob
    # Explicitly drop the ROOT file handle once the generator is exhausted.
    del event_file
9,700
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L264-L320
[ "def", "_log10_Inorm_extern_planckint", "(", "self", ",", "Teff", ")", ":", "log10_Inorm", "=", "libphoebe", ".", "wd_planckint", "(", "Teff", ",", "self", ".", "extern_wd_idx", ",", "self", ".", "wd_data", "[", "\"planck_table\"", "]", ")", "return", "log10_Inorm" ]
Parse ASCII output of JPrintMeta
def parse_string(self, string):
    """Parse ASCII output of JPrintMeta.

    The input is raw ``bytes``: a sequence of per-application blocks, each
    terminated by a line starting with ``b"<application> Linux"``.  Every
    completed block is handed to ``self._record_app_data``.

    Args:
        string (bytes): raw JPrintMeta output; empty input is ignored
            with a warning.
    """
    self.log.info("Parsing ASCII data")
    if not string:
        self.log.warning("Empty metadata")
        return
    lines = string.splitlines()
    application_data = []
    # The very first line starts the first application block.
    application = lines[0].split()[0]
    self.log.debug("Reading meta information for '%s'" % application)
    for line in lines:
        if application is None:
            # Start of the next application's block.
            application = line.split()[0]
            # BUG FIX: log after assigning `application`; previously the
            # debug message was emitted first and always printed 'None'.
            self.log.debug("Reading meta information for '%s'" % application)
        application_data.append(line)
        if line.startswith(application + b' Linux'):
            # "<app> Linux ..." terminates the current block.
            self._record_app_data(application_data)
            application_data = []
            application = None
9,701
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L692-L716
[ "def", "reject_record", "(", "self", ",", "record", ")", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "req", "=", "InclusionRequest", ".", "get", "(", "self", ".", "id", ",", "record", ".", "id", ")", "if", "req", "is", "None", ":", "raise", "InclusionRequestMissingError", "(", "community", "=", "self", ",", "record", "=", "record", ")", "req", ".", "delete", "(", ")" ]
Parse raw metadata output for a single application
def _record_app_data ( self , data ) : name , revision = data [ 0 ] . split ( ) root_version = data [ 1 ] . split ( ) [ 1 ] command = b'\n' . join ( data [ 3 : ] ) . split ( b'\n' + name + b' Linux' ) [ 0 ] self . meta . append ( { 'application_name' : np . string_ ( name ) , 'revision' : np . string_ ( revision ) , 'root_version' : np . string_ ( root_version ) , 'command' : np . string_ ( command ) } )
9,702
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L718-L741
[ "def", "_safemembers", "(", "members", ")", ":", "base", "=", "_resolved", "(", "\".\"", ")", "for", "finfo", "in", "members", ":", "if", "_badpath", "(", "finfo", ".", "name", ",", "base", ")", ":", "print", "(", "finfo", ".", "name", ",", "\"is blocked (illegal path)\"", ")", "elif", "finfo", ".", "issym", "(", ")", "and", "_badlink", "(", "finfo", ",", "base", ")", ":", "print", "(", "finfo", ".", "name", ",", "\"is blocked: Hard link to\"", ",", "finfo", ".", "linkname", ")", "elif", "finfo", ".", "islnk", "(", ")", "and", "_badlink", "(", "finfo", ",", "base", ")", ":", "print", "(", "finfo", ".", "name", ",", "\"is blocked: Symlink to\"", ",", "finfo", ".", "linkname", ")", "else", ":", "yield", "finfo" ]
Convert metadata to a KM3Pipe Table .
def get_table(self, name='Meta', h5loc='/meta'):
    """Convert collected metadata to a KM3Pipe Table.

    Args:
        name (str): name of the resulting table.
        h5loc (str): HDF5 group location for the table.

    Returns:
        Table with one fixed-width byte-string column per metadata key,
        or ``None`` when no metadata was collected.
    """
    if not self.meta:
        return None
    # Transpose the list of per-application dicts into column-wise lists.
    data = defaultdict(list)
    for entry in self.meta:
        for key, value in entry.items():
            data[key].append(value)
    # Size each column's byte-string dtype to its longest value.
    dtypes = []
    for key, values in data.items():
        max_len = max(map(len, values))
        dtype = 'S{}'.format(max_len)
        dtypes.append((key, dtype))
    # BUG FIX: honor the caller-supplied `name`; it was previously ignored
    # and hard-coded to 'Meta'.
    tab = Table(
        data, dtype=dtypes, h5loc=h5loc, name=name, h5singleton=True
    )
    return tab
9,703
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L743-L767
[ "def", "_grad_sparsity", "(", "self", ")", ":", "# If the sparse minibatch gradient has 10 percent of its entries", "# non-zero, its sparsity is 0.1.", "# The norm of dense gradient averaged from full dataset", "# are roughly estimated norm of minibatch", "# sparse gradient norm * sqrt(sparsity)", "# An extension maybe only correct the sparse blob.", "non_zero_cnt", "=", "tf", ".", "add_n", "(", "[", "tf", ".", "count_nonzero", "(", "g", ")", "for", "g", "in", "self", ".", "_grad", "]", ")", "all_entry_cnt", "=", "tf", ".", "add_n", "(", "[", "tf", ".", "size", "(", "g", ")", "for", "g", "in", "self", ".", "_grad", "]", ")", "self", ".", "_sparsity", "=", "tf", ".", "cast", "(", "non_zero_cnt", ",", "self", ".", "_grad", "[", "0", "]", ".", "dtype", ")", "self", ".", "_sparsity", "/=", "tf", ".", "cast", "(", "all_entry_cnt", ",", "self", ".", "_grad", "[", "0", "]", ".", "dtype", ")", "avg_op", "=", "self", ".", "_moving_averager", ".", "apply", "(", "[", "self", ".", "_sparsity", ",", "]", ")", "with", "tf", ".", "control_dependencies", "(", "[", "avg_op", "]", ")", ":", "self", ".", "_sparsity_avg", "=", "self", ".", "_moving_averager", ".", "average", "(", "self", ".", "_sparsity", ")", "return", "avg_op" ]
Iterate over movies hash stored in the database .
def itermovieshash(self):
    """Iterate over movies hash stored in the database.

    Walks the gdbm-style ``firstkey``/``nextkey`` chain of ``self._db``
    and yields each key until the chain is exhausted.
    """
    database = self._db
    current = database.firstkey()
    while current is not None:
        yield current
        current = database.nextkey(current)
9,704
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/db.py#L12-L18
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
Handles Datastore response errors according to their documentation .
def _max_retries_for_error(self, error):
    """Handles Datastore response errors according to their documentation.

    Args:
        error (dict): error payload; its "status" field selects the
            retry policy.

    Returns:
        Maximum number of retries for this status, or ``None`` when the
        error should not be retried at all.
    """
    status = error.get("status")
    aborted_in_transaction = status == "ABORTED" and get_transactions() > 0
    if aborted_in_transaction:
        # Avoids retrying Conflicts when inside a transaction.
        return None
    return self._MAX_RETRIES.get(status)
9,705
https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/datastore.py#L56-L73
[ "def", "_get_property_columns", "(", "tabletype", ",", "columns", ")", ":", "from", "ligo", ".", "lw", ".", "lsctables", "import", "gpsproperty", "as", "GpsProperty", "# get properties for row object", "rowvars", "=", "vars", "(", "tabletype", ".", "RowType", ")", "# build list of real column names for fancy properties", "extracols", "=", "{", "}", "for", "key", "in", "columns", ":", "prop", "=", "rowvars", "[", "key", "]", "if", "isinstance", "(", "prop", ",", "GpsProperty", ")", ":", "extracols", "[", "key", "]", "=", "(", "prop", ".", "s_name", ",", "prop", ".", "ns_name", ")", "return", "extracols" ]
Initialize services without authenticating to Globus Auth .
def anonymous_login(services):
    """Initialize services without authenticating to Globus Auth.

    Args:
        services (str or list of str): service name(s) to initialize.

    Returns:
        dict: mapping of service name to anonymous client; services that
        could not be initialized are reported on stdout and omitted.
    """
    service_names = [services] if isinstance(services, str) else services
    clients = {}
    # Initialize valid services
    for serv in service_names:
        # Single try block on purpose: a KeyError from anywhere inside is
        # reported as "no known client", anything else as a creation failure.
        try:
            clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)
        except KeyError:  # No known client
            print("Error: No known client for '{}' service.".format(serv))
        except Exception:  # Other issue, probably auth
            print("Error: Unable to create client for '{}' service.\n"
                  "Anonymous access may not be allowed.".format(serv))
    return clients
9,706
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L364-L390
[ "def", "delete_annotation", "(", "self", ",", "term_ilx_id", ":", "str", ",", "annotation_type_ilx_id", ":", "str", ",", "annotation_value", ":", "str", ")", "->", "dict", ":", "term_data", "=", "self", ".", "get_entity", "(", "term_ilx_id", ")", "if", "not", "term_data", "[", "'id'", "]", ":", "exit", "(", "'term_ilx_id: '", "+", "term_ilx_id", "+", "' does not exist'", ")", "anno_data", "=", "self", ".", "get_entity", "(", "annotation_type_ilx_id", ")", "if", "not", "anno_data", "[", "'id'", "]", ":", "exit", "(", "'annotation_type_ilx_id: '", "+", "annotation_type_ilx_id", "+", "' does not exist'", ")", "entity_annotations", "=", "self", ".", "get_annotation_via_tid", "(", "term_data", "[", "'id'", "]", ")", "annotation_id", "=", "''", "for", "annotation", "in", "entity_annotations", ":", "if", "str", "(", "annotation", "[", "'tid'", "]", ")", "==", "str", "(", "term_data", "[", "'id'", "]", ")", ":", "if", "str", "(", "annotation", "[", "'annotation_tid'", "]", ")", "==", "str", "(", "anno_data", "[", "'id'", "]", ")", ":", "if", "str", "(", "annotation", "[", "'value'", "]", ")", "==", "str", "(", "annotation_value", ")", ":", "annotation_id", "=", "annotation", "[", "'id'", "]", "break", "if", "not", "annotation_id", ":", "print", "(", "'''WARNING: Annotation you wanted to delete does not exist '''", ")", "return", "None", "url", "=", "self", ".", "base_url", "+", "'term/edit-annotation/{annotation_id}'", ".", "format", "(", "annotation_id", "=", "annotation_id", ")", "data", "=", "{", "'tid'", ":", "' '", ",", "# for delete", "'annotation_tid'", ":", "' '", ",", "# for delete", "'value'", ":", "' '", ",", "# for delete", "'term_version'", ":", "' '", ",", "'annotation_term_version'", ":", "' '", ",", "}", "output", "=", "self", ".", "post", "(", "url", "=", "url", ",", "data", "=", "data", ",", ")", "# check output", "return", "output" ]
Remove ALL tokens in the token directory . This will force re - authentication to all services .
def logout(token_dir=DEFAULT_CRED_PATH):
    """Remove ALL tokens in the token directory.

    This will force re-authentication to all services.

    Args:
        token_dir (str): directory whose ``*tokens.json`` files are removed.
    """
    token_files = (name for name in os.listdir(token_dir)
                   if name.endswith("tokens.json"))
    for name in token_files:
        try:
            os.remove(os.path.join(token_dir, name))
        except OSError as e:
            # Eat ENOENT (no such file/dir, tokens already deleted) only,
            # raise any other issue (bad permissions, etc.)
            if e.errno != errno.ENOENT:
                raise
9,707
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L393-L411
[ "def", "set_burn_in_from_config", "(", "self", ",", "cp", ")", ":", "try", ":", "bit", "=", "self", ".", "burn_in_class", ".", "from_config", "(", "cp", ",", "self", ")", "except", "ConfigParser", ".", "Error", ":", "bit", "=", "None", "self", ".", "set_burn_in", "(", "bit", ")" ]
Format input into GMeta format suitable for ingesting into Globus Search . Formats a dictionary into a GMetaEntry . Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest .
def format_gmeta(data, acl=None, identifier=None):
    """Format input into GMeta format suitable for ingesting into Globus Search.

    A ``dict`` is wrapped as a single GMetaEntry; a ``list`` of GMetaEntry
    dicts is wrapped as a GMetaList inside a GIngest document.

    Args:
        data (dict or list): entry content, or list of formatted entries.
        acl (str or list of str): principal(s) allowed to view a GMetaEntry;
            required for dict input.
        identifier (str): subject of a GMetaEntry; required for dict input.

    Returns:
        dict: GMetaEntry or GIngest document.

    Raises:
        ValueError: dict input without acl/identifier.
        TypeError: input that is neither dict nor list.
    """
    if isinstance(data, dict):
        if acl is None or identifier is None:
            raise ValueError("acl and identifier are required when formatting a GMetaEntry.")
        acl_list = [acl] if isinstance(acl, str) else acl
        # "Correctly" format ACL entries into URNs
        prefixed_acl = []
        for uuid in acl_list:
            lowered = uuid.lower()
            if uuid == "public" or lowered.startswith("urn:"):
                # Special value or already a URN: pass through unmodified.
                prefixed_acl.append(uuid)
            else:
                # The principal type of the UUID is unknown, so emit both
                # identity and group URNs (admittedly hacky).
                prefixed_acl.append("urn:globus:auth:identity:" + lowered)
                prefixed_acl.append("urn:globus:groups:id:" + lowered)
        return {
            "@datatype": "GMetaEntry",
            "@version": "2016-11-09",
            "subject": identifier,
            "visible_to": prefixed_acl,
            "content": data
        }
    if isinstance(data, list):
        return {
            "@datatype": "GIngest",
            "@version": "2016-11-09",
            "ingest_type": "GMetaList",
            "ingest_data": {
                "@datatype": "GMetaList",
                "@version": "2016-11-09",
                "gmeta": data
            }
        }
    raise TypeError("Cannot format '" + str(type(data)) + "' into GMeta.")
9,708
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L471-L539
[ "def", "on_connection_blocked", "(", "self", ",", "method_frame", ")", ":", "LOGGER", ".", "warning", "(", "'Connection blocked: %s'", ",", "method_frame", ")", "self", ".", "state", "=", "self", ".", "STATE_BLOCKED", "if", "self", ".", "on_unavailable", ":", "self", ".", "on_unavailable", "(", "self", ")" ]
Remove GMeta wrapping from a Globus Search result . This function can be called on the raw GlobusHTTPResponse that Search returns or a string or dictionary representation of it .
def gmeta_pop(gmeta, info=False):
    """Remove GMeta wrapping from a Globus Search result.

    Accepts the raw GlobusHTTPResponse Search returns, or a string or
    dict representation of it.

    Args:
        gmeta: GlobusHTTPResponse, JSON string, or dict.
        info (bool): when True, also return query info.

    Returns:
        list of unwrapped content dicts, or ``(results, info_dict)``
        when ``info`` is True.

    Raises:
        TypeError: unsupported input type.
    """
    if type(gmeta) is GlobusHTTPResponse:
        gmeta = json.loads(gmeta.text)
    elif type(gmeta) is str:
        gmeta = json.loads(gmeta)
    elif type(gmeta) is not dict:
        raise TypeError("gmeta must be dict, GlobusHTTPResponse, or JSON string")
    # Flatten every entry's content list into one result list.
    results = [con for res in gmeta["gmeta"] for con in res["content"]]
    if not info:
        return results
    fyi = {"total_query_matches": gmeta.get("total")}
    return results, fyi
9,709
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L542-L574
[ "def", "delete_persistent_data", "(", "role", ",", "zk_node", ")", ":", "if", "role", ":", "destroy_volumes", "(", "role", ")", "unreserve_resources", "(", "role", ")", "if", "zk_node", ":", "delete_zk_node", "(", "zk_node", ")" ]
Translate a known Globus Search index into the index UUID . The UUID is the proper way to access indices and will eventually be the only way . This method will return names it cannot disambiguate .
def translate_index(index_name):
    """Translate a known Globus Search index into the index UUID.

    The UUID is the proper way to access indices and will eventually be
    the only way.  Names that cannot be disambiguated are returned
    unchanged.

    Args:
        index_name (str): index name or UUID.

    Returns:
        str: the UUID, or the original name on failure.
    """
    known = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
    if known:
        return known
    # Not a locally known index: ask the Search service.
    try:
        index_info = globus_sdk.SearchClient().get_index(index_name).data
        if not isinstance(index_info, dict):
            raise ValueError("Multiple UUIDs possible")
        return index_info.get("id", index_name)
    except Exception:
        return index_name
9,710
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L577-L598
[ "def", "_CompressHistogram", "(", "self", ",", "histo_ev", ")", ":", "return", "CompressedHistogramEvent", "(", "histo_ev", ".", "wall_time", ",", "histo_ev", ".", "step", ",", "compressor", ".", "compress_histogram_proto", "(", "histo_ev", ".", "histogram_value", ",", "self", ".", "_compression_bps", ")", ")" ]
Perform a Globus Transfer and monitor for success .
def quick_transfer(transfer_client, source_ep, dest_ep, path_list,
                   interval=None, retries=10, notify=True):
    """Perform a Globus Transfer and monitor for success.

    Drives the ``custom_transfer`` generator: ``send(True)`` asks it to keep
    waiting/retrying, ``send(False)`` tells it to give up.  The loop ends
    when the generator is exhausted.

    Args:
        transfer_client: authenticated Transfer client used by custom_transfer.
        source_ep: source endpoint ID.
        dest_ep: destination endpoint ID.
        path_list: paths to transfer (passed straight to custom_transfer).
        interval: NOTE(review) accepted but never used in this body —
            presumably a polling interval handled elsewhere; confirm.
        retries (int): number of retry rounds; ``None`` means 0, ``-1``
            means retry forever.
        notify (bool): forwarded to custom_transfer.

    Returns:
        dict with keys "success", "task_id" and "error" (a human-readable
        message, "No error" on success).
    """
    if retries is None:
        retries = 0
    iterations = 0

    transfer = custom_transfer(
        transfer_client, source_ep, dest_ep, path_list, notify=notify
    )
    # Prime the generator to get the first status dict.
    res = next(transfer)
    try:
        # Loop ends on StopIteration from generator exhaustion
        while True:
            if iterations < retries or retries == -1:
                res = transfer.send(True)
                iterations += 1
            else:
                # Retry budget exhausted: tell the generator to stop.
                res = transfer.send(False)
    except StopIteration:
        pass
    if res["success"]:
        error = "No error"
    else:
        # Build "<code>: <description>" from the fatal_error payload,
        # tolerating missing fields.
        error = "{}: {}".format(
            res.get("fatal_error", {}).get("code", "Error"),
            res.get("fatal_error", {}).get("description", "Unknown")
        )
    return {
        "success": res["success"],
        "task_id": res["task_id"],
        "error": error
    }
9,711
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L805-L863
[ "def", "_generate_examples_validation", "(", "self", ",", "archive", ",", "labels", ")", ":", "# Get the current random seeds.", "numpy_st0", "=", "np", ".", "random", ".", "get_state", "(", ")", "# Set new random seeds.", "np", ".", "random", ".", "seed", "(", "135", ")", "logging", ".", "warning", "(", "'Overwriting cv2 RNG seed.'", ")", "tfds", ".", "core", ".", "lazy_imports", ".", "cv2", ".", "setRNGSeed", "(", "357", ")", "for", "example", "in", "super", "(", "Imagenet2012Corrupted", ",", "self", ")", ".", "_generate_examples_validation", "(", "archive", ",", "labels", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "tf_img", "=", "tf", ".", "image", ".", "decode_jpeg", "(", "example", "[", "'image'", "]", ".", "read", "(", ")", ",", "channels", "=", "3", ")", "image_np", "=", "tfds", ".", "as_numpy", "(", "tf_img", ")", "example", "[", "'image'", "]", "=", "self", ".", "_get_corrupted_example", "(", "image_np", ")", "yield", "example", "# Reset the seeds back to their original values.", "np", ".", "random", ".", "set_state", "(", "numpy_st0", ")" ]
Compare two items without regard to order .
def insensitive_comparison(item1, item2, type_insensitive=False,
                           string_insensitive=False):
    """Compare two items without regard to order.

    Recursively compares dicts (key order ignored), strings (optionally
    ignoring case/whitespace/character order), and other iterable
    containers (element order ignored).  Everything else falls back to
    ``==``.

    Args:
        item1: first item.
        item2: second item.
        type_insensitive (bool): when True, skip the exact-type check.
        string_insensitive (bool): when True, compare strings ignoring
            case, whitespace, and character order.

    Returns:
        bool: True when the items are considered equivalent.

    NOTE(review): when a ``.remove()`` fails during container comparison,
    the fallback below only verifies two-way *membership* plus equal
    lengths — not full multiset equality (e.g. [1, 2, 2] vs [1, 1, 2]
    compares equal).  Confirm this is the intended semantics.
    """
    # If type-sensitive, check types
    if not type_insensitive and type(item1) != type(item2):
        return False
    # Handle Mapping objects (dict)
    if isinstance(item1, Mapping):
        # Second item must be Mapping
        if not isinstance(item2, Mapping):
            return False
        # Items must have the same number of elements
        if not len(item1) == len(item2):
            return False
        # Keys must be the same (order-insensitive, type-insensitive so
        # that mixed key types don't spuriously fail)
        if not insensitive_comparison(list(item1.keys()), list(item2.keys()),
                                      type_insensitive=True):
            return False
        # Each key's value must be the same
        # We can just check item1.items because the keys are the same
        for key, val in item1.items():
            if not insensitive_comparison(item1[key], item2[key],
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive):
                return False
        # Keys and values are the same
        return True
    # Handle strings
    elif isinstance(item1, str):
        # Second item must be string
        if not isinstance(item2, str):
            return False
        # Items must have the same number of elements (except string_insensitive)
        if not len(item1) == len(item2) and not string_insensitive:
            return False
        # If we're insensitive to case, spaces, and order, compare characters
        if string_insensitive:
            # If the string is one character long, skip additional comparison
            if len(item1) <= 1:
                return item1.lower() == item2.lower()
            # Make strings into containers (lists) and discard whitespace
            item1_list = [c for c in item1.lower() if not c.isspace()]
            item2_list = [c for c in item2.lower() if not c.isspace()]
            # The insensitive args shouldn't matter, but they're here just in case
            return insensitive_comparison(item1_list, item2_list,
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive)
        # Otherwise, case and order matter
        else:
            return item1 == item2
    # Handle other Iterable Containers
    elif isinstance(item1, Container) and isinstance(item1, Iterable):
        # Second item must be an Iterable Container
        if not isinstance(item2, Container) or not isinstance(item2, Iterable):
            return False
        # Items must have the same number of elements
        if not len(item1) == len(item2):
            return False
        # Every element in item1 must be in item2, and vice-versa
        # Painfully slow, but unavoidable for deep comparison
        # Each match in item1 removes the corresponding element from item2_copy
        # If they're the same, item2_copy should be empty at the end,
        # unless a .remove() failed, in which case we have to re-match using item2
        item2_copy = list(deepcopy(item2))
        remove_failed = False
        for elem in item1:
            matched = False
            # Try every element
            for candidate in item2:
                # If comparison succeeds, flag a match, remove match from copy, and dump out
                if insensitive_comparison(elem, candidate,
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive):
                    matched = True
                    try:
                        item2_copy.remove(candidate)
                    except ValueError:  # list.remove(x): x not in list
                        # Candidate was already consumed by an earlier match.
                        remove_failed = True
                    break
            # One failure indicates unequivalence
            if not matched:
                return False
        # If all removes succeeded, we can shortcut checking all item2 elements in item1
        if not remove_failed:
            # If the Containers are equivalent, all elements in item2_copy should be removed
            # Otherwise
            return len(item2_copy) == 0
        # If something failed, we have to verify all of item2
        # We can't assume item2 != item1, because removal is comparative
        else:
            for elem in item2:
                matched = False
                # Try every element
                for candidate in item1:
                    # If comparison succeeds, flag a match, remove match from copy, and dump out
                    if insensitive_comparison(elem, candidate,
                                              type_insensitive=type_insensitive,
                                              string_insensitive=string_insensitive):
                        matched = True
                        break
                # One failure indicates unequivalence
                if not matched:
                    return False
            # All elements have a match
            return True
    # Handle otherwise unhandled type (catchall)
    else:
        return item1 == item2
9,712
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L933-L1071
[ "def", "ValidateEndConfig", "(", "self", ",", "config_obj", ",", "errors_fatal", "=", "True", ")", ":", "errors", "=", "super", "(", "WindowsClientRepacker", ",", "self", ")", ".", "ValidateEndConfig", "(", "config_obj", ",", "errors_fatal", "=", "errors_fatal", ")", "install_dir", "=", "config_obj", "[", "\"Client.install_path\"", "]", "for", "path", "in", "config_obj", "[", "\"Client.tempdir_roots\"", "]", ":", "if", "path", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s starts with /, probably has Unix path.\"", "%", "path", ")", "if", "not", "path", ".", "startswith", "(", "install_dir", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s is not inside the install_dir %s, this is \"", "\"a security risk\"", "%", "(", "(", "path", ",", "install_dir", ")", ")", ")", "if", "config_obj", ".", "Get", "(", "\"Logging.path\"", ")", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Logging.path starts with /, probably has Unix path. %s\"", "%", "config_obj", "[", "\"Logging.path\"", "]", ")", "if", "\"Windows\\\\\"", "in", "config_obj", ".", "GetRaw", "(", "\"Logging.path\"", ")", ":", "errors", ".", "append", "(", "\"Windows in Logging.path, you probably want \"", "\"%(WINDIR|env) instead\"", ")", "if", "not", "config_obj", "[", "\"Client.binary_name\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on binary_name %s\"", "%", "config_obj", "[", "\"Client.binary_name\"", "]", ")", "if", "not", "config_obj", "[", "\"Nanny.binary\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on nanny_binary\"", ")", "if", "errors_fatal", "and", "errors", ":", "for", "error", "in", "errors", ":", "logging", ".", "error", "(", "\"Build Config Error: %s\"", ",", "error", ")", "raise", "RuntimeError", "(", "\"Bad configuration generated. 
Terminating.\"", ")", "else", ":", "return", "errors" ]
Yield screen_name text tuples from a json file .
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text) tuples from a json file.

    Handles both plain and gzipped files (detected by a 'gz' suffix).
    Each line may hold one JSON tweet object or a list of them.  Lines
    that fail to parse are skipped with a note on stderr.

    Args:
        json_file (str): path to the (optionally gzipped) file.
        include_date (bool): when True, yield a third element with the
            tweet's created_at value.
    """
    if json_file[-2:] == 'gz':
        handle = gzip.open(json_file, 'rt')
    else:
        handle = io.open(json_file, mode='rt', encoding='utf8')
    for line in handle:
        try:
            records = json.loads(line)
            if type(records) is not list:
                records = [records]
            for record in records:
                if include_date:
                    yield (record['user']['screen_name'].lower(),
                           record['text'], record['created_at'])
                elif 'full_text' in record:
                    # Prefer the untruncated text when available.
                    yield (record['user']['screen_name'].lower(),
                           record['full_text'])
                else:
                    yield (record['user']['screen_name'].lower(),
                           record['text'])
        except Exception as e:
            sys.stderr.write('skipping json error: %s\n' % e)
9,713
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L50-L71
[ "def", "DeleteCampaignFeed", "(", "client", ",", "campaign_feed", ")", ":", "campaign_feed_service", "=", "client", ".", "GetService", "(", "'CampaignFeedService'", ",", "'v201809'", ")", "operation", "=", "{", "'operand'", ":", "campaign_feed", ",", "'operator'", ":", "'REMOVE'", "}", "campaign_feed_service", ".", "mutate", "(", "[", "operation", "]", ")" ]
Yield screen_name string tuples where the string is the concatenation of all tweets of this user .
def extract_tweets(json_file):
    """Yield (screen_name, text) tuples where text is the concatenation
    of all tweets of this user.

    Relies on itertools.groupby semantics: only *consecutive* tweets by
    the same user are merged, so the file is expected to be grouped by
    screen name.
    """
    grouped = groupby(parse_json(json_file), lambda entry: entry[0])
    for screen_name, tweet_iter in grouped:
        combined = ' '.join(tweet[1] for tweet in tweet_iter)
        yield screen_name, combined
9,714
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L74-L79
[ "def", "read_inputs", "(", "self", ")", ":", "oq", "=", "self", ".", "oqparam", "self", ".", "_read_risk_data", "(", ")", "self", ".", "check_overflow", "(", ")", "# check if self.sitecol is too large", "if", "(", "'source_model_logic_tree'", "in", "oq", ".", "inputs", "and", "oq", ".", "hazard_calculation_id", "is", "None", ")", ":", "self", ".", "csm", "=", "readinput", ".", "get_composite_source_model", "(", "oq", ",", "self", ".", "monitor", "(", ")", ",", "srcfilter", "=", "self", ".", "src_filter", ")", "self", ".", "init", "(", ")" ]
Return a matrix where each row corresponds to a Twitter account and each column corresponds to the number of times a term is used by that account .
def vectorize(json_file, vec, dofit=True):
    """Return a matrix where each row corresponds to a Twitter account and
    each column counts a term's usage by that account.

    The file is streamed twice (names, then texts) so tweet texts never
    have to be held in memory all at once.

    Args:
        json_file (str): path to the tweets file.
        vec: vectorizer with fit_transform/transform (e.g. CountVectorizer).
        dofit (bool): fit the vectorizer when True, otherwise only transform.

    Returns:
        (screen_names, X) tuple.
    """
    screen_names = [name for name, _ in extract_tweets(json_file)]
    texts = (text for _, text in extract_tweets(json_file))
    if dofit:
        matrix = vec.fit_transform(texts)
    else:
        matrix = vec.transform(texts)
    return screen_names, matrix
9,715
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L99-L109
[ "def", "cli", "(", "ctx", ",", "project_dir", ")", ":", "exit_code", "=", "SCons", "(", "project_dir", ")", ".", "verify", "(", ")", "ctx", ".", "exit", "(", "exit_code", ")" ]
Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids .
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=frozenset()):
    """Read a file of follower information and return a dictionary mapping
    screen_name to a set of follower ids.

    Each line is ``<id> <screen_name> <follower_id> <follower_id> ...``;
    lines with fewer than two follower ids are ignored.

    Args:
        fname (str): path to the follower file.
        min_followers (int): keep accounts with strictly more followers.
        max_followers (int): keep accounts with at most this many followers.
        blacklist (set-like): lowercase screen names to skip.
            FIX: default changed from the mutable ``set()`` to an
            immutable ``frozenset()`` (same membership behavior, no
            shared-mutable-default hazard).

    Returns:
        dict: lowercase screen_name -> set of int follower ids.
    """
    result = {}
    with open(fname, 'rt') as f:
        for line in f:
            parts = line.split()
            # Require at least two follower ids per line.
            if len(parts) > 3:
                if parts[1].lower() not in blacklist:
                    followers = set(int(x) for x in parts[2:])
                    if min_followers < len(followers) <= max_followers:
                        result[parts[1].lower()] = followers
                else:
                    print('skipping exemplar', parts[1].lower())
    return result
9,716
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L171-L184
[ "def", "delete_expired_requests", "(", ")", ":", "InclusionRequest", ".", "query", ".", "filter_by", "(", "InclusionRequest", ".", "expiry_date", ">", "datetime", ".", "utcnow", "(", ")", ")", ".", "delete", "(", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Return the average Jaccard similarity between a brand's followers and the followers of each exemplar. We merge all exemplar followers into one big pseudo-account.
def jaccard_merge(brands, exemplars):
    """Return the Jaccard similarity between each brand's followers and the
    followers of the exemplars, where all exemplar followers are merged
    into one big pseudo-account.

    Args:
        brands: iterable of (brand, follower_set) pairs.
        exemplars: dict mapping exemplar name to follower set.

    Returns:
        dict: brand -> similarity score.
    """
    merged_followers = set()
    for followers in exemplars.values():
        merged_followers |= followers
    return {brand: _jaccard(followers, merged_followers)
            for brand, followers in brands}
9,717
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L235-L246
[ "def", "is_valid_variable_name", "(", "string_to_check", ")", ":", "try", ":", "parse", "(", "'{} = None'", ".", "format", "(", "string_to_check", ")", ")", "return", "True", "except", "(", "SyntaxError", ",", "ValueError", ",", "TypeError", ")", ":", "return", "False" ]
Return the cosine similarity between a brand's followers and the exemplars.
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Return the cosine similarity between a brand's followers and the
    exemplars.

    Args:
        brands: iterable of (brand, follower_set) pairs.
        exemplars: dict mapping exemplar name to follower set.
        weighted_avg (bool): weight each exemplar inversely by its
            follower count instead of a plain mean.
        sqrt (bool): take the square root of each final score.

    Returns:
        dict: brand -> similarity score.
    """
    scores = {}
    for brand, followers in brands:
        similarities = [_cosine(followers, others)
                        for others in exemplars.values()]
        if weighted_avg:
            weights = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(similarities, weights=weights)
        else:
            scores[brand] = 1. * sum(similarities) / len(exemplars)
    if sqrt:
        scores = {b: math.sqrt(s) for b, s in scores.items()}
    return scores
9,718
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L319-L332
[ "def", "clean_for_storage", "(", "self", ",", "data", ")", ":", "data", "=", "self", ".", "data_to_unicode", "(", "data", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "k", "in", "dict", "(", "data", ")", ".", "keys", "(", ")", ":", "if", "k", "==", "'_id'", ":", "del", "data", "[", "k", "]", "continue", "if", "'.'", "in", "k", ":", "new_k", "=", "k", ".", "replace", "(", "'.'", ",", "'_'", ")", "data", "[", "new_k", "]", "=", "data", "[", "k", "]", "del", "data", "[", "k", "]", "k", "=", "new_k", "if", "isinstance", "(", "data", "[", "k", "]", ",", "dict", ")", ":", "data", "[", "k", "]", "=", "self", ".", "clean_for_storage", "(", "data", "[", "k", "]", ")", "elif", "isinstance", "(", "data", "[", "k", "]", ",", "list", ")", ":", "data", "[", "k", "]", "=", "[", "self", ".", "clean_for_storage", "(", "item", ")", "for", "item", "in", "data", "[", "k", "]", "]", "return", "data" ]
Generate a filename like Google for a song based on metadata .
def suggest_filename(metadata):
    """Generate a filename like Google for a song based on metadata.

    Picks the track-number field by metadata flavor (Music Manager,
    Mobile, or audio-metadata/mutagen), zero-pads it to two digits and
    prepends it to the title; unknown flavors fall back to "00 <title>".
    Invalid filesystem characters are replaced before returning.
    """
    has_title = 'title' in metadata
    if has_title and 'track_number' in metadata:
        # Music Manager.
        name = f"{metadata['track_number']:0>2} {metadata['title']}"
    elif has_title and 'trackNumber' in metadata:
        # Mobile.
        name = f"{metadata['trackNumber']:0>2} {metadata['title']}"
    elif has_title and 'tracknumber' in metadata:
        # audio-metadata/mutagen.
        track_number = _split_number_field(
            list_to_single_value(metadata['tracknumber'])
        )
        title = list_to_single_value(metadata['title'])
        name = f"{track_number:0>2} {title}"
    else:
        name = f"00 {list_to_single_value(metadata.get('title', ['']))}"
    return _replace_invalid_characters(name)
9,719
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/misc.py#L25-L51
[ "def", "bitwise_xor", "(", "bs0", ":", "str", ",", "bs1", ":", "str", ")", "->", "str", ":", "if", "len", "(", "bs0", ")", "!=", "len", "(", "bs1", ")", ":", "raise", "ValueError", "(", "\"Bit strings are not of equal length\"", ")", "n_bits", "=", "len", "(", "bs0", ")", "return", "PADDED_BINARY_BIT_STRING", ".", "format", "(", "xor", "(", "int", "(", "bs0", ",", "2", ")", ",", "int", "(", "bs1", ",", "2", ")", ")", ",", "n_bits", ")" ]
Create directory structure and file name based on metadata template .
def template_to_filepath ( template , metadata , template_patterns = None ) : path = Path ( template ) if template_patterns is None : template_patterns = TEMPLATE_PATTERNS suggested_filename = suggest_filename ( metadata ) if ( path == Path . cwd ( ) or path == Path ( '%suggested%' ) ) : filepath = Path ( suggested_filename ) elif any ( template_pattern in path . parts for template_pattern in template_patterns ) : if template . endswith ( ( '/' , '\\' ) ) : template += suggested_filename path = Path ( template . replace ( '%suggested%' , suggested_filename ) ) parts = [ ] for part in path . parts : if part == path . anchor : parts . append ( part ) else : for key in template_patterns : if ( # pragma: no branch key in part and any ( field in metadata for field in template_patterns [ key ] ) ) : field = more_itertools . first_true ( template_patterns [ key ] , pred = lambda k : k in metadata ) if key . startswith ( ( '%disc' , '%track' ) ) : number = _split_number_field ( str ( list_to_single_value ( metadata [ field ] ) ) ) if key . endswith ( '2%' ) : metadata [ field ] = number . zfill ( 2 ) else : metadata [ field ] = number part = part . replace ( key , list_to_single_value ( metadata [ field ] ) ) parts . append ( _replace_invalid_characters ( part ) ) filepath = Path ( * parts ) elif '%suggested%' in template : filepath = Path ( template . replace ( '%suggested%' , suggested_filename ) ) elif template . endswith ( ( '/' , '\\' ) ) : filepath = path / suggested_filename else : filepath = path return filepath
9,720
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/misc.py#L54-L138
[ "def", "conditions", "(", "self", ",", "start", ",", "last_attempt", ")", ":", "if", "time", ".", "time", "(", ")", "-", "start", ">", "self", ".", "timeout", ":", "yield", "WaitCondition", ".", "Timedout", "return", "if", "last_attempt", "is", "not", "None", "and", "time", ".", "time", "(", ")", "-", "last_attempt", "<", "self", ".", "wait_between_attempts", ":", "yield", "WaitCondition", ".", "KeepWaiting", "return", "if", "self", ".", "greps", "is", "not", "NotSpecified", ":", "for", "name", ",", "val", "in", "self", ".", "greps", ".", "items", "(", ")", ":", "yield", "'grep \"{0}\" \"{1}\"'", ".", "format", "(", "val", ",", "name", ")", "if", "self", ".", "file_value", "is", "not", "NotSpecified", ":", "for", "name", ",", "val", "in", "self", ".", "file_value", ".", "items", "(", ")", ":", "command", "=", "'diff <(echo {0}) <(cat {1})'", ".", "format", "(", "val", ",", "name", ")", "if", "not", "self", ".", "harpoon", ".", "debug", ":", "command", "=", "\"{0} > /dev/null\"", ".", "format", "(", "command", ")", "yield", "command", "if", "self", ".", "port_open", "is", "not", "NotSpecified", ":", "for", "port", "in", "self", ".", "port_open", ":", "yield", "'nc -z 127.0.0.1 {0}'", ".", "format", "(", "port", ")", "if", "self", ".", "curl_result", "is", "not", "NotSpecified", ":", "for", "url", ",", "content", "in", "self", ".", "curl_result", ".", "items", "(", ")", ":", "yield", "'diff <(curl \"{0}\") <(echo {1})'", ".", "format", "(", "url", ",", "content", ")", "if", "self", ".", "file_exists", "is", "not", "NotSpecified", ":", "for", "path", "in", "self", ".", "file_exists", ":", "yield", "'cat {0} > /dev/null'", ".", "format", "(", "path", ")", "if", "self", ".", "command", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", ":", "for", "command", "in", "self", ".", "command", ":", "yield", "command" ]
Match an item metadata field value by pattern .
def _match_field ( field_value , pattern , ignore_case = False , normalize_values = False ) : if normalize_values : ignore_case = True normalize = normalize_value if normalize_values else lambda x : str ( x ) search = functools . partial ( re . search , flags = re . I ) if ignore_case else re . search # audio_metadata fields contain a list of values. if isinstance ( field_value , list ) : return any ( search ( pattern , normalize ( value ) ) for value in field_value ) else : return search ( pattern , normalize ( field_value ) )
9,721
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L14-L43
[ "def", "urisToBrowser", "(", "uris", "=", "[", "]", ",", "autoraise", "=", "True", ")", ":", "# Cloning stdout (1) and stderr (2)", "savout1", "=", "os", ".", "dup", "(", "1", ")", "savout2", "=", "os", ".", "dup", "(", "2", ")", "# Closing them", "os", ".", "close", "(", "1", ")", "os", ".", "close", "(", "2", ")", "os", ".", "open", "(", "os", ".", "devnull", ",", "os", ".", "O_RDWR", ")", "try", ":", "for", "uri", "in", "uris", ":", "# Opening the Tor URI using onion.cab proxy", "if", "\".onion\"", "in", "uri", ":", "wb", ".", "open", "(", "uri", ".", "replace", "(", "\".onion\"", ",", "\".onion.city\"", ")", ",", "new", "=", "2", ",", "autoraise", "=", "autoraise", ")", "else", ":", "wb", ".", "open", "(", "uri", ",", "new", "=", "2", ",", "autoraise", "=", "autoraise", ")", "finally", ":", "# Reopening them...", "os", ".", "dup2", "(", "savout1", ",", "1", ")", "os", ".", "dup2", "(", "savout2", ",", "2", ")" ]
Match items by metadata .
def _match_item ( item , any_all = any , ignore_case = False , normalize_values = False , * * kwargs ) : it = get_item_tags ( item ) return any_all ( _match_field ( get_field ( it , field ) , pattern , ignore_case = ignore_case , normalize_values = normalize_values ) for field , patterns in kwargs . items ( ) for pattern in patterns )
9,722
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L46-L73
[ "def", "replace_postgres_db", "(", "self", ",", "file_url", ")", ":", "self", ".", "print_message", "(", "\"Replacing postgres database\"", ")", "if", "file_url", ":", "self", ".", "print_message", "(", "\"Sourcing data from online backup file '%s'\"", "%", "file_url", ")", "source_file", "=", "self", ".", "download_file_from_url", "(", "self", ".", "args", ".", "source_app", ",", "file_url", ")", "elif", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ":", "self", ".", "print_message", "(", "\"Sourcing data from database '%s'\"", "%", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ")", "source_file", "=", "self", ".", "dump_database", "(", ")", "else", ":", "self", ".", "print_message", "(", "\"Sourcing data from local backup file %s\"", "%", "self", ".", "args", ".", "file", ")", "source_file", "=", "self", ".", "args", ".", "file", "self", ".", "drop_database", "(", ")", "self", ".", "create_database", "(", ")", "source_file", "=", "self", ".", "unzip_file_if_necessary", "(", "source_file", ")", "self", ".", "print_message", "(", "\"Importing '%s' into database '%s'\"", "%", "(", "source_file", ",", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ")", ")", "args", "=", "[", "\"pg_restore\"", ",", "\"--no-acl\"", ",", "\"--no-owner\"", ",", "\"--dbname=%s\"", "%", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ",", "source_file", ",", "]", "args", ".", "extend", "(", "self", ".", "databases", "[", "'destination'", "]", "[", "'args'", "]", ")", "subprocess", ".", "check_call", "(", "args", ")" ]
Exclude items by matching metadata .
def exclude_items ( items , any_all = any , ignore_case = False , normalize_values = False , * * kwargs ) : if kwargs : match = functools . partial ( _match_item , any_all = any_all , ignore_case = ignore_case , normalize_values = normalize_values , * * kwargs ) return filterfalse ( match , items ) else : return iter ( items )
9,723
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L76-L108
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "prior_state", "=", "self", ".", "state", "if", "state", "==", "prior_state", ":", "return", "state", "if", "state", "==", "tds_base", ".", "TDS_PENDING", ":", "if", "prior_state", "in", "(", "tds_base", ".", "TDS_READING", ",", "tds_base", ".", "TDS_QUERYING", ")", ":", "self", ".", "state", "=", "tds_base", ".", "TDS_PENDING", "else", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot chage query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "elif", "state", "==", "tds_base", ".", "TDS_READING", ":", "# transition to READING are valid only from PENDING", "if", "self", ".", "state", "!=", "tds_base", ".", "TDS_PENDING", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "else", ":", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_IDLE", ":", "if", "prior_state", "==", "tds_base", ".", "TDS_DEAD", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_DEAD", ":", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_QUERYING", ":", "if", "self", ".", "state", "==", "tds_base", ".", "TDS_DEAD", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", 
"state", "]", ")", ")", "elif", "self", ".", "state", "!=", "tds_base", ".", "TDS_IDLE", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "else", ":", "self", ".", "rows_affected", "=", "tds_base", ".", "TDS_NO_COUNT", "self", ".", "internal_sp_called", "=", "0", "self", ".", "state", "=", "state", "else", ":", "assert", "False", "return", "self", ".", "state" ]
Include items by matching metadata .
def include_items ( items , any_all = any , ignore_case = False , normalize_values = False , * * kwargs ) : if kwargs : match = functools . partial ( _match_item , any_all = any_all , ignore_case = ignore_case , normalize_values = normalize_values , * * kwargs ) return filter ( match , items ) else : return iter ( items )
9,724
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L111-L143
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "prior_state", "=", "self", ".", "state", "if", "state", "==", "prior_state", ":", "return", "state", "if", "state", "==", "tds_base", ".", "TDS_PENDING", ":", "if", "prior_state", "in", "(", "tds_base", ".", "TDS_READING", ",", "tds_base", ".", "TDS_QUERYING", ")", ":", "self", ".", "state", "=", "tds_base", ".", "TDS_PENDING", "else", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot chage query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "elif", "state", "==", "tds_base", ".", "TDS_READING", ":", "# transition to READING are valid only from PENDING", "if", "self", ".", "state", "!=", "tds_base", ".", "TDS_PENDING", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "else", ":", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_IDLE", ":", "if", "prior_state", "==", "tds_base", ".", "TDS_DEAD", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_DEAD", ":", "self", ".", "state", "=", "state", "elif", "state", "==", "tds_base", ".", "TDS_QUERYING", ":", "if", "self", ".", "state", "==", "tds_base", ".", "TDS_DEAD", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", 
"state", "]", ")", ")", "elif", "self", ".", "state", "!=", "tds_base", ".", "TDS_IDLE", ":", "raise", "tds_base", ".", "InterfaceError", "(", "'logic error: cannot change query state from {0} to {1}'", ".", "format", "(", "tds_base", ".", "state_names", "[", "prior_state", "]", ",", "tds_base", ".", "state_names", "[", "state", "]", ")", ")", "else", ":", "self", ".", "rows_affected", "=", "tds_base", ".", "TDS_NO_COUNT", "self", ".", "internal_sp_called", "=", "0", "self", ".", "state", "=", "state", "else", ":", "assert", "False", "return", "self", ".", "state" ]
Compute the qth percentile of the data along the specified axis . Simpler version than the numpy version that always flattens input arrays .
def percentile ( a , q ) : if not a : return None if isinstance ( q , ( float , int ) ) : qq = [ q ] elif isinstance ( q , ( tuple , list ) ) : qq = q else : raise ValueError ( "Quantile type {} not understood" . format ( type ( q ) ) ) if isinstance ( a , ( float , int ) ) : a = [ a ] for i in range ( len ( qq ) ) : if qq [ i ] < 0. or qq [ i ] > 100. : raise ValueError ( "Percentiles must be in the range [0,100]" ) qq [ i ] /= 100. a = sorted ( flatten ( a ) ) r = [ ] for q in qq : k = ( len ( a ) - 1 ) * q f = math . floor ( k ) c = math . ceil ( k ) if f == c : r . append ( float ( a [ int ( k ) ] ) ) continue d0 = a [ int ( f ) ] * ( c - k ) d1 = a [ int ( c ) ] * ( k - f ) r . append ( float ( d0 + d1 ) ) if len ( r ) == 1 : return r [ 0 ] return r
9,725
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/statistics/percentile.py#L33-L90
[ "def", "_check", "(", "self", ")", ":", "import", "time", "if", "self", ".", "expires_in", "is", "None", "or", "self", ".", "authenticated", "is", "None", ":", "return", "False", "current", "=", "time", ".", "time", "(", ")", "expire_time", "=", "self", ".", "authenticated", "+", "self", ".", "expires_in", "return", "expire_time", ">", "current" ]
Helper to filter the closest station to a given location .
def _filter_closest ( self , lat , lon , stations ) : current_location = ( lat , lon ) closest = None closest_distance = None for station in stations : station_loc = ( station . latitude , station . longitude ) station_distance = distance . distance ( current_location , station_loc ) . km if not closest or station_distance < closest_distance : closest = station closest_distance = station_distance return closest
9,726
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/station.py#L20-L34
[ "def", "sample", "(", "config", ",", "samples", ")", ":", "url", "=", "get_api_path", "(", "'sample.json'", ")", "multiple_files", "=", "[", "]", "images", "=", "[", "s", "[", "'image'", "]", "for", "s", "in", "samples", "]", "labels", "=", "[", "s", "[", "'label'", "]", "for", "s", "in", "samples", "]", "for", "image", "in", "images", ":", "multiple_files", ".", "append", "(", "(", "'images'", ",", "(", "image", ",", "open", "(", "image", ",", "'rb'", ")", ",", "'image/png'", ")", ")", ")", "headers", "=", "get_headers", "(", "no_content_type", "=", "True", ")", "headers", "[", "\"config\"", "]", "=", "json", ".", "dumps", "(", "config", ",", "cls", "=", "HCEncoder", ")", "headers", "[", "\"labels\"", "]", "=", "json", ".", "dumps", "(", "labels", ")", "print", "(", "\"With headers\"", ",", "headers", ")", "try", ":", "r", "=", "requests", ".", "post", "(", "url", ",", "files", "=", "multiple_files", ",", "headers", "=", "headers", ",", "timeout", "=", "30", ")", "return", "r", ".", "text", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "0", "]", "print", "(", "\"Error while calling hyperchamber - \"", ",", "e", ")", "return", "None" ]
Retrieve the nearest station .
async def get ( cls , websession , lat , lon ) : self = Station ( websession ) stations = await self . api . stations ( ) self . station = self . _filter_closest ( lat , lon , stations ) logger . info ( "Using %s as weather station" , self . station . local ) return self
9,727
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/station.py#L37-L48
[ "def", "sample", "(", "config", ",", "samples", ")", ":", "url", "=", "get_api_path", "(", "'sample.json'", ")", "multiple_files", "=", "[", "]", "images", "=", "[", "s", "[", "'image'", "]", "for", "s", "in", "samples", "]", "labels", "=", "[", "s", "[", "'label'", "]", "for", "s", "in", "samples", "]", "for", "image", "in", "images", ":", "multiple_files", ".", "append", "(", "(", "'images'", ",", "(", "image", ",", "open", "(", "image", ",", "'rb'", ")", ",", "'image/png'", ")", ")", ")", "headers", "=", "get_headers", "(", "no_content_type", "=", "True", ")", "headers", "[", "\"config\"", "]", "=", "json", ".", "dumps", "(", "config", ",", "cls", "=", "HCEncoder", ")", "headers", "[", "\"labels\"", "]", "=", "json", ".", "dumps", "(", "labels", ")", "print", "(", "\"With headers\"", ",", "headers", ")", "try", ":", "r", "=", "requests", ".", "post", "(", "url", ",", "files", "=", "multiple_files", ",", "headers", "=", "headers", ",", "timeout", "=", "30", ")", "return", "r", ".", "text", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "0", "]", "print", "(", "\"Error while calling hyperchamber - \"", ",", "e", ")", "return", "None" ]
Loads the channels and tools given the plugin path specified
def load_channels ( self ) : channels = [ ] # Try to get channels for channel_name in self . channel_names : channel_path = os . path . join ( self . path , "channels" ) sys . path . append ( self . path ) mod = imp . load_module ( channel_name , * imp . find_module ( channel_name , [ channel_path ] ) ) cls = getattr ( mod , channel_name . title ( ) . replace ( "_" , "" ) ) channel_id = channel_name . split ( "_" ) [ 0 ] # TODO: what about up_to_timestamp? try : channels . append ( cls ( channel_id , up_to_timestamp = None ) ) except TypeError : channels . append ( cls ( channel_id ) ) # Try to get tools if self . has_tools : tool_path = os . path . join ( self . path , "tools" ) # Create a tool channel using this path channel_id = self . channel_id_prefix + "_" + "tools" channel = ToolChannel ( channel_id , tool_path , up_to_timestamp = utcnow ( ) ) channels . append ( channel ) if self . has_assets : assset_path = os . path . join ( os . path . abspath ( self . path ) , "assets" ) channel_id = self . channel_id_prefix + "_" + "assets" channel = AssetsFileChannel ( channel_id , assset_path , up_to_timestamp = utcnow ( ) ) channels . append ( channel ) # # from . import TimeInterval # channel.streams.values()[0].window(TimeInterval.up_to_now()).items() return channels
9,728
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/plugin_manager.py#L38-L76
[ "def", "mark_as_forwarded", "(", "self", ",", "req", ":", "Request", ",", "to", ":", "int", ")", ":", "self", "[", "req", ".", "key", "]", ".", "forwarded", "=", "True", "self", "[", "req", ".", "key", "]", ".", "forwardedTo", "=", "to", "self", "[", "req", ".", "key", "]", ".", "unordered_by_replicas_num", "=", "to" ]
return the number of byte - chunks in a swatch object
def chunk_count ( swatch ) : if type ( swatch ) is dict : if 'data' in swatch : return 1 if 'swatches' in swatch : return 2 + len ( swatch [ 'swatches' ] ) else : return sum ( map ( chunk_count , swatch ) )
9,729
https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L18-L30
[ "def", "trace_job", "(", "self", ",", "jobId", ")", ":", "header", "=", "self", ".", "__check_authentication", "(", ")", "status_url", "=", "self", ".", "address", "+", "\"/jobs/\"", "+", "jobId", "+", "\"/trace\"", "status_resp", "=", "requests", ".", "get", "(", "status_url", ",", "headers", "=", "header", ")", "if", "status_resp", ".", "status_code", "!=", "200", ":", "raise", "ValueError", "(", "\"Code {}. {}\"", ".", "format", "(", "status_resp", ".", "status_code", ",", "status_resp", ".", "json", "(", ")", ".", "get", "(", "\"error\"", ")", ")", ")", "return", "status_resp", ".", "json", "(", ")" ]
builds up a byte - chunk for a color
def chunk_for_color ( obj ) : title = obj [ 'name' ] + '\0' title_length = len ( title ) chunk = struct . pack ( '>H' , title_length ) chunk += title . encode ( 'utf-16be' ) mode = obj [ 'data' ] [ 'mode' ] . encode ( ) values = obj [ 'data' ] [ 'values' ] color_type = obj [ 'type' ] fmt = { b'RGB' : '!fff' , b'Gray' : '!f' , b'CMYK' : '!ffff' , b'LAB' : '!fff' } if mode in fmt : padded_mode = mode . decode ( ) . ljust ( 4 ) . encode ( ) chunk += struct . pack ( '!4s' , padded_mode ) # the color mode chunk += struct . pack ( fmt [ mode ] , * values ) # the color values color_types = [ 'Global' , 'Spot' , 'Process' ] if color_type in color_types : color_int = color_types . index ( color_type ) chunk += struct . pack ( '>h' , color_int ) # append swatch mode chunk = struct . pack ( '>I' , len ( chunk ) ) + chunk # prepend the chunk size return b'\x00\x01' + chunk
9,730
https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L39-L83
[ "def", "load", "(", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "normcase", "(", "filename", ")", "try", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "u", "=", "pickle", ".", "Unpickler", "(", "f", ")", "return", "u", ".", "load", "(", ")", "except", "IOError", ":", "raise", "LogOpeningError", "(", "'No file found for %s'", "%", "filename", ",", "filename", ")" ]
produce a byte - chunk for a folder of colors
def chunk_for_folder ( obj ) : title = obj [ 'name' ] + '\0' title_length = len ( title ) chunk_body = struct . pack ( '>H' , title_length ) # title length chunk_body += title . encode ( 'utf-16be' ) # title chunk_head = b'\xC0\x01' # folder header chunk_head += struct . pack ( '>I' , len ( chunk_body ) ) # precede entire chunk by folder header and size of folder chunk = chunk_head + chunk_body chunk += b'' . join ( [ chunk_for_color ( c ) for c in obj [ 'swatches' ] ] ) chunk += b'\xC0\x02' # folder terminator chunk chunk += b'\x00\x00\x00\x00' # folder terminator return chunk
9,731
https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L85-L121
[ "def", "status", "(", "self", ",", "remote", "=", "False", ")", ":", "if", "remote", ":", "components", "=", "urlparse", ".", "urlparse", "(", "self", ".", "endpoint", ")", "try", ":", "result", "=", "self", ".", "session", ".", "get", "(", "components", "[", "0", "]", "+", "\"://\"", "+", "components", "[", "1", "]", "+", "\"/status\"", ",", "timeout", "=", "self", ".", "timeout", ")", "except", "Exception", "as", "e", ":", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"Failed to connect to server for status: %s\"", ",", "e", ")", "result", "=", "None", "if", "result", "and", "result", ".", "status_code", "==", "200", ":", "self", ".", "server_status", "=", "result", ".", "json", "(", ")", "self", ".", "server_status", "[", "\"endpoint\"", "]", "=", "self", ".", "endpoint", "elif", "result", ":", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"Server status response not understandable: Status: %d, Body: %s\"", ",", "result", ".", "status_code", ",", "result", ".", "text", ")", "self", ".", "server_status", "=", "{", "\"endpoint\"", ":", "self", ".", "endpoint", ",", "\"status\"", ":", "(", "\"Unexpected HTTP status \"", "+", "str", "(", "result", ".", "status_code", ")", "+", "\" at: \"", "+", "strftime", "(", "\"%d %b %Y %H:%M:%S +0000\"", ",", "gmtime", "(", ")", ")", ")", "}", "else", ":", "self", ".", "server_status", "=", "{", "\"endpoint\"", ":", "self", ".", "endpoint", ",", "\"status\"", ":", "\"Unreachable at: \"", "+", "strftime", "(", "\"%d %b %Y %H:%M:%S +0000\"", ",", "gmtime", "(", ")", ")", "}", "return", "self", ".", "local_status", ",", "self", ".", "server_status" ]
Iterate over screen names in a file one per line .
def iter_lines ( filename ) : with open ( filename , 'rt' ) as idfile : for line in idfile : screen_name = line . strip ( ) if len ( screen_name ) > 0 : yield screen_name . split ( ) [ 0 ]
9,732
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L47-L53
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Fetch up to limit tweets for each account in account_file and write to outfile .
def fetch_tweets ( account_file , outfile , limit ) : print ( 'fetching tweets for accounts in' , account_file ) outf = io . open ( outfile , 'wt' ) for screen_name in iter_lines ( account_file ) : print ( '\nFetching tweets for %s' % screen_name ) for tweet in twutil . collect . tweets_for_user ( screen_name , limit ) : tweet [ 'user' ] [ 'screen_name' ] = screen_name outf . write ( '%s\n' % json . dumps ( tweet , ensure_ascii = False ) ) outf . flush ( )
9,733
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L85-L95
[ "def", "verify_security_data", "(", "security", ")", ":", "random_token", "=", "security", "[", "TOKEN_RANDOM", "]", "hashed_token", "=", "security", "[", "TOKEN_HASHED", "]", "return", "str", "(", "hashed_token", ")", "==", "str", "(", "compute_token", "(", "random_token", ")", ")" ]
Fetch top lists matching this keyword then return Twitter screen names along with the number of different lists on which each appers ..
def fetch_exemplars ( keyword , outfile , n = 50 ) : list_urls = fetch_lists ( keyword , n ) print ( 'found %d lists for %s' % ( len ( list_urls ) , keyword ) ) counts = Counter ( ) for list_url in list_urls : counts . update ( fetch_list_members ( list_url ) ) # Write to file. outf = io . open ( outfile , 'wt' ) for handle in sorted ( counts ) : outf . write ( '%s\t%d\n' % ( handle , counts [ handle ] ) ) outf . close ( ) print ( 'saved exemplars to' , outfile )
9,734
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L171-L184
[ "def", "_check_rr_name", "(", "self", ",", "rr_name", ")", ":", "# type: (Optional[str]) -> bytes", "if", "self", ".", "rock_ridge", ":", "if", "not", "rr_name", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A rock ridge name must be passed for a rock-ridge ISO'", ")", "if", "rr_name", ".", "count", "(", "'/'", ")", "!=", "0", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A rock ridge name must be relative'", ")", "return", "rr_name", ".", "encode", "(", "'utf-8'", ")", "if", "rr_name", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A rock ridge name can only be specified for a rock-ridge ISO'", ")", "return", "b''" ]
Set up the controlhost connection
def _init_controlhost ( self ) : log . debug ( "Connecting to JLigier" ) self . client = Client ( self . host , self . port ) self . client . _connect ( ) log . debug ( "Subscribing to tags: {0}" . format ( self . tags ) ) for tag in self . tags . split ( ',' ) : self . client . subscribe ( tag . strip ( ) , mode = self . subscription_mode ) log . debug ( "Controlhost initialisation done." )
9,735
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L88-L96
[ "def", "stats", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "percentage", "p", "=", "OptionParser", "(", "stats", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "blocksfile", ",", "=", "args", "fp", "=", "open", "(", "blocksfile", ")", "counts", "=", "defaultdict", "(", "int", ")", "total", "=", "orthologous", "=", "0", "for", "row", "in", "fp", ":", "atoms", "=", "row", ".", "rstrip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "hits", "=", "[", "x", "for", "x", "in", "atoms", "[", "1", ":", "]", "if", "x", "!=", "'.'", "]", "counts", "[", "len", "(", "hits", ")", "]", "+=", "1", "total", "+=", "1", "if", "atoms", "[", "1", "]", "!=", "'.'", ":", "orthologous", "+=", "1", "print", "(", "\"Total lines: {0}\"", ".", "format", "(", "total", ")", ",", "file", "=", "sys", ".", "stderr", ")", "for", "i", ",", "n", "in", "sorted", "(", "counts", ".", "items", "(", ")", ")", ":", "print", "(", "\"Count {0}: {1}\"", ".", "format", "(", "i", ",", "percentage", "(", "n", ",", "total", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "file", "=", "sys", ".", "stderr", ")", "matches", "=", "sum", "(", "n", "for", "i", ",", "n", "in", "counts", ".", "items", "(", ")", "if", "i", "!=", "0", ")", "print", "(", "\"Total lines with matches: {0}\"", ".", "format", "(", "percentage", "(", "matches", ",", "total", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "for", "i", ",", "n", "in", "sorted", "(", "counts", ".", "items", "(", ")", ")", ":", "if", "i", "==", "0", ":", "continue", "print", "(", "\"Count {0}: {1}\"", ".", "format", "(", "i", ",", "percentage", "(", "n", ",", "matches", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Orthologous matches: {0}\"", ".", "format", "(", 
"percentage", "(", "orthologous", ",", "matches", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Wait for the next packet and put it in the blob
def process ( self , blob ) : # self._add_process_dt() try : log . debug ( "Waiting for queue items." ) prefix , data = self . queue . get ( timeout = self . timeout ) log . debug ( "Got {0} bytes from queue." . format ( len ( data ) ) ) except Empty : log . warning ( "ControlHost timeout ({0}s) reached" . format ( self . timeout ) ) raise StopIteration ( "ControlHost timeout reached." ) blob [ self . key_for_prefix ] = prefix blob [ self . key_for_data ] = data return blob
9,736
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L140-L154
[ "def", "update_security_of_password", "(", "self", ",", "ID", ",", "data", ")", ":", "# http://teampasswordmanager.com/docs/api-passwords/#update_security_password", "log", ".", "info", "(", "'Update security of password %s with %s'", "%", "(", "ID", ",", "data", ")", ")", "self", ".", "put", "(", "'passwords/%s/security.json'", "%", "ID", ",", "data", ")" ]
Clean up the JLigier controlhost connection
def finish ( self ) : log . debug ( "Disconnecting from JLigier." ) self . client . socket . shutdown ( socket . SHUT_RDWR ) self . client . _disconnect ( )
9,737
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L176-L180
[ "def", "thaw", "(", "vault_client", ",", "src_file", ",", "opt", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "src_file", ")", ":", "raise", "aomi", ".", "exceptions", ".", "AomiFile", "(", "\"%s does not exist\"", "%", "src_file", ")", "tmp_dir", "=", "ensure_tmpdir", "(", ")", "zip_file", "=", "thaw_decrypt", "(", "vault_client", ",", "src_file", ",", "tmp_dir", ",", "opt", ")", "archive", "=", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'r'", ")", "for", "archive_file", "in", "archive", ".", "namelist", "(", ")", ":", "archive", ".", "extract", "(", "archive_file", ",", "tmp_dir", ")", "os", ".", "chmod", "(", "\"%s/%s\"", "%", "(", "tmp_dir", ",", "archive_file", ")", ",", "0o640", ")", "LOG", ".", "debug", "(", "\"Extracted %s from archive\"", ",", "archive_file", ")", "LOG", ".", "info", "(", "\"Thawing secrets into %s\"", ",", "opt", ".", "secrets", ")", "config", "=", "get_secretfile", "(", "opt", ")", "Context", ".", "load", "(", "config", ",", "opt", ")", ".", "thaw", "(", "tmp_dir", ")" ]
List available variables and applies any filters .
def list_variables ( self ) : station_codes = self . _get_station_codes ( ) station_codes = self . _apply_features_filter ( station_codes ) variables = self . _list_variables ( station_codes ) if hasattr ( self , "_variables" ) and self . variables is not None : variables . intersection_update ( set ( self . variables ) ) return list ( variables )
9,738
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L50-L61
[ "def", "ekappr", "(", "handle", ",", "segno", ")", ":", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "segno", "=", "ctypes", ".", "c_int", "(", "segno", ")", "recno", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "ekappr_c", "(", "handle", ",", "segno", ",", "ctypes", ".", "byref", "(", "recno", ")", ")", "return", "recno", ".", "value" ]
Internal helper to list the variables for the given station codes .
def _list_variables ( self , station_codes ) : # sample output from obs retrieval: # # DD9452D0 # HP(SRBM5) # 2013-07-22 19:30 45.97 # HT(SRBM5) # 2013-07-22 19:30 44.29 # PC(SRBM5) # 2013-07-22 19:30 36.19 # rvar = re . compile ( r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)" ) variables = set ( ) resp = requests . post ( self . obs_retrieval_url , data = { "state" : "nil" , "hsa" : "nil" , "of" : "3" , "extraids" : " " . join ( station_codes ) , "sinceday" : - 1 , } , ) resp . raise_for_status ( ) list ( map ( variables . add , rvar . findall ( resp . text ) ) ) return variables
9,739
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L63-L93
[ "def", "guess_extension", "(", "amimetype", ",", "normalize", "=", "False", ")", ":", "ext", "=", "_mimes", ".", "guess_extension", "(", "amimetype", ")", "if", "ext", "and", "normalize", ":", "# Normalize some common magic mis-interpreation", "ext", "=", "{", "'.asc'", ":", "'.txt'", ",", "'.obj'", ":", "'.bin'", "}", ".", "get", "(", "ext", ",", "ext", ")", "from", "invenio", ".", "legacy", ".", "bibdocfile", ".", "api_normalizer", "import", "normalize_format", "return", "normalize_format", "(", "ext", ")", "return", "ext" ]
If the features filter is set this will return the intersection of those filter items and the given station codes .
def _apply_features_filter ( self , station_codes ) : # apply features filter if hasattr ( self , "features" ) and self . features is not None : station_codes = set ( station_codes ) station_codes = list ( station_codes . intersection ( set ( self . features ) ) ) return station_codes
9,740
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L124-L136
[ "def", "to_glyphs_blue_values", "(", "self", ",", "ufo", ",", "master", ")", ":", "zones", "=", "[", "]", "blue_values", "=", "_pairs", "(", "ufo", ".", "info", ".", "postscriptBlueValues", ")", "other_blues", "=", "_pairs", "(", "ufo", ".", "info", ".", "postscriptOtherBlues", ")", "for", "y1", ",", "y2", "in", "blue_values", ":", "size", "=", "y2", "-", "y1", "if", "y2", "==", "0", ":", "pos", "=", "0", "size", "=", "-", "size", "else", ":", "pos", "=", "y1", "zones", ".", "append", "(", "self", ".", "glyphs_module", ".", "GSAlignmentZone", "(", "pos", ",", "size", ")", ")", "for", "y1", ",", "y2", "in", "other_blues", ":", "size", "=", "y1", "-", "y2", "pos", "=", "y2", "zones", ".", "append", "(", "self", ".", "glyphs_module", ".", "GSAlignmentZone", "(", "pos", ",", "size", ")", ")", "master", ".", "alignmentZones", "=", "sorted", "(", "zones", ",", "key", "=", "lambda", "zone", ":", "-", "zone", ".", "position", ")" ]
Gets and caches a list of station codes optionally within a bbox .
def _get_station_codes ( self , force = False ) : if not force and self . station_codes is not None : return self . station_codes state_urls = self . _get_state_urls ( ) # filter by bounding box against a shapefile state_matches = None if self . bbox : with collection ( os . path . join ( "resources" , "ne_50m_admin_1_states_provinces_lakes_shp.shp" , ) , "r" , ) as c : geom_matches = [ x [ "properties" ] for x in c . filter ( bbox = self . bbox ) ] state_matches = [ x [ "postal" ] if x [ "admin" ] != "Canada" else "CN" for x in geom_matches ] self . station_codes = [ ] for state_url in state_urls : if state_matches is not None : state_abbr = state_url . split ( "/" ) [ - 1 ] . split ( "." ) [ 0 ] if state_abbr not in state_matches : continue self . station_codes . extend ( self . _get_stations_for_state ( state_url ) ) if self . bbox : # retrieve metadata for all stations to properly filter them metadata = self . _get_metadata ( self . station_codes ) parsed_metadata = self . parser . _parse_metadata ( metadata ) def in_bbox ( code ) : lat = parsed_metadata [ code ] [ "latitude" ] lon = parsed_metadata [ code ] [ "longitude" ] return ( lon >= self . bbox [ 0 ] and lon <= self . bbox [ 2 ] and lat >= self . bbox [ 1 ] and lat <= self . bbox [ 3 ] ) self . station_codes = list ( filter ( in_bbox , self . station_codes ) ) return self . station_codes
9,741
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L158-L216
[ "def", "scanning", "(", "reducer", ",", "init", "=", "UNSET", ")", ":", "reducer2", "=", "reducer", "def", "scanning_transducer", "(", "reducer", ")", ":", "return", "Scanning", "(", "reducer", ",", "reducer2", ",", "init", ")", "return", "scanning_transducer" ]
Wrapper that handles the actual asynchronous monitoring of the task state .
def _monitor_task ( self ) : if self . task . state in states . UNREADY_STATES : reactor . callLater ( self . POLL_PERIOD , self . _monitor_task ) return if self . task . state == 'SUCCESS' : self . callback ( self . task . result ) elif self . task . state == 'FAILURE' : self . errback ( Failure ( self . task . result ) ) elif self . task . state == 'REVOKED' : self . errback ( Failure ( defer . CancelledError ( 'Task {0}' . format ( self . task . id ) ) ) ) else : self . errback ( ValueError ( 'Cannot respond to `{}` state' . format ( self . task . state ) ) )
9,742
https://github.com/SentimensRG/txCelery/blob/15b9705198009f5ce6db1bfd0a8af9b8949d6277/txcelery/defer.py#L52-L71
[ "def", "isObjectClassified", "(", "self", ",", "objectName", ",", "minOverlap", "=", "None", ",", "maxL2Size", "=", "None", ")", ":", "L2Representation", "=", "self", ".", "getL2Representations", "(", ")", "objectRepresentation", "=", "self", ".", "objectL2Representations", "[", "objectName", "]", "sdrSize", "=", "self", ".", "config", "[", "\"L2Params\"", "]", "[", "\"sdrSize\"", "]", "if", "minOverlap", "is", "None", ":", "minOverlap", "=", "sdrSize", "/", "2", "if", "maxL2Size", "is", "None", ":", "maxL2Size", "=", "1.5", "*", "sdrSize", "numCorrectClassifications", "=", "0", "for", "col", "in", "xrange", "(", "self", ".", "numColumns", ")", ":", "overlapWithObject", "=", "len", "(", "objectRepresentation", "[", "col", "]", "&", "L2Representation", "[", "col", "]", ")", "if", "(", "overlapWithObject", ">=", "minOverlap", "and", "len", "(", "L2Representation", "[", "col", "]", ")", "<=", "maxL2Size", ")", ":", "numCorrectClassifications", "+=", "1", "return", "numCorrectClassifications", "==", "self", ".", "numColumns" ]
Clean up a query string for searching .
def _clean_query_string ( q ) : q = q . replace ( "()" , "" ) . strip ( ) if q . endswith ( "(" ) : q = q [ : - 1 ] . strip ( ) # Remove misplaced AND/OR/NOT at end if q [ - 3 : ] == "AND" or q [ - 3 : ] == "NOT" : q = q [ : - 3 ] elif q [ - 2 : ] == "OR" : q = q [ : - 2 ] # Balance parentheses while q . count ( "(" ) > q . count ( ")" ) : q += ")" while q . count ( ")" ) > q . count ( "(" ) : q = "(" + q return q . strip ( )
9,743
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L40-L66
[ "def", "_intermediary_to_markdown", "(", "tables", ",", "relationships", ")", ":", "t", "=", "'\\n'", ".", "join", "(", "t", ".", "to_markdown", "(", ")", "for", "t", "in", "tables", ")", "r", "=", "'\\n'", ".", "join", "(", "r", ".", "to_markdown", "(", ")", "for", "r", "in", "relationships", ")", "return", "'{}\\n{}'", ".", "format", "(", "t", ",", "r", ")" ]
Validate and clean up a query to be sent to Search . Cleans the query string removes unneeded parameters and validates for correctness . Does not modify the original argument . Raises an Exception on invalid input .
def _validate_query ( query ) : query = deepcopy ( query ) # q is always required if query [ "q" ] == BLANK_QUERY [ "q" ] : raise ValueError ( "No query specified." ) query [ "q" ] = _clean_query_string ( query [ "q" ] ) # limit should be set to appropriate default if not specified if query [ "limit" ] is None : query [ "limit" ] = SEARCH_LIMIT if query [ "advanced" ] else NONADVANCED_LIMIT # If specified, the limit should not be greater than the Search maximum elif query [ "limit" ] > SEARCH_LIMIT : warnings . warn ( 'Reduced result limit from {} to the Search maximum: {}' . format ( query [ "limit" ] , SEARCH_LIMIT ) , RuntimeWarning ) query [ "limit" ] = SEARCH_LIMIT # Remove all blank/default values for key , val in BLANK_QUERY . items ( ) : # Default for get is NaN so comparison is always False if query . get ( key , float ( 'nan' ) ) == val : query . pop ( key ) # Remove unsupported fields to_remove = [ field for field in query . keys ( ) if field not in BLANK_QUERY . keys ( ) ] [ query . pop ( field ) for field in to_remove ] return query
9,744
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L69-L107
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_done_event", ".", "wait", "(", "MAXINT", ")", "return", "self", ".", "_status", ",", "self", ".", "_exception" ]
Add a term to the query .
def _term ( self , term ) : # All terms must be strings for Elasticsearch term = str ( term ) if term : self . __query [ "q" ] += term return self
9,745
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L197-L210
[ "def", "make_datastore_api", "(", "client", ")", ":", "parse_result", "=", "six", ".", "moves", ".", "urllib_parse", ".", "urlparse", "(", "client", ".", "_base_url", ")", "host", "=", "parse_result", ".", "netloc", "if", "parse_result", ".", "scheme", "==", "\"https\"", ":", "channel", "=", "make_secure_channel", "(", "client", ".", "_credentials", ",", "DEFAULT_USER_AGENT", ",", "host", ")", "else", ":", "channel", "=", "insecure_channel", "(", "host", ")", "return", "datastore_client", ".", "DatastoreClient", "(", "channel", "=", "channel", ",", "client_info", "=", "client_info", ".", "ClientInfo", "(", "client_library_version", "=", "__version__", ",", "gapic_version", "=", "__version__", ")", ",", ")" ]
Add an operator between terms . There must be a term added before using this method . All operators have helpers so this method is usually not necessary to directly invoke .
def _operator ( self , op , close_group = False ) : op = op . upper ( ) . strip ( ) if op not in OP_LIST : raise ValueError ( "Error: '{}' is not a valid operator." . format ( op ) ) else : if close_group : op = ") " + op + " (" else : op = " " + op + " " self . __query [ "q" ] += op return self
9,746
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L244-L271
[ "def", "imsave", "(", "filename", ",", "data", ",", "maxval", "=", "None", ",", "pam", "=", "False", ")", ":", "try", ":", "netpbm", "=", "NetpbmFile", "(", "data", ",", "maxval", "=", "maxval", ")", "netpbm", ".", "write", "(", "filename", ",", "pam", "=", "pam", ")", "finally", ":", "netpbm", ".", "close", "(", ")" ]
Combine terms with AND . There must be a term added before using this method .
def _and_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "AND" , close_group = close_group ) return self
9,747
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L273-L294
[ "def", "create_supercut_in_batches", "(", "composition", ",", "outputfile", ",", "padding", ")", ":", "total_clips", "=", "len", "(", "composition", ")", "start_index", "=", "0", "end_index", "=", "BATCH_SIZE", "batch_comp", "=", "[", "]", "while", "start_index", "<", "total_clips", ":", "filename", "=", "outputfile", "+", "'.tmp'", "+", "str", "(", "start_index", ")", "+", "'.mp4'", "try", ":", "create_supercut", "(", "composition", "[", "start_index", ":", "end_index", "]", ",", "filename", ",", "padding", ")", "batch_comp", ".", "append", "(", "filename", ")", "gc", ".", "collect", "(", ")", "start_index", "+=", "BATCH_SIZE", "end_index", "+=", "BATCH_SIZE", "except", ":", "start_index", "+=", "BATCH_SIZE", "end_index", "+=", "BATCH_SIZE", "next", "clips", "=", "[", "VideoFileClip", "(", "filename", ")", "for", "filename", "in", "batch_comp", "]", "video", "=", "concatenate", "(", "clips", ")", "video", ".", "to_videofile", "(", "outputfile", ",", "codec", "=", "\"libx264\"", ",", "temp_audiofile", "=", "'temp-audio.m4a'", ",", "remove_temp", "=", "True", ",", "audio_codec", "=", "'aac'", ")", "# remove partial video files", "for", "filename", "in", "batch_comp", ":", "os", ".", "remove", "(", "filename", ")", "cleanup_log_files", "(", "outputfile", ")" ]
Combine terms with OR . There must be a term added before using this method .
def _or_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "OR" , close_group = close_group ) return self
9,748
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L296-L317
[ "def", "get_library_progress", "(", "self", ")", ":", "kbp_dict", "=", "self", ".", "_get_api_call", "(", "'get_library_progress'", ")", "return", "{", "asin", ":", "KindleCloudReaderAPI", ".", "_kbp_to_progress", "(", "kbp", ")", "for", "asin", ",", "kbp", "in", "kbp_dict", ".", "iteritems", "(", ")", "}" ]
Fetch the entire mapping for the specified index .
def _mapping ( self ) : return ( self . __search_client . get ( "/unstable/index/{}/mapping" . format ( mdf_toolbox . translate_index ( self . index ) ) ) [ "mappings" ] )
9,749
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L418-L426
[ "def", "density_hub", "(", "self", ",", "weather_df", ")", ":", "if", "self", ".", "density_model", "!=", "'interpolation_extrapolation'", ":", "temperature_hub", "=", "self", ".", "temperature_hub", "(", "weather_df", ")", "# Calculation of density in kg/m³ at hub height", "if", "self", ".", "density_model", "==", "'barometric'", ":", "logging", ".", "debug", "(", "'Calculating density using barometric height '", "'equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "barometric", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'ideal_gas'", ":", "logging", ".", "debug", "(", "'Calculating density using ideal gas equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "ideal_gas", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'interpolation_extrapolation'", ":", "logging", ".", "debug", "(", "'Calculating density using linear inter- or '", "'extrapolation.'", ")", "density_hub", "=", "tools", ".", 
"linear_interpolation_extrapolation", "(", "weather_df", "[", "'density'", "]", ",", "self", ".", "power_plant", ".", "hub_height", ")", "else", ":", "raise", "ValueError", "(", "\"'{0}' is an invalid value. \"", ".", "format", "(", "self", ".", "density_model", ")", "+", "\"`density_model` \"", "+", "\"must be 'barometric', 'ideal_gas' or \"", "+", "\"'interpolation_extrapolation'.\"", ")", "return", "density_hub" ]
Add a fulltext search term to the query .
def match_term ( self , value , required = True , new_group = False ) : # If not the start of the query string, add an AND or OR if self . initialized : if required : self . _and_join ( new_group ) else : self . _or_join ( new_group ) self . _term ( value ) return self
9,750
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L435-L463
[ "def", "process_data", "(", "self", ",", "sockets", ")", ":", "with", "self", ".", "mutex", ":", "log", ".", "log", "(", "logging", ".", "DEBUG", "-", "2", ",", "\"process_data()\"", ")", "for", "sock", ",", "conn", "in", "itertools", ".", "product", "(", "sockets", ",", "self", ".", "connections", ")", ":", "if", "sock", "==", "conn", ".", "socket", ":", "conn", ".", "process_data", "(", ")" ]
Require a field to exist in the results . Matches will have some value in field .
def match_exists ( self , field , required = True , new_group = False ) : return self . match_field ( field , "*" , required = required , new_group = new_group )
9,751
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L522-L541
[ "def", "Write", "(", "self", ",", "Text", ")", ":", "self", ".", "Application", ".", "_Alter", "(", "'WRITE'", ",", "'%s %s'", "%", "(", "self", ".", "Handle", ",", "tounicode", "(", "Text", ")", ")", ")" ]
Require a field to not exist in the results . Matches will not have field present .
def match_not_exists ( self , field , new_group = False ) : return self . exclude_field ( field , "*" , new_group = new_group )
9,752
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L543-L560
[ "def", "Write", "(", "self", ",", "Text", ")", ":", "self", ".", "Application", ".", "_Alter", "(", "'WRITE'", ",", "'%s %s'", "%", "(", "self", ".", "Handle", ",", "tounicode", "(", "Text", ")", ")", ")" ]
Retrieve and return the mapping for the given metadata block .
def show_fields ( self , block = None ) : mapping = self . _mapping ( ) if block is None : return mapping elif block == "top" : blocks = set ( ) for key in mapping . keys ( ) : blocks . add ( key . split ( "." ) [ 0 ] ) block_map = { } for b in blocks : block_map [ b ] = "object" else : block_map = { } for key , value in mapping . items ( ) : if key . startswith ( block ) : block_map [ key ] = value return block_map
9,753
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L764-L792
[ "def", "saturation", "(", "self", ",", "value", ")", ":", "value", "=", "clean_float", "(", "value", ")", "if", "value", "is", "None", ":", "return", "try", ":", "unit_moisture_weight", "=", "self", ".", "unit_moist_weight", "-", "self", ".", "unit_dry_weight", "unit_moisture_volume", "=", "unit_moisture_weight", "/", "self", ".", "_pw", "saturation", "=", "unit_moisture_volume", "/", "self", ".", "_calc_unit_void_volume", "(", ")", "if", "saturation", "is", "not", "None", "and", "not", "ct", ".", "isclose", "(", "saturation", ",", "value", ",", "rel_tol", "=", "self", ".", "_tolerance", ")", ":", "raise", "ModelError", "(", "\"New saturation (%.3f) is inconsistent \"", "\"with calculated value (%.3f)\"", "%", "(", "value", ",", "saturation", ")", ")", "except", "TypeError", ":", "pass", "old_value", "=", "self", ".", "saturation", "self", ".", "_saturation", "=", "value", "try", ":", "self", ".", "recompute_all_weights_and_void", "(", ")", "self", ".", "_add_to_stack", "(", "\"saturation\"", ",", "value", ")", "except", "ModelError", "as", "e", ":", "self", ".", "_saturation", "=", "old_value", "raise", "ModelError", "(", "e", ")" ]
Create structured dtype from a 2d ndarray with unstructured dtype .
def inflate_dtype ( arr , names ) : arr = np . asanyarray ( arr ) if has_structured_dt ( arr ) : return arr . dtype s_dt = arr . dtype dt = [ ( n , s_dt ) for n in names ] dt = np . dtype ( dt ) return dt
9,754
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L48-L56
[ "def", "synchronize_simultaneous", "(", "self", ",", "node_ip", ")", ":", "for", "candidate", "in", "self", ".", "factory", ".", "candidates", "[", "node_ip", "]", ":", "# Only if candidate is connected.\r", "if", "not", "candidate", "[", "\"con\"", "]", ".", "connected", ":", "continue", "# Synchronise simultaneous node.\r", "if", "candidate", "[", "\"time\"", "]", "-", "self", ".", "factory", ".", "nodes", "[", "\"simultaneous\"", "]", "[", "node_ip", "]", "[", "\"time\"", "]", ">", "self", ".", "challege_timeout", ":", "msg", "=", "\"RECONNECT\"", "self", ".", "factory", ".", "nodes", "[", "\"simultaneous\"", "]", "[", "node_ip", "]", "[", "\"con\"", "]", ".", "send_line", "(", "msg", ")", "return", "self", ".", "cleanup_candidates", "(", "node_ip", ")", "self", ".", "propogate_candidates", "(", "node_ip", ")" ]
Generate a table from a dictionary of arrays .
def from_dict ( cls , arr_dict , dtype = None , fillna = False , * * kwargs ) : # i hope order of keys == order or values if dtype is None : names = sorted ( list ( arr_dict . keys ( ) ) ) else : dtype = np . dtype ( dtype ) dt_names = [ f for f in dtype . names ] dict_names = [ k for k in arr_dict . keys ( ) ] missing_names = set ( dt_names ) - set ( dict_names ) if missing_names : if fillna : dict_names = dt_names for missing_name in missing_names : arr_dict [ missing_name ] = np . nan else : raise KeyError ( 'Dictionary keys and dtype fields do not match!' ) names = list ( dtype . names ) arr_dict = cls . _expand_scalars ( arr_dict ) data = [ arr_dict [ key ] for key in names ] return cls ( np . rec . fromarrays ( data , names = names , dtype = dtype ) , * * kwargs )
9,755
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L246-L270
[ "def", "get_owned_subscriptions", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "self", ".", "_get_server", "(", "server_id", ")", "return", "list", "(", "self", ".", "_owned_subscriptions", "[", "server_id", "]", ")" ]
Create a table from a predefined datatype .
def from_template ( cls , data , template ) : name = DEFAULT_NAME if isinstance ( template , str ) : name = template table_info = TEMPLATES [ name ] else : table_info = template if 'name' in table_info : name = table_info [ 'name' ] dt = table_info [ 'dtype' ] loc = table_info [ 'h5loc' ] split = table_info [ 'split_h5' ] h5singleton = table_info [ 'h5singleton' ] return cls ( data , h5loc = loc , dtype = dt , split_h5 = split , name = name , h5singleton = h5singleton )
9,756
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L314-L348
[ "def", "run", "(", "self", ")", ":", "port", ",", "tensorboard_process", "=", "self", ".", "create_tensorboard_process", "(", ")", "LOGGER", ".", "info", "(", "'TensorBoard 0.1.7 at http://localhost:{}'", ".", "format", "(", "port", ")", ")", "while", "not", "self", ".", "estimator", ".", "checkpoint_path", ":", "self", ".", "event", ".", "wait", "(", "1", ")", "with", "self", ".", "_temporary_directory", "(", ")", "as", "aws_sync_dir", ":", "while", "not", "self", ".", "event", ".", "is_set", "(", ")", ":", "args", "=", "[", "'aws'", ",", "'s3'", ",", "'sync'", ",", "self", ".", "estimator", ".", "checkpoint_path", ",", "aws_sync_dir", "]", "subprocess", ".", "call", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "self", ".", "_sync_directories", "(", "aws_sync_dir", ",", "self", ".", "logdir", ")", "self", ".", "event", ".", "wait", "(", "10", ")", "tensorboard_process", ".", "terminate", "(", ")" ]
Append new columns to the table .
def append_columns ( self , colnames , values , * * kwargs ) : n = len ( self ) if np . isscalar ( values ) : values = np . full ( n , values ) values = np . atleast_1d ( values ) if not isinstance ( colnames , str ) and len ( colnames ) > 1 : values = np . atleast_2d ( values ) self . _check_column_length ( values , n ) if values . ndim == 1 : if len ( values ) > n : raise ValueError ( "New Column is longer than existing table!" ) elif len ( values ) > 1 and len ( values ) < n : raise ValueError ( "New Column is shorter than existing table, " "but not just one element!" ) elif len ( values ) == 1 : values = np . full ( n , values [ 0 ] ) new_arr = rfn . append_fields ( self , colnames , values , usemask = False , asrecarray = True , * * kwargs ) return self . __class__ ( new_arr , h5loc = self . h5loc , split_h5 = self . split_h5 , name = self . name , h5singleton = self . h5singleton )
9,757
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L362-L402
[ "def", "stop", "(", "self", ")", ":", "yield", "from", "self", ".", "_stop_ubridge", "(", ")", "if", "self", ".", "_nvram_watcher", ":", "self", ".", "_nvram_watcher", ".", "close", "(", ")", "self", ".", "_nvram_watcher", "=", "None", "if", "self", ".", "_telnet_server", ":", "self", ".", "_telnet_server", ".", "close", "(", ")", "self", ".", "_telnet_server", "=", "None", "if", "self", ".", "is_running", "(", ")", ":", "self", ".", "_terminate_process_iou", "(", ")", "if", "self", ".", "_iou_process", ".", "returncode", "is", "None", ":", "try", ":", "yield", "from", "gns3server", ".", "utils", ".", "asyncio", ".", "wait_for_process_termination", "(", "self", ".", "_iou_process", ",", "timeout", "=", "3", ")", "except", "asyncio", ".", "TimeoutError", ":", "if", "self", ".", "_iou_process", ".", "returncode", "is", "None", ":", "log", ".", "warning", "(", "\"IOU process {} is still running... killing it\"", ".", "format", "(", "self", ".", "_iou_process", ".", "pid", ")", ")", "try", ":", "self", ".", "_iou_process", ".", "kill", "(", ")", "except", "ProcessLookupError", ":", "pass", "self", ".", "_iou_process", "=", "None", "self", ".", "_started", "=", "False", "self", ".", "save_configs", "(", ")" ]
Drop columns from the table .
def drop_columns ( self , colnames , * * kwargs ) : new_arr = rfn . drop_fields ( self , colnames , usemask = False , asrecarray = True , * * kwargs ) return self . __class__ ( new_arr , h5loc = self . h5loc , split_h5 = self . split_h5 , name = self . name , h5singleton = self . h5singleton )
9,758
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L404-L419
[ "async", "def", "synchronize", "(", "self", ",", "pid", ",", "vendor_specific", "=", "None", ")", ":", "return", "await", "self", ".", "_request_pyxb", "(", "\"post\"", ",", "[", "\"synchronize\"", ",", "pid", "]", ",", "{", "}", ",", "mmp_dict", "=", "{", "\"pid\"", ":", "pid", "}", ",", "vendor_specific", "=", "vendor_specific", ",", ")" ]
Sort array by a column .
def sorted ( self , by , * * kwargs ) : sort_idc = np . argsort ( self [ by ] , * * kwargs ) return self . __class__ ( self [ sort_idc ] , h5loc = self . h5loc , split_h5 = self . split_h5 , name = self . name )
9,759
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L421-L435
[ "def", "fetch_from_sdr", "(", "folder", "=", "data_folder", ",", "data", "=", "'test'", ")", ":", "url", "=", "\"https://stacks.stanford.edu/file/druid:fn662rv4961/\"", "if", "data", "==", "'test'", ":", "md5_dict", "=", "{", "'5182_1_1.nii.gz'", ":", "'0656e59818538baa7d45311f2581bb4e'", ",", "'5182_15_1.nii.gz'", ":", "'a5a307b581620184baf868cd0df81f89'", ",", "'data.mat'", ":", "'a6275698f2220c65994354d412e6d82e'", ",", "'pure_gaba_P64024.nii.gz'", ":", "'f3e09ec0f00bd9a03910b19bfe731afb'", "}", "elif", "data", "==", "'example'", ":", "md5_dict", "=", "{", "'12_1_PROBE_MEGA_L_Occ.nii.gz'", ":", "'a0571606c1caa16a9d9b00847771bc94'", ",", "'5062_2_1.nii.gz'", ":", "'6f77fb5134bc2841bdfc954390f0f4a4'", "}", "if", "not", "os", ".", "path", ".", "exists", "(", "folder", ")", ":", "print", "(", "'Creating new directory %s'", "%", "folder", ")", "os", ".", "makedirs", "(", "folder", ")", "for", "k", ",", "v", "in", "md5_dict", ".", "items", "(", ")", ":", "fname", "=", "pjoin", "(", "folder", ",", "k", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "print", "(", "'Downloading %s from SDR ...'", "%", "k", ")", "_get_file_data", "(", "fname", ",", "url", "+", "k", ")", "check_md5", "(", "fname", ",", "v", ")", "else", ":", "print", "(", "'File %s is already in place. If you want to fetch it again, please first remove it from the folder %s '", "%", "(", "fname", ",", "folder", ")", ")", "print", "(", "'Done.'", ")", "print", "(", "'Files copied in folder %s'", "%", "folder", ")" ]
Merge a list of tables
def merge ( cls , tables , fillna = False ) : cols = set ( itertools . chain ( * [ table . dtype . descr for table in tables ] ) ) tables_to_merge = [ ] for table in tables : missing_cols = cols - set ( table . dtype . descr ) if missing_cols : if fillna : n = len ( table ) n_cols = len ( missing_cols ) col_names = [ ] for col_name , col_dtype in missing_cols : if 'f' not in col_dtype : raise ValueError ( "Cannot create NaNs for non-float" " type column '{}'" . format ( col_name ) ) col_names . append ( col_name ) table = table . append_columns ( col_names , np . full ( ( n_cols , n ) , np . nan ) ) else : raise ValueError ( "Table columns do not match. Use fill_na=True" " if you want to append missing values with NaNs" ) tables_to_merge . append ( table ) first_table = tables_to_merge [ 0 ] merged_table = sum ( tables_to_merge [ 1 : ] , first_table ) merged_table . h5loc = first_table . h5loc merged_table . h5singleton = first_table . h5singleton merged_table . split_h5 = first_table . split_h5 merged_table . name = first_table . name return merged_table
9,760
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L447-L487
[ "def", "_do_http", "(", "opts", ",", "profile", "=", "'default'", ")", ":", "ret", "=", "{", "}", "url", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:url'", ".", "format", "(", "profile", ")", ",", "''", ")", "user", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:user'", ".", "format", "(", "profile", ")", ",", "''", ")", "passwd", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:pass'", ".", "format", "(", "profile", ")", ",", "''", ")", "realm", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:realm'", ".", "format", "(", "profile", ")", ",", "''", ")", "timeout", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:timeout'", ".", "format", "(", "profile", ")", ",", "''", ")", "if", "not", "url", ":", "raise", "Exception", "(", "'missing url in profile {0}'", ".", "format", "(", "profile", ")", ")", "if", "user", "and", "passwd", ":", "auth", "=", "_auth", "(", "url", "=", "url", ",", "realm", "=", "realm", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ")", "_install_opener", "(", "auth", ")", "url", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "for", "line", "in", "_urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "splt", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "splt", "[", "0", "]", "in", "ret", ":", "ret", "[", "splt", "[", "0", "]", "]", "+=", "',{0}'", ".", "format", "(", "splt", "[", "1", "]", ")", "else", ":", "ret", "[", "splt", "[", "0", "]", "]", "=", "splt", "[", "1", "]", "return", "ret" ]
An helper function to create index tuples for fast lookup in HDF5Pump
def create_index_tuple ( group_ids ) : max_group_id = np . max ( group_ids ) start_idx_arr = np . full ( max_group_id + 1 , 0 ) n_items_arr = np . full ( max_group_id + 1 , 0 ) current_group_id = group_ids [ 0 ] current_idx = 0 item_count = 0 for group_id in group_ids : if group_id != current_group_id : start_idx_arr [ current_group_id ] = current_idx n_items_arr [ current_group_id ] = item_count current_idx += item_count item_count = 0 current_group_id = group_id item_count += 1 else : start_idx_arr [ current_group_id ] = current_idx n_items_arr [ current_group_id ] = item_count return ( start_idx_arr , n_items_arr )
9,761
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L892-L915
[ "def", "plot_sphere", "(", "ax", ",", "center", ",", "radius", ",", "color", "=", "'black'", ",", "alpha", "=", "1.", ",", "linspace_count", "=", "_LINSPACE_COUNT", ")", ":", "u", "=", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "linspace_count", ")", "v", "=", "np", ".", "linspace", "(", "0", ",", "np", ".", "pi", ",", "linspace_count", ")", "sin_v", "=", "np", ".", "sin", "(", "v", ")", "x", "=", "center", "[", "0", "]", "+", "radius", "*", "np", ".", "outer", "(", "np", ".", "cos", "(", "u", ")", ",", "sin_v", ")", "y", "=", "center", "[", "1", "]", "+", "radius", "*", "np", ".", "outer", "(", "np", ".", "sin", "(", "u", ")", ",", "sin_v", ")", "z", "=", "center", "[", "2", "]", "+", "radius", "*", "np", ".", "outer", "(", "np", ".", "ones_like", "(", "u", ")", ",", "np", ".", "cos", "(", "v", ")", ")", "ax", ".", "plot_surface", "(", "x", ",", "y", ",", "z", ",", "linewidth", "=", "0.0", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")" ]
Traverse the internal dictionary and set the getters
def _set_attributes ( self ) : for parameter , data in self . _data . items ( ) : if isinstance ( data , dict ) or isinstance ( data , OrderedDict ) : field_names , field_values = zip ( * data . items ( ) ) sorted_indices = np . argsort ( field_names ) attr = namedtuple ( parameter , [ field_names [ i ] for i in sorted_indices ] ) setattr ( self , parameter , attr ( * [ field_values [ i ] for i in sorted_indices ] ) ) else : setattr ( self , parameter , data )
9,762
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L74-L88
[ "def", "log_likelihood", "(", "C", ",", "T", ")", ":", "C", "=", "C", ".", "tocsr", "(", ")", "T", "=", "T", ".", "tocsr", "(", ")", "ind", "=", "scipy", ".", "nonzero", "(", "C", ")", "relT", "=", "np", ".", "array", "(", "T", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "relT", "=", "np", ".", "log", "(", "relT", ")", "relC", "=", "np", ".", "array", "(", "C", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "return", "relT", ".", "dot", "(", "relC", ")" ]
Writes all the cached NDArrays to disk and empties the cache
def _write_ndarrays_cache_to_disk ( self ) : for h5loc , arrs in self . _ndarrays_cache . items ( ) : title = arrs [ 0 ] . title chunkshape = ( self . chunksize , ) + arrs [ 0 ] . shape [ 1 : ] if self . chunksize is not None else None arr = NDArray ( np . concatenate ( arrs ) , h5loc = h5loc , title = title ) if h5loc not in self . _ndarrays : loc , tabname = os . path . split ( h5loc ) ndarr = self . h5file . create_earray ( loc , tabname , tb . Atom . from_dtype ( arr . dtype ) , ( 0 , ) + arr . shape [ 1 : ] , chunkshape = chunkshape , title = title , filters = self . filters , createparents = True , ) self . _ndarrays [ h5loc ] = ndarr else : ndarr = self . _ndarrays [ h5loc ] idx_table_h5loc = h5loc + '_indices' if idx_table_h5loc not in self . indices : self . indices [ idx_table_h5loc ] = HDF5IndexTable ( idx_table_h5loc ) idx_tab = self . indices [ idx_table_h5loc ] for arr_length in ( len ( a ) for a in arrs ) : idx_tab . append ( arr_length ) ndarr . append ( arr ) self . _ndarrays_cache = defaultdict ( list )
9,763
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L254-L289
[ "def", "Modify", "(", "self", ",", "client_limit", "=", "None", ",", "client_rate", "=", "None", ",", "duration", "=", "None", ")", ":", "args", "=", "hunt_pb2", ".", "ApiModifyHuntArgs", "(", "hunt_id", "=", "self", ".", "hunt_id", ")", "if", "client_limit", "is", "not", "None", ":", "args", ".", "client_limit", "=", "client_limit", "if", "client_rate", "is", "not", "None", ":", "args", ".", "client_rate", "=", "client_rate", "if", "duration", "is", "not", "None", ":", "args", ".", "duration", "=", "duration", "data", "=", "self", ".", "_context", ".", "SendRequest", "(", "\"ModifyHunt\"", ",", "args", ")", "return", "Hunt", "(", "data", "=", "data", ",", "context", "=", "self", ".", "_context", ")" ]
Flush tables and arrays to disk
def flush ( self ) : self . log . info ( 'Flushing tables and arrays to disk...' ) for tab in self . _tables . values ( ) : tab . flush ( ) self . _write_ndarrays_cache_to_disk ( )
9,764
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L450-L455
[ "def", "defBoundary", "(", "self", ")", ":", "self", ".", "BoroCnstNatAll", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "# Find the natural borrowing constraint conditional on next period's state", "for", "j", "in", "range", "(", "self", ".", "StateCount", ")", ":", "PermShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "1", "]", ")", "TranShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "2", "]", ")", "self", ".", "BoroCnstNatAll", "[", "j", "]", "=", "(", "self", ".", "solution_next", ".", "mNrmMin", "[", "j", "]", "-", "TranShkMinNext", ")", "*", "(", "self", ".", "PermGroFac_list", "[", "j", "]", "*", "PermShkMinNext", ")", "/", "self", ".", "Rfree_list", "[", "j", "]", "self", ".", "BoroCnstNat_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "mNrmMin_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "BoroCnstDependency", "=", "np", ".", "zeros", "(", "(", "self", ".", "StateCount", ",", "self", ".", "StateCount", ")", ")", "+", "np", ".", "nan", "# The natural borrowing constraint in each current state is the *highest*", "# among next-state-conditional natural borrowing constraints that could", "# occur from this current state.", "for", "i", "in", "range", "(", "self", ".", "StateCount", ")", ":", "possible_next_states", "=", "self", ".", "MrkvArray", "[", "i", ",", ":", "]", ">", "0", "self", ".", "BoroCnstNat_list", "[", "i", "]", "=", "np", ".", "max", "(", "self", ".", "BoroCnstNatAll", "[", "possible_next_states", "]", ")", "# Explicitly handle the \"None\" case: ", "if", "self", ".", "BoroCnstArt", "is", "None", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "else", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "np", ".", "max", "(", "[", "self", ".", 
"BoroCnstNat_list", "[", "i", "]", ",", "self", ".", "BoroCnstArt", "]", ")", "self", ".", "BoroCnstDependency", "[", "i", ",", ":", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "==", "self", ".", "BoroCnstNatAll" ]
Sample program to test GLWindow .
def main ( ) : print ( 'GLWindow:' , GLWindow . __version__ ) print ( 'Python:' , sys . version ) print ( 'Platform:' , sys . platform ) wnd = GLWindow . create_window ( ( 480 , 480 ) , title = 'GLWindow Sample' ) wnd . vsync = False ctx = ModernGL . create_context ( ) prog = ctx . program ( [ ctx . vertex_shader ( ''' #version 330 in vec2 vert; in vec4 vert_color; out vec4 frag_color; uniform vec2 scale; uniform float rotation; void main() { frag_color = vert_color; float r = rotation * (0.5 + gl_InstanceID * 0.05); mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r)); gl_Position = vec4((rot * vert) * scale, 0.0, 1.0); } ''' ) , ctx . fragment_shader ( ''' #version 330 in vec4 frag_color; out vec4 color; void main() { color = vec4(frag_color); } ''' ) , ] ) scale = prog . uniforms [ 'scale' ] rotation = prog . uniforms [ 'rotation' ] vbo = ctx . buffer ( struct . pack ( '18f' , 1.0 , 0.0 , 1.0 , 0.0 , 0.0 , 0.5 , - 0.5 , 0.86 , 0.0 , 1.0 , 0.0 , 0.5 , - 0.5 , - 0.86 , 0.0 , 0.0 , 1.0 , 0.5 , ) ) vao = ctx . simple_vertex_array ( prog , vbo , [ 'vert' , 'vert_color' ] ) while wnd . update ( ) : wnd . clear ( 0.95 , 0.95 , 0.95 ) width , height = wnd . size scale . value = ( height / width * 0.75 , 0.75 ) ctx . viewport = wnd . viewport ctx . enable ( ModernGL . BLEND ) rotation . value = wnd . time vao . render ( instances = 10 )
9,765
https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__main__.py#L12-L71
[ "def", "listrecords", "(", "*", "*", "kwargs", ")", ":", "record_dumper", "=", "serializer", "(", "kwargs", "[", "'metadataPrefix'", "]", ")", "e_tree", ",", "e_listrecords", "=", "verb", "(", "*", "*", "kwargs", ")", "result", "=", "get_records", "(", "*", "*", "kwargs", ")", "for", "record", "in", "result", ".", "items", ":", "pid", "=", "oaiid_fetcher", "(", "record", "[", "'id'", "]", ",", "record", "[", "'json'", "]", "[", "'_source'", "]", ")", "e_record", "=", "SubElement", "(", "e_listrecords", ",", "etree", ".", "QName", "(", "NS_OAIPMH", ",", "'record'", ")", ")", "header", "(", "e_record", ",", "identifier", "=", "pid", ".", "pid_value", ",", "datestamp", "=", "record", "[", "'updated'", "]", ",", "sets", "=", "record", "[", "'json'", "]", "[", "'_source'", "]", ".", "get", "(", "'_oai'", ",", "{", "}", ")", ".", "get", "(", "'sets'", ",", "[", "]", ")", ",", ")", "e_metadata", "=", "SubElement", "(", "e_record", ",", "etree", ".", "QName", "(", "NS_OAIPMH", ",", "'metadata'", ")", ")", "e_metadata", ".", "append", "(", "record_dumper", "(", "pid", ",", "record", "[", "'json'", "]", ")", ")", "resumption_token", "(", "e_listrecords", ",", "result", ",", "*", "*", "kwargs", ")", "return", "e_tree" ]
Add the header to the CSV file
def write_header ( fobj ) : fobj . write ( "# K40 calibration results\n" ) fobj . write ( "det_id\trun_id\tdom_id" ) for param in [ 't0' , 'qe' ] : for i in range ( 31 ) : fobj . write ( "\t{}_ch{}" . format ( param , i ) )
9,766
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/examples/offline_analysis/k40summary.py#L30-L36
[ "def", "bounds", "(", "self", ",", "vertices", ")", ":", "if", "util", ".", "is_shape", "(", "vertices", ",", "(", "-", "1", ",", "2", ")", ")", "and", "self", ".", "closed", ":", "# if we have a closed arc (a circle), we can return the actual bounds", "# this only works in two dimensions, otherwise this would return the", "# AABB of an sphere", "info", "=", "self", ".", "center", "(", "vertices", ")", "bounds", "=", "np", ".", "array", "(", "[", "info", "[", "'center'", "]", "-", "info", "[", "'radius'", "]", ",", "info", "[", "'center'", "]", "+", "info", "[", "'radius'", "]", "]", ",", "dtype", "=", "np", ".", "float64", ")", "else", ":", "# since the AABB of a partial arc is hard, approximate", "# the bounds by just looking at the discrete values", "discrete", "=", "self", ".", "discrete", "(", "vertices", ")", "bounds", "=", "np", ".", "array", "(", "[", "discrete", ".", "min", "(", "axis", "=", "0", ")", ",", "discrete", ".", "max", "(", "axis", "=", "0", ")", "]", ",", "dtype", "=", "np", ".", "float64", ")", "return", "bounds" ]
Return the azimuth angle in radians .
def azimuth ( v ) : v = np . atleast_2d ( v ) azi = phi ( v ) - np . pi azi [ azi < 0 ] += 2 * np . pi if len ( azi ) == 1 : return azi [ 0 ] return azi
9,767
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L119-L133
[ "def", "_pool_event_lifecycle_cb", "(", "conn", ",", "pool", ",", "event", ",", "detail", ",", "opaque", ")", ":", "_salt_send_event", "(", "opaque", ",", "conn", ",", "{", "'pool'", ":", "{", "'name'", ":", "pool", ".", "name", "(", ")", ",", "'uuid'", ":", "pool", ".", "UUIDString", "(", ")", "}", ",", "'event'", ":", "_get_libvirt_enum_string", "(", "'VIR_STORAGE_POOL_EVENT_'", ",", "event", ")", ",", "'detail'", ":", "'unknown'", "# currently unused", "}", ")" ]
Returns the unit vector of the vector .
def unit_vector ( vector , * * kwargs ) : # This also works for a dataframe with columns ['x', 'y', 'z'] # However, the division operation is picky about the shapes # So, remember input vector shape, cast all up to 2d, # do the (ugly) conversion, then return unit in same shape as input # of course, the numpy-ized version of the input... vector = np . array ( vector ) out_shape = vector . shape vector = np . atleast_2d ( vector ) unit = vector / np . linalg . norm ( vector , axis = 1 , * * kwargs ) [ : , None ] return unit . reshape ( out_shape )
9,768
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L175-L186
[ "def", "_init_baremetal_trunk_interfaces", "(", "self", ",", "port_seg", ",", "segment", ")", ":", "# interfaces list requiring switch initialization and", "# reserved port and port_binding db entry creation", "list_to_init", "=", "[", "]", "# interfaces list requiring reserved port and port_binding", "# db entry creation", "inactive_switch", "=", "[", "]", "connections", "=", "self", ".", "_get_baremetal_connections", "(", "port_seg", ",", "False", ",", "True", ")", "for", "switch_ip", ",", "intf_type", ",", "port", ",", "is_native", ",", "_", "in", "connections", ":", "try", ":", "nxos_db", ".", "get_switch_if_host_mappings", "(", "switch_ip", ",", "nexus_help", ".", "format_interface_name", "(", "intf_type", ",", "port", ")", ")", "except", "excep", ".", "NexusHostMappingNotFound", ":", "if", "self", ".", "is_switch_active", "(", "switch_ip", ")", ":", "# channel-group added later", "list_to_init", ".", "append", "(", "(", "switch_ip", ",", "intf_type", ",", "port", ",", "is_native", ",", "0", ")", ")", "else", ":", "inactive_switch", ".", "append", "(", "(", "switch_ip", ",", "intf_type", ",", "port", ",", "is_native", ",", "0", ")", ")", "# channel_group is appended to tuples in list_to_init", "self", ".", "driver", ".", "initialize_baremetal_switch_interfaces", "(", "list_to_init", ")", "host_id", "=", "port_seg", ".", "get", "(", "'dns_name'", ")", "if", "host_id", "is", "None", ":", "host_id", "=", "const", ".", "RESERVED_PORT_HOST_ID", "# Add inactive list to list_to_init to create RESERVED", "# port data base entries", "list_to_init", "+=", "inactive_switch", "for", "switch_ip", ",", "intf_type", ",", "port", ",", "is_native", ",", "ch_grp", "in", "list_to_init", ":", "nxos_db", ".", "add_host_mapping", "(", "host_id", ",", "switch_ip", ",", "nexus_help", ".", "format_interface_name", "(", "intf_type", ",", "port", ")", ",", "ch_grp", ",", "False", ")" ]
Calculate the point - line - distance for given point and line .
def pld3 ( pos , line_vertex , line_dir ) : pos = np . atleast_2d ( pos ) line_vertex = np . atleast_1d ( line_vertex ) line_dir = np . atleast_1d ( line_dir ) c = np . cross ( line_dir , line_vertex - pos ) n1 = np . linalg . norm ( c , axis = 1 ) n2 = np . linalg . norm ( line_dir ) out = n1 / n2 if out . ndim == 1 and len ( out ) == 1 : return out [ 0 ] return out
9,769
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L189-L200
[ "def", "get_traindata", "(", "self", ")", "->", "np", ".", "ndarray", ":", "traindata", "=", "None", "for", "key", ",", "value", "in", "self", ".", "data", ".", "items", "(", ")", ":", "if", "key", "not", "in", "[", "'__header__'", ",", "'__version__'", ",", "'__globals__'", "]", ":", "if", "traindata", "is", "None", ":", "traindata", "=", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", "else", ":", "traindata", "=", "np", ".", "concatenate", "(", "(", "traindata", ",", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", ")", ")", "return", "traindata" ]
Return the distance between two points .
def dist ( x1 , x2 , axis = 0 ) : return np . linalg . norm ( x2 - x1 , axis = axis )
9,770
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L207-L212
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Calculate center of mass for given points . If masses is not set assume equal masses .
def com ( points , masses = None ) : if masses is None : return np . average ( points , axis = 0 ) else : return np . average ( points , axis = 0 , weights = masses )
9,771
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L215-L221
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Calculate the circular permutation for a given list of items .
def circ_permutation ( items ) : permutations = [ ] for i in range ( len ( items ) ) : permutations . append ( items [ i : ] + items [ : i ] ) return permutations
9,772
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L224-L229
[ "def", "_ParseLogonApplications", "(", "self", ",", "parser_mediator", ",", "registry_key", ")", ":", "for", "application", "in", "self", ".", "_LOGON_APPLICATIONS", ":", "command_value", "=", "registry_key", ".", "GetValueByName", "(", "application", ")", "if", "not", "command_value", ":", "continue", "values_dict", "=", "{", "'Application'", ":", "application", ",", "'Command'", ":", "command_value", ".", "GetDataAsObject", "(", ")", ",", "'Trigger'", ":", "'Logon'", "}", "event_data", "=", "windows_events", ".", "WindowsRegistryEventData", "(", ")", "event_data", ".", "key_path", "=", "registry_key", ".", "path", "event_data", ".", "offset", "=", "registry_key", ".", "offset", "event_data", ".", "regvalue", "=", "values_dict", "event_data", ".", "source_append", "=", "': Winlogon'", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "registry_key", ".", "last_written_time", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Inertia tensor stolen of thomas
def inertia ( x , y , z , weight = None ) : if weight is None : weight = 1 tensor_of_inertia = np . zeros ( ( 3 , 3 ) , dtype = float ) tensor_of_inertia [ 0 ] [ 0 ] = ( y * y + z * z ) * weight tensor_of_inertia [ 0 ] [ 1 ] = ( - 1 ) * x * y * weight tensor_of_inertia [ 0 ] [ 2 ] = ( - 1 ) * x * z * weight tensor_of_inertia [ 1 ] [ 0 ] = ( - 1 ) * x * y * weight tensor_of_inertia [ 1 ] [ 1 ] = ( x * x + z * z ) * weight tensor_of_inertia [ 1 ] [ 2 ] = ( - 1 ) * y * z * weight tensor_of_inertia [ 2 ] [ 0 ] = ( - 1 ) * x * z * weight tensor_of_inertia [ 2 ] [ 1 ] = ( - 1 ) * z * y * weight tensor_of_inertia [ 2 ] [ 2 ] = ( x * x + y * y ) * weight eigen_values = np . linalg . eigvals ( tensor_of_inertia ) small_inertia = eigen_values [ 2 ] [ 2 ] middle_inertia = eigen_values [ 1 ] [ 1 ] big_inertia = eigen_values [ 0 ] [ 0 ] return small_inertia , middle_inertia , big_inertia
9,773
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L381-L400
[ "def", "parse_events", "(", "content", ",", "start", "=", "None", ",", "end", "=", "None", ",", "default_span", "=", "timedelta", "(", "days", "=", "7", ")", ")", ":", "if", "not", "start", ":", "start", "=", "now", "(", ")", "if", "not", "end", ":", "end", "=", "start", "+", "default_span", "if", "not", "content", ":", "raise", "ValueError", "(", "'Content is invalid!'", ")", "calendar", "=", "Calendar", ".", "from_ical", "(", "content", ")", "# Find the calendar's timezone info, or use UTC", "for", "c", "in", "calendar", ".", "walk", "(", ")", ":", "if", "c", ".", "name", "==", "'VTIMEZONE'", ":", "cal_tz", "=", "gettz", "(", "str", "(", "c", "[", "'TZID'", "]", ")", ")", "break", "else", ":", "cal_tz", "=", "UTC", "start", "=", "normalize", "(", "start", ",", "cal_tz", ")", "end", "=", "normalize", "(", "end", ",", "cal_tz", ")", "found", "=", "[", "]", "for", "component", "in", "calendar", ".", "walk", "(", ")", ":", "if", "component", ".", "name", "==", "\"VEVENT\"", ":", "e", "=", "create_event", "(", "component", ")", "if", "e", ".", "recurring", ":", "# Unfold recurring events according to their rrule", "rule", "=", "parse_rrule", "(", "component", ",", "cal_tz", ")", "dur", "=", "e", ".", "end", "-", "e", ".", "start", "found", ".", "extend", "(", "e", ".", "copy_to", "(", "dt", ")", "for", "dt", "in", "rule", ".", "between", "(", "start", "-", "dur", ",", "end", ",", "inc", "=", "True", ")", ")", "elif", "e", ".", "end", ">=", "start", "and", "e", ".", "start", "<=", "end", ":", "found", ".", "append", "(", "e", ")", "return", "found" ]
Rotate a 3D vector using quaternion algebra .
def qrot ( vector , quaternion ) : t = 2 * np . cross ( quaternion [ 1 : ] , vector ) v_rot = vector + quaternion [ 0 ] * t + np . cross ( quaternion [ 1 : ] , t ) return v_rot
9,774
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L425-L442
[ "def", "on_exception", "(", "self", ",", "exception", ")", ":", "logger", ".", "error", "(", "'Exception from stream!'", ",", "exc_info", "=", "True", ")", "self", ".", "streaming_exception", "=", "exception" ]
Convert Euler angle to quaternion .
def qeuler ( yaw , pitch , roll ) : yaw = np . radians ( yaw ) pitch = np . radians ( pitch ) roll = np . radians ( roll ) cy = np . cos ( yaw * 0.5 ) sy = np . sin ( yaw * 0.5 ) cr = np . cos ( roll * 0.5 ) sr = np . sin ( roll * 0.5 ) cp = np . cos ( pitch * 0.5 ) sp = np . sin ( pitch * 0.5 ) q = np . array ( ( cy * cr * cp + sy * sr * sp , cy * sr * cp - sy * cr * sp , cy * cr * sp + sy * sr * cp , sy * cr * cp - cy * sr * sp ) ) return q
9,775
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L445-L474
[ "def", "_post_clean", "(", "self", ")", ":", "super", "(", ")", ".", "_post_clean", "(", ")", "# updates self.instance with form data", "password", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password1'", ")", "if", "password", ":", "try", ":", "password_validation", ".", "validate_password", "(", "password", ",", "self", ".", "instance", ")", "except", "ValidationError", "as", "error", ":", "self", ".", "add_error", "(", "'password1'", ",", "error", ")" ]
Find the closes point for a given set of lines in 3D .
def intersect_3d ( p1 , p2 ) : v = p2 - p1 normed_v = unit_vector ( v ) nx = normed_v [ : , 0 ] ny = normed_v [ : , 1 ] nz = normed_v [ : , 2 ] xx = np . sum ( nx ** 2 - 1 ) yy = np . sum ( ny ** 2 - 1 ) zz = np . sum ( nz ** 2 - 1 ) xy = np . sum ( nx * ny ) xz = np . sum ( nx * nz ) yz = np . sum ( ny * nz ) M = np . array ( [ ( xx , xy , xz ) , ( xy , yy , yz ) , ( xz , yz , zz ) ] ) x = np . sum ( p1 [ : , 0 ] * ( nx ** 2 - 1 ) + p1 [ : , 1 ] * ( nx * ny ) + p1 [ : , 2 ] * ( nx * nz ) ) y = np . sum ( p1 [ : , 0 ] * ( nx * ny ) + p1 [ : , 1 ] * ( ny * ny - 1 ) + p1 [ : , 2 ] * ( ny * nz ) ) z = np . sum ( p1 [ : , 0 ] * ( nx * nz ) + p1 [ : , 1 ] * ( ny * nz ) + p1 [ : , 2 ] * ( nz ** 2 - 1 ) ) return np . linalg . lstsq ( M , np . array ( ( x , y , z ) ) , rcond = None ) [ 0 ]
9,776
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L494-L536
[ "def", "serial", "(", "self", ")", ":", "asnint", "=", "libcrypto", ".", "X509_get_serialNumber", "(", "self", ".", "cert", ")", "bio", "=", "Membio", "(", ")", "libcrypto", ".", "i2a_ASN1_INTEGER", "(", "bio", ".", "bio", ",", "asnint", ")", "return", "int", "(", "str", "(", "bio", ")", ",", "16", ")" ]
For Python 2 3 compatibility .
def compat_py2_py3 ( ) : if ( sys . version_info > ( 3 , 0 ) ) : def iteritems ( dictionary ) : return dictionary . items ( ) def itervalues ( dictionary ) : return dictionary . values ( ) else : def iteritems ( dictionary ) : return dictionary . iteritems ( ) def itervalues ( dictionary ) : return dictionary . itervalues ( ) return iteritems , itervalues
9,777
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/util.py#L20-L36
[ "def", "get_template_name", "(", "self", ",", "request", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"template_name\"", ")", ":", "raise", "AttributeError", "(", "\"%s must have a template_name attribute or \"", "\"override the get_template_name method.\"", "%", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "template_name" ]
Uses slice ID as iterator
def timeslice_generator ( self ) : slice_id = 0 while slice_id < self . n_timeslices : blob = self . get_blob ( slice_id ) yield blob slice_id += 1
9,778
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L185-L191
[ "def", "__validate_enrollment_periods", "(", "self", ",", "enrollments", ")", ":", "for", "a", ",", "b", "in", "itertools", ".", "combinations", "(", "enrollments", ",", "2", ")", ":", "max_start", "=", "max", "(", "a", ".", "start", ",", "b", ".", "start", ")", "min_end", "=", "min", "(", "a", ".", "end", ",", "b", ".", "end", ")", "if", "max_start", "<", "min_end", ":", "msg", "=", "\"invalid GrimoireLab enrollment dates. \"", "\"Organization dates overlap.\"", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "return", "enrollments" ]
Index is slice ID
def get_blob ( self , index ) : blob = self . _current_blob self . r . retrieve_timeslice ( index ) timeslice_info = Table . from_template ( { 'frame_index' : self . r . frame_index , 'slice_id' : index , 'timestamp' : self . r . utc_seconds , 'nanoseconds' : self . r . utc_nanoseconds , 'n_frames' : self . r . n_frames , } , 'TimesliceInfo' ) hits = self . _extract_hits ( ) hits . group_id = index blob [ 'TimesliceInfo' ] = timeslice_info blob [ self . _hits_blob_key ] = hits return blob
9,779
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L193-L208
[ "def", "get_login_info", "(", ")", ":", "connections", "=", "{", "}", "_defaults", "=", "{", "}", "_defaults", "[", "'start_in'", "]", "=", "''", "_defaults", "[", "'rpm_sign_plugin'", "]", "=", "''", "config", "=", "_config_file", "(", ")", "_config_test", "(", "config", ")", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Loading connection information:\"", ")", "for", "section", "in", "config", ".", "sections", "(", ")", ":", "cfg", "=", "dict", "(", "config", ".", "items", "(", "section", ")", ")", "connections", "[", "section", "]", "=", "Connectors", "(", "cfg", ")", "if", "'start_in'", "in", "cfg", ":", "_defaults", "[", "'start_in'", "]", "=", "cfg", "[", "'start_in'", "]", "if", "'rpm_sign_plugin'", "in", "cfg", ":", "_defaults", "[", "'rpm_sign_plugin'", "]", "=", "cfg", "[", "'rpm_sign_plugin'", "]", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"[%s] username: %s, base_url: %s\"", "%", "(", "section", ",", "cfg", "[", "'username'", "]", ",", "cfg", "[", "'base_url'", "]", ")", ")", "_defaults", "[", "'environments'", "]", "=", "config", ".", "sections", "(", ")", "return", "(", "connections", ",", "_defaults", ")" ]
A simple slice generator for iterations
def _slice_generator ( self , index ) : start , stop , step = index . indices ( len ( self ) ) for i in range ( start , stop , step ) : yield self . get_blob ( i )
9,780
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L270-L274
[ "def", "configure_bigchaindb", "(", "command", ")", ":", "@", "functools", ".", "wraps", "(", "command", ")", "def", "configure", "(", "args", ")", ":", "config_from_cmdline", "=", "None", "try", ":", "if", "args", ".", "log_level", "is", "not", "None", ":", "config_from_cmdline", "=", "{", "'log'", ":", "{", "'level_console'", ":", "args", ".", "log_level", ",", "'level_logfile'", ":", "args", ".", "log_level", ",", "}", ",", "'server'", ":", "{", "'loglevel'", ":", "args", ".", "log_level", "}", ",", "}", "except", "AttributeError", ":", "pass", "bigchaindb", ".", "config_utils", ".", "autoconfigure", "(", "filename", "=", "args", ".", "config", ",", "config", "=", "config_from_cmdline", ",", "force", "=", "True", ")", "command", "(", "args", ")", "return", "configure" ]
Report the overall correlation with the validation scores using each exemplar in isolation .
def correlation_by_exemplar ( brands , exemplars , validation_scores , analyze_fn_str , outf ) : analyze_fn = getattr ( analyze , analyze_fn_str ) keys = sorted ( k for k in validation_scores . keys ( ) if k in set ( x [ 0 ] for x in brands ) ) truth = [ validation_scores [ k ] for k in keys ] result = { } outf . write ( 'exemplar\tcorr\tn_followers\n' ) outf . flush ( ) for exemplar in exemplars : single_exemplar = { exemplar : exemplars [ exemplar ] } social_scores = analyze_fn ( brands , single_exemplar ) predicted = [ social_scores [ k ] for k in keys ] outf . write ( '%s\t%g\t%d\n' % ( exemplar , scistat . pearsonr ( predicted , truth ) [ 0 ] , len ( exemplars [ exemplar ] ) ) ) outf . flush ( ) result [ exemplar ] = scistat . pearsonr ( predicted , truth ) [ 0 ] outf . close ( ) return result
9,781
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/diagnose.py#L50-L66
[ "def", "sink_pubsub", "(", "client", ",", "to_delete", ")", ":", "topic", "=", "_sink_pubsub_setup", "(", "client", ")", "to_delete", ".", "append", "(", "topic", ")", "SINK_NAME", "=", "\"robots-pubsub-%d\"", "%", "(", "_millis", "(", ")", ",", ")", "FILTER", "=", "\"logName:apache-access AND textPayload:robot\"", "UPDATED_FILTER", "=", "\"textPayload:robot\"", "# [START sink_pubsub_create]", "DESTINATION", "=", "\"pubsub.googleapis.com/%s\"", "%", "(", "topic", ".", "full_name", ",", ")", "sink", "=", "client", ".", "sink", "(", "SINK_NAME", ",", "filter_", "=", "FILTER", ",", "destination", "=", "DESTINATION", ")", "assert", "not", "sink", ".", "exists", "(", ")", "# API call", "sink", ".", "create", "(", ")", "# API call", "assert", "sink", ".", "exists", "(", ")", "# API call", "# [END sink_pubsub_create]", "to_delete", ".", "insert", "(", "0", ",", "sink", ")", "# delete sink before topic", "# [START client_list_sinks]", "for", "sink", "in", "client", ".", "list_sinks", "(", ")", ":", "# API call(s)", "do_something_with", "(", "sink", ")", "# [END client_list_sinks]", "# [START sink_reload]", "existing_sink", "=", "client", ".", "sink", "(", "SINK_NAME", ")", "existing_sink", ".", "reload", "(", ")", "# [END sink_reload]", "assert", "existing_sink", ".", "filter_", "==", "FILTER", "assert", "existing_sink", ".", "destination", "==", "DESTINATION", "# [START sink_update]", "existing_sink", ".", "filter_", "=", "UPDATED_FILTER", "existing_sink", ".", "update", "(", ")", "# [END sink_update]", "existing_sink", ".", "reload", "(", ")", "assert", "existing_sink", ".", "filter_", "==", "UPDATED_FILTER", "# [START sink_delete]", "sink", ".", "delete", "(", ")", "# [END sink_delete]", "to_delete", ".", "pop", "(", "0", ")" ]
Summarise the differences between this node and the other node .
def difference ( self , other ) : diff = ( tuple ( set ( self . plates ) - set ( other . plates ) ) , tuple ( set ( other . plates ) - set ( self . plates ) ) ) counts = map ( len , diff ) # is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0]) is_sub_plate = counts == [ 1 , 1 ] and diff [ 0 ] [ 0 ] . is_sub_plate ( diff [ 1 ] [ 0 ] ) # MK fixed if len ( other . plates ) == 1 and counts == [ 1 , 0 ] and diff [ 0 ] [ 0 ] . parent == other . plates [ 0 ] . parent : is_sub_plate = True return diff , counts , is_sub_plate
9,782
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/node/node.py#L113-L127
[ "def", "_ParseStorageMediaOptions", "(", "self", ",", "options", ")", ":", "self", ".", "_ParseStorageMediaImageOptions", "(", "options", ")", "self", ".", "_ParseVSSProcessingOptions", "(", "options", ")", "self", ".", "_ParseCredentialOptions", "(", "options", ")", "self", ".", "_ParseSourcePathOption", "(", "options", ")" ]
Produce a formatted report of the current timing data .
def report ( times = None , include_itrs = True , include_stats = True , delim_mode = False , format_options = None ) : if times is None : if f . root . stopped : return report_loc . report ( f . root . times , include_itrs , include_stats , delim_mode , format_options ) else : t = timer ( ) rep = report_loc . report ( collapse . collapse_times ( ) , include_itrs , include_stats , delim_mode , format_options , timer_state = 'running' ) f . root . self_cut += timer ( ) - t return rep else : if not isinstance ( times , Times ) : raise TypeError ( "Expected Times instance for param 'times' (default is root)." ) return report_loc . report ( times , include_itrs , include_stats , delim_mode , format_options )
9,783
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L22-L86
[ "def", "destroy_sns_event", "(", "app_name", ",", "env", ",", "region", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "sns_client", "=", "session", ".", "client", "(", "'sns'", ")", "lambda_subscriptions", "=", "get_sns_subscriptions", "(", "app_name", "=", "app_name", ",", "env", "=", "env", ",", "region", "=", "region", ")", "for", "subscription_arn", "in", "lambda_subscriptions", ":", "sns_client", ".", "unsubscribe", "(", "SubscriptionArn", "=", "subscription_arn", ")", "LOG", ".", "debug", "(", "\"Lambda SNS event deleted\"", ")", "return", "True" ]
Produce a formatted comparison of timing datas .
def compare ( times_list = None , name = None , include_list = True , include_stats = True , delim_mode = False , format_options = None ) : if times_list is None : rep = '' for par_dict in itervalues ( f . root . times . par_subdvsn ) : for par_name , par_list in iteritems ( par_dict ) : rep += report_loc . compare ( par_list , par_name , include_list , include_stats , delim_mode , format_options ) else : if not isinstance ( times_list , ( list , tuple ) ) : raise TypeError ( "Expected a list/tuple of times instances for param 'times_list'." ) if not all ( [ isinstance ( times , Times ) for times in times_list ] ) : raise TypeError ( "At least one member of param 'times_list' is not a Times object." ) rep = report_loc . compare ( times_list , name , include_list , include_stats , delim_mode , format_options ) return rep
9,784
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L89-L155
[ "def", "destroy_sns_event", "(", "app_name", ",", "env", ",", "region", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "sns_client", "=", "session", ".", "client", "(", "'sns'", ")", "lambda_subscriptions", "=", "get_sns_subscriptions", "(", "app_name", "=", "app_name", ",", "env", "=", "env", ",", "region", "=", "region", ")", "for", "subscription_arn", "in", "lambda_subscriptions", ":", "sns_client", ".", "unsubscribe", "(", "SubscriptionArn", "=", "subscription_arn", ")", "LOG", ".", "debug", "(", "\"Lambda SNS event deleted\"", ")", "return", "True" ]
Produce a formatted record of a times data structure .
def write_structure ( times = None ) : if times is None : return report_loc . write_structure ( f . root . times ) else : if not isinstance ( times , Times ) : raise TypeError ( "Expected Times instance for param 'times' (default is root)." ) return report_loc . write_structure ( times )
9,785
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L158-L176
[ "def", "removeApplicationManifest", "(", "self", ",", "pchApplicationManifestFullPath", ")", ":", "fn", "=", "self", ".", "function_table", ".", "removeApplicationManifest", "result", "=", "fn", "(", "pchApplicationManifestFullPath", ")", "return", "result" ]
Write all muons from McTracks to Muons .
def filter_muons ( blob ) : tracks = blob [ 'McTracks' ] muons = tracks [ tracks . type == - 13 ] # PDG particle code blob [ "Muons" ] = Table ( muons ) return blob
9,786
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/examples/plot_dom_hits.py#L33-L38
[ "def", "verify_and_extract_time", "(", "self", ",", "log_file", ",", "division", ",", "result_name", ")", ":", "expected_level", "=", "constants", ".", "DIVISION_COMPLIANCE_CHECK_LEVEL", ".", "get", "(", "division", ",", "None", ")", "print", "(", "result_name", ")", "if", "expected_level", "is", "None", ":", "raise", "Exception", "(", "'Unknown division: {}'", ".", "format", "(", "division", ")", ")", "start_time", ",", "level", ",", "dt", ",", "_", ",", "success", "=", "self", ".", "get_compliance", "(", "log_file", ")", "print", "(", "float", "(", "start_time", ")", ")", "if", "int", "(", "level", ")", "!=", "expected_level", ":", "raise", "Exception", "(", "'Error Level {} does not match needed level {}:{}'", ".", "format", "(", "level", ",", "expected_level", ",", "log_file", ")", ")", "# Sets failure to converge to \"infinite time\" per the rules", "if", "success", "and", "dt", ":", "return", "dt", ",", "start_time", "else", ":", "print", "(", "'Result was not a success set to INFINITE_TIME({})'", ".", "format", "(", "INFINITE_TIME", ")", ")", "return", "INFINITE_TIME", ",", "start_time" ]
Parse the configuration file and return dictionary of configuration options .
def parse_conf_files ( conf_paths ) : conf_file = ConfigParser . RawConfigParser ( ) conf_read = conf_file . read ( conf_paths ) conf = { } try : if conf_read : conf [ 'client_id' ] = conf_file . get ( 'runkeeper' , 'client_id' ) conf [ 'client_secret' ] = conf_file . get ( 'runkeeper' , 'client_secret' ) if conf_file . has_option ( 'runkeeper' , 'bindport' ) : conf [ 'bindport' ] = conf_file . getint ( 'runkeeper' , 'bindport' ) if conf_file . has_option ( 'runkeeper' , 'bindaddr' ) : conf [ 'bindaddr' ] = conf_file . get ( 'runkeeper' , 'bindaddr' ) if conf_file . has_option ( 'runkeeper' , 'baseurl' ) : conf [ 'baseurl' ] = conf_file . get ( 'runkeeper' , 'baseurl' ) return conf except ConfigParser . Error : raise ConfigurationError ( "Error parsing configuration file(s): %s\n" % sys . exc_info ( ) [ 1 ] ) else : raise ConfigurationError ( "No valid configuration file (%s) found." % defaultConfFilename )
9,787
https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/samples/bottle/runkeeper_demo.py#L148-L175
[ "def", "_try_free_lease", "(", "self", ",", "shard_state", ",", "slice_retry", "=", "False", ")", ":", "@", "db", ".", "transactional", "def", "_tx", "(", ")", ":", "fresh_state", "=", "model", ".", "ShardState", ".", "get_by_shard_id", "(", "shard_state", ".", "shard_id", ")", "if", "fresh_state", "and", "fresh_state", ".", "active", ":", "# Free lease.", "fresh_state", ".", "slice_start_time", "=", "None", "fresh_state", ".", "slice_request_id", "=", "None", "if", "slice_retry", ":", "fresh_state", ".", "slice_retries", "+=", "1", "fresh_state", ".", "put", "(", ")", "try", ":", "_tx", "(", ")", "# pylint: disable=broad-except", "except", "Exception", ",", "e", ":", "logging", ".", "warning", "(", "e", ")", "logging", ".", "warning", "(", "\"Release lock for shard %s failed. Wait for lease to expire.\"", ",", "shard_state", ".", "shard_id", ")" ]
Main Block - Configure and run the Bottle Web Server .
def main ( argv = None ) : cmd_opts = parse_cmdline ( argv ) [ 0 ] if cmd_opts . confpath is not None : if os . path . exists ( cmd_opts . confpath ) : conf_paths = [ cmd_opts . confpath , ] else : return "Configuration file not found: %s" % cmd_opts . confpath else : conf_paths = [ os . path . join ( path , defaultConfFilename ) for path in ( '/etc' , '.' , ) ] try : conf . update ( parse_conf_files ( conf_paths ) ) except ConfigurationError : return ( sys . exc_info ( ) [ 1 ] ) if cmd_opts . bindport is not None : conf [ 'bindport' ] = cmd_opts . bindport if cmd_opts . bindaddr is not None : conf [ 'bindaddr' ] = cmd_opts . bindaddr if cmd_opts . baseurl is not None : conf [ 'baseurl' ] = cmd_opts . baseurl if cmd_opts . devel : from bottle import debug debug ( True ) app = SessionMiddleware ( bottle . app ( ) , sessionOpts ) bottle . run ( app = app , host = conf [ 'bindaddr' ] , port = conf [ 'bindport' ] , reloader = cmd_opts . devel )
9,788
https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/samples/bottle/runkeeper_demo.py#L178-L204
[ "def", "restore_descriptor", "(", "self", ",", "table_name", ",", "columns", ",", "constraints", ",", "autoincrement_column", "=", "None", ")", ":", "# Fields", "fields", "=", "[", "]", "for", "column", "in", "columns", ":", "if", "column", ".", "name", "==", "autoincrement_column", ":", "continue", "field_type", "=", "self", ".", "restore_type", "(", "column", ".", "type", ")", "field", "=", "{", "'name'", ":", "column", ".", "name", ",", "'type'", ":", "field_type", "}", "if", "not", "column", ".", "nullable", ":", "field", "[", "'constraints'", "]", "=", "{", "'required'", ":", "True", "}", "fields", ".", "append", "(", "field", ")", "# Primary key", "pk", "=", "[", "]", "for", "constraint", "in", "constraints", ":", "if", "isinstance", "(", "constraint", ",", "sa", ".", "PrimaryKeyConstraint", ")", ":", "for", "column", "in", "constraint", ".", "columns", ":", "if", "column", ".", "name", "==", "autoincrement_column", ":", "continue", "pk", ".", "append", "(", "column", ".", "name", ")", "# Foreign keys", "fks", "=", "[", "]", "if", "self", ".", "__dialect", "==", "'postgresql'", ":", "for", "constraint", "in", "constraints", ":", "if", "isinstance", "(", "constraint", ",", "sa", ".", "ForeignKeyConstraint", ")", ":", "resource", "=", "''", "own_fields", "=", "[", "]", "foreign_fields", "=", "[", "]", "for", "element", "in", "constraint", ".", "elements", ":", "own_fields", ".", "append", "(", "element", ".", "parent", ".", "name", ")", "if", "element", ".", "column", ".", "table", ".", "name", "!=", "table_name", ":", "resource", "=", "self", ".", "restore_bucket", "(", "element", ".", "column", ".", "table", ".", "name", ")", "foreign_fields", ".", "append", "(", "element", ".", "column", ".", "name", ")", "if", "len", "(", "own_fields", ")", "==", "len", "(", "foreign_fields", ")", "==", "1", ":", "own_fields", "=", "own_fields", ".", "pop", "(", ")", "foreign_fields", "=", "foreign_fields", ".", "pop", "(", ")", "fks", ".", "append", "(", "{", "'fields'", 
":", "own_fields", ",", "'reference'", ":", "{", "'resource'", ":", "resource", ",", "'fields'", ":", "foreign_fields", "}", ",", "}", ")", "# Desscriptor", "descriptor", "=", "{", "}", "descriptor", "[", "'fields'", "]", "=", "fields", "if", "len", "(", "pk", ")", ">", "0", ":", "if", "len", "(", "pk", ")", "==", "1", ":", "pk", "=", "pk", ".", "pop", "(", ")", "descriptor", "[", "'primaryKey'", "]", "=", "pk", "if", "len", "(", "fks", ")", ">", "0", ":", "descriptor", "[", "'foreignKeys'", "]", "=", "fks", "return", "descriptor" ]
Return the hash of the movie depending on the input string .
def get_hash ( input_string ) : # Check if the input looks like a link to a movie: if os . path . islink ( input_string ) : directory , movie_hash = os . path . split ( os . readlink ( input_string ) ) input_string = movie_hash return input_string . lower ( )
9,789
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L8-L20
[ "def", "read", "(", "self", ")", ":", "line", "=", "self", ".", "trace_file", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "if", "self", ".", "loop", ":", "self", ".", "_reopen_file", "(", ")", "else", ":", "self", ".", "trace_file", ".", "close", "(", ")", "self", ".", "trace_file", "=", "None", "raise", "DataSourceError", "(", ")", "message", "=", "JsonFormatter", ".", "deserialize", "(", "line", ")", "timestamp", "=", "message", ".", "get", "(", "'timestamp'", ",", "None", ")", "if", "self", ".", "realtime", "and", "timestamp", "is", "not", "None", ":", "self", ".", "_store_timestamp", "(", "timestamp", ")", "self", ".", "_wait", "(", "self", ".", "starting_time", ",", "self", ".", "first_timestamp", ",", "timestamp", ")", "return", "line", "+", "\"\\x00\"" ]
Get data associated with provided key .
def get ( self , key ) : return self . _object_class ( json . loads ( self . _db [ key ] ) )
9,790
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L35-L38
[ "async", "def", "prepare_decrypter", "(", "client", ",", "cdn_client", ",", "cdn_redirect", ")", ":", "cdn_aes", "=", "AESModeCTR", "(", "key", "=", "cdn_redirect", ".", "encryption_key", ",", "# 12 first bytes of the IV..4 bytes of the offset (0, big endian)", "iv", "=", "cdn_redirect", ".", "encryption_iv", "[", ":", "12", "]", "+", "bytes", "(", "4", ")", ")", "# We assume that cdn_redirect.cdn_file_hashes are ordered by offset,", "# and that there will be enough of these to retrieve the whole file.", "decrypter", "=", "CdnDecrypter", "(", "cdn_client", ",", "cdn_redirect", ".", "file_token", ",", "cdn_aes", ",", "cdn_redirect", ".", "cdn_file_hashes", ")", "cdn_file", "=", "await", "cdn_client", "(", "GetCdnFileRequest", "(", "file_token", "=", "cdn_redirect", ".", "file_token", ",", "offset", "=", "cdn_redirect", ".", "cdn_file_hashes", "[", "0", "]", ".", "offset", ",", "limit", "=", "cdn_redirect", ".", "cdn_file_hashes", "[", "0", "]", ".", "limit", ")", ")", "if", "isinstance", "(", "cdn_file", ",", "CdnFileReuploadNeeded", ")", ":", "# We need to use the original client here", "await", "client", "(", "ReuploadCdnFileRequest", "(", "file_token", "=", "cdn_redirect", ".", "file_token", ",", "request_token", "=", "cdn_file", ".", "request_token", ")", ")", "# We want to always return a valid upload.CdnFile", "cdn_file", "=", "decrypter", ".", "get_file", "(", ")", "else", ":", "cdn_file", ".", "bytes", "=", "decrypter", ".", "cdn_aes", ".", "encrypt", "(", "cdn_file", ".", "bytes", ")", "cdn_hash", "=", "decrypter", ".", "cdn_file_hashes", ".", "pop", "(", "0", ")", "decrypter", ".", "check", "(", "cdn_file", ".", "bytes", ",", "cdn_hash", ")", "return", "decrypter", ",", "cdn_file" ]
Save data associated with key .
def save ( self , key , data ) : self . _db [ key ] = json . dumps ( data ) self . _db . sync ( )
9,791
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L45-L49
[ "def", "get_unreconstructed_slabs", "(", "self", ")", ":", "slabs", "=", "[", "]", "for", "slab", "in", "SlabGenerator", "(", "*", "*", "self", ".", "slabgen_params", ")", ".", "get_slabs", "(", ")", ":", "slab", ".", "make_supercell", "(", "self", ".", "trans_matrix", ")", "slabs", ".", "append", "(", "slab", ")", "return", "slabs" ]
Get the global meta data which will be stored in a tree structure
def global_meta_data ( self ) : with switch_db ( MetaDataModel , 'hyperstream' ) : return sorted ( map ( lambda x : x . to_dict ( ) , MetaDataModel . objects ) , key = lambda x : len ( x [ 'identifier' ] . split ( '.' ) ) , reverse = True )
9,792
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L56-L65
[ "def", "login", "(", "self", ")", ":", "access_token", "=", "self", ".", "_get_access_token", "(", ")", "try", ":", "super", "(", "IAMSession", ",", "self", ")", ".", "request", "(", "'POST'", ",", "self", ".", "_session_url", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "'access_token'", ":", "access_token", "}", ")", ")", ".", "raise_for_status", "(", ")", "except", "RequestException", ":", "raise", "CloudantException", "(", "'Failed to exchange IAM token with Cloudant'", ")" ]
Insert the given meta data into the database
def insert ( self , tag , identifier , parent , data ) : # First try to add it into the tree if self . global_plate_definitions . contains ( identifier ) : raise KeyError ( "Identifier {} already exists in tree" . format ( identifier ) ) self . global_plate_definitions . create_node ( tag = tag , identifier = identifier , parent = parent , data = data ) # Now try to add it into the database with switch_db ( MetaDataModel , 'hyperstream' ) : meta_data = MetaDataModel ( tag = tag , parent = parent , data = data ) meta_data . save ( ) logging . info ( "Meta data {} inserted" . format ( identifier ) )
9,793
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L76-L97
[ "def", "get_expressions", "(", ")", ":", "if", "len", "(", "_EXPRESSIONS", ")", "==", "len", "(", "FILENAME_PATTERNS", ")", ":", "return", "_EXPRESSIONS", "for", "cpattern", "in", "FILENAME_PATTERNS", ":", "_EXPRESSIONS", ".", "append", "(", "re", ".", "compile", "(", "cpattern", ",", "re", ".", "VERBOSE", ")", ")", "return", "_EXPRESSIONS" ]
Delete the meta data with the given identifier from the database
def delete ( self , identifier ) : try : node = self . global_plate_definitions [ identifier ] except NodeIDAbsentError : logging . info ( "Meta data {} not present during deletion" . format ( identifier ) ) return # First delete any children of the node: REMOVED as this seemed to be unreliable # It's now better to call delete_plate with delete_meta_data=True # for child in node.fpointer: # self.delete(child) self . global_plate_definitions . remove_node ( identifier ) with switch_db ( MetaDataModel , 'hyperstream' ) : meta_data = MetaDataModel . objects ( tag = node . tag , data = node . data , parent = node . bpointer ) . first ( ) if meta_data is not None : meta_data . delete ( ) logging . info ( "Meta data {} deleted" . format ( identifier ) )
9,794
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L99-L125
[ "def", "smoothed_path", "(", "path", ",", "maxjointsize", "=", "3", ",", "tightness", "=", "1.99", ",", "ignore_unfixable_kinks", "=", "False", ")", ":", "if", "len", "(", "path", ")", "==", "1", ":", "return", "path", "assert", "path", ".", "iscontinuous", "(", ")", "sharp_kinks", "=", "[", "]", "new_path", "=", "[", "path", "[", "0", "]", "]", "for", "idx", "in", "range", "(", "len", "(", "path", ")", ")", ":", "if", "idx", "==", "len", "(", "path", ")", "-", "1", ":", "if", "not", "path", ".", "isclosed", "(", ")", ":", "continue", "else", ":", "seg1", "=", "new_path", "[", "0", "]", "else", ":", "seg1", "=", "path", "[", "idx", "+", "1", "]", "seg0", "=", "new_path", "[", "-", "1", "]", "try", ":", "unit_tangent0", "=", "seg0", ".", "unit_tangent", "(", "1", ")", "unit_tangent1", "=", "seg1", ".", "unit_tangent", "(", "0", ")", "flag", "=", "False", "except", "ValueError", ":", "flag", "=", "True", "# unit tangent not well-defined", "if", "not", "flag", "and", "isclose", "(", "unit_tangent0", ",", "unit_tangent1", ")", ":", "# joint is already smooth", "if", "idx", "!=", "len", "(", "path", ")", "-", "1", ":", "new_path", ".", "append", "(", "seg1", ")", "continue", "else", ":", "kink_idx", "=", "(", "idx", "+", "1", ")", "%", "len", "(", "path", ")", "# kink at start of this seg", "if", "not", "flag", "and", "isclose", "(", "-", "unit_tangent0", ",", "unit_tangent1", ")", ":", "# joint is sharp 180 deg (must be fixed manually)", "new_path", ".", "append", "(", "seg1", ")", "sharp_kinks", ".", "append", "(", "kink_idx", ")", "else", ":", "# joint is not smooth, let's smooth it.", "args", "=", "(", "seg0", ",", "seg1", ",", "maxjointsize", ",", "tightness", ")", "new_seg0", ",", "elbow_segs", ",", "new_seg1", "=", "smoothed_joint", "(", "*", "args", ")", "new_path", "[", "-", "1", "]", "=", "new_seg0", "new_path", "+=", "elbow_segs", "if", "idx", "==", "len", "(", "path", ")", "-", "1", ":", "new_path", "[", "0", "]", "=", "new_seg1", "else", ":", "new_path", 
".", "append", "(", "new_seg1", ")", "# If unfixable kinks were found, let the user know", "if", "sharp_kinks", "and", "not", "ignore_unfixable_kinks", ":", "_report_unfixable_kinks", "(", "path", ",", "sharp_kinks", ")", "return", "Path", "(", "*", "new_path", ")" ]
Loads this stream by calling River View for data .
def load ( self ) : print "Loading data for %s..." % self . getName ( ) self . _dataHandle = self . _stream . data ( since = self . _since , until = self . _until , limit = self . _limit , aggregate = self . _aggregate ) self . _data = self . _dataHandle . data ( ) self . _headers = self . _dataHandle . headers ( ) print "Loaded %i rows." % len ( self )
9,795
https://github.com/htm-community/menorah/blob/1991b01eda3f6361b22ed165b4a688ae3fb2deaf/menorah/riverstream.py#L80-L91
[ "def", "bind_texture", "(", "texture", ")", ":", "if", "not", "getattr", "(", "texture", ",", "'image'", ",", "None", ")", ":", "texture", ".", "image", "=", "load_image", "(", "texture", ".", "path", ")", "glEnable", "(", "texture", ".", "image", ".", "target", ")", "glBindTexture", "(", "texture", ".", "image", ".", "target", ",", "texture", ".", "image", ".", "id", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_S", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_T", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")" ]
Seaborn - compatible hexbin plot .
def hexbin ( x , y , color = "purple" , * * kwargs ) : if HAS_SEABORN : cmap = sns . light_palette ( color , as_cmap = True ) else : cmap = "Purples" plt . hexbin ( x , y , cmap = cmap , * * kwargs )
9,796
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L33-L42
[ "def", "shutdown", "(", "self", ")", ":", "vm", "=", "self", ".", "get_vm_failfast", "(", "self", ".", "config", "[", "'name'", "]", ")", "if", "vm", ".", "runtime", ".", "powerState", "==", "vim", ".", "VirtualMachinePowerState", ".", "poweredOff", ":", "print", "(", "\"%s already poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "if", "self", ".", "guestToolsRunning", "(", "vm", ")", ":", "timeout_minutes", "=", "10", "print", "(", "\"waiting for %s to shutdown \"", "\"(%s minutes before forced powerOff)\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "vm", ".", "ShutdownGuest", "(", ")", "if", "self", ".", "WaitForVirtualMachineShutdown", "(", "vm", ",", "timeout_minutes", "*", "60", ")", ":", "print", "(", "\"shutdown complete\"", ")", "print", "(", "\"%s poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "print", "(", "\"%s has not shutdown after %s minutes:\"", "\"will powerOff\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "self", ".", "powerOff", "(", ")", "else", ":", "print", "(", "\"GuestTools not running or not installed: will powerOff\"", ")", "self", ".", "powerOff", "(", ")" ]
Plot the diagonal .
def diag ( ax = None , linecolor = '0.0' , linestyle = '--' , * * kwargs ) : ax = get_ax ( ax ) xy_min = np . min ( ( ax . get_xlim ( ) , ax . get_ylim ( ) ) ) xy_max = np . max ( ( ax . get_ylim ( ) , ax . get_xlim ( ) ) ) return ax . plot ( [ xy_min , xy_max ] , [ xy_min , xy_max ] , ls = linestyle , c = linecolor , * * kwargs )
9,797
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L52-L60
[ "def", "set_temperature", "(", "self", ",", "zone", ",", "temperature", ",", "until", "=", "None", ")", ":", "if", "until", "is", "None", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Hold\"", ",", "\"NextTime\"", ":", "None", "}", "else", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Temporary\"", ",", "\"NextTime\"", ":", "until", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "}", "self", ".", "_set_heat_setpoint", "(", "zone", ",", "data", ")" ]
Make a meshgrid inferred from data .
def automeshgrid ( x , y , step = 0.02 , xstep = None , ystep = None , pad = 0.5 , xpad = None , ypad = None ) : if xpad is None : xpad = pad if xstep is None : xstep = step if ypad is None : ypad = pad if ystep is None : ystep = step xmin = x . min ( ) - xpad xmax = x . max ( ) + xpad ymin = y . min ( ) - ypad ymax = y . max ( ) + ypad return meshgrid ( xmin , xmax , step , ymin , ymax , ystep )
9,798
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L63-L79
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Plot a histogram with counts binlims already given .
def prebinned_hist ( counts , binlims , ax = None , * args , * * kwargs ) : ax = get_ax ( ax ) x = bincenters ( binlims ) weights = counts return ax . hist ( x , bins = binlims , weights = weights , * args , * * kwargs )
9,799
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L96-L108
[ "def", "make_token_post", "(", "server", ",", "data", ")", ":", "try", ":", "response", "=", "requests", ".", "post", "(", "server", "+", "TOKEN_ENDPOINT", ",", "data", "=", "data", ",", "timeout", "=", "TIMEOUT", ")", "body", "=", "response", ".", "json", "(", ")", "except", "Exception", "as", "e", ":", "log", ".", "warning", "(", "'Other error when exchanging code'", ",", "exc_info", "=", "True", ")", "raise", "OAuthException", "(", "error", "=", "'Authentication Failed'", ",", "error_description", "=", "str", "(", "e", ")", ")", "if", "'error'", "in", "body", ":", "log", ".", "error", "(", "body", ")", "raise", "OAuthException", "(", "error", "=", "body", ".", "get", "(", "'error'", ",", "'Unknown Error'", ")", ",", "error_description", "=", "body", ".", "get", "(", "'error_description'", ",", "''", ")", ")", "return", "body" ]