query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Names all the tree nodes that are not named or have non - unique names with unique names .
def name_tree ( tree ) : existing_names = Counter ( ( _ . name for _ in tree . traverse ( ) if _ . name ) ) if sum ( 1 for _ in tree . traverse ( ) ) == len ( existing_names ) : return i = 0 existing_names = Counter ( ) for node in tree . traverse ( 'preorder' ) : name = node . name if node . is_leaf ( ) else ( 'root' if node . is_root ( ) else None ) while name is None or name in existing_names : name = '{}{}' . format ( 't' if node . is_leaf ( ) else 'n' , i ) i += 1 node . name = name existing_names [ name ] += 1
8,500
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/tree.py#L46-L66
[ "def", "decompress_messages", "(", "self", ",", "partitions_offmsgs", ")", ":", "for", "pomsg", "in", "partitions_offmsgs", ":", "if", "pomsg", "[", "'message'", "]", ":", "pomsg", "[", "'message'", "]", "=", "self", ".", "decompress_fun", "(", "pomsg", "[", "'message'", "]", ")", "yield", "pomsg" ]
Decorator that tries to use the object provided using a kwarg called electrode_transformator to transform the return values of an import function . It is intended to be used to transform electrode numbers and locations i . e . for use in roll - along - measurement schemes .
def enable_result_transforms ( func ) : @ functools . wraps ( func ) def wrapper ( * args , * * kwargs ) : func_transformator = kwargs . pop ( 'electrode_transformator' , None ) data , electrodes , topography = func ( * args , * * kwargs ) if func_transformator is not None : data_transformed , electrodes_transformed , topography_transformed = func_transformator . transform ( data , electrodes , topography ) return data_transformed , electrodes_transformed , topography_transformed else : return data , electrodes , topography return wrapper
8,501
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/utils/decorators.py#L4-L28
[ "def", "acquire_writer", "(", "self", ")", ":", "with", "self", ".", "mutex", ":", "while", "self", ".", "rwlock", "!=", "0", ":", "self", ".", "_writer_wait", "(", ")", "self", ".", "rwlock", "=", "-", "1" ]
If recording is not enabled return None as record path .
def record_path ( self ) : if self . record_button . get_property ( 'active' ) and ( self . record_path_selector . selected_path ) : return self . record_path_selector . selected_path else : return None
8,502
https://github.com/cfobel/webcam-recorder/blob/ffeb57c9044033fbea6372b3e642b83fd42dea87/webcam_recorder/video_view.py#L196-L204
[ "def", "unbind", "(", "self", ")", ":", "# Unbind the FBO", "if", "self", ".", "texture", ".", "mipmap", ":", "with", "self", ".", "texture", ":", "self", ".", "texture", ".", "generate_mipmap", "(", ")", "gl", ".", "glBindFramebufferEXT", "(", "gl", ".", "GL_FRAMEBUFFER_EXT", ",", "0", ")", "# Restore the old viewport size", "gl", ".", "glViewport", "(", "*", "self", ".", "_old_viewport", ")" ]
Write a valid crmod configuration file to filename .
def _write_crmod_file ( filename ) : crmod_lines = [ '***FILES***' , '../grid/elem.dat' , '../grid/elec.dat' , '../rho/rho.dat' , '../config/config.dat' , 'F ! potentials ?' , '../mod/pot/pot.dat' , 'T ! measurements ?' , '../mod/volt.dat' , 'F ! sensitivities ?' , '../mod/sens/sens.dat' , 'F ! another dataset ?' , '1 ! 2D (=0) or 2.5D (=1)' , 'F ! fictitious sink ?' , '1660 ! fictitious sink node number' , 'F ! boundary values ?' , 'boundary.dat' , ] with open ( filename , 'w' ) as fid : [ fid . write ( line + '\n' ) for line in crmod_lines ]
8,503
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geom_fac_crtomo.py#L39-L65
[ "def", "alloc_data", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", ":", "return", "self", ".", "_alloc_data", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "return", "self", ".", "_alloc_data", "(", "value", ".", "encode", "(", "'utf-8'", ")", "+", "b'\\0'", ")", "else", ":", "raise", "TypeError", "(", "'No idea how to encode %s'", "%", "repr", "(", "value", ")", ")" ]
Shortcut funnction for encoding given text with utf - 8
def utf ( text ) : try : output = unicode ( text , encoding = 'utf-8' ) except UnicodeDecodeError : output = text except TypeError : output = text return output
8,504
https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/__init__.py#L26-L34
[ "def", "parse_dict", "(", "value", ")", ":", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "value", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "]", "pairs", "=", "[", "line", ".", "split", "(", "':'", ",", "1", ")", "for", "line", "in", "lines", "if", "line", "]", "return", "dict", "(", "(", "k", ".", "strip", "(", ")", ",", "v", ".", "strip", "(", ")", ")", "for", "k", ",", "v", "in", "pairs", ")" ]
Determines file codec from from its BOM record .
def check_bom ( file ) : # try to read first three bytes lead = file . read ( 3 ) if len ( lead ) == 3 and lead == codecs . BOM_UTF8 : # UTF-8, position is already OK, use canonical name return codecs . lookup ( 'utf-8' ) . name elif len ( lead ) >= 2 and lead [ : 2 ] == codecs . BOM_UTF16_BE : # need to backup one character if len ( lead ) == 3 : file . seek ( - 1 , os . SEEK_CUR ) return codecs . lookup ( 'utf-16-be' ) . name elif len ( lead ) >= 2 and lead [ : 2 ] == codecs . BOM_UTF16_LE : # need to backup one character if len ( lead ) == 3 : file . seek ( - 1 , os . SEEK_CUR ) return codecs . lookup ( 'utf-16-le' ) . name else : # no BOM, rewind file . seek ( - len ( lead ) , os . SEEK_CUR ) return None
8,505
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/io.py#L10-L37
[ "def", "remote_port_uneq_store", "(", "self", ",", "remote_port", ")", ":", "if", "remote_port", "!=", "self", ".", "remote_port", ":", "self", ".", "remote_port", "=", "remote_port", "return", "True", "return", "False" ]
Guess current line number in a file .
def guess_lineno ( file ) : offset = file . tell ( ) file . seek ( 0 ) startpos = 0 lineno = 1 # looks like file.read() return bytes in python3 # so I need more complicated algorithm here while True : line = file . readline ( ) if not line : break endpos = file . tell ( ) if startpos <= offset < endpos : break lineno += 1 file . seek ( offset ) return lineno
8,506
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/io.py#L40-L62
[ "def", "remove_group_role", "(", "request", ",", "role", ",", "group", ",", "domain", "=", "None", ",", "project", "=", "None", ")", ":", "manager", "=", "keystoneclient", "(", "request", ",", "admin", "=", "True", ")", ".", "roles", "return", "manager", ".", "revoke", "(", "role", "=", "role", ",", "group", "=", "group", ",", "project", "=", "project", ",", "domain", "=", "domain", ")" ]
Search Penn Libraries Franklin for documents The maximum pagesize currently is 50 .
def search ( query ) : params = { 's.cmd' : 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query } return requests . get ( BASE_URL , params = params , timeout = 10 ) . json ( )
8,507
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/libraries.py#L7-L14
[ "def", "get_experiment_time", "(", "port", ")", ":", "response", "=", "rest_get", "(", "experiment_url", "(", "port", ")", ",", "REST_TIME_OUT", ")", "if", "response", "and", "check_response", "(", "response", ")", ":", "content", "=", "convert_time_stamp_to_date", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")", "return", "content", ".", "get", "(", "'startTime'", ")", ",", "content", ".", "get", "(", "'endTime'", ")", "return", "None", ",", "None" ]
Create Record instance based on parameters .
def make_record ( level , xref_id , tag , value , sub_records , offset , dialect , parser = None ) : # value can be bytes or string so we check for both, 64 is code for '@' if value and len ( value ) > 2 and ( ( value [ 0 ] == '@' and value [ - 1 ] == '@' ) or ( value [ 0 ] == 64 and value [ - 1 ] == 64 ) ) : # this looks like a <pointer>, make a Pointer record klass = Pointer rec = klass ( parser ) else : klass = _tag_class . get ( tag , Record ) rec = klass ( ) rec . level = level rec . xref_id = xref_id rec . tag = tag rec . value = value rec . sub_records = sub_records rec . offset = offset rec . dialect = dialect return rec
8,508
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L413-L450
[ "def", "_abort_all_transfers", "(", "self", ",", "exception", ")", ":", "pending_reads", "=", "len", "(", "self", ".", "_commands_to_read", ")", "# invalidate _transfer_list", "for", "transfer", "in", "self", ".", "_transfer_list", ":", "transfer", ".", "add_error", "(", "exception", ")", "# clear all deferred buffers", "self", ".", "_init_deferred_buffers", "(", ")", "# finish all pending reads and ignore the data", "# Only do this if the error is a tranfer error.", "# Otherwise this could cause another exception", "if", "isinstance", "(", "exception", ",", "DAPAccessIntf", ".", "TransferError", ")", ":", "for", "_", "in", "range", "(", "pending_reads", ")", ":", "self", ".", "_interface", ".", "read", "(", ")" ]
Returns direct sub - record with given tag name or None .
def sub_tag ( self , path , follow = True ) : tags = path . split ( '/' ) rec = self for tag in tags : recs = [ x for x in ( rec . sub_records or [ ] ) if x . tag == tag ] if not recs : return None rec = recs [ 0 ] if follow and isinstance ( rec , Pointer ) : rec = rec . ref return rec
8,509
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L69-L95
[ "def", "start", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "self", ".", "websock_url", "=", "self", ".", "chrome", ".", "start", "(", "*", "*", "kwargs", ")", "self", ".", "websock", "=", "websocket", ".", "WebSocketApp", "(", "self", ".", "websock_url", ")", "self", ".", "websock_thread", "=", "WebsockReceiverThread", "(", "self", ".", "websock", ",", "name", "=", "'WebsockThread:%s'", "%", "self", ".", "chrome", ".", "port", ")", "self", ".", "websock_thread", ".", "start", "(", ")", "self", ".", "_wait_for", "(", "lambda", ":", "self", ".", "websock_thread", ".", "is_open", ",", "timeout", "=", "30", ")", "# tell browser to send us messages we're interested in", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Page.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Console.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Runtime.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.setForceUpdateOnPageLoad'", ")", "# disable google analytics", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.setBlockedURLs'", ",", "params", "=", "{", "'urls'", ":", "[", "'*google-analytics.com/analytics.js'", ",", "'*google-analytics.com/ga.js'", "]", "}", ")" ]
Returns value of a direct sub - record or None .
def sub_tag_value ( self , path , follow = True ) : rec = self . sub_tag ( path , follow ) if rec : return rec . value return None
8,510
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L97-L111
[ "def", "sync_hooks", "(", "user_id", ",", "repositories", ")", ":", "from", ".", "api", "import", "GitHubAPI", "try", ":", "# Sync hooks", "gh", "=", "GitHubAPI", "(", "user_id", "=", "user_id", ")", "for", "repo_id", "in", "repositories", ":", "try", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "gh", ".", "sync_repo_hook", "(", "repo_id", ")", "# We commit per repository, because while the task is running", "# the user might enable/disable a hook.", "db", ".", "session", ".", "commit", "(", ")", "except", "RepositoryAccessError", "as", "e", ":", "current_app", ".", "logger", ".", "warning", "(", "e", ".", "message", ",", "exc_info", "=", "True", ")", "except", "NoResultFound", ":", "pass", "# Repository not in DB yet", "except", "Exception", "as", "exc", ":", "sync_hooks", ".", "retry", "(", "exc", "=", "exc", ")" ]
Returns list of direct sub - records matching any tag name .
def sub_tags ( self , * tags , * * kw ) : records = [ x for x in self . sub_records if x . tag in tags ] if kw . get ( 'follow' , True ) : records = [ rec . ref if isinstance ( rec , Pointer ) else rec for rec in records ] return records
8,511
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L113-L128
[ "def", "login", "(", "config", ",", "api_key", "=", "\"\"", ")", ":", "if", "not", "api_key", ":", "info_out", "(", "\"If you don't have an API Key, go to:\\n\"", "\"https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\\n\"", ")", "api_key", "=", "getpass", ".", "getpass", "(", "\"API Key: \"", ")", "# Before we store it, let's test it.", "url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "config", ".", "bugzilla_url", ",", "\"/rest/whoami\"", ")", "assert", "url", ".", "startswith", "(", "\"https://\"", ")", ",", "url", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "{", "\"api_key\"", ":", "api_key", "}", ")", "if", "response", ".", "status_code", "==", "200", ":", "if", "response", ".", "json", "(", ")", ".", "get", "(", "\"error\"", ")", ":", "error_out", "(", "\"Failed - {}\"", ".", "format", "(", "response", ".", "json", "(", ")", ")", ")", "else", ":", "update", "(", "config", ".", "configfile", ",", "{", "\"BUGZILLA\"", ":", "{", "\"bugzilla_url\"", ":", "config", ".", "bugzilla_url", ",", "\"api_key\"", ":", "api_key", ",", "# \"login\": login,", "}", "}", ",", ")", "success_out", "(", "\"Yay! It worked!\"", ")", "else", ":", "error_out", "(", "\"Failed - {} ({})\"", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "json", "(", ")", ")", ")" ]
Method called by parser when updates to this record finish .
def freeze ( self ) : # None is the same as empty string if self . value is None : self . value = "" if self . dialect in [ DIALECT_ALTREE ] : name_tuple = parse_name_altree ( self ) elif self . dialect in [ DIALECT_MYHERITAGE ] : name_tuple = parse_name_myher ( self ) elif self . dialect in [ DIALECT_ANCESTRIS ] : name_tuple = parse_name_ancestris ( self ) else : name_tuple = split_name ( self . value ) self . value = name_tuple return self
8,512
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L200-L217
[ "def", "permission_denied", "(", "request", ",", "template_name", "=", "None", ",", "extra_context", "=", "None", ")", ":", "if", "template_name", "is", "None", ":", "template_name", "=", "(", "'403.html'", ",", "'authority/403.html'", ")", "context", "=", "{", "'request_path'", ":", "request", ".", "path", ",", "}", "if", "extra_context", ":", "context", ".", "update", "(", "extra_context", ")", "return", "HttpResponseForbidden", "(", "loader", ".", "render_to_string", "(", "template_name", "=", "template_name", ",", "context", "=", "context", ",", "request", "=", "request", ",", ")", ")" ]
Given name could include both first and middle name
def given ( self ) : if self . _primary . value [ 0 ] and self . _primary . value [ 2 ] : return self . _primary . value [ 0 ] + ' ' + self . _primary . value [ 2 ] return self . _primary . value [ 0 ] or self . _primary . value [ 2 ]
8,513
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L268-L272
[ "def", "run", "(", "self", ")", ":", "self", ".", "run_plugins", "(", ")", "while", "True", ":", "# Reload plugins and config if either the config file or plugin", "# directory are modified.", "if", "self", ".", "_config_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_config_file_path", ")", "or", "self", ".", "_plugin_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_plugin_path", ")", ":", "self", ".", "thread_manager", ".", "kill_all_threads", "(", ")", "self", ".", "output_dict", ".", "clear", "(", ")", "self", ".", "reload", "(", ")", "self", ".", "run_plugins", "(", ")", "self", ".", "output_to_bar", "(", "json", ".", "dumps", "(", "self", ".", "_remove_empty_output", "(", ")", ")", ")", "time", ".", "sleep", "(", "self", ".", "config", ".", "general", "[", "'interval'", "]", ")" ]
Maiden last name can be None
def maiden ( self ) : if self . _dialect == DIALECT_DEFAULT : # for default/unknown dialect try "maiden" name record first for name in self . _names : if name . type == "maiden" : return name . value [ 1 ] # rely on NameRec extracting it from other source if self . _primary and len ( self . _primary . value ) > 3 : return self . _primary . value [ 3 ] return None
8,514
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L283-L293
[ "def", "tan_rand", "(", "q", ",", "seed", "=", "9", ")", ":", "# probably need a check in case we get a parallel vector", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", ")", "rvec", "=", "rs", ".", "rand", "(", "q", ".", "shape", "[", "0", "]", ")", "qd", "=", "np", ".", "cross", "(", "rvec", ",", "q", ")", "qd", "=", "qd", "/", "np", ".", "linalg", ".", "norm", "(", "qd", ")", "while", "np", ".", "dot", "(", "q", ",", "qd", ")", ">", "1e-6", ":", "rvec", "=", "rs", ".", "rand", "(", "q", ".", "shape", "[", "0", "]", ")", "qd", "=", "np", ".", "cross", "(", "rvec", ",", "q", ")", "qd", "=", "qd", "/", "np", ".", "linalg", ".", "norm", "(", "qd", ")", "return", "qd" ]
Returns name order key .
def order ( self , order ) : given = self . given surname = self . surname if order in ( ORDER_MAIDEN_GIVEN , ORDER_GIVEN_MAIDEN ) : surname = self . maiden or self . surname # We are collating empty names to come after non-empty, # so instead of empty we return "2" and add "1" as prefix to others given = ( "1" + given ) if given else "2" surname = ( "1" + surname ) if surname else "2" if order in ( ORDER_SURNAME_GIVEN , ORDER_MAIDEN_GIVEN ) : return ( surname , given ) elif order in ( ORDER_GIVEN_SURNAME , ORDER_GIVEN_MAIDEN ) : return ( given , surname ) else : raise ValueError ( "unexpected order: {}" . format ( order ) )
8,515
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L295-L321
[ "def", "connect", "(", "self", ",", "db_uri", ",", "debug", "=", "False", ")", ":", "kwargs", "=", "{", "'echo'", ":", "debug", ",", "'convert_unicode'", ":", "True", "}", "# connect to the SQL database", "if", "'mysql'", "in", "db_uri", ":", "kwargs", "[", "'pool_recycle'", "]", "=", "3600", "elif", "'://'", "not", "in", "db_uri", ":", "logger", ".", "debug", "(", "\"detected sqlite path URI: {}\"", ".", "format", "(", "db_uri", ")", ")", "db_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "db_uri", ")", ")", "db_uri", "=", "\"sqlite:///{}\"", ".", "format", "(", "db_path", ")", "self", ".", "engine", "=", "create_engine", "(", "db_uri", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "'connection established successfully'", ")", "# make sure the same engine is propagated to the BASE classes", "BASE", ".", "metadata", ".", "bind", "=", "self", ".", "engine", "# start a session", "self", ".", "session", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "self", ".", "engine", ")", ")", "# shortcut to query method", "self", ".", "query", "=", "self", ".", "session", ".", "query", "return", "self" ]
Format name for output .
def format ( self ) : name = self . _primary . value [ 0 ] if self . surname : if name : name += ' ' name += self . surname if self . _primary . value [ 2 ] : if name : name += ' ' name += self . _primary . value [ 2 ] return name
8,516
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L323-L337
[ "def", "delete_share", "(", "self", ",", "share_name", ",", "fail_not_exist", "=", "False", ",", "timeout", "=", "None", ",", "snapshot", "=", "None", ",", "delete_snapshots", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'DELETE'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ")", "request", ".", "headers", "=", "{", "'x-ms-delete-snapshots'", ":", "_to_str", "(", "delete_snapshots", ")", "}", "request", ".", "query", "=", "{", "'restype'", ":", "'share'", ",", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", ",", "'sharesnapshot'", ":", "_to_str", "(", "snapshot", ")", ",", "}", "if", "not", "fail_not_exist", ":", "try", ":", "self", ".", "_perform_request", "(", "request", ",", "expected_errors", "=", "[", "_SHARE_NOT_FOUND_ERROR_CODE", "]", ")", "return", "True", "except", "AzureHttpError", "as", "ex", ":", "_dont_fail_not_exist", "(", "ex", ")", "return", "False", "else", ":", "self", ".", "_perform_request", "(", "request", ")", "return", "True" ]
Processes a transaction against this rule
def match ( self , xn ) : if all ( map ( lambda x : x . match ( xn ) , self . conditions ) ) : return self . outcomes return None
8,517
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/rule.py#L152-L160
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Import ALL data from the result files
def import_sip04_data_all ( data_filename ) : filename , fformat = os . path . splitext ( data_filename ) if fformat == '.csv' : print ( 'Import SIP04 data from .csv file' ) df_all = _import_csv_file ( data_filename ) elif fformat == '.mat' : print ( 'Import SIP04 data from .mat file' ) df_all = _import_mat_file ( data_filename ) else : print ( 'Please use .csv or .mat format.' ) df_all = None return df_all
8,518
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/sip04.py#L94-L121
[ "def", "string_sanitize", "(", "string", ",", "tab_width", "=", "8", ")", ":", "string", "=", "string", ".", "replace", "(", "'\\r'", ",", "''", ")", "lines", "=", "list", "(", ")", "for", "line", "in", "string", ".", "split", "(", "'\\n'", ")", ":", "tab_count", "=", "line", ".", "count", "(", "'\\t'", ")", "if", "tab_count", ">", "0", ":", "line_length", "=", "0", "new_line", "=", "list", "(", ")", "for", "i", ",", "chunk", "in", "enumerate", "(", "line", ".", "split", "(", "'\\t'", ")", ")", ":", "line_length", "+=", "len", "(", "chunk", ")", "new_line", ".", "append", "(", "chunk", ")", "if", "i", "<", "tab_count", ":", "next_tab_stop_in", "=", "tab_width", "-", "(", "line_length", "%", "tab_width", ")", "new_line", ".", "append", "(", "' '", "*", "next_tab_stop_in", ")", "line_length", "+=", "next_tab_stop_in", "lines", ".", "append", "(", "''", ".", "join", "(", "new_line", ")", ")", "else", ":", "lines", ".", "append", "(", "line", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
A SQLAlchemy Session requires that an engine be initialized if one isn t provided .
def init_session ( db_url = None , echo = False , engine = None , settings = None ) : if engine is None : engine = init_engine ( db_url = db_url , echo = echo , settings = settings ) return sessionmaker ( bind = engine )
8,519
https://github.com/YosaiProject/yosai_alchemystore/blob/6479c159ab2ac357e6b70cdd71a2d673279e86bb/yosai_alchemystore/meta/meta.py#L58-L65
[ "def", "slurpChompedLines", "(", "file", ",", "expand", "=", "False", ")", ":", "f", "=", "_normalizeToFile", "(", "file", ",", "\"r\"", ",", "expand", ")", "try", ":", "return", "list", "(", "chompLines", "(", "f", ")", ")", "finally", ":", "f", ".", "close", "(", ")" ]
Radic SIP256c data import
def import_sip256c ( self , filename , settings = None , reciprocal = None , * * kwargs ) : if settings is None : settings = { } # we get not electrode positions (dummy1) and no topography data # (dummy2) df , dummy1 , dummy2 = reda_sip256c . parse_radic_file ( filename , settings , reciprocal = reciprocal , * * kwargs ) self . _add_to_container ( df ) print ( 'Summary:' ) self . _describe_data ( df )
8,520
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L72-L84
[ "def", "directory_opener", "(", "path", ",", "pattern", "=", "''", ",", "verbose", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "NotADirectoryError", "else", ":", "openers_list", "=", "[", "opener", "for", "opener", "in", "openers", "if", "not", "opener", ".", "__name__", ".", "startswith", "(", "'directory'", ")", "]", "# remove directory", "for", "root", ",", "dirlist", ",", "filelist", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "filename", "in", "filelist", ":", "if", "pattern", "and", "not", "re", ".", "match", "(", "pattern", ",", "filename", ")", ":", "logger", ".", "verbose", "(", "'Skipping file: {}, did not match regex pattern \"{}\"'", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ",", "pattern", ")", ")", "continue", "filename_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "for", "filehandle", "in", "filehandles", "(", "filename_path", ",", "openers_list", "=", "openers_list", ",", "pattern", "=", "pattern", ",", "verbose", "=", "verbose", ")", ":", "yield", "filehandle" ]
EIT data import for FZJ Medusa systems
def import_eit_fzj ( self , filename , configfile , correction_file = None , timestep = None , * * kwargs ) : # we get not electrode positions (dummy1) and no topography data # (dummy2) df_emd , dummy1 , dummy2 = eit_fzj . read_3p_data ( filename , configfile , * * kwargs ) if correction_file is not None : eit_fzj_utils . apply_correction_factors ( df_emd , correction_file ) if timestep is not None : df_emd [ 'timestep' ] = timestep self . _add_to_container ( df_emd ) print ( 'Summary:' ) self . _describe_data ( df_emd )
8,521
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L86-L105
[ "def", "acquire", "(", "self", ",", "*", "*", "kwargs", ")", ":", "token", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "attempted", "=", "False", "while", "self", ".", "token", "is", "None", ":", "try", ":", "self", ".", "client", ".", "test_and_set", "(", "self", ".", "key", ",", "token", ",", "\"0\"", ",", "ttl", "=", "self", ".", "ttl", ")", "self", ".", "token", "=", "token", "except", "etcd", ".", "EtcdKeyNotFound", ",", "e", ":", "try", ":", "self", ".", "client", ".", "write", "(", "self", ".", "key", ",", "token", ",", "prevExist", "=", "False", ",", "recursive", "=", "True", ",", "ttl", "=", "self", ".", "ttl", ")", "self", ".", "token", "=", "token", "except", "etcd", ".", "EtcdAlreadyExist", ",", "e", ":", "pass", "# someone created the right before us", "except", "ValueError", ",", "e", ":", "# someone else has the lock", "if", "'timeout'", "in", "kwargs", "or", "self", ".", "timeout", "is", "not", "None", ":", "if", "attempted", "is", "True", ":", "return", "False", "kwargs", ".", "setdefault", "(", "\"timeout\"", ",", "self", ".", "timeout", ")", "try", ":", "self", ".", "client", ".", "read", "(", "self", ".", "key", ",", "wait", "=", "True", ",", "timeout", "=", "kwargs", "[", "\"timeout\"", "]", ")", "attempted", "=", "True", "except", "etcd", ".", "EtcdException", ",", "e", ":", "return", "False", "else", ":", "self", ".", "client", ".", "watch", "(", "self", ".", "key", ")", "if", "self", ".", "renewSecondsPrior", "is", "not", "None", ":", "timer_ttl", "=", "self", ".", "ttl", "-", "self", ".", "renewSecondsPrior", "if", "timer_ttl", ">", "0", ":", "def", "renew", "(", ")", ":", "if", "self", ".", "renew", "(", ")", ":", "Timer", "(", "timer_ttl", ",", "renew", ")", ".", "start", "(", ")", "Timer", "(", "timer_ttl", ",", "renew", ")", ".", "start", "(", ")", "else", ":", "def", "cleanup", "(", ")", ":", "if", "self", ".", "token", "is", "token", ":", "self", ".", "token", "=", "None", "Timer", "(", "self", ".", "ttl", ",", "cleanup", 
")", ".", "start", "(", ")", "return", "True" ]
Check the given dataframe for the required columns
def check_dataframe ( self , dataframe ) : required_columns = ( 'a' , 'b' , 'm' , 'n' , 'r' , ) for column in required_columns : if column not in dataframe : raise Exception ( 'Required column not in dataframe: {0}' . format ( column ) )
8,522
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L118-L132
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
State what you want to keep
def query ( self , query , inplace = True ) : # TODO: add to queue result = self . data . query ( query , inplace = inplace ) return result
8,523
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L163-L169
[ "def", "read_fault_geometry", "(", "self", ",", "geo_dict", ",", "mesh_spacing", "=", "1.0", ")", ":", "if", "geo_dict", "[", "'Fault_Typology'", "]", "==", "'Simple'", ":", "# Simple fault geometry", "raw_trace", "=", "geo_dict", "[", "'Fault_Trace'", "]", "trace", "=", "Line", "(", "[", "Point", "(", "raw_trace", "[", "ival", "]", ",", "raw_trace", "[", "ival", "+", "1", "]", ")", "for", "ival", "in", "range", "(", "0", ",", "len", "(", "raw_trace", ")", ",", "2", ")", "]", ")", "geometry", "=", "SimpleFaultGeometry", "(", "trace", ",", "geo_dict", "[", "'Dip'", "]", ",", "geo_dict", "[", "'Upper_Depth'", "]", ",", "geo_dict", "[", "'Lower_Depth'", "]", ",", "mesh_spacing", ")", "elif", "geo_dict", "[", "'Fault_Typology'", "]", "==", "'Complex'", ":", "# Complex Fault Typology", "trace", "=", "[", "]", "for", "raw_trace", "in", "geo_dict", "[", "'Fault_Trace'", "]", ":", "fault_edge", "=", "Line", "(", "[", "Point", "(", "raw_trace", "[", "ival", "]", ",", "raw_trace", "[", "ival", "+", "1", "]", ",", "raw_trace", "[", "ival", "+", "2", "]", ")", "for", "ival", "in", "range", "(", "0", ",", "len", "(", "raw_trace", ")", ",", "3", ")", "]", ")", "trace", ".", "append", "(", "fault_edge", ")", "geometry", "=", "ComplexFaultGeometry", "(", "trace", ",", "mesh_spacing", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognised or unsupported fault geometry!'", ")", "return", "geometry" ]
Remove frequencies from the dataset
def remove_frequencies ( self , fmin , fmax ) : self . data . query ( 'frequency > {0} and frequency < {1}' . format ( fmin , fmax ) , inplace = True ) g = self . data . groupby ( 'frequency' ) print ( 'Remaining frequencies:' ) print ( sorted ( g . groups . keys ( ) ) )
8,524
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L196-L205
[ "def", "load_plugins", "(", "self", ",", "plugin_class_name", ")", ":", "# imp.findmodule('atomic_reactor') doesn't work", "plugins_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'plugins'", ")", "logger", ".", "debug", "(", "\"loading plugins from dir '%s'\"", ",", "plugins_dir", ")", "files", "=", "[", "os", ".", "path", ".", "join", "(", "plugins_dir", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "plugins_dir", ")", "if", "f", ".", "endswith", "(", "\".py\"", ")", "]", "if", "self", ".", "plugin_files", ":", "logger", ".", "debug", "(", "\"loading additional plugins from files '%s'\"", ",", "self", ".", "plugin_files", ")", "files", "+=", "self", ".", "plugin_files", "plugin_class", "=", "globals", "(", ")", "[", "plugin_class_name", "]", "plugin_classes", "=", "{", "}", "for", "f", "in", "files", ":", "module_name", "=", "os", ".", "path", ".", "basename", "(", "f", ")", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "# Do not reload plugins", "if", "module_name", "in", "sys", ".", "modules", ":", "f_module", "=", "sys", ".", "modules", "[", "module_name", "]", "else", ":", "try", ":", "logger", ".", "debug", "(", "\"load file '%s'\"", ",", "f", ")", "f_module", "=", "imp", ".", "load_source", "(", "module_name", ",", "f", ")", "except", "(", "IOError", ",", "OSError", ",", "ImportError", ",", "SyntaxError", ")", "as", "ex", ":", "logger", ".", "warning", "(", "\"can't load module '%s': %r\"", ",", "f", ",", "ex", ")", "continue", "for", "name", "in", "dir", "(", "f_module", ")", ":", "binding", "=", "getattr", "(", "f_module", ",", "name", ",", "None", ")", "try", ":", "# if you try to compare binding and PostBuildPlugin, python won't match them", "# if you call this script directly b/c:", "# ! 
<class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class", "# '__main__.PostBuildPlugin'>", "# but", "# <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class", "# 'atomic_reactor.plugin.PostBuildPlugin'>", "is_sub", "=", "issubclass", "(", "binding", ",", "plugin_class", ")", "except", "TypeError", ":", "is_sub", "=", "False", "if", "binding", "and", "is_sub", "and", "plugin_class", ".", "__name__", "!=", "binding", ".", "__name__", ":", "plugin_classes", "[", "binding", ".", "key", "]", "=", "binding", "return", "plugin_classes" ]
Assuming an equal electrode spacing compute the K - factor over a homogeneous half - space .
def compute_K_analytical ( self , spacing ) : assert isinstance ( spacing , Number ) K = geometric_factors . compute_K_analytical ( self . data , spacing ) self . data = geometric_factors . apply_K ( self . data , K ) fix_sign_with_K ( self . data )
8,525
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L207-L223
[ "def", "run", "(", "self", ")", ":", "states", "=", "open", "(", "self", ".", "states", ",", "'r'", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", "for", "state", "in", "states", ":", "url", "=", "self", ".", "build_url", "(", "state", ")", "log", "=", "\"Downloading State < {0} > from < {1} >\"", "logging", ".", "info", "(", "log", ".", "format", "(", "state", ",", "url", ")", ")", "tmp", "=", "self", ".", "download", "(", "self", ".", "output", ",", "url", ",", "self", ".", "overwrite", ")", "self", ".", "s3", ".", "store", "(", "self", ".", "extract", "(", "tmp", ",", "self", ".", "tmp2poi", "(", "tmp", ")", ")", ")" ]
Create a scatter plot for all diff pairs
def scatter_norrec ( self , filename = None , individual = False ) : # if not otherwise specified, use these column pairs: std_diff_labels = { 'r' : 'rdiff' , 'rpha' : 'rphadiff' , } diff_labels = std_diff_labels # check which columns are present in the data labels_to_use = { } for key , item in diff_labels . items ( ) : # only use if BOTH columns are present if key in self . data . columns and item in self . data . columns : labels_to_use [ key ] = item g_freq = self . data . groupby ( 'frequency' ) frequencies = list ( sorted ( g_freq . groups . keys ( ) ) ) if individual : figures = { } axes_all = { } else : Nx = len ( labels_to_use . keys ( ) ) Ny = len ( frequencies ) fig , axes = plt . subplots ( Ny , Nx , figsize = ( Nx * 2.5 , Ny * 2.5 ) ) for row , ( name , item ) in enumerate ( g_freq ) : if individual : fig , axes_row = plt . subplots ( 1 , 2 , figsize = ( 16 / 2.54 , 6 / 2.54 ) ) else : axes_row = axes [ row , : ] # loop over the various columns for col_nr , ( key , diff_column ) in enumerate ( sorted ( labels_to_use . items ( ) ) ) : indices = np . where ( ~ np . isnan ( item [ diff_column ] ) ) [ 0 ] ax = axes_row [ col_nr ] ax . scatter ( item [ key ] , item [ diff_column ] , ) ax . set_xlabel ( key ) ax . set_ylabel ( diff_column ) ax . set_title ( 'N: {}' . format ( len ( indices ) ) ) if individual : fig . tight_layout ( ) figures [ name ] = fig axes_all [ name ] = axes_row if individual : return figures , axes_all else : fig . tight_layout ( ) return fig , axes
8,526
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L230-L305
[ "def", "_start_vibration_win", "(", "self", ",", "left_motor", ",", "right_motor", ")", ":", "xinput_set_state", "=", "self", ".", "manager", ".", "xinput", ".", "XInputSetState", "xinput_set_state", ".", "argtypes", "=", "[", "ctypes", ".", "c_uint", ",", "ctypes", ".", "POINTER", "(", "XinputVibration", ")", "]", "xinput_set_state", ".", "restype", "=", "ctypes", ".", "c_uint", "vibration", "=", "XinputVibration", "(", "int", "(", "left_motor", "*", "65535", ")", ",", "int", "(", "right_motor", "*", "65535", ")", ")", "xinput_set_state", "(", "self", ".", "__device_number", ",", "ctypes", ".", "byref", "(", "vibration", ")", ")" ]
Return a spectrum and its reciprocal counter part if present in the dataset . Optimally refer to the spectrum by its normal - reciprocal id .
def get_spectrum ( self , nr_id = None , abmn = None , plot_filename = None ) : assert nr_id is None or abmn is None # determine nr_id for given abmn tuple if abmn is not None : subdata = self . data . query ( 'a == {} and b == {} and m == {} and n == {}' . format ( * abmn ) ) . sort_values ( 'frequency' ) if subdata . shape [ 0 ] == 0 : return None , None # determine the norrec-id of this spectrum nr_id = subdata [ 'id' ] . iloc [ 0 ] # get spectra subdata_nor = self . data . query ( 'id == {} and norrec=="nor"' . format ( nr_id ) ) . sort_values ( 'frequency' ) subdata_rec = self . data . query ( 'id == {} and norrec=="rec"' . format ( nr_id ) ) . sort_values ( 'frequency' ) # create spectrum objects spectrum_nor = None spectrum_rec = None if subdata_nor . shape [ 0 ] > 0 : spectrum_nor = eis_plot . sip_response ( frequencies = subdata_nor [ 'frequency' ] . values , rmag = subdata_nor [ 'r' ] , rpha = subdata_nor [ 'rpha' ] , ) if subdata_rec . shape [ 0 ] > 0 : spectrum_rec = eis_plot . sip_response ( frequencies = subdata_rec [ 'frequency' ] . values , rmag = subdata_rec [ 'r' ] , rpha = subdata_rec [ 'rpha' ] , ) if plot_filename is not None : if spectrum_nor is not None : fig = spectrum_nor . plot ( plot_filename , reciprocal = spectrum_rec , return_fig = True , title = 'a: {} b: {} m: {}: n: {}' . format ( * subdata_nor [ [ 'a' , 'b' , 'm' , 'n' ] ] . values [ 0 , : ] ) ) return spectrum_nor , spectrum_rec , fig return spectrum_nor , spectrum_rec
8,527
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L357-L419
[ "def", "get_new_members", "(", "self", ",", "results", ")", ":", "for", "member", "in", "results", ":", "guid", "=", "member", ".", "pop", "(", "'guid'", ")", "yield", "Member", "(", "self", ".", "manager", ",", "self", ".", "group_id", ",", "*", "*", "member", ")", "member", "[", "'guid'", "]", "=", "guid" ]
This is a convenience function to plot ALL spectra currently stored in the container . It is useful to asses whether data filters do perform correctly .
def plot_all_spectra ( self , outdir ) : os . makedirs ( outdir , exist_ok = True ) g = self . data . groupby ( 'id' ) for nr , ( name , item ) in enumerate ( g ) : print ( 'Plotting spectrum with id {} ({} / {})' . format ( name , nr , len ( g . groups . keys ( ) ) ) ) plot_filename = '' . join ( ( outdir + os . sep , '{:04}_spectrum_id_{}.png' . format ( nr , name ) ) ) spec_nor , spec_rec , spec_fig = self . get_spectrum ( nr_id = name , plot_filename = plot_filename ) plt . close ( spec_fig )
8,528
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L421-L453
[ "def", "compact", "(", "self", ",", "revision", ",", "physical", "=", "False", ")", ":", "compact_request", "=", "etcdrpc", ".", "CompactionRequest", "(", "revision", "=", "revision", ",", "physical", "=", "physical", ")", "self", ".", "kvstub", ".", "Compact", "(", "compact_request", ",", "self", ".", "timeout", ",", "credentials", "=", "self", ".", "call_credentials", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Create a multi - plot with one pseudosection for each frequency .
def plot_pseudosections ( self , column , filename = None , return_fig = False ) : assert column in self . data . columns g = self . data . groupby ( 'frequency' ) fig , axes = plt . subplots ( 4 , 2 , figsize = ( 15 / 2.54 , 20 / 2.54 ) , sharex = True , sharey = True ) for ax , ( key , item ) in zip ( axes . flat , g ) : fig , ax , cb = PS . plot_pseudosection_type2 ( item , ax = ax , column = column ) ax . set_title ( 'f: {} Hz' . format ( key ) ) fig . tight_layout ( ) if filename is not None : fig . savefig ( filename , dpi = 300 ) if return_fig : return fig else : plt . close ( fig )
8,529
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L455-L493
[ "async", "def", "invite_details", "(", "self", ",", "abbreviated", ":", "bool", ")", "->", "dict", ":", "if", "not", "hasattr", "(", "Connection", ".", "invite_details", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_connection_invite_details: Creating callback\"", ")", "Connection", ".", "invite_details", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_uint32", ",", "c_uint32", ",", "c_char_p", ")", ")", "c_connection_handle", "=", "c_uint32", "(", "self", ".", "handle", ")", "c_abbreviated", "=", "c_bool", "(", "abbreviated", ")", "details", "=", "await", "do_call", "(", "'vcx_connection_invite_details'", ",", "c_connection_handle", ",", "c_abbreviated", ",", "Connection", ".", "invite_details", ".", "cb", ")", "return", "json", ".", "loads", "(", "details", ".", "decode", "(", ")", ")" ]
Export the sEIT data into data files that can be read by CRTomo .
def export_to_directory_crtomo ( self , directory , norrec = 'norrec' ) : exporter_crtomo . write_files_to_directory ( self . data , directory , norrec = norrec )
8,530
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L495-L508
[ "def", "disassociate_address", "(", "self", ",", "public_ip", "=", "None", ",", "association_id", "=", "None", ")", ":", "params", "=", "{", "}", "if", "public_ip", "is", "not", "None", ":", "params", "[", "'PublicIp'", "]", "=", "public_ip", "elif", "association_id", "is", "not", "None", ":", "params", "[", "'AssociationId'", "]", "=", "association_id", "return", "self", ".", "get_status", "(", "'DisassociateAddress'", ",", "params", ",", "verb", "=", "'POST'", ")" ]
Return a ready - initialized seit - manager object from the CRTomo tools . This function only works if the crtomo_tools are installed .
def export_to_crtomo_seit_manager ( self , grid ) : import crtomo g = self . data . groupby ( 'frequency' ) seit_data = { } for name , item in g : print ( name , item . shape , item . size ) if item . shape [ 0 ] > 0 : seit_data [ name ] = item [ [ 'a' , 'b' , 'm' , 'n' , 'r' , 'rpha' ] ] . values seit = crtomo . eitMan ( grid = grid , seit_data = seit_data ) return seit
8,531
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L510-L524
[ "def", "from_lines", "(", "lines", ":", "Iterable", "[", "str", "]", ",", "*", "*", "kwargs", ")", "->", "BELGraph", ":", "graph", "=", "BELGraph", "(", ")", "parse_lines", "(", "graph", "=", "graph", ",", "lines", "=", "lines", ",", "*", "*", "kwargs", ")", "return", "graph" ]
Pretty prints the tape values
def get_tape ( self , start = 0 , end = 10 ) : self . tape_start = start self . tape_end = end self . tape_length = end - start tmp = '\n' + "|" + str ( start ) + "| " for i in xrange ( len ( self . tape [ start : end ] ) ) : if i == self . cur_cell : tmp += "[" + str ( self . tape [ i ] ) + "] " else : tmp += ":" + str ( self . tape [ i ] ) + ": " tmp += " |" + str ( end ) + "|" return tmp
8,532
https://github.com/joelbm24/brainy/blob/bc3e1d6e020f1bb884a9bbbda834dac3a7a7fdb4/lib/bfinter.py#L79-L90
[ "def", "create_stream_subscription", "(", "self", ",", "stream", ",", "on_data", ",", "timeout", "=", "60", ")", ":", "options", "=", "rest_pb2", ".", "StreamSubscribeRequest", "(", ")", "options", ".", "stream", "=", "stream", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'stream'", ",", "options", "=", "options", ")", "# Represent subscription as a future", "subscription", "=", "WebSocketSubscriptionFuture", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_stream_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
SIP04 data import
def import_sip04 ( self , filename , timestep = None ) : df = reda_sip04 . import_sip04_data ( filename ) if timestep is not None : print ( 'adding timestep' ) df [ 'timestep' ] = timestep self . _add_to_container ( df ) print ( 'Summary:' ) self . _describe_data ( df )
8,533
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L26-L54
[ "def", "requires_open_handle", "(", "method", ")", ":", "# pylint: disable=invalid-name", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper_requiring_open_handle", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"The wrapper to be returned.\"\"\"", "if", "self", ".", "is_closed", "(", ")", ":", "raise", "usb_exceptions", ".", "HandleClosedError", "(", ")", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper_requiring_open_handle" ]
Check the given dataframe for the required type and columns
def check_dataframe ( self , dataframe ) : if dataframe is None : return None # is this a DataFrame if not isinstance ( dataframe , pd . DataFrame ) : raise Exception ( 'The provided dataframe object is not a pandas.DataFrame' ) for column in self . required_columns : if column not in dataframe : raise Exception ( 'Required column not in dataframe: {0}' . format ( column ) ) return dataframe
8,534
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L73-L90
[ "def", "delete", "(", "self", ",", "path", ")", ":", "path", "=", "sanitize_mount", "(", "path", ")", "val", "=", "None", "if", "path", ".", "startswith", "(", "'cubbyhole'", ")", ":", "self", ".", "token", "=", "self", ".", "initial_token", "val", "=", "super", "(", "Client", ",", "self", ")", ".", "delete", "(", "path", ")", "self", ".", "token", "=", "self", ".", "operational_token", "else", ":", "super", "(", "Client", ",", "self", ")", ".", "delete", "(", "path", ")", "return", "val" ]
In case multiple frequencies were measured average them and compute std min max values for zt .
def reduce_duplicate_frequencies ( self ) : group_keys = [ 'frequency' , ] if 'timestep' in self . data . columns : group_keys = group_keys + [ 'timestep' , ] g = self . data . groupby ( group_keys ) def group_apply ( item ) : y = item [ [ 'zt_1' , 'zt_2' , 'zt_3' ] ] . values . flatten ( ) zt_imag_std = np . std ( y . imag ) zt_real_std = np . std ( y . real ) zt_imag_min = np . min ( y . imag ) zt_real_min = np . min ( y . real ) zt_imag_max = np . max ( y . imag ) zt_real_max = np . max ( y . real ) zt_imag_mean = np . mean ( y . imag ) zt_real_mean = np . mean ( y . real ) dfn = pd . DataFrame ( { 'zt_real_mean' : zt_real_mean , 'zt_real_std' : zt_real_std , 'zt_real_min' : zt_real_min , 'zt_real_max' : zt_real_max , 'zt_imag_mean' : zt_imag_mean , 'zt_imag_std' : zt_imag_std , 'zt_imag_min' : zt_imag_min , 'zt_imag_max' : zt_imag_max , } , index = [ 0 , ] ) dfn [ 'count' ] = len ( y ) dfn . index . name = 'index' return dfn p = g . apply ( group_apply ) p . index = p . index . droplevel ( 'index' ) if len ( group_keys ) > 1 : p = p . swaplevel ( 0 , 1 ) . sort_index ( ) return p
8,535
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L92-L153
[ "def", "reject_record", "(", "self", ",", "record", ")", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "req", "=", "InclusionRequest", ".", "get", "(", "self", ".", "id", ",", "record", ".", "id", ")", "if", "req", "is", "None", ":", "raise", "InclusionRequestMissingError", "(", "community", "=", "self", ",", "record", "=", "record", ")", "req", ".", "delete", "(", ")" ]
Load the module and return the required class .
def _load_class ( class_path ) : parts = class_path . rsplit ( '.' , 1 ) module = __import__ ( parts [ 0 ] , fromlist = parts [ 1 ] ) return getattr ( module , parts [ 1 ] )
8,536
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/config/factory.py#L22-L26
[ "def", "character_offset_begin", "(", "self", ")", ":", "if", "self", ".", "_character_offset_begin", "is", "None", ":", "offsets", "=", "self", ".", "_element", ".", "xpath", "(", "'CharacterOffsetBegin/text()'", ")", "if", "len", "(", "offsets", ")", ">", "0", ":", "self", ".", "_character_offset_begin", "=", "int", "(", "offsets", "[", "0", "]", ")", "return", "self", ".", "_character_offset_begin" ]
DNA|RNA seq - > reverse complement
def rev_comp ( seq , molecule = 'dna' ) : if molecule == 'dna' : nuc_dict = { "A" : "T" , "B" : "V" , "C" : "G" , "D" : "H" , "G" : "C" , "H" : "D" , "K" : "M" , "M" : "K" , "N" : "N" , "R" : "Y" , "S" : "S" , "T" : "A" , "V" : "B" , "W" : "W" , "Y" : "R" } elif molecule == 'rna' : nuc_dict = { "A" : "U" , "B" : "V" , "C" : "G" , "D" : "H" , "G" : "C" , "H" : "D" , "K" : "M" , "M" : "K" , "N" : "N" , "R" : "Y" , "S" : "S" , "U" : "A" , "V" : "B" , "W" : "W" , "Y" : "R" } else : raise ValueError ( "rev_comp requires molecule to be dna or rna" ) if not isinstance ( seq , six . string_types ) : raise TypeError ( "seq must be a string!" ) return '' . join ( [ nuc_dict [ c ] for c in seq . upper ( ) [ : : - 1 ] ] )
8,537
https://github.com/RobersonLab/motif_scraper/blob/382dcb5932d9750282906c356ca35e802bd68bd0/motif_scraper/__init__.py#L28-L42
[ "def", "get_experiment_from_key", "(", "self", ",", "experiment_key", ")", ":", "experiment", "=", "self", ".", "experiment_key_map", ".", "get", "(", "experiment_key", ")", "if", "experiment", ":", "return", "experiment", "self", ".", "logger", ".", "error", "(", "'Experiment key \"%s\" is not in datafile.'", "%", "experiment_key", ")", "self", ".", "error_handler", ".", "handle_error", "(", "exceptions", ".", "InvalidExperimentException", "(", "enums", ".", "Errors", ".", "INVALID_EXPERIMENT_KEY_ERROR", ")", ")", "return", "None" ]
Alternate constructor intended for using JSON format of private key .
def from_json ( cls , key , scopes , subject = None ) : credentials_type = key [ 'type' ] if credentials_type != 'service_account' : raise ValueError ( 'key: expected type service_account ' '(got %s)' % credentials_type ) email = key [ 'client_email' ] key = OpenSSL . crypto . load_privatekey ( OpenSSL . crypto . FILETYPE_PEM , key [ 'private_key' ] ) return cls ( key = key , email = email , scopes = scopes , subject = subject )
8,538
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L75-L96
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Alternate constructor intended for using . p12 files .
def from_pkcs12 ( cls , key , email , scopes , subject = None , passphrase = PKCS12_PASSPHRASE ) : key = OpenSSL . crypto . load_pkcs12 ( key , passphrase ) . get_privatekey ( ) return cls ( key = key , email = email , scopes = scopes , subject = subject )
8,539
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L99-L119
[ "def", "reboot", "(", "name", ",", "call", "=", "None", ",", "session", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudException", "(", "'The show_instnce function must be called with -a or --action.'", ")", "if", "session", "is", "None", ":", "session", "=", "_get_session", "(", ")", "log", ".", "info", "(", "'Starting VM %s'", ",", "name", ")", "vm", "=", "_get_vm", "(", "name", ",", "session", ")", "power_state", "=", "session", ".", "xenapi", ".", "VM", ".", "get_power_state", "(", "vm", ")", "if", "power_state", "==", "'Running'", ":", "task", "=", "session", ".", "xenapi", ".", "Async", ".", "VM", ".", "clean_reboot", "(", "vm", ")", "_run_async_task", "(", "task", ",", "session", ")", "return", "show_instance", "(", "name", ")", "else", ":", "return", "'{} is not running to be rebooted'", ".", "format", "(", "name", ")" ]
Time when access token was requested as seconds since epoch .
def issued_at ( self ) : issued_at = self . _issued_at if issued_at is None : self . _issued_at = int ( time . time ( ) ) return self . _issued_at
8,540
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L181-L194
[ "def", "remove_volume", "(", "self", ",", "volume_name", ")", ":", "logger", ".", "info", "(", "\"removing volume '%s'\"", ",", "volume_name", ")", "try", ":", "self", ".", "d", ".", "remove_volume", "(", "volume_name", ")", "except", "APIError", "as", "ex", ":", "if", "ex", ".", "response", ".", "status_code", "==", "requests", ".", "codes", ".", "CONFLICT", ":", "logger", ".", "debug", "(", "\"ignoring a conflict when removing volume %s\"", ",", "volume_name", ")", "else", ":", "raise", "ex" ]
Stores always valid OAuth2 access token .
def access_token ( self ) : if ( self . _access_token is None or self . expiration_time <= int ( time . time ( ) ) ) : resp = self . make_access_request ( ) self . _access_token = resp . json ( ) [ 'access_token' ] return self . _access_token
8,541
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L214-L228
[ "def", "_enforceDataType", "(", "self", ",", "data", ")", ":", "value", "=", "float", "(", "data", ")", "if", "math", ".", "isnan", "(", "value", ")", ":", "raise", "ValueError", "(", "\"FloatCti can't store NaNs\"", ")", "if", "math", ".", "isinf", "(", "value", ")", ":", "if", "value", ">", "0", ":", "logger", ".", "warn", "(", "\"Replacing inf by the largest representable float\"", ")", "value", "=", "sys", ".", "float_info", ".", "max", "else", ":", "logger", ".", "warn", "(", "\"Replacing -inf by the smallest representable float\"", ")", "value", "=", "-", "sys", ".", "float_info", ".", "max", "return", "value" ]
Makes an OAuth2 access token request with crafted JWT and signature .
def make_access_request ( self ) : del self . issued_at assertion = b'.' . join ( ( self . header ( ) , self . claims ( ) , self . signature ( ) ) ) post_data = { 'grant_type' : GRANT_TYPE , 'assertion' : assertion , } resp = requests . post ( AUDIENCE , post_data ) if resp . status_code != 200 : raise AuthenticationError ( resp ) return resp
8,542
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L259-L290
[ "def", "del_variables", "(", "self", ",", "variables", ")", ":", "variables", "=", "[", "variables", "]", "if", "isinstance", "(", "variables", ",", "six", ".", "string_types", ")", "else", "set", "(", "variables", ")", "indices", "=", "[", "index", "for", "index", ",", "variable", "in", "enumerate", "(", "self", ".", "variables", ")", "if", "variable", "in", "variables", "]", "self", ".", "variables", "=", "np", ".", "delete", "(", "self", ".", "variables", ",", "indices", ",", "0", ")", "self", ".", "cardinality", "=", "np", ".", "delete", "(", "self", ".", "cardinality", ",", "indices", ",", "0", ")", "self", ".", "inhibitor_probability", "=", "[", "prob_array", "for", "index", ",", "prob_array", "in", "enumerate", "(", "self", ".", "inhibitor_probability", ")", "if", "index", "not", "in", "indices", "]" ]
Shortcut for requests . request with proper Authorization header .
def authorized_request ( self , method , url , * * kwargs ) : headers = kwargs . pop ( 'headers' , { } ) if headers . get ( 'Authorization' ) or kwargs . get ( 'auth' ) : raise ValueError ( "Found custom Authorization header, " "method call would override it." ) headers [ 'Authorization' ] = 'Bearer ' + self . access_token return requests . request ( method , url , headers = headers , * * kwargs )
8,543
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L292-L321
[ "def", "_load_bundle_map", "(", "self", ")", ":", "bundle_map", "=", "{", "}", "next_href", "=", "None", "has_next", "=", "True", "while", "has_next", ":", "bundles", "=", "self", ".", "clarify_client", ".", "get_bundle_list", "(", "href", "=", "next_href", ",", "embed_items", "=", "True", ")", "items", "=", "get_embedded_items", "(", "bundles", ")", "for", "item", "in", "items", ":", "bc_video_id", "=", "item", ".", "get", "(", "'external_id'", ")", "if", "bc_video_id", "is", "not", "None", "and", "len", "(", "bc_video_id", ")", ">", "0", ":", "bundle_map", "[", "bc_video_id", "]", "=", "item", "next_href", "=", "get_link_href", "(", "bundles", ",", "'next'", ")", "if", "next_href", "is", "None", ":", "has_next", "=", "False", "return", "bundle_map" ]
Import Syscal measurements from a text file exported as Spreadsheet .
def import_txt ( filename , * * kwargs ) : # read in text file into a buffer with open ( filename , 'r' ) as fid : text = fid . read ( ) strings_to_replace = { 'Mixed / non conventional' : 'Mixed/non-conventional' , 'Date' : 'Date Time AM-PM' , } for key in strings_to_replace . keys ( ) : text = text . replace ( key , strings_to_replace [ key ] ) buffer = StringIO ( text ) # read data file data_raw = pd . read_csv ( buffer , # sep='\t', delim_whitespace = True , ) # clean up column names data_raw . columns = [ x . strip ( ) for x in data_raw . columns . tolist ( ) ] # generate electrode positions data = _convert_coords_to_abmn_X ( data_raw [ [ 'Spa.1' , 'Spa.2' , 'Spa.3' , 'Spa.4' ] ] , * * kwargs ) # [mV] / [mA] data [ 'r' ] = data_raw [ 'Vp' ] / data_raw [ 'In' ] data [ 'Vmn' ] = data_raw [ 'Vp' ] data [ 'Iab' ] = data_raw [ 'In' ] # rename electrode denotations rec_max = kwargs . get ( 'reciprocals' , None ) if rec_max is not None : print ( 'renumbering electrode numbers' ) data [ [ 'a' , 'b' , 'm' , 'n' ] ] = rec_max + 1 - data [ [ 'a' , 'b' , 'm' , 'n' ] ] return data , None , None
8,544
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/iris_syscal_pro.py#L80-L156
[ "def", "delete_logs", "(", "room", ")", ":", "from", "indico_chat", ".", "plugin", "import", "ChatPlugin", "base_url", "=", "ChatPlugin", ".", "settings", ".", "get", "(", "'log_url'", ")", "if", "not", "base_url", "or", "room", ".", "custom_server", ":", "return", "try", ":", "response", "=", "requests", ".", "get", "(", "posixpath", ".", "join", "(", "base_url", ",", "'delete'", ")", ",", "params", "=", "{", "'cr'", ":", "room", ".", "jid", "}", ")", ".", "json", "(", ")", "except", "(", "RequestException", ",", "ValueError", ")", ":", "current_plugin", ".", "logger", ".", "exception", "(", "'Could not delete logs for %s'", ",", "room", ".", "jid", ")", "return", "if", "not", "response", ".", "get", "(", "'success'", ")", ":", "current_plugin", ".", "logger", ".", "warning", "(", "'Could not delete logs for %s: %s'", ",", "room", ".", "jid", ",", "response", ".", "get", "(", "'error'", ")", ")" ]
Read a . bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing . This dataframe contains only information currently deemed important . Use the function reda . importers . iris_syscal_pro_binary . _import_bin to extract ALL information from a given . bin file .
def import_bin(filename, **kwargs):
    """Read a .bin file generated by the IRIS Instruments Syscal Pro system.

    Returns a curated DataFrame for further processing, containing only the
    information currently deemed important, plus two ``None`` placeholders
    (electrodes, topography). Use ``_import_bin`` to extract ALL information
    from a given .bin file.

    Parameters
    ----------
    filename : str
        Path to the .bin file.
    **kwargs :
        ``skip_rows`` (int): drop this many leading measurements.
        ``check_meas_nums`` (bool, default True): sanity-check measurement
        numbering and truncate after the first detected jump.
        ``reciprocals`` (int): renumber electrode labels.
        Remaining kwargs are forwarded to ``_convert_coords_to_abmn_X``.
    """
    metadata, data_raw = _import_bin(filename)

    # optionally drop leading measurements (e.g. from a previous survey)
    skip_rows = kwargs.get('skip_rows', 0)
    if skip_rows > 0:
        data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True)
        data_raw = data_raw.reset_index()

    if kwargs.get('check_meas_nums', True):
        # check that first number is 0
        if data_raw['measurement_num'].iloc[0] != 0:
            print('WARNING: Measurement numbers do not start with 0 ' +
                  '(did you download ALL data?)')

        # check that all measurement numbers increase by one.
        # BUGFIX: the comparison must happen inside np.all(); the previous
        # form ``not np.all(np.diff(...)) == 1`` only tested that all
        # differences were non-zero, not that each equals one.
        if not np.all(np.diff(data_raw['measurement_num']) == 1):
            print(
                'WARNING '
                'Measurement numbers are not consecutive. '
                'Perhaps the first measurement belongs to another measurement?'
                ' Use the skip_rows parameter to skip those measurements'
            )

        # now check if there is a jump in measurement numbers somewhere
        # ignore first entry as this will always be nan
        diff = data_raw['measurement_num'].diff()[1:]
        jump = np.where(diff != 1)[0]
        if len(jump) > 0:
            print('WARNING: One or more jumps in measurement numbers detected')
            print('The jump indices are:')
            for jump_nr in jump:
                print(jump_nr)
            print('Removing data points subsequent to the first jump')
            # keep everything up to and including the first jump position
            data_raw = data_raw.iloc[0:jump[0] + 1, :]

    if data_raw.shape[0] == 0:
        # no data present, return a bare DataFrame
        return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None

    # generate electrode positions from coordinate columns
    data = _convert_coords_to_abmn_X(
        data_raw[['x_a', 'x_b', 'x_m', 'x_n']],
        **kwargs
    )
    # [mV] / [mA]
    data['r'] = data_raw['vp'] / data_raw['Iab']
    data['Vmn'] = data_raw['vp']
    data['vab'] = data_raw['vab']
    data['Iab'] = data_raw['Iab']
    data['mdelay'] = data_raw['mdelay']
    data['Tm'] = data_raw['Tm']
    data['Mx'] = data_raw['Mx']
    data['chargeability'] = data_raw['m']
    data['q'] = data_raw['q']

    # rename electrode denotations for reciprocal measurements
    rec_max = kwargs.get('reciprocals', None)
    if rec_max is not None:
        print('renumbering electrode numbers')
        data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]

    return data, None, None
8,545
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/iris_syscal_pro.py#L160-L266
[ "def", "Flush", "(", "self", ")", ":", "if", "self", ".", "locked", "and", "self", ".", "CheckLease", "(", ")", "==", "0", ":", "self", ".", "_RaiseLockError", "(", "\"Flush\"", ")", "self", ".", "_WriteAttributes", "(", ")", "self", ".", "_SyncAttributes", "(", ")", "if", "self", ".", "parent", ":", "self", ".", "parent", ".", "Flush", "(", ")" ]
Execute the specified command and send a notification email with its status , timing , and output
def call_and_notificate(args, opts):
    """Execute the specified command and send a notification email
    describing its status, timing, and captured output.

    Parameters
    ----------
    args : list
        Command arguments to execute.
    opts : object
        Options object providing ``subject``, ``from_addr``, ``to_addr``,
        ``encoding``, ``host``, ``port``, and ``username``.
    """
    # record start times: CPU clock, wall clock, and datetime for the email
    start_clock = time.clock()
    start_wall = time.time()
    started_at = datetime.datetime.now()

    # run the command via subprocess
    exit_code, output = call(args)

    # compute elapsed durations
    cdelta = time.clock() - start_clock
    tdelta = time.time() - start_wall
    endtime = datetime.datetime.now()

    status = u"Success" if exit_code == 0 else u"Fail (%d)" % exit_code

    # create email body
    body = EMAIL_BODY % {
        'prog': get_command_str(args),
        'status': status,
        'stdtime': started_at,
        'endtime': endtime,
        'tdelta': tdelta,
        'cdelta': cdelta,
        'output': output,
        'cwd': os.getcwd(),
    }
    # create email subject
    subject = opts.subject % {
        'prog': get_command_str(args),
        'status': status.lower(),
    }
    # create email message
    msg = create_message(opts.from_addr, opts.to_addr, subject, body,
                         opts.encoding)
    # obtain password from keyring, then send
    password = keyring.get_password('notify', opts.username)
    send_email(msg, opts.host, opts.port, opts.username, password)
8,546
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/notifier.py#L28-L78
[ "def", "repmc", "(", "instr", ",", "marker", ",", "value", ",", "lenout", "=", "None", ")", ":", "if", "lenout", "is", "None", ":", "lenout", "=", "ctypes", ".", "c_int", "(", "len", "(", "instr", ")", "+", "len", "(", "value", ")", "+", "len", "(", "marker", ")", "+", "15", ")", "instr", "=", "stypes", ".", "stringToCharP", "(", "instr", ")", "marker", "=", "stypes", ".", "stringToCharP", "(", "marker", ")", "value", "=", "stypes", ".", "stringToCharP", "(", "value", ")", "out", "=", "stypes", ".", "stringToCharP", "(", "lenout", ")", "libspice", ".", "repmc_c", "(", "instr", ",", "marker", ",", "value", ",", "lenout", ",", "out", ")", "return", "stypes", ".", "toPythonString", "(", "out", ")" ]
Determines the target thumbnail type either by looking for a format override specified at the model level or by using the format the user uploaded .
def get_thumbnail_format(self):
    """Determine the target thumbnail format.

    A format override specified at the model level (``field.thumbnail_format``)
    wins; otherwise the extension of the uploaded file name is used as-is.
    """
    override = self.field.thumbnail_format
    if override:
        # Model-level override takes precedence, normalized to lowercase.
        return override.lower()
    # Fall back to the file's own extension (NOT lowercased here).
    return self.name.rsplit('.', 1)[-1]
8,547
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L94-L106
[ "def", "acquire_writer", "(", "self", ")", ":", "with", "self", ".", "mutex", ":", "while", "self", ".", "rwlock", "!=", "0", ":", "self", ".", "_writer_wait", "(", ")", "self", ".", "rwlock", "=", "-", "1" ]
Handles some extra logic to generate the thumbnails when the original file is uploaded .
def save ( self , name , content , save = True ) : super ( ImageWithThumbsFieldFile , self ) . save ( name , content , save ) try : self . generate_thumbs ( name , content ) except IOError , exc : if 'cannot identify' in exc . message or 'bad EPS header' in exc . message : raise UploadedImageIsUnreadableError ( "We were unable to read the uploaded image. " "Please make sure you are uploading a valid image file." ) else : raise
8,548
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L108-L124
[ "def", "config", "(", ")", ":", "conf_args", "=", "{", "\"INTERVAL\"", ":", "60", ",", "\"STANDBY\"", ":", "3", "}", "config_file", "=", "read_file", "(", "\"{0}{1}\"", ".", "format", "(", "conf_path", ",", "\"sun.conf\"", ")", ")", "for", "line", "in", "config_file", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "lstrip", "(", ")", "if", "line", "and", "not", "line", ".", "startswith", "(", "\"#\"", ")", ":", "conf_args", "[", "line", ".", "split", "(", "\"=\"", ")", "[", "0", "]", "]", "=", "line", ".", "split", "(", "\"=\"", ")", "[", "1", "]", "return", "conf_args" ]
Deletes the original plus any thumbnails . Fails silently if there are errors deleting the thumbnails .
def delete(self, save=True):
    """Delete the original file plus any thumbnails.

    Thumbnail deletion is delegated to the storage backend.
    """
    for thumb_name, thumb_options in self.field.thumbs:
        # remove each generated thumbnail alongside the original
        self.storage.delete(self._calc_thumb_filename(thumb_name))
    super(ImageWithThumbsFieldFile, self).delete(save)
8,549
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L197-L207
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Dump a simple value to its EDN string representation
def dump_edn_val(v):
    """Dump a simple value to its EDN string representation.

    Strings are JSON-quoted, ``E`` entities use their unicode form, and
    everything else goes through ``dumps``.
    """
    if isinstance(v, (str, unicode)):
        return json.dumps(v)
    if isinstance(v, E):
        return unicode(v)
    return dumps(v)
8,550
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L644-L651
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Transacts each schema EDN definition into the db
def tx_schema(self, **kwargs):
    """Transact every schema EDN definition into the database."""
    for schema_edn in self.schema.schema:
        # each definition is submitted as its own transaction
        self.tx(schema_edn, **kwargs)
8,551
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L67-L71
[ "def", "_get_suggestions", "(", "self", ",", "filter_word", "=", "None", ")", ":", "keys", "=", "self", ".", "manifest", ".", "keys", "(", ")", "words", "=", "[", "]", "for", "key", "in", "keys", ":", "if", "isinstance", "(", "self", ".", "manifest", "[", "key", "]", ",", "Manifest", ")", ":", "# if this key is another manifest, append a slash to the ", "# suggestion so the user knows theres more items under this key", "words", ".", "append", "(", "key", "+", "'/'", ")", "else", ":", "words", ".", "append", "(", "key", ")", "if", "filter_word", ":", "words", "=", "[", "x", "for", "x", "in", "words", "if", "x", ".", "startswith", "(", "filter_word", ")", "]", "return", "words" ]
Executes one or more raw tx strings , or returns a new TX object to work with when called without arguments .
def tx(self, *args, **kwargs):
    """Execute raw tx strings, or return a new TX builder when called
    with no arguments.

    Accepts a mix of strings and lists of strings; passing ``debug=True``
    pretty-prints the collected operations before submission.
    """
    if 0 == len(args):
        # no operations given: hand back a TX object to build with
        return TX(self)
    ops = []
    for op in args:
        if isinstance(op, list):
            ops += op
        elif isinstance(op, (str, unicode)):
            ops.append(op)
    if 'debug' in kwargs:
        pp(ops)
    # wrap all operations into a single EDN transaction vector
    tx_proc = "[ %s ]" % "".join(ops)
    return self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
8,552
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L73-L118
[ "def", "compute_score", "(", "subtitle", ",", "video", ",", "hearing_impaired", "=", "None", ")", ":", "logger", ".", "info", "(", "'Computing score of %r for video %r with %r'", ",", "subtitle", ",", "video", ",", "dict", "(", "hearing_impaired", "=", "hearing_impaired", ")", ")", "# get the scores dict", "scores", "=", "get_scores", "(", "video", ")", "logger", ".", "debug", "(", "'Using scores %r'", ",", "scores", ")", "# get the matches", "matches", "=", "subtitle", ".", "get_matches", "(", "video", ")", "logger", ".", "debug", "(", "'Found matches %r'", ",", "matches", ")", "# on hash match, discard everything else", "if", "'hash'", "in", "matches", ":", "logger", ".", "debug", "(", "'Keeping only hash match'", ")", "matches", "&=", "{", "'hash'", "}", "# handle equivalent matches", "if", "isinstance", "(", "video", ",", "Episode", ")", ":", "if", "'title'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding title match equivalent'", ")", "matches", ".", "add", "(", "'episode'", ")", "if", "'series_imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding series_imdb_id match equivalent'", ")", "matches", "|=", "{", "'series'", ",", "'year'", "}", "if", "'imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding imdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", ",", "'season'", ",", "'episode'", "}", "if", "'tvdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding tvdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", ",", "'season'", ",", "'episode'", "}", "if", "'series_tvdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding series_tvdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", "}", "elif", "isinstance", "(", "video", ",", "Movie", ")", ":", "if", "'imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding imdb_id match equivalents'", ")", "matches", "|=", "{", "'title'", ",", 
"'year'", "}", "# handle hearing impaired", "if", "hearing_impaired", "is", "not", "None", "and", "subtitle", ".", "hearing_impaired", "==", "hearing_impaired", ":", "logger", ".", "debug", "(", "'Matched hearing_impaired'", ")", "matches", ".", "add", "(", "'hearing_impaired'", ")", "# compute the score", "score", "=", "sum", "(", "(", "scores", ".", "get", "(", "match", ",", "0", ")", "for", "match", "in", "matches", ")", ")", "logger", ".", "info", "(", "'Computed score %r with final matches %r'", ",", "score", ",", "matches", ")", "# ensure score is within valid bounds", "assert", "0", "<=", "score", "<=", "scores", "[", "'hash'", "]", "+", "scores", "[", "'hearing_impaired'", "]", "return", "score" ]
Get an Entity
def e ( self , eid ) : ta = datetime . datetime . now ( ) rs = self . rest ( 'GET' , self . uri_db + '-/entity' , data = { 'e' : int ( eid ) } , parse = True ) tb = datetime . datetime . now ( ) - ta print cl ( '<<< fetched entity %s in %sms' % ( eid , tb . microseconds / 1000.0 ) , 'cyan' ) return rs
8,553
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L120-L127
[ "def", "download", "(", "date_array", ",", "tag", ",", "sat_id", ",", "data_path", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "import", "sys", "import", "os", "import", "pysftp", "import", "davitpy", "if", "user", "is", "None", ":", "user", "=", "os", ".", "environ", "[", "'DBREADUSER'", "]", "if", "password", "is", "None", ":", "password", "=", "os", ".", "environ", "[", "'DBREADPASS'", "]", "with", "pysftp", ".", "Connection", "(", "os", ".", "environ", "[", "'VTDB'", "]", ",", "username", "=", "user", ",", "password", "=", "password", ")", "as", "sftp", ":", "for", "date", "in", "date_array", ":", "myDir", "=", "'/data/'", "+", "date", ".", "strftime", "(", "\"%Y\"", ")", "+", "'/grdex/'", "+", "tag", "+", "'/'", "fname", "=", "date", ".", "strftime", "(", "\"%Y%m%d\"", ")", "+", "'.'", "+", "tag", "+", "'.grdex'", "local_fname", "=", "fname", "+", "'.bz2'", "saved_fname", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "local_fname", ")", "full_fname", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "fname", ")", "try", ":", "print", "(", "'Downloading file for '", "+", "date", ".", "strftime", "(", "'%D'", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sftp", ".", "get", "(", "myDir", "+", "local_fname", ",", "saved_fname", ")", "os", ".", "system", "(", "'bunzip2 -c '", "+", "saved_fname", "+", "' > '", "+", "full_fname", ")", "os", ".", "system", "(", "'rm '", "+", "saved_fname", ")", "except", "IOError", ":", "print", "(", "'File not available for '", "+", "date", ".", "strftime", "(", "'%D'", ")", ")", "return" ]
retract the value of an attribute
def retract ( self , e , a , v ) : ta = datetime . datetime . now ( ) ret = u"[:db/retract %i :%s %s]" % ( e , a , dump_edn_val ( v ) ) rs = self . tx ( ret ) tb = datetime . datetime . now ( ) - ta print cl ( '<<< retracted %s,%s,%s in %sms' % ( e , a , v , tb . microseconds / 1000.0 ) , 'cyan' ) return rs
8,554
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L129-L137
[ "def", "compress_pdf", "(", "pdf_fpath", ",", "output_fname", "=", "None", ")", ":", "import", "utool", "as", "ut", "ut", ".", "assertpath", "(", "pdf_fpath", ")", "suffix", "=", "'_'", "+", "ut", ".", "get_datestamp", "(", "False", ")", "+", "'_compressed'", "print", "(", "'pdf_fpath = %r'", "%", "(", "pdf_fpath", ",", ")", ")", "output_pdf_fpath", "=", "ut", ".", "augpath", "(", "pdf_fpath", ",", "suffix", ",", "newfname", "=", "output_fname", ")", "print", "(", "'output_pdf_fpath = %r'", "%", "(", "output_pdf_fpath", ",", ")", ")", "gs_exe", "=", "find_ghostscript_exe", "(", ")", "cmd_list", "=", "(", "gs_exe", ",", "'-sDEVICE=pdfwrite'", ",", "'-dCompatibilityLevel=1.4'", ",", "'-dNOPAUSE'", ",", "'-dQUIET'", ",", "'-dBATCH'", ",", "'-sOutputFile='", "+", "output_pdf_fpath", ",", "pdf_fpath", ")", "ut", ".", "cmd", "(", "*", "cmd_list", ")", "return", "output_pdf_fpath" ]
Returns a lazy generator that will only fetch groups of datoms at the chunk size specified .
def datoms(self, index='aevt', e='', a='', v='', limit=0, offset=0,
           chunk=100, start='', end='', since='', as_of='', history='',
           **kwargs):
    """Lazy generator over datoms, fetching ``chunk`` rows per REST call.

    Yields datoms one at a time; stops when a fetch returns no rows or the
    requested ``limit`` is reached.
    """
    assert index in ['aevt', 'eavt', 'avet', 'vaet'], "non-existant index"
    # base query parameters for the REST datoms endpoint
    data = {
        'index': index,
        'a': ':{0}'.format(a) if a else '',
        'v': dump_edn_val(v) if v else '',
        'e': int(e) if e else '',
        'offset': offset or 0,
        'start': start,
        'end': end,
        'limit': limit,
        'history': 'true' if history else '',
        'as-of': int(as_of) if as_of else '',
        'since': int(since) if since else '',
    }
    # per-request limit is one chunk past the current offset
    data['limit'] = offset + chunk
    rs = True
    # 1000000000 acts as "no limit" when limit == 0
    while rs and (data['offset'] < (limit or 1000000000)):
        ta = datetime.datetime.now()
        rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
        if not len(rs):
            rs = False
        tb = datetime.datetime.now() - ta
        # NOTE(review): if the fetch returned no rows, rs is now False and
        # len(rs) below would raise TypeError — looks like a latent bug;
        # confirm whether an empty fetch can actually reach this line.
        print cl('<<< fetched %i datoms at offset %i in %sms' % (len(rs), data['offset'], tb.microseconds / 1000.0), 'cyan')
        for r in rs:
            yield r
        # advance to the next page
        data['offset'] += chunk
8,555
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L140-L172
[ "def", "rotate_key", "(", "self", ",", "name", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/keys/{name}/rotate'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "name", "=", "name", ",", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", ")" ]
call a function while timing it , with colored terminal output and debug logging
def debug ( self , defn , args , kwargs , fmt = None , color = 'green' ) : ta = datetime . datetime . now ( ) rs = defn ( * args , * * kwargs ) tb = datetime . datetime . now ( ) - ta fmt = fmt or "processed {defn} in {ms}ms" logmsg = fmt . format ( ms = tb . microseconds / 1000.0 , defn = defn ) "terminal output" print cl ( logmsg , color ) "logging output" logging . debug ( logmsg ) return rs
8,556
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L193-L205
[ "def", "add_jardiff_optgroup", "(", "parser", ")", ":", "og", "=", "parser", ".", "add_argument_group", "(", "\"JAR Checking Options\"", ")", "og", ".", "add_argument", "(", "\"--ignore-jar-entry\"", ",", "action", "=", "\"append\"", ",", "default", "=", "[", "]", ")", "og", ".", "add_argument", "(", "\"--ignore-jar-signature\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"Ignore JAR signing changes\"", ")", "og", ".", "add_argument", "(", "\"--ignore-manifest\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"Ignore changes to manifests\"", ")", "og", ".", "add_argument", "(", "\"--ignore-manifest-subsections\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"Ignore changes to manifest subsections\"", ")", "og", ".", "add_argument", "(", "\"--ignore-manifest-key\"", ",", "action", "=", "\"append\"", ",", "default", "=", "[", "]", ",", "help", "=", "\"case-insensitive manifest keys to ignore\"", ")" ]
new query builder on current db
def find(self, *args, **kwargs):
    """Return a new Query builder bound to this database and its schema."""
    return Query(*args, db=self, schema=self.schema)
8,557
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L223-L225
[ "def", "parse", "(", "s", ")", ":", "stopwatch", "=", "StopWatch", "(", ")", "for", "line", "in", "s", ".", "splitlines", "(", ")", ":", "if", "line", ".", "strip", "(", ")", ":", "parts", "=", "line", ".", "split", "(", "None", ")", "name", "=", "parts", "[", "0", "]", "if", "name", "!=", "\"%\"", ":", "# ie not the header line", "rest", "=", "(", "float", "(", "v", ")", "for", "v", "in", "parts", "[", "2", ":", "]", ")", "stopwatch", ".", "times", "[", "parts", "[", "0", "]", "]", ".", "merge", "(", "Stat", ".", "build", "(", "*", "rest", ")", ")", "return", "stopwatch" ]
execute the query and return its single result row as a dict keyed by the find variable names
def hashone(self):
    """Execute the query and return its single result row as a dict.

    Keys are the find variables with their ``?`` prefix stripped; an empty
    result yields ``{}``.
    """
    row = self.one()
    if not row:
        return {}
    # split the find clause into variable names and strip the '?' prefix
    keys = (name.replace('?', '') for name in " ".join(self._find).split(' '))
    return dict(zip(keys, row))
8,558
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L302-L309
[ "def", "matches", "(", "self", ",", "spec", ")", ":", "if", "callable", "(", "spec", ")", "and", "not", "isinstance", "(", "spec", ",", "type", ")", ":", "return", "spec", "(", "self", ")", "elif", "isinstance", "(", "spec", ",", "type", ")", ":", "return", "isinstance", "(", "self", ",", "spec", ")", "specification", "=", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "group", ",", "self", ".", "label", ")", "split_spec", "=", "tuple", "(", "spec", ".", "split", "(", "'.'", ")", ")", "if", "not", "isinstance", "(", "spec", ",", "tuple", ")", "else", "spec", "split_spec", ",", "nocompare", "=", "zip", "(", "*", "(", "(", "None", ",", "True", ")", "if", "s", "==", "'*'", "or", "s", "is", "None", "else", "(", "s", ",", "False", ")", "for", "s", "in", "split_spec", ")", ")", "if", "all", "(", "nocompare", ")", ":", "return", "True", "match_fn", "=", "itemgetter", "(", "*", "(", "idx", "for", "idx", ",", "nc", "in", "enumerate", "(", "nocompare", ")", "if", "not", "nc", ")", ")", "self_spec", "=", "match_fn", "(", "split_spec", ")", "unescaped_match", "=", "match_fn", "(", "specification", "[", ":", "len", "(", "split_spec", ")", "]", ")", "==", "self_spec", "if", "unescaped_match", ":", "return", "True", "sanitizers", "=", "[", "util", ".", "sanitize_identifier", ",", "util", ".", "group_sanitizer", ",", "util", ".", "label_sanitizer", "]", "identifier_specification", "=", "tuple", "(", "fn", "(", "ident", ",", "escape", "=", "False", ")", "for", "ident", ",", "fn", "in", "zip", "(", "specification", ",", "sanitizers", ")", ")", "identifier_match", "=", "match_fn", "(", "identifier_specification", "[", ":", "len", "(", "split_spec", ")", "]", ")", "==", "self_spec", "return", "identifier_match" ]
execute the query and return all results as a list of lists
def all(self):
    """Execute the query and return all results as a list of lists."""
    query, inputs = self._toedn()
    return self.db.q(
        query,
        inputs=inputs,
        limit=self._limit,
        offset=self._offset,
        history=self._history,
    )
8,559
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L320-L327
[ "def", "run", "(", "self", ")", ":", "dataset", "=", "self", ".", "source", ".", "train_dataset", "(", ")", "num_samples", "=", "len", "(", "dataset", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "self", ".", "cases", ",", "self", ".", "samples", "+", "1", ")", "selected_sample", "=", "np", ".", "sort", "(", "np", ".", "random", ".", "choice", "(", "num_samples", ",", "self", ".", "cases", ",", "replace", "=", "False", ")", ")", "for", "i", "in", "range", "(", "self", ".", "cases", ")", ":", "raw_image", ",", "_", "=", "dataset", ".", "get_raw", "(", "selected_sample", "[", "i", "]", ")", "ax", "[", "i", ",", "0", "]", ".", "imshow", "(", "raw_image", ")", "ax", "[", "i", ",", "0", "]", ".", "set_title", "(", "\"Original image\"", ")", "for", "j", "in", "range", "(", "self", ".", "samples", ")", ":", "augmented_image", ",", "_", "=", "dataset", "[", "selected_sample", "[", "i", "]", "]", "augmented_image", "=", "dataset", ".", "denormalize", "(", "augmented_image", ")", "ax", "[", "i", ",", "j", "+", "1", "]", ".", "imshow", "(", "augmented_image", ")", "plt", ".", "show", "(", ")" ]
prepare the query for the rest api
def _toedn(self):
    """Prepare the query for the REST API.

    Returns a tuple ``(query_string, args)`` where the query string is an
    EDN ``[:find ... :in ... :where ...]`` form and ``args`` holds the
    EDN-dumped input values.
    """
    finds = u""
    inputs = u""
    wheres = u""
    args = []
    ": in and args"
    # each registered input contributes a placeholder and a dumped value
    for a, b in self._input:
        inputs += " {0}".format(a)
        args.append(dump_edn_val(b))
    if inputs:
        inputs = u":in ${0}".format(inputs)
    " :where "
    # string clauses are wrapped individually; list clauses wrap each element
    for where in self._where:
        if isinstance(where, (str, unicode)):
            wheres += u"[{0}]".format(where)
        elif isinstance(where, (list)):
            wheres += u" ".join([u"[{0}]".format(w) for w in where])
    " find: "
    if self._find == []:
        # find all: harvest every ?variable mentioned in the where clauses
        fs = set()
        for p in wheres.replace('[', ' ').replace(']', ' ').split(' '):
            if p.startswith('?'):
                fs.add(p)
        self._find = list(fs)
    finds = " ".join(self._find)
    " all togethr now..."
    q = u"""[ :find {0} {1} :where {2} ]""".format(finds, inputs, wheres)
    return q, args
8,560
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L329-L359
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Accumulate datums for the transaction
def add(self, *args, **kwargs):
    """Accumulate datums (e, a, v triples) for the transaction.

    The first positional argument may be an entity id, a tempid, or an ``E``
    instance; attribute/value pairs follow either as positional pairs or as
    keyword arguments. Returns the entity so it can be resolved after tx().

    NOTE(review): indentation below was reconstructed from a flattened
    source; the nesting of the else-branch around ``args[0] = None`` should
    be confirmed against the original file.
    """
    assert self.resp is None, "Transaction already committed"
    entity, av_pairs, args = None, [], list(args)
    if len(args):
        if isinstance(args[0], (int, long)):
            " first arg is an entity or tempid"
            entity = E(args[0], tx=self)
        elif isinstance(args[0], E):
            " dont resuse entity from another tx"
            if args[0]._tx is self:
                entity = args[0]
            else:
                if int(args[0]) > 0:
                    " use the entity id on a new obj"
                    entity = E(int(args[0]), tx=self)
                args[0] = None
            " drop the first arg"
    if entity is not None or args[0] in (None, False, 0):
        v = args.pop(0)
        " auto generate a temp id?"
        if entity is None:
            # allocate the next (negative) tempid for this transaction
            entity = E(self.ctmpid, tx=self)
            self.ctmpid -= 1
    " a,v from kwargs"
    if len(args) == 0 and kwargs:
        for a, v in kwargs.iteritems():
            self.addeav(entity, a, v)
    " a,v from args "
    if len(args):
        # NOTE(review): the assert message uses ``"..." % args`` with no
        # format placeholder — it would raise TypeError when triggered with
        # non-empty args; left unchanged here.
        assert len(args) % 2 == 0, "imbalanced a,v in args: " % args
        for first, second in pairwise(args):
            if not first.startswith(':'):
                first = ':' + first
            if not first.endswith('/'):
                " longhand used: blah/blah "
                if isinstance(second, list):
                    # list value: add one datum per element
                    for v in second:
                        self.addeav(entity, first, v)
                else:
                    self.addeav(entity, first, second)
                continue
            elif isinstance(second, dict):
                " shorthand used: blah/, dict "
                for a, v in second.iteritems():
                    self.addeav(entity, "%s%s" % (first, a), v)
                continue
            elif isinstance(second, (list, tuple)):
                " shorthand used: blah/, list|tuple "
                for a, v in pairwise(second):
                    self.addeav(entity, "%s%s" % (first, a), v)
                continue
            else:
                raise Exception, "invalid pair: %s : %s" % (first, second)
    "pass back the entity so it can be resolved after tx()"
    return entity
8,561
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L515-L594
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", 
"False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
Resolve one or more tempids . Automatically takes place after transaction is executed .
def resolve(self):
    """Resolve tempids to real entity ids after the transaction commits.

    Assigns each temporary entity its resolved id and the transaction id;
    real entities receive only the transaction id. Runs automatically after
    a transaction is executed.
    """
    assert isinstance(self.resp, dict), "Transaction in uncommitted or failed state"
    # resolved ids arrive keyed by tempid; reverse to match tmpents order
    resolved = [v for k, v in self.resp['tempids'].items()]
    self.txid = self.resp['tx-data'][0]['tx']
    resolved.reverse()
    for tmp in self.tmpents:
        pos = self.tmpents.index(tmp)
        tmp._eid, tmp._txid = resolved[pos], self.txid
    for ent in self.realents:
        ent._txid = self.txid
8,562
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L611-L623
[ "def", "store_vector", "(", "self", ",", "v", ",", "data", "=", "None", ")", ":", "# We will store the normalized vector (used during retrieval)", "nv", "=", "unitvec", "(", "v", ")", "# Store vector in each bucket of all hashes", "for", "lshash", "in", "self", ".", "lshashes", ":", "for", "bucket_key", "in", "lshash", ".", "hash_vector", "(", "v", ")", ":", "#print 'Storying in bucket %s one vector' % bucket_key", "self", ".", "storage", ".", "store_vector", "(", "lshash", ".", "hash_name", ",", "bucket_key", ",", "nv", ",", "data", ")" ]
Get fitness locations and their current usage .
def get_usage ( self ) : resp = requests . get ( FITNESS_URL , timeout = 30 ) resp . raise_for_status ( ) soup = BeautifulSoup ( resp . text , "html5lib" ) eastern = pytz . timezone ( 'US/Eastern' ) output = [ ] for item in soup . findAll ( "div" , { "class" : "barChart" } ) : data = [ x . strip ( ) for x in item . get_text ( "\n" ) . strip ( ) . split ( "\n" ) ] data = [ x for x in data if x ] name = re . sub ( r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$" , "" , data [ 0 ] , re . I ) . strip ( ) output . append ( { "name" : name , "open" : "Open" in data [ 1 ] , "count" : int ( data [ 2 ] . rsplit ( " " , 1 ) [ - 1 ] ) , "updated" : eastern . localize ( datetime . datetime . strptime ( data [ 3 ] [ 8 : ] . strip ( ) , '%m/%d/%Y %I:%M %p' ) ) . isoformat ( ) , "percent" : int ( data [ 4 ] [ : - 1 ] ) } ) return output
8,563
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/fitness.py#L49-L69
[ "def", "template", "(", "client", ",", "src", ",", "dest", ",", "paths", ",", "opt", ")", ":", "key_map", "=", "cli_hash", "(", "opt", ".", "key_map", ")", "obj", "=", "{", "}", "for", "path", "in", "paths", ":", "response", "=", "client", ".", "read", "(", "path", ")", "if", "not", "response", ":", "raise", "aomi", ".", "exceptions", ".", "VaultData", "(", "\"Unable to retrieve %s\"", "%", "path", ")", "if", "is_aws", "(", "response", "[", "'data'", "]", ")", "and", "'sts'", "not", "in", "path", ":", "renew_secret", "(", "client", ",", "response", ",", "opt", ")", "for", "s_k", ",", "s_v", "in", "response", "[", "'data'", "]", ".", "items", "(", ")", ":", "o_key", "=", "s_k", "if", "s_k", "in", "key_map", ":", "o_key", "=", "key_map", "[", "s_k", "]", "k_name", "=", "secret_key_name", "(", "path", ",", "o_key", ",", "opt", ")", ".", "lower", "(", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "obj", "[", "k_name", "]", "=", "s_v", "template_obj", "=", "blend_vars", "(", "obj", ",", "opt", ")", "output", "=", "render", "(", "grok_template_file", "(", "src", ")", ",", "template_obj", ")", "write_raw_file", "(", "output", ",", "abspath", "(", "dest", ")", ")" ]
Return all buildings related to the provided query .
def search ( self , keyword ) : params = { "source" : "map" , "description" : keyword } data = self . _request ( ENDPOINTS [ 'SEARCH' ] , params ) data [ 'result_data' ] = [ res for res in data [ 'result_data' ] if isinstance ( res , dict ) ] return data
8,564
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/map.py#L21-L35
[ "def", "exit_resync", "(", "self", ")", ":", "print", "(", "\"********** exit & resync **********\"", ")", "try", ":", "if", "self", ".", "client_socket", ":", "self", ".", "client_socket", ".", "close", "(", ")", "self", ".", "client_socket", "=", "None", "try", ":", "self", ".", "exit", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "_log_error", "(", "e", ")", "print", "(", "\"Pause for exit(s) ...\"", ")", "time", ".", "sleep", "(", "60", ")", "except", "(", "socket", ".", "error", ",", "ConnectionError", ")", ":", "pass", "self", ".", "resync", "(", ")" ]
Use a finite - element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings .
def compute_K_numerical ( dataframe , settings = None , keep_dir = None ) : inversion_code = reda . rcParams . get ( 'geom_factor.inversion_code' , 'crtomo' ) if inversion_code == 'crtomo' : import reda . utils . geom_fac_crtomo as geom_fac_crtomo if keep_dir is not None : keep_dir = os . path . abspath ( keep_dir ) K = geom_fac_crtomo . compute_K ( dataframe , settings , keep_dir ) else : raise Exception ( 'Inversion code {0} not implemented for K computation' . format ( inversion_code ) ) return K
8,565
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geometric_factors.py#L29-L74
[ "def", "inverse", "(", "self", ",", "vector", ",", "duration", "=", "None", ")", ":", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "duration", "=", "duration", ")", "if", "duration", "is", "None", ":", "duration", "=", "0", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "value", "=", "vector", ")", "return", "ann" ]
Get key from object
def _get_object_key ( self , p_object ) : matched_key = None matched_index = None if hasattr ( p_object , self . _searchNames [ 0 ] ) : return getattr ( p_object , self . _searchNames [ 0 ] ) for x in xrange ( len ( self . _searchNames ) ) : key = self . _searchNames [ x ] if hasattr ( p_object , key ) : matched_key = key matched_index = x if matched_key is None : raise KeyError ( ) if matched_index != 0 and self . _searchOptimize : self . _searchNames . insert ( 0 , self . _searchNames . pop ( matched_index ) ) return getattr ( p_object , matched_key )
8,566
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L52-L72
[ "def", "add_headers", "(", "width", "=", "80", ",", "title", "=", "'Untitled'", ",", "subtitle", "=", "''", ",", "author", "=", "''", ",", "email", "=", "''", ",", "description", "=", "''", ",", "tunings", "=", "[", "]", ")", ":", "result", "=", "[", "''", "]", "title", "=", "str", ".", "upper", "(", "title", ")", "result", "+=", "[", "str", ".", "center", "(", "' '", ".", "join", "(", "title", ")", ",", "width", ")", "]", "if", "subtitle", "!=", "''", ":", "result", "+=", "[", "''", ",", "str", ".", "center", "(", "str", ".", "title", "(", "subtitle", ")", ",", "width", ")", "]", "if", "author", "!=", "''", "or", "email", "!=", "''", ":", "result", "+=", "[", "''", ",", "''", "]", "if", "email", "!=", "''", ":", "result", "+=", "[", "str", ".", "center", "(", "'Written by: %s <%s>'", "%", "(", "author", ",", "email", ")", ",", "width", ")", "]", "else", ":", "result", "+=", "[", "str", ".", "center", "(", "'Written by: %s'", "%", "author", ",", "width", ")", "]", "if", "description", "!=", "''", ":", "result", "+=", "[", "''", ",", "''", "]", "words", "=", "description", ".", "split", "(", ")", "lines", "=", "[", "]", "line", "=", "[", "]", "last", "=", "0", "for", "word", "in", "words", ":", "if", "len", "(", "word", ")", "+", "last", "<", "width", "-", "10", ":", "line", ".", "append", "(", "word", ")", "last", "+=", "len", "(", "word", ")", "+", "1", "else", ":", "lines", ".", "append", "(", "line", ")", "line", "=", "[", "word", "]", "last", "=", "len", "(", "word", ")", "+", "1", "lines", ".", "append", "(", "line", ")", "for", "line", "in", "lines", ":", "result", "+=", "[", "str", ".", "center", "(", "' '", ".", "join", "(", "line", ")", ",", "width", ")", "]", "if", "tunings", "!=", "[", "]", ":", "result", "+=", "[", "''", ",", "''", ",", "str", ".", "center", "(", "'Instruments'", ",", "width", ")", "]", "for", "(", "i", ",", "tuning", ")", "in", "enumerate", "(", "tunings", ")", ":", "result", "+=", "[", "''", ",", "str", ".", "center", "(", "'%d. 
%s'", "%", "(", "i", "+", "1", ",", "tuning", ".", "instrument", ")", ",", "width", ")", ",", "str", ".", "center", "(", "tuning", ".", "description", ",", "width", ")", "]", "result", "+=", "[", "''", ",", "''", "]", "return", "result" ]
Compute weC from weT
def correct ( self , temp , we_t ) : if not PIDTempComp . in_range ( temp ) : return None n_t = self . cf_t ( temp ) if n_t is None : return None we_c = we_t * n_t return we_c
8,567
https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/pid_temp_comp.py#L66-L80
[ "def", "_unify_values", "(", "self", ",", "section", ",", "vars", ")", ":", "sectiondict", "=", "{", "}", "try", ":", "sectiondict", "=", "self", ".", "_sections", "[", "section", "]", "except", "KeyError", ":", "if", "section", "!=", "self", ".", "default_section", ":", "raise", "NoSectionError", "(", "section", ")", "# Update with the entry specific variables", "vardict", "=", "{", "}", "if", "vars", ":", "for", "key", ",", "value", "in", "vars", ".", "items", "(", ")", ":", "if", "value", "is", "not", "None", ":", "value", "=", "str", "(", "value", ")", "vardict", "[", "self", ".", "optionxform", "(", "key", ")", "]", "=", "value", "return", "_ChainMap", "(", "vardict", ",", "sectiondict", ",", "self", ".", "_defaults", ")" ]
DO NOT USE ANY MORE - DEPRECIATED!
def compute_norrec_differences ( df , keys_diff ) : raise Exception ( 'This function is depreciated!' ) print ( 'computing normal-reciprocal differences' ) # df.sort_index(level='norrec') def norrec_diff ( x ) : """compute norrec_diff""" if x . shape [ 0 ] != 2 : return np . nan else : return np . abs ( x . iloc [ 1 ] - x . iloc [ 0 ] ) keys_keep = list ( set ( df . columns . tolist ( ) ) - set ( keys_diff ) ) agg_dict = { x : _first for x in keys_keep } agg_dict . update ( { x : norrec_diff for x in keys_diff } ) for key in ( 'id' , 'timestep' , 'frequency' ) : if key in agg_dict : del ( agg_dict [ key ] ) # for frequencies, we could (I think) somehow prevent grouping by # frequencies... df = df . groupby ( ( 'timestep' , 'frequency' , 'id' ) ) . agg ( agg_dict ) # df.rename(columns={'r': 'Rdiff'}, inplace=True) df . reset_index ( ) return df
8,568
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L48-L75
[ "def", "renew_compose", "(", "self", ",", "compose_id", ")", ":", "logger", ".", "info", "(", "\"Renewing compose %d\"", ",", "compose_id", ")", "response", "=", "self", ".", "session", ".", "patch", "(", "'{}composes/{}'", ".", "format", "(", "self", ".", "url", ",", "compose_id", ")", ")", "response", ".", "raise_for_status", "(", ")", "response_json", "=", "response", ".", "json", "(", ")", "compose_id", "=", "response_json", "[", "'id'", "]", "logger", ".", "info", "(", "\"Renewed compose is %d\"", ",", "compose_id", ")", "return", "response_json" ]
return a normalized version of abmn
def _normalize_abmn ( abmn ) : abmn_2d = np . atleast_2d ( abmn ) abmn_normalized = np . hstack ( ( np . sort ( abmn_2d [ : , 0 : 2 ] , axis = 1 ) , np . sort ( abmn_2d [ : , 2 : 4 ] , axis = 1 ) , ) ) return abmn_normalized
8,569
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L78-L86
[ "def", "read_stats", "(", "self", ",", "*", "stats", ")", ":", "from", "ixexplorer", ".", "ixe_stream", "import", "IxePacketGroupStream", "sleep_time", "=", "0.1", "# in cases we only want few counters but very fast we need a smaller sleep time", "if", "not", "stats", ":", "stats", "=", "[", "m", ".", "attrname", "for", "m", "in", "IxePgStats", ".", "__tcl_members__", "if", "m", ".", "flags", "&", "FLAG_RDONLY", "]", "sleep_time", "=", "1", "# Read twice to refresh rate statistics.", "for", "port", "in", "self", ".", "tx_ports_streams", ":", "port", ".", "api", ".", "call_rc", "(", "'streamTransmitStats get {} 1 4096'", ".", "format", "(", "port", ".", "uri", ")", ")", "for", "rx_port", "in", "self", ".", "rx_ports", ":", "rx_port", ".", "api", ".", "call_rc", "(", "'packetGroupStats get {} 0 65536'", ".", "format", "(", "rx_port", ".", "uri", ")", ")", "time", ".", "sleep", "(", "sleep_time", ")", "self", ".", "statistics", "=", "OrderedDict", "(", ")", "for", "tx_port", ",", "streams", "in", "self", ".", "tx_ports_streams", ".", "items", "(", ")", ":", "for", "stream", "in", "streams", ":", "stream_stats", "=", "OrderedDict", "(", ")", "tx_port", ".", "api", ".", "call_rc", "(", "'streamTransmitStats get {} 1 4096'", ".", "format", "(", "tx_port", ".", "uri", ")", ")", "stream_tx_stats", "=", "IxeStreamTxStats", "(", "tx_port", ",", "stream", ".", "index", ")", "stream_stats_tx", "=", "{", "c", ":", "v", "for", "c", ",", "v", "in", "stream_tx_stats", ".", "get_attributes", "(", "FLAG_RDONLY", ")", ".", "items", "(", ")", "}", "stream_stats", "[", "'tx'", "]", "=", "stream_stats_tx", "stream_stat_pgid", "=", "IxePacketGroupStream", "(", "stream", ")", ".", "groupId", "stream_stats_pg", "=", "pg_stats_dict", "(", ")", "for", "port", "in", "self", ".", "session", ".", "ports", ".", "values", "(", ")", ":", "stream_stats_pg", "[", "str", "(", "port", ")", "]", "=", "OrderedDict", "(", "zip", "(", "stats", ",", "[", "-", "1", "]", "*", "len", "(", "stats", ")", ")", 
")", "for", "rx_port", "in", "self", ".", "rx_ports", ":", "if", "not", "stream", ".", "rx_ports", "or", "rx_port", "in", "stream", ".", "rx_ports", ":", "rx_port", ".", "api", ".", "call_rc", "(", "'packetGroupStats get {} 0 65536'", ".", "format", "(", "rx_port", ".", "uri", ")", ")", "pg_stats", "=", "IxePgStats", "(", "rx_port", ",", "stream_stat_pgid", ")", "stream_stats_pg", "[", "str", "(", "rx_port", ")", "]", "=", "pg_stats", ".", "read_stats", "(", "*", "stats", ")", "stream_stats", "[", "'rx'", "]", "=", "stream_stats_pg", "self", ".", "statistics", "[", "str", "(", "stream", ")", "]", "=", "stream_stats", "return", "self", ".", "statistics" ]
Compute and write the difference between normal and reciprocal values for all columns specified in the diff_list parameter .
def assign_norrec_diffs ( df , diff_list ) : extra_dims = [ x for x in ( 'timestep' , 'frequency' , 'id' ) if x in df . columns ] g = df . groupby ( extra_dims ) def subrow ( row ) : if row . size == 2 : return row . iloc [ 1 ] - row . iloc [ 0 ] else : return np . nan for diffcol in diff_list : diff = g [ diffcol ] . agg ( subrow ) . reset_index ( ) # rename the column cols = list ( diff . columns ) cols [ - 1 ] = diffcol + 'diff' diff . columns = cols df = df . drop ( cols [ - 1 ] , axis = 1 , errors = 'ignore' ) . merge ( diff , on = extra_dims , how = 'outer' ) df = df . sort_values ( extra_dims ) return df
8,570
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L336-L378
[ "def", "create_helping_material_info", "(", "helping", ")", ":", "helping_info", "=", "None", "file_path", "=", "None", "if", "helping", ".", "get", "(", "'info'", ")", ":", "helping_info", "=", "helping", "[", "'info'", "]", "else", ":", "helping_info", "=", "helping", "if", "helping_info", ".", "get", "(", "'file_path'", ")", ":", "file_path", "=", "helping_info", ".", "get", "(", "'file_path'", ")", "del", "helping_info", "[", "'file_path'", "]", "return", "helping_info", ",", "file_path" ]
Handles the ULogin response if user is already authenticated
def handle_authenticated_user ( self , response ) : current_user = get_user ( self . request ) ulogin , registered = ULoginUser . objects . get_or_create ( uid = response [ 'uid' ] , network = response [ 'network' ] , defaults = { 'identity' : response [ 'identity' ] , 'user' : current_user } ) if not registered : ulogin_user = ulogin . user logger . debug ( 'uLogin user already exists' ) if current_user != ulogin_user : logger . debug ( "Mismatch: %s is not a %s. Take over it!" % ( current_user , ulogin_user ) ) ulogin . user = current_user ulogin . save ( ) return get_user ( self . request ) , ulogin , registered
8,571
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L82-L107
[ "def", "as_hdf", "(", "self", ",", "filepath", ",", "mode", "=", "'w'", ",", "group", "=", "'/'", ")", ":", "import", "h5py", "# Groups in TDMS are mapped to the first level of the HDF5 hierarchy", "# Channels in TDMS are then mapped to the second level of the HDF5", "# hierarchy, under the appropriate groups.", "# Properties in TDMS are mapped to attributes in HDF5.", "# These all exist under the appropriate, channel group etc.", "h5file", "=", "h5py", ".", "File", "(", "filepath", ",", "mode", ")", "container_group", "=", "None", "if", "group", "in", "h5file", ":", "container_group", "=", "h5file", "[", "group", "]", "else", ":", "container_group", "=", "h5file", ".", "create_group", "(", "group", ")", "# First write the properties at the root level", "try", ":", "root", "=", "self", ".", "object", "(", ")", "for", "property_name", ",", "property_value", "in", "root", ".", "properties", ".", "items", "(", ")", ":", "container_group", ".", "attrs", "[", "property_name", "]", "=", "property_value", "except", "KeyError", ":", "# No root object present", "pass", "# Now iterate through groups and channels,", "# writing the properties and data", "for", "group_name", "in", "self", ".", "groups", "(", ")", ":", "try", ":", "group", "=", "self", ".", "object", "(", "group_name", ")", "# Write the group's properties", "for", "prop_name", ",", "prop_value", "in", "group", ".", "properties", ".", "items", "(", ")", ":", "container_group", "[", "group_name", "]", ".", "attrs", "[", "prop_name", "]", "=", "prop_value", "except", "KeyError", ":", "# No group object present", "pass", "# Write properties and data for each channel", "for", "channel", "in", "self", ".", "group_channels", "(", "group_name", ")", ":", "for", "prop_name", ",", "prop_value", "in", "channel", ".", "properties", ".", "items", "(", ")", ":", "container_group", ".", "attrs", "[", "prop_name", "]", "=", "prop_value", "container_group", "[", "group_name", "+", "'/'", "+", "channel", ".", "channel", "]", 
"=", "channel", ".", "data", "return", "h5file" ]
The request from ulogin service is correct
def form_valid ( self , form ) : response = self . ulogin_response ( form . cleaned_data [ 'token' ] , self . request . get_host ( ) ) if 'error' in response : return render ( self . request , self . error_template_name , { 'json' : response } ) if user_is_authenticated ( get_user ( self . request ) ) : user , identity , registered = self . handle_authenticated_user ( response ) else : user , identity , registered = self . handle_anonymous_user ( response ) assign . send ( sender = ULoginUser , user = get_user ( self . request ) , request = self . request , registered = registered , ulogin_user = identity , ulogin_data = response ) return redirect ( self . request . GET . get ( REDIRECT_FIELD_NAME ) or '/' )
8,572
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L135-L159
[ "def", "_GetUncompressedStreamSize", "(", "self", ")", ":", "self", ".", "_file_object", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "self", ".", "_decompressor", "=", "self", ".", "_GetDecompressor", "(", ")", "self", ".", "_uncompressed_data", "=", "b''", "compressed_data_offset", "=", "0", "compressed_data_size", "=", "self", ".", "_file_object", ".", "get_size", "(", ")", "uncompressed_stream_size", "=", "0", "while", "compressed_data_offset", "<", "compressed_data_size", ":", "read_count", "=", "self", ".", "_ReadCompressedData", "(", "self", ".", "_COMPRESSED_DATA_BUFFER_SIZE", ")", "if", "read_count", "==", "0", ":", "break", "compressed_data_offset", "+=", "read_count", "uncompressed_stream_size", "+=", "self", ".", "_uncompressed_data_size", "return", "uncompressed_stream_size" ]
Makes a request to ULOGIN
def ulogin_response ( self , token , host ) : response = requests . get ( settings . TOKEN_URL , params = { 'token' : token , 'host' : host } ) content = response . content if sys . version_info >= ( 3 , 0 ) : content = content . decode ( 'utf8' ) return json . loads ( content )
8,573
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L167-L182
[ "def", "getStringPartition", "(", "self", ")", ":", "res", "=", "''", "for", "s", "in", "self", ".", "partitions", "[", "self", ".", "index", "]", ".", "getSlice", "(", ")", ":", "start", "=", "''", "stop", "=", "''", "if", "s", ".", "start", "is", "not", "None", ":", "start", "=", "int", "(", "s", ".", "start", ")", "if", "s", ".", "stop", "is", "not", "None", ":", "stop", "=", "int", "(", "s", ".", "stop", ")", "res", "+=", "'{0}:{1},'", ".", "format", "(", "start", ",", "stop", ")", "return", "res" ]
Initializes the bottom - up state arrays for tips based on their states given by the feature .
def initialise_parsimonious_states ( tree , feature , states ) : ps_feature_down = get_personalized_feature_name ( feature , BU_PARS_STATES ) ps_feature = get_personalized_feature_name ( feature , PARS_STATES ) all_states = set ( states ) for node in tree . traverse ( ) : state = getattr ( node , feature , set ( ) ) if not state : node . add_feature ( ps_feature_down , all_states ) else : node . add_feature ( ps_feature_down , state ) node . add_feature ( ps_feature , getattr ( node , ps_feature_down ) )
8,574
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L48-L67
[ "def", "getCiphertextLen", "(", "self", ",", "ciphertext", ")", ":", "plaintext_length", "=", "self", ".", "getPlaintextLen", "(", "ciphertext", ")", "ciphertext_length", "=", "plaintext_length", "+", "Encrypter", ".", "_CTXT_EXPANSION", "return", "ciphertext_length" ]
UPPASS traverses the tree starting from the tips and going up till the root and assigns to each parent node a state based on the states of its child nodes .
def uppass ( tree , feature ) : ps_feature = get_personalized_feature_name ( feature , BU_PARS_STATES ) for node in tree . traverse ( 'postorder' ) : if not node . is_leaf ( ) : children_states = get_most_common_states ( getattr ( child , ps_feature ) for child in node . children ) node_states = getattr ( node , ps_feature ) state_intersection = node_states & children_states node . add_feature ( ps_feature , state_intersection if state_intersection else node_states )
8,575
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L83-L111
[ "def", "is_varchar", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'varchar'", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "len", "(", "self", ".", "data", ")", "<", "dt", "[", "'max'", "]", ":", "self", ".", "type", "=", "'VARCHAR'", "self", ".", "len", "=", "len", "(", "self", ".", "data", ")", "return", "True" ]
Calculates parsimonious states on the tree and stores them in the corresponding feature .
def parsimonious_acr ( tree , character , prediction_method , states , num_nodes , num_tips ) : initialise_parsimonious_states ( tree , character , states ) uppass ( tree , character ) results = [ ] result = { STATES : states , NUM_NODES : num_nodes , NUM_TIPS : num_tips } logger = logging . getLogger ( 'pastml' ) def process_result ( method , feature ) : out_feature = get_personalized_feature_name ( character , method ) if prediction_method != method else character res = result . copy ( ) res [ NUM_SCENARIOS ] , res [ NUM_UNRESOLVED_NODES ] , res [ NUM_STATES_PER_NODE ] = choose_parsimonious_states ( tree , feature , out_feature ) res [ NUM_STATES_PER_NODE ] /= num_nodes res [ PERC_UNRESOLVED ] = res [ NUM_UNRESOLVED_NODES ] * 100 / num_nodes logger . debug ( '{} node{} unresolved ({:.2f}%) for {} by {}, ' 'i.e. {:.4f} state{} per node in average.' . format ( res [ NUM_UNRESOLVED_NODES ] , 's are' if res [ NUM_UNRESOLVED_NODES ] != 1 else ' is' , res [ PERC_UNRESOLVED ] , character , method , res [ NUM_STATES_PER_NODE ] , 's' if res [ NUM_STATES_PER_NODE ] > 1 else '' ) ) res [ CHARACTER ] = out_feature res [ METHOD ] = method results . append ( res ) if prediction_method in { ACCTRAN , MP } : feature = get_personalized_feature_name ( character , PARS_STATES ) if prediction_method == MP : feature = get_personalized_feature_name ( feature , ACCTRAN ) acctran ( tree , character , feature ) result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) process_result ( ACCTRAN , feature ) bu_feature = get_personalized_feature_name ( character , BU_PARS_STATES ) for node in tree . traverse ( ) : if prediction_method == ACCTRAN : node . del_feature ( bu_feature ) node . 
del_feature ( feature ) if prediction_method != ACCTRAN : downpass ( tree , character , states ) feature = get_personalized_feature_name ( character , PARS_STATES ) if prediction_method == DOWNPASS : result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) if prediction_method in { DOWNPASS , MP } : process_result ( DOWNPASS , feature ) if prediction_method in { DELTRAN , MP } : deltran ( tree , character ) if prediction_method == DELTRAN : result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) process_result ( DELTRAN , feature ) for node in tree . traverse ( ) : node . del_feature ( feature ) logger . debug ( "Parsimonious reconstruction for {} requires {} state changes." . format ( character , result [ STEPS ] ) ) return results
8,576
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L224-L289
[ "def", "write_data", "(", "self", ",", "data", ",", "file_datetime", ")", ":", "with", "self", ".", "__lock", ":", "assert", "data", "is", "not", "None", "absolute_file_path", "=", "self", ".", "__file_path", "#logging.debug(\"WRITE data file %s for %s\", absolute_file_path, key)", "make_directory_if_needed", "(", "os", ".", "path", ".", "dirname", "(", "absolute_file_path", ")", ")", "properties", "=", "self", ".", "read_properties", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "absolute_file_path", ")", "else", "dict", "(", ")", "write_zip", "(", "absolute_file_path", ",", "data", ",", "properties", ")", "# convert to utc time.", "tz_minutes", "=", "Utility", ".", "local_utcoffset_minutes", "(", "file_datetime", ")", "timestamp", "=", "calendar", ".", "timegm", "(", "file_datetime", ".", "timetuple", "(", ")", ")", "-", "tz_minutes", "*", "60", "os", ".", "utime", "(", "absolute_file_path", ",", "(", "time", ".", "time", "(", ")", ",", "timestamp", ")", ")" ]
Convert a balance data structure into RingChartItem objects .
def balance_to_ringchart_items ( balance , account = '' , show = SHOW_CREDIT ) : show = show if show else SHOW_CREDIT # cannot show all in ring chart rcis = [ ] for item in balance : subaccount = item [ 'account_fragment' ] if not account else ':' . join ( ( account , item [ 'account_fragment' ] ) ) ch = balance_to_ringchart_items ( item [ 'children' ] , subaccount , show ) amount = item [ 'balance' ] if show == SHOW_CREDIT else - item [ 'balance' ] if amount < 0 : continue # omit negative amounts wedge_amount = max ( amount , sum ( map ( float , ch ) ) ) rci = gtkchartlib . ringchart . RingChartItem ( wedge_amount , tooltip = '{}\n{}' . format ( subaccount , wedge_amount ) , items = ch ) rcis . append ( rci ) return rcis
8,577
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/chart.py#L27-L45
[ "def", "_download_video", "(", "self", ",", "video_url", ",", "video_name", ")", ":", "filename", "=", "'{0:0=2d}_{1}'", ".", "format", "(", "DriverWrappersPool", ".", "videos_number", ",", "video_name", ")", "filename", "=", "'{}.mp4'", ".", "format", "(", "get_valid_filename", "(", "filename", ")", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "DriverWrappersPool", ".", "videos_directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "DriverWrappersPool", ".", "videos_directory", ")", ":", "os", ".", "makedirs", "(", "DriverWrappersPool", ".", "videos_directory", ")", "response", "=", "requests", ".", "get", "(", "video_url", ")", "open", "(", "filepath", ",", "'wb'", ")", ".", "write", "(", "response", ".", "content", ")", "self", ".", "logger", ".", "info", "(", "\"Video saved in '%s'\"", ",", "filepath", ")", "DriverWrappersPool", ".", "videos_number", "+=", "1" ]
Add file_handler to logger
def log_to_file ( log_path , log_urllib = False , limit = None ) : log_path = log_path file_handler = logging . FileHandler ( log_path ) if limit : file_handler = RotatingFileHandler ( log_path , mode = 'a' , maxBytes = limit * 1024 * 1024 , backupCount = 2 , encoding = None , delay = 0 ) fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s' date_fmt = '%Y-%m-%d %H:%M:%S' formatter = logging . Formatter ( fmt , datefmt = date_fmt ) file_handler . setFormatter ( formatter ) logger . addHandler ( file_handler ) if log_urllib : urllib_logger . addHandler ( file_handler ) urllib_logger . setLevel ( logging . DEBUG )
8,578
https://github.com/orlandodiaz/log3/blob/aeedf83159be8dd3d4757e0d9240f9cdbc9c3ea2/log3/log.py#L34-L53
[ "def", "updateAARText", "(", "self", ")", ":", "self", ".", "airspeedText", ".", "set_text", "(", "'AR: %.1f m/s'", "%", "self", ".", "airspeed", ")", "self", ".", "altitudeText", ".", "set_text", "(", "'ALT: %.1f m '", "%", "self", ".", "relAlt", ")", "self", ".", "climbRateText", ".", "set_text", "(", "'CR: %.1f m/s'", "%", "self", ".", "climbRate", ")" ]
Handles session setup and teardown
def session_context ( fn ) : @ functools . wraps ( fn ) def wrap ( * args , * * kwargs ) : session = args [ 0 ] . Session ( ) # obtain from self result = fn ( * args , session = session , * * kwargs ) session . close ( ) return result return wrap
8,579
https://github.com/YosaiProject/yosai_alchemystore/blob/6479c159ab2ac357e6b70cdd71a2d673279e86bb/yosai_alchemystore/accountstore/accountstore.py#L66-L76
[ "def", "_redis_watcher", "(", "state", ")", ":", "conf", "=", "state", ".", "app", ".", "config", "r", "=", "redis", ".", "client", ".", "StrictRedis", "(", "host", "=", "conf", ".", "get", "(", "'WAFFLE_REDIS_HOST'", ",", "'localhost'", ")", ",", "port", "=", "conf", ".", "get", "(", "'WAFFLE_REDIS_PORT'", ",", "6379", ")", ")", "sub", "=", "r", ".", "pubsub", "(", "ignore_subscribe_messages", "=", "True", ")", "sub", ".", "subscribe", "(", "conf", ".", "get", "(", "'WAFFLE_REDIS_CHANNEL'", ",", "'waffleconf'", ")", ")", "while", "True", ":", "for", "msg", "in", "sub", ".", "listen", "(", ")", ":", "# Skip non-messages", "if", "not", "msg", "[", "'type'", "]", "==", "'message'", ":", "continue", "tstamp", "=", "float", "(", "msg", "[", "'data'", "]", ")", "# Compare timestamps and update config if needed", "if", "tstamp", ">", "state", ".", "_tstamp", ":", "state", ".", "update_conf", "(", ")", "state", ".", "_tstamp", "=", "tstamp" ]
helper function that writes out electrode positions to a file descriptor
def _syscal_write_electrode_coords ( fid , spacing , N ) : fid . write ( '# X Y Z\n' ) for i in range ( 0 , N ) : fid . write ( '{0} {1} {2} {3}\n' . format ( i + 1 , i * spacing , 0 , 0 ) )
8,580
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L5-L19
[ "async", "def", "get_lease_async", "(", "self", ",", "partition_id", ")", ":", "try", ":", "blob", "=", "await", "self", ".", "host", ".", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_to_text", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "lease", "=", "AzureBlobLease", "(", ")", "lease", ".", "with_blob", "(", "blob", ")", "async", "def", "state", "(", ")", ":", "\"\"\"\n Allow lease to curry storage_client to get state\n \"\"\"", "try", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "res", "=", "await", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_properties", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "return", "res", ".", "properties", ".", "lease", ".", "state", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease state %r %r\"", ",", "err", ",", "partition_id", ")", "lease", ".", "state", "=", "state", "return", "lease", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease %r %r\"", ",", "err", ",", "partition_id", ")" ]
helper function that writes the actual measurement configurations to a file descriptor .
def _syscal_write_quadpoles ( fid , quadpoles ) : fid . write ( '# A B M N\n' ) for nr , quadpole in enumerate ( quadpoles ) : fid . write ( '{0} {1} {2} {3} {4}\n' . format ( nr , quadpole [ 0 ] , quadpole [ 1 ] , quadpole [ 2 ] , quadpole [ 3 ] ) )
8,581
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L22-L38
[ "async", "def", "is_expired", "(", "self", ")", ":", "if", "asyncio", ".", "iscoroutinefunction", "(", "self", ".", "state", ")", ":", "current_state", "=", "await", "self", ".", "state", "(", ")", "else", ":", "current_state", "=", "self", ".", "state", "(", ")", "if", "current_state", ":", "return", "current_state", "!=", "\"leased\"", "return", "False" ]
Write configurations to a Syscal ascii file that can be read by the Electre Pro program .
def syscal_save_to_config_txt ( filename , configs , spacing = 1 ) : print ( 'Number of measurements: ' , configs . shape [ 0 ] ) number_of_electrodes = configs . max ( ) . astype ( int ) with open ( filename , 'w' ) as fid : _syscal_write_electrode_coords ( fid , spacing , number_of_electrodes ) _syscal_write_quadpoles ( fid , configs . astype ( int ) )
8,582
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L41-L58
[ "def", "create_or_update", "(", "cls", ",", "bucket", ",", "key", ",", "value", ")", ":", "obj", "=", "cls", ".", "get", "(", "bucket", ",", "key", ")", "if", "obj", ":", "obj", ".", "value", "=", "value", "db", ".", "session", ".", "merge", "(", "obj", ")", "else", ":", "obj", "=", "cls", ".", "create", "(", "bucket", ",", "key", ",", "value", ")", "return", "obj" ]
Set up matplotlib imports and settings .
def setup ( use_latex = False , overwrite = False ) : # just make sure we can access matplotlib as mpl import matplotlib as mpl # general settings if overwrite : mpl . rcParams [ "lines.linewidth" ] = 2.0 mpl . rcParams [ "lines.markeredgewidth" ] = 3.0 mpl . rcParams [ "lines.markersize" ] = 3.0 mpl . rcParams [ "font.size" ] = 12 mpl . rcParams [ 'mathtext.default' ] = 'regular' if latex and use_latex : mpl . rcParams [ 'text.usetex' ] = True mpl . rc ( 'text.latex' , preamble = '' . join ( ( # r'\usepackage{droidsans} r'\usepackage[T1]{fontenc} ' , r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}' , r'\renewcommand\familydefault{\sfdefault} ' , r'\usepackage{mathastext} ' ) ) ) else : mpl . rcParams [ 'text.usetex' ] = False import matplotlib . pyplot as plt return plt , mpl
8,583
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/mpl.py#L19-L64
[ "def", "_GetPathSegmentIndexForSimilarityWeights", "(", "self", ",", "similarity_weights", ",", "occurrence_weights", ",", "value_weights", ")", ":", "largest_weight", "=", "similarity_weights", ".", "GetLargestWeight", "(", ")", "if", "largest_weight", ">", "0", ":", "similarity_weight_indexes", "=", "similarity_weights", ".", "GetIndexesForWeight", "(", "largest_weight", ")", "number_of_similarity_indexes", "=", "len", "(", "similarity_weight_indexes", ")", "else", ":", "number_of_similarity_indexes", "=", "0", "path_segment_index", "=", "None", "if", "number_of_similarity_indexes", "==", "0", ":", "path_segment_index", "=", "self", ".", "_GetPathSegmentIndexForOccurrenceWeights", "(", "occurrence_weights", ",", "value_weights", ")", "elif", "number_of_similarity_indexes", "==", "1", ":", "path_segment_index", "=", "similarity_weight_indexes", "[", "0", "]", "else", ":", "largest_weight", "=", "0", "largest_value_weight", "=", "0", "for", "similarity_index", "in", "similarity_weight_indexes", ":", "occurrence_weight", "=", "occurrence_weights", ".", "GetWeightForIndex", "(", "similarity_index", ")", "if", "largest_weight", ">", "0", "and", "largest_weight", "==", "occurrence_weight", ":", "value_weight", "=", "value_weights", ".", "GetWeightForIndex", "(", "similarity_index", ")", "if", "largest_value_weight", "<", "value_weight", ":", "largest_weight", "=", "0", "if", "not", "path_segment_index", "or", "largest_weight", "<", "occurrence_weight", ":", "largest_weight", "=", "occurrence_weight", "path_segment_index", "=", "similarity_index", "largest_value_weight", "=", "value_weights", ".", "GetWeightForIndex", "(", "similarity_index", ")", "return", "path_segment_index" ]
Load sEIT data from data directory . This function loads data previously exported from reda using reda . exporters . crtomo . write_files_to_directory
def load_seit_data ( directory , frequency_file = 'frequencies.dat' , data_prefix = 'volt_' , * * kwargs ) : frequencies = np . loadtxt ( directory + os . sep + frequency_file ) data_files = sorted ( glob ( directory + os . sep + data_prefix + '*' ) ) # check that the number of frequencies matches the number of data files if frequencies . size != len ( data_files ) : raise Exception ( 'number of frequencies does not match number of data files' ) # load data data_list = [ ] for frequency , filename in zip ( frequencies , data_files ) : subdata = load_mod_file ( filename ) subdata [ 'frequency' ] = frequency data_list . append ( subdata ) df = pd . concat ( data_list ) return df , None , None
8,584
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/crtomo.py#L59-L100
[ "def", "html", "(", "self", ")", "->", "str", ":", "if", "isinstance", "(", "self", ".", "value", ",", "bool", ")", ":", "val", "=", "'true'", "if", "self", ".", "value", "else", "'false'", "else", ":", "val", "=", "str", "(", "self", ".", "value", ")", "return", "'draggable=\"{}\"'", ".", "format", "(", "val", ")" ]
Normalises and diagonalises the rate matrix .
def get_diagonalisation ( frequencies , rate_matrix = None ) : Q = get_normalised_generator ( frequencies , rate_matrix ) d , A = np . linalg . eig ( Q ) return d , A , np . linalg . inv ( A )
8,585
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L4-L18
[ "def", "_agl_compliant_name", "(", "glyph_name", ")", ":", "MAX_GLYPH_NAME_LENGTH", "=", "63", "clean_name", "=", "re", ".", "sub", "(", "\"[^0-9a-zA-Z_.]\"", ",", "\"\"", ",", "glyph_name", ")", "if", "len", "(", "clean_name", ")", ">", "MAX_GLYPH_NAME_LENGTH", ":", "return", "None", "return", "clean_name" ]
Calculates the normalised generator from the rate matrix and character state frequencies .
def get_normalised_generator ( frequencies , rate_matrix = None ) : if rate_matrix is None : n = len ( frequencies ) rate_matrix = np . ones ( shape = ( n , n ) , dtype = np . float64 ) - np . eye ( n ) generator = rate_matrix * frequencies generator -= np . diag ( generator . sum ( axis = 1 ) ) mu = - generator . diagonal ( ) . dot ( frequencies ) generator /= mu return generator
8,586
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L21-L39
[ "async", "def", "upload_file", "(", "self", ",", "Filename", ",", "Bucket", ",", "Key", ",", "ExtraArgs", "=", "None", ",", "Callback", "=", "None", ",", "Config", "=", "None", ")", ":", "with", "open", "(", "Filename", ",", "'rb'", ")", "as", "open_file", ":", "await", "upload_fileobj", "(", "self", ",", "open_file", ",", "Bucket", ",", "Key", ",", "ExtraArgs", "=", "ExtraArgs", ",", "Callback", "=", "Callback", ",", "Config", "=", "Config", ")" ]
Calculates the probability matrix of substitutions i - > j over time t given the normalised generator diagonalisation .
def get_pij_matrix ( t , diag , A , A_inv ) : return A . dot ( np . diag ( np . exp ( diag * t ) ) ) . dot ( A_inv )
8,587
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L42-L53
[ "def", "MapFile", "(", "self", ",", "key_path_prefix", ",", "registry_file", ")", ":", "self", ".", "_registry_files", "[", "key_path_prefix", ".", "upper", "(", ")", "]", "=", "registry_file", "registry_file", ".", "SetKeyPathPrefix", "(", "key_path_prefix", ")" ]
Split specified arguments to two list .
def split_arguments ( args ) : prev = False for i , value in enumerate ( args [ 1 : ] ) : if value . startswith ( '-' ) : prev = True elif prev : prev = False else : return args [ : i + 1 ] , args [ i + 1 : ] return args , [ ]
8,588
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/arguments.py#L8-L34
[ "def", "loadInternalSheet", "(", "klass", ",", "p", ",", "*", "*", "kwargs", ")", ":", "vs", "=", "klass", "(", "p", ".", "name", ",", "source", "=", "p", ",", "*", "*", "kwargs", ")", "options", ".", "_set", "(", "'encoding'", ",", "'utf8'", ",", "vs", ")", "if", "p", ".", "exists", "(", ")", ":", "vd", ".", "sheets", ".", "insert", "(", "0", ",", "vs", ")", "vs", ".", "reload", ".", "__wrapped__", "(", "vs", ")", "vd", ".", "sheets", ".", "pop", "(", "0", ")", "return", "vs" ]
Parse specified arguments via config
def parse_arguments ( args , config ) : import notify from conf import config_to_options opts = config_to_options ( config ) usage = ( "%(prog)s " "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n" " " "[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\n" " " "[--setup] [--check] COMMAND ARGUMENTS" ) % { 'prog' : "notify" } description = """ Call COMMAND with ARGUMENTS and send notification email to TO_ADDR """ parser = optparse . OptionParser ( usage = usage , description = description , version = notify . __version__ ) parser . add_option ( '-t' , '--to-addr' , default = opts . to_addr , help = ( 'Destination of the email.' ) ) parser . add_option ( '-f' , '--from-addr' , default = opts . from_addr , help = ( 'Source of the email.' ) ) parser . add_option ( '-s' , '--subject' , default = opts . subject , help = ( 'Subject of the email' ) ) parser . add_option ( '-e' , '--encoding' , default = opts . encoding , help = ( 'Encoding of the email' ) ) parser . add_option ( '-o' , '--host' , default = opts . host , help = ( 'Host address of MUA' ) ) parser . add_option ( '-p' , '--port' , type = 'int' , default = opts . port , help = ( 'Port number of MUA' ) ) parser . add_option ( '--username' , default = opts . username , help = ( 'Username for authentication' ) ) parser . add_option ( '--password' , help = ( 'Password for authentication' ) ) parser . add_option ( '--setup' , default = False , action = 'store_true' , help = ( 'Setup %(prog)s configuration' ) ) parser . add_option ( '--check' , default = False , action = 'store_true' , help = ( 'Send %(prog)s configuration via email for ' 'checking. Only for Unix system.' ) ) # display help and exit if len ( args ) == 1 : parser . print_help ( ) sys . exit ( 0 ) else : # translate all specified arguments to unicode if sys . version_info < ( 3 , ) : encoding = sys . stdout . 
encoding args = map ( lambda x : unicode ( x , encoding ) , args ) # split argv to two array lhs , rhs = split_arguments ( args ) # parse options opts = parser . parse_args ( args = lhs [ 1 : ] ) [ 0 ] return rhs , opts
8,589
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/arguments.py#L36-L117
[ "def", "move_vobject", "(", "self", ",", "uid", ",", "from_file", ",", "to_file", ")", ":", "if", "from_file", "not", "in", "self", ".", "_reminders", "or", "to_file", "not", "in", "self", ".", "_reminders", ":", "return", "uid", "=", "uid", ".", "split", "(", "'@'", ")", "[", "0", "]", "with", "self", ".", "_lock", ":", "rem", "=", "open", "(", "from_file", ")", ".", "readlines", "(", ")", "for", "(", "index", ",", "line", ")", "in", "enumerate", "(", "rem", ")", ":", "if", "uid", "==", "md5", "(", "line", "[", ":", "-", "1", "]", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", ":", "del", "rem", "[", "index", "]", "open", "(", "from_file", ",", "'w'", ")", ".", "writelines", "(", "rem", ")", "open", "(", "to_file", ",", "'a'", ")", ".", "write", "(", "line", ")", "break" ]
Returns True if we should require authentication for the URL given
def should_require_authentication ( self , url ) : return ( not self . routes # require auth for all URLs or any ( route . match ( url ) for route in self . routes ) )
8,590
https://github.com/jonashaag/httpauth/blob/1b2ab9cb5192b474c9723182690c352337f754bc/httpauth.py#L99-L102
[ "def", "createOverlay", "(", "self", ",", "pchOverlayKey", ",", "pchOverlayName", ")", ":", "fn", "=", "self", ".", "function_table", ".", "createOverlay", "pOverlayHandle", "=", "VROverlayHandle_t", "(", ")", "result", "=", "fn", "(", "pchOverlayKey", ",", "pchOverlayName", ",", "byref", "(", "pOverlayHandle", ")", ")", "return", "result", ",", "pOverlayHandle" ]
Returns True if the credentials passed in the Authorization header are valid False otherwise .
def authenticate ( self , environ ) : try : hd = parse_dict_header ( environ [ 'HTTP_AUTHORIZATION' ] ) except ( KeyError , ValueError ) : return False return self . credentials_valid ( hd [ 'response' ] , environ [ 'REQUEST_METHOD' ] , environ [ 'httpauth.uri' ] , hd [ 'nonce' ] , hd [ 'Digest username' ] , )
8,591
https://github.com/jonashaag/httpauth/blob/1b2ab9cb5192b474c9723182690c352337f754bc/httpauth.py#L104-L120
[ "def", "_simulate_mixture", "(", "self", ",", "op", ":", "ops", ".", "Operation", ",", "data", ":", "_StateAndBuffer", ",", "indices", ":", "List", "[", "int", "]", ")", "->", "None", ":", "probs", ",", "unitaries", "=", "zip", "(", "*", "protocols", ".", "mixture", "(", "op", ")", ")", "# We work around numpy barfing on choosing from a list of", "# numpy arrays (which is not `one-dimensional`) by selecting", "# the index of the unitary.", "index", "=", "np", ".", "random", ".", "choice", "(", "range", "(", "len", "(", "unitaries", ")", ")", ",", "p", "=", "probs", ")", "shape", "=", "(", "2", ",", ")", "*", "(", "2", "*", "len", "(", "indices", ")", ")", "unitary", "=", "unitaries", "[", "index", "]", ".", "astype", "(", "self", ".", "_dtype", ")", ".", "reshape", "(", "shape", ")", "result", "=", "linalg", ".", "targeted_left_multiply", "(", "unitary", ",", "data", ".", "state", ",", "indices", ",", "out", "=", "data", ".", "buffer", ")", "data", ".", "buffer", "=", "data", ".", "state", "data", ".", "state", "=", "result" ]
Return the next transaction object .
def next ( self ) : try : return self . dict_to_xn ( self . csvreader . next ( ) ) except MetadataException : # row was metadata; proceed to next row return next ( self )
8,592
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/readers/CSV.py#L92-L101
[ "def", "check", "(", "self", ",", "url_data", ")", ":", "# XXX user authentication from url_data", "password", "=", "''", "data", "=", "url_data", ".", "get_content", "(", ")", "# PDFParser needs a seekable file object", "fp", "=", "StringIO", "(", "data", ")", "try", ":", "parser", "=", "PDFParser", "(", "fp", ")", "doc", "=", "PDFDocument", "(", "parser", ",", "password", "=", "password", ")", "for", "(", "pageno", ",", "page", ")", "in", "enumerate", "(", "PDFPage", ".", "create_pages", "(", "doc", ")", ",", "start", "=", "1", ")", ":", "if", "\"Contents\"", "in", "page", ".", "attrs", ":", "search_url", "(", "page", ".", "attrs", "[", "\"Contents\"", "]", ",", "url_data", ",", "pageno", ",", "set", "(", ")", ")", "if", "\"Annots\"", "in", "page", ".", "attrs", ":", "search_url", "(", "page", ".", "attrs", "[", "\"Annots\"", "]", ",", "url_data", ",", "pageno", ",", "set", "(", ")", ")", "except", "PSException", "as", "msg", ":", "if", "not", "msg", ".", "args", ":", "# at least show the class name", "msg", "=", "repr", "(", "msg", ")", "log", ".", "warn", "(", "LOG_PLUGIN", ",", "\"Error parsing PDF file: %s\"", ",", "msg", ")" ]
Parse the date and return a datetime object
def parse_date ( self , date ) : if self . date_format is not None : return datetime . datetime . strptime ( date , self . date_format ) . date ( ) if re . match ( '\d{8}$' , date ) : # assume YYYYMMDD return datetime . date ( * map ( int , ( date [ : 4 ] , date [ 4 : 6 ] , date [ 6 : ] ) ) ) try : # split by '-' or '/' parts = date_delim . split ( date , 2 ) # maxsplit=2 if len ( parts ) == 3 : if len ( parts [ 0 ] ) == 4 : # YYYY, MM, DD return datetime . date ( * map ( int , parts ) ) elif len ( parts [ 2 ] ) == 4 : # DD, MM, YYYY return datetime . date ( * map ( int , reversed ( parts ) ) ) # fail except TypeError , ValueError : raise reader . DataError ( 'Bad date format: "{}"' . format ( date ) )
8,593
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/readers/CSV.py#L103-L140
[ "def", "_check_missing_manifests", "(", "self", ",", "segids", ")", ":", "manifest_paths", "=", "[", "self", ".", "_manifest_path", "(", "segid", ")", "for", "segid", "in", "segids", "]", "with", "Storage", "(", "self", ".", "vol", ".", "layer_cloudpath", ",", "progress", "=", "self", ".", "vol", ".", "progress", ")", "as", "stor", ":", "exists", "=", "stor", ".", "files_exist", "(", "manifest_paths", ")", "dne", "=", "[", "]", "for", "path", ",", "there", "in", "exists", ".", "items", "(", ")", ":", "if", "not", "there", ":", "(", "segid", ",", ")", "=", "re", ".", "search", "(", "r'(\\d+):0$'", ",", "path", ")", ".", "groups", "(", ")", "dne", ".", "append", "(", "segid", ")", "return", "dne" ]
Create a subscription with this short name and the provided parameters
def create ( self , uri , buffer = "queue" , interval = 10 ) : return self . _http_client . put_json ( "subscriptions/{}" . format ( self . short_name ) , { "subscription" : { "uri" : uri , "buffer" : buffer , "interval" : interval , } } )
8,594
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/subscriptions.py#L15-L29
[ "def", "merge_cts_records", "(", "file_name", ",", "crypto_idfp", ",", "crypto_idfps", ")", ":", "db", "=", "XonoticDB", ".", "load_path", "(", "file_name", ")", "db", ".", "merge_cts_records", "(", "crypto_idfp", ",", "crypto_idfps", ")", "db", ".", "save", "(", "file_name", ")" ]
Scans the PAL configure . ac looking for the version number .
def read_pal_version ( ) : verfile = os . path . join ( "cextern" , "pal" , "configure.ac" ) verstring = "-1.-1.-1" for line in open ( verfile ) : if line . startswith ( "AC_INIT" ) : # Version will be in string [nn.mm.pp] match = re . search ( r"\[(\d+\.\d+\.\d+)\]" , line ) if match : verstring = match . group ( 1 ) break ( major , minor , patch ) = verstring . split ( "." ) return ( verstring , major , minor , patch )
8,595
https://github.com/Starlink/palpy/blob/a7ad77058614a93b29a004bbad6bc0e61c73b6e0/support/palvers.py#L35-L55
[ "def", "_getStickersTemplatesDirectory", "(", "self", ",", "resource_name", ")", ":", "templates_dir", "=", "queryResourceDirectory", "(", "\"stickers\"", ",", "resource_name", ")", ".", "directory", "if", "self", ".", "filter_by_type", ":", "templates_dir", "=", "templates_dir", "+", "\"/\"", "+", "self", ".", "filter_by_type", "return", "templates_dir" ]
Update the fields value with the received information .
def _reset_model ( self , response ) : # pylint: disable=no-member # Reset the model to the initial state self . _provision_done = False # Set back the provision flag self . _changes . clear ( ) # Clear the changes # Process the raw data from the update response fields = self . process_raw_data ( response ) # Update the current model representation self . _set_fields ( fields ) # Lock the current model self . _provision_done = True
8,596
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L105-L120
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Check if the current model is ready to be used .
def is_ready ( self ) : if not self . provisioning_state : raise exception . ServiceException ( "The object doesn't contain " "`provisioningState`." ) elif self . provisioning_state == constant . FAILED : raise exception . ServiceException ( "Failed to complete the required operation." ) elif self . provisioning_state == constant . SUCCEEDED : LOG . debug ( "The model %s: %s was successfully updated " "(or created)." , self . __class__ . __name__ , self . resource_id ) return True return False
8,597
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L122-L136
[ "def", "_orthogonalize", "(", "X", ")", ":", "if", "X", ".", "size", "==", "X", ".", "shape", "[", "0", "]", ":", "return", "X", "from", "scipy", ".", "linalg", "import", "pinv", ",", "norm", "for", "i", "in", "range", "(", "1", ",", "X", ".", "shape", "[", "1", "]", ")", ":", "X", "[", ":", ",", "i", "]", "-=", "np", ".", "dot", "(", "np", ".", "dot", "(", "X", "[", ":", ",", "i", "]", ",", "X", "[", ":", ",", ":", "i", "]", ")", ",", "pinv", "(", "X", "[", ":", ",", ":", "i", "]", ")", ")", "# X[:, i] /= norm(X[:, i])", "return", "X" ]
Retrives all the required resources .
def _get_all ( cls , parent_id = None , grandparent_id = None ) : client = cls . _get_client ( ) endpoint = cls . _endpoint . format ( resource_id = "" , parent_id = parent_id or "" , grandparent_id = grandparent_id or "" ) resources = [ ] while True : response = client . get_resource ( endpoint ) for raw_data in response . get ( "value" , [ ] ) : raw_data [ "parentResourceID" ] = parent_id raw_data [ "grandParentResourceID" ] = grandparent_id resources . append ( cls . from_raw_data ( raw_data ) ) endpoint = response . get ( "nextLink" ) if not endpoint : break return resources
8,598
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L148-L164
[ "def", "setOverlayTextureColorSpace", "(", "self", ",", "ulOverlayHandle", ",", "eTextureColorSpace", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTextureColorSpace", "result", "=", "fn", "(", "ulOverlayHandle", ",", "eTextureColorSpace", ")", "return", "result" ]
Retrieves the required resources .
def get ( cls , resource_id = None , parent_id = None , grandparent_id = None ) : if not resource_id : return cls . _get_all ( parent_id , grandparent_id ) else : return cls . _get ( resource_id , parent_id , grandparent_id )
8,599
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L179-L194
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]