query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Coroutine which returns when this token has been triggered
async def wait(self) -> None:
    """Coroutine which returns once this token has been triggered.

    Returns immediately if the token is already triggered. Otherwise it
    waits on the token's own event and on the ``wait()`` of every chained
    token, resuming as soon as the first of them completes.
    """
    if self.triggered_token is not None:
        # Already triggered: nothing to wait for.
        return

    # One future for our own trigger event plus one per chained token.
    # NOTE(review): the explicit loop= argument is deprecated/removed in
    # modern asyncio — confirm the supported Python versions.
    futures = [asyncio.ensure_future(self._triggered.wait(), loop=self.loop)]
    for token in self._chain:
        futures.append(asyncio.ensure_future(token.wait(), loop=self.loop))

    def cancel_not_done(fut: 'asyncio.Future[None]') -> None:
        # Done-callback: cancel every sibling future still pending so no
        # orphaned waiters are left behind after the first one completes.
        for future in futures:
            if not future.done():
                future.cancel()

    async def _wait_for_first(futures: Sequence[Awaitable[Any]]) -> None:
        # Resume as soon as the first of the given futures completes.
        for future in asyncio.as_completed(futures):
            # We don't need to catch CancelledError here (and cancel not done futures)
            # because our callback (above) takes care of that.
            await cast(Awaitable[Any], future)
            return

    fut = asyncio.ensure_future(_wait_for_first(futures), loop=self.loop)
    fut.add_done_callback(cancel_not_done)
    await fut
7,500
https://github.com/ethereum/asyncio-cancel-token/blob/135395a1a396c50731c03cf570e267c47c612694/cancel_token/token.py#L87-L112
[ "def", "diff_archives", "(", "archive1", ",", "archive2", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "util", ".", "check_existing_filename", "(", "archive1", ")", "util", ".", "check_existing_filename", "(", "archive2", ")", "if", "verbosity", ">=", "0", ":", "util", ".", "log_info", "(", "\"Comparing %s with %s ...\"", "%", "(", "archive1", ",", "archive2", ")", ")", "res", "=", "_diff_archives", "(", "archive1", ",", "archive2", ",", "verbosity", "=", "verbosity", ",", "interactive", "=", "interactive", ")", "if", "res", "==", "0", "and", "verbosity", ">=", "0", ":", "util", ".", "log_info", "(", "\"... no differences found.\"", ")" ]
Wait for the first awaitable to complete unless we timeout or the token is triggered .
async def cancellable_wait(self, *awaitables: Awaitable[_R], timeout: float = None) -> _R:
    """Wait for the first of *awaitables* to complete, unless the token is
    triggered or *timeout* (in seconds) expires first.

    :param awaitables: awaitables to race against each other.
    :param timeout: optional number of seconds to wait before giving up.
    :returns: the result of the first completed awaitable.
    :raises TimeoutError: if the timeout expires before anything completes.
    :raises OperationCancelled: if this token is triggered while waiting.
    """
    # Race the caller's awaitables against our own wait() coroutine.
    futures = [asyncio.ensure_future(a, loop=self.loop) for a in awaitables + (self.wait(),)]
    try:
        done, pending = await asyncio.wait(
            futures,
            timeout=timeout,
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop,
        )
    except asyncio.futures.CancelledError:
        # Since we use return_when=asyncio.FIRST_COMPLETED above, we can be sure none of our
        # futures will be done here, so we don't need to check if any is done before cancelling.
        for future in futures:
            future.cancel()
        raise
    # Cancel the losers so no stray tasks keep running.
    for task in pending:
        task.cancel()
    if not done:
        # asyncio.wait returned with nothing done: the timeout expired.
        raise TimeoutError()
    if self.triggered_token is not None:
        # We've been asked to cancel so we don't care about our future, but we must
        # consume its exception or else asyncio will emit warnings.
        for task in done:
            task.exception()
        raise OperationCancelled(
            "Cancellation requested by {} token".format(self.triggered_token)
        )
    return done.pop().result()
7,501
https://github.com/ethereum/asyncio-cancel-token/blob/135395a1a396c50731c03cf570e267c47c612694/cancel_token/token.py#L114-L153
[ "def", "learn_batch", "(", "self", ",", "inputBatch", ")", ":", "X", "=", "inputBatch", "Y", "=", "self", ".", "encode_batch", "(", "X", ")", "self", ".", "update_statistics", "(", "Y", ")", "self", ".", "update_weights", "(", "X", ",", "Y", ")", "return", "Y" ]
Parent of the actual URN; for example, 1.1 for 1.1.1
def parent(self) -> Optional['CtsReference']:
    """Parent reference of this reference (e.g. "1.1" for "1.1.1").

    Returns ``None`` when the reference is already at the top citation
    level. For ranges, the parent is computed from both ends; if both
    ends share the same parent and carry no subreference, the range
    collapses to a single parent reference.
    """
    if self.start.depth == 1 and (self.end is None or self.end.depth <= 1):
        # Already at the top level: no parent exists.
        return None
    else:
        if self.start.depth > 1 and (self.end is None or self.end.depth == 0):
            # Single reference: drop the last component, keep any subreference.
            return CtsReference("{0}{1}".format(
                ".".join(self.start.list[:-1]),
                self.start.subreference or ""
            ))
        elif self.start.depth > 1 and self.end is not None and self.end.depth > 1:
            # Range reference: compute the parent of each end.
            _start = self.start.list[0:-1]
            _end = self.end.list[0:-1]
            if _start == _end and self.start.subreference is None and self.end.subreference is None:
                # Both ends share one parent: collapse to a single reference.
                return CtsReference(".".join(_start))
            else:
                return CtsReference("{0}{1}-{2}{3}".format(
                    ".".join(_start),
                    self.start.subreference or "",
                    ".".join(_end),
                    self.end.subreference or ""
                ))
7,502
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L175-L203
[ "def", "exec_start", "(", "self", ",", "exec_id", ",", "detach", "=", "False", ",", "tty", "=", "False", ",", "stream", "=", "False", ",", "socket", "=", "False", ",", "demux", "=", "False", ")", ":", "# we want opened socket if socket == True", "data", "=", "{", "'Tty'", ":", "tty", ",", "'Detach'", ":", "detach", "}", "headers", "=", "{", "}", "if", "detach", "else", "{", "'Connection'", ":", "'Upgrade'", ",", "'Upgrade'", ":", "'tcp'", "}", "res", "=", "self", ".", "_post_json", "(", "self", ".", "_url", "(", "'/exec/{0}/start'", ",", "exec_id", ")", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "stream", "=", "True", ")", "if", "detach", ":", "return", "self", ".", "_result", "(", "res", ")", "if", "socket", ":", "return", "self", ".", "_get_raw_response_socket", "(", "res", ")", "return", "self", ".", "_read_from_socket", "(", "res", ",", "stream", ",", "tty", "=", "tty", ",", "demux", "=", "demux", ")" ]
Return highest reference level
def highest(self) -> CtsSinglePassageId:
    """Return the end of the reference sitting at the highest citation level.

    With no range end, the start wins by default; otherwise the shorter,
    non-empty side of the range is preferred.
    """
    start, end = self.start, self.end
    if not end:
        return start
    start_len, end_len = len(start), len(end)
    if start_len and start_len < end_len:
        return start
    if end_len and start_len > end_len:
        return end
    if start_len:
        return start
7,503
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L206-L223
[ "def", "_get_token", "(", "self", ")", ":", "# HTTP request", "try", ":", "raw_res", "=", "yield", "from", "self", ".", "_session", ".", "get", "(", "TOKEN_URL", ",", "headers", "=", "self", ".", "_headers", ",", "timeout", "=", "self", ".", "_timeout", ")", "except", "OSError", ":", "raise", "PyFidoError", "(", "\"Can not get token\"", ")", "# Research for json in answer", "content", "=", "yield", "from", "raw_res", ".", "text", "(", ")", "reg_res", "=", "re", ".", "search", "(", "r\"\\({.*}\\)\"", ",", "content", ")", "if", "reg_res", "is", "None", ":", "raise", "PyFidoError", "(", "\"Can not finf token json\"", ")", "# Load data as json", "return_data", "=", "json", ".", "loads", "(", "reg_res", ".", "group", "(", ")", "[", "1", ":", "-", "1", "]", ")", "# Get token and uuid", "token", "=", "return_data", ".", "get", "(", "'result'", ",", "{", "}", ")", ".", "get", "(", "'accessToken'", ")", "uuid", "=", "return_data", ".", "get", "(", "'result'", ",", "{", "}", ")", ".", "get", "(", "'userData'", ",", "{", "}", ")", ".", "get", "(", "'uuid'", ")", "# Check values", "if", "token", "is", "None", "or", "uuid", "is", "None", ":", "raise", "PyFidoError", "(", "\"Can not get token or uuid\"", ")", "return", "token", ",", "uuid" ]
Returns the urn up to given level using URN Constants
def upTo(self, key):
    """Return the URN truncated to the given level, using URN constants.

    :param key: one of the ``URN`` level constants (COMPLETE, NAMESPACE,
        TEXTGROUP, WORK, VERSION, NO_PASSAGE, PASSAGE, PASSAGE_START,
        PASSAGE_END).
    :returns: the URN string up to (and including) the requested level.
    :raises KeyError: if *key* is not a recognized level constant, or the
        corresponding component is missing from the parsed URN.
    """
    # Work/version components that are present, used for the deeper levels.
    middle = [
        component
        for component in [self.__parsed["textgroup"], self.__parsed["work"], self.__parsed["version"]]
        if component is not None
    ]
    if key == URN.COMPLETE:
        return self.__str__()
    elif key == URN.NAMESPACE:
        return ":".join(["urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"]])
    elif key == URN.TEXTGROUP and self.__parsed["textgroup"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            self.__parsed["textgroup"]
        ])
    elif key == URN.WORK and self.__parsed["work"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join([self.__parsed["textgroup"], self.__parsed["work"]])
        ])
    elif key == URN.VERSION and self.__parsed["version"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join(middle)
        ])
    elif key == URN.NO_PASSAGE and self.__parsed["work"]:
        # NOTE(review): identical output to URN.VERSION above — confirm intended.
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join(middle)
        ])
    elif key == URN.PASSAGE and self.__parsed["reference"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join(middle), str(self.reference)
        ])
    elif key == URN.PASSAGE_START and self.__parsed["reference"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join(middle), str(self.reference.start)
        ])
    elif key == URN.PASSAGE_END and self.__parsed["reference"] and self.reference.end is not None:
        return ":".join([
            "urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"],
            ".".join(middle), str(self.reference.end)
        ])
    else:
        raise KeyError("Provided key is not recognized.")
7,504
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L534-L612
[ "def", "stop", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "LOG", ".", "debug", "(", "\"args: %s\"", "%", "str", "(", "args", ")", ")", "if", "kwargs", ":", "LOG", ".", "debug", "(", "\"kwargs: %s\"", "%", "str", "(", "kwargs", ")", ")", "try", ":", "self", ".", "_server", ".", "stop", "(", ")", "self", ".", "_server", "=", "None", "# Device-storage clear", "self", ".", "devices", ".", "clear", "(", ")", "self", ".", "devices_all", ".", "clear", "(", ")", "self", ".", "devices_raw", ".", "clear", "(", ")", "self", ".", "devices_raw_dict", ".", "clear", "(", ")", "return", "True", "except", "Exception", "as", "err", ":", "LOG", ".", "critical", "(", "\"Failed to stop server\"", ")", "LOG", ".", "debug", "(", "str", "(", "err", ")", ")", "return", "False" ]
Attribute that serves as a reference getter
def attribute(self):
    """Name of the XML attribute carrying the deepest citation reference.

    The deepest placeholder in ``refsDecl`` is ``$N`` where N equals the
    number of ``$`` placeholders; the attribute tested against it is
    extracted with a regex and the last match is returned.
    """
    deepest_level = str(self.refsDecl.count("$"))
    pattern = "\@([a-zA-Z:]+)=\\\?[\'\"]\$" + deepest_level + "\\\?[\'\"]"
    matches = re.findall(pattern, self.refsDecl)
    return matches[-1]
7,505
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L770-L777
[ "def", "group_experiments_greedy", "(", "tomo_expt", ":", "TomographyExperiment", ")", ":", "diag_sets", "=", "_max_tpb_overlap", "(", "tomo_expt", ")", "grouped_expt_settings_list", "=", "list", "(", "diag_sets", ".", "values", "(", ")", ")", "grouped_tomo_expt", "=", "TomographyExperiment", "(", "grouped_expt_settings_list", ",", "program", "=", "tomo_expt", ".", "program", ")", "return", "grouped_tomo_expt" ]
Given a passageId matches a citation level
def match(self, passageId):
    """Return the citation level whose depth matches *passageId*.

    Non-``CtsReference`` inputs are coerced first; non-root citations
    delegate to the root so the lookup is always depth-indexed from the
    top of the hierarchy.
    """
    reference = passageId if isinstance(passageId, CtsReference) else CtsReference(passageId)
    if not self.is_root():
        # Always resolve from the root of the citation hierarchy.
        return self.root.match(reference)
    return self[reference.depth - 1]
7,506
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L815-L826
[ "def", "_set_virtual", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", "and", "key", "not", "in", "self", ".", "_virtual_keys", ":", "return", "# Do nothing for non-virtual keys.", "self", ".", "_virtual_keys", ".", "add", "(", "key", ")", "if", "key", "in", "self", "and", "self", "[", "key", "]", "is", "not", "value", ":", "self", ".", "_on_change", "(", "key", ",", "value", ")", "dict", ".", "__setitem__", "(", "self", ",", "key", ",", "value", ")", "for", "overlay", "in", "self", ".", "_iter_overlays", "(", ")", ":", "overlay", ".", "_set_virtual", "(", "key", ",", "value", ")" ]
Fill the xpath with given informations
def fill(self, passage=None, xpath=None):
    """Fill the citation's reference placeholder(s) with the given passage.

    :param passage: a ``CtsReference``, an iterable of reference components,
        a plain string (when ``xpath is True``), or ``None`` to strip the
        placeholders.
    :param xpath: when ``True``, operate on ``self.xpath`` with *passage*
        as a single string value; otherwise operate on ``self.refsDecl``.
    :returns: the filled XPath string.
    """
    if xpath is True:  # Then passage is a string or None
        xpath = self.xpath
        replacement = r"\1"
        if isinstance(passage, str):
            # Quote the passage value into the attribute test.
            replacement = r"\1\2'" + passage + "'"
        return REFERENCE_REPLACER.sub(replacement, xpath)
    else:
        if isinstance(passage, CtsReference):
            # Use the ordered components of the starting reference.
            passage = passage.start.list
        elif passage is None:
            # No passage given: strip the placeholders entirely.
            return REFERENCE_REPLACER.sub(r"\1", self.refsDecl)
        # Consume one passage component per placeholder, in document order.
        passage = iter(passage)
        return REFERENCE_REPLACER.sub(lambda m: _ref_replacer(m, passage), self.refsDecl)
7,507
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L828-L872
[ "def", "scan_resource", "(", "self", ",", "pkg", ",", "path", ")", ":", "for", "fname", "in", "resource_listdir", "(", "pkg", ",", "path", ")", ":", "if", "fname", ".", "endswith", "(", "TABLE_EXT", ")", ":", "table_path", "=", "posixpath", ".", "join", "(", "path", ",", "fname", ")", "with", "contextlib", ".", "closing", "(", "resource_stream", "(", "pkg", ",", "table_path", ")", ")", "as", "stream", ":", "self", ".", "add_colortable", "(", "stream", ",", "posixpath", ".", "splitext", "(", "posixpath", ".", "basename", "(", "fname", ")", ")", "[", "0", "]", ")" ]
Ingest a resource and store data in its instance
def ingest(resource, xpath=".//tei:cRefPattern"):
    """Ingest a TEI refsDecl resource and build the citation hierarchy.

    :param resource: an lxml ``_Element`` (or a non-empty list whose first
        item is one) containing ``cRefPattern`` nodes.
    :param xpath: XPath used to select the pattern nodes.
    :returns: the root (deepest-declared) ``Citation``, or ``None`` for
        unusable input.
    """
    if len(resource) == 0 and isinstance(resource, list):
        return None
    if isinstance(resource, list):
        resource = resource[0]
    elif not isinstance(resource, _Element):
        return None

    pattern_nodes = resource.xpath(xpath, namespaces=XPATH_NAMESPACES)

    # Patterns are declared deepest-first; each new citation adopts the
    # previously built one as its child.
    citations = []
    for node in pattern_nodes:
        citations.append(
            Citation(
                name=node.get("n"),
                # Strip the "#xpath(" prefix and trailing ")" wrapper.
                refsDecl=node.get("replacementPattern")[7:-1],
                child=_child_or_none(citations),
            )
        )

    # The last citation built is the root; point the others at it.
    if len(citations) > 1:
        for shallower in citations[:-1]:
            shallower.root = citations[-1]
    return citations[-1]
7,508
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L924-L956
[ "def", "auc", "(", "targets", ",", "predictions", ",", "average", "=", "'macro'", ",", "index_map", "=", "None", ")", ":", "_supervised_evaluation_error_checking", "(", "targets", ",", "predictions", ")", "_check_categorical_option_type", "(", "'average'", ",", "average", ",", "[", "'macro'", ",", "None", "]", ")", "_check_prob_and_prob_vector", "(", "predictions", ")", "_check_target_not_float", "(", "targets", ")", "_check_index_map", "(", "index_map", ")", "opts", "=", "{", "\"average\"", ":", "average", ",", "\"binary\"", ":", "predictions", ".", "dtype", "in", "[", "int", ",", "float", "]", "}", "if", "index_map", "is", "not", "None", ":", "opts", "[", "'index_map'", "]", "=", "index_map", "return", "_turicreate", ".", "extensions", ".", "_supervised_streaming_evaluator", "(", "targets", ",", "predictions", ",", "\"auc\"", ",", "opts", ")" ]
Hits the Twitter API `count` times and grabs tweets for the indicated query
def get_tweets_count_times(twitter, count, query=None):
    """Hit the Twitter search API *count* times, collecting tweets for *query*.

    Pages backwards through results using ``max_id``/``since_id``, persists
    the collected tweets and the pagination cursor, and returns a tuple of
    ``(number_of_tweets_saved, remaining_rate_limit_header)``.
    """
    # get id to start from
    oldest_id, newest_id = _get_oldest_id(query=query)
    newest_id = newest_id or oldest_id

    all_tweets = []
    i = 0
    while i < count:
        i += 1
        # use search api to request 100 tweets. Twitter returns the most recent (max_id) first
        if oldest_id <= newest_id:
            tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)
        else:
            tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id,
                                count=TWEETS_PER_SEARCH, twitter=twitter)
        rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')
        rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')
        if not len(tweets):
            # not rate limitted, just no tweets returned by query
            # Jump the cursor far forward so the next run starts in new territory.
            oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000
            break
        elif isinstance(tweets, dict):
            # rate limit hit, or other twython response error
            print(tweets)
            break
        all_tweets.extend(tweets)
        # determine new oldest id
        tweet_ids = {t['id'] for t in tweets}
        if oldest_id:
            tweet_ids.add(oldest_id)
        oldest_id, newest_id = min(tweet_ids), max(tweet_ids)
        # NOTE(review): headers usually come back as strings, so the == 1
        # comparison and the arithmetic below look suspect — confirm that
        # get_lastfunction_header returns ints here.
        if rate_limit_remaining == 1:
            time.sleep(rate_limit_reset - time.time())

    save_tweets(all_tweets, query=query)
    # set id to start from for next time
    _set_oldest_id(oldest_id, newest_id, query=query)
    if len(all_tweets) == 0:
        # Nothing collected: drop the cursor file so the next run restarts.
        os.remove(make_oldest_id_path(query))
    return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')
7,509
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/tweetget/core.py#L41-L86
[ "def", "buildTreeFromAlignment", "(", "filename", ",", "WorkingDir", "=", "None", ",", "SuppressStderr", "=", "None", ")", ":", "app", "=", "Clustalw", "(", "{", "'-tree'", ":", "None", ",", "'-infile'", ":", "filename", "}", ",", "SuppressStderr", "=", "SuppressStderr", ",", "WorkingDir", "=", "WorkingDir", ")", "app", ".", "Parameters", "[", "'-align'", "]", ".", "off", "(", ")", "return", "app", "(", ")" ]
Parse the contents of the output files retrieved in the FolderData .
def parse ( self , * * kwargs ) : try : output_folder = self . retrieved except exceptions . NotExistent : return self . exit_codes . ERROR_NO_RETRIEVED_FOLDER filename_stdout = self . node . get_attribute ( 'output_filename' ) filename_stderr = self . node . get_attribute ( 'error_filename' ) try : with output_folder . open ( filename_stderr , 'r' ) as handle : exit_code = self . parse_stderr ( handle ) except ( OSError , IOError ) : self . logger . exception ( 'Failed to read the stderr file\n%s' , traceback . format_exc ( ) ) return self . exit_codes . ERROR_READING_ERROR_FILE if exit_code : return exit_code try : with output_folder . open ( filename_stdout , 'r' ) as handle : handle . seek ( 0 ) exit_code = self . parse_stdout ( handle ) except ( OSError , IOError ) : self . logger . exception ( 'Failed to read the stdout file\n%s' , traceback . format_exc ( ) ) return self . exit_codes . ERROR_READING_OUTPUT_FILE if exit_code : return exit_code
7,510
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_base.py#L28-L57
[ "def", "create_api_call", "(", "func", ",", "settings", ")", ":", "def", "base_caller", "(", "api_call", ",", "_", ",", "*", "args", ")", ":", "\"\"\"Simply call api_call and ignore settings.\"\"\"", "return", "api_call", "(", "*", "args", ")", "def", "inner", "(", "request", ",", "options", "=", "None", ")", ":", "\"\"\"Invoke with the actual settings.\"\"\"", "this_options", "=", "_merge_options_metadata", "(", "options", ",", "settings", ")", "this_settings", "=", "settings", ".", "merge", "(", "this_options", ")", "if", "this_settings", ".", "retry", "and", "this_settings", ".", "retry", ".", "retry_codes", ":", "api_call", "=", "gax", ".", "retry", ".", "retryable", "(", "func", ",", "this_settings", ".", "retry", ",", "*", "*", "this_settings", ".", "kwargs", ")", "else", ":", "api_call", "=", "gax", ".", "retry", ".", "add_timeout_arg", "(", "func", ",", "this_settings", ".", "timeout", ",", "*", "*", "this_settings", ".", "kwargs", ")", "api_call", "=", "_catch_errors", "(", "api_call", ",", "gax", ".", "config", ".", "API_ERRORS", ")", "return", "api_caller", "(", "api_call", ",", "this_settings", ",", "request", ")", "if", "settings", ".", "page_descriptor", ":", "if", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "raise", "ValueError", "(", "'The API call has incompatible settings: '", "'bundling and page streaming'", ")", "api_caller", "=", "_page_streamable", "(", "settings", ".", "page_descriptor", ")", "elif", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "api_caller", "=", "_bundleable", "(", "settings", ".", "bundle_descriptor", ")", "else", ":", "api_caller", "=", "base_caller", "return", "inner" ]
Parse the content written by the script to standard out into a CifData object .
def parse_stdout(self, filelike):
    """Parse the script's standard output into a ``CifData`` output node.

    Returns an exit code for empty or unparseable output, otherwise
    attaches the parsed CIF under the ``cif`` output and returns ``None``.
    """
    from CifFile import StarError

    content = filelike.read()
    if not content.strip():
        return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE

    try:
        # Rewind: the emptiness check above consumed the stream.
        filelike.seek(0)
        parsed_cif = CifData(file=filelike)
    except StarError:
        self.logger.exception('Failed to parse a `CifData` from the stdout file\n%s', traceback.format_exc())
        return self.exit_codes.ERROR_PARSING_CIF_DATA

    self.out('cif', parsed_cif)
    return
7,511
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_base.py#L59-L79
[ "def", "list_resource_groups", "(", "access_token", ",", "subscription_id", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "'?api-version='", ",", "RESOURCE_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
Parse the content written by the script to standard err .
def parse_stderr(self, filelike):
    """Parse the script's standard error stream.

    Collects ``ERROR,``/``WARNING,``-tagged lines, optionally attaches them
    as a ``messages`` output node, and returns an exit code if an invalid
    command-line option was reported; ``None`` otherwise.
    """
    err_tag = 'ERROR,'
    warn_tag = 'WARNING,'
    collected = {'errors': [], 'warnings': []}

    for line in filelike.readlines():
        if err_tag in line:
            collected['errors'].append(line.split(err_tag)[-1].strip())
        if warn_tag in line:
            collected['warnings'].append(line.split(warn_tag)[-1].strip())

    if self.node.get_option('attach_messages'):
        self.out('messages', Dict(dict=collected))

    if any('unknown option' in error for error in collected['errors']):
        return self.exit_codes.ERROR_INVALID_COMMAND_LINE_OPTION

    return
7,512
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_base.py#L81-L105
[ "def", "unmount", "(", "self", ")", ":", "self", ".", "unmount_bindmounts", "(", ")", "self", ".", "unmount_mounts", "(", ")", "self", ".", "unmount_volume_groups", "(", ")", "self", ".", "unmount_loopbacks", "(", ")", "self", ".", "unmount_base_images", "(", ")", "self", ".", "clean_dirs", "(", ")" ]
Reset the registry to the standard multihash functions .
def reset(cls):
    """Reset the registry to only the standard multihash functions."""
    # Maps function names (hyphens or underscores) to registered functions.
    cls._func_from_name = {}
    # Maps hashlib names to registered functions.
    cls._func_from_hash = {}
    # Hashlib compatibility data by function.
    cls._func_hash = {}
    # Re-register every standard function from the built-in table.
    for (func, hash_name, hash_new) in cls._std_func_data:
        cls._do_register(func, func.name, hash_name, hash_new)
    # Sanity check: every standard Func member must now be registered.
    assert set(cls._func_hash) == set(Func)
7,513
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L104-L118
[ "def", "getShocks", "(", "self", ")", ":", "PersistentShockConsumerType", ".", "getShocks", "(", "self", ")", "# Get permanent and transitory income shocks", "MedShkNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize medical shock array", "MedPriceNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize relative price array", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "these", "=", "t", "==", "self", ".", "t_cycle", "N", "=", "np", ".", "sum", "(", "these", ")", "if", "N", ">", "0", ":", "MedShkAvg", "=", "self", ".", "MedShkAvg", "[", "t", "]", "MedShkStd", "=", "self", ".", "MedShkStd", "[", "t", "]", "MedPrice", "=", "self", ".", "MedPrice", "[", "t", "]", "MedShkNow", "[", "these", "]", "=", "self", ".", "RNG", ".", "permutation", "(", "approxLognormal", "(", "N", ",", "mu", "=", "np", ".", "log", "(", "MedShkAvg", ")", "-", "0.5", "*", "MedShkStd", "**", "2", ",", "sigma", "=", "MedShkStd", ")", "[", "1", "]", ")", "MedPriceNow", "[", "these", "]", "=", "MedPrice", "self", ".", "MedShkNow", "=", "MedShkNow", "self", ".", "MedPriceNow", "=", "MedPriceNow" ]
Return a registered hash function matching the given hint .
def get(cls, func_hint):
    """Return a registered hash function matching the given hint.

    Accepts a ``Func`` member (or its value), an extended function name
    (hyphens or underscores), or an already-registered app-specific code.

    :raises KeyError: if the hint matches nothing in the registry.
    """
    # Most probable case first: a `Func` member or its value.
    try:
        return Func(func_hint)
    except ValueError:
        pass
    # Extended function name, hyphenated or underscored.
    if func_hint in cls._func_from_name:
        return cls._func_from_name[func_hint]
    # Already a registered app-specific code.
    if func_hint in cls._func_hash:
        return func_hint
    raise KeyError("unknown hash function", func_hint)
7,514
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L121-L145
[ "def", "streaming_step", "(", "self", ")", ":", "system", "=", "self", ".", "system", "if", "system", ".", "config", ".", "dime_enable", ":", "system", ".", "streaming", ".", "sync_and_handle", "(", ")", "system", ".", "streaming", ".", "vars_to_modules", "(", ")", "system", ".", "streaming", ".", "vars_to_pmu", "(", ")" ]
Add hash function data to the registry without checks .
def _do_register ( cls , code , name , hash_name = None , hash_new = None ) : cls . _func_from_name [ name . replace ( '-' , '_' ) ] = code cls . _func_from_name [ name . replace ( '_' , '-' ) ] = code if hash_name : cls . _func_from_hash [ hash_name ] = code cls . _func_hash [ code ] = cls . _hash ( hash_name , hash_new )
7,515
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L148-L154
[ "def", "put", "(", "self", ",", "message", ",", "indent", "=", "0", ")", ":", "color", "=", "self", ".", "_colors_conf", ".", "get", "(", "indent", "+", "indent", "%", "2", ",", "self", ".", "_colors_conf", ".", "get", "(", "0", ",", "self", ".", "_default_color", ")", ")", "for", "chunk", "in", "[", "' '", "*", "indent", ",", "self", ".", "_colors", "[", "color", "]", ",", "message", ",", "self", ".", "_colors", "[", "'ENDC'", "]", "]", ":", "self", ".", "_device", ".", "write", "(", "str", "(", "chunk", ")", ")", "self", ".", "_device", ".", "write", "(", "os", ".", "linesep", ")", "self", ".", "_device", ".", "flush", "(", ")" ]
Add an application-specific function to the registry.
def register(cls, code, name, hash_name=None, hash_new=None):
    """Add an application-specific function to the registry.

    :raises ValueError: if *code* is not in the app-specific range, or if
        the function name / hashlib name already maps to a different code.
    """
    if not _is_app_specific_func(code):
        raise ValueError("only application-specific functions can be registered")

    # Reject names that already belong to a different function.
    name_mapping_data = [
        # (mapping, name in mapping, error if existing)
        (cls._func_from_name, name,
         "function name is already registered for a different function"),
        (cls._func_from_hash, hash_name,
         "hashlib name is already registered for a different function"),
    ]
    for (mapping, mapped_name, errmsg) in name_mapping_data:
        clashing = mapping.get(mapped_name, code)
        if clashing != code:
            raise ValueError(errmsg, clashing)

    # Unregister first so a re-registration leaves no orphan entries.
    if code in cls._func_hash:
        cls.unregister(code)

    cls._do_register(code, name, hash_name, hash_new)
7,516
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L157-L199
[ "def", "_reatach", "(", "self", ")", ":", "for", "item", ",", "p", ",", "idx", "in", "self", ".", "_detached", ":", "# The item may have been deleted.", "if", "self", ".", "treeview", ".", "exists", "(", "item", ")", "and", "self", ".", "treeview", ".", "exists", "(", "p", ")", ":", "self", ".", "treeview", ".", "move", "(", "item", ",", "p", ",", "idx", ")", "self", ".", "_detached", "=", "[", "]" ]
Remove an application - specific function from the registry .
def unregister(cls, code):
    """Remove an application-specific function from the registry.

    :raises ValueError: if *code* is a standard ``Func`` member.
    :raises KeyError: if *code* is not registered.
    """
    if code in Func:
        raise ValueError("only application-specific functions can be unregistered")

    # Drop every name alias (hyphenated and underscored) pointing at the code.
    aliases = {alias for (alias, registered) in cls._func_from_name.items() if registered == code}
    for alias in aliases:
        del cls._func_from_name[alias]

    # Drop the hashlib compatibility record and any hashlib-name mapping.
    hash = cls._func_hash.pop(code)
    if hash.name:
        del cls._func_from_hash[hash.name]
7,517
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L202-L230
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fileobj", "is", "None", ":", "return", "if", "self", ".", "mode", "==", "WRITE", ":", "self", ".", "close_member", "(", ")", "self", ".", "fileobj", "=", "None", "elif", "self", ".", "mode", "==", "READ", ":", "self", ".", "fileobj", "=", "None", "if", "self", ".", "myfileobj", ":", "self", ".", "myfileobj", ".", "close", "(", ")", "self", ".", "myfileobj", "=", "None" ]
Return a hashlib - compatible object for the multihash func .
def hash_from_func(cls, func):
    """Return a hashlib-compatible object for the multihash *func*.

    Returns ``None`` when no constructor is registered for the function.
    """
    constructor = cls._func_hash[func].new
    if not constructor:
        return None
    return constructor()
7,518
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/funcs.py#L247-L259
[ "def", "numRegisteredForRole", "(", "self", ",", "role", ",", "includeTemporaryRegs", "=", "False", ")", ":", "count", "=", "self", ".", "eventregistration_set", ".", "filter", "(", "cancelled", "=", "False", ",", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "count", "(", ")", "if", "includeTemporaryRegs", ":", "count", "+=", "self", ".", "temporaryeventregistration_set", ".", "filter", "(", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "exclude", "(", "registration__expirationDate__lte", "=", "timezone", ".", "now", "(", ")", ")", ".", "count", "(", ")", "return", "count" ]
Plot P-V-T data before fitting.
def thermal_data(data, figsize=(12, 4), ms_data=50,
                 v_label='Unit-cell volume $(\mathrm{\AA}^3)$',
                 pdf_filen=None, title='P-V-T data'):
    """Plot P-V-T data before fitting.

    :param data: mapping with 'p', 'v', 'temp' arrays (optionally with
        uncertainties); assumed pressure in GPa, temperature in K —
        TODO confirm units against callers.
    :param figsize: figure size passed to matplotlib.
    :param ms_data: marker size for the data points.
    :param v_label: y-axis label for the volume panel.
    :param pdf_filen: if given, the figure is saved to this file.
    :param title: figure title.
    """
    # basic figure setup
    f, ax = plt.subplots(1, 2, figsize=figsize, sharex=True)
    # read data to plot
    if isuncertainties([data['p'], data['v'], data['temp']]):
        # Split nominal values from standard deviations and draw error bars.
        p = unp.nominal_values(data['p'])
        v = unp.nominal_values(data['v'])
        temp = unp.nominal_values(data['temp'])
        sp = unp.std_devs(data['p'])
        sv = unp.std_devs(data['v'])
        stemp = unp.std_devs(data['temp'])
        ax[0].errorbar(p, v, xerr=sp, yerr=sv,
                       marker=' ', c='k', ms=0, mew=0, linestyle='None',
                       capsize=0, lw=0.5, zorder=1)
        ax[1].errorbar(p, temp, xerr=sp, yerr=stemp,
                       marker=' ', c='k', ms=0, mew=0, linestyle='None',
                       capsize=0, lw=0.5, zorder=1)
    else:
        p = data['p']
        v = data['v']
        temp = data['temp']
    # Scatter both panels, colour-coded by temperature.
    points = ax[0].scatter(p, v, marker='o', s=ms_data, c=temp, cmap=c_map,
                           vmin=300., vmax=temp.max(), zorder=2)
    points = ax[1].scatter(p, temp, marker='o', s=ms_data, c=temp, cmap=c_map,
                           vmin=300., vmax=temp.max(), zorder=2)
    ax[0].set_xlabel('Pressure (GPa)')
    ax[1].set_xlabel('Pressure (GPa)')
    ax[0].set_ylabel(v_label)
    ax[1].set_ylabel('Temperature (K)')
    f.suptitle(title)
    # the parameters are the specified position you set
    position = f.add_axes([0.92, 0.11, .01, 0.75])
    f.colorbar(points, orientation="vertical", cax=position)
    # position.text(150., 0.5, 'Temperature (K)', fontsize=10,
    #               rotation=270, va='center')
    if pdf_filen is not None:
        f.savefig(pdf_filen)
7,519
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/plot/thermal_fit.py#L15-L68
[ "def", "listen_to_node", "(", "self", ",", "id_", ")", ":", "if", "r_client", ".", "get", "(", "id_", ")", "is", "None", ":", "return", "else", ":", "self", ".", "toredis", ".", "subscribe", "(", "_pubsub_key", "(", "id_", ")", ",", "callback", "=", "self", ".", "callback", ")", "self", ".", "_listening_to", "[", "_pubsub_key", "(", "id_", ")", "]", "=", "id_", "return", "id_" ]
Return the binary digest of data with the given func .
def _do_digest ( data , func ) : func = FuncReg . get ( func ) hash = FuncReg . hash_from_func ( func ) if not hash : raise ValueError ( "no available hash function for hash" , func ) hash . update ( data ) return bytes ( hash . digest ( ) )
7,520
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L16-L23
[ "def", "restart", "(", "self", ",", "offset", ":", "int", ")", ":", "yield", "from", "self", ".", "_control_stream", ".", "write_command", "(", "Command", "(", "'REST'", ",", "str", "(", "offset", ")", ")", ")", "reply", "=", "yield", "from", "self", ".", "_control_stream", ".", "read_reply", "(", ")", "self", ".", "raise_if_not_match", "(", "'Restart'", ",", "ReplyCodes", ".", "requested_file_action_pending_further_information", ",", "reply", ")" ]
Hash the given data into a new Multihash .
def digest ( data , func ) : digest = _do_digest ( data , func ) return Multihash ( func , digest )
7,521
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L186-L198
[ "def", "waitForEvent", "(", "self", ",", "tv", "=", "0", ")", ":", "if", "tv", "is", "None", ":", "tv", "=", "0", "tv_s", "=", "int", "(", "tv", ")", "real_tv", "=", "libusb1", ".", "timeval", "(", "tv_s", ",", "int", "(", "(", "tv", "-", "tv_s", ")", "*", "1000000", ")", ")", "libusb1", ".", "libusb_wait_for_event", "(", "self", ".", "__context_p", ",", "byref", "(", "real_tv", ")", ")" ]
r Decode a multihash - encoded digest into a Multihash .
def decode ( mhash , encoding = None ) : mhash = bytes ( mhash ) if encoding : mhash = CodecReg . get_decoder ( encoding ) ( mhash ) try : func = mhash [ 0 ] length = mhash [ 1 ] digest = mhash [ 2 : ] except IndexError as ie : raise ValueError ( "multihash is too short" ) from ie if length != len ( digest ) : raise ValueError ( "multihash length field does not match digest field length" ) return Multihash ( func , digest )
7,522
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L201-L235
[ "def", "configure_resultproducer", "(", "self", ")", ":", "rproducer", "=", "javabridge", ".", "make_instance", "(", "\"weka/experiment/RandomSplitResultProducer\"", ",", "\"()V\"", ")", "javabridge", ".", "call", "(", "rproducer", ",", "\"setRandomizeData\"", ",", "\"(Z)V\"", ",", "not", "self", ".", "preserve_order", ")", "javabridge", ".", "call", "(", "rproducer", ",", "\"setTrainPercent\"", ",", "\"(D)V\"", ",", "self", ".", "percentage", ")", "speval", ",", "classifier", "=", "self", ".", "configure_splitevaluator", "(", ")", "javabridge", ".", "call", "(", "rproducer", ",", "\"setSplitEvaluator\"", ",", "\"(Lweka/experiment/SplitEvaluator;)V\"", ",", "speval", ")", "prop_path", "=", "javabridge", ".", "get_env", "(", ")", ".", "make_object_array", "(", "2", ",", "javabridge", ".", "get_env", "(", ")", ".", "find_class", "(", "\"weka/experiment/PropertyNode\"", ")", ")", "cls", "=", "javabridge", ".", "get_env", "(", ")", ".", "find_class", "(", "\"weka/experiment/RandomSplitResultProducer\"", ")", "desc", "=", "javabridge", ".", "make_instance", "(", "\"java/beans/PropertyDescriptor\"", ",", "\"(Ljava/lang/String;Ljava/lang/Class;)V\"", ",", "\"splitEvaluator\"", ",", "cls", ")", "node", "=", "javabridge", ".", "make_instance", "(", "\"weka/experiment/PropertyNode\"", ",", "\"(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V\"", ",", "speval", ",", "desc", ",", "cls", ")", "javabridge", ".", "get_env", "(", ")", ".", "set_object_array_element", "(", "prop_path", ",", "0", ",", "node", ")", "cls", "=", "javabridge", ".", "get_env", "(", ")", ".", "get_object_class", "(", "speval", ")", "desc", "=", "javabridge", ".", "make_instance", "(", "\"java/beans/PropertyDescriptor\"", ",", "\"(Ljava/lang/String;Ljava/lang/Class;)V\"", ",", "\"classifier\"", ",", "cls", ")", "node", "=", "javabridge", ".", "make_instance", "(", "\"weka/experiment/PropertyNode\"", ",", "\"(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V\"", ",", 
"javabridge", ".", "call", "(", "speval", ",", "\"getClass\"", ",", "\"()Ljava/lang/Class;\"", ")", ",", "desc", ",", "cls", ")", "javabridge", ".", "get_env", "(", ")", ".", "set_object_array_element", "(", "prop_path", ",", "1", ",", "node", ")", "return", "rproducer", ",", "prop_path" ]
Create a Multihash from a hashlib - compatible hash object .
def from_hash ( self , hash ) : try : func = FuncReg . func_from_hash ( hash ) except KeyError as ke : raise ValueError ( "no matching multihash function" , hash . name ) from ke digest = hash . digest ( ) return Multihash ( func , digest )
7,523
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L74-L97
[ "def", "cancel_order", "(", "self", ",", "order_id", ":", "str", ")", "->", "str", ":", "self", ".", "log", ".", "debug", "(", "f'Canceling order id={order_id} on {self.name}'", ")", "if", "self", ".", "dry_run", ":", "# Don't cancel if dry run", "self", ".", "log", ".", "warning", "(", "f'DRY RUN: Order cancelled on {self.name}: id={order_id}'", ")", "return", "order_id", "try", ":", "# Cancel order", "self", ".", "_cancel_order", "(", "order_id", ")", "except", "Exception", "as", "e", ":", "raise", "self", ".", "exception", "(", "OrderNotFound", ",", "f'Failed to cancel order: id={order_id}'", ",", "e", ")", "from", "e", "self", ".", "log", ".", "info", "(", "f'Order cancelled on {self.name}: id={order_id}'", ")", "return", "order_id" ]
r Encode into a multihash - encoded digest .
def encode ( self , encoding = None ) : try : fc = self . func . value except AttributeError : # application-specific function code fc = self . func mhash = bytes ( [ fc , len ( self . digest ) ] ) + self . digest if encoding : mhash = CodecReg . get_encoder ( encoding ) ( mhash ) return mhash
7,524
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L120-L145
[ "def", "describe_topic", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "topics", "=", "list_topics", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "=", "{", "}", "for", "topic", ",", "arn", "in", "topics", ".", "items", "(", ")", ":", "if", "name", "in", "(", "topic", ",", "arn", ")", ":", "ret", "=", "{", "'TopicArn'", ":", "arn", "}", "ret", "[", "'Attributes'", "]", "=", "get_topic_attributes", "(", "arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "[", "'Subscriptions'", "]", "=", "list_subscriptions_by_topic", "(", "arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "# Grab extended attributes for the above subscriptions", "for", "sub", "in", "range", "(", "len", "(", "ret", "[", "'Subscriptions'", "]", ")", ")", ":", "sub_arn", "=", "ret", "[", "'Subscriptions'", "]", "[", "sub", "]", "[", "'SubscriptionArn'", "]", "if", "not", "sub_arn", ".", "startswith", "(", "'arn:aws:sns:'", ")", ":", "# Sometimes a sub is in e.g. PendingAccept or other", "# wierd states and doesn't have an ARN yet", "log", ".", "debug", "(", "'Subscription with invalid ARN %s skipped...'", ",", "sub_arn", ")", "continue", "deets", "=", "get_subscription_attributes", "(", "SubscriptionArn", "=", "sub_arn", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "[", "'Subscriptions'", "]", "[", "sub", "]", ".", "update", "(", "deets", ")", "return", "ret" ]
r Does the given data hash to the digest in this Multihash ?
def verify ( self , data ) : digest = _do_digest ( data , self . func ) return digest [ : len ( self . digest ) ] == self . digest
7,525
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L147-L163
[ "def", "run", "(", "self", ",", "context", ")", ":", "try", ":", "self", ".", "_load_state", "(", "context", ")", "spec", "=", "self", ".", "module_router", ".", "endpoint_routing", "(", "context", ")", "resp", "=", "self", ".", "_run_bound_endpoint", "(", "context", ",", "spec", ")", "self", ".", "_save_state", "(", "resp", ",", "context", ")", "except", "SATOSANoBoundEndpointError", ":", "raise", "except", "SATOSAError", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Uncaught SATOSA error \"", ",", "context", ".", "state", ",", "exc_info", "=", "True", ")", "raise", "except", "UnknownSystemEntity", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"configuration error: unknown system entity \"", "+", "str", "(", "err", ")", ",", "context", ".", "state", ",", "exc_info", "=", "False", ")", "raise", "except", "Exception", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Uncaught exception\"", ",", "context", ".", "state", ",", "exc_info", "=", "True", ")", "raise", "SATOSAUnknownError", "(", "\"Unknown error\"", ")", "from", "err", "return", "resp" ]
Return a new Multihash with a shorter digest length .
def truncate ( self , length ) : if length > len ( self . digest ) : raise ValueError ( "cannot enlarge the original digest by %d bytes" % ( length - len ( self . digest ) ) ) return self . __class__ ( self . func , self . digest [ : length ] )
7,526
https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L165-L183
[ "def", "_perturbation", "(", "self", ")", ":", "if", "self", ".", "P", ">", "1", ":", "scales", "=", "[", "]", "for", "term_i", "in", "range", "(", "self", ".", "n_terms", ")", ":", "_scales", "=", "SP", ".", "randn", "(", "self", ".", "diag", "[", "term_i", "]", ".", "shape", "[", "0", "]", ")", "if", "self", ".", "offset", "[", "term_i", "]", ">", "0", ":", "_scales", "=", "SP", ".", "concatenate", "(", "(", "_scales", ",", "SP", ".", "zeros", "(", "1", ")", ")", ")", "scales", ".", "append", "(", "_scales", ")", "scales", "=", "SP", ".", "concatenate", "(", "scales", ")", "else", ":", "scales", "=", "SP", ".", "randn", "(", "self", ".", "vd", ".", "getNumberScales", "(", ")", ")", "return", "scales" ]
Set the VALUE for KEY predicate in the Metadata Graph
def set ( self , key : URIRef , value : Union [ Literal , BNode , URIRef , str , int ] , lang : Optional [ str ] = None ) : if not isinstance ( value , Literal ) and lang is not None : value = Literal ( value , lang = lang ) elif not isinstance ( value , ( BNode , URIRef ) ) : value , _type = term . _castPythonToLiteral ( value ) if _type is None : value = Literal ( value ) else : value = Literal ( value , datatype = _type ) self . graph . set ( ( self . asNode ( ) , key , value ) )
7,527
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L55-L70
[ "def", "replace_postgres_db", "(", "self", ",", "file_url", ")", ":", "self", ".", "print_message", "(", "\"Replacing postgres database\"", ")", "if", "file_url", ":", "self", ".", "print_message", "(", "\"Sourcing data from online backup file '%s'\"", "%", "file_url", ")", "source_file", "=", "self", ".", "download_file_from_url", "(", "self", ".", "args", ".", "source_app", ",", "file_url", ")", "elif", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ":", "self", ".", "print_message", "(", "\"Sourcing data from database '%s'\"", "%", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ")", "source_file", "=", "self", ".", "dump_database", "(", ")", "else", ":", "self", ".", "print_message", "(", "\"Sourcing data from local backup file %s\"", "%", "self", ".", "args", ".", "file", ")", "source_file", "=", "self", ".", "args", ".", "file", "self", ".", "drop_database", "(", ")", "self", ".", "create_database", "(", ")", "source_file", "=", "self", ".", "unzip_file_if_necessary", "(", "source_file", ")", "self", ".", "print_message", "(", "\"Importing '%s' into database '%s'\"", "%", "(", "source_file", ",", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ")", ")", "args", "=", "[", "\"pg_restore\"", ",", "\"--no-acl\"", ",", "\"--no-owner\"", ",", "\"--dbname=%s\"", "%", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ",", "source_file", ",", "]", "args", ".", "extend", "(", "self", ".", "databases", "[", "'destination'", "]", "[", "'args'", "]", ")", "subprocess", ".", "check_call", "(", "args", ")" ]
Add a triple to the graph related to this node
def add ( self , key , value , lang = None ) : if not isinstance ( value , Literal ) and lang is not None : value = Literal ( value , lang = lang ) elif not isinstance ( value , ( BNode , URIRef ) ) : value , _type = term . _castPythonToLiteral ( value ) if _type is None : value = Literal ( value ) else : value = Literal ( value , datatype = _type ) self . graph . add ( ( self . asNode ( ) , key , value ) )
7,528
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L72-L87
[ "def", "Run", "(", "self", ")", ":", "self", ".", "_GetArgs", "(", ")", "goodlogging", ".", "Log", ".", "Info", "(", "\"CLEAR\"", ",", "\"Using database: {0}\"", ".", "format", "(", "self", ".", "_databasePath", ")", ")", "self", ".", "_db", "=", "database", ".", "RenamerDB", "(", "self", ".", "_databasePath", ")", "if", "self", ".", "_dbPrint", "or", "self", ".", "_dbUpdate", ":", "goodlogging", ".", "Log", ".", "Seperator", "(", ")", "self", ".", "_db", ".", "PrintAllTables", "(", ")", "if", "self", ".", "_dbUpdate", ":", "goodlogging", ".", "Log", ".", "Seperator", "(", ")", "self", ".", "_db", ".", "ManualUpdateTables", "(", ")", "self", ".", "_GetDatabaseConfig", "(", ")", "if", "self", ".", "_enableExtract", ":", "goodlogging", ".", "Log", ".", "Seperator", "(", ")", "extractFileList", "=", "[", "]", "goodlogging", ".", "Log", ".", "Info", "(", "\"CLEAR\"", ",", "\"Parsing source directory for compressed files\"", ")", "goodlogging", ".", "Log", ".", "IncreaseIndent", "(", ")", "extract", ".", "GetCompressedFilesInDir", "(", "self", ".", "_sourceDir", ",", "extractFileList", ",", "self", ".", "_ignoredDirsList", ")", "goodlogging", ".", "Log", ".", "DecreaseIndent", "(", ")", "goodlogging", ".", "Log", ".", "Seperator", "(", ")", "extract", ".", "Extract", "(", "extractFileList", ",", "self", ".", "_supportedFormatsList", ",", "self", ".", "_archiveDir", ",", "self", ".", "_skipUserInputExtract", ")", "goodlogging", ".", "Log", ".", "Seperator", "(", ")", "tvFileList", "=", "[", "]", "goodlogging", ".", "Log", ".", "Info", "(", "\"CLEAR\"", ",", "\"Parsing source directory for compatible files\"", ")", "goodlogging", ".", "Log", ".", "IncreaseIndent", "(", ")", "self", ".", "_GetSupportedFilesInDir", "(", "self", ".", "_sourceDir", ",", "tvFileList", ",", "self", ".", "_supportedFormatsList", ",", "self", ".", "_ignoredDirsList", ")", "goodlogging", ".", "Log", ".", "DecreaseIndent", "(", ")", "tvRenamer", "=", "renamer", ".", "TVRenamer", "(", "self", ".", 
"_db", ",", "tvFileList", ",", "self", ".", "_archiveDir", ",", "guideName", "=", "'EPGUIDES'", ",", "tvDir", "=", "self", ".", "_tvDir", ",", "inPlaceRename", "=", "self", ".", "_inPlaceRename", ",", "forceCopy", "=", "self", ".", "_crossSystemCopyEnabled", ",", "skipUserInput", "=", "self", ".", "_skipUserInputRename", ")", "tvRenamer", ".", "Run", "(", ")" ]
Returns triple related to this node . Can filter on lang
def get ( self , key , lang = None ) : if lang is not None : for o in self . graph . objects ( self . asNode ( ) , key ) : if o . language == lang : yield o else : for o in self . graph . objects ( self . asNode ( ) , key ) : yield o
7,529
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L89-L102
[ "def", "_send_register_payload", "(", "self", ",", "websocket", ")", ":", "file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "HANDSHAKE_FILE_NAME", ")", "data", "=", "codecs", ".", "open", "(", "file", ",", "'r'", ",", "'utf-8'", ")", "raw_handshake", "=", "data", ".", "read", "(", ")", "handshake", "=", "json", ".", "loads", "(", "raw_handshake", ")", "handshake", "[", "'payload'", "]", "[", "'client-key'", "]", "=", "self", ".", "client_key", "yield", "from", "websocket", ".", "send", "(", "json", ".", "dumps", "(", "handshake", ")", ")", "raw_response", "=", "yield", "from", "websocket", ".", "recv", "(", ")", "response", "=", "json", ".", "loads", "(", "raw_response", ")", "if", "response", "[", "'type'", "]", "==", "'response'", "and", "response", "[", "'payload'", "]", "[", "'pairingType'", "]", "==", "'PROMPT'", ":", "raw_response", "=", "yield", "from", "websocket", ".", "recv", "(", ")", "response", "=", "json", ".", "loads", "(", "raw_response", ")", "if", "response", "[", "'type'", "]", "==", "'registered'", ":", "self", ".", "client_key", "=", "response", "[", "'payload'", "]", "[", "'client-key'", "]", "self", ".", "save_key_file", "(", ")" ]
Returns a single triple related to this node .
def get_single ( self , key , lang = None ) : if not isinstance ( key , URIRef ) : key = URIRef ( key ) if lang is not None : default = None for o in self . graph . objects ( self . asNode ( ) , key ) : default = o if o . language == lang : return o return default else : for o in self . graph . objects ( self . asNode ( ) , key ) : return o
7,530
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L104-L123
[ "def", "startDataStoreMachine", "(", "self", ",", "dataStoreItemName", ",", "machineName", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/items/enterpriseDatabases/%s/machines/%s/start\"", "%", "(", "dataStoreItemName", ",", "machineName", ")", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Remove triple matching the predicate or the object
def remove ( self , predicate = None , obj = None ) : self . graph . remove ( ( self . asNode ( ) , predicate , obj ) )
7,531
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L139-L145
[ "def", "InsertData", "(", "self", ",", "table_id", ",", "fd", ",", "schema", ",", "job_id", ")", ":", "configuration", "=", "{", "\"schema\"", ":", "{", "\"fields\"", ":", "schema", "}", ",", "\"destinationTable\"", ":", "{", "\"projectId\"", ":", "self", ".", "project_id", ",", "\"tableId\"", ":", "table_id", ",", "\"datasetId\"", ":", "self", ".", "dataset_id", "}", ",", "\"sourceFormat\"", ":", "\"NEWLINE_DELIMITED_JSON\"", ",", "}", "body", "=", "{", "\"configuration\"", ":", "{", "\"load\"", ":", "configuration", "}", ",", "\"jobReference\"", ":", "{", "\"projectId\"", ":", "self", ".", "project_id", ",", "\"jobId\"", ":", "job_id", "}", "}", "# File content can be gzipped for bandwidth efficiency. The server handles", "# it correctly without any changes to the request.", "mediafile", "=", "http", ".", "MediaFileUpload", "(", "fd", ".", "name", ",", "mimetype", "=", "\"application/octet-stream\"", ")", "job", "=", "self", ".", "service", ".", "jobs", "(", ")", ".", "insert", "(", "projectId", "=", "self", ".", "project_id", ",", "body", "=", "body", ",", "media_body", "=", "mediafile", ")", "try", ":", "response", "=", "job", ".", "execute", "(", ")", "return", "response", "except", "errors", ".", "HttpError", "as", "e", ":", "if", "self", ".", "GetDataset", "(", "self", ".", "dataset_id", ")", ":", "logging", ".", "exception", "(", "\"Error with job: %s\"", ",", "job_id", ")", "else", ":", "# If this is our first export ever, we need to create the dataset.", "logging", ".", "info", "(", "\"Attempting to create dataset: %s\"", ",", "self", ".", "dataset_id", ")", "self", ".", "CreateDataset", "(", ")", "return", "self", ".", "RetryUpload", "(", "job", ",", "job_id", ",", "e", ")" ]
Remove triple where Metadata is the object
def unlink ( self , subj = None , predicate = None ) : self . graph . remove ( ( subj , predicate , self . asNode ( ) ) )
7,532
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L147-L153
[ "def", "_irregular", "(", "singular", ",", "plural", ")", ":", "def", "caseinsensitive", "(", "string", ")", ":", "return", "''", ".", "join", "(", "'['", "+", "char", "+", "char", ".", "upper", "(", ")", "+", "']'", "for", "char", "in", "string", ")", "if", "singular", "[", "0", "]", ".", "upper", "(", ")", "==", "plural", "[", "0", "]", ".", "upper", "(", ")", ":", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"(?i)({}){}$\"", ".", "format", "(", "singular", "[", "0", "]", ",", "singular", "[", "1", ":", "]", ")", ",", "r'\\1'", "+", "plural", "[", "1", ":", "]", ")", ")", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"(?i)({}){}$\"", ".", "format", "(", "plural", "[", "0", "]", ",", "plural", "[", "1", ":", "]", ")", ",", "r'\\1'", "+", "plural", "[", "1", ":", "]", ")", ")", "SINGULARS", ".", "insert", "(", "0", ",", "(", "r\"(?i)({}){}$\"", ".", "format", "(", "plural", "[", "0", "]", ",", "plural", "[", "1", ":", "]", ")", ",", "r'\\1'", "+", "singular", "[", "1", ":", "]", ")", ")", "else", ":", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", "(", "singular", "[", "0", "]", ".", "upper", "(", ")", ",", "caseinsensitive", "(", "singular", "[", "1", ":", "]", ")", ")", ",", "plural", "[", "0", "]", ".", "upper", "(", ")", "+", "plural", "[", "1", ":", "]", ")", ")", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", "(", "singular", "[", "0", "]", ".", "lower", "(", ")", ",", "caseinsensitive", "(", "singular", "[", "1", ":", "]", ")", ")", ",", "plural", "[", "0", "]", ".", "lower", "(", ")", "+", "plural", "[", "1", ":", "]", ")", ")", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", "(", "plural", "[", "0", "]", ".", "upper", "(", ")", ",", "caseinsensitive", "(", "plural", "[", "1", ":", "]", ")", ")", ",", "plural", "[", "0", "]", ".", "upper", "(", ")", "+", "plural", "[", "1", ":", "]", ")", ")", "PLURALS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", 
"(", "plural", "[", "0", "]", ".", "lower", "(", ")", ",", "caseinsensitive", "(", "plural", "[", "1", ":", "]", ")", ")", ",", "plural", "[", "0", "]", ".", "lower", "(", ")", "+", "plural", "[", "1", ":", "]", ")", ")", "SINGULARS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", "(", "plural", "[", "0", "]", ".", "upper", "(", ")", ",", "caseinsensitive", "(", "plural", "[", "1", ":", "]", ")", ")", ",", "singular", "[", "0", "]", ".", "upper", "(", ")", "+", "singular", "[", "1", ":", "]", ")", ")", "SINGULARS", ".", "insert", "(", "0", ",", "(", "r\"{}{}$\"", ".", "format", "(", "plural", "[", "0", "]", ".", "lower", "(", ")", ",", "caseinsensitive", "(", "plural", "[", "1", ":", "]", ")", ")", ",", "singular", "[", "0", "]", ".", "lower", "(", ")", "+", "singular", "[", "1", ":", "]", ")", ")" ]
Retrieve a metadata node or generate a new one
def getOr ( subject , predicate , * args , * * kwargs ) : if ( subject , predicate , None ) in get_graph ( ) : return Metadata ( node = get_graph ( ) . objects ( subject , predicate ) . __next__ ( ) ) return Metadata ( * args , * * kwargs )
7,533
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L235-L246
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "return", "{", "vol", ".", "path", "(", ")", ":", "[", "path", ".", "text", "for", "path", "in", "ElementTree", ".", "fromstring", "(", "vol", ".", "XMLDesc", "(", ")", ")", ".", "findall", "(", "'.//backingStore/path'", ")", "]", "for", "vol", "in", "volumes", "if", "_is_valid_volume", "(", "vol", ")", "}" ]
manage migrate backup_app 0004_BackupRun_ini_file_20160203_1415
def forwards_func ( apps , schema_editor ) : print ( "\n" ) create_count = 0 BackupRun = apps . get_model ( "backup_app" , "BackupRun" ) # historical version of BackupRun backup_runs = BackupRun . objects . all ( ) for backup_run in backup_runs : # Use the origin BackupRun model to get access to write_config() temp = OriginBackupRun ( name = backup_run . name , backup_datetime = backup_run . backup_datetime ) try : temp . write_config ( ) except OSError as err : print ( "ERROR creating config file: %s" % err ) else : create_count += 1 # print("%r created." % config_path.path) print ( "%i config files created.\n" % create_count )
7,534
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py#L9-L27
[ "def", "not_storable", "(", "_type", ")", ":", "return", "Storable", "(", "_type", ",", "handlers", "=", "StorableHandler", "(", "poke", "=", "fake_poke", ",", "peek", "=", "fail_peek", "(", "_type", ")", ")", ")" ]
manage migrate backup_app 0003_auto_20160127_2002
def reverse_func ( apps , schema_editor ) : print ( "\n" ) remove_count = 0 BackupRun = apps . get_model ( "backup_app" , "BackupRun" ) backup_runs = BackupRun . objects . all ( ) for backup_run in backup_runs : # Use the origin BackupRun model to get access to get_config_path() temp = OriginBackupRun ( name = backup_run . name , backup_datetime = backup_run . backup_datetime ) config_path = temp . get_config_path ( ) try : config_path . unlink ( ) except OSError as err : print ( "ERROR removing config file: %s" % err ) else : remove_count += 1 # print("%r removed." % config_path.path) print ( "%i config files removed.\n" % remove_count )
7,535
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py#L30-L50
[ "def", "sort2groups", "(", "array", ",", "gpat", "=", "[", "'_R1'", ",", "'_R2'", "]", ")", ":", "groups", "=", "[", "REGroup", "(", "gp", ")", "for", "gp", "in", "gpat", "]", "unmatched", "=", "[", "]", "for", "item", "in", "array", ":", "matched", "=", "False", "for", "m", "in", "groups", ":", "if", "m", ".", "match", "(", "item", ")", ":", "matched", "=", "True", "break", "if", "not", "matched", ":", "unmatched", ".", "append", "(", "item", ")", "return", "[", "sorted", "(", "m", ".", "list", ")", "for", "m", "in", "groups", "]", ",", "sorted", "(", "unmatched", ")" ]
calculate Gruneisen parameter for the Speziale equation
def speziale_grun ( v , v0 , gamma0 , q0 , q1 ) : if isuncertainties ( [ v , v0 , gamma0 , q0 , q1 ] ) : gamma = gamma0 * unp . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) else : gamma = gamma0 * np . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) return gamma
7,536
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm_Speziale.py#L15-L30
[ "def", "WriteBlobs", "(", "self", ",", "blob_id_data_map", ",", "cursor", "=", "None", ")", ":", "chunks", "=", "[", "]", "for", "blob_id", ",", "blob", "in", "iteritems", "(", "blob_id_data_map", ")", ":", "chunks", ".", "extend", "(", "_BlobToChunks", "(", "blob_id", ".", "AsBytes", "(", ")", ",", "blob", ")", ")", "for", "values", "in", "_PartitionChunks", "(", "chunks", ")", ":", "_Insert", "(", "cursor", ",", "\"blobs\"", ",", "values", ")" ]
calculate Debye temperature for the Speziale equation
def speziale_debyetemp ( v , v0 , gamma0 , q0 , q1 , theta0 ) : if isuncertainties ( [ v , v0 , gamma0 , q0 , q1 , theta0 ] ) : f_vu = np . vectorize ( uct . wrap ( integrate_gamma ) , excluded = [ 1 , 2 , 3 , 4 , 5 , 6 ] ) integ = f_vu ( v , v0 , gamma0 , q0 , q1 , theta0 ) theta = unp . exp ( unp . log ( theta0 ) - integ ) else : f_v = np . vectorize ( integrate_gamma , excluded = [ 1 , 2 , 3 , 4 , 5 , 6 ] ) integ = f_v ( v , v0 , gamma0 , q0 , q1 , theta0 ) theta = np . exp ( np . log ( theta0 ) - integ ) return theta
7,537
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm_Speziale.py#L33-L54
[ "def", "cluster_resources", "(", "self", ")", ":", "resources", "=", "defaultdict", "(", "int", ")", "clients", "=", "self", ".", "client_table", "(", ")", "for", "client", "in", "clients", ":", "# Only count resources from live clients.", "if", "client", "[", "\"IsInsertion\"", "]", ":", "for", "key", ",", "value", "in", "client", "[", "\"Resources\"", "]", ".", "items", "(", ")", ":", "resources", "[", "key", "]", "+=", "value", "return", "dict", "(", "resources", ")" ]
internal function to calculate Debye temperature
def integrate_gamma ( v , v0 , gamma0 , q0 , q1 , theta0 ) : def f_integrand ( v ) : gamma = gamma0 * np . exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) ) return gamma / v theta_term = quad ( f_integrand , v0 , v ) [ 0 ] return theta_term
7,538
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm_Speziale.py#L57-L74
[ "def", "load_projects", "(", "self", ")", ":", "server_config", "=", "Config", ".", "instance", "(", ")", ".", "get_section_config", "(", "\"Server\"", ")", "projects_path", "=", "os", ".", "path", ".", "expanduser", "(", "server_config", ".", "get", "(", "\"projects_path\"", ",", "\"~/GNS3/projects\"", ")", ")", "os", ".", "makedirs", "(", "projects_path", ",", "exist_ok", "=", "True", ")", "try", ":", "for", "project_path", "in", "os", ".", "listdir", "(", "projects_path", ")", ":", "project_dir", "=", "os", ".", "path", ".", "join", "(", "projects_path", ",", "project_path", ")", "if", "os", ".", "path", ".", "isdir", "(", "project_dir", ")", ":", "for", "file", "in", "os", ".", "listdir", "(", "project_dir", ")", ":", "if", "file", ".", "endswith", "(", "\".gns3\"", ")", ":", "try", ":", "yield", "from", "self", ".", "load_project", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "file", ")", ",", "load", "=", "False", ")", "except", "(", "aiohttp", ".", "web_exceptions", ".", "HTTPConflict", ",", "NotImplementedError", ")", ":", "pass", "# Skip not compatible projects", "except", "OSError", "as", "e", ":", "log", ".", "error", "(", "str", "(", "e", ")", ")" ]
calculate thermal pressure for the Speziale equation
def speziale_pth ( v , temp , v0 , gamma0 , q0 , q1 , theta0 , n , z , t_ref = 300. , three_r = 3. * constants . R ) : v_mol = vol_uc2mol ( v , z ) gamma = speziale_grun ( v , v0 , gamma0 , q0 , q1 ) theta = speziale_debyetemp ( v , v0 , gamma0 , q0 , q1 , theta0 ) xx = theta / temp debye = debye_E ( xx ) if t_ref == 0. : debye0 = 0. else : xx0 = theta / t_ref debye0 = debye_E ( xx0 ) Eth0 = three_r * n * t_ref * debye0 Eth = three_r * n * temp * debye delEth = Eth - Eth0 p_th = ( gamma / v_mol * delEth ) * 1.e-9 return p_th
7,539
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm_Speziale.py#L77-L109
[ "def", "registration_backend", "(", "backend", "=", "None", ",", "namespace", "=", "None", ")", ":", "# type: (Optional[Text], Optional[Text]) -> BaseBackend", "backend", "=", "backend", "or", "ORGS_REGISTRATION_BACKEND", "class_module", ",", "class_name", "=", "backend", ".", "rsplit", "(", "\".\"", ",", "1", ")", "mod", "=", "import_module", "(", "class_module", ")", "return", "getattr", "(", "mod", ",", "class_name", ")", "(", "namespace", "=", "namespace", ")" ]
String representation of the text
def text ( self ) -> str : return self . export ( output = Mimetypes . PLAINTEXT , exclude = self . default_exclude )
7,540
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L62-L67
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Set the DC Creator literal value
def set_creator ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : self . metadata . add ( key = DC . creator , value = value , lang = lang )
7,541
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L98-L104
[ "def", "uninstall", "(", "self", ",", "xmlpath", ")", ":", "from", "os", "import", "path", "fullpath", "=", "path", ".", "abspath", "(", "path", ".", "expanduser", "(", "xmlpath", ")", ")", "if", "fullpath", "in", "self", ".", "installed", ":", "repo", "=", "RepositorySettings", "(", "self", ",", "fullpath", ")", "if", "repo", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "repositories", ":", "del", "self", ".", "repositories", "[", "repo", ".", "name", ".", "lower", "(", ")", "]", "if", "repo", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "archive", ":", "del", "self", ".", "archive", "[", "repo", ".", "name", ".", "lower", "(", ")", "]", "self", ".", "_save_archive", "(", ")", "self", ".", "installed", ".", "remove", "(", "fullpath", ")", "self", ".", "_save_installed", "(", ")", "else", ":", "warn", "(", "\"The repository at {} was not installed to begin with.\"", ".", "format", "(", "fullpath", ")", ")" ]
Set the DC Title literal value
def set_title ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . title , value = value , lang = lang )
7,542
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L116-L122
[ "def", "_sync", "(", "self", ")", ":", "if", "(", "self", ".", "_opcount", ">", "self", ".", "checkpoint_operations", "or", "datetime", ".", "now", "(", ")", ">", "self", ".", "_last_sync", "+", "self", ".", "checkpoint_timeout", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Synchronizing queue metadata.\"", ")", "self", ".", "queue_metadata", ".", "sync", "(", ")", "self", ".", "_last_sync", "=", "datetime", ".", "now", "(", ")", "self", ".", "_opcount", "=", "0", "else", ":", "self", ".", "log", ".", "debug", "(", "\"NOT synchronizing queue metadata.\"", ")" ]
Get the description of the object
def get_description ( self , lang : str = None ) -> Literal : return self . metadata . get_single ( key = DC . description , lang = lang )
7,543
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L124-L131
[ "def", "_handle_fetch_response", "(", "self", ",", "request", ",", "send_time", ",", "response", ")", ":", "fetch_offsets", "=", "{", "}", "for", "topic", ",", "partitions", "in", "request", ".", "topics", ":", "for", "partition_data", "in", "partitions", ":", "partition", ",", "offset", "=", "partition_data", "[", ":", "2", "]", "fetch_offsets", "[", "TopicPartition", "(", "topic", ",", "partition", ")", "]", "=", "offset", "partitions", "=", "set", "(", "[", "TopicPartition", "(", "topic", ",", "partition_data", "[", "0", "]", ")", "for", "topic", ",", "partitions", "in", "response", ".", "topics", "for", "partition_data", "in", "partitions", "]", ")", "metric_aggregator", "=", "FetchResponseMetricAggregator", "(", "self", ".", "_sensors", ",", "partitions", ")", "# randomized ordering should improve balance for short-lived consumers", "random", ".", "shuffle", "(", "response", ".", "topics", ")", "for", "topic", ",", "partitions", "in", "response", ".", "topics", ":", "random", ".", "shuffle", "(", "partitions", ")", "for", "partition_data", "in", "partitions", ":", "tp", "=", "TopicPartition", "(", "topic", ",", "partition_data", "[", "0", "]", ")", "completed_fetch", "=", "CompletedFetch", "(", "tp", ",", "fetch_offsets", "[", "tp", "]", ",", "response", ".", "API_VERSION", ",", "partition_data", "[", "1", ":", "]", ",", "metric_aggregator", ")", "self", ".", "_completed_fetches", ".", "append", "(", "completed_fetch", ")", "if", "response", ".", "API_VERSION", ">=", "1", ":", "self", ".", "_sensors", ".", "fetch_throttle_time_sensor", ".", "record", "(", "response", ".", "throttle_time_ms", ")", "self", ".", "_sensors", ".", "fetch_latency", ".", "record", "(", "(", "time", ".", "time", "(", ")", "-", "send_time", ")", "*", "1000", ")" ]
Set the DC Description literal value
def set_description ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . description , value = value , lang = lang )
7,544
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L133-L139
[ "def", "sample", "(", "self", ",", "event", "=", "None", ",", "record_keepalive", "=", "False", ")", ":", "url", "=", "'https://stream.twitter.com/1.1/statuses/sample.json'", "params", "=", "{", "\"stall_warning\"", ":", "True", "}", "headers", "=", "{", "'accept-encoding'", ":", "'deflate, gzip'", "}", "errors", "=", "0", "while", "True", ":", "try", ":", "log", ".", "info", "(", "\"connecting to sample stream\"", ")", "resp", "=", "self", ".", "post", "(", "url", ",", "params", ",", "headers", "=", "headers", ",", "stream", "=", "True", ")", "errors", "=", "0", "for", "line", "in", "resp", ".", "iter_lines", "(", "chunk_size", "=", "512", ")", ":", "if", "event", "and", "event", ".", "is_set", "(", ")", ":", "log", ".", "info", "(", "\"stopping sample\"", ")", "# Explicitly close response", "resp", ".", "close", "(", ")", "return", "if", "line", "==", "\"\"", ":", "log", ".", "info", "(", "\"keep-alive\"", ")", "if", "record_keepalive", ":", "yield", "\"keep-alive\"", "continue", "try", ":", "yield", "json", ".", "loads", "(", "line", ".", "decode", "(", ")", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"json parse error: %s - %s\"", ",", "e", ",", "line", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "errors", "+=", "1", "log", ".", "error", "(", "\"caught http error %s on %s try\"", ",", "e", ",", "errors", ")", "if", "self", ".", "http_errors", "and", "errors", "==", "self", ".", "http_errors", ":", "log", ".", "warning", "(", "\"too many errors\"", ")", "raise", "e", "if", "e", ".", "response", ".", "status_code", "==", "420", ":", "if", "interruptible_sleep", "(", "errors", "*", "60", ",", "event", ")", ":", "log", ".", "info", "(", "\"stopping filter\"", ")", "return", "else", ":", "if", "interruptible_sleep", "(", "errors", "*", "5", ",", "event", ")", ":", "log", ".", "info", "(", "\"stopping filter\"", ")", "return", "except", "Exception", "as", "e", ":", "errors", "+=", "1", "log", ".", 
"error", "(", "\"caught exception %s on %s try\"", ",", "e", ",", "errors", ")", "if", "self", ".", "http_errors", "and", "errors", "==", "self", ".", "http_errors", ":", "log", ".", "warning", "(", "\"too many errors\"", ")", "raise", "e", "if", "interruptible_sleep", "(", "errors", ",", "event", ")", ":", "log", ".", "info", "(", "\"stopping filter\"", ")", "return" ]
Set the DC Subject literal value
def set_subject ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) : return self . metadata . add ( key = DC . subject , value = value , lang = lang )
7,545
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L150-L156
[ "def", "stop_experiment", "(", "args", ")", ":", "experiment_id_list", "=", "parse_ids", "(", "args", ")", "if", "experiment_id_list", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "for", "experiment_id", "in", "experiment_id_list", ":", "print_normal", "(", "'Stoping experiment %s'", "%", "experiment_id", ")", "nni_config", "=", "Config", "(", "experiment_dict", "[", "experiment_id", "]", "[", "'fileName'", "]", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "rest_pid", ":", "kill_command", "(", "rest_pid", ")", "tensorboard_pid_list", "=", "nni_config", ".", "get_config", "(", "'tensorboardPidList'", ")", "if", "tensorboard_pid_list", ":", "for", "tensorboard_pid", "in", "tensorboard_pid_list", ":", "try", ":", "kill_command", "(", "tensorboard_pid", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "nni_config", ".", "set_config", "(", "'tensorboardPidList'", ",", "[", "]", ")", "print_normal", "(", "'Stop experiment success!'", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'status'", ",", "'STOPPED'", ")", "time_now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'endTime'", ",", "str", "(", "time_now", ")", ")" ]
Identifiers of children
def childIds ( self ) -> BaseReferenceSet : if self . _childIds is None : self . _childIds = self . getReffs ( ) return self . _childIds
7,546
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L326-L333
[ "def", "_do_http", "(", "opts", ",", "profile", "=", "'default'", ")", ":", "ret", "=", "{", "}", "url", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:url'", ".", "format", "(", "profile", ")", ",", "''", ")", "user", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:user'", ".", "format", "(", "profile", ")", ",", "''", ")", "passwd", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:pass'", ".", "format", "(", "profile", ")", ",", "''", ")", "realm", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:realm'", ".", "format", "(", "profile", ")", ",", "''", ")", "timeout", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:timeout'", ".", "format", "(", "profile", ")", ",", "''", ")", "if", "not", "url", ":", "raise", "Exception", "(", "'missing url in profile {0}'", ".", "format", "(", "profile", ")", ")", "if", "user", "and", "passwd", ":", "auth", "=", "_auth", "(", "url", "=", "url", ",", "realm", "=", "realm", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ")", "_install_opener", "(", "auth", ")", "url", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "for", "line", "in", "_urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "splt", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "splt", "[", "0", "]", "in", "ret", ":", "ret", "[", "splt", "[", "0", "]", "]", "+=", "',{0}'", ".", "format", "(", "splt", "[", "1", "]", ")", "else", ":", "ret", "[", "splt", "[", "0", "]", "]", "=", "splt", "[", "1", "]", "return", "ret" ]
First child s id of current TextualNode
def firstId ( self ) -> BaseReference : if self . childIds is not None : if len ( self . childIds ) > 0 : return self . childIds [ 0 ] return None else : raise NotImplementedError
7,547
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L336-L344
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Last child s id of current TextualNode
def lastId ( self ) -> BaseReference : if self . childIds is not None : if len ( self . childIds ) > 0 : return self . childIds [ - 1 ] return None else : raise NotImplementedError
7,548
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L347-L355
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Get the set of words used anywhere in a sequence of documents and assign an integer id
def compile_vocab ( docs , limit = 1e6 , verbose = 0 , tokenizer = Tokenizer ( stem = None , lower = None , strip = None ) ) : tokenizer = make_tokenizer ( tokenizer ) d = Dictionary ( ) try : limit = min ( limit , docs . count ( ) ) docs = docs . iterator ( ) except ( AttributeError , TypeError ) : pass for i , doc in enumerate ( docs ) : # if isinstance(doc, (tuple, list)) and len(doc) == 2 and isinstance(doc[1], int): # doc, score = docs try : # in case docs is a values() queryset (dicts of records in a DB table) doc = doc . values ( ) except AttributeError : # doc already is a values_list if not isinstance ( doc , str ) : doc = ' ' . join ( [ str ( v ) for v in doc ] ) else : doc = str ( doc ) if i >= limit : break d . add_documents ( [ list ( tokenizer ( doc ) ) ] ) if verbose and not i % 100 : log . info ( '{}: {}' . format ( i , repr ( d ) [ : 120 ] ) ) return d
7,549
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/util.py#L274-L314
[ "def", "_check_curtailment_target", "(", "curtailment", ",", "curtailment_target", ",", "curtailment_key", ")", ":", "if", "not", "(", "abs", "(", "curtailment", ".", "sum", "(", "axis", "=", "1", ")", "-", "curtailment_target", ")", "<", "1e-1", ")", ".", "all", "(", ")", ":", "message", "=", "'Curtailment target not met for {}.'", ".", "format", "(", "curtailment_key", ")", "logging", ".", "error", "(", "message", ")", "raise", "TypeError", "(", "message", ")" ]
Generate a sequence of documents from the lines in a file
def gen_file_lines ( path , mode = 'rUb' , strip_eol = True , ascii = True , eol = '\n' ) : if isinstance ( path , str ) : path = open ( path , mode ) with path : # TODO: read one char at a time looking for the eol char and yielding the interveening chars for line in path : if ascii : line = str ( line ) if strip_eol : line = line . rstrip ( '\n' ) yield line
7,550
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/util.py#L332-L354
[ "def", "_visit_te_shape", "(", "self", ",", "shape", ":", "ShExJ", ".", "shapeExpr", ",", "visit_center", ":", "_VisitorCenter", ")", "->", "None", ":", "if", "isinstance", "(", "shape", ",", "ShExJ", ".", "Shape", ")", "and", "shape", ".", "expression", "is", "not", "None", ":", "visit_center", ".", "f", "(", "visit_center", ".", "arg_cntxt", ",", "shape", ".", "expression", ",", "self", ")" ]
Decorator to register filters for given inventory . For a function abc it has the same effect
def inventory ( self , inventory_name ) : def decorator ( f ) : self . add ( func = f , inventory_name = inventory_name ) return f return decorator
7,551
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/utils.py#L45-L68
[ "def", "lock", "(", "self", ")", ":", "component", "=", "self", ".", "component", "while", "True", ":", "if", "isinstance", "(", "component", ",", "smartcard", ".", "pcsc", ".", "PCSCCardConnection", ".", "PCSCCardConnection", ")", ":", "hresult", "=", "SCardBeginTransaction", "(", "component", ".", "hcard", ")", "if", "0", "!=", "hresult", ":", "raise", "CardConnectionException", "(", "'Failed to lock with SCardBeginTransaction: '", "+", "SCardGetErrorMessage", "(", "hresult", ")", ")", "else", ":", "# print('locked')", "pass", "break", "if", "hasattr", "(", "component", ",", "'component'", ")", ":", "component", "=", "component", ".", "component", "else", ":", "break" ]
Dispatch a collection using internal filters
def dispatch ( self , collection , * * kwargs ) : for inventory , method in self . methods [ : : - 1 ] : if method ( collection , * * kwargs ) is True : collection . parent = self . collection . children [ inventory ] return raise UndispatchedTextError ( "CapitainsCtsText not dispatched %s" % collection . id )
7,552
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/utils.py#L70-L82
[ "def", "default_combine_filenames_generator", "(", "filenames", ",", "max_length", "=", "40", ")", ":", "path", "=", "None", "names", "=", "[", "]", "extension", "=", "None", "timestamps", "=", "[", "]", "for", "filename", "in", "filenames", ":", "name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "if", "not", "extension", ":", "extension", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", "elif", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", "!=", "extension", ":", "raise", "ValueError", "(", "\"Can't combine multiple file extensions\"", ")", "for", "each", "in", "re", ".", "finditer", "(", "'\\.\\d{10}\\.'", ",", "name", ")", ":", "timestamps", ".", "append", "(", "int", "(", "each", ".", "group", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", ")", ")", "name", "=", "name", ".", "replace", "(", "each", ".", "group", "(", ")", ",", "'.'", ")", "name", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "0", "]", "names", ".", "append", "(", "name", ")", "if", "path", "is", "None", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "else", ":", "if", "len", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "<", "len", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "new_filename", "=", "'_'", ".", "join", "(", "names", ")", "if", "timestamps", ":", "new_filename", "+=", "\".%s\"", "%", "max", "(", "timestamps", ")", "new_filename", "=", "new_filename", "[", ":", "max_length", "]", "new_filename", "+=", "extension", "return", "os", ".", "path", ".", "join", "(", "path", ",", "new_filename", ")" ]
r Return a sequence of words or tokens using a re . match iteratively through the str
def generate_tokens ( doc , regex = CRE_TOKEN , strip = True , nonwords = False ) : if isinstance ( regex , basestring ) : regex = re . compile ( regex ) for w in regex . finditer ( doc ) : if w : w = w . group ( ) if strip : w = w . strip ( r'-_*`()}{' + r"'" ) if w and ( nonwords or not re . match ( r'^' + RE_NONWORD + '$' , w ) ) : yield w
7,553
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L32-L59
[ "def", "cart_db", "(", ")", ":", "config", "=", "_config_file", "(", ")", "_config_test", "(", "config", ")", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Establishing cart connection:\"", ")", "cart_con", "=", "MongoClient", "(", "dict", "(", "config", ".", "items", "(", "config", ".", "sections", "(", ")", "[", "0", "]", ")", ")", "[", "'cart_host'", "]", ")", "cart_db", "=", "cart_con", ".", "carts", "return", "cart_db" ]
Strip dollar signs and commas from financial numerical string
def financial_float ( s , scale_factor = 1 , typ = float , ignore = FINANCIAL_WHITESPACE , percent_str = PERCENT_SYMBOLS , replace = FINANCIAL_MAPPING , normalize_case = str . lower ) : percent_scale_factor = 1 if isinstance ( s , basestring ) : s = normalize_case ( s ) . strip ( ) for i in ignore : s = s . replace ( normalize_case ( i ) , '' ) s = s . strip ( ) for old , new in replace : s = s . replace ( old , new ) for p in percent_str : if s . endswith ( p ) : # %% will become 0.0001 percent_scale_factor *= 0.01 s = s [ : - len ( p ) ] try : return ( scale_factor if scale_factor < 1 else percent_scale_factor ) * typ ( float ( s ) ) except ( ValueError , TypeError ) : return s
7,554
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L62-L90
[ "def", "clear", "(", "self", ",", "page_size", "=", "10", ",", "vtimeout", "=", "10", ")", ":", "n", "=", "0", "l", "=", "self", ".", "get_messages", "(", "page_size", ",", "vtimeout", ")", "while", "l", ":", "for", "m", "in", "l", ":", "self", ".", "delete_message", "(", "m", ")", "n", "+=", "1", "l", "=", "self", ".", "get_messages", "(", "page_size", ",", "vtimeout", ")", "return", "n" ]
Return boolean to indicate whether date is invalid None if valid False if not a date
def is_invalid_date ( d ) : if not isinstance ( d , DATE_TYPES ) : return False if d . year < 1970 or d . year >= 2100 : return True
7,555
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L123-L140
[ "def", "__store_recent_file", "(", "self", ",", "file", ")", ":", "LOGGER", ".", "debug", "(", "\"> Storing '{0}' file in recent files.\"", ".", "format", "(", "file", ")", ")", "recentFiles", "=", "[", "foundations", ".", "strings", ".", "to_string", "(", "recentFile", ")", "for", "recentFile", "in", "self", ".", "__settings", ".", "get_key", "(", "self", ".", "__settings_section", ",", "\"recentFiles\"", ")", ".", "toStringList", "(", ")", "if", "foundations", ".", "common", ".", "path_exists", "(", "recentFile", ")", "]", "if", "not", "recentFiles", ":", "recentFiles", "=", "[", "]", "if", "file", "in", "recentFiles", ":", "recentFiles", ".", "pop", "(", "recentFiles", ".", "index", "(", "file", ")", ")", "recentFiles", ".", "insert", "(", "0", ",", "file", ")", "del", "recentFiles", "[", "self", ".", "__maximum_recent_files", ":", "]", "recentFiles", "=", "self", ".", "__settings", ".", "set_key", "(", "self", ".", "__settings_section", ",", "\"recentFiles\"", ",", "recentFiles", ")", "self", ".", "recent_files_changed", ".", "emit", "(", ")" ]
Get the set of words used anywhere in a sequence of documents and count occurrences
def vocab_freq ( docs , limit = 1e6 , verbose = 1 , tokenizer = generate_tokens ) : total = Counter ( ) try : limit = min ( limit , docs . count ( ) ) docs = docs . iterator ( ) except : pass for i , doc in enumerate ( docs ) : try : doc = doc . values ( ) except AttributeError : if not isinstance ( doc , basestring ) : doc = ' ' . join ( [ stringify ( v ) for v in doc ] ) else : doc = stringify ( doc ) if i >= limit : break c = Counter ( tokenizer ( doc , strip = True , nonwords = False ) ) if verbose and ( verbose < 1e-3 or not i % int ( limit * verbose ) ) : print ( '{}: {} ... {}' . format ( i , c . keys ( ) [ : 3 ] , c . keys ( ) [ - 3 : ] if len ( c . keys ( ) ) > 6 else '' ) ) total += c return total
7,556
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L315-L342
[ "def", "_shutdown", "(", "self", ")", ":", "for", "exit_handler", "in", "self", ".", "_exit_handlers", ":", "exit_handler", "(", ")", "if", "self", ".", "_socket", ":", "self", ".", "_socket", ".", "close", "(", ")", "self", ".", "_socket", "=", "None" ]
r Make sure the provided string is a valid filename and optionally remove whitespace
def make_filename ( s , allow_whitespace = False , allow_underscore = False , allow_hyphen = False , limit = 255 , lower = False ) : s = stringify ( s ) s = CRE_BAD_FILENAME . sub ( '' , s ) if not allow_whitespace : s = CRE_WHITESPACE . sub ( '' , s ) if lower : s = str . lower ( s ) if not allow_hyphen : s = s . replace ( '-' , '' ) if not allow_underscore : s = s . replace ( '_' , '' ) if limit is not None : s = s [ : limit ] return s or 'empty' [ : limit ]
7,557
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L380-L404
[ "def", "_morph", "(", "self", ")", ":", "batches", "=", "self", ".", "batches", "self", ".", "__class__", "=", "Executor", "self", ".", "__init__", "(", "[", "]", ")", "self", ".", "batches", "=", "batches" ]
This should make the Stemmer picklable and unpicklable by not using bound methods
def stem ( self , s ) : if self . _stemmer is None : return passthrough ( s ) try : # try the local attribute `stemmer`, a StemmerI instance first # if you use the self.stem method from an unpickled object it may not work return getattr ( getattr ( self , '_stemmer' , None ) , 'stem' , None ) ( s ) except ( AttributeError , TypeError ) : return getattr ( getattr ( self , '_stemmer' , self ) , 'lemmatize' , passthrough ) ( s )
7,558
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/nlp.py#L235-L244
[ "def", "flush", "(", "self", ")", ":", "writer", "=", "self", ".", "writer", "if", "writer", "is", "None", ":", "raise", "GaugedUseAfterFreeError", "self", ".", "flush_writer_position", "(", ")", "keys", "=", "self", ".", "translate_keys", "(", ")", "blocks", "=", "[", "]", "current_block", "=", "self", ".", "current_block", "statistics", "=", "self", ".", "statistics", "driver", "=", "self", ".", "driver", "flags", "=", "0", "# for future extensions, e.g. block compression", "for", "namespace", ",", "key", ",", "block", "in", "self", ".", "pending_blocks", "(", ")", ":", "length", "=", "block", ".", "byte_length", "(", ")", "if", "not", "length", ":", "continue", "key_id", "=", "keys", "[", "(", "namespace", ",", "key", ")", "]", "statistics", "[", "namespace", "]", ".", "byte_count", "+=", "length", "blocks", ".", "append", "(", "(", "namespace", ",", "current_block", ",", "key_id", ",", "block", ".", "buffer", "(", ")", ",", "flags", ")", ")", "if", "self", ".", "config", ".", "overwrite_blocks", ":", "driver", ".", "replace_blocks", "(", "blocks", ")", "else", ":", "driver", ".", "insert_or_append_blocks", "(", "blocks", ")", "if", "not", "Gauged", ".", "writer_flush_maps", "(", "writer", ",", "True", ")", ":", "raise", "MemoryError", "update_namespace", "=", "driver", ".", "add_namespace_statistics", "for", "namespace", ",", "stats", "in", "statistics", ".", "iteritems", "(", ")", ":", "update_namespace", "(", "namespace", ",", "self", ".", "current_block", ",", "stats", ".", "data_points", ",", "stats", ".", "byte_count", ")", "statistics", ".", "clear", "(", ")", "driver", ".", "commit", "(", ")", "self", ".", "flush_now", "=", "False" ]
Return a new vector with value associated at index . The implicit parameter is not modified .
def assoc ( self , index , value ) : newvec = ImmutableVector ( ) newvec . tree = self . tree . assoc ( index , value ) if index >= self . _length : newvec . _length = index + 1 else : newvec . _length = self . _length return newvec
7,559
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/vector.py#L14-L23
[ "def", "on_train_end", "(", "self", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "train_start", "print", "(", "'done, took {:.3f} seconds'", ".", "format", "(", "duration", ")", ")" ]
Returns the result of concatenating tailvec to the implicit parameter
def concat ( self , tailvec ) : newvec = ImmutableVector ( ) vallist = [ ( i + self . _length , tailvec [ i ] ) for i in range ( 0 , tailvec . _length ) ] newvec . tree = self . tree . multi_assoc ( vallist ) newvec . _length = self . _length + tailvec . _length return newvec
7,560
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/vector.py#L25-L33
[ "def", "load", "(", "self", ")", ":", "try", ":", "if", "os", ".", "path", ".", "getsize", "(", "self", ".", "state_file", ")", "<=", "1", ":", "raise", "IOError", "(", "\"File is empty.\"", ")", "with", "open", "(", "self", ".", "state_file", ")", "as", "fh", ":", "state", "=", "json", ".", "load", "(", "fh", ")", "assert", "isinstance", "(", "state", ",", "dict", ")", "self", ".", "hosts", "=", "state", "[", "'hosts'", "]", "self", ".", "stats", "=", "state", "[", "'stats'", "]", "for", "key", "in", "self", ".", "stats", ":", "self", ".", "stats", "[", "key", "]", "[", "'open_requests'", "]", "=", "0", "except", "(", "IOError", ",", "OSError", ")", ":", "# There is no state file; start empty.", "self", ".", "hosts", "=", "{", "}", "self", ".", "stats", "=", "{", "}" ]
Return a new ImmutableVector with the last item removed .
def pop ( self ) : if self . _length == 0 : raise IndexError ( ) newvec = ImmutableVector ( ) newvec . tree = self . tree . remove ( self . _length - 1 ) newvec . _length = self . _length - 1 return newvec
7,561
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/vector.py#L35-L42
[ "def", "run_command", "(", "self", ",", "cmd", ",", "input_data", "=", "None", ")", ":", "kwargs", "=", "{", "'stdout'", ":", "subprocess", ".", "PIPE", ",", "'stderr'", ":", "subprocess", ".", "PIPE", ",", "}", "if", "input_data", "is", "not", "None", ":", "kwargs", "[", "'stdin'", "]", "=", "subprocess", ".", "PIPE", "stdout", "=", "[", "]", "stderr", "=", "[", "]", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "*", "*", "kwargs", ")", "# We don't use communicate() here because we may need to", "# get clever with interacting with the command", "t1", "=", "Thread", "(", "target", "=", "self", ".", "_reader", ",", "args", "=", "(", "'stdout'", ",", "p", ".", "stdout", ",", "stdout", ")", ")", "t1", ".", "start", "(", ")", "t2", "=", "Thread", "(", "target", "=", "self", ".", "_reader", ",", "args", "=", "(", "'stderr'", ",", "p", ".", "stderr", ",", "stderr", ")", ")", "t2", ".", "start", "(", ")", "if", "input_data", "is", "not", "None", ":", "p", ".", "stdin", ".", "write", "(", "input_data", ")", "p", ".", "stdin", ".", "close", "(", ")", "p", ".", "wait", "(", ")", "t1", ".", "join", "(", ")", "t2", ".", "join", "(", ")", "return", "p", ".", "returncode", ",", "stdout", ",", "stderr" ]
Retrieve and parse a text given an identifier
def read ( self , identifier , path ) : with open ( path ) as f : o = self . classes [ "text" ] ( urn = identifier , resource = self . xmlparse ( f ) ) return o
7,562
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L111-L123
[ "def", "clean_new_password2", "(", "self", ")", ":", "password1", "=", "self", ".", "cleaned_data", ".", "get", "(", "'new_password1'", ")", "password2", "=", "self", ".", "cleaned_data", ".", "get", "(", "'new_password2'", ")", "if", "password1", "or", "password2", ":", "if", "password1", "!=", "password2", ":", "raise", "forms", ".", "ValidationError", "(", "self", ".", "error_messages", "[", "'password_mismatch'", "]", ",", "code", "=", "'password_mismatch'", ",", ")", "password_validation", ".", "validate_password", "(", "password2", ",", "self", ".", "instance", ")", "return", "password2" ]
Parses a textgroup from a cts file
def _parse_textgroup ( self , cts_file ) : with io . open ( cts_file ) as __xml__ : return self . classes [ "textgroup" ] . parse ( resource = __xml__ ) , cts_file
7,563
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L139-L149
[ "def", "open", "(", "self", ",", "pysession_id", ")", ":", "self", ".", "id", "=", "id", "(", "self", ")", "self", ".", "funcserver", "=", "self", ".", "application", ".", "funcserver", "self", ".", "pysession_id", "=", "pysession_id", "# register this connection with node", "self", ".", "state", "=", "self", ".", "funcserver", ".", "websocks", "[", "self", ".", "id", "]", "=", "{", "'id'", ":", "self", ".", "id", ",", "'sock'", ":", "self", "}" ]
Parses a work from a cts file
def _parse_work ( self , cts_file , textgroup ) : with io . open ( cts_file ) as __xml__ : work , texts = self . classes [ "work" ] . parse ( resource = __xml__ , parent = textgroup , _with_children = True ) return work , texts , os . path . dirname ( cts_file )
7,564
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L167-L183
[ "def", "_PythonValueToJsonValue", "(", "py_value", ")", ":", "if", "py_value", "is", "None", ":", "return", "JsonValue", "(", "is_null", "=", "True", ")", "if", "isinstance", "(", "py_value", ",", "bool", ")", ":", "return", "JsonValue", "(", "boolean_value", "=", "py_value", ")", "if", "isinstance", "(", "py_value", ",", "six", ".", "string_types", ")", ":", "return", "JsonValue", "(", "string_value", "=", "py_value", ")", "if", "isinstance", "(", "py_value", ",", "numbers", ".", "Number", ")", ":", "if", "isinstance", "(", "py_value", ",", "six", ".", "integer_types", ")", ":", "if", "_MININT64", "<", "py_value", "<", "_MAXINT64", ":", "return", "JsonValue", "(", "integer_value", "=", "py_value", ")", "return", "JsonValue", "(", "double_value", "=", "float", "(", "py_value", ")", ")", "if", "isinstance", "(", "py_value", ",", "dict", ")", ":", "return", "JsonValue", "(", "object_value", "=", "_PythonValueToJsonObject", "(", "py_value", ")", ")", "if", "isinstance", "(", "py_value", ",", "collections", ".", "Iterable", ")", ":", "return", "JsonValue", "(", "array_value", "=", "_PythonValueToJsonArray", "(", "py_value", ")", ")", "raise", "exceptions", ".", "InvalidDataError", "(", "'Cannot convert \"%s\" to JsonValue'", "%", "py_value", ")" ]
Complete the TextMetadata object with its citation scheme by parsing the original text
def _parse_text ( self , text , directory ) : text_id , text_metadata = text . id , text text_metadata . path = "{directory}/{textgroup}.{work}.{version}.xml" . format ( directory = directory , textgroup = text_metadata . urn . textgroup , work = text_metadata . urn . work , version = text_metadata . urn . version ) if os . path . isfile ( text_metadata . path ) : try : text = self . read ( text_id , path = text_metadata . path ) cites = list ( ) for cite in [ c for c in text . citation ] [ : : - 1 ] : if len ( cites ) >= 1 : cites . append ( self . classes [ "citation" ] ( xpath = cite . xpath . replace ( "'" , '"' ) , scope = cite . scope . replace ( "'" , '"' ) , name = cite . name , child = cites [ - 1 ] ) ) else : cites . append ( self . classes [ "citation" ] ( xpath = cite . xpath . replace ( "'" , '"' ) , scope = cite . scope . replace ( "'" , '"' ) , name = cite . name ) ) del text text_metadata . citation = cites [ - 1 ] self . logger . info ( "%s has been parsed " , text_metadata . path ) if not text_metadata . citation . is_set ( ) : self . logger . error ( "%s has no passages" , text_metadata . path ) return False return True except Exception : self . logger . error ( "%s does not accept parsing at some level (most probably citation) " , text_metadata . path ) return False else : self . logger . error ( "%s is not present" , text_metadata . path ) return False
7,565
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L185-L235
[ "def", "run_socket_event_loop", "(", "self", ")", ":", "try", ":", "while", "True", ":", "self", ".", "_pool", ".", "join", "(", ")", "# If we have no loggers we'll sleep briefly to ensure that we", "# allow other processes (I.e., the webserver) to do their work.", "if", "len", "(", "self", ".", "_logger_data", ".", "keys", "(", ")", ")", "==", "0", ":", "time", ".", "sleep", "(", "0.5", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "self", ".", "_pool", ".", "kill", "(", ")" ]
Run the dispatcher over a textgroup .
def _dispatch ( self , textgroup , directory ) : if textgroup . id in self . dispatcher . collection : self . dispatcher . collection [ textgroup . id ] . update ( textgroup ) else : self . dispatcher . dispatch ( textgroup , path = directory ) for work_urn , work in textgroup . works . items ( ) : if work_urn in self . dispatcher . collection [ textgroup . id ] . works : self . dispatcher . collection [ work_urn ] . update ( work )
7,566
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L237-L250
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Parse a list of directories and reads it into a collection
def parse ( self , resource ) : textgroups = [ ] texts = [ ] invalids = [ ] for folder in resource : cts_files = glob ( "{base_folder}/data/*/__cts__.xml" . format ( base_folder = folder ) ) for cts_file in cts_files : textgroup , cts_file = self . _parse_textgroup ( cts_file ) textgroups . append ( ( textgroup , cts_file ) ) for textgroup , cts_textgroup_file in textgroups : cts_work_files = glob ( "{parent}/*/__cts__.xml" . format ( parent = os . path . dirname ( cts_textgroup_file ) ) ) for cts_work_file in cts_work_files : _ , parsed_texts , directory = self . _parse_work ( cts_work_file , textgroup ) texts . extend ( [ ( text , directory ) for text in parsed_texts ] ) for text , directory in texts : # If text_id is not none, the text parsing errored if not self . _parse_text ( text , directory ) : invalids . append ( text ) # Dispatching routine for textgroup , textgroup_path in textgroups : self . _dispatch_container ( textgroup , textgroup_path ) # Clean invalids if there was a need self . _clean_invalids ( invalids ) self . inventory = self . dispatcher . collection return self . inventory
7,567
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L276-L312
[ "def", "make_compare", "(", "key", ",", "value", ",", "obj", ")", ":", "if", "'__'", "not", "in", "key", ":", "# If no __ exists, default to doing an \"exact\" comparison", "key", ",", "comp", "=", "key", ",", "'exact'", "else", ":", "key", ",", "comp", "=", "key", ".", "rsplit", "(", "'__'", ",", "1", ")", "# Check if comp is valid", "if", "hasattr", "(", "Compare", ",", "comp", ")", ":", "return", "getattr", "(", "Compare", ",", "comp", ")", "(", "key", ",", "value", ",", "obj", ")", "raise", "AttributeError", "(", "\"No comparison '%s'\"", "%", "comp", ")" ]
convert velocities to moduli mainly to support Burnman operations
def velocities_to_moduli ( rho , v_phi , v_s ) : return v_phi * v_phi * rho , v_s * v_s * rho
7,568
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/conversion.py#L5-L15
[ "def", "add_dataframe", "(", "df", ",", "name", ",", "pkg", "=", "None", ",", "description", "=", "''", ")", ":", "from", "warnings", "import", "warn", "from", "metapack", ".", "cli", ".", "core", "import", "alt_col_name", ",", "type_map", "import", "numpy", "as", "np", "if", "name", "is", "None", "or", "df", "is", "None", ":", "warn", "(", "\"Did not find dataframe for reference '{}' \"", ".", "format", "(", "ref", ")", ")", "return", "pkg", "=", "pkg", "or", "open_source_package", "(", ")", "resource_ref", "=", "'file:'", "+", "get_notebook_rel_path", "(", "pkg", ")", "+", "'#'", "+", "name", "t", "=", "pkg", ".", "find_first", "(", "'Root.Datafile'", ",", "value", "=", "resource_ref", ")", "col_props", "=", "{", "}", "if", "t", ":", "print", "(", "\"Datafile exists for url '{}', deleting\"", ".", "format", "(", "resource_ref", ")", ")", "if", "t", ".", "schema_term", ":", "col_props", "=", "{", "c", "[", "'name'", "]", ":", "c", "for", "c", "in", "t", ".", "columns", "(", ")", "}", "pkg", ".", "remove_term", "(", "t", ".", "schema_term", ")", "pkg", ".", "remove_term", "(", "t", ")", "t", "=", "pkg", "[", "'Resources'", "]", ".", "new_term", "(", "'Root.Datafile'", ",", "resource_ref", ",", "name", "=", "name", ",", "description", "=", "description", ")", "st", "=", "pkg", "[", "'Schema'", "]", ".", "new_term", "(", "'Table'", ",", "t", ".", "schema_name", ",", "description", "=", "description", ")", "for", "i", ",", "name", "in", "enumerate", "(", "df", ".", "columns", ")", ":", "props", "=", "col_props", ".", "get", "(", "name", ",", "{", "}", ")", "try", ":", "native_type", "=", "type", "(", "np", ".", "asscalar", "(", "df", "[", "name", "]", ".", "dtype", ".", "type", "(", "0", ")", ")", ")", ".", "__name__", "except", "ValueError", ":", "native_type", "=", "df", "[", "name", "]", ".", "dtype", ".", "name", "except", "AttributeError", ":", "native_type", "=", "type", "(", "df", "[", "name", "]", "[", "0", "]", ")", ".", "__name__", "for", "pn", "in", 
"'datatype name pos header'", ".", "split", "(", ")", ":", "if", "pn", "in", "props", ":", "del", "props", "[", "pn", "]", "if", "'altname'", "in", "props", ":", "altname", "=", "props", "[", "'altname'", "]", "del", "props", "[", "'altname'", "]", "else", ":", "raw_alt_name", "=", "alt_col_name", "(", "name", ",", "i", ")", "altname", "=", "raw_alt_name", "if", "raw_alt_name", "!=", "name", "else", "''", "col", "=", "df", "[", "name", "]", "if", "hasattr", "(", "col", ",", "'description'", ")", ":", "# custom property", "props", "[", "'description'", "]", "=", "col", ".", "description", "t", "=", "st", ".", "new_child", "(", "'Column'", ",", "name", ",", "datatype", "=", "type_map", ".", "get", "(", "native_type", ",", "native_type", ")", ",", "altname", "=", "altname", ",", "*", "*", "props", ")", "pkg", ".", "write_csv", "(", ")" ]
convert moduli to velocities mainly to support Burnman operations
def moduli_to_velocities ( rho , K_s , G ) : return np . sqrt ( K_s / rho ) , np . sqrt ( G / rho )
7,569
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/conversion.py#L18-L28
[ "def", "read", "(", "self", ",", "num_bytes", ")", ":", "while", "len", "(", "self", ".", "decoded", ")", "<", "num_bytes", ":", "try", ":", "tag", ",", "data", "=", "next", "(", "self", ".", "chunks", ")", "except", "StopIteration", ":", "raise", "EOFError", "(", ")", "if", "tag", "!=", "b'IDAT'", ":", "continue", "self", ".", "decoded", "+=", "self", ".", "decompressor", ".", "decompress", "(", "data", ")", "r", "=", "self", ".", "decoded", "[", ":", "num_bytes", "]", "self", ".", "decoded", "=", "self", ".", "decoded", "[", "num_bytes", ":", "]", "return", "r" ]
calculate static pressure at 300 K from Hugoniot data using the constq formulation
def jamieson_pst ( v , v0 , c0 , s , gamma0 , q , theta0 , n , z , mass , c_v , three_r = 3. * constants . R , t_ref = 300. ) : rho = mass / vol_uc2mol ( v , z ) * 1.e-6 rho0 = mass / vol_uc2mol ( v0 , z ) * 1.e-6 p_h = hugoniot_p ( rho , rho0 , c0 , s ) p_th_h = jamieson_pth ( v , v0 , c0 , s , gamma0 , q , theta0 , n , z , mass , c_v , three_r = three_r , t_ref = t_ref ) p_st = p_h - p_th_h return p_st
7,570
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_jamieson.py#L30-L59
[ "def", "multi_session", "(", "self", ")", ":", "_val", "=", "0", "if", "\"multi_session\"", "in", "self", ".", "_dict", ":", "_val", "=", "self", ".", "_dict", "[", "\"multi_session\"", "]", "if", "str", "(", "_val", ")", ".", "lower", "(", ")", "==", "'all'", ":", "_val", "=", "-", "1", "return", "int", "(", "_val", ")" ]
calculate thermal pressure from Hugoniot data using the constq formulation
def jamieson_pth ( v , v0 , c0 , s , gamma0 , q , theta0 , n , z , mass , c_v , three_r = 3. * constants . R , t_ref = 300. ) : rho = mass / vol_uc2mol ( v , z ) * 1.e-6 rho0 = mass / vol_uc2mol ( v0 , z ) * 1.e-6 temp = hugoniot_t ( rho , rho0 , c0 , s , gamma0 , q , theta0 , n , mass , three_r = three_r , t_ref = t_ref , c_v = c_v ) pth = constq_pth ( v , temp , v0 , gamma0 , q , theta0 , n , z , t_ref = t_ref , three_r = three_r ) return pth
7,571
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_jamieson.py#L62-L91
[ "def", "_sorted_keys", "(", "self", ",", "keys", ")", ":", "sorted_keys", "=", "[", "]", "if", "(", "'epoch'", "in", "keys", ")", "and", "(", "'epoch'", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "'epoch'", ")", "for", "key", "in", "sorted", "(", "keys", ")", ":", "if", "not", "(", "(", "key", "in", "(", "'epoch'", ",", "'dur'", ")", ")", "or", "(", "key", "in", "self", ".", "keys_ignored_", ")", "or", "key", ".", "endswith", "(", "'_best'", ")", "or", "key", ".", "startswith", "(", "'event_'", ")", ")", ":", "sorted_keys", ".", "append", "(", "key", ")", "for", "key", "in", "sorted", "(", "keys", ")", ":", "if", "key", ".", "startswith", "(", "'event_'", ")", "and", "(", "key", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "key", ")", "if", "(", "'dur'", "in", "keys", ")", "and", "(", "'dur'", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "'dur'", ")", "return", "sorted_keys" ]
calculate pressure along a Hugoniot throug nonlinear equations presented in Jameison 1982
def hugoniot_p_nlin ( rho , rho0 , a , b , c ) : eta = 1. - ( rho0 / rho ) Up = np . zeros_like ( eta ) if isuncertainties ( [ rho , rho0 , a , b , c ] ) : Up [ eta != 0. ] = ( ( b * eta - 1. ) + unp . sqrt ( np . power ( ( 1. - b * eta ) , 2. ) - 4. * np . power ( eta , 2. ) * a * c ) ) / ( - 2. * eta * c ) else : Up [ eta != 0. ] = ( ( b * eta - 1. ) + np . sqrt ( np . power ( ( 1. - b * eta ) , 2. ) - 4. * np . power ( eta , 2. ) * a * c ) ) / ( - 2. * eta * c ) Us = a + Up * b + Up * Up * c Ph = rho0 * Up * Us return Ph
7,572
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_jamieson.py#L94-L118
[ "def", "timedelta2period", "(", "duration", ")", ":", "seconds", "=", "duration", ".", "seconds", "minutes", "=", "(", "seconds", "%", "3600", ")", "//", "60", "seconds", "=", "(", "seconds", "%", "60", ")", "return", "'{0:0>2}:{1:0>2}'", ".", "format", "(", "minutes", ",", "seconds", ")" ]
Construct a list for address label .
def generate_address_label ( self ) : if self . organisation_name : self . address_label . append ( self . organisation_name ) if self . department_name : self . address_label . append ( self . department_name ) if self . po_box_number : self . address_label . append ( 'PO Box ' + self . po_box_number ) elements = [ self . sub_building_name , self . building_name , self . building_number , self . dependent_thoroughfare , self . thoroughfare , self . double_dependent_locality , self . dependent_locality , ] for element in elements : if element : self . _append_to_label ( element ) # pad label to length of 7 if not already if len ( self . address_label ) < 7 : for i in range ( 7 - len ( self . address_label ) ) : self . address_label . append ( '' ) # finally, add post town self . address_label [ 5 ] = self . post_town return ", " . join ( [ f for f in self . address_label if f ] )
7,573
https://github.com/DemocracyClub/uk-geo-utils/blob/ea5513968c85e93f004a3079342a62662357c2c9/uk_geo_utils/helpers.py#L82-L121
[ "def", "_merge", "(", "*", "args", ")", ":", "return", "re", ".", "compile", "(", "r'^'", "+", "r'[/-]'", ".", "join", "(", "args", ")", "+", "r'(?:\\s+'", "+", "_dow", "+", "')?$'", ")" ]
Check for exception rule .
def _is_exception_rule ( self , element ) : if element [ 0 ] . isdigit ( ) and element [ - 1 ] . isdigit ( ) : return True if len ( element ) > 1 and element [ 0 ] . isdigit ( ) and element [ - 2 ] . isdigit ( ) and element [ - 1 ] . isalpha ( ) : return True if len ( element ) == 1 and element . isalpha ( ) : return True return False
7,574
https://github.com/DemocracyClub/uk-geo-utils/blob/ea5513968c85e93f004a3079342a62662357c2c9/uk_geo_utils/helpers.py#L123-L143
[ "def", "_spintaylor_aligned_prec_swapper", "(", "*", "*", "p", ")", ":", "orig_approximant", "=", "p", "[", "'approximant'", "]", "if", "p", "[", "'spin2x'", "]", "==", "0", "and", "p", "[", "'spin2y'", "]", "==", "0", "and", "p", "[", "'spin1x'", "]", "==", "0", "and", "p", "[", "'spin1y'", "]", "==", "0", ":", "p", "[", "'approximant'", "]", "=", "'TaylorF2'", "else", ":", "p", "[", "'approximant'", "]", "=", "'SpinTaylorF2'", "hp", ",", "hc", "=", "_lalsim_fd_waveform", "(", "*", "*", "p", ")", "p", "[", "'approximant'", "]", "=", "orig_approximant", "return", "hp", ",", "hc" ]
Append address element to the label .
def _append_to_label ( self , element ) : if len ( self . address_label ) > 0 and self . _is_exception_rule ( self . address_label [ - 1 ] ) : self . address_label [ - 1 ] += ( ' ' + element ) else : self . address_label . append ( element )
7,575
https://github.com/DemocracyClub/uk-geo-utils/blob/ea5513968c85e93f004a3079342a62662357c2c9/uk_geo_utils/helpers.py#L145-L156
[ "def", "sync_blockchain", "(", "working_dir", ",", "bt_opts", ",", "last_block", ",", "server_state", ",", "expected_snapshots", "=", "{", "}", ",", "*", "*", "virtualchain_args", ")", ":", "subdomain_index", "=", "server_state", "[", "'subdomains'", "]", "atlas_state", "=", "server_state", "[", "'atlas'", "]", "# make this usable even if we haven't explicitly configured virtualchain ", "impl", "=", "sys", ".", "modules", "[", "__name__", "]", "log", ".", "info", "(", "\"Synchronizing database {} up to block {}\"", ".", "format", "(", "working_dir", ",", "last_block", ")", ")", "# NOTE: this is the only place where a read-write handle should be created,", "# since this is the only place where the db should be modified.", "new_db", "=", "BlockstackDB", ".", "borrow_readwrite_instance", "(", "working_dir", ",", "last_block", ",", "expected_snapshots", "=", "expected_snapshots", ")", "# propagate runtime state to virtualchain callbacks", "new_db", ".", "subdomain_index", "=", "subdomain_index", "new_db", ".", "atlas_state", "=", "atlas_state", "rc", "=", "virtualchain", ".", "sync_virtualchain", "(", "bt_opts", ",", "last_block", ",", "new_db", ",", "expected_snapshots", "=", "expected_snapshots", ",", "*", "*", "virtualchain_args", ")", "BlockstackDB", ".", "release_readwrite_instance", "(", "new_db", ",", "last_block", ")", "return", "rc" ]
Template loader that loads templates from a ZIP file .
def load_template_source ( template_name , template_dirs = None ) : template_zipfiles = getattr ( settings , "TEMPLATE_ZIP_FILES" , [ ] ) # Try each ZIP file in TEMPLATE_ZIP_FILES. for fname in template_zipfiles : try : z = zipfile . ZipFile ( fname ) source = z . read ( template_name ) except ( IOError , KeyError ) : continue z . close ( ) # We found a template, so return the source. template_path = "%s:%s" % ( fname , template_name ) return ( source , template_path ) # If we reach here, the template couldn't be loaded raise TemplateDoesNotExist ( template_name )
7,576
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/django_zip_template_loader.py#L6-L23
[ "def", "connections", "(", "self", ")", ":", "conn", "=", "lambda", "x", ":", "str", "(", "x", ")", ".", "replace", "(", "'connection:'", ",", "''", ")", "return", "[", "conn", "(", "name", ")", "for", "name", "in", "self", ".", "sections", "(", ")", "]" ]
Sanitize the capabilities we pass to Selenic so that they can be consumed by Browserstack .
def sanitize_capabilities ( caps ) : platform = caps [ "platform" ] upper_platform = platform . upper ( ) if upper_platform . startswith ( "WINDOWS 8" ) : caps [ "platform" ] = "WIN8" elif upper_platform . startswith ( "OS X " ) : caps [ "platform" ] = "MAC" elif upper_platform == "WINDOWS 10" : del caps [ "platform" ] caps [ "os" ] = "Windows" caps [ "os_version" ] = "10" if caps [ "browserName" ] . upper ( ) == "MICROSOFTEDGE" : # Sauce Labs takes complete version numbers like # 15.1234. However, Browser Stack takes only .0 numbers like # 15.0. caps [ "version" ] = caps [ "version" ] . split ( "." , 1 ) [ 0 ] + ".0" caps [ "browser_version" ] = caps [ "version" ] del caps [ "version" ] return caps
7,577
https://github.com/mangalam-research/selenic/blob/2284c68e15fa3d34b88aa2eec1a2e8ecd37f44ad/selenic/remote/browserstack.py#L154-L186
[ "def", "_read_prm_file", "(", "prm_filename", ")", ":", "logger", ".", "debug", "(", "\"Reading config-file: %s\"", "%", "prm_filename", ")", "try", ":", "with", "open", "(", "prm_filename", ",", "\"r\"", ")", "as", "config_file", ":", "prm_dict", "=", "yaml", ".", "load", "(", "config_file", ")", "except", "yaml", ".", "YAMLError", ":", "raise", "ConfigFileNotRead", "else", ":", "_update_prms", "(", "prm_dict", ")" ]
Enclosing function .
def my_func ( version ) : # noqa: D202 class MyClass ( object ) : """Enclosed class.""" if version == 2 : import docs . support . python2_module as pm else : import docs . support . python3_module as pm def __init__ ( self , value ) : self . _value = value def _get_value ( self ) : return self . _value value = property ( _get_value , pm . _set_value , None , "Value property" )
7,578
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/docs/support/pinspect_example_1.py#L10-L27
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Fetches all subscriptions from Membersuite of a particular publication_id if set .
def get_subscriptions ( self , publication_id = None , owner_id = None , since_when = None , limit_to = 200 , max_calls = None , start_record = 0 , verbose = False ) : query = "SELECT Objects() FROM Subscription" # collect all where parameters into a list of # (key, operator, value) tuples where_params = [ ] if owner_id : where_params . append ( ( 'owner' , '=' , "'%s'" % owner_id ) ) if publication_id : where_params . append ( ( 'publication' , '=' , "'%s'" % publication_id ) ) if since_when : d = datetime . date . today ( ) - datetime . timedelta ( days = since_when ) where_params . append ( ( 'LastModifiedDate' , ">" , "'%s 00:00:00'" % d ) ) if where_params : query += " WHERE " query += " AND " . join ( [ "%s %s %s" % ( p [ 0 ] , p [ 1 ] , p [ 2 ] ) for p in where_params ] ) subscription_list = self . get_long_query ( query , limit_to = limit_to , max_calls = max_calls , start_record = start_record , verbose = verbose ) return subscription_list
7,579
https://github.com/AASHE/python-membersuite-api-client/blob/221f5ed8bc7d4424237a4669c5af9edc11819ee9/membersuite_api_client/subscriptions/services.py#L27-L58
[ "def", "QRatio", "(", "s1", ",", "s2", ",", "force_ascii", "=", "True", ",", "full_process", "=", "True", ")", ":", "if", "full_process", ":", "p1", "=", "utils", ".", "full_process", "(", "s1", ",", "force_ascii", "=", "force_ascii", ")", "p2", "=", "utils", ".", "full_process", "(", "s2", ",", "force_ascii", "=", "force_ascii", ")", "else", ":", "p1", "=", "s1", "p2", "=", "s2", "if", "not", "utils", ".", "validate_string", "(", "p1", ")", ":", "return", "0", "if", "not", "utils", ".", "validate_string", "(", "p2", ")", ":", "return", "0", "return", "ratio", "(", "p1", ",", "p2", ")" ]
The psycopg adaptor returns Python objects but we also have to handle conversion ourselves
def get_prep_value ( self , value ) : if isinstance ( value , JSON . JsonDict ) : return json . dumps ( value , cls = JSON . Encoder ) if isinstance ( value , JSON . JsonList ) : return value . json_string if isinstance ( value , JSON . JsonString ) : return json . dumps ( value ) return value
7,580
https://github.com/aychedee/unchained/blob/11d03451ee5247e66b3d6a454e1bde71f81ae357/unchained/fields.py#L152-L162
[ "def", "__expire_files", "(", "self", ")", ":", "self", ".", "__files", "=", "OrderedDict", "(", "item", "for", "item", "in", "self", ".", "__files", ".", "items", "(", ")", "if", "not", "item", "[", "1", "]", ".", "expired", ")" ]
Generates a meta class to index sub classes by their keys .
def registry ( attr , base = type ) : class Registry ( base ) : def __init__ ( cls , name , bases , attrs ) : super ( Registry , cls ) . __init__ ( name , bases , attrs ) if not hasattr ( cls , '__registry__' ) : cls . __registry__ = { } key = getattr ( cls , attr ) if key is not NotImplemented : assert key not in cls . __registry__ cls . __registry__ [ key ] = cls def __dispatch__ ( cls , key ) : try : return cls . __registry__ [ key ] except KeyError : raise ValueError ( 'Unknown %s: %s' % ( attr , key ) ) return Registry
7,581
https://github.com/sublee/etc/blob/f2be64604da5af0d7739cfacf36f55712f0fc5cb/etc/helpers.py#L18-L34
[ "def", "cli", "(", "env", ",", "volume_id", ",", "lun_id", ")", ":", "block_storage_manager", "=", "SoftLayer", ".", "BlockStorageManager", "(", "env", ".", "client", ")", "res", "=", "block_storage_manager", ".", "create_or_update_lun_id", "(", "volume_id", ",", "lun_id", ")", "if", "'value'", "in", "res", "and", "lun_id", "==", "res", "[", "'value'", "]", ":", "click", ".", "echo", "(", "'Block volume with id %s is reporting LUN ID %s'", "%", "(", "res", "[", "'volumeId'", "]", ",", "res", "[", "'value'", "]", ")", ")", "else", ":", "click", ".", "echo", "(", "'Failed to confirm the new LUN ID on volume %s'", "%", "(", "volume_id", ")", ")" ]
Used for efficient debug logging where the actual message isn t evaluated unless it will actually be accepted by the logger .
def debug_generate ( self , debug_generator , * gen_args , * * gen_kwargs ) : if self . isEnabledFor ( logging . DEBUG ) : message = debug_generator ( * gen_args , * * gen_kwargs ) # Allow for content filtering to skip logging if message is not None : return self . debug ( message )
7,582
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/loggers/default.py#L22-L31
[ "def", "split", "(", "self", ",", "amount", ")", ":", "split_objs", "=", "list", "(", "self", ".", "all", "(", ")", ")", "if", "not", "split_objs", ":", "raise", "NoSplitsFoundForRecurringCost", "(", ")", "portions", "=", "[", "split_obj", ".", "portion", "for", "split_obj", "in", "split_objs", "]", "split_amounts", "=", "ratio_split", "(", "amount", ",", "portions", ")", "return", "[", "(", "split_objs", "[", "i", "]", ",", "split_amount", ")", "for", "i", ",", "split_amount", "in", "enumerate", "(", "split_amounts", ")", "]" ]
A function for validating an individual token .
def verify_token ( token , public_key_or_address , signing_algorithm = "ES256K" ) : decoded_token = decode_token ( token ) decoded_token_payload = decoded_token [ "payload" ] if "subject" not in decoded_token_payload : raise ValueError ( "Token doesn't have a subject" ) if "publicKey" not in decoded_token_payload [ "subject" ] : raise ValueError ( "Token doesn't have a subject public key" ) if "issuer" not in decoded_token_payload : raise ValueError ( "Token doesn't have an issuer" ) if "publicKey" not in decoded_token_payload [ "issuer" ] : raise ValueError ( "Token doesn't have an issuer public key" ) if "claim" not in decoded_token_payload : raise ValueError ( "Token doesn't have a claim" ) issuer_public_key = str ( decoded_token_payload [ "issuer" ] [ "publicKey" ] ) public_key_object = ECPublicKey ( issuer_public_key ) compressed_public_key = compress ( issuer_public_key ) decompressed_public_key = decompress ( issuer_public_key ) if public_key_object . _type == PubkeyType . compressed : compressed_address = public_key_object . address ( ) uncompressed_address = bin_hash160_to_address ( bin_hash160 ( decompress ( public_key_object . to_bin ( ) ) ) ) elif public_key_object . _type == PubkeyType . uncompressed : compressed_address = bin_hash160_to_address ( bin_hash160 ( compress ( public_key_object . to_bin ( ) ) ) ) uncompressed_address = public_key_object . address ( ) else : raise ValueError ( "Invalid issuer public key format" ) if public_key_or_address == compressed_public_key : pass elif public_key_or_address == decompressed_public_key : pass elif public_key_or_address == compressed_address : pass elif public_key_or_address == uncompressed_address : pass else : raise ValueError ( "Token public key doesn't match the verifying value" ) token_verifier = TokenVerifier ( ) if not token_verifier . verify ( token , public_key_object . to_pem ( ) ) : raise ValueError ( "Token was not signed by the issuer public key" ) return decoded_token
7,583
https://github.com/blockstack-packages/blockstack-profiles-py/blob/103783798df78cf0f007801e79ec6298f00b2817/blockstack_profiles/token_verifying.py#L18-L74
[ "def", "convert_mean", "(", "binaryproto_fname", ",", "output", "=", "None", ")", ":", "mean_blob", "=", "caffe_parser", ".", "caffe_pb2", ".", "BlobProto", "(", ")", "with", "open", "(", "binaryproto_fname", ",", "'rb'", ")", "as", "f", ":", "mean_blob", ".", "ParseFromString", "(", "f", ".", "read", "(", ")", ")", "img_mean_np", "=", "np", ".", "array", "(", "mean_blob", ".", "data", ")", "img_mean_np", "=", "img_mean_np", ".", "reshape", "(", "mean_blob", ".", "channels", ",", "mean_blob", ".", "height", ",", "mean_blob", ".", "width", ")", "# swap channels from Caffe BGR to RGB", "img_mean_np", "[", "[", "0", ",", "2", "]", ",", ":", ",", ":", "]", "=", "img_mean_np", "[", "[", "2", ",", "0", "]", ",", ":", ",", ":", "]", "nd", "=", "mx", ".", "nd", ".", "array", "(", "img_mean_np", ")", "if", "output", "is", "not", "None", ":", "mx", ".", "nd", ".", "save", "(", "output", ",", "{", "\"mean_image\"", ":", "nd", "}", ")", "return", "nd" ]
A function for validating an individual token record and extracting the decoded token .
def verify_token_record ( token_record , public_key_or_address , signing_algorithm = "ES256K" ) : if "token" not in token_record : raise ValueError ( "Token record must have a token inside it" ) token = token_record [ "token" ] decoded_token = verify_token ( token , public_key_or_address , signing_algorithm = signing_algorithm ) token_payload = decoded_token [ "payload" ] issuer_public_key = token_payload [ "issuer" ] [ "publicKey" ] if "parentPublicKey" in token_record : if issuer_public_key == token_record [ "parentPublicKey" ] : pass else : raise ValueError ( "Verification of tokens signed with keychains is not yet supported" ) return decoded_token
7,584
https://github.com/blockstack-packages/blockstack-profiles-py/blob/103783798df78cf0f007801e79ec6298f00b2817/blockstack_profiles/token_verifying.py#L77-L99
[ "def", "get_cube", "(", "cube", ",", "init", "=", "False", ",", "pkgs", "=", "None", ",", "cube_paths", "=", "None", ",", "config", "=", "None", ",", "backends", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pkgs", "=", "pkgs", "or", "[", "'cubes'", "]", "pkgs", "=", "[", "pkgs", "]", "if", "isinstance", "(", "pkgs", ",", "basestring", ")", "else", "pkgs", "# search in the given path too, if provided", "cube_paths", "=", "cube_paths", "or", "[", "]", "cube_paths_is_basestring", "=", "isinstance", "(", "cube_paths", ",", "basestring", ")", "cube_paths", "=", "[", "cube_paths", "]", "if", "cube_paths_is_basestring", "else", "cube_paths", "cube_paths", "=", "[", "os", ".", "path", ".", "expanduser", "(", "path", ")", "for", "path", "in", "cube_paths", "]", "# append paths which don't already exist in sys.path to sys.path", "[", "sys", ".", "path", ".", "append", "(", "path", ")", "for", "path", "in", "cube_paths", "if", "path", "not", "in", "sys", ".", "path", "]", "pkgs", "=", "pkgs", "+", "DEFAULT_PKGS", "err", "=", "False", "for", "pkg", "in", "pkgs", ":", "try", ":", "_cube", "=", "_load_cube_pkg", "(", "pkg", ",", "cube", ")", "except", "ImportError", "as", "err", ":", "_cube", "=", "None", "if", "_cube", ":", "break", "else", ":", "logger", ".", "error", "(", "err", ")", "raise", "RuntimeError", "(", "'\"%s\" not found! %s; %s \\n%s)'", "%", "(", "cube", ",", "pkgs", ",", "cube_paths", ",", "sys", ".", "path", ")", ")", "if", "init", ":", "_cube", "=", "_cube", "(", "config", "=", "config", ",", "*", "*", "kwargs", ")", "return", "_cube" ]
A function for extracting a profile from a list of tokens .
def get_profile_from_tokens ( token_records , public_key_or_address , hierarchical_keys = False ) : if hierarchical_keys : raise NotImplementedError ( "Hierarchical key support not implemented" ) profile = { } for token_record in token_records : # print token_record try : decoded_token = verify_token_record ( token_record , public_key_or_address ) except ValueError : # traceback.print_exc() continue else : if "payload" in decoded_token : if "claim" in decoded_token [ "payload" ] : claim = decoded_token [ "payload" ] [ "claim" ] profile . update ( claim ) return profile
7,585
https://github.com/blockstack-packages/blockstack-profiles-py/blob/103783798df78cf0f007801e79ec6298f00b2817/blockstack_profiles/token_verifying.py#L102-L124
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Resolves a zone file to a profile and checks to makes sure the tokens are signed with a key that corresponds to the address or public key provided .
def resolve_zone_file_to_profile ( zone_file , address_or_public_key ) : if is_profile_in_legacy_format ( zone_file ) : return zone_file try : token_file_url = get_token_file_url_from_zone_file ( zone_file ) except Exception as e : raise Exception ( "Token file URL could not be extracted from zone file" ) try : r = requests . get ( token_file_url ) except Exception as e : raise Exception ( "Token could not be acquired from token file URL" ) try : profile_token_records = json . loads ( r . text ) except ValueError : raise Exception ( "Token records could not be extracted from token file" ) try : profile = get_profile_from_tokens ( profile_token_records , address_or_public_key ) except Exception as e : raise Exception ( "Profile could not be extracted from token records" ) return profile
7,586
https://github.com/blockstack-packages/blockstack-profiles-py/blob/103783798df78cf0f007801e79ec6298f00b2817/blockstack_profiles/zone_file_format.py#L51-L80
[ "def", "neutralize_variable", "(", "self", ",", "variable_name", ")", ":", "self", ".", "variables", "[", "variable_name", "]", "=", "get_neutralized_variable", "(", "self", ".", "get_variable", "(", "variable_name", ")", ")" ]
Prepare watchdog for scheduled task starting
def __dog_started ( self ) : if self . __task is not None : raise RuntimeError ( 'Unable to start task. In order to start a new task - at first stop it' ) self . __task = self . record ( ) . task ( ) if isinstance ( self . __task , WScheduleTask ) is False : task_class = self . __task . __class__ . __qualname__ raise RuntimeError ( 'Unable to start unknown type of task: %s' % task_class )
7,587
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L124-L135
[ "def", "read_content", "(", "self", ")", ":", "if", "self", ".", "is_directory", "(", ")", ":", "self", ".", "url_connection", ".", "cwd", "(", "self", ".", "filename", ")", "self", ".", "files", "=", "self", ".", "get_files", "(", ")", "# XXX limit number of files?", "data", "=", "get_index_html", "(", "self", ".", "files", ")", "else", ":", "# download file in BINARY mode", "ftpcmd", "=", "\"RETR %s\"", "%", "self", ".", "filename", "buf", "=", "StringIO", "(", ")", "def", "stor_data", "(", "s", ")", ":", "\"\"\"Helper method storing given data\"\"\"", "# limit the download size", "if", "(", "buf", ".", "tell", "(", ")", "+", "len", "(", "s", ")", ")", ">", "self", ".", "max_size", ":", "raise", "LinkCheckerError", "(", "_", "(", "\"FTP file size too large\"", ")", ")", "buf", ".", "write", "(", "s", ")", "self", ".", "url_connection", ".", "retrbinary", "(", "ftpcmd", ",", "stor_data", ")", "data", "=", "buf", ".", "getvalue", "(", ")", "buf", ".", "close", "(", ")", "return", "data" ]
Start a scheduled task
def __thread_started ( self ) : if self . __task is None : raise RuntimeError ( 'Unable to start thread without "start" method call' ) self . __task . start ( ) self . __task . start_event ( ) . wait ( self . __scheduled_task_startup_timeout__ )
7,588
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L138-L146
[ "def", "image_create", "(", "request", ",", "*", "*", "kwargs", ")", ":", "data", "=", "kwargs", ".", "pop", "(", "'data'", ",", "None", ")", "location", "=", "None", "if", "VERSIONS", ".", "active", ">=", "2", ":", "location", "=", "kwargs", ".", "pop", "(", "'location'", ",", "None", ")", "image", "=", "glanceclient", "(", "request", ")", ".", "images", ".", "create", "(", "*", "*", "kwargs", ")", "if", "location", "is", "not", "None", ":", "glanceclient", "(", "request", ")", ".", "images", ".", "add_location", "(", "image", ".", "id", ",", "location", ",", "{", "}", ")", "if", "data", ":", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "# The image data is meant to be uploaded externally, return a", "# special wrapper to bypass the web server in a subsequent upload", "return", "ExternallyUploadedImage", "(", "image", ",", "request", ")", "elif", "isinstance", "(", "data", ",", "TemporaryUploadedFile", ")", ":", "# Hack to fool Django, so we can keep file open in the new thread.", "if", "six", ".", "PY2", ":", "data", ".", "file", ".", "close_called", "=", "True", "else", ":", "data", ".", "file", ".", "_closer", ".", "close_called", "=", "True", "elif", "isinstance", "(", "data", ",", "InMemoryUploadedFile", ")", ":", "# Clone a new file for InMemeoryUploadedFile.", "# Because the old one will be closed by Django.", "data", "=", "SimpleUploadedFile", "(", "data", ".", "name", ",", "data", ".", "read", "(", ")", ",", "data", ".", "content_type", ")", "if", "VERSIONS", ".", "active", "<", "2", ":", "thread", ".", "start_new_thread", "(", "image_update", ",", "(", "request", ",", "image", ".", "id", ")", ",", "{", "'data'", ":", "data", "}", ")", "else", ":", "def", "upload", "(", ")", ":", "try", ":", "return", "glanceclient", "(", "request", ")", ".", "images", ".", "upload", "(", "image", ".", "id", ",", "data", ")", "finally", ":", "filename", "=", "str", "(", "data", ".", "file", ".", "name", ")", "try", ":", "os", ".", 
"remove", "(", "filename", ")", "except", "OSError", "as", "e", ":", "LOG", ".", "warning", "(", "'Failed to remove temporary image file '", "'%(file)s (%(e)s)'", ",", "{", "'file'", ":", "filename", ",", "'e'", ":", "e", "}", ")", "thread", ".", "start_new_thread", "(", "upload", ",", "(", ")", ")", "return", "Image", "(", "image", ")" ]
Poll for scheduled task stop events
def _polling_iteration ( self ) : if self . __task is None : self . ready_event ( ) . set ( ) elif self . __task . check_events ( ) is True : self . ready_event ( ) . set ( ) self . registry ( ) . task_finished ( self )
7,589
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L149-L158
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Stop scheduled task beacuse of watchdog stop
def thread_stopped ( self ) : if self . __task is not None : if self . __task . stop_event ( ) . is_set ( ) is False : self . __task . stop ( ) self . __task = None
7,590
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L161-L169
[ "def", "_get_request_param", "(", "self", ",", "request", ")", ":", "params", "=", "{", "}", "try", ":", "params", "=", "request", ".", "POST", ".", "copy", "(", ")", "if", "not", "params", ":", "params", "=", "json", ".", "loads", "(", "request", ".", "body", ")", "except", "Exception", ":", "pass", "for", "key", "in", "params", ":", "# replace a value to a masked characters", "if", "key", "in", "self", ".", "mask_fields", ":", "params", "[", "key", "]", "=", "'*'", "*", "8", "# when a file uploaded (E.g create image)", "files", "=", "request", ".", "FILES", ".", "values", "(", ")", "if", "list", "(", "files", ")", ":", "filenames", "=", "', '", ".", "join", "(", "[", "up_file", ".", "name", "for", "up_file", "in", "files", "]", ")", "params", "[", "'file_name'", "]", "=", "filenames", "try", ":", "return", "json", ".", "dumps", "(", "params", ",", "ensure_ascii", "=", "False", ")", "except", "Exception", ":", "return", "'Unserializable Object'" ]
Terminate all the running tasks
def stop_running_tasks ( self ) : for task in self . __running_registry : task . stop ( ) self . __running_registry . clear ( )
7,591
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L301-L308
[ "def", "_request", "(", "self", ",", "method", ",", "path", ",", "data", "=", "None", ",", "reestablish_session", "=", "True", ")", ":", "if", "path", ".", "startswith", "(", "\"http\"", ")", ":", "url", "=", "path", "# For cases where URL of different form is needed.", "else", ":", "url", "=", "self", ".", "_format_path", "(", "path", ")", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json\"", "}", "if", "self", ".", "_user_agent", ":", "headers", "[", "'User-Agent'", "]", "=", "self", ".", "_user_agent", "body", "=", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "\"utf-8\"", ")", "try", ":", "response", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "data", "=", "body", ",", "headers", "=", "headers", ",", "cookies", "=", "self", ".", "_cookies", ",", "*", "*", "self", ".", "_request_kwargs", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "err", ":", "# error outside scope of HTTP status codes", "# e.g. unable to resolve domain name", "raise", "PureError", "(", "err", ".", "message", ")", "if", "response", ".", "status_code", "==", "200", ":", "if", "\"application/json\"", "in", "response", ".", "headers", ".", "get", "(", "\"Content-Type\"", ",", "\"\"", ")", ":", "if", "response", ".", "cookies", ":", "self", ".", "_cookies", ".", "update", "(", "response", ".", "cookies", ")", "else", ":", "self", ".", "_cookies", ".", "clear", "(", ")", "content", "=", "response", ".", "json", "(", ")", "if", "isinstance", "(", "content", ",", "list", ")", ":", "content", "=", "ResponseList", "(", "content", ")", "elif", "isinstance", "(", "content", ",", "dict", ")", ":", "content", "=", "ResponseDict", "(", "content", ")", "content", ".", "headers", "=", "response", ".", "headers", "return", "content", "raise", "PureError", "(", "\"Response not in JSON: \"", "+", "response", ".", "text", ")", "elif", "response", ".", "status_code", "==", "401", "and", "reestablish_session", ":", "self", ".", 
"_start_session", "(", ")", "return", "self", ".", "_request", "(", "method", ",", "path", ",", "data", ",", "False", ")", "elif", "response", ".", "status_code", "==", "450", "and", "self", ".", "_renegotiate_rest_version", ":", "# Purity REST API version is incompatible.", "old_version", "=", "self", ".", "_rest_version", "self", ".", "_rest_version", "=", "self", ".", "_choose_rest_version", "(", ")", "if", "old_version", "==", "self", ".", "_rest_version", ":", "# Got 450 error, but the rest version was supported", "# Something really unexpected happened.", "raise", "PureHTTPError", "(", "self", ".", "_target", ",", "str", "(", "self", ".", "_rest_version", ")", ",", "response", ")", "return", "self", ".", "_request", "(", "method", ",", "path", ",", "data", ",", "reestablish_session", ")", "else", ":", "raise", "PureHTTPError", "(", "self", ".", "_target", ",", "str", "(", "self", ".", "_rest_version", ")", ",", "response", ")" ]
Add new tasks source
def add_source ( self , task_source ) : next_start = task_source . next_start ( ) self . __sources [ task_source ] = next_start self . __update ( task_source )
7,592
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L452-L461
[ "def", "connection", "(", "cls", ")", ":", "local", "=", "cls", ".", "_threadlocal", "if", "not", "getattr", "(", "local", ",", "'connection'", ",", "None", ")", ":", "# Make sure these variables are no longer affected by other threads.", "local", ".", "user", "=", "cls", ".", "user", "local", ".", "password", "=", "cls", ".", "password", "local", ".", "site", "=", "cls", ".", "site", "local", ".", "timeout", "=", "cls", ".", "timeout", "local", ".", "headers", "=", "cls", ".", "headers", "local", ".", "format", "=", "cls", ".", "format", "local", ".", "version", "=", "cls", ".", "version", "local", ".", "url", "=", "cls", ".", "url", "if", "cls", ".", "site", "is", "None", ":", "raise", "ValueError", "(", "\"No shopify session is active\"", ")", "local", ".", "connection", "=", "ShopifyConnection", "(", "cls", ".", "site", ",", "cls", ".", "user", ",", "cls", ".", "password", ",", "cls", ".", "timeout", ",", "cls", ".", "format", ")", "return", "local", ".", "connection" ]
Recheck next start of records from all the sources
def __update_all ( self ) : self . __next_start = None self . __next_sources = [ ] for source in self . __sources : self . __update ( source )
7,593
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L476-L485
[ "def", "_comp_bbox", "(", "el", ",", "el2", ")", ":", "# only compare if both elements have x/y coordinates\r", "if", "_comp_bbox_keys_required", "<=", "set", "(", "el", ".", "keys", "(", ")", ")", "and", "_comp_bbox_keys_required", "<=", "set", "(", "el2", ".", "keys", "(", ")", ")", ":", "if", "_box_in_box", "(", "el2", ",", "el", ")", ":", "return", "1", "if", "_box_in_box", "(", "el", ",", "el2", ")", ":", "return", "-", "1", "return", "0" ]
Recheck next start of tasks from the given one only
def __update ( self , task_source ) : next_start = task_source . next_start ( ) if next_start is not None : if next_start . tzinfo is None or next_start . tzinfo != timezone . utc : raise ValueError ( 'Invalid timezone information' ) if self . __next_start is None or next_start < self . __next_start : self . __next_start = next_start self . __next_sources = [ task_source ] elif next_start == self . __next_start : self . __next_sources . append ( task_source )
7,594
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L488-L505
[ "def", "_verify_options", "(", "options", ")", ":", "# sanity check all vals used for bitwise operations later", "bitwise_args", "=", "[", "(", "'level'", ",", "options", "[", "'level'", "]", ")", ",", "(", "'facility'", ",", "options", "[", "'facility'", "]", ")", "]", "bitwise_args", ".", "extend", "(", "[", "(", "'option'", ",", "x", ")", "for", "x", "in", "options", "[", "'options'", "]", "]", ")", "for", "opt_name", ",", "opt", "in", "bitwise_args", ":", "if", "not", "hasattr", "(", "syslog", ",", "opt", ")", ":", "log", ".", "error", "(", "'syslog has no attribute %s'", ",", "opt", ")", "return", "False", "if", "not", "isinstance", "(", "getattr", "(", "syslog", ",", "opt", ")", ",", "int", ")", ":", "log", ".", "error", "(", "'%s is not a valid syslog %s'", ",", "opt", ",", "opt_name", ")", "return", "False", "# Sanity check tag", "if", "'tag'", "in", "options", ":", "if", "not", "isinstance", "(", "options", "[", "'tag'", "]", ",", "six", ".", "string_types", ")", ":", "log", ".", "error", "(", "'tag must be a string'", ")", "return", "False", "if", "len", "(", "options", "[", "'tag'", "]", ")", ">", "32", ":", "log", ".", "error", "(", "'tag size is limited to 32 characters'", ")", "return", "False", "return", "True" ]
Check if there are records that are ready to start and return them if there are any
def check ( self ) : if self . __next_start is not None : utc_now = utc_datetime ( ) if utc_now >= self . __next_start : result = [ ] for task_source in self . __next_sources : records = task_source . has_records ( ) if records is not None : result . extend ( records ) self . __update_all ( ) if len ( result ) > 0 : return tuple ( result )
7,595
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L507-L525
[ "def", "handle_exception", "(", "self", ")", ":", "etype", ",", "evalue", "=", "sys", ".", "exc_info", "(", ")", "[", ":", "2", "]", "log", ".", "debug", "(", "LOG_CHECK", ",", "\"Error in %s: %s %s\"", ",", "self", ".", "url", ",", "etype", ",", "evalue", ",", "exception", "=", "True", ")", "# note: etype must be the exact class, not a subclass", "if", "(", "etype", "in", "ExcNoCacheList", ")", "or", "(", "etype", "==", "socket", ".", "error", "and", "evalue", ".", "args", "[", "0", "]", "==", "errno", ".", "EBADF", ")", "or", "not", "evalue", ":", "# EBADF occurs when operating on an already socket", "self", ".", "caching", "=", "False", "# format unicode message \"<exception name>: <error message>\"", "errmsg", "=", "unicode", "(", "etype", ".", "__name__", ")", "uvalue", "=", "strformat", ".", "unicode_safe", "(", "evalue", ")", "if", "uvalue", ":", "errmsg", "+=", "u\": %s\"", "%", "uvalue", "# limit length to 240", "return", "strformat", ".", "limit", "(", "errmsg", ",", "length", "=", "240", ")" ]
Start required registries and start this scheduler
def thread_started ( self ) : self . __running_record_registry . start ( ) self . __running_record_registry . start_event ( ) . wait ( ) WPollingThreadTask . thread_started ( self )
7,596
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/scheduler/scheduler.py#L657-L664
[ "def", "update", "(", "self", ")", ":", "self", ".", "info", ".", "display_dataset", "(", ")", "self", ".", "overview", ".", "update", "(", ")", "self", ".", "labels", ".", "update", "(", "labels", "=", "self", ".", "info", ".", "dataset", ".", "header", "[", "'chan_name'", "]", ")", "self", ".", "channels", ".", "update", "(", ")", "try", ":", "self", ".", "info", ".", "markers", "=", "self", ".", "info", ".", "dataset", ".", "read_markers", "(", ")", "except", "FileNotFoundError", ":", "lg", ".", "info", "(", "'No notes/markers present in the header of the file'", ")", "else", ":", "self", ".", "notes", ".", "update_dataset_marker", "(", ")" ]
Check if a file of directory is contained in another .
def dir_contains ( dirname , path , exists = True ) : if exists : dirname = osp . abspath ( dirname ) path = osp . abspath ( path ) if six . PY2 or six . PY34 : return osp . exists ( path ) and osp . samefile ( osp . commonprefix ( [ dirname , path ] ) , dirname ) else : return osp . samefile ( osp . commonpath ( [ dirname , path ] ) , dirname ) return dirname in osp . commonprefix ( [ dirname , path ] )
7,597
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/utils.py#L11-L35
[ "def", "last_update_time", "(", "self", ")", "->", "float", ":", "stdout", "=", "self", ".", "stdout_interceptor", "stderr", "=", "self", ".", "stderr_interceptor", "return", "max", "(", "[", "self", ".", "_last_update_time", ",", "stdout", ".", "last_write_time", "if", "stdout", "else", "0", ",", "stderr", ".", "last_write_time", "if", "stderr", "else", "0", ",", "]", ")" ]
Return the next name that numerically follows old
def get_next_name ( old , fmt = '%i' ) : nums = re . findall ( '\d+' , old ) if not nums : raise ValueError ( "Could not get the next name because the old name " "has no numbers in it" ) num0 = nums [ - 1 ] num1 = str ( int ( num0 ) + 1 ) return old [ : : - 1 ] . replace ( num0 [ : : - 1 ] , num1 [ : : - 1 ] , 1 ) [ : : - 1 ]
7,598
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/utils.py#L42-L50
[ "def", "pores", "(", "self", ",", "labels", "=", "'all'", ",", "mode", "=", "'or'", ",", "asmask", "=", "False", ")", ":", "ind", "=", "self", ".", "_get_indices", "(", "element", "=", "'pore'", ",", "labels", "=", "labels", ",", "mode", "=", "mode", ")", "if", "asmask", ":", "ind", "=", "self", ".", "tomask", "(", "pores", "=", "ind", ")", "return", "ind" ]
Split up the key by . and get the value from the base dictionary d
def go_through_dict ( key , d , setdefault = None ) : patt = re . compile ( r'(?<!\\)\.' ) sub_d = d splitted = patt . split ( key ) n = len ( splitted ) for i , k in enumerate ( splitted ) : if i < n - 1 : if setdefault is not None : sub_d = sub_d . setdefault ( k , setdefault ( ) ) else : sub_d = sub_d [ k ] else : return k , sub_d
7,599
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/utils.py#L61-L93
[ "def", "_compute_slab_correction_term", "(", "self", ",", "C", ",", "rrup", ")", ":", "slab_term", "=", "C", "[", "'SSL'", "]", "*", "np", ".", "log", "(", "rrup", ")", "return", "slab_term" ]