query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Populate the blobs
def prepare_blobs(self):
    """Populate the blobs.

    Extracts the EVT header into ``self.raw_header`` and, when offset
    caching is enabled, pre-computes the event offsets.
    """
    header = self.extract_header()
    self.raw_header = header
    if self.cache_enabled:
        self._cache_offsets()
10,000
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L163-L167
[ "def", "connect", "(", "image", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "image", ")", ":", "log", ".", "warning", "(", "'Could not connect image: %s does not exist'", ",", "image", ")", "return", "''", "if", "salt", ".", "utils", ".", "path", ".", "which", "(", "'sfdisk'", ")", ":", "fdisk", "=", "'sfdisk -d'", "else", ":", "fdisk", "=", "'fdisk -l'", "__salt__", "[", "'cmd.run'", "]", "(", "'modprobe nbd max_part=63'", ")", "for", "nbd", "in", "glob", ".", "glob", "(", "'/dev/nbd?'", ")", ":", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "'{0} {1}'", ".", "format", "(", "fdisk", ",", "nbd", ")", ")", ":", "while", "True", ":", "# Sometimes nbd does not \"take hold\", loop until we can verify", "__salt__", "[", "'cmd.run'", "]", "(", "'qemu-nbd -c {0} {1}'", ".", "format", "(", "nbd", ",", "image", ")", ",", "python_shell", "=", "False", ",", ")", "if", "not", "__salt__", "[", "'cmd.retcode'", "]", "(", "'{0} {1}'", ".", "format", "(", "fdisk", ",", "nbd", ")", ")", ":", "break", "return", "nbd", "log", ".", "warning", "(", "'Could not connect image: %s'", ",", "image", ")", "return", "''" ]
Create a dictionary with the EVT header information
def extract_header(self):
    """Create a dictionary with the EVT header information.

    Reads lines from ``self.blob_file`` until the ``end_event:`` tag,
    collecting every ``tag: value`` pair into a ``defaultdict(list)``
    (each value is whitespace-split into a token list).

    Returns
    -------
    defaultdict
        Mapping of header tag -> list of token lists.  Empty when the
        file does not start with ``start_run``.

    Raises
    ------
    ValueError
        If the header has no ``end_event:`` tag.
    """
    self.log.info("Extracting the header")
    raw_header = self.raw_header = defaultdict(list)
    first_line = self.blob_file.readline()
    first_line = try_decode_string(first_line)
    # Rewind so the header lines are consumed again by the loop below.
    self.blob_file.seek(0, 0)
    if not first_line.startswith(str('start_run')):
        self.log.warning("No header found.")
        return raw_header
    for line in iter(self.blob_file.readline, ''):
        line = try_decode_string(line)
        line = line.strip()
        try:
            tag, value = str(line).split(':')
        except ValueError:
            # Not a "tag: value" line; skip it.
            continue
        raw_header[tag].append(str(value).split())
        if line.startswith(str('end_event:')):
            # Remember where the first event starts.
            self._record_offset()
            if self._auto_parse and 'physics' in raw_header:
                # Register parsers for each physics program listed in the header.
                parsers = [p[0].lower() for p in raw_header['physics']]
                self._register_parsers(parsers)
            return raw_header
    raise ValueError("Incomplete header, no 'end_event' tag found!")
10,001
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L169-L193
[ "def", "get_all_indices", "(", "self", ",", "n_samples", "=", "None", ",", "max_samples", "=", "None", ",", "random_state", "=", "None", ")", ":", "if", "self", ".", "_indices_state", "is", "None", "and", "random_state", "is", "None", ":", "raise", "ValueError", "(", "'The program has not been evaluated for fitness '", "'yet, indices not available.'", ")", "if", "n_samples", "is", "not", "None", "and", "self", ".", "_n_samples", "is", "None", ":", "self", ".", "_n_samples", "=", "n_samples", "if", "max_samples", "is", "not", "None", "and", "self", ".", "_max_samples", "is", "None", ":", "self", ".", "_max_samples", "=", "max_samples", "if", "random_state", "is", "not", "None", "and", "self", ".", "_indices_state", "is", "None", ":", "self", ".", "_indices_state", "=", "random_state", ".", "get_state", "(", ")", "indices_state", "=", "check_random_state", "(", "None", ")", "indices_state", ".", "set_state", "(", "self", ".", "_indices_state", ")", "not_indices", "=", "sample_without_replacement", "(", "self", ".", "_n_samples", ",", "self", ".", "_n_samples", "-", "self", ".", "_max_samples", ",", "random_state", "=", "indices_state", ")", "sample_counts", "=", "np", ".", "bincount", "(", "not_indices", ",", "minlength", "=", "self", ".", "_n_samples", ")", "indices", "=", "np", ".", "where", "(", "sample_counts", "==", "0", ")", "[", "0", "]", "return", "indices", ",", "not_indices" ]
Return a blob with the event at the given index
def get_blob(self, index):
    """Return a blob with the event at the given index.

    Caches additional event offsets on demand and raises ``IndexError``
    when no event could be parsed at that position.
    """
    self.log.info("Retrieving blob #{}".format(index))
    if index >= len(self.event_offsets):
        self.log.info("Index not in cache, caching offsets")
        self._cache_offsets(index, verbose=False)
    offset = self.event_offsets[index]
    self.blob_file.seek(offset, 0)
    blob = self._create_blob()
    if blob is None:
        self.log.info("Empty blob created...")
        raise IndexError
    self.log.debug("Applying parsers...")
    for parser in self.parsers:
        parser(blob)
    self.log.debug("Returning the blob")
    return blob
10,002
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L195-L211
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "if", "(", "'SQLALCHEMY_DATABASE_URI'", "not", "in", "app", ".", "config", "and", "'SQLALCHEMY_BINDS'", "not", "in", "app", ".", "config", ")", ":", "warnings", ".", "warn", "(", "'Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. '", "'Defaulting SQLALCHEMY_DATABASE_URI to \"sqlite:///:memory:\".'", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_DATABASE_URI'", ",", "'sqlite:///:memory:'", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_BINDS'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_NATIVE_UNICODE'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_ECHO'", ",", "False", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_RECORD_QUERIES'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_POOL_SIZE'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_POOL_TIMEOUT'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_POOL_RECYCLE'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_MAX_OVERFLOW'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_COMMIT_ON_TEARDOWN'", ",", "False", ")", "track_modifications", "=", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_TRACK_MODIFICATIONS'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SQLALCHEMY_ENGINE_OPTIONS'", ",", "{", "}", ")", "if", "track_modifications", "is", "None", ":", "warnings", ".", "warn", "(", "FSADeprecationWarning", "(", "'SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and '", "'will be disabled by default in the future. 
Set it to True '", "'or False to suppress this warning.'", ")", ")", "# Deprecation warnings for config keys that should be replaced by SQLALCHEMY_ENGINE_OPTIONS.", "utils", ".", "engine_config_warning", "(", "app", ".", "config", ",", "'3.0'", ",", "'SQLALCHEMY_POOL_SIZE'", ",", "'pool_size'", ")", "utils", ".", "engine_config_warning", "(", "app", ".", "config", ",", "'3.0'", ",", "'SQLALCHEMY_POOL_TIMEOUT'", ",", "'pool_timeout'", ")", "utils", ".", "engine_config_warning", "(", "app", ".", "config", ",", "'3.0'", ",", "'SQLALCHEMY_POOL_RECYCLE'", ",", "'pool_recycle'", ")", "utils", ".", "engine_config_warning", "(", "app", ".", "config", ",", "'3.0'", ",", "'SQLALCHEMY_MAX_OVERFLOW'", ",", "'max_overflow'", ")", "app", ".", "extensions", "[", "'sqlalchemy'", "]", "=", "_SQLAlchemyState", "(", "self", ")", "@", "app", ".", "teardown_appcontext", "def", "shutdown_session", "(", "response_or_exc", ")", ":", "if", "app", ".", "config", "[", "'SQLALCHEMY_COMMIT_ON_TEARDOWN'", "]", ":", "if", "response_or_exc", "is", "None", ":", "self", ".", "session", ".", "commit", "(", ")", "self", ".", "session", ".", "remove", "(", ")", "return", "response_or_exc" ]
Pump the next blob to the modules
def process(self, blob=None):
    """Pump the next blob to the modules.

    Reads the event at the current ``self.index``.  On ``IndexError``
    (current file exhausted) it advances to the next file when a
    basename or filename list is configured and ``file_index`` has not
    reached ``index_stop``, re-opens it, and retries once.  When no
    files are left, the pipeline is terminated via ``StopIteration``.

    Parameters
    ----------
    blob : optional
        Ignored/overwritten; kept for pipeline API compatibility.

    Returns
    -------
    Blob
        The next event blob.

    Raises
    ------
    StopIteration
        When no more events or files are available.
    """
    try:
        blob = self.get_blob(self.index)
    except IndexError:
        self.log.info("Got an IndexError, trying the next file")
        if (self.basename or self.filenames) and self.file_index < self.index_stop:
            self.file_index += 1
            self.log.info("Now at file_index={}".format(self.file_index))
            self._reset()
            self.blob_file.close()
            self.log.info("Resetting blob index to 0")
            self.index = 0
            file_index = self._get_file_index_str()
            if self.filenames:
                # Explicit file list: file_index is 1-based here.
                self.filename = self.filenames[self.file_index - 1]
            elif self.basename:
                self.filename = "{}{}{}.evt".format(self.basename, file_index, self.suffix)
            self.log.info("Next filename: {}".format(self.filename))
            self.print("Opening {0}".format(self.filename))
            self.open_file(self.filename)
            self.prepare_blobs()
            try:
                blob = self.get_blob(self.index)
            except IndexError:
                # New file opened but empty; fall through to termination.
                self.log.warning("No blob found in file {}".format(self.filename))
            else:
                return blob
        self.log.info("No files left, terminating the pipeline")
        raise StopIteration
    self.index += 1
    return blob
10,003
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L213-L250
[ "def", "uri_tree_precode_check", "(", "uri_tree", ",", "type_host", "=", "HOST_REG_NAME", ")", ":", "scheme", ",", "authority", ",", "path", ",", "query", ",", "fragment", "=", "uri_tree", "# pylint: disable-msg=W0612", "if", "scheme", ":", "if", "not", "valid_scheme", "(", "scheme", ")", ":", "raise", "InvalidSchemeError", ",", "\"Invalid scheme %r\"", "%", "(", "scheme", ",", ")", "if", "authority", ":", "user", ",", "passwd", ",", "host", ",", "port", "=", "authority", "# pylint: disable-msg=W0612", "if", "port", "and", "not", "__all_in", "(", "port", ",", "DIGIT", ")", ":", "raise", "InvalidPortError", ",", "\"Invalid port %r\"", "%", "(", "port", ",", ")", "if", "type_host", "==", "HOST_IP_LITERAL", ":", "if", "host", "and", "(", "not", "__valid_IPLiteral", "(", "host", ")", ")", ":", "raise", "InvalidIPLiteralError", ",", "\"Invalid IP-literal %r\"", "%", "(", "host", ",", ")", "elif", "type_host", "==", "HOST_IPV4_ADDRESS", ":", "if", "host", "and", "(", "not", "__valid_IPv4address", "(", "host", ")", ")", ":", "raise", "InvalidIPv4addressError", ",", "\"Invalid IPv4address %r\"", "%", "(", "host", ",", ")", "if", "path", ":", "if", "authority", "and", "path", "[", "0", "]", "!=", "'/'", ":", "raise", "InvalidPathError", ",", "\"Invalid path %r - non-absolute path can't be used with an authority\"", "%", "(", "path", ",", ")", "return", "uri_tree" ]
Cache all event offsets .
def _cache_offsets(self, up_to_index=None, verbose=True):
    """Cache all event offsets.

    Scans the file for ``end_event:`` tags and records the offset right
    after each one via ``_record_offset``.

    Parameters
    ----------
    up_to_index : int, optional
        When given, resume scanning from the last cached offset and
        return as soon as at least ``up_to_index + 1`` offsets are
        cached.  When ``None`` (or 0), index the whole file and set
        ``whole_file_cached``.
    verbose : bool
        Print progress dots and a final summary.
    """
    if not up_to_index:
        if verbose:
            self.print("Caching event file offsets, this may take a bit.")
        self.blob_file.seek(0, 0)
        self.event_offsets = []
        if not self.raw_header:
            # No header: the first event starts at the very beginning.
            self.event_offsets.append(0)
    else:
        # Resume scanning from the last known offset.
        self.blob_file.seek(self.event_offsets[-1], 0)
    for line in iter(self.blob_file.readline, ''):
        line = try_decode_string(line)
        if line.startswith('end_event:'):
            self._record_offset()
            if len(self.event_offsets) % 100 == 0:
                if verbose:
                    print('.', end='')
                    sys.stdout.flush()
            if up_to_index and len(self.event_offsets) >= up_to_index + 1:
                return
    self.event_offsets.pop()    # get rid of the last entry
    if not up_to_index:
        self.whole_file_cached = True
    self.print("\n{0} events indexed.".format(len(self.event_offsets)))
10,004
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L252-L276
[ "def", "purge_archived_resources", "(", "user", ",", "table", ")", ":", "if", "user", ".", "is_not_super_admin", "(", ")", ":", "raise", "dci_exc", ".", "Unauthorized", "(", ")", "where_clause", "=", "sql", ".", "and_", "(", "table", ".", "c", ".", "state", "==", "'archived'", ")", "query", "=", "table", ".", "delete", "(", ")", ".", "where", "(", "where_clause", ")", "flask", ".", "g", ".", "db_conn", ".", "execute", "(", "query", ")", "return", "flask", ".", "Response", "(", "None", ",", "204", ",", "content_type", "=", "'application/json'", ")" ]
Stores the current file pointer position
def _record_offset ( self ) : offset = self . blob_file . tell ( ) self . event_offsets . append ( offset )
10,005
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L278-L281
[ "def", "convert_to_experiment_list", "(", "experiments", ")", ":", "exp_list", "=", "experiments", "# Transform list if necessary", "if", "experiments", "is", "None", ":", "exp_list", "=", "[", "]", "elif", "isinstance", "(", "experiments", ",", "Experiment", ")", ":", "exp_list", "=", "[", "experiments", "]", "elif", "type", "(", "experiments", ")", "is", "dict", ":", "exp_list", "=", "[", "Experiment", ".", "from_json", "(", "name", ",", "spec", ")", "for", "name", ",", "spec", "in", "experiments", ".", "items", "(", ")", "]", "# Validate exp_list", "if", "(", "type", "(", "exp_list", ")", "is", "list", "and", "all", "(", "isinstance", "(", "exp", ",", "Experiment", ")", "for", "exp", "in", "exp_list", ")", ")", ":", "if", "len", "(", "exp_list", ")", ">", "1", ":", "logger", ".", "warning", "(", "\"All experiments will be \"", "\"using the same SearchAlgorithm.\"", ")", "else", ":", "raise", "TuneError", "(", "\"Invalid argument: {}\"", ".", "format", "(", "experiments", ")", ")", "return", "exp_list" ]
Parse the next event from the current file position
def _create_blob(self):
    """Parse the next event from the current file position.

    Returns
    -------
    Blob or None
        The parsed event with the raw header attached under
        ``'raw_header'``; implicitly ``None`` when the file ends before
        an ``end_event:`` tag is reached.
    """
    blob = None
    for line in self.blob_file:
        line = try_decode_string(line)
        line = line.strip()
        if line == '':
            self.log.info("Ignoring empty line...")
            continue
        if line.startswith('end_event:') and blob:
            blob['raw_header'] = self.raw_header
            return blob
        try:
            tag, values = line.split(':')
        except ValueError:
            self.log.warning("Ignoring corrupt line: {}".format(line))
            continue
        try:
            # `split` parses the whitespace-separated values via float().
            values = tuple(split(values.strip(), callback=float))
        except ValueError:
            # Keep the unparsed values; only log the problem.
            self.log.info("Empty value: {}".format(values))
        if line.startswith('start_event:'):
            blob = Blob()
            # start_event values are stored as a tuple of ints.
            blob[tag] = tuple(int(v) for v in values)
            continue
        if tag not in blob:
            blob[tag] = []
        blob[tag].append(values)
10,006
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L283-L310
[ "def", "compose", "(", "self", ")", ":", "rr", "=", "self", ".", "__reagents", "+", "self", ".", "__reactants", "if", "rr", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "(", "MoleculeContainer", ",", "CGRContainer", ")", ")", "for", "x", "in", "rr", ")", ":", "raise", "TypeError", "(", "'Queries not composable'", ")", "r", "=", "reduce", "(", "or_", ",", "rr", ")", "else", ":", "r", "=", "MoleculeContainer", "(", ")", "if", "self", ".", "__products", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "(", "MoleculeContainer", ",", "CGRContainer", ")", ")", "for", "x", "in", "self", ".", "__products", ")", ":", "raise", "TypeError", "(", "'Queries not composable'", ")", "p", "=", "reduce", "(", "or_", ",", "self", ".", "__products", ")", "else", ":", "p", "=", "MoleculeContainer", "(", ")", "return", "r", "^", "p" ]
Runs a python cgi server in a subprocess .
def runserver(project_name):
    """Runs a python cgi server in a subprocess.

    Serves the project's ``cgi`` folder on port 8000 using
    ``python -m http.server --cgi``.  Changes the current working
    directory to the CGI folder as a side effect.

    Args:
        project_name: Path to the project directory; it must contain a
            ``settings.py`` file.

    Raises:
        NotImplementedError: If no ``settings.py`` is found in the
            project directory (kept for backward compatibility with
            existing callers).
    """
    entries = os.listdir(project_name)
    if 'settings.py' not in entries:
        raise NotImplementedError('No file called: settings.py found in %s' % project_name)
    cgi_bin_folder = os.path.join(project_name, 'cgi', 'cgi-bin')
    cgi_folder = os.path.join(project_name, 'cgi')
    if not os.path.exists(cgi_bin_folder):
        os.makedirs(cgi_bin_folder)
    os.chdir(cgi_folder)
    # Pass the command as an argv list: a single command string without
    # shell=True only works on Windows and fails on POSIX (it would try
    # to exec a binary literally named "python -m http.server ...").
    subprocess.Popen(["python", "-m", "http.server", "--cgi", "8000"])
10,007
https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/__main__.py#L44-L59
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "self", ".", "_jrdd_deserializer", "==", "other", ".", "_jrdd_deserializer", ":", "rdd", "=", "RDD", "(", "self", ".", "_jrdd", ".", "union", "(", "other", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "_jrdd_deserializer", ")", "else", ":", "# These RDDs contain data in different serialized formats, so we", "# must normalize them to the default serializer.", "self_copy", "=", "self", ".", "_reserialize", "(", ")", "other_copy", "=", "other", ".", "_reserialize", "(", ")", "rdd", "=", "RDD", "(", "self_copy", ".", "_jrdd", ".", "union", "(", "other_copy", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "ctx", ".", "serializer", ")", "if", "(", "self", ".", "partitioner", "==", "other", ".", "partitioner", "and", "self", ".", "getNumPartitions", "(", ")", "==", "rdd", ".", "getNumPartitions", "(", ")", ")", ":", "rdd", ".", "partitioner", "=", "self", ".", "partitioner", "return", "rdd" ]
Get the utility of a given decision given a preference .
def getUtility(self, decision, sample, aggregationMode="avg"):
    """Get the utility of a given decision given a preference.

    Aggregates the per-candidate utilities returned by
    ``self.getUtilities(decision, sample)``.

    :ivar decision: the decision to evaluate.
    :ivar sample: the preference sample passed to ``getUtilities``.
    :ivar aggregationMode: ``"avg"`` (mean), ``"min"`` or ``"max"``.
    :raises ValueError: if ``aggregationMode`` is not recognized.
    """
    utilities = self.getUtilities(decision, sample)
    if aggregationMode == "avg":
        utility = numpy.mean(utilities)
    elif aggregationMode == "min":
        utility = min(utilities)
    elif aggregationMode == "max":
        utility = max(utilities)
    else:
        # Raise instead of print()+exit(): library code must not kill
        # the host process on bad input.
        raise ValueError("aggregation mode not recognized: %r" % aggregationMode)
    return utility
10,008
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L13-L37
[ "def", "_extract_from_url", "(", "self", ",", "url", ")", ":", "# Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py", "m", "=", "re", ".", "search", "(", "re_pub_date", ",", "url", ")", "if", "m", ":", "return", "self", ".", "parse_date_str", "(", "m", ".", "group", "(", "0", ")", ")", "return", "None" ]
Returns a floats that contains the utilities of every candidate in the decision .
def getUtilities(self, decision, orderVector):
    """Returns a floats that contains the utilities of every candidate
    in the decision.

    Each candidate's utility is its score at its position in
    ``orderVector``; utilities are negated when ``self.isLoss`` is True.
    """
    scores = self.getScoringVector(orderVector)
    sign = -1.0 if self.isLoss == True else 1.0
    return [sign * float(scores[orderVector.index(alt)]) for alt in decision]
10,009
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L59-L77
[ "def", "startup_script", "(", "self", ")", ":", "script_file", "=", "self", ".", "script_file", "if", "script_file", "is", "None", ":", "return", "None", "try", ":", "with", "open", "(", "script_file", ",", "\"rb\"", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ",", "errors", "=", "\"replace\"", ")", "except", "OSError", "as", "e", ":", "raise", "VPCSError", "(", "'Cannot read the startup script file \"{}\": {}'", ".", "format", "(", "script_file", ",", "e", ")", ")" ]
Returns a floats that contains the utilities of every candidate in the decision . This was adapted from code written by Lirong Xia .
def getUtilities(self, decision, binaryRelations):
    """Returns a floats that contains the utilities of every candidate
    in the decision.  This was adapted from code written by Lirong Xia.

    A candidate gets utility 1.0 (or -1.0 for losses) when at most
    ``self.k`` alternatives (itself included) transitively beat it
    according to ``binaryRelations``; otherwise 0.0.
    """
    num_alts = len(binaryRelations)
    utilities = []
    for cand in decision:
        # Grow the set of alternatives that (transitively) beat this
        # candidate, starting from its 0-based index.
        reachable = [cand - 1]
        pos = 0
        while pos < len(reachable):
            current = reachable[pos]
            for challenger in range(num_alts):
                if challenger == current:
                    continue
                if binaryRelations[challenger][current] > 0 and challenger not in reachable:
                    reachable.append(challenger)
            pos += 1
        if len(reachable) <= self.k:
            if self.isLoss == False:
                utilities.append(1.0)
            elif self.isLoss == True:
                utilities.append(-1.0)
        else:
            utilities.append(0.0)
    return utilities
10,010
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L140-L174
[ "def", "_unbind_topics", "(", "self", ",", "topics", ")", ":", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "status", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "tracing", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "streaming", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "response", ")" ]
Return username and password for the KM3NeT WebDB .
def db_credentials(self):
    """Return username and password for the KM3NeT WebDB."""
    try:
        credentials = (
            self.config.get('DB', 'username'),
            self.config.get('DB', 'password'),
        )
    except Error:
        # Config has no DB credentials: fall back to interactive prompt.
        credentials = (
            input("Please enter your KM3NeT DB username: "),
            getpass.getpass("Password: "),
        )
    return credentials
10,011
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/config.py#L104-L112
[ "def", "addStampAnnot", "(", "self", ",", "rect", ",", "stamp", "=", "0", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_addStampAnnot", "(", "self", ",", "rect", ",", "stamp", ")", "if", "not", "val", ":", "return", "val", ".", "thisown", "=", "True", "val", ".", "parent", "=", "weakref", ".", "proxy", "(", "self", ")", "self", ".", "_annot_refs", "[", "id", "(", "val", ")", "]", "=", "val", "return", "val" ]
Prompts the user to input a local path .
def get_path(src):  # pragma: no cover
    """Prompts the user to input a local path.

    Keeps asking until the given path exists; the warning is shown from
    the second attempt onwards.  Returns the resolved ``Path``.
    """
    warn = False
    while True:
        if warn:
            print(colored('You must provide a path to an existing directory!', 'red'))
        print('You need a local clone or release of (a fork of) '
              'https://github.com/{0}'.format(src))
        answer = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
        if answer and Path(answer).exists():
            return Path(answer).resolve()
        warn = True
10,012
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/__main__.py#L53-L69
[ "def", "body_block_caption_render", "(", "caption_tags", ",", "base_url", "=", "None", ")", ":", "caption_content", "=", "[", "]", "supplementary_material_tags", "=", "[", "]", "for", "block_tag", "in", "remove_doi_paragraph", "(", "caption_tags", ")", ":", "# Note then skip p tags with supplementary-material inside", "if", "raw_parser", ".", "supplementary_material", "(", "block_tag", ")", ":", "for", "supp_tag", "in", "raw_parser", ".", "supplementary_material", "(", "block_tag", ")", ":", "supplementary_material_tags", ".", "append", "(", "supp_tag", ")", "continue", "for", "block_content", "in", "body_block_content_render", "(", "block_tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "caption_content", ".", "append", "(", "block_content", ")", "return", "caption_content", ",", "supplementary_material_tags" ]
Execute all workflows
def execute_all(self):
    """Execute all workflows.

    Runs every online workflow over each of its requested intervals;
    offline workflows are skipped.
    """
    for workflow_id, workflow in self.workflows.items():
        if not workflow.online:
            continue
        for interval in workflow.requested_intervals:
            logging.info("Executing workflow {} over interval {}".format(workflow_id, interval))
            workflow.execute(interval)
10,013
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow_manager.py#L350-L358
[ "def", "vapour_pressure", "(", "Temperature", ",", "element", ")", ":", "if", "element", "==", "\"Rb\"", ":", "Tmelt", "=", "39.30", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.857", "-", "4215.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.312", "-", "4040.0", "/", "Temperature", ")", "# Torr.", "elif", "element", "==", "\"Cs\"", ":", "Tmelt", "=", "28.5", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.711", "-", "3999.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.165", "-", "3830.0", "/", "Temperature", ")", "# Torr.", "else", ":", "s", "=", "str", "(", "element", ")", "s", "+=", "\" is not an element in the database for this function.\"", "raise", "ValueError", "(", "s", ")", "P", "=", "P", "*", "101325.0", "/", "760.0", "# Pascals.", "return", "P" ]
Execute the tool over the given time interval . If an alignment stream is given the output instances will be aligned to this stream
def execute(self, sources, sink, interval, alignment_stream=None):
    """Execute the tool over the given time interval.

    If an alignment stream is given, the output instances will be
    aligned to this stream.

    Parameters
    ----------
    sources :
        Input streams, passed through to ``self._execute``.
    sink :
        Output stream; instances are written via ``sink.writer`` and
        its ``calculated_intervals`` are updated.
    interval : TimeInterval
        The time interval to compute.
    alignment_stream : optional
        Stream to align the output instances to.

    Raises
    ------
    TypeError
        If ``interval`` is not a ``TimeInterval``.
    StreamNotAvailableError
        If the interval ends after the sink channel's
        ``up_to_timestamp``.
    """
    if not isinstance(interval, TimeInterval):
        raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
    # logging.info(self.message(interval))
    if interval.end > sink.channel.up_to_timestamp:
        raise StreamNotAvailableError(sink.channel.up_to_timestamp)
    # Only compute the sub-intervals not already present in the sink.
    required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
    if not required_intervals.is_empty:
        document_count = 0
        for interval in required_intervals:
            for stream_instance in self._execute(
                    sources=sources, alignment_stream=alignment_stream, interval=interval):
                sink.writer(stream_instance)
                document_count += 1
            sink.calculated_intervals += interval
        # Anything still missing at this point indicates an execution error.
        required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
        if not required_intervals.is_empty:
            # raise ToolExecutionError(required_intervals)
            logging.error(
                "{} execution error for time interval {} on stream {}".format(
                    self.name, interval, sink))
        if not document_count:
            logging.debug(
                "{} did not produce any data for time interval {} on stream {}".format(
                    self.name, interval, sink))
        self.write_to_history(interval=interval, tool=self.name, document_count=document_count)
10,014
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/tool.py#L49-L97
[ "def", "as_dict", "(", "self", ")", ":", "return", "{", "k", ":", "unfreeze", "(", "v", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "if", "not", "self", ".", "is_special", "(", "k", ")", "}" ]
Must be overridden by deriving classes must create the stream according to the tool and return its unique identifier stream_id
def create_stream(self, stream_id, sandbox=None):
    """Create the stream according to the tool and return it.

    Raises ``StreamAlreadyExistsError`` when the id is taken and
    ``ValueError`` when a sandbox is requested (memory streams do not
    support sandboxes).
    """
    if stream_id in self.streams:
        raise StreamAlreadyExistsError(
            "Stream with id '{}' already exists".format(stream_id))
    if sandbox is not None:
        raise ValueError("Cannot use sandboxes with memory streams")
    new_stream = Stream(
        channel=self,
        stream_id=stream_id,
        calculated_intervals=None,
        sandbox=None,
    )
    self.streams[stream_id] = new_stream
    self.data[stream_id] = StreamInstanceCollection()
    return new_stream
10,015
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L44-L59
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "private_file", "=", "self", ".", "get_private_file", "(", ")", "if", "not", "self", ".", "can_access_file", "(", "private_file", ")", ":", "return", "HttpResponseForbidden", "(", "'Private storage access denied'", ")", "if", "not", "private_file", ".", "exists", "(", ")", ":", "return", "self", ".", "serve_file_not_found", "(", "private_file", ")", "else", ":", "return", "self", ".", "serve_file", "(", "private_file", ")" ]
Clears all streams in the channel - use with caution!
def purge_all(self, remove_definitions=False):
    """Clears all streams in the channel - use with caution!

    Iterates over a snapshot of the stream ids so purging can mutate
    ``self.streams`` safely.
    """
    stream_ids = list(self.streams.keys())
    for sid in stream_ids:
        self.purge_stream(sid, remove_definition=remove_definitions)
10,016
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L61-L68
[ "def", "append_vector", "(", "self", ",", "name", ",", "vector", ",", "_left", "=", "False", ")", ":", "if", "np", ".", "issubdtype", "(", "vector", ".", "dtype", ",", "np", ".", "integer", ")", ":", "# determine the length we need", "largest", "=", "str", "(", "max", "(", "vector", ".", "max", "(", ")", ",", "vector", ".", "min", "(", ")", ",", "key", "=", "abs", ")", ")", "length", "=", "max", "(", "len", "(", "largest", ")", ",", "min", "(", "7", ",", "len", "(", "name", ")", ")", ")", "# how many spaces we need to represent", "if", "len", "(", "name", ")", ">", "length", ":", "header", "=", "name", "[", ":", "length", "-", "1", "]", "+", "'.'", "else", ":", "header", "=", "name", ".", "rjust", "(", "length", ")", "def", "f", "(", "datum", ")", ":", "return", "str", "(", "getattr", "(", "datum", ",", "name", ")", ")", ".", "rjust", "(", "length", ")", "elif", "np", ".", "issubdtype", "(", "vector", ".", "dtype", ",", "np", ".", "floating", ")", ":", "largest", "=", "np", ".", "format_float_positional", "(", "max", "(", "vector", ".", "max", "(", ")", ",", "vector", ".", "min", "(", ")", ",", "key", "=", "abs", ")", ",", "precision", "=", "6", ",", "trim", "=", "'0'", ")", "length", "=", "max", "(", "len", "(", "largest", ")", ",", "min", "(", "7", ",", "len", "(", "name", ")", ")", ")", "# how many spaces we need to represent", "if", "len", "(", "name", ")", ">", "length", ":", "header", "=", "name", "[", ":", "length", "-", "1", "]", "+", "'.'", "else", ":", "header", "=", "name", ".", "rjust", "(", "length", ")", "def", "f", "(", "datum", ")", ":", "return", "np", ".", "format_float_positional", "(", "getattr", "(", "datum", ",", "name", ")", ",", "precision", "=", "6", ",", "trim", "=", "'0'", ",", ")", ".", "rjust", "(", "length", ")", "else", ":", "length", "=", "7", "if", "len", "(", "name", ")", ">", "length", ":", "header", "=", "name", "[", ":", "length", "-", "1", "]", "+", "'.'", "else", ":", "header", "=", "name", ".", "rjust", "(", "length", ")", 
"def", "f", "(", "datum", ")", ":", "r", "=", "repr", "(", "getattr", "(", "datum", ",", "name", ")", ")", "if", "len", "(", "r", ")", ">", "length", ":", "r", "=", "r", "[", ":", "length", "-", "3", "]", "+", "'...'", "return", "r", ".", "rjust", "(", "length", ")", "self", ".", "append", "(", "header", ",", "f", ",", "_left", "=", "_left", ")" ]
Call this function to ensure that the channel is up to date at the time of timestamp . I . e . all the streams that have been created before or at that timestamp are calculated exactly until up_to_timestamp .
def update_state(self, up_to_timestamp):
    """Call this function to ensure that the channel is up to date at
    the time of timestamp, i.e. all the streams that have been created
    before or at that timestamp are calculated exactly until
    up_to_timestamp.
    """
    for stream in self.streams.values():
        # A fresh TimeIntervals per stream, mirroring the original and
        # avoiding shared mutable state between streams.
        stream.calculated_intervals = TimeIntervals([(MIN_DATE, up_to_timestamp)])
    self.up_to_timestamp = up_to_timestamp
10,017
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L158-L166
[ "def", "pem", "(", "self", ")", ":", "bio", "=", "Membio", "(", ")", "if", "libcrypto", ".", "PEM_write_bio_X509", "(", "bio", ".", "bio", ",", "self", ".", "cert", ")", "==", "0", ":", "raise", "X509Error", "(", "\"error serializing certificate\"", ")", "return", "str", "(", "bio", ")" ]
Compile regex from pattern and pattern_dict
def compile_regex(self, pattern, flags=0):
    """Compile regex from pattern and pattern_dict.

    Repeatedly expands ``%{PATNAME}`` / ``%{PATNAME:subname}``
    references in *pattern* using ``self.pattern_dict`` until none are
    left, then compiles the result.

    Returns the compiled regex, or ``None`` (implicitly) when a
    referenced pattern name is missing from ``self.pattern_dict``.
    """
    # Matches %{patname} or %{patname:subname} and captures the pieces.
    pattern_re = regex.compile('(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})')
    while 1:
        matches = [md.groupdict() for md in pattern_re.finditer(pattern)]
        if len(matches) == 0:
            # No references left: pattern is fully expanded.
            break
        for md in matches:
            if md['patname'] in self.pattern_dict:
                if md['subname']:
                    # TODO error if more than one occurance
                    if '(?P<' in self.pattern_dict[md['patname']]:
                        # this is not part of the original logstash implementation
                        # but it might be useful to be able to replace the
                        # group name used in the pattern
                        repl = regex.sub('\(\?P<(\w+)>', '(?P<%s>' % md['subname'], self.pattern_dict[md['patname']], 1)
                    else:
                        repl = '(?P<%s>%s)' % (md['subname'], self.pattern_dict[md['patname']])
                else:
                    repl = self.pattern_dict[md['patname']]
                # print "Replacing %s with %s" %(md['substr'], repl)
                pattern = pattern.replace(md['substr'], repl)
            else:
                # print('patname not found')
                # maybe missing path entry or missing pattern file?
                return
    # print 'pattern: %s' % pattern
    return regex.compile(pattern, flags)
10,018
https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/pattern.py#L27-L56
[ "def", "cublasDestroy", "(", "handle", ")", ":", "status", "=", "_libcublas", ".", "cublasDestroy_v2", "(", "ctypes", ".", "c_void_p", "(", "handle", ")", ")", "cublasCheckStatus", "(", "status", ")" ]
Load all pattern from all the files in folders
def _load_patterns ( self , folders , pattern_dict = None ) : if pattern_dict is None : pattern_dict = { } for folder in folders : for file in os . listdir ( folder ) : if regex . match ( '^[\w-]+$' , file ) : self . _load_pattern_file ( os . path . join ( folder , file ) , pattern_dict ) return pattern_dict
10,019
https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/pattern.py#L73-L81
[ "def", "post", "(", "self", ",", "uri", ",", "body", "=", "None", ",", "logon_required", "=", "True", ",", "wait_for_completion", "=", "True", ",", "operation_timeout", "=", "None", ")", ":", "try", ":", "return", "self", ".", "_urihandler", ".", "post", "(", "self", ".", "_hmc", ",", "uri", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", "except", "HTTPError", "as", "exc", ":", "raise", "zhmcclient", ".", "HTTPError", "(", "exc", ".", "response", "(", ")", ")", "except", "ConnectionError", "as", "exc", ":", "raise", "zhmcclient", ".", "ConnectionError", "(", "exc", ".", "message", ",", "None", ")" ]
Unpickle file contents .
def load_pkl ( filenames ) : if not isinstance ( filenames , ( list , tuple ) ) : filenames = [ filenames ] times = [ ] for name in filenames : name = str ( name ) with open ( name , 'rb' ) as file : loaded_obj = pickle . load ( file ) if not isinstance ( loaded_obj , Times ) : raise TypeError ( "At least one loaded object is not a Times data object." ) times . append ( loaded_obj ) return times if len ( times ) > 1 else times [ 0 ]
10,020
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/io.py#L170-L193
[ "def", "verify_logout_request", "(", "cls", ",", "logout_request", ",", "ticket", ")", ":", "try", ":", "session_index", "=", "cls", ".", "get_saml_slos", "(", "logout_request", ")", "session_index", "=", "session_index", "[", "0", "]", ".", "text", "if", "session_index", "==", "ticket", ":", "return", "True", "else", ":", "return", "False", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "False" ]
Issue API requests .
async def retrieve ( self , url , * * kwargs ) : try : async with self . websession . request ( 'GET' , url , * * kwargs ) as res : if res . status != 200 : raise Exception ( "Could not retrieve information from API" ) if res . content_type == 'application/json' : return await res . json ( ) return await res . text ( ) except aiohttp . ClientError as err : logging . error ( err )
10,021
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L22-L32
[ "def", "set_temperature", "(", "self", ",", "zone", ",", "temperature", ",", "until", "=", "None", ")", ":", "if", "until", "is", "None", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Hold\"", ",", "\"NextTime\"", ":", "None", "}", "else", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Temporary\"", ",", "\"NextTime\"", ":", "until", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "}", "self", ".", "_set_heat_setpoint", "(", "zone", ",", "data", ")" ]
Convert string to int or float .
def _to_number ( cls , string ) : try : if float ( string ) - int ( string ) == 0 : return int ( string ) return float ( string ) except ValueError : try : return float ( string ) except ValueError : return string
10,022
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L35-L45
[ "def", "_check_lsm_input", "(", "self", ",", "data_var_map_array", ")", ":", "REQUIRED_HMET_VAR_LIST", "=", "[", "'Prcp'", ",", "'Pres'", ",", "'Temp'", ",", "'Clod'", ",", "'RlHm'", ",", "'Drad'", ",", "'Grad'", ",", "'WndS'", "]", "# make sure all required variables exist", "given_hmet_var_list", "=", "[", "]", "for", "gssha_data_var", ",", "lsm_data_var", "in", "data_var_map_array", ":", "gssha_data_hmet_name", "=", "self", ".", "netcdf_attributes", "[", "gssha_data_var", "]", "[", "'hmet_name'", "]", "if", "gssha_data_hmet_name", "in", "given_hmet_var_list", ":", "raise", "ValueError", "(", "\"Duplicate parameter for HMET variable {0}\"", ".", "format", "(", "gssha_data_hmet_name", ")", ")", "else", ":", "given_hmet_var_list", ".", "append", "(", "gssha_data_hmet_name", ")", "for", "REQUIRED_HMET_VAR", "in", "REQUIRED_HMET_VAR_LIST", ":", "if", "REQUIRED_HMET_VAR", "not", "in", "given_hmet_var_list", ":", "raise", "ValueError", "(", "\"ERROR: HMET param is required to continue \"", "\"{0} ...\"", ".", "format", "(", "REQUIRED_HMET_VAR", ")", ")" ]
Retrieve stations .
async def stations ( self ) : data = await self . retrieve ( API_DISTRITS ) Station = namedtuple ( 'Station' , [ 'latitude' , 'longitude' , 'idAreaAviso' , 'idConselho' , 'idDistrito' , 'idRegiao' , 'globalIdLocal' , 'local' ] ) _stations = [ ] for station in data [ 'data' ] : _station = Station ( self . _to_number ( station [ 'latitude' ] ) , self . _to_number ( station [ 'longitude' ] ) , station [ 'idAreaAviso' ] , station [ 'idConcelho' ] , station [ 'idDistrito' ] , station [ 'idRegiao' ] , station [ 'globalIdLocal' ] // 100 * 100 , station [ 'local' ] , ) _stations . append ( _station ) return _stations
10,023
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L47-L74
[ "async", "def", "_catch_response", "(", "self", ",", "h11_connection", ")", ":", "response", "=", "await", "self", ".", "_recv_event", "(", "h11_connection", ")", "resp_data", "=", "{", "'encoding'", ":", "self", ".", "encoding", ",", "'method'", ":", "self", ".", "method", ",", "'status_code'", ":", "response", ".", "status_code", ",", "'reason_phrase'", ":", "str", "(", "response", ".", "reason", ",", "'utf-8'", ")", ",", "'http_version'", ":", "str", "(", "response", ".", "http_version", ",", "'utf-8'", ")", ",", "'headers'", ":", "c_i_dict", "(", "[", "(", "str", "(", "name", ",", "'utf-8'", ")", ",", "str", "(", "value", ",", "'utf-8'", ")", ")", "for", "name", ",", "value", "in", "response", ".", "headers", "]", ")", ",", "'body'", ":", "b''", ",", "'url'", ":", "self", ".", "req_url", "}", "for", "header", "in", "response", ".", "headers", ":", "if", "header", "[", "0", "]", "==", "b'set-cookie'", ":", "try", ":", "resp_data", "[", "'headers'", "]", "[", "'set-cookie'", "]", ".", "append", "(", "str", "(", "header", "[", "1", "]", ",", "'utf-8'", ")", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "resp_data", "[", "'headers'", "]", "[", "'set-cookie'", "]", "=", "[", "str", "(", "header", "[", "1", "]", ",", "'utf-8'", ")", "]", "# check whether we should receive body according to RFC 7230", "# https://tools.ietf.org/html/rfc7230#section-3.3.3", "get_body", "=", "False", "try", ":", "if", "int", "(", "resp_data", "[", "'headers'", "]", "[", "'content-length'", "]", ")", ">", "0", ":", "get_body", "=", "True", "except", "KeyError", ":", "try", ":", "if", "'chunked'", "in", "resp_data", "[", "'headers'", "]", "[", "'transfer-encoding'", "]", ".", "lower", "(", ")", ":", "get_body", "=", "True", "except", "KeyError", ":", "if", "resp_data", "[", "'headers'", "]", ".", "get", "(", "'connection'", ",", "''", ")", ".", "lower", "(", ")", "==", "'close'", ":", "get_body", "=", "True", "if", "get_body", ":", "if", "self", ".", "callback", "is", 
"not", "None", ":", "endof", "=", "await", "self", ".", "_body_callback", "(", "h11_connection", ")", "elif", "self", ".", "stream", ":", "if", "not", "(", "(", "self", ".", "scheme", "==", "self", ".", "initial_scheme", "and", "self", ".", "host", "==", "self", ".", "initial_netloc", ")", "or", "resp_data", "[", "'headers'", "]", "[", "'connection'", "]", ".", "lower", "(", ")", "==", "'close'", ")", ":", "self", ".", "sock", ".", "_active", "=", "False", "resp_data", "[", "'body'", "]", "=", "StreamBody", "(", "h11_connection", ",", "self", ".", "sock", ",", "resp_data", "[", "'headers'", "]", ".", "get", "(", "'content-encoding'", ",", "None", ")", ",", "resp_data", "[", "'encoding'", "]", ")", "self", ".", "streaming", "=", "True", "else", ":", "while", "True", ":", "data", "=", "await", "self", ".", "_recv_event", "(", "h11_connection", ")", "if", "isinstance", "(", "data", ",", "h11", ".", "Data", ")", ":", "resp_data", "[", "'body'", "]", "+=", "data", ".", "data", "elif", "isinstance", "(", "data", ",", "h11", ".", "EndOfMessage", ")", ":", "break", "else", ":", "endof", "=", "await", "self", ".", "_recv_event", "(", "h11_connection", ")", "assert", "isinstance", "(", "endof", ",", "h11", ".", "EndOfMessage", ")", "if", "self", ".", "streaming", ":", "return", "StreamResponse", "(", "*", "*", "resp_data", ")", "return", "Response", "(", "*", "*", "resp_data", ")" ]
Retrieve translation for weather type .
async def weather_type_classe ( self ) : data = await self . retrieve ( url = API_WEATHER_TYPE ) self . weather_type = dict ( ) for _type in data [ 'data' ] : self . weather_type [ _type [ 'idWeatherType' ] ] = _type [ 'descIdWeatherTypePT' ] return self . weather_type
10,024
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L99-L109
[ "def", "frompng", "(", "path", ",", "ext", "=", "'png'", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ",", "labels", "=", "None", ",", "engine", "=", "None", ",", "credentials", "=", "None", ")", ":", "from", "scipy", ".", "misc", "import", "imread", "def", "getarray", "(", "idx_buffer_filename", ")", ":", "idx", ",", "buf", ",", "_", "=", "idx_buffer_filename", "fbuf", "=", "BytesIO", "(", "buf", ")", "yield", "(", "idx", ",", ")", ",", "imread", "(", "fbuf", ")", "return", "frompath", "(", "path", ",", "accessor", "=", "getarray", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ",", "npartitions", "=", "npartitions", ",", "labels", "=", "labels", ",", "engine", "=", "engine", ",", "credentials", "=", "credentials", ")" ]
Retrieve translation for wind type .
async def wind_type_classe ( self ) : data = await self . retrieve ( url = API_WIND_TYPE ) self . wind_type = dict ( ) for _type in data [ 'data' ] : self . wind_type [ int ( _type [ 'classWindSpeed' ] ) ] = _type [ 'descClassWindSpeedDailyPT' ] return self . wind_type
10,025
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L111-L121
[ "def", "get_splicejunction_file", "(", "out_dir", ",", "data", ")", ":", "samplename", "=", "dd", ".", "get_sample_name", "(", "data", ")", "sjfile", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "os", ".", "pardir", ",", "\"{0}SJ.out.tab\"", ")", ".", "format", "(", "samplename", ")", "if", "file_exists", "(", "sjfile", ")", ":", "return", "sjfile", "else", ":", "return", "None" ]
Add the plugin to our set of listeners for each message that it listens to tell it to use our messages Queue for communication and start it up .
def register ( self , plugin ) : for listener in plugin . listeners : self . listeners [ listener ] . add ( plugin ) self . plugins . add ( plugin ) plugin . messenger = self . messages plugin . start ( )
10,026
https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/dispatcher.py#L16-L25
[ "def", "__remove_if_expired", "(", "self", ",", "filename", ")", ":", "if", "not", "self", ".", "duration", ":", "return", "created", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getctime", "(", "filename", ")", ")", "expired", "=", "created", "+", "self", ".", "duration", "if", "expired", "<", "datetime", ".", "datetime", ".", "now", "(", ")", ":", "os", ".", "remove", "(", "filename", ")", "log", ".", "debug", "(", "\"%s expired, deleted\"", ",", "filename", ")" ]
Send APP_START to any plugins that listen for it and loop around waiting for messages and sending them to their listening plugins until it s time to shutdown .
def start ( self ) : self . recieve ( 'APP_START' ) self . alive = True while self . alive : message , payload = self . messages . get ( ) if message == 'APP_STOP' : for plugin in self . plugins : plugin . recieve ( 'SHUTDOWN' ) self . alive = False else : self . recieve ( message , payload )
10,027
https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/dispatcher.py#L27-L41
[ "def", "fetch_and_create_image", "(", "self", ",", "url", ",", "image_title", ")", ":", "context", "=", "{", "\"file_url\"", ":", "url", ",", "\"foreign_title\"", ":", "image_title", ",", "}", "try", ":", "image_file", "=", "requests", ".", "get", "(", "url", ")", "local_image", "=", "Image", "(", "title", "=", "image_title", ",", "file", "=", "ImageFile", "(", "BytesIO", "(", "image_file", ".", "content", ")", ",", "name", "=", "image_title", ")", ")", "local_image", ".", "save", "(", ")", "return", "(", "local_image", ",", "context", ")", "except", "Exception", "as", "e", ":", "context", ".", "update", "(", "{", "\"exception\"", ":", "e", ",", "}", ")", "raise", "ImageCreationFailed", "(", "context", ",", "None", ")" ]
Pick a palette
def choose ( self , palette ) : try : self . _cycler = cycle ( self . colours [ palette ] ) except KeyError : raise KeyError ( "Chose one of the following colour palettes: {0}" . format ( self . available ) )
10,028
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/style/__init__.py#L50-L59
[ "def", "_set_max_value", "(", "self", ",", "max_value", ")", ":", "self", ".", "_external_max_value", "=", "max_value", "# Check that the current value of the parameter is still within the boundaries. If not, issue a warning", "if", "self", ".", "_external_max_value", "is", "not", "None", "and", "self", ".", "value", ">", "self", ".", "_external_max_value", ":", "warnings", ".", "warn", "(", "\"The current value of the parameter %s (%s) \"", "\"was above the new maximum %s.\"", "%", "(", "self", ".", "name", ",", "self", ".", "value", ",", "self", ".", "_external_max_value", ")", ",", "exceptions", ".", "RuntimeWarning", ")", "self", ".", "value", "=", "self", ".", "_external_max_value" ]
Load all available styles
def refresh_styles ( self ) : import matplotlib . pyplot as plt self . colours = { } for style in plt . style . available : try : style_colours = plt . style . library [ style ] [ 'axes.prop_cycle' ] self . colours [ style ] = [ c [ 'color' ] for c in list ( style_colours ) ] except KeyError : continue self . colours [ 'km3pipe' ] = [ "#ff7869" , "#4babe1" , "#96ad3e" , "#e4823d" , "#5d72b2" , "#e2a3c2" , "#fd9844" , "#e480e7" ]
10,029
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/style/__init__.py#L61-L76
[ "def", "check_image_size_incorrect", "(", "self", ")", ":", "last_virtual_address", "=", "0", "last_virtual_size", "=", "0", "section_alignment", "=", "self", ".", "pefile_handle", ".", "OPTIONAL_HEADER", ".", "SectionAlignment", "total_image_size", "=", "self", ".", "pefile_handle", ".", "OPTIONAL_HEADER", ".", "SizeOfImage", "for", "section", "in", "self", ".", "pefile_handle", ".", "sections", ":", "if", "section", ".", "VirtualAddress", ">", "last_virtual_address", ":", "last_virtual_address", "=", "section", ".", "VirtualAddress", "last_virtual_size", "=", "section", ".", "Misc_VirtualSize", "# Just pad the size to be equal to the alignment and check for mismatch", "last_virtual_size", "+=", "section_alignment", "-", "(", "last_virtual_size", "%", "section_alignment", ")", "if", "(", "last_virtual_address", "+", "last_virtual_size", ")", "!=", "total_image_size", ":", "return", "{", "'description'", ":", "'Image size does not match reported size'", ",", "'severity'", ":", "3", ",", "'category'", ":", "'MALFORMED'", "}", "return", "None" ]
Make the connection . Return a file - like object .
def get_file_object ( username , password , utc_start = None , utc_stop = None ) : if not utc_start : utc_start = datetime . now ( ) if not utc_stop : utc_stop = utc_start + timedelta ( days = 1 ) logging . info ( "Downloading schedules for username [%s] in range [%s] to " "[%s]." % ( username , utc_start , utc_stop ) ) replacements = { 'start_time' : utc_start . strftime ( '%Y-%m-%dT%H:%M:%SZ' ) , 'stop_time' : utc_stop . strftime ( '%Y-%m-%dT%H:%M:%SZ' ) } soap_message_xml = ( soap_message_xml_template % replacements ) authinfo = urllib2 . HTTPDigestAuthHandler ( ) authinfo . add_password ( realm , url , username , password ) try : request = urllib2 . Request ( url , soap_message_xml , request_headers ) response = urllib2 . build_opener ( authinfo ) . open ( request ) if response . headers [ 'Content-Encoding' ] == 'gzip' : response = GzipStream ( response ) except : logging . exception ( "Could not acquire connection to Schedules Direct." ) raise return response
10,030
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L51-L81
[ "def", "processReqDuringBatch", "(", "self", ",", "req", ":", "Request", ",", "cons_time", ":", "int", ")", ":", "if", "self", ".", "isMaster", ":", "self", ".", "node", ".", "doDynamicValidation", "(", "req", ")", "self", ".", "node", ".", "applyReq", "(", "req", ",", "cons_time", ")" ]
Parse the data using the connected file - like object .
def process_file_object ( file_obj , importer , progress ) : logging . info ( "Processing schedule data." ) try : handler = XmlCallbacks ( importer , progress ) parser = sax . make_parser ( ) parser . setContentHandler ( handler ) parser . setErrorHandler ( handler ) parser . parse ( file_obj ) except : logging . exception ( "Parse failed." ) raise logging . info ( "Schedule data processed." )
10,031
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L83-L98
[ "def", "cancel_operation", "(", "self", ",", "name", ",", "retry", "=", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "gapic_v1", ".", "method", ".", "DEFAULT", ")", ":", "# Create the request object.", "request", "=", "operations_pb2", ".", "CancelOperationRequest", "(", "name", "=", "name", ")", "self", ".", "_cancel_operation", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ")" ]
A utility function to marry the connecting and reading functions .
def parse_schedules ( username , password , importer , progress , utc_start = None , utc_stop = None ) : file_obj = get_file_object ( username , password , utc_start , utc_stop ) process_file_object ( file_obj , importer , progress )
10,032
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L100-L105
[ "def", "unset_quota_volume", "(", "name", ",", "path", ")", ":", "cmd", "=", "'volume quota {0}'", ".", "format", "(", "name", ")", "if", "path", ":", "cmd", "+=", "' remove {0}'", ".", "format", "(", "path", ")", "if", "not", "_gluster", "(", "cmd", ")", ":", "return", "False", "return", "True" ]
Concatenate KM3HDF5 files via pipeline .
def km3h5concat ( input_files , output_file , n_events = None , * * kwargs ) : from km3pipe import Pipeline # noqa from km3pipe . io import HDF5Pump , HDF5Sink # noqa pipe = Pipeline ( ) pipe . attach ( HDF5Pump , filenames = input_files , * * kwargs ) pipe . attach ( StatusBar , every = 250 ) pipe . attach ( HDF5Sink , filename = output_file , * * kwargs ) pipe . drain ( n_events )
10,033
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/km3h5concat.py#L32-L41
[ "def", "is_orthogonal", "(", "matrix", ":", "np", ".", "ndarray", ",", "*", ",", "rtol", ":", "float", "=", "1e-5", ",", "atol", ":", "float", "=", "1e-8", ")", "->", "bool", ":", "return", "(", "matrix", ".", "shape", "[", "0", "]", "==", "matrix", ".", "shape", "[", "1", "]", "and", "np", ".", "all", "(", "np", ".", "imag", "(", "matrix", ")", "==", "0", ")", "and", "np", ".", "allclose", "(", "matrix", ".", "dot", "(", "matrix", ".", "T", ")", ",", "np", ".", "eye", "(", "matrix", ".", "shape", "[", "0", "]", ")", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")", ")" ]
Retrieve data for given stream and parameters or None if not found
def get_data ( stream , parameters , fmt ) : sds = kp . db . StreamDS ( ) if stream not in sds . streams : log . error ( "Stream '{}' not found in the database." . format ( stream ) ) return params = { } if parameters : for parameter in parameters : if '=' not in parameter : log . error ( "Invalid parameter syntax '{}'\n" "The correct syntax is 'parameter=value'" . format ( parameter ) ) continue key , value = parameter . split ( '=' ) params [ key ] = value data = sds . get ( stream , fmt , * * params ) if data is not None : with pd . option_context ( 'display.max_rows' , None , 'display.max_columns' , None ) : print ( data ) else : sds . help ( stream )
10,034
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L56-L80
[ "def", "assign_candidate", "(", "self", ",", "verse", ":", "Verse", ",", "candidate", ":", "str", ")", "->", "Verse", ":", "verse", ".", "scansion", "=", "candidate", "verse", ".", "valid", "=", "True", "verse", ".", "accented", "=", "self", ".", "formatter", ".", "merge_line_scansion", "(", "verse", ".", "original", ",", "verse", ".", "scansion", ")", "return", "verse" ]
Show a short list of available streams .
def available_streams ( ) : sds = kp . db . StreamDS ( ) print ( "Available streams: " ) print ( ', ' . join ( sorted ( sds . streams ) ) )
10,035
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L83-L87
[ "def", "_initialize_mtf_dimension_name_to_size_gcd", "(", "self", ",", "mtf_graph", ")", ":", "mtf_dimension_name_to_size_gcd", "=", "{", "}", "for", "mtf_operation", "in", "mtf_graph", ".", "operations", ":", "for", "mtf_tensor", "in", "mtf_operation", ".", "outputs", ":", "for", "mtf_dimension", "in", "mtf_tensor", ".", "shape", ".", "dims", ":", "mtf_dimension_name_to_size_gcd", "[", "mtf_dimension", ".", "name", "]", "=", "fractions", ".", "gcd", "(", "mtf_dimension_name_to_size_gcd", ".", "get", "(", "mtf_dimension", ".", "name", ",", "mtf_dimension", ".", "size", ")", ",", "mtf_dimension", ".", "size", ")", "return", "mtf_dimension_name_to_size_gcd" ]
Reads the CSV file and uploads its contents to the runsummary table
def upload_runsummary ( csv_filename , dryrun = False ) : print ( "Checking '{}' for consistency." . format ( csv_filename ) ) if not os . path . exists ( csv_filename ) : log . critical ( "{} -> file not found." . format ( csv_filename ) ) return try : df = pd . read_csv ( csv_filename , sep = '\t' ) except pd . errors . EmptyDataError as e : log . error ( e ) return cols = set ( df . columns ) if not REQUIRED_COLUMNS . issubset ( cols ) : log . error ( "Missing columns: {}." . format ( ', ' . join ( str ( c ) for c in REQUIRED_COLUMNS - cols ) ) ) return parameters = cols - REQUIRED_COLUMNS if len ( parameters ) < 1 : log . error ( "No parameter columns found." ) return if len ( df ) == 0 : log . critical ( "Empty dataset." ) return print ( "Found data for parameters: {}." . format ( ', ' . join ( str ( c ) for c in parameters ) ) ) print ( "Converting CSV data into JSON" ) if dryrun : log . warn ( "Dryrun: adding 'TEST_' prefix to parameter names" ) prefix = "TEST_" else : prefix = "" data = convert_runsummary_to_json ( df , prefix = prefix ) print ( "We have {:.3f} MB to upload." . format ( len ( data ) / 1024 ** 2 ) ) print ( "Requesting database session." ) db = kp . db . DBManager ( ) # noqa if kp . db . we_are_in_lyon ( ) : session_cookie = "sid=_kmcprod_134.158_lyo7783844001343100343mcprod1223user" # noqa else : session_cookie = kp . config . Config ( ) . get ( 'DB' , 'session_cookie' ) if session_cookie is None : raise SystemExit ( "Could not restore DB session." ) log . debug ( "Using the session cookie: {}" . format ( session_cookie ) ) cookie_key , sid = session_cookie . split ( '=' ) print ( "Uploading the data to the database." ) r = requests . post ( RUNSUMMARY_URL , cookies = { cookie_key : sid } , files = { 'datafile' : data } ) if r . status_code == 200 : log . debug ( "POST request status code: {}" . format ( r . status_code ) ) print ( "Database response:" ) db_answer = json . loads ( r . text ) for key , value in db_answer . 
items ( ) : print ( " -> {}: {}" . format ( key , value ) ) if db_answer [ 'Result' ] == 'OK' : print ( "Upload successful." ) else : log . critical ( "Something went wrong." ) else : log . error ( "POST request status code: {}" . format ( r . status_code ) ) log . critical ( "Something went wrong..." ) return
10,036
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L90-L162
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
Convert a Pandas DataFrame with runsummary to JSON for DB upload
def convert_runsummary_to_json ( df , comment = 'Uploaded via km3pipe.StreamDS' , prefix = 'TEST_' ) : data_field = [ ] comment += ", by {}" . format ( getpass . getuser ( ) ) for det_id , det_data in df . groupby ( 'det_id' ) : runs_field = [ ] data_field . append ( { "DetectorId" : det_id , "Runs" : runs_field } ) for run , run_data in det_data . groupby ( 'run' ) : parameters_field = [ ] runs_field . append ( { "Run" : int ( run ) , "Parameters" : parameters_field } ) parameter_dict = { } for row in run_data . itertuples ( ) : for parameter_name in run_data . columns : if parameter_name in REQUIRED_COLUMNS : continue if parameter_name not in parameter_dict : entry = { 'Name' : prefix + parameter_name , 'Data' : [ ] } parameter_dict [ parameter_name ] = entry data_value = getattr ( row , parameter_name ) try : data_value = float ( data_value ) except ValueError as e : log . critical ( "Data values has to be floats!" ) raise ValueError ( e ) value = { 'S' : str ( getattr ( row , 'source' ) ) , 'D' : data_value } parameter_dict [ parameter_name ] [ 'Data' ] . append ( value ) for parameter_data in parameter_dict . values ( ) : parameters_field . append ( parameter_data ) data_to_upload = { "Comment" : comment , "Data" : data_field } file_data_to_upload = json . dumps ( data_to_upload ) return file_data_to_upload
10,037
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L165-L203
[ "def", "volume_delete", "(", "self", ",", "name", ")", ":", "if", "self", ".", "volume_conn", "is", "None", ":", "raise", "SaltCloudSystemExit", "(", "'No cinder endpoint available'", ")", "nt_ks", "=", "self", ".", "volume_conn", "try", ":", "volume", "=", "self", ".", "volume_show", "(", "name", ")", "except", "KeyError", "as", "exc", ":", "raise", "SaltCloudSystemExit", "(", "'Unable to find {0} volume: {1}'", ".", "format", "(", "name", ",", "exc", ")", ")", "if", "volume", "[", "'status'", "]", "==", "'deleted'", ":", "return", "volume", "response", "=", "nt_ks", ".", "volumes", ".", "delete", "(", "volume", "[", "'id'", "]", ")", "return", "volume" ]
Given a order vector V and a proposed order vector W calculate the acceptance ratio for changing to W when using MCMC .
def calcAcceptanceRatio ( self , V , W ) : acceptanceRatio = 1.0 for comb in itertools . combinations ( V , 2 ) : #Check if comb[0] is ranked before comb[1] in V and W vIOverJ = 1 wIOverJ = 1 if V . index ( comb [ 0 ] ) > V . index ( comb [ 1 ] ) : vIOverJ = 0 if W . index ( comb [ 0 ] ) > W . index ( comb [ 1 ] ) : wIOverJ = 0 acceptanceRatio = acceptanceRatio * self . phi ** ( self . wmg [ comb [ 0 ] ] [ comb [ 1 ] ] * ( vIOverJ - wIOverJ ) ) return acceptanceRatio
10,038
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L34-L62
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
Generate the next sample by randomly flipping two adjacent candidates .
def getNextSample ( self , V ) : # Select a random alternative in V to switch with its adacent alternatives. randPos = random . randint ( 0 , len ( V ) - 2 ) W = copy . deepcopy ( V ) d = V [ randPos ] c = V [ randPos + 1 ] W [ randPos ] = c W [ randPos + 1 ] = d # Check whether we should change to the new ranking. prMW = 1 prMV = 1 prob = min ( 1.0 , ( prMW / prMV ) * pow ( self . phi , self . wmg [ d ] [ c ] ) ) / 2 if random . random ( ) <= prob : V = W return V
10,039
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L66-L89
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
Generate the next sample by randomly shuffling candidates .
def getNextSample ( self , V ) : positions = range ( 0 , len ( self . wmg ) ) randPoss = random . sample ( positions , self . shuffleSize ) flipSet = copy . deepcopy ( randPoss ) randPoss . sort ( ) W = copy . deepcopy ( V ) for j in range ( 0 , self . shuffleSize ) : W [ randPoss [ j ] ] = V [ flipSet [ j ] ] # Check whether we should change to the new ranking. prMW = 1.0 prMV = 1.0 acceptanceRatio = self . calcAcceptanceRatio ( V , W ) prob = min ( 1.0 , ( prMW / prMV ) * acceptanceRatio ) if random . random ( ) <= prob : V = W return V
10,040
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L98-L121
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
We generate a new ranking based on a Mallows - based jumping distribution . The algorithm is described in Bayesian Ordinal Peer Grading by Raman and Joachims .
def getNextSample ( self , V ) : phi = self . phi wmg = self . wmg W = [ ] W . append ( V [ 0 ] ) for j in range ( 2 , len ( V ) + 1 ) : randomSelect = random . random ( ) threshold = 0.0 denom = 1.0 for k in range ( 1 , j ) : denom = denom + phi ** k for k in range ( 1 , j + 1 ) : numerator = phi ** ( j - k ) threshold = threshold + numerator / denom if randomSelect <= threshold : W . insert ( k - 1 , V [ j - 1 ] ) break # Check whether we should change to the new ranking. acceptanceRatio = self . calcAcceptanceRatio ( V , W ) prob = min ( 1.0 , acceptanceRatio ) if random . random ( ) <= prob : V = W return V
10,041
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L125-L157
[ "def", "on_end_validation", "(", "self", ",", "event", ")", ":", "self", ".", "Enable", "(", ")", "self", ".", "Show", "(", ")", "self", ".", "magic_gui_frame", ".", "Destroy", "(", ")" ]
Given a ranking over the candidates generate a new ranking by assigning each candidate at position i a Plakett - Luce weight of phi^i and draw a new ranking .
def getNextSample ( self , V ) : W , WProb = self . drawRankingPlakettLuce ( V ) VProb = self . calcProbOfVFromW ( V , W ) acceptanceRatio = self . calcAcceptanceRatio ( V , W ) prob = min ( 1.0 , acceptanceRatio * ( VProb / WProb ) ) if random . random ( ) <= prob : V = W return V
10,042
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L191-L206
[ "def", "compute_motor_command", "(", "self", ",", "m_ag", ")", ":", "m_env", "=", "bounds_min_max", "(", "m_ag", ",", "self", ".", "conf", ".", "m_mins", ",", "self", ".", "conf", ".", "m_maxs", ")", "return", "m_env" ]
Returns a vector that contains the probabily of an item being from each position . We say that every item in a order vector is drawn with weight phi^i where i is its position .
def calcDrawingProbs ( self ) : wmg = self . wmg phi = self . phi # We say the weight of the candidate in position i is phi^i. weights = [ ] for i in range ( 0 , len ( wmg . keys ( ) ) ) : weights . append ( phi ** i ) # Calculate the probabilty that an item at each weight is drawn. totalWeight = sum ( weights ) for i in range ( 0 , len ( wmg . keys ( ) ) ) : weights [ i ] = weights [ i ] / totalWeight return weights
10,043
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L208-L227
[ "def", "_diff", "(", "self", ",", "cursor", ",", "tokenizer", ",", "output_fh", ")", ":", "temp_path", "=", "self", ".", "_csv_temp", "(", "cursor", ",", "constants", ".", "QUERY_FIELDNAMES", ")", "output_fh", "=", "self", ".", "_reduce_diff_results", "(", "temp_path", ",", "tokenizer", ",", "output_fh", ")", "try", ":", "os", ".", "remove", "(", "temp_path", ")", "except", "OSError", "as", "e", ":", "self", ".", "_logger", ".", "error", "(", "'Failed to remove temporary file containing '", "'unreduced results: {}'", ".", "format", "(", "e", ")", ")", "return", "output_fh" ]
Given an order vector over the candidates draw candidates to generate a new order vector .
def drawRankingPlakettLuce ( self , rankList ) : probs = self . plakettLuceProbs numCands = len ( rankList ) newRanking = [ ] remainingCands = copy . deepcopy ( rankList ) probsCopy = copy . deepcopy ( self . plakettLuceProbs ) totalProb = sum ( probs ) # We will use prob to iteratively calculate the probabilty that we draw the order vector # that we end up drawing. prob = 1.0 while ( len ( newRanking ) < numCands ) : # We generate a random number from 0 to 1, and use it to select a candidate. rand = random . random ( ) threshold = 0.0 for i in range ( 0 , len ( probsCopy ) ) : threshold = threshold + probsCopy [ i ] / totalProb if rand <= threshold : prob = prob * probsCopy [ i ] / totalProb newRanking . append ( remainingCands [ i ] ) remainingCands . pop ( i ) totalProb = totalProb - probsCopy [ i ] probsCopy . pop ( i ) break return newRanking , prob
10,044
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L229-L263
[ "def", "wave_infochunk", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"RIFF\"", ":", "return", "None", "data_size", "=", "file", ".", "read", "(", "4", ")", "# container size", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"WAVE\"", ":", "return", "None", "while", "True", ":", "chunkid", "=", "file", ".", "read", "(", "4", ")", "sizebuf", "=", "file", ".", "read", "(", "4", ")", "if", "len", "(", "sizebuf", ")", "<", "4", "or", "len", "(", "chunkid", ")", "<", "4", ":", "return", "None", "size", "=", "struct", ".", "unpack", "(", "b'<L'", ",", "sizebuf", ")", "[", "0", "]", "if", "chunkid", "[", "0", ":", "3", "]", "!=", "b\"fmt\"", ":", "if", "size", "%", "2", "==", "1", ":", "seek", "=", "size", "+", "1", "else", ":", "seek", "=", "size", "file", ".", "seek", "(", "size", ",", "1", ")", "else", ":", "return", "bytearray", "(", "b\"RIFF\"", "+", "data_size", "+", "b\"WAVE\"", "+", "chunkid", "+", "sizebuf", "+", "file", ".", "read", "(", "size", ")", ")" ]
Given a order vector V and an order vector W calculate the probability that we generate V as our next sample if our current sample was W .
def calcProbOfVFromW ( self , V , W ) : weights = range ( 0 , len ( V ) ) i = 0 for alt in W : weights [ alt - 1 ] = self . phi ** i i = i + 1 # Calculate the probability that we draw V[0], V[1], and so on from W. prob = 1.0 totalWeight = sum ( weights ) for alt in V : prob = prob * weights [ alt - 1 ] / totalWeight totalWeight = totalWeight - weights [ alt - 1 ] return prob
10,045
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L265-L289
[ "def", "reset_all_metadata", "(", "self", ")", ":", "self", ".", "topics_to_brokers", ".", "clear", "(", ")", "self", ".", "topic_partitions", ".", "clear", "(", ")", "self", ".", "topic_errors", ".", "clear", "(", ")", "self", ".", "consumer_group_to_brokers", ".", "clear", "(", ")" ]
Read a 1D Histogram .
def get_hist ( rfile , histname , get_overflow = False ) : import root_numpy as rnp rfile = open_rfile ( rfile ) hist = rfile [ histname ] xlims = np . array ( list ( hist . xedges ( ) ) ) bin_values = rnp . hist2array ( hist , include_overflow = get_overflow ) rfile . close ( ) return bin_values , xlims
10,046
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L31-L40
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Sample the interpolator of a root 2d hist .
def interpol_hist2d ( h2d , oversamp_factor = 10 ) : from rootpy import ROOTError xlim = h2d . bins ( axis = 0 ) ylim = h2d . bins ( axis = 1 ) xn = h2d . nbins ( 0 ) yn = h2d . nbins ( 1 ) x = np . linspace ( xlim [ 0 ] , xlim [ 1 ] , xn * oversamp_factor ) y = np . linspace ( ylim [ 0 ] , ylim [ 1 ] , yn * oversamp_factor ) mat = np . zeros ( ( xn , yn ) ) for xi in range ( xn ) : for yi in range ( yn ) : try : mat [ xi , yi ] = h2d . interpolate ( x [ xi ] , y [ yi ] ) except ROOTError : continue return mat , x , y
10,047
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L70-L91
[ "def", "claim_messages", "(", "self", ",", "queue", ",", "ttl", ",", "grace", ",", "count", "=", "None", ")", ":", "return", "queue", ".", "claim_messages", "(", "ttl", ",", "grace", ",", "count", "=", "count", ")" ]
Create the main window .
def create_window ( size = None , samples = 16 , * , fullscreen = False , title = None , threaded = True ) -> Window : if size is None : width , height = 1280 , 720 else : width , height = size if samples < 0 or ( samples & ( samples - 1 ) ) != 0 : raise Exception ( 'Invalid number of samples: %d' % samples ) window = Window . __new__ ( Window ) window . wnd = glwnd . create_window ( width , height , samples , fullscreen , title , threaded ) return window
10,048
https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L307-L335
[ "def", "normalize", "(", "template_dict", ")", ":", "resources", "=", "template_dict", ".", "get", "(", "RESOURCES_KEY", ",", "{", "}", ")", "for", "logical_id", ",", "resource", "in", "resources", ".", "items", "(", ")", ":", "resource_metadata", "=", "resource", ".", "get", "(", "METADATA_KEY", ",", "{", "}", ")", "asset_path", "=", "resource_metadata", ".", "get", "(", "ASSET_PATH_METADATA_KEY", ")", "asset_property", "=", "resource_metadata", ".", "get", "(", "ASSET_PROPERTY_METADATA_KEY", ")", "ResourceMetadataNormalizer", ".", "_replace_property", "(", "asset_property", ",", "asset_path", ",", "resource", ",", "logical_id", ")" ]
Clear the window .
def clear ( self , red = 0.0 , green = 0.0 , blue = 0.0 , alpha = 0.0 ) -> None : self . wnd . clear ( red , green , blue , alpha )
10,049
https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L59-L64
[ "def", "do_cli", "(", "ctx", ",", "template", ",", "semantic_version", ")", ":", "try", ":", "template_data", "=", "get_template_data", "(", "template", ")", "except", "ValueError", "as", "ex", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "raise", "UserException", "(", "str", "(", "ex", ")", ")", "# Override SemanticVersion in template metadata when provided in command input", "if", "semantic_version", "and", "SERVERLESS_REPO_APPLICATION", "in", "template_data", ".", "get", "(", "METADATA", ",", "{", "}", ")", ":", "template_data", ".", "get", "(", "METADATA", ")", ".", "get", "(", "SERVERLESS_REPO_APPLICATION", ")", "[", "SEMANTIC_VERSION", "]", "=", "semantic_version", "try", ":", "publish_output", "=", "publish_application", "(", "template_data", ")", "click", ".", "secho", "(", "\"Publish Succeeded\"", ",", "fg", "=", "\"green\"", ")", "click", ".", "secho", "(", "_gen_success_message", "(", "publish_output", ")", ")", "except", "InvalidS3UriError", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "raise", "UserException", "(", "\"Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application \"", "\"artifacts to S3 by packaging the template. See more details in {}\"", ".", "format", "(", "SAM_PACKAGE_DOC", ")", ")", "except", "ServerlessRepoError", "as", "ex", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "LOG", ".", "debug", "(", "\"Failed to publish application to serverlessrepo\"", ",", "exc_info", "=", "True", ")", "error_msg", "=", "'{}\\nPlease follow the instructions in {}'", ".", "format", "(", "str", "(", "ex", ")", ",", "SAM_PUBLISH_DOC", ")", "raise", "UserException", "(", "error_msg", ")", "application_id", "=", "publish_output", ".", "get", "(", "'application_id'", ")", "_print_console_link", "(", "ctx", ".", "region", ",", "application_id", ")" ]
Set the window to windowed mode .
def windowed ( self , size ) -> None : width , height = size self . wnd . windowed ( width , height )
10,050
https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L73-L80
[ "def", "get_current_user_info", "(", "anchore_auth", ")", ":", "user_url", "=", "anchore_auth", "[", "'client_info_url'", "]", "+", "'/'", "+", "anchore_auth", "[", "'username'", "]", "user_timeout", "=", "60", "retries", "=", "3", "result", "=", "requests", ".", "get", "(", "user_url", ",", "headers", "=", "{", "'x-anchore-password'", ":", "anchore_auth", "[", "'password'", "]", "}", ")", "if", "result", ".", "status_code", "==", "200", ":", "user_data", "=", "json", ".", "loads", "(", "result", ".", "content", ")", "else", ":", "raise", "requests", ".", "HTTPError", "(", "'Error response from service: {}'", ".", "format", "(", "result", ".", "status_code", ")", ")", "return", "user_data" ]
Extract metadata for a specific product
def product_metadata ( product , dst_folder , counter = None , writers = [ file_writer ] , geometry_check = None ) : if not counter : counter = { 'products' : 0 , 'saved_tiles' : 0 , 'skipped_tiles' : 0 , 'skipped_tiles_paths' : [ ] } s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com' product_meta_link = '{0}/{1}' . format ( s3_url , product [ 'metadata' ] ) product_info = requests . get ( product_meta_link , stream = True ) product_metadata = metadata_to_dict ( product_info . raw ) product_metadata [ 'product_meta_link' ] = product_meta_link counter [ 'products' ] += 1 for tile in product [ 'tiles' ] : tile_info = requests . get ( '{0}/{1}' . format ( s3_url , tile ) ) try : metadata = tile_metadata ( tile_info . json ( ) , copy ( product_metadata ) , geometry_check ) for w in writers : w ( dst_folder , metadata ) logger . info ( 'Saving to disk: %s' % metadata [ 'tile_name' ] ) counter [ 'saved_tiles' ] += 1 except JSONDecodeError : logger . warning ( 'Tile: %s was not found and skipped' % tile ) counter [ 'skipped_tiles' ] += 1 counter [ 'skipped_tiles_paths' ] . append ( tile ) return counter
10,051
https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L58-L93
[ "def", "array", "(", "source_array", ",", "ctx", "=", "None", ",", "dtype", "=", "None", ")", ":", "ctx", "=", "current_context", "(", ")", "if", "ctx", "is", "None", "else", "ctx", "if", "isinstance", "(", "source_array", ",", "NDArray", ")", ":", "assert", "(", "source_array", ".", "stype", "!=", "'default'", ")", ",", "\"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray\"", "# prepare dtype and ctx based on source_array, if not provided", "dtype", "=", "_prepare_default_dtype", "(", "source_array", ",", "dtype", ")", "# if both dtype and ctx are different from source_array, we cannot copy directly", "if", "source_array", ".", "dtype", "!=", "dtype", "and", "source_array", ".", "context", "!=", "ctx", ":", "arr", "=", "empty", "(", "source_array", ".", "stype", ",", "source_array", ".", "shape", ",", "dtype", "=", "dtype", ")", "arr", "[", ":", "]", "=", "source_array", "arr", "=", "arr", ".", "as_in_context", "(", "ctx", ")", "else", ":", "arr", "=", "empty", "(", "source_array", ".", "stype", ",", "source_array", ".", "shape", ",", "dtype", "=", "dtype", ",", "ctx", "=", "ctx", ")", "arr", "[", ":", "]", "=", "source_array", "return", "arr", "elif", "spsp", "and", "isinstance", "(", "source_array", ",", "spsp", ".", "csr", ".", "csr_matrix", ")", ":", "# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy", "# preprocess scipy csr to canonical form", "csr", "=", "source_array", ".", "sorted_indices", "(", ")", "csr", ".", "sum_duplicates", "(", ")", "dtype", "=", "_prepare_default_dtype", "(", "source_array", ",", "dtype", ")", "return", "csr_matrix", "(", "(", "csr", ".", "data", ",", "csr", ".", "indices", ",", "csr", ".", "indptr", ")", ",", "shape", "=", "csr", ".", "shape", ",", "dtype", "=", "dtype", ",", "ctx", "=", "ctx", ")", "elif", "isinstance", "(", "source_array", ",", "(", "np", ".", "ndarray", ",", "np", ".", "generic", ")", ")", ":", "raise", "ValueError", "(", "\"Please use mx.nd.array 
to create an NDArray with source_array of type \"", ",", "type", "(", "source_array", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected source_array type: \"", ",", "type", "(", "source_array", ")", ")" ]
Extra metadata for all products in a specific date
def daily_metadata ( year , month , day , dst_folder , writers = [ file_writer ] , geometry_check = None , num_worker_threads = 1 ) : threaded = False counter = { 'products' : 0 , 'saved_tiles' : 0 , 'skipped_tiles' : 0 , 'skipped_tiles_paths' : [ ] } if num_worker_threads > 1 : threaded = True queue = Queue ( ) # create folders year_dir = os . path . join ( dst_folder , str ( year ) ) month_dir = os . path . join ( year_dir , str ( month ) ) day_dir = os . path . join ( month_dir , str ( day ) ) product_list = get_products_metadata_path ( year , month , day ) logger . info ( 'There are %s products in %s-%s-%s' % ( len ( list ( iterkeys ( product_list ) ) ) , year , month , day ) ) for name , product in iteritems ( product_list ) : product_dir = os . path . join ( day_dir , name ) if threaded : queue . put ( [ product , product_dir , counter , writers , geometry_check ] ) else : counter = product_metadata ( product , product_dir , counter , writers , geometry_check ) if threaded : def worker ( ) : while not queue . empty ( ) : args = queue . get ( ) try : product_metadata ( * args ) except Exception : exc = sys . exc_info ( ) logger . error ( '%s tile skipped due to error: %s' % ( threading . current_thread ( ) . name , exc [ 1 ] . __str__ ( ) ) ) args [ 2 ] [ 'skipped_tiles' ] += 1 queue . task_done ( ) threads = [ ] for i in range ( num_worker_threads ) : t = threading . Thread ( target = worker ) t . start ( ) threads . append ( t ) queue . join ( ) return counter
10,052
https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L102-L158
[ "def", "complex_dtype", "(", "dtype", ",", "default", "=", "None", ")", ":", "dtype", ",", "dtype_in", "=", "np", ".", "dtype", "(", "dtype", ")", ",", "dtype", "if", "is_complex_floating_dtype", "(", "dtype", ")", ":", "return", "dtype", "try", ":", "complex_base_dtype", "=", "TYPE_MAP_R2C", "[", "dtype", ".", "base", "]", "except", "KeyError", ":", "if", "default", "is", "not", "None", ":", "return", "default", "else", ":", "raise", "ValueError", "(", "'no complex counterpart exists for `dtype` {}'", "''", ".", "format", "(", "dtype_repr", "(", "dtype_in", ")", ")", ")", "else", ":", "return", "np", ".", "dtype", "(", "(", "complex_base_dtype", ",", "dtype", ".", "shape", ")", ")" ]
Extra metadata for all products in a date range
def range_metadata ( start , end , dst_folder , num_worker_threads = 0 , writers = [ file_writer ] , geometry_check = None ) : assert isinstance ( start , date ) assert isinstance ( end , date ) delta = end - start dates = [ ] for i in range ( delta . days + 1 ) : dates . append ( start + timedelta ( days = i ) ) days = len ( dates ) total_counter = { 'days' : days , 'products' : 0 , 'saved_tiles' : 0 , 'skipped_tiles' : 0 , 'skipped_tiles_paths' : [ ] } def update_counter ( counter ) : for key in iterkeys ( total_counter ) : if key in counter : total_counter [ key ] += counter [ key ] for d in dates : logger . info ( 'Getting metadata of {0}-{1}-{2}' . format ( d . year , d . month , d . day ) ) update_counter ( daily_metadata ( d . year , d . month , d . day , dst_folder , writers , geometry_check , num_worker_threads ) ) return total_counter
10,053
https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L161-L194
[ "def", "spa", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "graph", "import", "merge_paths", "from", "jcvi", ".", "utils", ".", "cbook", "import", "uniqify", "p", "=", "OptionParser", "(", "spa", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--unmapped\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Include unmapped scaffolds in the list [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "spafiles", "=", "args", "paths", "=", "[", "]", "mappings", "=", "[", "]", "missings", "=", "[", "]", "for", "spafile", "in", "spafiles", ":", "fp", "=", "open", "(", "spafile", ")", "path", "=", "[", "]", "mapping", "=", "[", "]", "missing", "=", "[", "]", "for", "row", "in", "fp", ":", "if", "row", "[", "0", "]", "==", "'#'", "or", "not", "row", ".", "strip", "(", ")", ":", "continue", "atoms", "=", "row", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "atoms", ")", "==", "2", ":", "a", ",", "c2", "=", "atoms", "assert", "a", "==", "\"unmapped\"", "missing", ".", "append", "(", "c2", ")", "continue", "c1", ",", "c2", ",", "orientation", "=", "atoms", "path", ".", "append", "(", "c1", ")", "mapping", ".", "append", "(", "c2", ")", "paths", ".", "append", "(", "uniqify", "(", "path", ")", ")", "mappings", ".", "append", "(", "mapping", ")", "missings", ".", "append", "(", "missing", ")", "ref", "=", "merge_paths", "(", "paths", ")", "print", "(", "\"ref\"", ",", "len", "(", "ref", ")", ",", "\",\"", ".", "join", "(", "ref", ")", ")", "for", "spafile", ",", "mapping", ",", "missing", "in", "zip", "(", "spafiles", ",", "mappings", ",", "missings", ")", ":", "mapping", "=", "[", "x", "for", "x", "in", "mapping", "if", "\"random\"", "not", "in", "x", "]", "mapping", "=", "uniqify", "(", "mapping", ")", "if", "len", "(", 
"mapping", ")", "<", "50", "and", "opts", ".", "unmapped", ":", "mapping", "=", "uniqify", "(", "mapping", "+", "missing", ")", "print", "(", "spafile", ",", "len", "(", "mapping", ")", ",", "\",\"", ".", "join", "(", "mapping", ")", ")" ]
Get a resource on TMDB .
def get_on_tmdb ( uri , * * kwargs ) : kwargs [ 'api_key' ] = app . config [ 'TMDB_API_KEY' ] response = requests_session . get ( ( TMDB_API_URL + uri ) . encode ( 'utf8' ) , params = kwargs ) response . raise_for_status ( ) return json . loads ( response . text )
10,054
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L40-L46
[ "def", "files_comments_delete", "(", "self", ",", "*", ",", "file", ":", "str", ",", "id", ":", "str", ",", "*", "*", "kwargs", ")", "->", "SlackResponse", ":", "kwargs", ".", "update", "(", "{", "\"file\"", ":", "file", ",", "\"id\"", ":", "id", "}", ")", "return", "self", ".", "api_call", "(", "\"files.comments.delete\"", ",", "json", "=", "kwargs", ")" ]
Search a movie on TMDB .
def search ( ) : redis_key = 's_%s' % request . args [ 'query' ] . lower ( ) cached = redis_ro_conn . get ( redis_key ) if cached : return Response ( cached ) else : try : found = get_on_tmdb ( u'/search/movie' , query = request . args [ 'query' ] ) movies = [ ] for movie in found [ 'results' ] : cast = get_on_tmdb ( u'/movie/%s/casts' % movie [ 'id' ] ) year = datetime . strptime ( movie [ 'release_date' ] , '%Y-%m-%d' ) . year if movie [ 'release_date' ] else None movies . append ( { 'title' : movie [ 'original_title' ] , 'directors' : [ x [ 'name' ] for x in cast [ 'crew' ] if x [ 'department' ] == 'Directing' and x [ 'job' ] == 'Director' ] , 'year' : year , '_tmdb_id' : movie [ 'id' ] } ) except requests . HTTPError as err : return Response ( 'TMDB API error: %s' % str ( err ) , status = err . response . status_code ) json_response = json . dumps ( { 'movies' : movies } ) redis_conn . setex ( redis_key , app . config [ 'CACHE_TTL' ] , json_response ) return Response ( json_response )
10,055
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L50-L72
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
Get informations about a movie using its tmdb id .
def get_movie ( tmdb_id ) : redis_key = 'm_%s' % tmdb_id cached = redis_ro_conn . get ( redis_key ) if cached : return Response ( cached ) else : try : details = get_on_tmdb ( u'/movie/%d' % tmdb_id ) cast = get_on_tmdb ( u'/movie/%d/casts' % tmdb_id ) alternative = get_on_tmdb ( u'/movie/%d/alternative_titles' % tmdb_id ) except requests . HTTPError as err : return Response ( 'TMDB API error: %s' % str ( err ) , status = err . response . status_code ) movie = { 'title' : details [ 'original_title' ] , 'score' : details [ 'popularity' ] , 'directors' : [ x [ 'name' ] for x in cast [ 'crew' ] if x [ 'department' ] == 'Directing' and x [ 'job' ] == 'Director' ] , 'writers' : [ x [ 'name' ] for x in cast [ 'crew' ] if x [ 'department' ] == 'Writing' ] , 'cast' : [ x [ 'name' ] for x in cast [ 'cast' ] ] , 'genres' : [ x [ 'name' ] for x in details [ 'genres' ] ] , 'countries' : [ x [ 'name' ] for x in details [ 'production_countries' ] ] , 'tmdb_votes' : int ( round ( details . get ( 'vote_average' , 0 ) * 0.5 ) ) , '_tmdb_id' : tmdb_id } if details . get ( 'release_date' ) : movie [ 'year' ] = datetime . strptime ( details [ 'release_date' ] , '%Y-%m-%d' ) . year if details . get ( 'belongs_to_collection' ) : movie [ 'collection' ] = details [ 'belongs_to_collection' ] [ 'name' ] for alt in alternative [ 'titles' ] : movie [ 'title_%s' % alt [ 'iso_3166_1' ] . lower ( ) ] = alt [ 'title' ] json_response = json . dumps ( { 'movie' : movie } ) redis_conn . setex ( redis_key , app . config [ 'CACHE_TTL' ] , json_response ) return Response ( json_response )
10,056
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L76-L107
[ "def", "run", "(", "self", ")", ":", "# Create the thread pool.", "executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "self", ".", "_config", "[", "'num_workers'", "]", ")", "# Wait to ensure multiple senders can be synchronised.", "now", "=", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "start_time", "=", "(", "(", "now", "+", "29", ")", "//", "30", ")", "*", "30", "self", ".", "_log", ".", "info", "(", "'Waiting until {}'", ".", "format", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_time", ")", ")", ")", "while", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "<", "start_time", ":", "time", ".", "sleep", "(", "0.1", ")", "# Run the event loop.", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "try", ":", "loop", ".", "run_until_complete", "(", "self", ".", "_run_loop", "(", "executor", ")", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "# Send the end of stream message to each stream.", "self", ".", "_log", ".", "info", "(", "'Shutting down, closing streams...'", ")", "tasks", "=", "[", "]", "for", "stream", ",", "item_group", "in", "self", ".", "_streams", ":", "tasks", ".", "append", "(", "stream", ".", "async_send_heap", "(", "item_group", ".", "get_end", "(", ")", ")", ")", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")", "self", ".", "_log", ".", "info", "(", "'... finished.'", ")", "executor", ".", "shutdown", "(", ")" ]
r Provides a way for each connection wrapper to handle error responses .
def _handle_response_error ( self , response , retries , * * kwargs ) : error = self . _convert_response_to_error ( response ) if error is None : return response max_retries = self . _max_retries_for_error ( error ) if max_retries is None or retries >= max_retries : return response backoff = min ( 0.0625 * 2 ** retries , 1.0 ) self . logger . warning ( "Sleeping for %r before retrying failed request..." , backoff ) time . sleep ( backoff ) retries += 1 self . logger . warning ( "Retrying failed request. Attempt %d/%d." , retries , max_retries ) return self . request ( retries = retries , * * kwargs )
10,057
https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/proxy.py#L132-L162
[ "def", "download_storyitem", "(", "self", ",", "item", ":", "StoryItem", ",", "target", ":", "str", ")", "->", "bool", ":", "date_local", "=", "item", ".", "date_local", "dirname", "=", "_PostPathFormatter", "(", "item", ")", ".", "format", "(", "self", ".", "dirname_pattern", ",", "target", "=", "target", ")", "filename", "=", "dirname", "+", "'/'", "+", "self", ".", "format_filename", "(", "item", ",", "target", "=", "target", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "exist_ok", "=", "True", ")", "downloaded", "=", "False", "if", "not", "item", ".", "is_video", "or", "self", ".", "download_video_thumbnails", "is", "True", ":", "url", "=", "item", ".", "url", "downloaded", "=", "self", ".", "download_pic", "(", "filename", "=", "filename", ",", "url", "=", "url", ",", "mtime", "=", "date_local", ")", "if", "item", ".", "is_video", "and", "self", ".", "download_videos", "is", "True", ":", "downloaded", "|=", "self", ".", "download_pic", "(", "filename", "=", "filename", ",", "url", "=", "item", ".", "video_url", ",", "mtime", "=", "date_local", ")", "# Save caption if desired", "metadata_string", "=", "_ArbitraryItemFormatter", "(", "item", ")", ".", "format", "(", "self", ".", "storyitem_metadata_txt_pattern", ")", ".", "strip", "(", ")", "if", "metadata_string", ":", "self", ".", "save_caption", "(", "filename", "=", "filename", ",", "mtime", "=", "item", ".", "date_local", ",", "caption", "=", "metadata_string", ")", "# Save metadata as JSON if desired.", "if", "self", ".", "save_metadata", "is", "not", "False", ":", "self", ".", "save_metadata_json", "(", "filename", ",", "item", ")", "self", ".", "context", ".", "log", "(", ")", "return", "downloaded" ]
Subclasses may override this method in order to influence how errors are parsed from the response .
def _convert_response_to_error ( self , response ) : content_type = response . headers . get ( "content-type" , "" ) if "application/x-protobuf" in content_type : self . logger . debug ( "Decoding protobuf response." ) data = status_pb2 . Status . FromString ( response . content ) status = self . _PB_ERROR_CODES . get ( data . code ) error = { "status" : status } return error elif "application/json" in content_type : self . logger . debug ( "Decoding json response." ) data = response . json ( ) error = data . get ( "error" ) if not error or not isinstance ( error , dict ) : self . logger . warning ( "Unexpected error response: %r" , data ) return None return error self . logger . warning ( "Unexpected response: %r" , response . text ) return None
10,058
https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/proxy.py#L164-L193
[ "def", "fit", "(", "self", ",", "weighted", ",", "show_progress", "=", "True", ")", ":", "self", ".", "similarity", "=", "all_pairs_knn", "(", "weighted", ",", "self", ".", "K", ",", "show_progress", "=", "show_progress", ",", "num_threads", "=", "self", ".", "num_threads", ")", ".", "tocsr", "(", ")", "self", ".", "scorer", "=", "NearestNeighboursScorer", "(", "self", ".", "similarity", ")" ]
Parse the format_string and return prepared data according to the env .
def parse_pattern ( format_string , env , wrapper = lambda x , y : y ) : formatter = Formatter ( ) fields = [ x [ 1 ] for x in formatter . parse ( format_string ) if x [ 1 ] is not None ] prepared_env = { } # Create a prepared environment with only used fields, all as list: for field in fields : # Search for a movie attribute for each alternative field separated # by a pipe sign: for field_alt in ( x . strip ( ) for x in field . split ( '|' ) ) : # Handle default values (enclosed by quotes): if field_alt [ 0 ] in '\'"' and field_alt [ - 1 ] in '\'"' : field_values = field_alt [ 1 : - 1 ] else : field_values = env . get ( field_alt ) if field_values is not None : break else : field_values = [ ] if not isinstance ( field_values , list ) : field_values = [ field_values ] prepared_env [ field ] = wrapper ( field_alt , field_values ) return prepared_env
10,059
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/pattern.py#L7-L38
[ "def", "unregister_service", "(", "self", ",", "registration", ")", ":", "# type: (ServiceRegistration) -> bool", "# Get the Service Reference", "reference", "=", "registration", ".", "get_reference", "(", ")", "# Remove the service from the registry", "svc_instance", "=", "self", ".", "_registry", ".", "unregister", "(", "reference", ")", "# Keep a track of the unregistering reference", "self", ".", "__unregistering_services", "[", "reference", "]", "=", "svc_instance", "# Call the listeners", "event", "=", "ServiceEvent", "(", "ServiceEvent", ".", "UNREGISTERING", ",", "reference", ")", "self", ".", "_dispatcher", ".", "fire_service_event", "(", "event", ")", "# Update the bundle registration information", "bundle", "=", "reference", ".", "get_bundle", "(", ")", "bundle", ".", "_unregistered_service", "(", "registration", ")", "# Remove the unregistering reference", "del", "self", ".", "__unregistering_services", "[", "reference", "]", "return", "True" ]
Create symmetric percentiles with p coverage .
def perc ( arr , p = 95 , * * kwargs ) : offset = ( 100 - p ) / 2 return np . percentile ( arr , ( offset , 100 - offset ) , * * kwargs )
10,060
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L143-L146
[ "def", "_update_kube_events", "(", "self", ",", "instance", ",", "pods_list", ",", "event_items", ")", ":", "node_ip", ",", "node_name", "=", "self", ".", "kubeutil", ".", "get_node_info", "(", ")", "self", ".", "log", ".", "debug", "(", "'Processing events on {} [{}]'", ".", "format", "(", "node_name", ",", "node_ip", ")", ")", "k8s_namespaces", "=", "instance", ".", "get", "(", "'namespaces'", ",", "DEFAULT_NAMESPACES", ")", "if", "not", "isinstance", "(", "k8s_namespaces", ",", "list", ")", ":", "self", ".", "log", ".", "warning", "(", "'Configuration key \"namespaces\" is not a list: fallback to the default value'", ")", "k8s_namespaces", "=", "DEFAULT_NAMESPACES", "# handle old config value", "if", "'namespace'", "in", "instance", "and", "instance", ".", "get", "(", "'namespace'", ")", "not", "in", "(", "None", ",", "'default'", ")", ":", "self", ".", "log", ".", "warning", "(", "'''The 'namespace' parameter is deprecated and will stop being supported starting '''", "'''from 5.13. 
Please use 'namespaces' and/or 'namespace_name_regexp' instead.'''", ")", "k8s_namespaces", ".", "append", "(", "instance", ".", "get", "(", "'namespace'", ")", ")", "if", "self", ".", "k8s_namespace_regexp", ":", "namespaces_endpoint", "=", "'{}/namespaces'", ".", "format", "(", "self", ".", "kubeutil", ".", "kubernetes_api_url", ")", "self", ".", "log", ".", "debug", "(", "'Kubernetes API endpoint to query namespaces: %s'", "%", "namespaces_endpoint", ")", "namespaces", "=", "self", ".", "kubeutil", ".", "retrieve_json_auth", "(", "namespaces_endpoint", ")", ".", "json", "(", ")", "for", "namespace", "in", "namespaces", ".", "get", "(", "'items'", ",", "[", "]", ")", ":", "name", "=", "namespace", ".", "get", "(", "'metadata'", ",", "{", "}", ")", ".", "get", "(", "'name'", ",", "None", ")", "if", "name", "and", "self", ".", "k8s_namespace_regexp", ".", "match", "(", "name", ")", ":", "k8s_namespaces", ".", "append", "(", "name", ")", "k8s_namespaces", "=", "set", "(", "k8s_namespaces", ")", "for", "event", "in", "event_items", ":", "event_ts", "=", "calendar", ".", "timegm", "(", "time", ".", "strptime", "(", "event", ".", "get", "(", "'lastTimestamp'", ")", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", ")", "involved_obj", "=", "event", ".", "get", "(", "'involvedObject'", ",", "{", "}", ")", "# filter events by white listed namespaces (empty namespace belong to the 'default' one)", "if", "involved_obj", ".", "get", "(", "'namespace'", ",", "'default'", ")", "not", "in", "k8s_namespaces", ":", "continue", "tags", "=", "self", ".", "kubeutil", ".", "extract_event_tags", "(", "event", ")", "tags", ".", "extend", "(", "instance", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "title", "=", "'{} {} on {}'", ".", "format", "(", "involved_obj", ".", "get", "(", "'name'", ")", ",", "event", ".", "get", "(", "'reason'", ")", ",", "node_name", ")", "message", "=", "event", ".", "get", "(", "'message'", ")", "source", "=", "event", ".", "get", "(", "'source'", ")", 
"k8s_event_type", "=", "event", ".", "get", "(", "'type'", ")", "alert_type", "=", "K8S_ALERT_MAP", ".", "get", "(", "k8s_event_type", ",", "'info'", ")", "if", "source", ":", "message", "+=", "'\\nSource: {} {}\\n'", ".", "format", "(", "source", ".", "get", "(", "'component'", ",", "''", ")", ",", "source", ".", "get", "(", "'host'", ",", "''", ")", ")", "msg_body", "=", "\"%%%\\n{}\\n```\\n{}\\n```\\n%%%\"", ".", "format", "(", "title", ",", "message", ")", "dd_event", "=", "{", "'timestamp'", ":", "event_ts", ",", "'host'", ":", "node_ip", ",", "'event_type'", ":", "EVENT_TYPE", ",", "'msg_title'", ":", "title", ",", "'msg_text'", ":", "msg_body", ",", "'source_type_name'", ":", "EVENT_TYPE", ",", "'alert_type'", ":", "alert_type", ",", "'event_object'", ":", "'kubernetes:{}'", ".", "format", "(", "involved_obj", ".", "get", "(", "'name'", ")", ")", ",", "'tags'", ":", "tags", ",", "}", "self", ".", "event", "(", "dd_event", ")" ]
Resample an array with replacement .
def resample_1d ( arr , n_out = None , random_state = None ) : if random_state is None : random_state = np . random . RandomState ( ) arr = np . atleast_1d ( arr ) n = len ( arr ) if n_out is None : n_out = n idx = random_state . randint ( 0 , n , size = n ) return arr [ idx ]
10,061
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L149-L167
[ "def", "get_l2vpnfs_table", "(", "self", ")", ":", "l2vpnfs_table", "=", "self", ".", "_global_tables", ".", "get", "(", "RF_L2VPN_FLOWSPEC", ")", "# Lazy initialization of the table.", "if", "not", "l2vpnfs_table", ":", "l2vpnfs_table", "=", "L2VPNFlowSpecTable", "(", "self", ".", "_core_service", ",", "self", ".", "_signal_bus", ")", "self", ".", "_global_tables", "[", "RF_L2VPN_FLOWSPEC", "]", "=", "l2vpnfs_table", "self", ".", "_tables", "[", "(", "None", ",", "RF_L2VPN_FLOWSPEC", ")", "]", "=", "l2vpnfs_table", "return", "l2vpnfs_table" ]
Bootstrap the fit params of a distribution .
def bootstrap_params ( rv_cont , data , n_iter = 5 , * * kwargs ) : fit_res = [ ] for _ in range ( n_iter ) : params = rv_cont . fit ( resample_1d ( data , * * kwargs ) ) fit_res . append ( params ) fit_res = np . array ( fit_res ) return fit_res
10,062
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L170-L187
[ "def", "linear_rref", "(", "A", ",", "b", ",", "Matrix", "=", "None", ",", "S", "=", "None", ")", ":", "if", "Matrix", "is", "None", ":", "from", "sympy", "import", "Matrix", "if", "S", "is", "None", ":", "from", "sympy", "import", "S", "mat_rows", "=", "[", "_map2l", "(", "S", ",", "list", "(", "row", ")", "+", "[", "v", "]", ")", "for", "row", ",", "v", "in", "zip", "(", "A", ",", "b", ")", "]", "aug", "=", "Matrix", "(", "mat_rows", ")", "raug", ",", "pivot", "=", "aug", ".", "rref", "(", ")", "nindep", "=", "len", "(", "pivot", ")", "return", "raug", "[", ":", "nindep", ",", ":", "-", "1", "]", ",", "raug", "[", ":", "nindep", ",", "-", "1", "]" ]
Get mean + quantile range from bootstrapped params .
def param_describe ( params , quant = 95 , axis = 0 ) : par = np . mean ( params , axis = axis ) lo , up = perc ( quant ) p_up = np . percentile ( params , up , axis = axis ) p_lo = np . percentile ( params , lo , axis = axis ) return par , p_lo , p_up
10,063
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L190-L196
[ "def", "delete_session_entity_type", "(", "project_id", ",", "session_id", ",", "entity_type_display_name", ")", ":", "import", "dialogflow_v2", "as", "dialogflow", "session_entity_types_client", "=", "dialogflow", ".", "SessionEntityTypesClient", "(", ")", "session_entity_type_name", "=", "(", "session_entity_types_client", ".", "session_entity_type_path", "(", "project_id", ",", "session_id", ",", "entity_type_display_name", ")", ")", "session_entity_types_client", ".", "delete_session_entity_type", "(", "session_entity_type_name", ")" ]
Bootstrap a distribution fit + get confidence intervals for the params .
def bootstrap_fit ( rv_cont , data , n_iter = 10 , quant = 95 , print_params = True , * * kwargs ) : fit_params = bootstrap_params ( rv_cont , data , n_iter ) par , lo , up = param_describe ( fit_params , quant = quant ) names = param_names ( rv_cont ) maxlen = max ( [ len ( s ) for s in names ] ) print ( "--------------" ) print ( rv_cont . name ) print ( "--------------" ) for i , name in enumerate ( names ) : print ( "{nam:>{fill}}: {mean:+.3f} ∈ " "[{lo:+.3f}, {up:+.3f}] ({q}%)" . format ( nam = name , fill = maxlen , mean = par [ i ] , lo = lo [ i ] , up = up [ i ] , q = quant ) ) out = { 'mean' : par , 'lower limit' : lo , 'upper limit' : up , } return out
10,064
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L199-L241
[ "def", "linear_rref", "(", "A", ",", "b", ",", "Matrix", "=", "None", ",", "S", "=", "None", ")", ":", "if", "Matrix", "is", "None", ":", "from", "sympy", "import", "Matrix", "if", "S", "is", "None", ":", "from", "sympy", "import", "S", "mat_rows", "=", "[", "_map2l", "(", "S", ",", "list", "(", "row", ")", "+", "[", "v", "]", ")", "for", "row", ",", "v", "in", "zip", "(", "A", ",", "b", ")", "]", "aug", "=", "Matrix", "(", "mat_rows", ")", "raug", ",", "pivot", "=", "aug", ".", "rref", "(", ")", "nindep", "=", "len", "(", "pivot", ")", "return", "raug", "[", ":", "nindep", ",", ":", "-", "1", "]", ",", "raug", "[", ":", "nindep", ",", "-", "1", "]" ]
Draw Random Variates .
def rvs ( self , * args , * * kwargs ) : # TODO REVERSE THIS FUCK PYTHON2 size = kwargs . pop ( 'size' , 1 ) random_state = kwargs . pop ( 'size' , None ) # don't ask me why it uses `self._size` return self . _kde . sample ( n_samples = size , random_state = random_state )
10,065
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L99-L111
[ "def", "delete_entity", "(", "self", ",", "entity_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/entity/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", "=", "api_path", ",", ")" ]
Entry point when running as script from commandline .
def main ( ) : from docopt import docopt args = docopt ( __doc__ ) infile = args [ 'INFILE' ] outfile = args [ 'OUTFILE' ] i3extract ( infile , outfile )
10,066
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/i3shower2hdf5.py#L348-L354
[ "def", "addcommenttomergerequest", "(", "self", ",", "project_id", ",", "mergerequest_id", ",", "note", ")", ":", "request", "=", "requests", ".", "post", "(", "'{0}/{1}/merge_request/{2}/comments'", ".", "format", "(", "self", ".", "projects_url", ",", "project_id", ",", "mergerequest_id", ")", ",", "data", "=", "{", "'note'", ":", "note", "}", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "self", ".", "verify_ssl", ",", "auth", "=", "self", ".", "auth", ",", "timeout", "=", "self", ".", "timeout", ")", "return", "request", ".", "status_code", "==", "201" ]
Connect using the configuration given
def connect ( self , server_config ) : if 'connection_string' in server_config : self . client = pymongo . MongoClient ( server_config [ 'connection_string' ] ) self . db = self . client [ server_config [ 'db' ] ] else : self . client = pymongo . MongoClient ( server_config [ 'host' ] , server_config [ 'port' ] , tz_aware = self . get_config_value ( 'tz_aware' , True ) ) self . db = self . client [ server_config [ 'db' ] ] if ( 'authentication_database' in server_config and server_config [ 'authentication_database' ] ) : self . db . authenticate ( server_config [ 'username' ] , server_config [ 'password' ] , source = server_config [ 'authentication_database' ] ) else : if 'username' in server_config : if 'password' in server_config : self . db . authenticate ( server_config [ 'username' ] , server_config [ 'password' ] ) else : self . db . authenticate ( server_config [ 'username' ] ) # Mongo Engine connection d = dict ( ( k , v ) for k , v in server_config . items ( ) if k not in [ 'modalities' , 'summaries' ] ) if 'authentication_database' in d : d [ 'authentication_source' ] = d [ 'authentication_database' ] del d [ 'authentication_database' ] self . session = connect ( alias = "hyperstream" , * * d ) # TODO: This sets the default connection of mongoengine, but seems to be a bit of a hack if "default" not in connection . _connections : connection . _connections [ "default" ] = connection . _connections [ "hyperstream" ] connection . _connection_settings [ "default" ] = connection . _connection_settings [ "hyperstream" ]
10,067
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/client.py#L65-L107
[ "def", "run", "(", "self", ")", ":", "# Create the thread pool.", "executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "self", ".", "_config", "[", "'num_workers'", "]", ")", "# Wait to ensure multiple senders can be synchronised.", "now", "=", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "start_time", "=", "(", "(", "now", "+", "29", ")", "//", "30", ")", "*", "30", "self", ".", "_log", ".", "info", "(", "'Waiting until {}'", ".", "format", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_time", ")", ")", ")", "while", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "<", "start_time", ":", "time", ".", "sleep", "(", "0.1", ")", "# Run the event loop.", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "try", ":", "loop", ".", "run_until_complete", "(", "self", ".", "_run_loop", "(", "executor", ")", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "# Send the end of stream message to each stream.", "self", ".", "_log", ".", "info", "(", "'Shutting down, closing streams...'", ")", "tasks", "=", "[", "]", "for", "stream", ",", "item_group", "in", "self", ".", "_streams", ":", "tasks", ".", "append", "(", "stream", ".", "async_send_heap", "(", "item_group", ".", "get_end", "(", ")", ")", ")", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")", "self", ".", "_log", ".", "info", "(", "'... finished.'", ")", "executor", ".", "shutdown", "(", ")" ]
Concatenate HDF5 Files
def ptconcat ( output_file , input_files , overwrite = False ) : filt = tb . Filters ( complevel = 5 , shuffle = True , fletcher32 = True , complib = 'zlib' ) out_tabs = { } dt_file = input_files [ 0 ] log . info ( "Reading data struct '%s'..." % dt_file ) h5struc = tb . open_file ( dt_file , 'r' ) log . info ( "Opening output file '%s'..." % output_file ) if overwrite : outmode = 'w' else : outmode = 'a' h5out = tb . open_file ( output_file , outmode ) for node in h5struc . walk_nodes ( '/' , classname = 'Table' ) : path = node . _v_pathname log . debug ( path ) dtype = node . dtype p , n = os . path . split ( path ) out_tabs [ path ] = h5out . create_table ( p , n , description = dtype , filters = filt , createparents = True ) h5struc . close ( ) for fname in input_files : log . info ( 'Reading %s...' % fname ) h5 = tb . open_file ( fname ) for path , out in out_tabs . items ( ) : tab = h5 . get_node ( path ) out . append ( tab [ : ] ) h5 . close ( ) h5out . close ( )
10,068
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/ptconcat.py#L36-L68
[ "def", "waitForEvent", "(", "self", ",", "event_name", ",", "predicate", ",", "timeout", "=", "DEFAULT_TIMEOUT", ")", ":", "deadline", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "time", ".", "time", "(", ")", "<=", "deadline", ":", "# Calculate the max timeout for the next event rpc call.", "rpc_timeout", "=", "deadline", "-", "time", ".", "time", "(", ")", "if", "rpc_timeout", "<", "0", ":", "break", "# A single RPC call cannot exceed MAX_TIMEOUT.", "rpc_timeout", "=", "min", "(", "rpc_timeout", ",", "MAX_TIMEOUT", ")", "try", ":", "event", "=", "self", ".", "waitAndGet", "(", "event_name", ",", "rpc_timeout", ")", "except", "TimeoutError", ":", "# Ignoring TimeoutError since we need to throw one with a more", "# specific message.", "break", "if", "predicate", "(", "event", ")", ":", "return", "event", "raise", "TimeoutError", "(", "self", ".", "_ad", ",", "'Timed out after %ss waiting for an \"%s\" event that satisfies the '", "'predicate \"%s\".'", "%", "(", "timeout", ",", "event_name", ",", "predicate", ".", "__name__", ")", ")" ]
Calibrate intra DOM PMT time offsets efficiencies and sigmas
def calibrate_dom ( dom_id , data , detector , livetime = None , fit_ang_dist = False , scale_mc_to_data = True , ad_fit_shape = 'pexp' , fit_background = True , ctmin = - 1. ) : if isinstance ( data , str ) : filename = data loaders = { '.h5' : load_k40_coincidences_from_hdf5 , '.root' : load_k40_coincidences_from_rootfile } try : loader = loaders [ os . path . splitext ( filename ) [ 1 ] ] except KeyError : log . critical ( 'File format not supported.' ) raise IOError else : data , livetime = loader ( filename , dom_id ) combs = np . array ( list ( combinations ( range ( 31 ) , 2 ) ) ) angles = calculate_angles ( detector , combs ) cos_angles = np . cos ( angles ) angles = angles [ cos_angles >= ctmin ] data = data [ cos_angles >= ctmin ] combs = combs [ cos_angles >= ctmin ] try : fit_res = fit_delta_ts ( data , livetime , fit_background = fit_background ) rates , means , sigmas , popts , pcovs = fit_res except : return 0 rate_errors = np . array ( [ np . diag ( pc ) [ 2 ] for pc in pcovs ] ) # mean_errors = np.array([np.diag(pc)[0] for pc in pcovs]) scale_factor = None if fit_ang_dist : fit_res = fit_angular_distribution ( angles , rates , rate_errors , shape = ad_fit_shape ) fitted_rates , exp_popts , exp_pcov = fit_res else : mc_fitted_rates = exponential_polinomial ( np . cos ( angles ) , * MC_ANG_DIST ) if scale_mc_to_data : scale_factor = np . mean ( rates [ angles < 1.5 ] ) / np . mean ( mc_fitted_rates [ angles < 1.5 ] ) else : scale_factor = 1. fitted_rates = mc_fitted_rates * scale_factor exp_popts = [ ] exp_pcov = [ ] print ( 'Using angular distribution from Monte Carlo' ) # t0_weights = np.array([0. if a>1. else 1. 
for a in angles]) if not fit_background : minimize_weights = calculate_weights ( fitted_rates , data ) else : minimize_weights = fitted_rates opt_t0s = minimize_t0s ( means , minimize_weights , combs ) opt_sigmas = minimize_sigmas ( sigmas , minimize_weights , combs ) opt_qes = minimize_qes ( fitted_rates , rates , minimize_weights , combs ) corrected_means = correct_means ( means , opt_t0s . x , combs ) corrected_rates = correct_rates ( rates , opt_qes . x , combs ) rms_means , rms_corrected_means = calculate_rms_means ( means , corrected_means ) rms_rates , rms_corrected_rates = calculate_rms_rates ( rates , fitted_rates , corrected_rates ) cos_angles = np . cos ( angles ) return_data = { 'opt_t0s' : opt_t0s , 'opt_qes' : opt_qes , 'data' : data , 'means' : means , 'rates' : rates , 'fitted_rates' : fitted_rates , 'angles' : angles , 'corrected_means' : corrected_means , 'corrected_rates' : corrected_rates , 'rms_means' : rms_means , 'rms_corrected_means' : rms_corrected_means , 'rms_rates' : rms_rates , 'rms_corrected_rates' : rms_corrected_rates , 'gaussian_popts' : popts , 'livetime' : livetime , 'exp_popts' : exp_popts , 'exp_pcov' : exp_pcov , 'scale_factor' : scale_factor , 'opt_sigmas' : opt_sigmas , 'sigmas' : sigmas , 'combs' : combs } return return_data
10,069
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L382-L498
[ "def", "hide", "(", "self", ",", "selections", ")", ":", "if", "'atoms'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'atoms'", "]", "=", "selections", "[", "'atoms'", "]", "self", ".", "on_atom_hidden_changed", "(", ")", "if", "'bonds'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'bonds'", "]", "=", "selections", "[", "'bonds'", "]", "self", ".", "on_bond_hidden_changed", "(", ")", "if", "'box'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'box'", "]", "=", "box_s", "=", "selections", "[", "'box'", "]", "if", "box_s", ".", "mask", "[", "0", "]", ":", "if", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "remove_renderer", "(", "self", ".", "box_renderer", ")", "else", ":", "if", "not", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "add_renderer", "(", "self", ".", "box_renderer", ")", "return", "self", ".", "hidden_state" ]
Load k40 coincidences from hdf5 file
def load_k40_coincidences_from_hdf5 ( filename , dom_id ) : with h5py . File ( filename , 'r' ) as h5f : data = h5f [ '/k40counts/{0}' . format ( dom_id ) ] livetime = data . attrs [ 'livetime' ] data = np . array ( data ) return data , livetime
10,070
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L507-L526
[ "def", "_verify_options", "(", "options", ")", ":", "# sanity check all vals used for bitwise operations later", "bitwise_args", "=", "[", "(", "'level'", ",", "options", "[", "'level'", "]", ")", ",", "(", "'facility'", ",", "options", "[", "'facility'", "]", ")", "]", "bitwise_args", ".", "extend", "(", "[", "(", "'option'", ",", "x", ")", "for", "x", "in", "options", "[", "'options'", "]", "]", ")", "for", "opt_name", ",", "opt", "in", "bitwise_args", ":", "if", "not", "hasattr", "(", "syslog", ",", "opt", ")", ":", "log", ".", "error", "(", "'syslog has no attribute %s'", ",", "opt", ")", "return", "False", "if", "not", "isinstance", "(", "getattr", "(", "syslog", ",", "opt", ")", ",", "int", ")", ":", "log", ".", "error", "(", "'%s is not a valid syslog %s'", ",", "opt", ",", "opt_name", ")", "return", "False", "# Sanity check tag", "if", "'tag'", "in", "options", ":", "if", "not", "isinstance", "(", "options", "[", "'tag'", "]", ",", "six", ".", "string_types", ")", ":", "log", ".", "error", "(", "'tag must be a string'", ")", "return", "False", "if", "len", "(", "options", "[", "'tag'", "]", ")", ">", "32", ":", "log", ".", "error", "(", "'tag size is limited to 32 characters'", ")", "return", "False", "return", "True" ]
Load k40 coincidences from JMonitorK40 ROOT file
def load_k40_coincidences_from_rootfile ( filename , dom_id ) : from ROOT import TFile root_file_monitor = TFile ( filename , "READ" ) dom_name = str ( dom_id ) + ".2S" histo_2d_monitor = root_file_monitor . Get ( dom_name ) data = [ ] for c in range ( 1 , histo_2d_monitor . GetNbinsX ( ) + 1 ) : combination = [ ] for b in range ( 1 , histo_2d_monitor . GetNbinsY ( ) + 1 ) : combination . append ( histo_2d_monitor . GetBinContent ( c , b ) ) data . append ( combination ) weights = { } weights_histo = root_file_monitor . Get ( 'weights_hist' ) try : for i in range ( 1 , weights_histo . GetNbinsX ( ) + 1 ) : # we have to read all the entries, unfortunately weight = weights_histo . GetBinContent ( i ) label = weights_histo . GetXaxis ( ) . GetBinLabel ( i ) weights [ label [ 3 : ] ] = weight dom_weight = weights [ str ( dom_id ) ] except AttributeError : log . info ( "Weights histogram broken or not found, setting weight to 1." ) dom_weight = 1. return np . array ( data ) , dom_weight
10,071
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L529-L566
[ "def", "pages", "(", "self", ")", ":", "# Each page is an iterator of rows. But also has num_items, remaining,", "# and to_dataframe.", "avro_schema", ",", "column_names", "=", "_avro_schema", "(", "self", ".", "_read_session", ")", "for", "block", "in", "self", ".", "_reader", ":", "self", ".", "_status", "=", "block", ".", "status", "yield", "ReadRowsPage", "(", "avro_schema", ",", "column_names", ",", "block", ")" ]
Calculates angles between PMT combinations according to positions in detector_file
def calculate_angles ( detector , combs ) : angles = [ ] pmt_angles = detector . pmt_angles for first , second in combs : angles . append ( kp . math . angle_between ( np . array ( pmt_angles [ first ] ) , np . array ( pmt_angles [ second ] ) ) ) return np . array ( angles )
10,072
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L635-L657
[ "def", "with_fields", "(", "self", ",", "*", "fields", ")", ":", "Unihan", "=", "self", ".", "sql", ".", "base", ".", "classes", ".", "Unihan", "query", "=", "self", ".", "sql", ".", "session", ".", "query", "(", "Unihan", ")", "for", "field", "in", "fields", ":", "query", "=", "query", ".", "filter", "(", "Column", "(", "field", ")", ".", "isnot", "(", "None", ")", ")", "return", "query" ]
Fits angular distribution of rates .
def fit_angular_distribution ( angles , rates , rate_errors , shape = 'pexp' ) : if shape == 'exp' : fit_function = exponential # p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122] if shape == 'pexp' : fit_function = exponential_polinomial # p0 = [0.34921202, 2.8629577] cos_angles = np . cos ( angles ) popt , pcov = optimize . curve_fit ( fit_function , cos_angles , rates ) fitted_rates = fit_function ( cos_angles , * popt ) return fitted_rates , popt , pcov
10,073
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L668-L696
[ "def", "release_address", "(", "self", ",", "public_ip", "=", "None", ",", "allocation_id", "=", "None", ")", ":", "params", "=", "{", "}", "if", "public_ip", "is", "not", "None", ":", "params", "[", "'PublicIp'", "]", "=", "public_ip", "elif", "allocation_id", "is", "not", "None", ":", "params", "[", "'AllocationId'", "]", "=", "allocation_id", "return", "self", ".", "get_status", "(", "'ReleaseAddress'", ",", "params", ",", "verb", "=", "'POST'", ")" ]
Varies t0s to minimize the deviation of the gaussian means from zero .
def minimize_t0s ( means , weights , combs ) : def make_quality_function ( means , weights , combs ) : def quality_function ( t0s ) : sq_sum = 0 for mean , comb , weight in zip ( means , combs , weights ) : sq_sum += ( ( mean - ( t0s [ comb [ 1 ] ] - t0s [ comb [ 0 ] ] ) ) * weight ) ** 2 return sq_sum return quality_function qfunc = make_quality_function ( means , weights , combs ) # t0s = np.zeros(31) t0s = np . random . rand ( 31 ) bounds = [ ( 0 , 0 ) ] + [ ( - 10. , 10. ) ] * 30 opt_t0s = optimize . minimize ( qfunc , t0s , bounds = bounds ) return opt_t0s
10,074
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L699-L728
[ "def", "_parse_path", "(", "self", ",", "path", ")", ":", "handle", ",", "path", "=", "self", ".", "_split_path", "(", "path", ")", "if", "self", ".", "_machine", "is", "not", "None", ":", "handle", "=", "self", ".", "_connect_hive", "(", "handle", ")", "return", "handle", ",", "path" ]
Varies QEs to minimize the deviation of the rates from the fitted_rates .
def minimize_qes ( fitted_rates , rates , weights , combs ) : def make_quality_function ( fitted_rates , rates , weights , combs ) : def quality_function ( qes ) : sq_sum = 0 for fitted_rate , comb , rate , weight in zip ( fitted_rates , combs , rates , weights ) : sq_sum += ( ( rate / qes [ comb [ 0 ] ] / qes [ comb [ 1 ] ] - fitted_rate ) * weight ) ** 2 return sq_sum return quality_function qfunc = make_quality_function ( fitted_rates , rates , weights , combs ) qes = np . ones ( 31 ) bounds = [ ( 0.1 , 2. ) ] * 31 opt_qes = optimize . minimize ( qfunc , qes , bounds = bounds ) return opt_qes
10,075
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L764-L795
[ "def", "get", "(", "self", ",", "block_number", ":", "BlockNumber", ")", "->", "str", ":", "if", "block_number", "in", "self", ".", "mapping", ":", "return", "self", ".", "mapping", "[", "block_number", "]", "block_hash", "=", "self", ".", "web3", ".", "eth", ".", "getBlock", "(", "block_number", ")", "[", "'hash'", "]", "block_hash", "=", "block_hash", ".", "hex", "(", ")", "self", ".", "mapping", "[", "block_number", "]", "=", "block_hash", "return", "block_hash" ]
Applies optimal t0s to gaussians means .
def correct_means ( means , opt_t0s , combs ) : corrected_means = np . array ( [ ( opt_t0s [ comb [ 1 ] ] - opt_t0s [ comb [ 0 ] ] ) - mean for mean , comb in zip ( means , combs ) ] ) return corrected_means
10,076
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L798-L816
[ "def", "unblock", "(", "self", ",", "event", ")", ":", "if", "event", "not", "in", "self", ".", "blockEvents", ":", "return", "self", ".", "blockEvents", "[", "event", "]", ".", "unblock", "(", "event", ")", "del", "self", ".", "blockEvents", "[", "event", "]" ]
Applies optimal qes to rates .
def correct_rates ( rates , opt_qes , combs ) : corrected_rates = np . array ( [ rate / opt_qes [ comb [ 0 ] ] / opt_qes [ comb [ 1 ] ] for rate , comb in zip ( rates , combs ) ] ) return corrected_rates
10,077
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L819-L839
[ "def", "bootstrap_from_webapi", "(", "self", ",", "cellid", "=", "0", ")", ":", "self", ".", "_LOG", ".", "debug", "(", "\"Attempting bootstrap via WebAPI\"", ")", "from", "steam", "import", "webapi", "try", ":", "resp", "=", "webapi", ".", "get", "(", "'ISteamDirectory'", ",", "'GetCMList'", ",", "1", ",", "params", "=", "{", "'cellid'", ":", "cellid", "}", ")", "except", "Exception", "as", "exp", ":", "self", ".", "_LOG", ".", "error", "(", "\"WebAPI boostrap failed: %s\"", "%", "str", "(", "exp", ")", ")", "return", "False", "result", "=", "EResult", "(", "resp", "[", "'response'", "]", "[", "'result'", "]", ")", "if", "result", "!=", "EResult", ".", "OK", ":", "self", ".", "_LOG", ".", "error", "(", "\"GetCMList failed with %s\"", "%", "repr", "(", "result", ")", ")", "return", "False", "serverlist", "=", "resp", "[", "'response'", "]", "[", "'serverlist'", "]", "self", ".", "_LOG", ".", "debug", "(", "\"Recieved %d servers from WebAPI\"", "%", "len", "(", "serverlist", ")", ")", "def", "str_to_tuple", "(", "serveraddr", ")", ":", "ip", ",", "port", "=", "serveraddr", ".", "split", "(", "':'", ")", "return", "str", "(", "ip", ")", ",", "int", "(", "port", ")", "self", ".", "clear", "(", ")", "self", ".", "merge_list", "(", "map", "(", "str_to_tuple", ",", "serverlist", ")", ")", "return", "True" ]
Calculates RMS of means from zero before and after correction
def calculate_rms_means ( means , corrected_means ) : rms_means = np . sqrt ( np . mean ( ( means - 0 ) ** 2 ) ) rms_corrected_means = np . sqrt ( np . mean ( ( corrected_means - 0 ) ** 2 ) ) return rms_means , rms_corrected_means
10,078
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L842-L857
[ "def", "blend_color", "(", "color", ",", "color2", ")", ":", "r1", ",", "g1", ",", "b1", "=", "hex_to_rgb", "(", "color", ")", "r2", ",", "g2", ",", "b2", "=", "hex_to_rgb", "(", "color2", ")", "r3", "=", "int", "(", "0.5", "*", "r1", "+", "0.5", "*", "r2", ")", "g3", "=", "int", "(", "0.5", "*", "g1", "+", "0.5", "*", "g2", ")", "b3", "=", "int", "(", "0.5", "*", "b1", "+", "0.5", "*", "b2", ")", "return", "rgb_to_hex", "(", "(", "r3", ",", "g3", ",", "b3", ")", ")" ]
Calculates RMS of rates from fitted_rates before and after correction
def calculate_rms_rates ( rates , fitted_rates , corrected_rates ) : rms_rates = np . sqrt ( np . mean ( ( rates - fitted_rates ) ** 2 ) ) rms_corrected_rates = np . sqrt ( np . mean ( ( corrected_rates - fitted_rates ) ** 2 ) ) return rms_rates , rms_corrected_rates
10,079
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L860-L875
[ "def", "debug", "(", "self", ")", ":", "url", "=", "'{}/debug/status'", ".", "format", "(", "self", ".", "url", ")", "data", "=", "self", ".", "_get", "(", "url", ")", "return", "data", ".", "json", "(", ")" ]
Add counts to twofold coincidences for a given tmax .
def add_to_twofold_matrix ( times , tdcs , mat , tmax = 10 ) : h_idx = 0 # index of initial hit c_idx = 0 # index of coincident candidate hit n_hits = len ( times ) multiplicity = 0 while h_idx <= n_hits : c_idx = h_idx + 1 if ( c_idx < n_hits ) and ( times [ c_idx ] - times [ h_idx ] <= tmax ) : multiplicity = 2 c_idx += 1 while ( c_idx < n_hits ) and ( times [ c_idx ] - times [ h_idx ] <= tmax ) : c_idx += 1 multiplicity += 1 if multiplicity != 2 : h_idx = c_idx continue c_idx -= 1 h_tdc = tdcs [ h_idx ] c_tdc = tdcs [ c_idx ] h_time = times [ h_idx ] c_time = times [ c_idx ] if h_tdc != c_tdc : dt = int ( c_time - h_time ) if h_tdc > c_tdc : mat [ get_comb_index ( c_tdc , h_tdc ) , - dt + tmax ] += 1 else : mat [ get_comb_index ( h_tdc , c_tdc ) , dt + tmax ] += 1 h_idx = c_idx
10,080
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L885-L926
[ "def", "clean_for_storage", "(", "self", ",", "data", ")", ":", "data", "=", "self", ".", "data_to_unicode", "(", "data", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "k", "in", "dict", "(", "data", ")", ".", "keys", "(", ")", ":", "if", "k", "==", "'_id'", ":", "del", "data", "[", "k", "]", "continue", "if", "'.'", "in", "k", ":", "new_k", "=", "k", ".", "replace", "(", "'.'", ",", "'_'", ")", "data", "[", "new_k", "]", "=", "data", "[", "k", "]", "del", "data", "[", "k", "]", "k", "=", "new_k", "if", "isinstance", "(", "data", "[", "k", "]", ",", "dict", ")", ":", "data", "[", "k", "]", "=", "self", ".", "clean_for_storage", "(", "data", "[", "k", "]", ")", "elif", "isinstance", "(", "data", "[", "k", "]", ",", "list", ")", ":", "data", "[", "k", "]", "=", "[", "self", ".", "clean_for_storage", "(", "item", ")", "for", "item", "in", "data", "[", "k", "]", "]", "return", "data" ]
Reset coincidence counter
def reset ( self ) : self . counts = defaultdict ( partial ( np . zeros , ( 465 , self . tmax * 2 + 1 ) ) ) self . n_timeslices = defaultdict ( int )
10,081
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L247-L250
[ "def", "query", "(", "self", ",", "domain", ")", ":", "result", "=", "{", "}", "try", ":", "result", "=", "self", ".", "pdns", ".", "query", "(", "domain", ")", "except", ":", "self", ".", "error", "(", "'Exception while querying passiveDNS. Check the domain format.'", ")", "# Clean the datetime problems in order to correct the json serializability", "clean_result", "=", "[", "]", "for", "ind", ",", "resultset", "in", "enumerate", "(", "result", ")", ":", "if", "resultset", ".", "get", "(", "'time_first'", ",", "None", ")", ":", "resultset", "[", "'time_first'", "]", "=", "resultset", ".", "get", "(", "'time_first'", ")", ".", "isoformat", "(", "' '", ")", "if", "resultset", ".", "get", "(", "'time_last'", ",", "None", ")", ":", "resultset", "[", "'time_last'", "]", "=", "resultset", ".", "get", "(", "'time_last'", ")", ".", "isoformat", "(", "' '", ")", "clean_result", ".", "append", "(", "resultset", ")", "return", "clean_result" ]
Write coincidence counts into a Python pickle
def dump ( self ) : self . print ( "Dumping data to {}" . format ( self . dump_filename ) ) pickle . dump ( { 'data' : self . counts , 'livetime' : self . get_livetime ( ) } , open ( self . dump_filename , "wb" ) )
10,082
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L283-L289
[ "async", "def", "register", "(", "self", ",", "request", ")", ":", "session", "=", "await", "get_session", "(", "request", ")", "user_id", "=", "session", ".", "get", "(", "'user_id'", ")", "if", "user_id", ":", "return", "redirect", "(", "request", ",", "'timeline'", ")", "error", "=", "None", "form", "=", "None", "if", "request", ".", "method", "==", "'POST'", ":", "form", "=", "await", "request", ".", "post", "(", ")", "user_id", "=", "await", "db", ".", "get_user_id", "(", "self", ".", "mongo", ".", "user", ",", "form", "[", "'username'", "]", ")", "if", "not", "form", "[", "'username'", "]", ":", "error", "=", "'You have to enter a username'", "elif", "not", "form", "[", "'email'", "]", "or", "'@'", "not", "in", "form", "[", "'email'", "]", ":", "error", "=", "'You have to enter a valid email address'", "elif", "not", "form", "[", "'password'", "]", ":", "error", "=", "'You have to enter a password'", "elif", "form", "[", "'password'", "]", "!=", "form", "[", "'password2'", "]", ":", "error", "=", "'The two passwords do not match'", "elif", "user_id", "is", "not", "None", ":", "error", "=", "'The username is already taken'", "else", ":", "await", "self", ".", "mongo", ".", "user", ".", "insert", "(", "{", "'username'", ":", "form", "[", "'username'", "]", ",", "'email'", ":", "form", "[", "'email'", "]", ",", "'pw_hash'", ":", "generate_password_hash", "(", "form", "[", "'password'", "]", ")", "}", ")", "return", "redirect", "(", "request", ",", "'login'", ")", "return", "{", "\"error\"", ":", "error", ",", "\"form\"", ":", "form", "}" ]
Attempts to get an IOOS definition from a list of xml elements
def get_named_by_definition ( cls , element_list , string_def ) : try : return next ( ( st . value for st in element_list if st . definition == string_def ) ) except Exception : return None
10,083
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/describe_sensor.py#L37-L48
[ "def", "check_update", "(", "from_currency", ",", "to_currency", ")", ":", "if", "from_currency", "not", "in", "ccache", ":", "# if currency never get converted before", "ccache", "[", "from_currency", "]", "=", "{", "}", "if", "ccache", "[", "from_currency", "]", ".", "get", "(", "to_currency", ")", "is", "None", ":", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "=", "{", "'last_update'", ":", "0", "}", "last_update", "=", "float", "(", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "[", "'last_update'", "]", ")", "if", "time", ".", "time", "(", ")", "-", "last_update", ">=", "30", "*", "60", ":", "# if last update is more than 30 min ago", "return", "True", "return", "False" ]
Gets a definition given an identifier and where to search for it
def get_ioos_def ( self , ident , elem_type , ont ) : if elem_type == "identifier" : getter_fn = self . system . get_identifiers_by_name elif elem_type == "classifier" : getter_fn = self . system . get_classifiers_by_name else : raise ValueError ( "Unknown element type '{}'" . format ( elem_type ) ) return DescribeSensor . get_named_by_definition ( getter_fn ( ident ) , urljoin ( ont , ident ) )
10,084
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/describe_sensor.py#L50-L60
[ "def", "write_wavefile", "(", "f", ",", "samples", ",", "nframes", "=", "None", ",", "nchannels", "=", "2", ",", "sampwidth", "=", "2", ",", "framerate", "=", "44100", ",", "bufsize", "=", "2048", ")", ":", "if", "nframes", "is", "None", ":", "nframes", "=", "0", "w", "=", "wave", ".", "open", "(", "f", ",", "'wb'", ")", "w", ".", "setparams", "(", "(", "nchannels", ",", "sampwidth", ",", "framerate", ",", "nframes", ",", "'NONE'", ",", "'not compressed'", ")", ")", "max_amplitude", "=", "float", "(", "int", "(", "(", "2", "**", "(", "sampwidth", "*", "8", ")", ")", "/", "2", ")", "-", "1", ")", "# split the samples into chunks (to reduce memory consumption and improve performance)", "for", "chunk", "in", "grouper", "(", "bufsize", ",", "samples", ")", ":", "frames", "=", "b''", ".", "join", "(", "b''", ".", "join", "(", "struct", ".", "pack", "(", "'h'", ",", "int", "(", "max_amplitude", "*", "sample", ")", ")", "for", "sample", "in", "channels", ")", "for", "channels", "in", "chunk", "if", "channels", "is", "not", "None", ")", "w", ".", "writeframesraw", "(", "frames", ")", "w", ".", "close", "(", ")" ]
follow the grammatical patterns to generate a random sentence
def get_sentence ( start = None , depth = 7 ) : if not GRAMMAR : return 'Please set a GRAMMAR file' start = start if start else GRAMMAR . start ( ) if isinstance ( start , Nonterminal ) : productions = GRAMMAR . productions ( start ) if not depth : # time to break the cycle terminals = [ p for p in productions if not isinstance ( start , Nonterminal ) ] if len ( terminals ) : production = terminals production = random . choice ( productions ) sentence = [ ] for piece in production . rhs ( ) : sentence += get_sentence ( start = piece , depth = depth - 1 ) return sentence else : return [ start ]
10,085
https://github.com/mouse-reeve/horoscope-generator/blob/01acf298116745ded5819d348c28a98a7492ccf3/horoscope_generator/HoroscopeGenerator.py#L17-L38
[ "def", "GetExtractionStatusUpdateCallback", "(", "self", ")", ":", "if", "self", ".", "_mode", "==", "self", ".", "MODE_LINEAR", ":", "return", "self", ".", "_PrintExtractionStatusUpdateLinear", "if", "self", ".", "_mode", "==", "self", ".", "MODE_WINDOW", ":", "return", "self", ".", "_PrintExtractionStatusUpdateWindow", "return", "None" ]
fix display formatting of a sentence array
def format_sentence ( sentence ) : for index , word in enumerate ( sentence ) : if word == 'a' and index + 1 < len ( sentence ) and re . match ( r'^[aeiou]' , sentence [ index + 1 ] ) and not re . match ( r'^uni' , sentence [ index + 1 ] ) : sentence [ index ] = 'an' text = ' ' . join ( sentence ) text = '%s%s' % ( text [ 0 ] . upper ( ) , text [ 1 : ] ) text = text . replace ( ' ,' , ',' ) return '%s.' % text
10,086
https://github.com/mouse-reeve/horoscope-generator/blob/01acf298116745ded5819d348c28a98a7492ccf3/horoscope_generator/HoroscopeGenerator.py#L40-L50
[ "def", "getByteStatistic", "(", "self", ",", "wanInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Wan", ".", "getServiceType", "(", "\"getByteStatistic\"", ")", "+", "str", "(", "wanInterfaceId", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "results", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetTotalBytesSent\"", ",", "timeout", "=", "timeout", ")", "results2", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetTotalBytesReceived\"", ",", "timeout", "=", "timeout", ")", "return", "[", "int", "(", "results", "[", "\"NewTotalBytesSent\"", "]", ")", ",", "int", "(", "results2", "[", "\"NewTotalBytesReceived\"", "]", ")", "]" ]
Callback run for each new station
def new_station ( self , _id , callSign , name , affiliate , fccChannelNumber ) : if self . __v_station : # [Station: 11440, WFLX, WFLX, Fox Affiliate, 29] # [Station: 11836, WSCV, WSCV, TELEMUNDO (HBC) Affiliate, 51] # [Station: 11867, TBS, Turner Broadcasting System, Satellite, None] # [Station: 11869, WTCE, WTCE, Independent, 21] # [Station: 11924, WTVX, WTVX, CW Affiliate, 34] # [Station: 11991, WXEL, WXEL, PBS Affiliate, 42] # [Station: 12131, TOON, Cartoon Network, Satellite, None] # [Station: 12444, ESPN2, ESPN2, Sports Satellite, None] # [Station: 12471, WFGC, WFGC, Independent, 61] # [Station: 16046, TVNI, TV Chile Internacional, Latin American Satellite, None] # [Station: 22233, GOAC020, Government Access - GOAC020, Cablecast, None] print ( "[Station: %s, %s, %s, %s, %s]" % ( _id , callSign , name , affiliate , fccChannelNumber ) )
10,087
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L37-L53
[ "def", "summary", "(", "*", "samples", ")", ":", "warnings", ",", "similar", "=", "[", "]", ",", "[", "]", "qsig", "=", "config_utils", ".", "get_program", "(", "\"qsignature\"", ",", "samples", "[", "0", "]", "[", "0", "]", "[", "\"config\"", "]", ")", "if", "not", "qsig", ":", "return", "[", "[", "]", "]", "res_qsig", "=", "config_utils", ".", "get_resources", "(", "\"qsignature\"", ",", "samples", "[", "0", "]", "[", "0", "]", "[", "\"config\"", "]", ")", "jvm_opts", "=", "\" \"", ".", "join", "(", "res_qsig", ".", "get", "(", "\"jvm_opts\"", ",", "[", "\"-Xms750m\"", ",", "\"-Xmx8g\"", "]", ")", ")", "work_dir", "=", "samples", "[", "0", "]", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", "count", "=", "0", "for", "data", "in", "samples", ":", "data", "=", "data", "[", "0", "]", "vcf", "=", "tz", ".", "get_in", "(", "[", "\"summary\"", ",", "\"qc\"", ",", "\"qsignature\"", ",", "\"base\"", "]", ",", "data", ")", "if", "vcf", ":", "count", "+=", "1", "vcf_name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "+", "\".qsig.vcf\"", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"qsignature\"", ")", ")", "if", "not", "os", ".", "path", ".", "lexists", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "vcf_name", ")", ")", ":", "os", ".", "symlink", "(", "vcf", ",", "os", ".", "path", ".", "join", "(", "out_dir", ",", "vcf_name", ")", ")", "if", "count", ">", "0", ":", "qc_out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"qc\"", ",", "\"qsignature\"", ")", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", "qc_out_dir", ",", "\"qsignature.xml\"", ")", "out_ma_file", "=", "os", ".", "path", ".", "join", "(", "qc_out_dir", ",", "\"qsignature.ma\"", ")", "out_warn_file", "=", "os", ".", "path", ".", "join", "(", "qc_out_dir", ",", "\"qsignature.warnings\"", ")", "log", "=", "os", ".", "path", ".", "join", "(", 
"work_dir", ",", "\"qsignature\"", ",", "\"qsig-summary.log\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "samples", "[", "0", "]", "[", "0", "]", ",", "out_file", ")", "as", "file_txt_out", ":", "base_cmd", "=", "(", "\"{qsig} {jvm_opts} \"", "\"org.qcmg.sig.SignatureCompareRelatedSimple \"", "\"-log {log} -dir {out_dir} \"", "\"-o {file_txt_out} \"", ")", "do", ".", "run", "(", "base_cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"qsignature score calculation\"", ")", "error", ",", "warnings", ",", "similar", "=", "_parse_qsignature_output", "(", "out_file", ",", "out_ma_file", ",", "out_warn_file", ",", "samples", "[", "0", "]", "[", "0", "]", ")", "return", "[", "{", "'total samples'", ":", "count", ",", "'similar samples pairs'", ":", "len", "(", "similar", ")", ",", "'warnings samples pairs'", ":", "len", "(", "warnings", ")", ",", "'error samples'", ":", "list", "(", "error", ")", ",", "'out_dir'", ":", "qc_out_dir", "}", "]", "else", ":", "return", "[", "]" ]
Callback run for each new lineup
def new_lineup ( self , name , location , device , _type , postalCode , _id ) : if self . __v_lineup : # [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X] print ( "[Lineup: %s, %s, %s, %s, %s, %s]" % ( name , location , device , _type , postalCode , _id ) )
10,088
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L55-L61
[ "def", "external_metadata", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "if", "datasource_type", "==", "'druid'", ":", "datasource", "=", "ConnectorRegistry", ".", "get_datasource", "(", "datasource_type", ",", "datasource_id", ",", "db", ".", "session", ")", "elif", "datasource_type", "==", "'table'", ":", "database", "=", "(", "db", ".", "session", ".", "query", "(", "Database", ")", ".", "filter_by", "(", "id", "=", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", ".", "one", "(", ")", ")", "Table", "=", "ConnectorRegistry", ".", "sources", "[", "'table'", "]", "datasource", "=", "Table", "(", "database", "=", "database", ",", "table_name", "=", "request", ".", "args", ".", "get", "(", "'table_name'", ")", ",", "schema", "=", "request", ".", "args", ".", "get", "(", "'schema'", ")", "or", "None", ",", ")", "external_metadata", "=", "datasource", ".", "external_metadata", "(", ")", "return", "self", ".", "json_response", "(", "external_metadata", ")" ]
Callback run for each new program genre entry
def new_genre ( self , program , genre , relevance ) : if self . __v_genre : # [Genre: SP002709210000, Sports event, 0] # [Genre: SP002709210000, Basketball, 1] # [Genre: SP002737310000, Sports event, 0] # [Genre: SP002737310000, Basketball, 1] # [Genre: SH016761790000, News, 0] # [Genre: SH016761790000, Talk, 1] # [Genre: SH016761790000, Interview, 2] # [Genre: SH016761790000, Politics, 3] print ( "[Genre: %s, %s, %s]" % ( program , genre , relevance ) )
10,089
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L108-L120
[ "def", "ask_question", "(", "self", ",", "question_text", ",", "question", "=", "None", ")", ":", "if", "question", "is", "not", "None", ":", "q", "=", "question", ".", "to_dict", "(", ")", "else", ":", "q", "=", "WatsonQuestion", "(", "question_text", ")", ".", "to_dict", "(", ")", "r", "=", "requests", ".", "post", "(", "self", ".", "url", "+", "'/question'", ",", "json", "=", "{", "'question'", ":", "q", "}", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'X-SyncTimeout'", ":", "30", "}", ",", "auth", "=", "(", "self", ".", "username", ",", "self", ".", "password", ")", ")", "try", ":", "response_json", "=", "r", ".", "json", "(", ")", "except", "ValueError", ":", "raise", "Exception", "(", "'Failed to parse response JSON'", ")", "return", "WatsonAnswer", "(", "response_json", ")" ]
Submit a job via qsub .
def qsub ( script , job_name , dryrun = False , * args , * * kwargs ) : print ( "Preparing job script..." ) job_string = gen_job ( script = script , job_name = job_name , * args , * * kwargs ) env = os . environ . copy ( ) if dryrun : print ( "This is a dry run! Here is the generated job file, which will " "not be submitted:" ) print ( job_string ) else : print ( "Calling qsub with the generated job script." ) p = subprocess . Popen ( 'qsub -V' , stdin = subprocess . PIPE , env = env , shell = True ) p . communicate ( input = bytes ( job_string . encode ( 'ascii' ) ) )
10,090
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L66-L82
[ "def", "min_renewable_share", "(", "network", ",", "snapshots", ",", "share", "=", "0.72", ")", ":", "renewables", "=", "[", "'wind_onshore'", ",", "'wind_offshore'", ",", "'biomass'", ",", "'solar'", ",", "'run_of_river'", "]", "res", "=", "list", "(", "network", ".", "generators", ".", "index", "[", "network", ".", "generators", ".", "carrier", ".", "isin", "(", "renewables", ")", "]", ")", "total", "=", "list", "(", "network", ".", "generators", ".", "index", ")", "snapshots", "=", "network", ".", "snapshots", "def", "_rule", "(", "m", ")", ":", "\"\"\"\n \"\"\"", "renewable_production", "=", "sum", "(", "m", ".", "generator_p", "[", "gen", ",", "sn", "]", "for", "gen", "in", "res", "for", "sn", "in", "snapshots", ")", "total_production", "=", "sum", "(", "m", ".", "generator_p", "[", "gen", ",", "sn", "]", "for", "gen", "in", "total", "for", "sn", "in", "snapshots", ")", "return", "(", "renewable_production", ">=", "total_production", "*", "share", ")", "network", ".", "model", ".", "min_renewable_share", "=", "Constraint", "(", "rule", "=", "_rule", ")" ]
Generate a job script .
def gen_job ( script , job_name , log_path = 'qlogs' , group = 'km3net' , platform = 'cl7' , walltime = '00:10:00' , vmem = '8G' , fsize = '8G' , shell = None , email = None , send_mail = 'n' , job_array_start = 1 , job_array_stop = None , job_array_step = 1 , irods = False , sps = True , hpss = False , xrootd = False , dcache = False , oracle = False , split_array_logs = False ) : if shell is None : shell = os . environ [ 'SHELL' ] if email is None : email = os . environ [ 'USER' ] + '@km3net.de' if isinstance ( script , Script ) : script = str ( script ) log_path = os . path . join ( os . getcwd ( ) , log_path ) if job_array_stop is not None : job_array_option = "#$ -t {}-{}:{}" . format ( job_array_start , job_array_stop , job_array_step ) else : job_array_option = "#" if split_array_logs : task_name = '_$TASK_ID' else : task_name = '' job_string = JOB_TEMPLATE . format ( script = script , email = email , send_mail = send_mail , log_path = log_path , job_name = job_name , group = group , walltime = walltime , vmem = vmem , fsize = fsize , irods = irods , sps = sps , hpss = hpss , xrootd = xrootd , dcache = dcache , oracle = oracle , shell = shell , platform = platform , job_array_option = job_array_option , task_name = task_name ) return job_string
10,091
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L85-L147
[ "def", "removefromreadergroup", "(", "self", ",", "groupname", ")", ":", "hresult", ",", "hcontext", "=", "SCardEstablishContext", "(", "SCARD_SCOPE_USER", ")", "if", "0", "!=", "hresult", ":", "raise", "EstablishContextException", "(", "hresult", ")", "try", ":", "hresult", "=", "SCardRemoveReaderFromGroup", "(", "hcontext", ",", "self", ".", "name", ",", "groupname", ")", "if", "0", "!=", "hresult", ":", "raise", "RemoveReaderFromGroupException", "(", "hresult", ",", "self", ".", "name", ",", "groupname", ")", "finally", ":", "hresult", "=", "SCardReleaseContext", "(", "hcontext", ")", "if", "0", "!=", "hresult", ":", "raise", "ReleaseContextException", "(", "hresult", ")" ]
Return the environment dict of a loaded Jpp env .
def get_jpp_env ( jpp_dir ) : env = { v [ 0 ] : '' . join ( v [ 1 : ] ) for v in [ l . split ( '=' ) for l in os . popen ( "source {0}/setenv.sh {0} && env" . format ( jpp_dir ) ) . read ( ) . split ( '\n' ) if '=' in l ] } return env
10,092
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L150-L165
[ "def", "_strip_ctype", "(", "name", ",", "ctype", ",", "protocol", "=", "2", ")", ":", "# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')", "try", ":", "name", ",", "ctypestr", "=", "name", ".", "rsplit", "(", "','", ",", "1", ")", "except", "ValueError", ":", "pass", "else", ":", "ctype", "=", "Nds2ChannelType", ".", "find", "(", "ctypestr", ")", ".", "value", "# NDS1 stores channels with trend suffix, so we put it back:", "if", "protocol", "==", "1", "and", "ctype", "in", "(", "Nds2ChannelType", ".", "STREND", ".", "value", ",", "Nds2ChannelType", ".", "MTREND", ".", "value", ")", ":", "name", "+=", "',{0}'", ".", "format", "(", "ctypestr", ")", "return", "name", ",", "ctype" ]
Add an iget command to retrieve a file from iRODS .
def iget ( self , irods_path , attempts = 1 , pause = 15 ) : if attempts > 1 : cmd = """ for i in {{1..{0}}}; do ret=$(iget -v {1} 2>&1) echo $ret if [[ $ret == *"ERROR"* ]]; then echo "Attempt $i failed" else break fi sleep {2}s done """ cmd = lstrip ( cmd ) cmd = cmd . format ( attempts , irods_path , pause ) self . add ( cmd ) else : self . add ( 'iget -v "{}"' . format ( irods_path ) )
10,093
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L198-L225
[ "def", "teardown_websocket", "(", "self", ",", "func", ":", "Callable", ",", "name", ":", "AppOrBlueprintKey", "=", "None", ")", "->", "Callable", ":", "handler", "=", "ensure_coroutine", "(", "func", ")", "self", ".", "teardown_websocket_funcs", "[", "name", "]", ".", "append", "(", "handler", ")", "return", "func" ]
Helper function for two - argument commands
def _add_two_argument_command ( self , command , arg1 , arg2 ) : self . lines . append ( "{} {} {}" . format ( command , arg1 , arg2 ) )
10,094
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L227-L229
[ "async", "def", "restore_default_configuration", "(", "self", ")", ":", "self", ".", "_data", "=", "await", "self", ".", "_handler", ".", "restore_default_configuration", "(", "system_id", "=", "self", ".", "system_id", ")" ]
List all garage door devices .
def get_devices ( self ) : devices = self . make_request ( '["{username}","{password}","info","",""]' . format ( username = self . username , password = self . password ) ) if devices != False : garage_doors = [ ] try : self . apicode = devices . find ( 'apicode' ) . text self . _device_states = { } for doorNum in range ( 1 , 4 ) : door = devices . find ( 'door' + str ( doorNum ) ) doorName = door . find ( 'name' ) . text if doorName : dev = { 'door' : doorNum , 'name' : doorName } for id in [ 'mode' , 'sensor' , 'status' , 'sensorid' , 'temperature' , 'voltage' , 'camera' , 'events' , 'permission' ] : item = door . find ( id ) if item is not None : dev [ id ] = item . text garage_state = door . find ( 'status' ) . text dev [ 'status' ] = self . DOOR_STATE [ garage_state ] self . _device_states [ doorNum ] = self . DOOR_STATE [ garage_state ] garage_doors . append ( dev ) return garage_doors except TypeError as ex : print ( ex ) return False else : return False
10,095
https://github.com/dlbroadfoot/pygogogate2/blob/3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406/pygogogate2/__init__.py#L70-L102
[ "def", "_MergeEventTag", "(", "self", ",", "storage_writer", ",", "attribute_container", ")", ":", "if", "attribute_container", ".", "CONTAINER_TYPE", "!=", "'event_tag'", ":", "return", "event_identifier", "=", "attribute_container", ".", "GetEventIdentifier", "(", ")", "if", "not", "event_identifier", ":", "return", "# Check if the event has already been tagged on a previous occasion,", "# we need to append the event tag to the last stored one.", "stored_event_tag", "=", "self", ".", "_event_tag_index", ".", "GetEventTagByIdentifier", "(", "storage_writer", ",", "event_identifier", ")", "if", "stored_event_tag", ":", "attribute_container", ".", "AddComment", "(", "stored_event_tag", ".", "comment", ")", "attribute_container", ".", "AddLabels", "(", "stored_event_tag", ".", "labels", ")", "self", ".", "_event_tag_index", ".", "SetEventTag", "(", "attribute_container", ")" ]
List only MyQ garage door devices .
def get_status ( self , device_id ) : devices = self . get_devices ( ) if devices != False : for device in devices : if device [ 'door' ] == device_id : return device [ 'status' ] return False
10,096
https://github.com/dlbroadfoot/pygogogate2/blob/3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406/pygogogate2/__init__.py#L105-L114
[ "def", "save_statement", "(", "self", ",", "statement", ")", ":", "if", "not", "isinstance", "(", "statement", ",", "Statement", ")", ":", "statement", "=", "Statement", "(", "statement", ")", "request", "=", "HTTPRequest", "(", "method", "=", "\"POST\"", ",", "resource", "=", "\"statements\"", ")", "if", "statement", ".", "id", "is", "not", "None", ":", "request", ".", "method", "=", "\"PUT\"", "request", ".", "query_params", "[", "\"statementId\"", "]", "=", "statement", ".", "id", "request", ".", "headers", "[", "\"Content-Type\"", "]", "=", "\"application/json\"", "request", ".", "content", "=", "statement", ".", "to_json", "(", "self", ".", "version", ")", "lrs_response", "=", "self", ".", "_send_request", "(", "request", ")", "if", "lrs_response", ".", "success", ":", "if", "statement", ".", "id", "is", "None", ":", "statement", ".", "id", "=", "json", ".", "loads", "(", "lrs_response", ".", "data", ")", "[", "0", "]", "lrs_response", ".", "content", "=", "statement", "return", "lrs_response" ]
Test a sequence for compatibility with CLPA and LingPy .
def analyze ( segments , analysis , lookup = dict ( bipa = { } , dolgo = { } ) ) : # raise a ValueError in case of empty segments/strings if not segments : raise ValueError ( 'Empty sequence.' ) # test if at least one element in `segments` has information # (helps to catch really badly formed input, such as ['\n'] if not [ segment for segment in segments if segment . strip ( ) ] : raise ValueError ( 'No information in the sequence.' ) # build the phonologic and sound class analyses try : bipa_analysis , sc_analysis = [ ] , [ ] for s in segments : a = lookup [ 'bipa' ] . get ( s ) if a is None : a = lookup [ 'bipa' ] . setdefault ( s , BIPA [ s ] ) bipa_analysis . append ( a ) sc = lookup [ 'dolgo' ] . get ( s ) if sc is None : sc = lookup [ 'dolgo' ] . setdefault ( s , BIPA . translate ( s , DOLGO ) ) sc_analysis . append ( sc ) except : # noqa print ( segments ) raise # compute general errors; this loop must take place outside the # following one because the code for computing single errors (either # in `bipa_analysis` or in `soundclass_analysis`) is unnecessary # complicated for sound_bipa , sound_class in zip ( bipa_analysis , sc_analysis ) : if isinstance ( sound_bipa , pyclts . models . UnknownSound ) or sound_class == '?' : analysis . general_errors += 1 # iterate over the segments and analyses, updating counts of occurrences # and specific errors for segment , sound_bipa , sound_class in zip ( segments , bipa_analysis , sc_analysis ) : # update the segment count analysis . segments . update ( [ segment ] ) # add an error if we got an unknown sound, otherwise just append # the `replacements` dictionary if isinstance ( sound_bipa , pyclts . models . UnknownSound ) : analysis . bipa_errors . add ( segment ) else : analysis . replacements [ sound_bipa . source ] . add ( sound_bipa . __unicode__ ( ) ) # update sound class errors, if any if sound_class == '?' : analysis . sclass_errors . add ( segment ) return segments , bipa_analysis , sc_analysis , analysis
10,097
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/transcription.py#L37-L94
[ "def", "extract_string_pairs_in_directory", "(", "directory_path", ",", "extract_func", ",", "filter_func", ")", ":", "result", "=", "{", "}", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "directory_path", ")", ":", "for", "file_name", "in", "filenames", ":", "if", "filter_func", "(", "file_name", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file_name", ")", "try", ":", "extract_func", "(", "result", ",", "file_path", ")", "except", "Exception", "as", "e", ":", "print", "\"Error in file \"", "+", "file_name", "print", "e", "return", "result" ]
Grab most energetic particle from mc_tracks dataframe .
def most_energetic ( df ) : idx = df . groupby ( [ 'event_id' ] ) [ 'energy' ] . transform ( max ) == df [ 'energy' ] return df [ idx ] . reindex ( )
10,098
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/mc.py#L101-L104
[ "def", "findInvariantPartitioning", "(", "self", ")", ":", "symorders", "=", "self", ".", "symorders", "[", ":", "]", "_range", "=", "range", "(", "len", "(", "symorders", ")", ")", "while", "1", ":", "pos", "=", "self", ".", "findLowest", "(", "symorders", ")", "if", "pos", "==", "-", "1", ":", "self", ".", "symorders", "=", "symorders", "return", "for", "i", "in", "_range", ":", "symorders", "[", "i", "]", "=", "symorders", "[", "i", "]", "*", "2", "+", "1", "symorders", "[", "pos", "]", "=", "symorders", "[", "pos", "]", "-", "1", "symorders", "=", "self", ".", "findInvariant", "(", "symorders", ")" ]
Connect to JLigier
def _connect ( self ) : log . debug ( "Connecting to JLigier" ) self . socket = socket . socket ( ) self . socket . connect ( ( self . host , self . port ) )
10,099
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/controlhost.py#L124-L128
[ "def", "thaw", "(", "vault_client", ",", "src_file", ",", "opt", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "src_file", ")", ":", "raise", "aomi", ".", "exceptions", ".", "AomiFile", "(", "\"%s does not exist\"", "%", "src_file", ")", "tmp_dir", "=", "ensure_tmpdir", "(", ")", "zip_file", "=", "thaw_decrypt", "(", "vault_client", ",", "src_file", ",", "tmp_dir", ",", "opt", ")", "archive", "=", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'r'", ")", "for", "archive_file", "in", "archive", ".", "namelist", "(", ")", ":", "archive", ".", "extract", "(", "archive_file", ",", "tmp_dir", ")", "os", ".", "chmod", "(", "\"%s/%s\"", "%", "(", "tmp_dir", ",", "archive_file", ")", ",", "0o640", ")", "LOG", ".", "debug", "(", "\"Extracted %s from archive\"", ",", "archive_file", ")", "LOG", ".", "info", "(", "\"Thawing secrets into %s\"", ",", "opt", ".", "secrets", ")", "config", "=", "get_secretfile", "(", "opt", ")", "Context", ".", "load", "(", "config", ",", "opt", ")", ".", "thaw", "(", "tmp_dir", ")" ]