query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Return the set of HEALPix pixels that cover the given coordinates at resolution nside . Optionally return the set of subpixels within those pixels at resolution nside_subpix
def surveyPixel(lon, lat, nside_pix, nside_subpix=None):
    """Return the set of HEALPix pixels that cover the given coordinates at
    resolution nside_pix. Optionally also return the set of subpixels within
    those pixels at resolution nside_subpix.

    Parameters:
        lon, lat     : coordinates (units expected by `ang2pix`)
        nside_pix    : HEALPix resolution of the coarse pixels
        nside_subpix : optional HEALPix resolution of the subpixels

    Returns:
        pix                  : unique coarse pixel indices
        (pix, subpix_array)  : when nside_subpix is given; subpix_array holds
                               one row of subpixels per coarse pixel
    """
    pix = np.unique(ang2pix(nside_pix, lon, lat))
    if nside_subpix is None:
        return pix
    # One entry of subpixels per coarse pixel (comprehension replaces the
    # original index-based append loop).
    subpix_array = [subpixel(p, nside_pix, nside_subpix) for p in pix]
    return pix, np.array(subpix_array)
5,300
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L17-L30
[ "def", "compare", "(", "left", ":", "Optional", "[", "L", "]", ",", "right", ":", "Optional", "[", "R", "]", ")", "->", "'Comparison[L, R]'", ":", "if", "isinstance", "(", "left", ",", "File", ")", "and", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "FileDirectoryComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "and", "isinstance", "(", "right", ",", "File", ")", ":", "return", "DirectoryFileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "File", ")", "or", "isinstance", "(", "right", ",", "File", ")", ":", "return", "FileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "or", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "DirectoryComparison", "(", "left", ",", "right", ")", "raise", "TypeError", "(", "f'Cannot compare entities: {left}, {right}'", ")" ]
Generate a set of coordinates at the centers of pixels of resolution nside across the full sky.
def allSkyCoordinates(nside):
    """Generate coordinates at the centers of all pixels of resolution
    nside across the full sky.

    Returns:
        lon, lat : coordinate arrays, one entry per HEALPix pixel
    """
    npix = hp.nside2npix(nside)
    lon, lat = pix2ang(nside, np.arange(npix))
    return lon, lat
5,301
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L86-L91
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Generate n random positions within a HEALPix mask of booleans .
def randomPositionsMask(mask, nside_pix, n):
    """Generate n random positions within a HEALPix mask of booleans.

    Parameters:
        mask      : boolean HEALPix mask (length = number of pixels)
        nside_pix : unused; retained for backward compatibility of the interface
        n         : number of positions to generate

    Returns:
        lon, lat : arrays of length n (degrees)

    Raises:
        ValueError   : if the mask contains no True pixels
        RuntimeError : if n points are not accumulated within 10 passes
    """
    npix = len(mask)
    nside = hp.npix2nside(npix)

    # Estimate the number of points that need to be thrown based off
    # coverage fraction of the HEALPix mask
    coverage_fraction = float(np.sum(mask)) / len(mask)
    if coverage_fraction == 0:
        # BUGFIX: previously fell through to an opaque ZeroDivisionError
        raise ValueError("Mask contains no valid pixels")
    n_throw = int(n / coverage_fraction)

    lon, lat = [], []
    count = 0
    while len(lon) < n:
        # Throw points uniformly on the sphere (uniform in sin(lat))
        lon_throw = np.random.uniform(0., 360., n_throw)
        lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))

        pix = ugali.utils.healpix.angToPix(nside, lon_throw, lat_throw)
        cut = mask[pix].astype(bool)

        lon = np.append(lon, lon_throw[cut])
        lat = np.append(lat, lat_throw[cut])

        count += 1
        if count > 10:
            raise RuntimeError('Too many loops...')

    # Removed the unused local `latch` from the original implementation.
    return lon[0:n], lat[0:n]
5,302
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L153-L185
[ "def", "_read_journal", "(", "self", ")", ":", "root", "=", "self", ".", "_filesystem", ".", "inspect_get_roots", "(", ")", "[", "0", "]", "inode", "=", "self", ".", "_filesystem", ".", "stat", "(", "'C:\\\\$Extend\\\\$UsnJrnl'", ")", "[", "'ino'", "]", "with", "NamedTemporaryFile", "(", "buffering", "=", "0", ")", "as", "tempfile", ":", "self", ".", "_filesystem", ".", "download_inode", "(", "root", ",", "inode", ",", "tempfile", ".", "name", ")", "journal", "=", "usn_journal", "(", "tempfile", ".", "name", ")", "return", "parse_journal", "(", "journal", ")" ]
Embeds a logical Ising model onto another graph via an embedding .
def embed_ising(source_linear, source_quadratic, embedding, target_adjacency, chain_strength=1.0):
    """Embeds a logical Ising model onto another graph via an embedding.

    Args:
        source_linear: dict mapping each source variable to its linear bias.
        source_quadratic: dict mapping source variable pairs (u, v) to their
            quadratic bias.
        embedding: dict mapping each source variable to a collection of target
            variables (its "chain"). NOTE: mutated in place — source variables
            missing from the embedding are assigned one unused target variable.
        target_adjacency: dict mapping each target variable to its neighbors.
        chain_strength: coupling magnitude used to bind each chain together
            (forwarded to chain_to_quadratic; exact sign/semantics defined there).

    Returns:
        (target_linear, target_quadratic, chain_quadratic): embedded linear
        biases, embedded quadratic biases, and the chain couplers produced by
        chain_to_quadratic.

    Raises:
        ValueError: if a source variable has no embedding and no unused target
            variable remains, if a chain variable is absent from
            target_adjacency, or if two coupled source variables have no
            connecting edge in the target graph.
    """
    # store variables in the target graph that the embedding hasn't used
    unused = {v for v in target_adjacency} - set().union(*embedding.values())

    # ok, let's begin with the linear biases.
    # we spread the value of h evenly over the chain
    target_linear = {v: 0. for v in target_adjacency}
    for v, bias in iteritems(source_linear):
        try:
            chain_variables = embedding[v]
        except KeyError:
            # if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of
            # the unused variables. if this turns out to not be an isolated vertex, it will be caught below when
            # handling quadratic biases
            try:
                embedding[v] = {unused.pop()}
            except KeyError:
                raise ValueError('no embedding provided for source variable {}'.format(v))
            chain_variables = embedding[v]

        # each target variable in the chain receives an equal share of the bias
        b = bias / len(chain_variables)

        for s in chain_variables:
            try:
                target_linear[s] += b
            except KeyError:
                raise ValueError('chain variable {} not in target_adjacency'.format(s))

    # next up the quadratic biases.
    # We spread the quadratic biases evenly over the edges
    target_quadratic = {}
    for (u, v), bias in iteritems(source_quadratic):
        edges = set()

        if u not in embedding:
            raise ValueError('no embedding provided for source variable {}'.format(u))
        if v not in embedding:
            raise ValueError('no embedding provided for source variable {}'.format(v))

        # collect every target edge that connects the two chains,
        # counting each undirected edge only once
        for s in embedding[u]:
            for t in embedding[v]:
                try:
                    if s in target_adjacency[t] and (t, s) not in edges:
                        edges.add((s, t))
                except KeyError:
                    raise ValueError('chain variable {} not in target_adjacency'.format(s))

        if not edges:
            raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))

        b = bias / len(edges)

        # in some cases the logical J can have (u, v) and (v, u) as inputs, so make
        # sure we are not doubling them up with our choice of ordering
        for s, t in edges:
            if (s, t) in target_quadratic:
                target_quadratic[(s, t)] += b
            elif (t, s) in target_quadratic:
                target_quadratic[(t, s)] += b
            else:
                target_quadratic[(s, t)] = b

    # finally we need to connect the nodes in the chains
    chain_quadratic = {}
    for chain in itervalues(embedding):
        chain_quadratic.update(chain_to_quadratic(chain, target_adjacency, chain_strength))

    return target_linear, target_quadratic, chain_quadratic
5,303
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L163-L278
[ "def", "query", "(", "self", ",", "domain", ")", ":", "result", "=", "{", "}", "try", ":", "result", "=", "self", ".", "pdns", ".", "query", "(", "domain", ")", "except", ":", "self", ".", "error", "(", "'Exception while querying passiveDNS. Check the domain format.'", ")", "# Clean the datetime problems in order to correct the json serializability", "clean_result", "=", "[", "]", "for", "ind", ",", "resultset", "in", "enumerate", "(", "result", ")", ":", "if", "resultset", ".", "get", "(", "'time_first'", ",", "None", ")", ":", "resultset", "[", "'time_first'", "]", "=", "resultset", ".", "get", "(", "'time_first'", ")", ".", "isoformat", "(", "' '", ")", "if", "resultset", ".", "get", "(", "'time_last'", ",", "None", ")", ":", "resultset", "[", "'time_last'", "]", "=", "resultset", ".", "get", "(", "'time_last'", ")", ".", "isoformat", "(", "' '", ")", "clean_result", ".", "append", "(", "resultset", ")", "return", "clean_result" ]
Determines the frequency of chain breaks in the given samples .
def chain_break_frequency(samples, embedding):
    """Determine the frequency of chain breaks in the given samples.

    Parameters:
        samples   : iterable of samples; each sample maps target variables
                    to values
        embedding : dict mapping each source variable to its chain of
                    target variables

    Returns:
        dict mapping each source variable to the fraction of samples in
        which its chain was broken (not all chain values equal).
    """
    counts = {v: 0 for v in embedding}
    total = 0
    for sample in samples:
        for v, chain in embedding.items():
            vals = [sample[u] for u in chain]
            # A chain is broken when any value differs from the first.
            # (The slice keeps an empty chain from raising.)
            if any(val != vals[0] for val in vals[1:]):
                counts[v] += 1
        total += 1
    return {v: counts[v] / total for v in embedding}
5,304
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L331-L359
[ "def", "load_toml_rest_api_config", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Skipping rest api loading from non-existent config file: %s\"", ",", "filename", ")", "return", "RestApiConfig", "(", ")", "LOGGER", ".", "info", "(", "\"Loading rest api information from config: %s\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "raise", "RestApiConfigurationError", "(", "\"Unable to load rest api configuration file: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "invalid_keys", "=", "set", "(", "toml_config", ".", "keys", "(", ")", ")", ".", "difference", "(", "[", "'bind'", ",", "'connect'", ",", "'timeout'", ",", "'opentsdb_db'", ",", "'opentsdb_url'", ",", "'opentsdb_username'", ",", "'opentsdb_password'", ",", "'client_max_size'", "]", ")", "if", "invalid_keys", ":", "raise", "RestApiConfigurationError", "(", "\"Invalid keys in rest api config: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "list", "(", "invalid_keys", ")", ")", ")", ")", ")", "config", "=", "RestApiConfig", "(", "bind", "=", "toml_config", ".", "get", "(", "\"bind\"", ",", "None", ")", ",", "connect", "=", "toml_config", ".", "get", "(", "'connect'", ",", "None", ")", ",", "timeout", "=", "toml_config", ".", "get", "(", "'timeout'", ",", "None", ")", ",", "opentsdb_url", "=", "toml_config", ".", "get", "(", "'opentsdb_url'", ",", "None", ")", ",", "opentsdb_db", "=", "toml_config", ".", "get", "(", "'opentsdb_db'", ",", "None", ")", ",", "opentsdb_username", "=", "toml_config", ".", "get", "(", "'opentsdb_username'", ",", "None", ")", ",", "opentsdb_password", "=", "toml_config", ".", "get", "(", "'opentsdb_password'", ",", "None", ")", ",", "client_max_size", "=", "toml_config", 
".", "get", "(", "'client_max_size'", ",", "None", ")", ")", "return", "config" ]
Return samples over the variables in the source graph .
def unembed_samples(samples, embedding, chain_break_method=None):
    """Return samples over the variables in the source graph.

    Each target-space sample is translated through `chain_break_method`
    (default: majority_vote), and the resulting source-space samples are
    collected into a single flat list.
    """
    if chain_break_method is None:
        chain_break_method = majority_vote

    source_samples = []
    for sample in samples:
        # chain_break_method yields zero or more source-space samples
        source_samples.extend(chain_break_method(sample, embedding))
    return source_samples
5,305
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L362-L384
[ "def", "get_partition_scores", "(", "image", ",", "min_w", "=", "1", ",", "min_h", "=", "1", ")", ":", "h", ",", "w", "=", "image", ".", "shape", "[", ":", "2", "]", "if", "w", "==", "0", "or", "h", "==", "0", ":", "return", "[", "]", "area", "=", "h", "*", "w", "cnz", "=", "numpy", ".", "count_nonzero", "total", "=", "cnz", "(", "image", ")", "if", "total", "==", "0", "or", "area", "==", "total", ":", "return", "[", "]", "if", "h", "<", "min_h", "*", "2", ":", "y_c", "=", "[", "]", "else", ":", "y_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "(", "h", "-", "y", ")", "*", "w", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "y", "*", "w", ")", ")", ")", ",", "y", ",", "0", ")", "for", "count", ",", "y", "in", "(", "(", "cnz", "(", "image", "[", "y", ":", "]", ")", ",", "y", ")", "for", "y", "in", "range", "(", "min_h", ",", "image", ".", "shape", "[", "0", "]", "-", "min_h", ")", ")", "]", "if", "w", "<", "min_w", "*", "2", ":", "x_c", "=", "[", "]", "else", ":", "x_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "h", "*", "(", "w", "-", "x", ")", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "h", "*", "x", ")", ")", ")", ",", "x", ",", "1", ")", "for", "count", ",", "x", "in", "(", "(", "cnz", "(", "image", "[", ":", ",", "x", ":", "]", ")", ",", "x", ")", "for", "x", "in", "range", "(", "min_w", ",", "image", ".", "shape", "[", "1", "]", "-", "min_w", ")", ")", "]", "return", "sorted", "(", "x_c", "+", "y_c", ")" ]
Discards the sample if broken .
def discard(sample, embedding):
    """Discard the sample if any chain is broken.

    Generator: yields a single source-space sample when every chain is
    intact (all target values in the chain agree); yields nothing when
    any chain is broken.
    """
    unembedded = {}
    for v, chain in embedding.items():
        vals = [sample[u] for u in chain]
        # Broken (or empty) chain: discard the whole sample.
        # (The slice keeps an empty chain from raising, unlike the original.)
        if not vals or any(val != vals[0] for val in vals[1:]):
            return
        unembedded[v] = vals[0]
    yield unembedded
5,306
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L387-L412
[ "def", "template", "(", "client", ",", "src", ",", "dest", ",", "paths", ",", "opt", ")", ":", "key_map", "=", "cli_hash", "(", "opt", ".", "key_map", ")", "obj", "=", "{", "}", "for", "path", "in", "paths", ":", "response", "=", "client", ".", "read", "(", "path", ")", "if", "not", "response", ":", "raise", "aomi", ".", "exceptions", ".", "VaultData", "(", "\"Unable to retrieve %s\"", "%", "path", ")", "if", "is_aws", "(", "response", "[", "'data'", "]", ")", "and", "'sts'", "not", "in", "path", ":", "renew_secret", "(", "client", ",", "response", ",", "opt", ")", "for", "s_k", ",", "s_v", "in", "response", "[", "'data'", "]", ".", "items", "(", ")", ":", "o_key", "=", "s_k", "if", "s_k", "in", "key_map", ":", "o_key", "=", "key_map", "[", "s_k", "]", "k_name", "=", "secret_key_name", "(", "path", ",", "o_key", ",", "opt", ")", ".", "lower", "(", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "obj", "[", "k_name", "]", "=", "s_v", "template_obj", "=", "blend_vars", "(", "obj", ",", "opt", ")", "output", "=", "render", "(", "grok_template_file", "(", "src", ")", ",", "template_obj", ")", "write_raw_file", "(", "output", ",", "abspath", "(", "dest", ")", ")" ]
Determines the sample values by majority vote .
def majority_vote(sample, embedding):
    """Determine the source-sample values by majority vote over each chain.

    Generator: yields a single source-space sample in which each source
    variable takes the most common value among its chain's target values
    (ties broken by first occurrence, as before).

    Note: the original's special case for an unbroken chain is redundant —
    the most common value of an all-equal chain is that value.
    """
    unembedded = {}
    for v, chain in embedding.items():
        vals = [sample[u] for u in chain]
        tallies = Counter(vals)
        unembedded[v] = max(tallies, key=tallies.__getitem__)
    yield unembedded
5,307
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L415-L441
[ "def", "_load_latex2unicode_constants", "(", "kb_file", "=", "None", ")", ":", "if", "kb_file", "is", "None", ":", "kb_file", "=", "get_kb_filename", "(", ")", "try", ":", "data", "=", "open", "(", "kb_file", ")", "except", "IOError", ":", "# File not found or similar", "sys", ".", "stderr", ".", "write", "(", "\"\\nCould not open LaTeX to Unicode KB file. \"", "\"Aborting translation.\\n\"", ")", "return", "CFG_LATEX_UNICODE_TRANSLATION_CONST", "latex_symbols", "=", "[", "]", "translation_table", "=", "{", "}", "for", "line", "in", "data", ":", "# The file has form of latex|--|utf-8. First decode to Unicode.", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", "mapping", "=", "line", ".", "split", "(", "'|--|'", ")", "translation_table", "[", "mapping", "[", "0", "]", ".", "rstrip", "(", "'\\n'", ")", "]", "=", "mapping", "[", "1", "]", ".", "rstrip", "(", "'\\n'", ")", "latex_symbols", ".", "append", "(", "re", ".", "escape", "(", "mapping", "[", "0", "]", ".", "rstrip", "(", "'\\n'", ")", ")", ")", "data", ".", "close", "(", ")", "CFG_LATEX_UNICODE_TRANSLATION_CONST", "[", "'regexp_obj'", "]", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "latex_symbols", ")", ")", "CFG_LATEX_UNICODE_TRANSLATION_CONST", "[", "'table'", "]", "=", "translation_table" ]
Determines the sample values by weighted random choice.
def weighted_random(sample, embedding):
    """Determine the source-sample values by weighted random choice.

    Generator: yields a single source-space sample in which each source
    variable takes a value drawn uniformly from its chain's target values —
    this weights each value by the proportion of the chain holding it.
    """
    unembedded = {}
    for v, chain in embedding.items():
        # pick a random element uniformly from all vals, this weights them by
        # the proportion of each
        unembedded[v] = random.choice([sample[u] for u in chain])
    yield unembedded
5,308
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L444-L470
[ "def", "MapFile", "(", "self", ",", "key_path_prefix", ",", "registry_file", ")", ":", "self", ".", "_registry_files", "[", "key_path_prefix", ".", "upper", "(", ")", "]", "=", "registry_file", "registry_file", ".", "SetKeyPathPrefix", "(", "key_path_prefix", ")" ]
True if all values in iterable are equal else False .
def _all_equal ( iterable ) : iterator = iter ( iterable ) first = next ( iterator ) return all ( first == rest for rest in iterator )
5,309
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L549-L553
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
Returns the most common element in iterable .
def _most_common ( iterable ) : data = Counter ( iterable ) return max ( data , key = data . __getitem__ )
5,310
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L556-L559
[ "def", "loop_input", "(", "self", ",", "on_quit", "=", "None", ")", ":", "# Start the init script", "self", ".", "_run_script", "(", "self", ".", "__session", ",", "self", ".", "_context", ".", "get_property", "(", "PROP_INIT_FILE", ")", ")", "# Run the script", "script_file", "=", "self", ".", "_context", ".", "get_property", "(", "PROP_RUN_FILE", ")", "if", "script_file", ":", "self", ".", "_run_script", "(", "self", ".", "__session", ",", "script_file", ")", "else", ":", "# No script: run the main loop (blocking)", "self", ".", "_run_loop", "(", "self", ".", "__session", ")", "# Nothing more to do", "self", ".", "_stop_event", ".", "set", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"Bye !\\n\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "on_quit", "is", "not", "None", ":", "# Call a handler if needed", "on_quit", "(", ")" ]
Logarithm of the likelihood
def lnlike(self, theta):
    """Logarithm of the likelihood evaluated at parameter vector `theta`.

    Returns -inf when the likelihood evaluation raises ValueError or
    AssertionError.
    """
    params, loglike = self.params, self.loglike
    kwargs = dict(zip(params, theta))
    try:
        lnlike = loglike.value(**kwargs)
    except (ValueError, AssertionError):
        # BUGFIX: the original `except ValueError as AssertionError` merely
        # bound the caught ValueError to the local name `AssertionError`;
        # the intent was to catch both exception types.
        lnlike = -np.inf
    return lnlike
5,311
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L150-L158
[ "def", "resolve_meta_key", "(", "hub", ",", "key", ",", "meta", ")", ":", "if", "key", "not", "in", "meta", ":", "return", "None", "value", "=", "meta", "[", "key", "]", "if", "isinstance", "(", "value", ",", "str", ")", "and", "value", "[", "0", "]", "==", "'>'", ":", "topic", "=", "value", "[", "1", ":", "]", "if", "topic", "not", "in", "hub", ":", "raise", "KeyError", "(", "'topic %s not found in hub'", "%", "topic", ")", "return", "hub", "[", "topic", "]", ".", "get", "(", ")", "return", "value" ]
Logarithm of the prior
def lnprior(self, theta):
    """Logarithm of the prior evaluated at parameter vector `theta`.

    Returns -inf when any prior evaluation produces an invalid value.
    """
    kwargs = dict(zip(self.params, theta))
    # Temporarily promote invalid floating-point operations (e.g. the log
    # of a negative prior value) to FloatingPointError.
    old_settings = np.seterr(invalid='raise')
    try:
        prior_values = [self.priors[name](value) for name, value in kwargs.items()]
        result = np.sum(np.log(prior_values))
    except (FloatingPointError, ValueError):
        result = -np.inf
    np.seterr(**old_settings)
    return result
5,312
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L160-L170
[ "def", "get_meta_data_editor", "(", "self", ",", "for_gaphas", "=", "True", ")", ":", "meta_gaphas", "=", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_gaphas'", "]", "meta_opengl", "=", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_opengl'", "]", "assert", "isinstance", "(", "meta_gaphas", ",", "Vividict", ")", "and", "isinstance", "(", "meta_opengl", ",", "Vividict", ")", "# Use meta data of editor with more keys (typically one of the editors has zero keys)", "# TODO check if the magic length condition in the next line can be improved (consistent behavior getter/setter?)", "parental_conversion_from_opengl", "=", "self", ".", "_parent", "and", "self", ".", "_parent", "(", ")", ".", "temp", "[", "'conversion_from_opengl'", "]", "from_gaphas", "=", "len", "(", "meta_gaphas", ")", ">", "len", "(", "meta_opengl", ")", "or", "(", "len", "(", "meta_gaphas", ")", "==", "len", "(", "meta_opengl", ")", "and", "for_gaphas", "and", "not", "parental_conversion_from_opengl", ")", "# Convert meta data if meta data target and origin differ", "if", "from_gaphas", "and", "not", "for_gaphas", ":", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_opengl'", "]", "=", "self", ".", "_meta_data_editor_gaphas2opengl", "(", "meta_gaphas", ")", "elif", "not", "from_gaphas", "and", "for_gaphas", ":", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_gaphas'", "]", "=", "self", ".", "_meta_data_editor_opengl2gaphas", "(", "meta_opengl", ")", "# only keep meta data for one editor", "del", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_opengl'", "if", "for_gaphas", "else", "'editor_gaphas'", "]", "return", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_gaphas'", "]", "if", "for_gaphas", "else", "self", ".", "meta", "[", "'gui'", "]", "[", "'editor_opengl'", "]" ]
Logarithm of the probability
def lnprob(self, theta):
    """Logarithm of the posterior probability: lnprior(theta) + lnlike(theta).

    Increments the module-level call counter `niter` and emits a debug log
    line every 100 calls.
    """
    global niter
    params = self.params  # removed unused locals `priors` and `loglike`
    # Avoid extra likelihood calls with bad priors
    _lnprior = self.lnprior(theta)
    if np.isfinite(_lnprior):
        _lnlike = self.lnlike(theta)
    else:
        _lnprior = -np.inf
        _lnlike = -np.inf
    _lnprob = _lnprior + _lnlike

    if (niter % 100 == 0):
        msg = "%i function calls ...\n" % niter
        msg += ', '.join('%s: %.3f' % (k, v) for k, v in zip(params, theta))
        # BUGFIX: the interpolated arguments were previously swapped —
        # _lnprior filled the log(like) slot and _lnlike the log(prior) slot.
        msg += '\nlog(like): %.3f, log(prior): %.3f' % (_lnlike, _lnprior)
        logger.debug(msg)
    niter += 1
    return _lnprob
5,313
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L172-L192
[ "def", "_intersection", "(", "self", ",", "keys", ",", "rows", ")", ":", "# If there are no other keys with start and end date (i.e. nothing to merge) return immediately.", "if", "not", "keys", ":", "return", "rows", "ret", "=", "list", "(", ")", "for", "row", "in", "rows", ":", "start_date", "=", "row", "[", "self", ".", "_key_start_date", "]", "end_date", "=", "row", "[", "self", ".", "_key_end_date", "]", "for", "key_start_date", ",", "key_end_date", "in", "keys", ":", "start_date", ",", "end_date", "=", "Type2JoinHelper", ".", "_intersect", "(", "start_date", ",", "end_date", ",", "row", "[", "key_start_date", "]", ",", "row", "[", "key_end_date", "]", ")", "if", "not", "start_date", ":", "break", "if", "key_start_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_start_date", "]", "if", "key_end_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_end_date", "]", "if", "start_date", ":", "row", "[", "self", ".", "_key_start_date", "]", "=", "start_date", "row", "[", "self", ".", "_key_end_date", "]", "=", "end_date", "ret", ".", "append", "(", "row", ")", "return", "ret" ]
Top level interface to write the membership from a config and source model .
def write_membership(filename, config, srcfile, section=None):
    """Top-level interface to write the membership from a config and
    source model.

    Parameters:
        filename : output file for the membership
        config   : configuration (passed through to createLoglike)
        srcfile  : source-model file to load
        section  : optional section of srcfile to load
    """
    src = Source()
    src.load(srcfile, section=section)
    createLoglike(config, src).write_membership(filename)
5,314
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L512-L519
[ "def", "revoke_grant", "(", "key_id", ",", "grant_id", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "key_id", ".", "startswith", "(", "'alias/'", ")", ":", "key_id", "=", "_get_key_id", "(", "key_id", ")", "r", "=", "{", "}", "try", ":", "conn", ".", "revoke_grant", "(", "key_id", ",", "grant_id", ")", "r", "[", "'result'", "]", "=", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "r", "[", "'result'", "]", "=", "False", "r", "[", "'error'", "]", "=", "__utils__", "[", "'boto.get_error'", "]", "(", "e", ")", "return", "r" ]
Create a catalog object
def createCatalog(config, roi=None, lon=None, lat=None):
    """Create a catalog object.

    When `roi` is not supplied, one is built from (config, lon, lat).
    """
    import ugali.observation.catalog
    if roi is None:
        roi = createROI(config, lon, lat)
    return ugali.observation.catalog.Catalog(config, roi=roi)
5,315
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L566-L573
[ "def", "start_server", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Opens a browser-based \"", "\"client that interfaces with the \"", "\"chemical format converter.\"", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Prints all \"", "\"transmitted data streams.\"", ")", "parser", ".", "add_argument", "(", "'--port'", ",", "type", "=", "int", ",", "default", "=", "8000", ",", "help", "=", "\"The port \"", "\"on which to serve the website.\"", ")", "parser", ".", "add_argument", "(", "'--timeout'", ",", "type", "=", "int", ",", "default", "=", "5", ",", "help", "=", "\"The maximum \"", "\"time, in seconds, allowed for a process to run \"", "\"before returning an error.\"", ")", "parser", ".", "add_argument", "(", "'--workers'", ",", "type", "=", "int", ",", "default", "=", "2", ",", "help", "=", "\"The number of \"", "\"worker processes to use with the server.\"", ")", "parser", ".", "add_argument", "(", "'--no-browser'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Disables \"", "\"opening a browser window on startup.\"", ")", "global", "args", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "debug", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "handlers", "=", "[", "(", "r'/'", ",", "IndexHandler", ")", ",", "(", "r'/websocket'", ",", "WebSocket", ")", ",", "(", "r'/static/(.*)'", ",", "tornado", ".", "web", ".", "StaticFileHandler", ",", "{", "'path'", ":", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "}", ")", "]", "application", "=", "tornado", ".", "web", ".", "Application", "(", "handlers", ")", "application", ".", "listen", "(", "args", ".", "port", ")", "if", "not", "args", ".", "no_browser", ":", "webbrowser", ".", "open", "(", "'http://localhost:%d/'", "%", "args", ".", "port", ",", "new", "=", "2", 
")", "try", ":", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "stderr", ".", "write", "(", "\"Received keyboard interrupt. Stopping server.\\n\"", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "stop", "(", ")", "sys", ".", "exit", "(", "1", ")" ]
Simulate a catalog object .
def simulateCatalog(config, roi=None, lon=None, lat=None):
    """Simulate a catalog object.

    When `roi` is not supplied, one is built from (config, lon, lat).
    """
    import ugali.simulation.simulator
    if roi is None:
        roi = createROI(config, lon, lat)
    simulator = ugali.simulation.simulator.Simulator(config, roi)
    return simulator.catalog()
5,316
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L576-L583
[ "def", "wrap", "(", "self", ",", "wrapper", ")", ":", "if", "self", ".", "_recv_thread", "and", "self", ".", "_send_thread", ":", "# Have to suspend the send/recv threads", "self", ".", "_recv_lock", ".", "acquire", "(", ")", "self", ".", "_send_lock", ".", "acquire", "(", ")", "# Wrap the socket", "self", ".", "_sock", "=", "wrapper", "(", "self", ".", "_sock", ")", "# OK, restart the send/recv threads", "if", "self", ".", "_recv_thread", "and", "self", ".", "_send_thread", ":", "# Release our locks", "self", ".", "_send_lock", ".", "release", "(", ")", "self", ".", "_recv_lock", ".", "release", "(", ")" ]
Calculate the observable fraction within each pixel of the target region.
def calc_observable_fraction(self, distance_modulus):
    """Calculate the observable fraction within each pixel of the target
    region.

    Raises:
        ValueError: when the observable fraction sums to zero (or NaN)
            for every pixel.
    """
    # This is the observable fraction after magnitude cuts in each
    # pixel of the ROI.
    frac = self.isochrone.observableFraction(self.mask, distance_modulus)
    # `not sum > 0` (rather than `sum <= 0`) deliberately also trips on NaN.
    if not frac.sum() > 0:
        message = "No observable fraction" + ("\n" + str(self.source.params))
        logger.error(message)
        raise ValueError(message)
    return frac
5,317
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L265-L277
[ "def", "recover_and_supervise", "(", "recovery_file", ")", ":", "try", ":", "logging", ".", "info", "(", "\"Attempting to recover Supervisor data from \"", "+", "recovery_file", ")", "with", "open", "(", "recovery_file", ")", "as", "rf", ":", "recovery_data", "=", "json", ".", "load", "(", "rf", ")", "monitor_data", "=", "recovery_data", "[", "'monitor_data'", "]", "dependencies", "=", "recovery_data", "[", "'dependencies'", "]", "args", "=", "recovery_data", "[", "'args'", "]", "except", ":", "logging", ".", "error", "(", "\"Could not recover monitor data, exiting...\"", ")", "return", "1", "logging", ".", "info", "(", "\"Data successfully loaded, resuming Supervisor\"", ")", "supervise_until_complete", "(", "monitor_data", ",", "dependencies", ",", "args", ",", "recovery_file", ")" ]
Calculate the spatial signal probability for each catalog object .
def calc_signal_spatial(self):
    """Calculate the spatial signal probability for each catalog object.

    Side effects: caches `surface_intensity_sparse` and
    `surface_intensity_object` on the instance.
    """
    # Surface intensity over the sparse ROI pixels
    self.surface_intensity_sparse = self.calc_surface_intensity()
    # Surface intensity evaluated object-by-object at catalog positions
    self.surface_intensity_object = self.kernel.pdf(self.catalog.lon,
                                                    self.catalog.lat)
    # The spatial component of the signal probability is the per-object
    # surface intensity.
    return self.surface_intensity_object
5,318
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L350-L371
[ "def", "update", "(", "cls", ",", "request_response_id", ",", "monetary_account_id", "=", "None", ",", "amount_responded", "=", "None", ",", "status", "=", "None", ",", "address_shipping", "=", "None", ",", "address_billing", "=", "None", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", "None", ":", "custom_headers", "=", "{", "}", "api_client", "=", "client", ".", "ApiClient", "(", "cls", ".", "_get_api_context", "(", ")", ")", "request_map", "=", "{", "cls", ".", "FIELD_AMOUNT_RESPONDED", ":", "amount_responded", ",", "cls", ".", "FIELD_STATUS", ":", "status", ",", "cls", ".", "FIELD_ADDRESS_SHIPPING", ":", "address_shipping", ",", "cls", ".", "FIELD_ADDRESS_BILLING", ":", "address_billing", "}", "request_map_string", "=", "converter", ".", "class_to_json", "(", "request_map", ")", "request_map_string", "=", "cls", ".", "_remove_field_for_request", "(", "request_map_string", ")", "request_bytes", "=", "request_map_string", ".", "encode", "(", ")", "endpoint_url", "=", "cls", ".", "_ENDPOINT_URL_UPDATE", ".", "format", "(", "cls", ".", "_determine_user_id", "(", ")", ",", "cls", ".", "_determine_monetary_account_id", "(", "monetary_account_id", ")", ",", "request_response_id", ")", "response_raw", "=", "api_client", ".", "put", "(", "endpoint_url", ",", "request_bytes", ",", "custom_headers", ")", "return", "BunqResponseRequestResponse", ".", "cast_from_bunq_response", "(", "cls", ".", "_from_json", "(", "response_raw", ",", "cls", ".", "_OBJECT_TYPE_PUT", ")", ")" ]
Maximize the log - likelihood as a function of richness .
def fit_richness ( self , atol = 1.e-3 , maxiter = 50 ) : # Check whether the signal probability for all objects are zero # This can occur for finite kernels on the edge of the survey footprint if np . isnan ( self . u ) . any ( ) : logger . warning ( "NaN signal probability found" ) return 0. , 0. , None if not np . any ( self . u ) : logger . warning ( "Signal probability is zero for all objects" ) return 0. , 0. , None if self . f == 0 : logger . warning ( "Observable fraction is zero" ) return 0. , 0. , None # Richness corresponding to 0, 1, and 10 observable stars richness = np . array ( [ 0. , 1. / self . f , 10. / self . f ] ) loglike = np . array ( [ self . value ( richness = r ) for r in richness ] ) found_maximum = False iteration = 0 while not found_maximum : parabola = ugali . utils . parabola . Parabola ( richness , 2. * loglike ) if parabola . vertex_x < 0. : found_maximum = True else : richness = np . append ( richness , parabola . vertex_x ) loglike = np . append ( loglike , self . value ( richness = richness [ - 1 ] ) ) if np . fabs ( loglike [ - 1 ] - np . max ( loglike [ 0 : - 1 ] ) ) < atol : found_maximum = True iteration += 1 if iteration > maxiter : logger . warning ( "Maximum number of iterations reached" ) break index = np . argmax ( loglike ) return loglike [ index ] , richness [ index ] , parabola
5,319
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L380-L431
[ "def", "get_scopes_information", "(", "self", ")", ":", "scopes", "=", "StandardScopeClaims", ".", "get_scopes_info", "(", "self", ".", "params", "[", "'scope'", "]", ")", "if", "settings", ".", "get", "(", "'OIDC_EXTRA_SCOPE_CLAIMS'", ")", ":", "scopes_extra", "=", "settings", ".", "get", "(", "'OIDC_EXTRA_SCOPE_CLAIMS'", ",", "import_str", "=", "True", ")", ".", "get_scopes_info", "(", "self", ".", "params", "[", "'scope'", "]", ")", "for", "index_extra", ",", "scope_extra", "in", "enumerate", "(", "scopes_extra", ")", ":", "for", "index", ",", "scope", "in", "enumerate", "(", "scopes", "[", ":", "]", ")", ":", "if", "scope_extra", "[", "'scope'", "]", "==", "scope", "[", "'scope'", "]", ":", "del", "scopes", "[", "index", "]", "else", ":", "scopes_extra", "=", "[", "]", "return", "scopes", "+", "scopes_extra" ]
Resolve a JSON Pointer object reference to the object itself .
def resolve_reference ( self , ref ) : url , resolved = self . resolver . resolve ( ref ) return resolved
5,320
https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L25-L34
[ "def", "write", "(", "self", ",", "splits", "=", "None", ",", "mergers", "=", "None", ",", "dividends", "=", "None", ",", "stock_dividends", "=", "None", ")", ":", "self", ".", "write_frame", "(", "'splits'", ",", "splits", ")", "self", ".", "write_frame", "(", "'mergers'", ",", "mergers", ")", "self", ".", "write_dividend_data", "(", "dividends", ",", "stock_dividends", ")", "# Use IF NOT EXISTS here to allow multiple writes if desired.", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS splits_sids \"", "\"ON splits(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS splits_effective_date \"", "\"ON splits(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS mergers_sids \"", "\"ON mergers(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS mergers_effective_date \"", "\"ON mergers(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_sid \"", "\"ON dividends(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_effective_date \"", "\"ON dividends(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividend_payouts_sid \"", "\"ON dividend_payouts(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date \"", "\"ON dividend_payouts(ex_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid \"", "\"ON stock_dividend_payouts(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date \"", "\"ON stock_dividend_payouts(ex_date)\"", ")" ]
Construct a Path object from a path string .
def get_path ( self , path ) : mapping = self . get_path_mapping ( path ) return self . path_class ( api = self , path = path , mapping = mapping )
5,321
https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L43-L53
[ "def", "_merge_csv_section", "(", "sections", ",", "pc", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter merge_csv_section\"", ")", "try", ":", "# Loop through each table_data in paleoData", "for", "_name", ",", "_section", "in", "sections", ".", "items", "(", ")", ":", "if", "\"measurementTable\"", "in", "_section", ":", "sections", "[", "_name", "]", "[", "\"measurementTable\"", "]", "=", "_merge_csv_table", "(", "_section", "[", "\"measurementTable\"", "]", ",", "pc", ",", "csvs", ")", "if", "\"model\"", "in", "_section", ":", "sections", "[", "_name", "]", "[", "\"model\"", "]", "=", "_merge_csv_model", "(", "_section", "[", "\"model\"", "]", ",", "pc", ",", "csvs", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"Error: There was an error merging CSV data into the metadata \"", ")", "logger_csvs", ".", "error", "(", "\"merge_csv_section: {}\"", ".", "format", "(", "e", ")", ")", "logger_csvs", ".", "info", "(", "\"exit merge_csv_section\"", ")", "return", "sections" ]
Construct an APIDefinition by parsing the given filename .
def from_file ( cls , filename ) : with open ( filename ) as infp : if filename . endswith ( '.yaml' ) or filename . endswith ( '.yml' ) : import yaml data = yaml . safe_load ( infp ) else : import json data = json . load ( infp ) return cls . from_data ( data )
5,322
https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/apidef/doc.py#L65-L82
[ "def", "addNoise", "(", "vecs", ",", "percent", "=", "0.1", ",", "n", "=", "2048", ")", ":", "noisyVecs", "=", "[", "]", "for", "vec", "in", "vecs", ":", "nv", "=", "vec", ".", "copy", "(", ")", "for", "idx", "in", "vec", ":", "if", "numpy", ".", "random", ".", "random", "(", ")", "<=", "percent", ":", "nv", ".", "discard", "(", "idx", ")", "nv", ".", "add", "(", "numpy", ".", "random", ".", "randint", "(", "n", ")", ")", "noisyVecs", ".", "append", "(", "nv", ")", "return", "noisyVecs" ]
Log and return a server error message .
def _server_error_message ( url , message ) : msg = _error_message . format ( url = url , message = message ) log . error ( msg ) return msg
5,323
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/utils.py#L23-L27
[ "def", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", ":", "status", "=", "_libcudnn", ".", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", "cudnnCheckStatus", "(", "status", ")" ]
Make a request with the provided data .
def make_request ( url , method = 'GET' , query = None , body = None , auth = None , timeout = 10 , client = None , macaroons = None ) : headers = { } kwargs = { 'timeout' : timeout , 'headers' : headers } # Handle the request body. if body is not None : if isinstance ( body , collections . Mapping ) : body = json . dumps ( body ) kwargs [ 'data' ] = body # Handle request methods. if method in ( 'GET' , 'HEAD' ) : if query : url = '{}?{}' . format ( url , urlencode ( query , True ) ) elif method in ( 'DELETE' , 'PATCH' , 'POST' , 'PUT' ) : headers [ 'Content-Type' ] = 'application/json' else : raise ValueError ( 'invalid method {}' . format ( method ) ) if macaroons is not None : headers [ 'Macaroons' ] = macaroons kwargs [ 'auth' ] = auth if client is None else client . auth ( ) api_method = getattr ( requests , method . lower ( ) ) # Perform the request. try : response = api_method ( url , * * kwargs ) except requests . exceptions . Timeout : raise timeout_error ( url , timeout ) except Exception as err : msg = _server_error_message ( url , err ) raise ServerError ( msg ) # Handle error responses. try : response . raise_for_status ( ) except HTTPError as err : msg = _server_error_message ( url , err . response . text ) raise ServerError ( err . response . status_code , msg ) except requests . exceptions . RequestException as err : msg = _server_error_message ( url , err . message ) raise ServerError ( msg ) # Some requests just result in a status with no response body. if not response . content : return { } # Assume the response body is a JSON encoded string. try : return response . json ( ) except Exception as err : msg = 'Error decoding JSON response: {} message: {}' . format ( url , err ) log . error ( msg ) raise ServerError ( msg )
5,324
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/utils.py#L30-L99
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Get the plans for a given charm .
def get_plans ( self , reference ) : response = make_request ( '{}charm?charm-url={}' . format ( self . url , 'cs:' + reference . path ( ) ) , timeout = self . timeout , client = self . _client ) try : return tuple ( map ( lambda plan : Plan ( url = plan [ 'url' ] , plan = plan [ 'plan' ] , created_on = datetime . datetime . strptime ( plan [ 'created-on' ] , "%Y-%m-%dT%H:%M:%SZ" ) , description = plan . get ( 'description' ) , price = plan . get ( 'price' ) ) , response ) ) except Exception as err : log . error ( 'cannot process plans: invalid JSON response: {!r}' . format ( response ) ) raise ServerError ( 'unable to get list of plans for {}: {}' . format ( reference . path ( ) , err ) )
5,325
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L46-L72
[ "def", "unique", "(", "self", ")", ":", "from", ".", "sframe", "import", "SFrame", "as", "_SFrame", "tmp_sf", "=", "_SFrame", "(", ")", "tmp_sf", ".", "add_column", "(", "self", ",", "'X1'", ",", "inplace", "=", "True", ")", "res", "=", "tmp_sf", ".", "groupby", "(", "'X1'", ",", "{", "}", ")", "return", "SArray", "(", "_proxy", "=", "res", "[", "'X1'", "]", ".", "__proxy__", ")" ]
Get the list of wallets .
def list_wallets ( self ) : response = make_request ( '{}wallet' . format ( self . url ) , timeout = self . timeout , client = self . _client ) try : total = response [ 'total' ] return { 'credit' : response [ 'credit' ] , 'total' : WalletTotal ( limit = total [ 'limit' ] , budgeted = total [ 'budgeted' ] , available = total [ 'available' ] , unallocated = total [ 'unallocated' ] , usage = total [ 'usage' ] , consumed = total [ 'consumed' ] ) , 'wallets' : tuple ( Wallet ( owner = wallet [ 'owner' ] , wallet = wallet [ 'wallet' ] , limit = wallet [ 'limit' ] , budgeted = wallet [ 'budgeted' ] , unallocated = wallet [ 'unallocated' ] , available = wallet [ 'available' ] , consumed = wallet [ 'consumed' ] , default = 'default' in wallet ) for wallet in response [ 'wallets' ] ) , } except Exception as err : log . error ( 'cannot process wallets: invalid JSON response: {!r}' . format ( response ) ) raise ServerError ( 'unable to get list of wallets: {!r}' . format ( err ) )
5,326
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L74-L112
[ "def", "_as_log_entry", "(", "self", ",", "name", ",", "now", ")", ":", "# initialize the struct with fields that are always present", "d", "=", "{", "u'http_response_code'", ":", "self", ".", "response_code", ",", "u'timestamp'", ":", "time", ".", "mktime", "(", "now", ".", "timetuple", "(", ")", ")", "}", "# compute the severity", "severity", "=", "_SEVERITY", ".", "INFO", "if", "self", ".", "response_code", ">=", "400", ":", "severity", "=", "_SEVERITY", ".", "ERROR", "d", "[", "u'error_cause'", "]", "=", "self", ".", "error_cause", ".", "name", "# add 'optional' fields to the struct", "if", "self", ".", "request_size", ">", "0", ":", "d", "[", "u'request_size'", "]", "=", "self", ".", "request_size", "if", "self", ".", "response_size", ">", "0", ":", "d", "[", "u'response_size'", "]", "=", "self", ".", "response_size", "if", "self", ".", "method", ":", "d", "[", "u'http_method'", "]", "=", "self", ".", "method", "if", "self", ".", "request_time", ":", "d", "[", "u'request_latency_in_ms'", "]", "=", "self", ".", "request_time", ".", "total_seconds", "(", ")", "*", "1000", "# add 'copyable' fields to the struct", "for", "key", "in", "self", ".", "COPYABLE_LOG_FIELDS", ":", "value", "=", "getattr", "(", "self", ",", "key", ",", "None", ")", "if", "value", ":", "d", "[", "key", "]", "=", "value", "return", "sc_messages", ".", "LogEntry", "(", "name", "=", "name", ",", "timestamp", "=", "timestamp", ".", "to_rfc3339", "(", "now", ")", ",", "severity", "=", "severity", ",", "structPayload", "=", "_struct_payload_from", "(", "d", ")", ")" ]
Get a single wallet .
def get_wallet ( self , wallet_name ) : response = make_request ( '{}wallet/{}' . format ( self . url , wallet_name ) , timeout = self . timeout , client = self . _client ) try : total = response [ 'total' ] return { 'credit' : response [ 'credit' ] , 'limit' : response [ 'limit' ] , 'total' : WalletTotal ( limit = total [ 'limit' ] , budgeted = total [ 'budgeted' ] , available = total [ 'available' ] , unallocated = total [ 'unallocated' ] , usage = total [ 'usage' ] , consumed = total [ 'consumed' ] ) } except Exception as exc : log . error ( 'cannot get wallet from server: {!r}' . format ( exc ) ) raise ServerError ( 'unable to get list of wallets: {!r}' . format ( exc ) )
5,327
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L114-L142
[ "def", "datetime", "(", "self", ",", "field", "=", "None", ",", "val", "=", "None", ")", ":", "if", "val", "is", "None", ":", "def", "source", "(", ")", ":", "tzinfo", "=", "get_default_timezone", "(", ")", "if", "settings", ".", "USE_TZ", "else", "None", "return", "datetime", ".", "fromtimestamp", "(", "randrange", "(", "1", ",", "2100000000", ")", ",", "tzinfo", ")", "else", ":", "def", "source", "(", ")", ":", "tzinfo", "=", "get_default_timezone", "(", ")", "if", "settings", ".", "USE_TZ", "else", "None", "return", "datetime", ".", "fromtimestamp", "(", "int", "(", "val", ".", "strftime", "(", "\"%s\"", ")", ")", "+", "randrange", "(", "-", "365", "*", "24", "*", "3600", "*", "2", ",", "365", "*", "24", "*", "3600", "*", "2", ")", ",", "tzinfo", ")", "return", "self", ".", "get_allowed_value", "(", "source", ",", "field", ")" ]
Update a wallet with a new limit .
def update_wallet ( self , wallet_name , limit ) : request = { 'update' : { 'limit' : str ( limit ) , } } return make_request ( '{}wallet/{}' . format ( self . url , wallet_name ) , method = 'PATCH' , body = request , timeout = self . timeout , client = self . _client )
5,328
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L144-L162
[ "def", "map_system_entities", "(", "engine", ",", "metadata", ",", "reset", ")", ":", "# Map the user message system entity.", "msg_tbl", "=", "Table", "(", "'_user_messages'", ",", "metadata", ",", "Column", "(", "'guid'", ",", "String", ",", "nullable", "=", "False", ",", "primary_key", "=", "True", ")", ",", "Column", "(", "'text'", ",", "String", ",", "nullable", "=", "False", ")", ",", "Column", "(", "'time_stamp'", ",", "DateTime", "(", "timezone", "=", "True", ")", ",", "nullable", "=", "False", ",", "default", "=", "sa_func", ".", "now", "(", ")", ")", ",", ")", "mapper", "(", "UserMessage", ",", "msg_tbl", ",", "id_attribute", "=", "'guid'", ")", "if", "reset", ":", "metadata", ".", "drop_all", "(", "bind", "=", "engine", ",", "tables", "=", "[", "msg_tbl", "]", ")", "metadata", ".", "create_all", "(", "bind", "=", "engine", ",", "tables", "=", "[", "msg_tbl", "]", ")" ]
Delete a wallet .
def delete_wallet ( self , wallet_name ) : return make_request ( '{}wallet/{}' . format ( self . url , wallet_name ) , method = 'DELETE' , timeout = self . timeout , client = self . _client )
5,329
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L183-L194
[ "def", "poll", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "-", "1", "message", "=", "super", "(", "AvroConsumer", ",", "self", ")", ".", "poll", "(", "timeout", ")", "if", "message", "is", "None", ":", "return", "None", "if", "not", "message", ".", "error", "(", ")", ":", "try", ":", "if", "message", ".", "value", "(", ")", "is", "not", "None", ":", "decoded_value", "=", "self", ".", "_serializer", ".", "decode_message", "(", "message", ".", "value", "(", ")", ",", "is_key", "=", "False", ")", "message", ".", "set_value", "(", "decoded_value", ")", "if", "message", ".", "key", "(", ")", "is", "not", "None", ":", "decoded_key", "=", "self", ".", "_serializer", ".", "decode_message", "(", "message", ".", "key", "(", ")", ",", "is_key", "=", "True", ")", "message", ".", "set_key", "(", "decoded_key", ")", "except", "SerializerError", "as", "e", ":", "raise", "SerializerError", "(", "\"Message deserialization failed for message at {} [{}] offset {}: {}\"", ".", "format", "(", "message", ".", "topic", "(", ")", ",", "message", ".", "partition", "(", ")", ",", "message", ".", "offset", "(", ")", ",", "e", ")", ")", "return", "message" ]
Create a new budget for a model and wallet .
def create_budget ( self , wallet_name , model_uuid , limit ) : request = { 'model' : model_uuid , 'limit' : limit , } return make_request ( '{}wallet/{}/budget' . format ( self . url , wallet_name ) , method = 'POST' , body = request , timeout = self . timeout , client = self . _client )
5,330
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L196-L214
[ "def", "get_doc", "(", "logger", "=", "None", ",", "plugin", "=", "None", ",", "reporthook", "=", "None", ")", ":", "from", "ginga", ".", "GingaPlugin", "import", "GlobalPlugin", ",", "LocalPlugin", "if", "isinstance", "(", "plugin", ",", "GlobalPlugin", ")", ":", "plugin_page", "=", "'plugins_global'", "plugin_name", "=", "str", "(", "plugin", ")", "elif", "isinstance", "(", "plugin", ",", "LocalPlugin", ")", ":", "plugin_page", "=", "'plugins_local'", "plugin_name", "=", "str", "(", "plugin", ")", "else", ":", "plugin_page", "=", "None", "plugin_name", "=", "None", "try", ":", "index_html", "=", "_download_rtd_zip", "(", "reporthook", "=", "reporthook", ")", "# Download failed, use online resource", "except", "Exception", "as", "e", ":", "url", "=", "'https://ginga.readthedocs.io/en/latest/'", "if", "plugin_name", "is", "not", "None", ":", "if", "toolkit", ".", "family", ".", "startswith", "(", "'qt'", ")", ":", "# This displays plugin docstring.", "url", "=", "None", "else", ":", "# This redirects to online doc.", "url", "+=", "'manual/{}/{}.html'", ".", "format", "(", "plugin_page", ",", "plugin_name", ")", "if", "logger", "is", "not", "None", ":", "logger", ".", "error", "(", "str", "(", "e", ")", ")", "# Use local resource", "else", ":", "pfx", "=", "'file:'", "url", "=", "'{}{}'", ".", "format", "(", "pfx", ",", "index_html", ")", "# https://github.com/rtfd/readthedocs.org/issues/2803", "if", "plugin_name", "is", "not", "None", ":", "url", "+=", "'#{}'", ".", "format", "(", "plugin_name", ")", "return", "url" ]
Delete a budget .
def delete_budget ( self , model_uuid ) : return make_request ( '{}model/{}/budget' . format ( self . url , model_uuid ) , method = 'DELETE' , timeout = self . timeout , client = self . _client )
5,331
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/plans.py#L238-L250
[ "def", "render_to_response", "(", "self", ",", "context", ",", "*", "*", "response_kwargs", ")", ":", "serializer", "=", "PollPublicSerializer", "(", "self", ".", "object", ")", "response", "=", "HttpResponse", "(", "json", ".", "dumps", "(", "serializer", ".", "data", ")", ",", "content_type", "=", "\"application/json\"", ")", "if", "\"HTTP_ORIGIN\"", "in", "self", ".", "request", ".", "META", ":", "response", "[", "\"Access-Control-Allow-Origin\"", "]", "=", "self", ".", "request", ".", "META", "[", "\"HTTP_ORIGIN\"", "]", "response", "[", "\"Access-Control-Allow-Credentials\"", "]", "=", "'true'", "return", "response" ]
Binary classification confusion
def confusion ( df , labels = [ 'neg' , 'pos' ] ) : c = pd . DataFrame ( np . zeros ( ( 2 , 2 ) ) , dtype = int ) a , b = df . columns [ : 2 ] # labels[df.columns[:2]] c . columns = sorted ( set ( df [ a ] ) ) [ : 2 ] c . columns . name = a c . index = list ( c . columns ) c . index . name = b c1 , c2 = c . columns c [ c1 ] [ c1 ] = ( ( df [ a ] == c1 ) & ( df [ b ] == c1 ) ) . sum ( ) c [ c1 ] [ c2 ] = ( ( df [ a ] == c1 ) & ( df [ b ] == c2 ) ) . sum ( ) c [ c2 ] [ c2 ] = ( ( df [ a ] == c2 ) & ( df [ b ] == c2 ) ) . sum ( ) c [ c2 ] [ c1 ] = ( ( df [ a ] == c2 ) & ( df [ b ] == c1 ) ) . sum ( ) return c
5,332
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L227-L240
[ "def", "get_duration", "(", "self", ")", ":", "postgame", "=", "self", ".", "get_postgame", "(", ")", "if", "postgame", ":", "return", "postgame", ".", "duration_int", "*", "1000", "duration", "=", "self", ".", "_header", ".", "initial", ".", "restore_time", "try", ":", "while", "self", ".", "_handle", ".", "tell", "(", ")", "<", "self", ".", "size", ":", "operation", "=", "mgz", ".", "body", ".", "operation", ".", "parse_stream", "(", "self", ".", "_handle", ")", "if", "operation", ".", "type", "==", "'sync'", ":", "duration", "+=", "operation", ".", "time_increment", "elif", "operation", ".", "type", "==", "'action'", ":", "if", "operation", ".", "action", ".", "type", "==", "'resign'", ":", "self", ".", "_cache", "[", "'resigned'", "]", ".", "add", "(", "operation", ".", "action", ".", "player_id", ")", "self", ".", "_handle", ".", "seek", "(", "self", ".", "body_position", ")", "except", "(", "construct", ".", "core", ".", "ConstructError", ",", "zlib", ".", "error", ",", "ValueError", ")", ":", "raise", "RuntimeError", "(", "\"invalid mgz file\"", ")", "return", "duration" ]
r Find the threshold level that accomplishes the desired specificity
def thresh_from_spec ( spec , labels , scores , * * kwargs ) : cost_fun . verbose = kwargs . pop ( 'verbose' , cost_fun . verbose ) cost_fun . target = spec return minimize ( cost_fun , x0 = [ .5 ] , args = ( labels , scores ) , method = 'SLSQP' , constraints = ( { 'type' : 'ineq' , 'fun' : lambda x : np . array ( [ x [ 0 ] ] ) , 'jac' : lambda x : np . array ( [ 1. ] ) } , ) , * * kwargs )
5,333
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L938-L951
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Merge two dicts of addable values
def add_dicts ( d1 , d2 ) : if d1 is None : return d2 if d2 is None : return d1 keys = set ( d1 ) keys . update ( set ( d2 ) ) ret = { } for key in keys : v1 = d1 . get ( key ) v2 = d2 . get ( key ) if v1 is None : ret [ key ] = v2 elif v2 is None : ret [ key ] = v1 else : ret [ key ] = v1 + v2 return ret
5,334
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L7-L25
[ "def", "_format_line", "(", "self", ",", "data", ",", "column", "=", "0", ",", "rel_line", "=", "1", ")", ":", "line_length", "=", "len", "(", "data", ")", "if", "line_length", ">", "140", ":", "if", "rel_line", "==", "0", ":", "# Trim from the beginning", "data", "=", "'... %s'", "%", "data", "[", "-", "140", ":", "]", "elif", "rel_line", "==", "1", ":", "# Trim surrounding the error position", "if", "column", "<", "70", ":", "data", "=", "'%s ...'", "%", "data", "[", ":", "140", "]", "elif", "column", ">", "line_length", "-", "70", ":", "data", "=", "'... %s'", "%", "data", "[", "-", "140", ":", "]", "else", ":", "data", "=", "'... %s ...'", "%", "data", "[", "column", "-", "70", ":", "column", "+", "70", "]", "elif", "rel_line", "==", "2", ":", "# Trim from the end", "data", "=", "'%s ...'", "%", "data", "[", ":", "140", "]", "data", "=", "unicodehelper", ".", "decode", "(", "data", ")", "return", "data" ]
Update the consumed capacity metrics
def _update_capacity ( self , data ) : if 'ConsumedCapacity' in data : # This is all for backwards compatibility consumed = data [ 'ConsumedCapacity' ] if not isinstance ( consumed , list ) : consumed = [ consumed ] for cap in consumed : self . capacity += cap . get ( 'CapacityUnits' , 0 ) self . table_capacity += cap . get ( 'Table' , { } ) . get ( 'CapacityUnits' , 0 ) local_indexes = cap . get ( 'LocalSecondaryIndexes' , { } ) for k , v in six . iteritems ( local_indexes ) : self . indexes . setdefault ( k , 0 ) self . indexes [ k ] += v [ 'CapacityUnits' ] global_indexes = cap . get ( 'GlobalSecondaryIndexes' , { } ) for k , v in six . iteritems ( global_indexes ) : self . global_indexes . setdefault ( k , 0 ) self . global_indexes [ k ] += v [ 'CapacityUnits' ]
5,335
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L233-L251
[ "def", "concatenate_json", "(", "source_folder", ",", "destination_file", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "source_folder", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*.json'", ")", ":", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "with", "open", "(", "destination_file", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"[\\n\"", ")", "for", "m", "in", "matches", "[", ":", "-", "1", "]", ":", "f", ".", "write", "(", "open", "(", "m", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\",\\n\"", ")", "f", ".", "write", "(", "open", "(", "matches", "[", "-", "1", "]", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "f", ".", "write", "(", "\"\\n]\"", ")" ]
Fetch more results from Dynamo
def fetch ( self ) : self . limit . set_request_args ( self . kwargs ) data = self . connection . call ( * self . args , * * self . kwargs ) self . limit . post_fetch ( data ) self . last_evaluated_key = data . get ( 'LastEvaluatedKey' ) if self . last_evaluated_key is None : self . kwargs . pop ( 'ExclusiveStartKey' , None ) else : self . kwargs [ 'ExclusiveStartKey' ] = self . last_evaluated_key self . _update_capacity ( data ) if 'consumed_capacity' in data : self . consumed_capacity += data [ 'consumed_capacity' ] for raw_item in data [ 'Items' ] : item = self . connection . dynamizer . decode_keys ( raw_item ) if self . limit . accept ( item ) : yield item
5,336
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L288-L304
[ "def", "collect", "(", "self", ")", ":", "self", ".", "nb", "=", "nbformat", ".", "read", "(", "str", "(", "self", ".", "fspath", ")", ",", "as_version", "=", "4", ")", "# Start the cell count", "cell_num", "=", "0", "# Iterate over the cells in the notebook", "for", "cell", "in", "self", ".", "nb", ".", "cells", ":", "# Skip the cells that have text, headings or related stuff", "# Only test code cells", "if", "cell", ".", "cell_type", "==", "'code'", ":", "# The cell may contain a comment indicating that its output", "# should be checked or ignored. If it doesn't, use the default", "# behaviour. The --nbval option checks unmarked cells.", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "ws", ":", "options", "=", "defaultdict", "(", "bool", ",", "find_metadata_tags", "(", "cell", ".", "metadata", ")", ")", "comment_opts", "=", "dict", "(", "find_comment_markers", "(", "cell", ".", "source", ")", ")", "if", "set", "(", "comment_opts", ".", "keys", "(", ")", ")", "&", "set", "(", "options", ".", "keys", "(", ")", ")", ":", "warnings", ".", "warn", "(", "\"Overlapping options from comments and metadata, \"", "\"using options from comments: %s\"", "%", "str", "(", "set", "(", "comment_opts", ".", "keys", "(", ")", ")", "&", "set", "(", "options", ".", "keys", "(", ")", ")", ")", ")", "for", "w", "in", "ws", ":", "self", ".", "parent", ".", "config", ".", "warn", "(", "\"C1\"", ",", "str", "(", "w", ".", "message", ")", ",", "'%s:Cell %d'", "%", "(", "getattr", "(", "self", ",", "\"fspath\"", ",", "None", ")", ",", "cell_num", ")", ")", "options", ".", "update", "(", "comment_opts", ")", "options", ".", "setdefault", "(", "'check'", ",", "self", ".", "compare_outputs", ")", "yield", "IPyNbCell", "(", "'Cell '", "+", "str", "(", "cell_num", ")", ",", "self", ",", "cell_num", ",", "cell", ",", "options", ")", "# Update 'code' cell count", "cell_num", "+=", "1" ]
Construct the kwargs to pass to batch_get_item
def build_kwargs ( self ) : keys , self . keys = self . keys [ : MAX_GET_BATCH ] , self . keys [ MAX_GET_BATCH : ] query = { 'ConsistentRead' : self . consistent } if self . attributes is not None : query [ 'ProjectionExpression' ] = self . attributes if self . alias : query [ 'ExpressionAttributeNames' ] = self . alias query [ 'Keys' ] = keys return { 'RequestItems' : { self . tablename : query , } , 'ReturnConsumedCapacity' : self . return_capacity , }
5,337
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L331-L345
[ "def", "vim", "(", ")", ":", "install_package", "(", "'vim'", ")", "print_msg", "(", "'## install ~/.vimrc\\n'", ")", "install_file_legacy", "(", "'~/.vimrc'", ")", "print_msg", "(", "'\\n## set up pathogen\\n'", ")", "run", "(", "'mkdir -p ~/.vim/autoload ~/.vim/bundle'", ")", "checkup_git_repo_legacy", "(", "url", "=", "'https://github.com/tpope/vim-pathogen.git'", ")", "run", "(", "'ln -snf ~/repos/vim-pathogen/autoload/pathogen.vim '", "'~/.vim/autoload/pathogen.vim'", ")", "print_msg", "(", "'\\n## install vim packages\\n'", ")", "install_package", "(", "'ctags'", ")", "# required by package tagbar", "repos", "=", "[", "{", "'name'", ":", "'vim-colors-solarized'", ",", "'url'", ":", "'git://github.com/altercation/vim-colors-solarized.git'", ",", "}", ",", "{", "'name'", ":", "'nerdtree'", ",", "'url'", ":", "'https://github.com/scrooloose/nerdtree.git'", ",", "}", ",", "{", "'name'", ":", "'vim-nerdtree-tabs'", ",", "'url'", ":", "'https://github.com/jistr/vim-nerdtree-tabs.git'", ",", "}", ",", "{", "'name'", ":", "'tagbar'", ",", "'url'", ":", "'https://github.com/majutsushi/tagbar.git'", ",", "}", ",", "]", "checkup_git_repos_legacy", "(", "repos", ",", "base_dir", "=", "'~/.vim/bundle'", ")" ]
Fetch a set of items from their keys
def fetch ( self ) : kwargs = self . build_kwargs ( ) data = self . connection . call ( 'batch_get_item' , * * kwargs ) if 'UnprocessedKeys' in data : for items in six . itervalues ( data [ 'UnprocessedKeys' ] ) : self . keys . extend ( items [ 'Keys' ] ) # Getting UnprocessedKeys indicates that we are exceeding our # throughput. So sleep for a bit. self . _attempt += 1 self . connection . exponential_sleep ( self . _attempt ) else : # No UnprocessedKeys means our request rate is fine, so we can # reset the attempt number. self . _attempt = 0 self . _update_capacity ( data ) if 'consumed_capacity' in data : # Comes back as a list from BatchWriteItem self . consumed_capacity = sum ( data [ 'consumed_capacity' ] , self . consumed_capacity ) return iter ( data [ 'Responses' ] [ self . tablename ] )
5,338
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L347-L367
[ "def", "ValidateEndConfig", "(", "self", ",", "config_obj", ",", "errors_fatal", "=", "True", ")", ":", "errors", "=", "super", "(", "WindowsClientRepacker", ",", "self", ")", ".", "ValidateEndConfig", "(", "config_obj", ",", "errors_fatal", "=", "errors_fatal", ")", "install_dir", "=", "config_obj", "[", "\"Client.install_path\"", "]", "for", "path", "in", "config_obj", "[", "\"Client.tempdir_roots\"", "]", ":", "if", "path", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s starts with /, probably has Unix path.\"", "%", "path", ")", "if", "not", "path", ".", "startswith", "(", "install_dir", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s is not inside the install_dir %s, this is \"", "\"a security risk\"", "%", "(", "(", "path", ",", "install_dir", ")", ")", ")", "if", "config_obj", ".", "Get", "(", "\"Logging.path\"", ")", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Logging.path starts with /, probably has Unix path. %s\"", "%", "config_obj", "[", "\"Logging.path\"", "]", ")", "if", "\"Windows\\\\\"", "in", "config_obj", ".", "GetRaw", "(", "\"Logging.path\"", ")", ":", "errors", ".", "append", "(", "\"Windows in Logging.path, you probably want \"", "\"%(WINDIR|env) instead\"", ")", "if", "not", "config_obj", "[", "\"Client.binary_name\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on binary_name %s\"", "%", "config_obj", "[", "\"Client.binary_name\"", "]", ")", "if", "not", "config_obj", "[", "\"Nanny.binary\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on nanny_binary\"", ")", "if", "errors_fatal", "and", "errors", ":", "for", "error", "in", "errors", ":", "logging", ".", "error", "(", "\"Build Config Error: %s\"", ",", "error", ")", "raise", "RuntimeError", "(", "\"Bad configuration generated. 
Terminating.\"", ")", "else", ":", "return", "errors" ]
Return a copy of the limit
def copy ( self ) : return Limit ( self . scan_limit , self . item_limit , self . min_scan_limit , self . strict , self . filter )
5,339
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L494-L497
[ "def", "_check_missing_manifests", "(", "self", ",", "segids", ")", ":", "manifest_paths", "=", "[", "self", ".", "_manifest_path", "(", "segid", ")", "for", "segid", "in", "segids", "]", "with", "Storage", "(", "self", ".", "vol", ".", "layer_cloudpath", ",", "progress", "=", "self", ".", "vol", ".", "progress", ")", "as", "stor", ":", "exists", "=", "stor", ".", "files_exist", "(", "manifest_paths", ")", "dne", "=", "[", "]", "for", "path", ",", "there", "in", "exists", ".", "items", "(", ")", ":", "if", "not", "there", ":", "(", "segid", ",", ")", "=", "re", ".", "search", "(", "r'(\\d+):0$'", ",", "path", ")", ".", "groups", "(", ")", "dne", ".", "append", "(", "segid", ")", "return", "dne" ]
Set the Limit parameter into the request args
def set_request_args ( self , args ) : if self . scan_limit is not None : args [ 'Limit' ] = self . scan_limit elif self . item_limit is not None : args [ 'Limit' ] = max ( self . item_limit , self . min_scan_limit ) else : args . pop ( 'Limit' , None )
5,340
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L499-L506
[ "def", "store_atlas_zonefile_data", "(", "zonefile_data", ",", "zonefile_dir", ",", "fsync", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "zonefile_dir", ")", ":", "os", ".", "makedirs", "(", "zonefile_dir", ",", "0700", ")", "zonefile_hash", "=", "get_zonefile_data_hash", "(", "zonefile_data", ")", "# only store to the latest supported directory", "zonefile_path", "=", "atlas_zonefile_path", "(", "zonefile_dir", ",", "zonefile_hash", ")", "zonefile_dir_path", "=", "os", ".", "path", ".", "dirname", "(", "zonefile_path", ")", "if", "os", ".", "path", ".", "exists", "(", "zonefile_path", ")", ":", "# already exists ", "return", "True", "if", "not", "os", ".", "path", ".", "exists", "(", "zonefile_dir_path", ")", ":", "os", ".", "makedirs", "(", "zonefile_dir_path", ")", "try", ":", "with", "open", "(", "zonefile_path", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "zonefile_data", ")", "f", ".", "flush", "(", ")", "if", "fsync", ":", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "return", "False", "return", "True" ]
Return True if the limit has been reached
def complete ( self ) : if self . scan_limit is not None and self . scan_limit == 0 : return True if self . item_limit is not None and self . item_limit == 0 : return True return False
5,341
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L509-L515
[ "def", "_create_from_pandas_with_arrow", "(", "self", ",", "pdf", ",", "schema", ",", "timezone", ")", ":", "from", "pyspark", ".", "serializers", "import", "ArrowStreamPandasSerializer", "from", "pyspark", ".", "sql", ".", "types", "import", "from_arrow_type", ",", "to_arrow_type", ",", "TimestampType", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", ",", "require_minimum_pyarrow_version", "require_minimum_pandas_version", "(", ")", "require_minimum_pyarrow_version", "(", ")", "from", "pandas", ".", "api", ".", "types", "import", "is_datetime64_dtype", ",", "is_datetime64tz_dtype", "import", "pyarrow", "as", "pa", "# Create the Spark schema from list of names passed in with Arrow types", "if", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "arrow_schema", "=", "pa", ".", "Schema", ".", "from_pandas", "(", "pdf", ",", "preserve_index", "=", "False", ")", "struct", "=", "StructType", "(", ")", "for", "name", ",", "field", "in", "zip", "(", "schema", ",", "arrow_schema", ")", ":", "struct", ".", "add", "(", "name", ",", "from_arrow_type", "(", "field", ".", "type", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "schema", "=", "struct", "# Determine arrow types to coerce data when creating batches", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "arrow_types", "=", "[", "to_arrow_type", "(", "f", ".", "dataType", ")", "for", "f", "in", "schema", ".", "fields", "]", "elif", "isinstance", "(", "schema", ",", "DataType", ")", ":", "raise", "ValueError", "(", "\"Single data type %s is not supported with Arrow\"", "%", "str", "(", "schema", ")", ")", "else", ":", "# Any timestamps must be coerced to be compatible with Spark", "arrow_types", "=", "[", "to_arrow_type", "(", "TimestampType", "(", ")", ")", "if", "is_datetime64_dtype", "(", "t", ")", "or", "is_datetime64tz_dtype", "(", "t", ")", "else", "None", "for", "t", "in", "pdf", ".", "dtypes", "]", "# Slice the DataFrame 
to be batched", "step", "=", "-", "(", "-", "len", "(", "pdf", ")", "//", "self", ".", "sparkContext", ".", "defaultParallelism", ")", "# round int up", "pdf_slices", "=", "(", "pdf", "[", "start", ":", "start", "+", "step", "]", "for", "start", "in", "xrange", "(", "0", ",", "len", "(", "pdf", ")", ",", "step", ")", ")", "# Create list of Arrow (columns, type) for serializer dump_stream", "arrow_data", "=", "[", "[", "(", "c", ",", "t", ")", "for", "(", "_", ",", "c", ")", ",", "t", "in", "zip", "(", "pdf_slice", ".", "iteritems", "(", ")", ",", "arrow_types", ")", "]", "for", "pdf_slice", "in", "pdf_slices", "]", "jsqlContext", "=", "self", ".", "_wrapped", ".", "_jsqlContext", "safecheck", "=", "self", ".", "_wrapped", ".", "_conf", ".", "arrowSafeTypeConversion", "(", ")", "col_by_name", "=", "True", "# col by name only applies to StructType columns, can't happen here", "ser", "=", "ArrowStreamPandasSerializer", "(", "timezone", ",", "safecheck", ",", "col_by_name", ")", "def", "reader_func", "(", "temp_filename", ")", ":", "return", "self", ".", "_jvm", ".", "PythonSQLUtils", ".", "readArrowStreamFromFile", "(", "jsqlContext", ",", "temp_filename", ")", "def", "create_RDD_server", "(", ")", ":", "return", "self", ".", "_jvm", ".", "ArrowRDDServer", "(", "jsqlContext", ")", "# Create Spark DataFrame from Arrow stream file, using one batch per partition", "jrdd", "=", "self", ".", "_sc", ".", "_serialize_to_jvm", "(", "arrow_data", ",", "ser", ",", "reader_func", ",", "create_RDD_server", ")", "jdf", "=", "self", ".", "_jvm", ".", "PythonSQLUtils", ".", "toDataFrame", "(", "jrdd", ",", "schema", ".", "json", "(", ")", ",", "jsqlContext", ")", "df", "=", "DataFrame", "(", "jdf", ",", "self", ".", "_wrapped", ")", "df", ".", "_schema", "=", "schema", "return", "df" ]
Apply the filter and item_limit and return True to accept
def accept ( self , item ) : accept = self . filter ( item ) if accept and self . item_limit is not None : if self . item_limit > 0 : self . item_limit -= 1 elif self . strict : return False return accept
5,342
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L522-L530
[ "def", "local_session", "(", "factory", ")", ":", "factory_region", "=", "getattr", "(", "factory", ",", "'region'", ",", "'global'", ")", "s", "=", "getattr", "(", "CONN_CACHE", ",", "factory_region", ",", "{", "}", ")", ".", "get", "(", "'session'", ")", "t", "=", "getattr", "(", "CONN_CACHE", ",", "factory_region", ",", "{", "}", ")", ".", "get", "(", "'time'", ")", "n", "=", "time", ".", "time", "(", ")", "if", "s", "is", "not", "None", "and", "t", "+", "(", "60", "*", "45", ")", ">", "n", ":", "return", "s", "s", "=", "factory", "(", ")", "setattr", "(", "CONN_CACHE", ",", "factory_region", ",", "{", "'session'", ":", "s", ",", "'time'", ":", "n", "}", ")", "return", "s" ]
Generate a random walk and return True if the walker has returned to the origin after taking n steps .
def returned ( n ) : ## `takei` yield lazily so we can short-circuit and avoid computing the rest of the walk for pos in randwalk ( ) >> drop ( 1 ) >> takei ( xrange ( n - 1 ) ) : if pos == Origin : return True return False
5,343
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/example/randwalk.py#L16-L24
[ "def", "compare_documents", "(", "self", ",", "file_1", ",", "file_2", ",", "file_1_content_type", "=", "None", ",", "file_2_content_type", "=", "None", ",", "file_1_label", "=", "None", ",", "file_2_label", "=", "None", ",", "model", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "file_1", "is", "None", ":", "raise", "ValueError", "(", "'file_1 must be provided'", ")", "if", "file_2", "is", "None", ":", "raise", "ValueError", "(", "'file_2 must be provided'", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'compare-comply'", ",", "'V1'", ",", "'compare_documents'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", ",", "'file_1_label'", ":", "file_1_label", ",", "'file_2_label'", ":", "file_2_label", ",", "'model'", ":", "model", "}", "form_data", "=", "{", "}", "form_data", "[", "'file_1'", "]", "=", "(", "None", ",", "file_1", ",", "file_1_content_type", "or", "'application/octet-stream'", ")", "form_data", "[", "'file_2'", "]", "=", "(", "None", ",", "file_2", ",", "file_2_content_type", "or", "'application/octet-stream'", ")", "url", "=", "'/v1/comparison'", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "files", "=", "form_data", ",", "accept_json", "=", "True", ")", "return", "response" ]
Generate a random walk and return its length upto the moment that the walker first returns to the origin .
def first_return ( ) : walk = randwalk ( ) >> drop ( 1 ) >> takewhile ( lambda v : v != Origin ) >> list return len ( walk )
5,344
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/example/randwalk.py#L26-L36
[ "def", "classify_languages", "(", "self", ")", ":", "return", "BlobsWithLanguageDataFrame", "(", "self", ".", "_engine_dataframe", ".", "classifyLanguages", "(", ")", ",", "self", ".", "_session", ",", "self", ".", "_implicits", ")" ]
An arithmetic sequence generator . Works with any type with + defined .
def seq ( start = 0 , step = 1 ) : def seq ( a , d ) : while 1 : yield a a += d return seq ( start , step )
5,345
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L1194-L1204
[ "def", "filter_thumbnail_files", "(", "chan_path", ",", "filenames", ",", "metadata_provider", ")", ":", "thumbnail_files_to_skip", "=", "metadata_provider", ".", "get_thumbnail_paths", "(", ")", "filenames_cleaned", "=", "[", "]", "for", "filename", "in", "filenames", ":", "keep", "=", "True", "chan_filepath", "=", "os", ".", "path", ".", "join", "(", "chan_path", ",", "filename", ")", "chan_filepath_tuple", "=", "path_to_tuple", "(", "chan_filepath", ")", "if", "chan_filepath_tuple", "in", "thumbnail_files_to_skip", ":", "keep", "=", "False", "if", "keep", ":", "filenames_cleaned", ".", "append", "(", "filename", ")", "return", "filenames_cleaned" ]
Connect inpipe and outpipe . If outpipe is not a Stream instance it should be an function callable on an iterable .
def pipe ( inpipe , outpipe ) : if hasattr ( outpipe , '__pipe__' ) : return outpipe . __pipe__ ( inpipe ) elif hasattr ( outpipe , '__call__' ) : return outpipe ( inpipe ) else : raise BrokenPipe ( 'No connection mechanism defined' )
5,346
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L170-L179
[ "def", "_crates_cache", "(", ")", "->", "str", ":", "return", "os", ".", "environ", ".", "get", "(", "'XDG_CACHE_HOME'", ",", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.cache'", ",", "'cr8'", ",", "'crates'", ")", ")" ]
Return job ids assigned to the submitted items .
def submit ( self , * items ) : with self . lock : if self . closed : raise BrokenPipe ( 'Job submission has been closed.' ) id = self . jobcount self . _status += [ 'SUBMITTED' ] * len ( items ) self . jobcount += len ( items ) for item in items : self . waitqueue . put ( ( id , item ) ) id += 1 if len ( items ) == 1 : return id - 1 else : return range ( id - len ( items ) , id )
5,347
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L980-L994
[ "def", "clear_cached_endpoints", "(", "self", ",", "prefix", "=", "None", ")", ":", "prefix", "=", "prefix", "if", "prefix", "is", "not", "None", "else", "self", ".", "cache_prefix", "for", "endpoint", "in", "self", ".", "app", ".", "op", ".", "values", "(", ")", ":", "cache_key", "=", "'%s:app:%s'", "%", "(", "prefix", ",", "endpoint", ".", "url", ")", "self", ".", "cache", ".", "invalidate", "(", "cache_key", ")", "self", ".", "cache", ".", "invalidate", "(", "'%s:app:meta_swagger_url'", "%", "self", ".", "cache_prefix", ")", "self", ".", "app", "=", "None" ]
Try to cancel jobs with associated ids . Return the actual number of jobs cancelled .
def cancel ( self , * ids ) : ncancelled = 0 with self . lock : for id in ids : try : if self . _status [ id ] == 'SUBMITTED' : self . _status [ id ] = 'CANCELLED' ncancelled += 1 except IndexError : pass return ncancelled
5,348
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L996-L1010
[ "def", "set_cache", "(", "new_path", "=", "None", ")", ":", "global", "CACHE_DIR", ",", "API_CACHE", ",", "SPRITE_CACHE", "if", "new_path", "is", "None", ":", "new_path", "=", "get_default_cache", "(", ")", "CACHE_DIR", "=", "safe_make_dirs", "(", "os", ".", "path", ".", "abspath", "(", "new_path", ")", ")", "API_CACHE", "=", "os", ".", "path", ".", "join", "(", "CACHE_DIR", ",", "'api.cache'", ")", "SPRITE_CACHE", "=", "safe_make_dirs", "(", "os", ".", "path", ".", "join", "(", "CACHE_DIR", ",", "'sprite'", ")", ")", "return", "CACHE_DIR", ",", "API_CACHE", ",", "SPRITE_CACHE" ]
Shut down the Executor . Suspend all waiting jobs . Running workers will terminate after finishing their current job items . The call will block until all workers are terminated .
def shutdown ( self ) : with self . lock : self . pool . inqueue . put ( StopIteration ) # Stop the pool workers self . waitqueue . put ( StopIteration ) # Stop the input_feeder _iterqueue ( self . waitqueue ) >> item [ - 1 ] # Exhaust the waitqueue self . closed = True self . join ( )
5,349
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L1045-L1056
[ "def", "_check_registry_type", "(", "folder", "=", "None", ")", ":", "folder", "=", "_registry_folder", "(", "folder", ")", "default_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'registry_type.txt'", ")", "try", ":", "with", "open", "(", "default_file", ",", "\"r\"", ")", "as", "infile", ":", "data", "=", "infile", ".", "read", "(", ")", "data", "=", "data", ".", "strip", "(", ")", "ComponentRegistry", ".", "SetBackingStore", "(", "data", ")", "except", "IOError", ":", "pass" ]
monica helps you order food from the timeline
def main ( ) : arguments = docopt ( __doc__ , version = __version__ ) if arguments [ 'configure' ] and flag : configure ( ) if arguments [ 'cuisine' ] : if arguments [ 'list' ] : cuisine ( 'list' ) else : cuisine ( arguments [ '<cuisine-id>' ] ) elif arguments [ 'surprise' ] : surprise ( ) elif arguments [ 'reviews' ] : reviews ( arguments [ '<restaurant-id>' ] ) elif arguments [ 'search' ] : search ( arguments [ 'QUERY' ] ) elif arguments [ 'budget' ] : try : money = arguments [ '<budget>' ] money = float ( money ) budget ( money ) except : print 'Budget should be a number!' elif arguments [ 'restaurant' ] : restaurant ( arguments [ '<restaurant-id>' ] ) else : print ( __doc__ )
5,350
https://github.com/Zephrys/monica/blob/ff0bc7df18d86ad66af6c655cdd292ddceb84fd7/monica/monica.py#L214-L241
[ "def", "process", "(", "self", ",", "metric", ")", ":", "for", "rmq_server", "in", "self", ".", "connections", ".", "keys", "(", ")", ":", "try", ":", "if", "(", "(", "self", ".", "connections", "[", "rmq_server", "]", "is", "None", "or", "self", ".", "connections", "[", "rmq_server", "]", ".", "is_open", "is", "False", ")", ")", ":", "self", ".", "_bind", "(", "rmq_server", ")", "channel", "=", "self", ".", "channels", "[", "rmq_server", "]", "channel", ".", "basic_publish", "(", "exchange", "=", "self", ".", "rmq_exchange", ",", "routing_key", "=", "''", ",", "body", "=", "\"%s\"", "%", "metric", ")", "except", "Exception", "as", "exception", ":", "self", ".", "log", ".", "error", "(", "\"Failed publishing to %s, attempting reconnect\"", ",", "rmq_server", ")", "self", ".", "log", ".", "debug", "(", "\"Caught exception: %s\"", ",", "exception", ")", "self", ".", "_unbind", "(", "rmq_server", ")", "self", ".", "_bind", "(", "rmq_server", ")" ]
Create a list of requirements from the output of the pip freeze command saved in a text file .
def _get_requirements ( fname ) : packages = _read ( fname ) . split ( '\n' ) packages = ( p . strip ( ) for p in packages ) packages = ( p for p in packages if p and not p . startswith ( '#' ) ) return list ( packages )
5,351
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/setup.py#L41-L49
[ "def", "ng_dissim", "(", "a", ",", "b", ",", "X", "=", "None", ",", "membship", "=", "None", ")", ":", "# Without membership, revert to matching dissimilarity", "if", "membship", "is", "None", ":", "return", "matching_dissim", "(", "a", ",", "b", ")", "def", "calc_cjr", "(", "b", ",", "X", ",", "memj", ",", "idr", ")", ":", "\"\"\"Num objects w/ category value x_{i,r} for rth attr in jth cluster\"\"\"", "xcids", "=", "np", ".", "where", "(", "memj", "==", "1", ")", "return", "float", "(", "(", "np", ".", "take", "(", "X", ",", "xcids", ",", "axis", "=", "0", ")", "[", "0", "]", "[", ":", ",", "idr", "]", "==", "b", "[", "idr", "]", ")", ".", "sum", "(", "0", ")", ")", "def", "calc_dissim", "(", "b", ",", "X", ",", "memj", ",", "idr", ")", ":", "# Size of jth cluster", "cj", "=", "float", "(", "np", ".", "sum", "(", "memj", ")", ")", "return", "(", "1.0", "-", "(", "calc_cjr", "(", "b", ",", "X", ",", "memj", ",", "idr", ")", "/", "cj", ")", ")", "if", "cj", "!=", "0.0", "else", "0.0", "if", "len", "(", "membship", ")", "!=", "a", ".", "shape", "[", "0", "]", "and", "len", "(", "membship", "[", "0", "]", ")", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"'membship' must be a rectangular array where \"", "\"the number of rows in 'membship' equals the \"", "\"number of rows in 'a' and the number of \"", "\"columns in 'membship' equals the number of rows in 'X'.\"", ")", "return", "np", ".", "array", "(", "[", "np", ".", "array", "(", "[", "calc_dissim", "(", "b", ",", "X", ",", "membship", "[", "idj", "]", ",", "idr", ")", "if", "b", "[", "idr", "]", "==", "t", "else", "1.0", "for", "idr", ",", "t", "in", "enumerate", "(", "val_a", ")", "]", ")", ".", "sum", "(", "0", ")", "for", "idj", ",", "val_a", "in", "enumerate", "(", "a", ")", "]", ")" ]
Adds links all placeholders plugins except django - terms plugins
def TermsProcessor ( instance , placeholder , rendered_content , original_context ) : if 'terms' in original_context : return rendered_content return mark_safe ( replace_terms ( rendered_content ) )
5,352
https://github.com/BertrandBordage/django-terms/blob/2555c2cf5abf14adef9a8e2dd22c4a9076396a10/terms/cms_plugin_processors.py#L7-L14
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Valid time steps for this service as a list of datetime objects .
def time_stops ( self ) : if not self . supports_time : return [ ] if self . service . calendar == 'standard' : units = self . service . time_interval_units interval = self . service . time_interval steps = [ self . time_start ] if units in ( 'years' , 'decades' , 'centuries' ) : if units == 'years' : years = interval elif units == 'decades' : years = 10 * interval else : years = 100 * interval next_value = lambda x : x . replace ( year = x . year + years ) elif units == 'months' : def _fn ( x ) : year = x . year + ( x . month + interval - 1 ) // 12 month = ( x . month + interval ) % 12 or 12 day = min ( x . day , calendar . monthrange ( year , month ) [ 1 ] ) return x . replace ( year = year , month = month , day = day ) next_value = _fn else : if units == 'milliseconds' : delta = timedelta ( milliseconds = interval ) elif units == 'seconds' : delta = timedelta ( seconds = interval ) elif units == 'minutes' : delta = timedelta ( minutes = interval ) elif units == 'hours' : delta = timedelta ( hours = interval ) elif units == 'days' : delta = timedelta ( days = interval ) elif units == 'weeks' : delta = timedelta ( weeks = interval ) else : raise ValidationError ( "Service has an invalid time_interval_units: {}" . format ( self . service . time_interval_units ) ) next_value = lambda x : x + delta while steps [ - 1 ] < self . time_end : value = next_value ( steps [ - 1 ] ) if value > self . time_end : break steps . append ( value ) return steps else : # TODO raise NotImplementedError
5,353
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/models.py#L98-L155
[ "def", "unshare", "(", "flags", ")", ":", "res", "=", "lib", ".", "unshare", "(", "flags", ")", "if", "res", "!=", "0", ":", "_check_error", "(", "ffi", ".", "errno", ")" ]
Parse target coordinates in various ways ...
def _parse_coords ( self , opts ) : # The coordinates are mutually exclusive, so # shouldn't have to worry about over-writing them. if 'coords' in vars ( opts ) : return radius = vars ( opts ) . get ( 'radius' , 0 ) gal = None if vars ( opts ) . get ( 'gal' ) is not None : gal = opts . gal elif vars ( opts ) . get ( 'cel' ) is not None : gal = cel2gal ( * opts . cel ) elif vars ( opts ) . get ( 'hpx' ) is not None : gal = pix2ang ( * opts . hpx ) if gal is not None : opts . coords = [ ( gal [ 0 ] , gal [ 1 ] , radius ) ] opts . names = [ vars ( opts ) . get ( 'name' , '' ) ] else : opts . coords = None opts . names = None if vars ( opts ) . get ( 'targets' ) is not None : opts . names , opts . coords = self . parse_targets ( opts . targets ) if vars ( opts ) . get ( 'radius' ) is not None : opts . coords [ 'radius' ] = vars ( opts ) . get ( 'radius' )
5,354
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/parser.py#L87-L112
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''", ".", "join", "(", "lines", ")", ")", "payload", ",", "checksum", "=", "data", "[", ":", "-", "3", "]", ",", "data", "[", "-", "3", ":", "]", "assert", "util", ".", "crc24", "(", "payload", ")", "==", "checksum", "return", "payload" ]
Property to return the default value .
def default_value ( self ) : if callable ( self . default ) and self . call_default : return self . default ( ) return self . default
5,355
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L446-L458
[ "def", "draw", "(", "self", ",", "milliseconds", ",", "surface", ")", ":", "self", ".", "drawn_rects", "=", "[", "]", "cam", "=", "Ragnarok", ".", "get_world", "(", ")", ".", "Camera", "cX", ",", "cY", ",", "cXMax", ",", "cYMax", "=", "cam", ".", "get_cam_bounds", "(", ")", "#Draw out only the tiles visible to the camera.", "start_pos", "=", "self", ".", "pixels_to_tiles", "(", "(", "cX", ",", "cY", ")", ")", "start_pos", "-=", "Vector2", "(", "1", ",", "1", ")", "end_pos", "=", "self", ".", "pixels_to_tiles", "(", "(", "cXMax", ",", "cYMax", ")", ")", "end_pos", "+=", "Vector2", "(", "1", ",", "1", ")", "start_pos", ".", "X", ",", "start_pos", ".", "Y", "=", "self", ".", "clamp_within_range", "(", "start_pos", ".", "X", ",", "start_pos", ".", "Y", ")", "end_pos", ".", "X", ",", "end_pos", ".", "Y", "=", "self", ".", "clamp_within_range", "(", "end_pos", ".", "X", ",", "end_pos", ".", "Y", ")", "cam_pos", "=", "cam", ".", "get_world_pos", "(", ")", "for", "x", "in", "range", "(", "start_pos", ".", "X", ",", "end_pos", ".", "X", "+", "1", ")", ":", "for", "y", "in", "range", "(", "start_pos", ".", "Y", ",", "end_pos", ".", "Y", "+", "1", ")", ":", "tile", "=", "self", ".", "tiles", "[", "y", "]", "[", "x", "]", "translate_posX", "=", "tile", ".", "coords", ".", "X", "-", "cam_pos", ".", "X", "translate_posY", "=", "tile", ".", "coords", ".", "Y", "-", "cam_pos", ".", "Y", "surface", ".", "blit", "(", "self", ".", "spritesheet", ".", "image", ",", "(", "translate_posX", ",", "translate_posY", ")", ",", "tile", ".", "source", ",", "special_flags", "=", "0", ")" ]
Property to return the variable defined in django . conf . settings .
def raw_value ( self ) : if self . parent_setting is not None : return self . parent_setting . raw_value [ self . full_name ] else : return getattr ( settings , self . full_name )
5,356
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L461-L475
[ "def", "get_max_events_in_both_arrays", "(", "events_one", ",", "events_two", ")", ":", "events_one", "=", "np", ".", "ascontiguousarray", "(", "events_one", ")", "# change memory alignement for c++ library", "events_two", "=", "np", ".", "ascontiguousarray", "(", "events_two", ")", "# change memory alignement for c++ library", "event_result", "=", "np", ".", "empty", "(", "shape", "=", "(", "events_one", ".", "shape", "[", "0", "]", "+", "events_two", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "events_one", ".", "dtype", ")", "count", "=", "analysis_functions", ".", "get_max_events_in_both_arrays", "(", "events_one", ",", "events_two", ",", "event_result", ")", "return", "event_result", "[", ":", "count", "]" ]
Return the transformed raw or default value .
def get_value ( self ) : try : value = self . raw_value except ( AttributeError , KeyError ) as err : self . _reraise_if_required ( err ) default_value = self . default_value if self . transform_default : return self . transform ( default_value ) return default_value else : return self . transform ( value )
5,357
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L489-L509
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Run the validators on the setting value .
def run_validators ( self , value ) : errors = [ ] for validator in self . validators : try : validator ( value ) except ValidationError as error : errors . extend ( error . messages ) if errors : raise ValidationError ( errors )
5,358
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L522-L531
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
Transform a path into an actual Python object .
def transform ( self , path ) : if path is None or not path : return None obj_parent_modules = path . split ( "." ) objects = [ obj_parent_modules . pop ( - 1 ) ] while True : try : parent_module_path = "." . join ( obj_parent_modules ) parent_module = importlib . import_module ( parent_module_path ) break except ImportError : if len ( obj_parent_modules ) == 1 : raise ImportError ( "No module named '%s'" % obj_parent_modules [ 0 ] ) objects . insert ( 0 , obj_parent_modules . pop ( - 1 ) ) current_object = parent_module for obj in objects : current_object = getattr ( current_object , obj ) return current_object
5,359
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L1085-L1120
[ "def", "find_video_file", "(", "rtdc_dataset", ")", ":", "video", "=", "None", "if", "rtdc_dataset", ".", "_fdir", ".", "exists", "(", ")", ":", "# Cell images (video)", "videos", "=", "[", "v", ".", "name", "for", "v", "in", "rtdc_dataset", ".", "_fdir", ".", "rglob", "(", "\"*.avi\"", ")", "]", "# Filter videos according to measurement number", "meas_id", "=", "rtdc_dataset", ".", "_mid", "videos", "=", "[", "v", "for", "v", "in", "videos", "if", "v", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "==", "meas_id", "]", "videos", ".", "sort", "(", ")", "if", "len", "(", "videos", ")", "!=", "0", ":", "# Defaults to first avi file", "video", "=", "videos", "[", "0", "]", "# g/q video file names. q comes first.", "for", "v", "in", "videos", ":", "if", "v", ".", "endswith", "(", "\"imag.avi\"", ")", ":", "video", "=", "v", "break", "# add this here, because fRT-DC measurements also contain", "# videos ..._proc.avi", "elif", "v", ".", "endswith", "(", "\"imaq.avi\"", ")", ":", "video", "=", "v", "break", "if", "video", "is", "None", ":", "return", "None", "else", ":", "return", "rtdc_dataset", ".", "_fdir", "/", "video" ]
Return dictionary with values of subsettings .
def get_value ( self ) : try : self . raw_value except ( AttributeError , KeyError ) as err : self . _reraise_if_required ( err ) default_value = self . default_value if self . transform_default : return self . transform ( default_value ) return default_value else : # If setting is defined, load values of all subsettings. value = { } for key , subsetting in self . settings . items ( ) : value [ key ] = subsetting . get_value ( ) return value
5,360
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L1154-L1174
[ "def", "to_fastq_apipe_cl", "(", "sdf_file", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "cmd", "=", "[", "\"rtg\"", ",", "\"sdf2fastq\"", ",", "\"--no-gzip\"", ",", "\"-o\"", ",", "\"-\"", "]", "if", "start", "is", "not", "None", ":", "cmd", "+=", "[", "\"--start-id=%s\"", "%", "start", "]", "if", "end", "is", "not", "None", ":", "cmd", "+=", "[", "\"--end-id=%s\"", "%", "end", "]", "if", "is_paired", "(", "sdf_file", ")", ":", "out", "=", "[", "]", "for", "ext", "in", "[", "\"left\"", ",", "\"right\"", "]", ":", "out", ".", "append", "(", "\"<(%s)\"", "%", "_rtg_cmd", "(", "cmd", "+", "[", "\"-i\"", ",", "os", ".", "path", ".", "join", "(", "sdf_file", ",", "ext", ")", "]", ")", ")", "return", "out", "else", ":", "cmd", "+=", "[", "\"-i\"", ",", "sdf_file", "]", "return", "[", "\"<(%s)\"", "%", "_rtg_cmd", "(", "cmd", ")", ",", "None", "]" ]
Sum an array of magnitudes in flux space .
def sum_mags ( mags , weights = None ) : flux = 10 ** ( - np . asarray ( mags ) / 2.5 ) if weights is None : return - 2.5 * np . log10 ( np . sum ( flux ) ) else : return - 2.5 * np . log10 ( np . sum ( weights * flux ) )
5,361
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L57-L74
[ "def", "get_rsa_key", "(", "self", ",", "username", ")", ":", "try", ":", "resp", "=", "self", ".", "session", ".", "post", "(", "'https://steamcommunity.com/login/getrsakey/'", ",", "timeout", "=", "15", ",", "data", "=", "{", "'username'", ":", "username", ",", "'donotchache'", ":", "int", "(", "time", "(", ")", "*", "1000", ")", ",", "}", ",", ")", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "HTTPError", "(", "str", "(", "e", ")", ")", "return", "resp" ]
Calculate the absolute magnitude from a set of bands
def absolute_magnitude ( distance_modulus , g , r , prob = None ) : V = g - 0.487 * ( g - r ) - 0.0249 flux = np . sum ( 10 ** ( - ( V - distance_modulus ) / 2.5 ) ) Mv = - 2.5 * np . log10 ( flux ) return Mv
5,362
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L1208-L1214
[ "def", "get_web_session_cookies", "(", "self", ")", ":", "if", "not", "self", ".", "logged_on", ":", "return", "None", "resp", "=", "self", ".", "send_job_and_wait", "(", "MsgProto", "(", "EMsg", ".", "ClientRequestWebAPIAuthenticateUserNonce", ")", ",", "timeout", "=", "7", ")", "if", "resp", "is", "None", ":", "return", "None", "skey", ",", "ekey", "=", "generate_session_key", "(", ")", "data", "=", "{", "'steamid'", ":", "self", ".", "steam_id", ",", "'sessionkey'", ":", "ekey", ",", "'encrypted_loginkey'", ":", "symmetric_encrypt", "(", "resp", ".", "webapi_authenticate_user_nonce", ".", "encode", "(", "'ascii'", ")", ",", "skey", ")", ",", "}", "try", ":", "resp", "=", "webapi", ".", "post", "(", "'ISteamUserAuth'", ",", "'AuthenticateUser'", ",", "1", ",", "params", "=", "data", ")", "except", "Exception", "as", "exp", ":", "self", ".", "_LOG", ".", "debug", "(", "\"get_web_session_cookies error: %s\"", "%", "str", "(", "exp", ")", ")", "return", "None", "return", "{", "'steamLogin'", ":", "resp", "[", "'authenticateuser'", "]", "[", "'token'", "]", ",", "'steamLoginSecure'", ":", "resp", "[", "'authenticateuser'", "]", "[", "'tokensecure'", "]", ",", "}" ]
Compute observable fraction of stars with masses greater than mass_min in each pixel in the interior region of the mask . Incorporates simplistic photometric errors .
def observableFractionCDF ( self , mask , distance_modulus , mass_min = 0.1 ) : method = 'step' mass_init , mass_pdf , mass_act , mag_1 , mag_2 = self . sample ( mass_min = mass_min , full_data_range = False ) mag_1 = mag_1 + distance_modulus mag_2 = mag_2 + distance_modulus mask_1 , mask_2 = mask . mask_roi_unique . T mag_err_1 = mask . photo_err_1 ( mask_1 [ : , np . newaxis ] - mag_1 ) mag_err_2 = mask . photo_err_2 ( mask_2 [ : , np . newaxis ] - mag_2 ) # "upper" bound set by maglim delta_hi_1 = ( mask_1 [ : , np . newaxis ] - mag_1 ) / mag_err_1 delta_hi_2 = ( mask_2 [ : , np . newaxis ] - mag_2 ) / mag_err_2 # "lower" bound set by bins_mag (maglim shouldn't be 0) delta_lo_1 = ( mask . roi . bins_mag [ 0 ] - mag_1 ) / mag_err_1 delta_lo_2 = ( mask . roi . bins_mag [ 0 ] - mag_2 ) / mag_err_2 cdf_1 = norm_cdf ( delta_hi_1 ) - norm_cdf ( delta_lo_1 ) cdf_2 = norm_cdf ( delta_hi_2 ) - norm_cdf ( delta_lo_2 ) cdf = cdf_1 * cdf_2 if method is None or method == 'none' : comp_cdf = cdf elif self . band_1_detection == True : comp = mask . mask_1 . completeness ( mag_1 , method = method ) comp_cdf = comp * cdf elif self . band_1_detection == False : comp = mask . mask_2 . completeness ( mag_2 , method = method ) comp_cdf = comp * cdf else : comp_1 = mask . mask_1 . completeness ( mag_1 , method = method ) comp_2 = mask . mask_2 . completeness ( mag_2 , method = method ) comp_cdf = comp_1 * comp_2 * cdf observable_fraction = ( mass_pdf [ np . newaxis ] * comp_cdf ) . sum ( axis = - 1 ) return observable_fraction [ mask . mask_roi_digi [ mask . roi . pixel_interior_cut ] ]
5,363
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L544-L595
[ "def", "pp_event", "(", "seq", ")", ":", "if", "isinstance", "(", "seq", ",", "Event", ")", ":", "return", "str", "(", "seq", ")", "# Get the original sequence back if seq is a pretty name already", "rev_curses", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "CURSES_NAMES", ".", "items", "(", ")", ")", "rev_curtsies", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "CURTSIES_NAMES", ".", "items", "(", ")", ")", "if", "seq", "in", "rev_curses", ":", "seq", "=", "rev_curses", "[", "seq", "]", "elif", "seq", "in", "rev_curtsies", ":", "seq", "=", "rev_curtsies", "[", "seq", "]", "pretty", "=", "curtsies_name", "(", "seq", ")", "if", "pretty", "!=", "seq", ":", "return", "pretty", "return", "repr", "(", "seq", ")", ".", "lstrip", "(", "'u'", ")", "[", "1", ":", "-", "1", "]" ]
Return a 2D histogram the isochrone in mag - mag space .
def histogram2d ( self , distance_modulus = None , delta_mag = 0.03 , steps = 10000 ) : if distance_modulus is not None : self . distance_modulus = distance_modulus # Isochrone will be binned, so might as well sample lots of points mass_init , mass_pdf , mass_act , mag_1 , mag_2 = self . sample ( mass_steps = steps ) #logger.warning("Fudging intrinisic dispersion in isochrone.") #mag_1 += np.random.normal(scale=0.02,size=len(mag_1)) #mag_2 += np.random.normal(scale=0.02,size=len(mag_2)) # We cast to np.float32 to save memory bins_mag_1 = np . arange ( self . mod + mag_1 . min ( ) - ( 0.5 * delta_mag ) , self . mod + mag_1 . max ( ) + ( 0.5 * delta_mag ) , delta_mag ) . astype ( np . float32 ) bins_mag_2 = np . arange ( self . mod + mag_2 . min ( ) - ( 0.5 * delta_mag ) , self . mod + mag_2 . max ( ) + ( 0.5 * delta_mag ) , delta_mag ) . astype ( np . float32 ) # ADW: Completeness needs to go in mass_pdf here... isochrone_pdf = np . histogram2d ( self . mod + mag_1 , self . mod + mag_2 , bins = [ bins_mag_1 , bins_mag_2 ] , weights = mass_pdf ) [ 0 ] . astype ( np . float32 ) return isochrone_pdf , bins_mag_1 , bins_mag_2
5,364
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L746-L786
[ "def", "devices", "(", "self", ",", "timeout", "=", "None", ")", ":", "# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw", "# from Android system/core/adb/transport.c statename()", "re_device_info", "=", "re", ".", "compile", "(", "r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'", ")", "devices", "=", "[", "]", "lines", "=", "self", ".", "command_output", "(", "[", "\"devices\"", ",", "\"-l\"", "]", ",", "timeout", "=", "timeout", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "line", "==", "'List of devices attached '", ":", "continue", "match", "=", "re_device_info", ".", "match", "(", "line", ")", "if", "match", ":", "device", "=", "{", "'device_serial'", ":", "match", ".", "group", "(", "1", ")", ",", "'state'", ":", "match", ".", "group", "(", "2", ")", "}", "remainder", "=", "line", "[", "match", ".", "end", "(", "2", ")", ":", "]", ".", "strip", "(", ")", "if", "remainder", ":", "try", ":", "device", ".", "update", "(", "dict", "(", "[", "j", ".", "split", "(", "':'", ")", "for", "j", "in", "remainder", ".", "split", "(", "' '", ")", "]", ")", ")", "except", "ValueError", ":", "self", ".", "_logger", ".", "warning", "(", "'devices: Unable to parse '", "'remainder for device %s'", "%", "line", ")", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
Ok now here comes the beauty of having the signal MMD .
def pdf_mmd ( self , lon , lat , mag_1 , mag_2 , distance_modulus , mask , delta_mag = 0.03 , steps = 1000 ) : logger . info ( 'Running MMD pdf' ) roi = mask . roi mmd = self . signalMMD ( mask , distance_modulus , delta_mag = delta_mag , mass_steps = steps ) # This is fragile, store this information somewhere else... nedges = np . rint ( ( roi . bins_mag [ - 1 ] - roi . bins_mag [ 0 ] ) / delta_mag ) + 1 edges_mag , delta_mag = np . linspace ( roi . bins_mag [ 0 ] , roi . bins_mag [ - 1 ] , nedges , retstep = True ) idx_mag_1 = np . searchsorted ( edges_mag , mag_1 ) idx_mag_2 = np . searchsorted ( edges_mag , mag_2 ) if np . any ( idx_mag_1 > nedges ) or np . any ( idx_mag_1 == 0 ) : msg = "Magnitude out of range..." raise Exception ( msg ) if np . any ( idx_mag_2 > nedges ) or np . any ( idx_mag_2 == 0 ) : msg = "Magnitude out of range..." raise Exception ( msg ) idx = mask . roi . indexROI ( lon , lat ) u_color = mmd [ ( mask . mask_roi_digi [ idx ] , idx_mag_1 , idx_mag_2 ) ] # Remove the bin size to convert the pdf to units of mag^-2 u_color /= delta_mag ** 2 return u_color
5,365
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L788-L817
[ "def", "get_buckets", "(", "min_length", ",", "max_length", ",", "bucket_count", ")", ":", "if", "bucket_count", "<=", "0", ":", "return", "[", "max_length", "]", "unit_length", "=", "int", "(", "(", "max_length", "-", "min_length", ")", "//", "(", "bucket_count", ")", ")", "buckets", "=", "[", "min_length", "+", "unit_length", "*", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "0", ",", "bucket_count", ")", "]", "buckets", "[", "-", "1", "]", "=", "max_length", "return", "buckets" ]
Calculate the separation in magnitude - magnitude space between points and isochrone . Uses a dense sampling of the isochrone and calculates the metric distance from any isochrone sample point .
def raw_separation ( self , mag_1 , mag_2 , steps = 10000 ) : # http://stackoverflow.com/q/12653120/ mag_1 = np . array ( mag_1 , copy = False , ndmin = 1 ) mag_2 = np . array ( mag_2 , copy = False , ndmin = 1 ) init , pdf , act , iso_mag_1 , iso_mag_2 = self . sample ( mass_steps = steps ) iso_mag_1 += self . distance_modulus iso_mag_2 += self . distance_modulus iso_cut = ( iso_mag_1 < np . max ( mag_1 ) ) & ( iso_mag_1 > np . min ( mag_1 ) ) | ( iso_mag_2 < np . max ( mag_2 ) ) & ( iso_mag_2 > np . min ( mag_2 ) ) iso_mag_1 = iso_mag_1 [ iso_cut ] iso_mag_2 = iso_mag_2 [ iso_cut ] dist_mag_1 = mag_1 [ : , np . newaxis ] - iso_mag_1 dist_mag_2 = mag_2 [ : , np . newaxis ] - iso_mag_2 return np . min ( np . sqrt ( dist_mag_1 ** 2 + dist_mag_2 ** 2 ) , axis = 1 )
5,366
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L929-L960
[ "def", "get_file", "(", "cls", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "CONFIG", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "with", "codecs", ".", "open", "(", "filename", ",", "encoding", "=", "'utf-8'", ")", "as", "fp", ":", "config", ".", "readfp", "(", "fp", ")", "return", "cls", ".", "_parse_rtv_file", "(", "config", ")" ]
Calculate the separation between a specific point and the isochrone in magnitude - magnitude space . Uses an interpolation
def separation ( self , mag_1 , mag_2 ) : iso_mag_1 = self . mag_1 + self . distance_modulus iso_mag_2 = self . mag_2 + self . distance_modulus def interp_iso ( iso_mag_1 , iso_mag_2 , mag_1 , mag_2 ) : interp_1 = scipy . interpolate . interp1d ( iso_mag_1 , iso_mag_2 , bounds_error = False ) interp_2 = scipy . interpolate . interp1d ( iso_mag_2 , iso_mag_1 , bounds_error = False ) dy = interp_1 ( mag_1 ) - mag_2 dx = interp_2 ( mag_2 ) - mag_1 dmag_1 = np . fabs ( dx * dy ) / ( dx ** 2 + dy ** 2 ) * dy dmag_2 = np . fabs ( dx * dy ) / ( dx ** 2 + dy ** 2 ) * dx return dmag_1 , dmag_2 # Separate the various stellar evolution stages if np . issubdtype ( self . stage . dtype , np . number ) : sel = ( self . stage < self . hb_stage ) else : sel = ( self . stage != self . hb_stage ) # First do the MS/RGB rgb_mag_1 = iso_mag_1 [ sel ] rgb_mag_2 = iso_mag_2 [ sel ] dmag_1 , dmag_2 = interp_iso ( rgb_mag_1 , rgb_mag_2 , mag_1 , mag_2 ) # Then do the HB (if it exists) if not np . all ( sel ) : hb_mag_1 = iso_mag_1 [ ~ sel ] hb_mag_2 = iso_mag_2 [ ~ sel ] hb_dmag_1 , hb_dmag_2 = interp_iso ( hb_mag_1 , hb_mag_2 , mag_1 , mag_2 ) dmag_1 = np . nanmin ( [ dmag_1 , hb_dmag_1 ] , axis = 0 ) dmag_2 = np . nanmin ( [ dmag_2 , hb_dmag_2 ] , axis = 0 ) #return dmag_1,dmag_2 return np . sqrt ( dmag_1 ** 2 + dmag_2 ** 2 )
5,367
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L963-L1017
[ "def", "load_config", "(", "logdir", ")", ":", "# pylint: disable=missing-raises-doc", "config_path", "=", "logdir", "and", "os", ".", "path", ".", "join", "(", "logdir", ",", "'config.yaml'", ")", "if", "not", "config_path", "or", "not", "tf", ".", "gfile", ".", "Exists", "(", "config_path", ")", ":", "message", "=", "(", "'Cannot resume an existing run since the logging directory does not '", "'contain a configuration file.'", ")", "raise", "IOError", "(", "message", ")", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "config_path", ",", "'r'", ")", "as", "file_", ":", "config", "=", "yaml", ".", "load", "(", "file_", ",", "Loader", "=", "yaml", ".", "Loader", ")", "message", "=", "'Resume run and write summaries and checkpoints to {}.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "config", ".", "logdir", ")", ")", "return", "config" ]
Get the handler function for a given operation .
def get_handler ( self , operation_id ) : handler = ( self . handlers . get ( operation_id ) or self . handlers . get ( snake_case ( operation_id ) ) ) if handler : return handler raise MissingHandler ( 'Missing handler for operation %s (tried %s too)' % ( operation_id , snake_case ( operation_id ) ) )
5,368
https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/router.py#L105-L126
[ "def", "search", "(", "*", "*", "kw", ")", ":", "portal", "=", "get_portal", "(", ")", "catalog", "=", "ICatalog", "(", "portal", ")", "catalog_query", "=", "ICatalogQuery", "(", "catalog", ")", "query", "=", "catalog_query", ".", "make_query", "(", "*", "*", "kw", ")", "return", "catalog", "(", "query", ")" ]
Add handler functions from the given namespace for instance a module .
def add_handlers ( self , namespace ) : if isinstance ( namespace , str ) : namespace = import_module ( namespace ) if isinstance ( namespace , dict ) : namespace = namespace . items ( ) else : namespace = vars ( namespace ) . items ( ) for name , value in namespace : if name . startswith ( '_' ) : continue if isfunction ( value ) or ismethod ( value ) : self . handlers [ name ] = value
5,369
https://github.com/akx/lepo/blob/34cfb24a40f18ea40f672c1ea9a0734ee1816b7d/lepo/router.py#L128-L152
[ "def", "console_set_custom_font", "(", "fontFile", ":", "AnyStr", ",", "flags", ":", "int", "=", "FONT_LAYOUT_ASCII_INCOL", ",", "nb_char_horiz", ":", "int", "=", "0", ",", "nb_char_vertic", ":", "int", "=", "0", ",", ")", "->", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fontFile", ")", ":", "raise", "RuntimeError", "(", "\"File not found:\\n\\t%s\"", "%", "(", "os", ".", "path", ".", "realpath", "(", "fontFile", ")", ",", ")", ")", "lib", ".", "TCOD_console_set_custom_font", "(", "_bytes", "(", "fontFile", ")", ",", "flags", ",", "nb_char_horiz", ",", "nb_char_vertic", ")" ]
Returns a context dictionary for use in evaluating the expression .
def get_context ( self , arr , expr , context ) : expression_names = [ x for x in self . get_expression_names ( expr ) if x not in set ( context . keys ( ) ) . union ( [ 'i' ] ) ] if len ( expression_names ) != 1 : raise ValueError ( 'The expression must have exactly one variable.' ) return { expression_names [ 0 ] : arr }
5,370
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/tasks/raster.py#L82-L96
[ "def", "view_portfolio_loss", "(", "token", ",", "dstore", ")", ":", "data", "=", "portfolio_loss", "(", "dstore", ")", "# shape (R, L)", "loss_types", "=", "list", "(", "dstore", "[", "'oqparam'", "]", ".", "loss_dt", "(", ")", ".", "names", ")", "header", "=", "[", "'portfolio_loss'", "]", "+", "loss_types", "mean", "=", "[", "'mean'", "]", "+", "[", "row", ".", "mean", "(", ")", "for", "row", "in", "data", ".", "T", "]", "stddev", "=", "[", "'stddev'", "]", "+", "[", "row", ".", "std", "(", "ddof", "=", "1", ")", "for", "row", "in", "data", ".", "T", "]", "return", "rst_table", "(", "[", "mean", ",", "stddev", "]", ",", "header", ")" ]
Creates and returns a masked view of the input array .
def execute ( self , array_in , expression , * * kwargs ) : context = self . get_context ( array_in , expression , kwargs ) context . update ( kwargs ) return ma . masked_where ( self . evaluate_expression ( expression , context ) , array_in )
5,371
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/tasks/raster.py#L104-L109
[ "def", "_schedule_sending_init_updates", "(", "self", ")", ":", "def", "_enqueue_non_rtc_init_updates", "(", ")", ":", "LOG", ".", "debug", "(", "'Scheduled queuing of initial Non-RTC UPDATEs'", ")", "tm", "=", "self", ".", "_core_service", ".", "table_manager", "self", ".", "comm_all_best_paths", "(", "tm", ".", "global_tables", ")", "self", ".", "_sent_init_non_rtc_update", "=", "True", "# Stop the timer as we have handled RTC EOR", "self", ".", "_rtc_eor_timer", ".", "stop", "(", ")", "self", ".", "_rtc_eor_timer", "=", "None", "self", ".", "_sent_init_non_rtc_update", "=", "False", "self", ".", "_rtc_eor_timer", "=", "self", ".", "_create_timer", "(", "Peer", ".", "RTC_EOR_TIMER_NAME", ",", "_enqueue_non_rtc_init_updates", ")", "# Start timer for sending initial updates", "self", ".", "_rtc_eor_timer", ".", "start", "(", "const", ".", "RTC_EOR_DEFAULT_TIME", ",", "now", "=", "False", ")", "LOG", ".", "debug", "(", "'Scheduled sending of initial Non-RTC UPDATEs after:'", "' %s sec'", ",", "const", ".", "RTC_EOR_DEFAULT_TIME", ")" ]
Raise a server error indicating a request timeout to the given URL .
def timeout_error ( url , timeout ) : msg = 'Request timed out: {} timeout: {}s' . format ( url , timeout ) log . warning ( msg ) return ServerError ( msg )
5,372
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/errors.py#L19-L23
[ "def", "denormalize_volume", "(", "volume", ")", ":", "id", "=", "volume", ".", "get", "(", "'id'", ",", "None", ")", "res", "=", "dict", "(", ")", "res", ".", "update", "(", "volume", "[", "'metadata'", "]", ")", "denorm_attachments", "=", "list", "(", ")", "for", "a", "in", "volume", "[", "'attachments'", "]", ":", "denorm_attachments", ".", "append", "(", "Archivant", ".", "denormalize_attachment", "(", "a", ")", ")", "res", "[", "'_attachments'", "]", "=", "denorm_attachments", "return", "id", ",", "res" ]
Plot a basic histogram .
def histogram ( title , title_x , title_y , x , bins_x ) : plt . figure ( ) plt . hist ( x , bins_x ) plt . xlabel ( title_x ) plt . ylabel ( title_y ) plt . title ( title )
5,373
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L61-L70
[ "def", "disconnect_pv_clients", "(", "self", ",", "mris", ")", ":", "# type: (List[str]) -> None", "for", "mri", "in", "mris", ":", "for", "pv", "in", "self", ".", "_pvs", ".", "pop", "(", "mri", ",", "{", "}", ")", ".", "values", "(", ")", ":", "# Close pv with force destroy on, this will call", "# onLastDisconnect", "pv", ".", "close", "(", "destroy", "=", "True", ",", "sync", "=", "True", ",", "timeout", "=", "1.0", ")" ]
Create a two - dimension histogram plot or binned map .
def twoDimensionalHistogram ( title , title_x , title_y , z , bins_x , bins_y , lim_x = None , lim_y = None , vmin = None , vmax = None ) : plt . figure ( ) mesh_x , mesh_y = np . meshgrid ( bins_x , bins_y ) if vmin != None and vmin == vmax : plt . pcolor ( mesh_x , mesh_y , z ) else : plt . pcolor ( mesh_x , mesh_y , z , vmin = vmin , vmax = vmax ) plt . xlabel ( title_x ) plt . ylabel ( title_y ) plt . title ( title ) plt . colorbar ( ) if lim_x : plt . xlim ( lim_x [ 0 ] , lim_x [ 1 ] ) if lim_y : plt . ylim ( lim_y [ 0 ] , lim_y [ 1 ] )
5,374
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L74-L101
[ "def", "exportable_keys", "(", "self", ")", ":", "keys", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "tup", "in", "self", ".", "_tuples", ":", "for", "key", ",", "private", "in", "tup", ".", "_keys_and_privacy", "(", ")", ".", "items", "(", ")", ":", "keys", "[", "key", "]", ".", "append", "(", "private", ")", "return", "[", "k", "for", "k", ",", "ps", "in", "keys", ".", "items", "(", ")", "if", "not", "any", "(", "ps", ")", "]" ]
Create a two - dimensional scatter plot .
def twoDimensionalScatter ( title , title_x , title_y , x , y , lim_x = None , lim_y = None , color = 'b' , size = 20 , alpha = None ) : plt . figure ( ) plt . scatter ( x , y , c = color , s = size , alpha = alpha , edgecolors = 'none' ) plt . xlabel ( title_x ) plt . ylabel ( title_y ) plt . title ( title ) if type ( color ) is not str : plt . colorbar ( ) if lim_x : plt . xlim ( lim_x [ 0 ] , lim_x [ 1 ] ) if lim_y : plt . ylim ( lim_y [ 0 ] , lim_y [ 1 ] )
5,375
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L105-L127
[ "def", "_unregister_bundle_factories", "(", "self", ",", "bundle", ")", ":", "# type: (Bundle) -> None", "with", "self", ".", "__factories_lock", ":", "# Find out which factories must be removed", "to_remove", "=", "[", "factory_name", "for", "factory_name", "in", "self", ".", "__factories", "if", "self", ".", "get_factory_bundle", "(", "factory_name", ")", "is", "bundle", "]", "# Remove all of them", "for", "factory_name", "in", "to_remove", ":", "try", ":", "self", ".", "unregister_factory", "(", "factory_name", ")", "except", "ValueError", "as", "ex", ":", "_logger", ".", "warning", "(", "\"Error unregistering factory '%s': %s\"", ",", "factory_name", ",", "ex", ")" ]
Draw local projection of healpix map .
def drawHealpixMap ( hpxmap , lon , lat , size = 1.0 , xsize = 501 , coord = 'GC' , * * kwargs ) : ax = plt . gca ( ) x = np . linspace ( - size , size , xsize ) y = np . linspace ( - size , size , xsize ) xx , yy = np . meshgrid ( x , y ) coord = coord . upper ( ) if coord == 'GC' : #Assumes map and (lon,lat) are Galactic, but plotting celestial llon , llat = image2sphere ( * gal2cel ( lon , lat ) , x = xx . flat , y = yy . flat ) pix = ang2pix ( get_nside ( hpxmap ) , * cel2gal ( llon , llat ) ) elif coord == 'CG' : #Assumes map and (lon,lat) are celestial, but plotting Galactic llon , llat = image2sphere ( * cel2gal ( lon , lat ) , x = xx . flat , y = yy . flat ) pix = ang2pix ( get_nside ( hpxmap ) , * gal2cel ( llon , llat ) ) else : #Assumes plotting the native coordinates llon , llat = image2sphere ( lon , lat , xx . flat , yy . flat ) pix = ang2pix ( get_nside ( hpxmap ) , llon , llat ) values = hpxmap [ pix ] . reshape ( xx . shape ) zz = np . ma . array ( values , mask = ( values == hp . UNSEEN ) , fill_value = np . nan ) return drawProjImage ( xx , yy , zz , coord = coord , * * kwargs )
5,376
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L162-L189
[ "def", "tokenizer", "(", "text", ")", ":", "for", "entry", "in", "text", ".", "split", "(", "'$$$$\\n'", ")", ":", "if", "entry", ".", "rstrip", "(", ")", ":", "lines_stream", "=", "deque", "(", "entry", ".", "split", "(", "'\\n'", ")", ")", "else", ":", "continue", "# yield from _molfile(stream=lines_stream)", "for", "token", "in", "_molfile", "(", "stream", "=", "lines_stream", ")", ":", "yield", "token", "if", "len", "(", "lines_stream", ")", ":", "# yield from _sdfile(stream=lines_stream)", "for", "token", "in", "_sdfile", "(", "stream", "=", "lines_stream", ")", ":", "yield", "token", "yield", "EndOfFile", "(", ")" ]
Download Digitized Sky Survey images
def getDSSImage ( ra , dec , radius = 1.0 , xsize = 800 , * * kwargs ) : import subprocess import tempfile service = 'skyview' if service == 'stsci' : url = "https://archive.stsci.edu/cgi-bin/dss_search?" scale = 2.0 * radius * 60. params = dict ( ra = '%.3f' % ra , dec = '%.3f' % dec , width = scale , height = scale , format = 'gif' , version = 1 ) #v='poss2ukstu_red' elif service == 'skyview' : url = "https://skyview.gsfc.nasa.gov/cgi-bin/images?" params = dict ( survey = 'DSS' , position = '%.3f,%.3f' % ( ra , dec ) , scaling = 'Linear' , Return = 'GIF' , size = 2 * radius , projection = 'Car' , pixels = xsize ) else : raise Exception ( "Unrecognized service." ) query = '&' . join ( "%s=%s" % ( k , v ) for k , v in params . items ( ) ) tmp = tempfile . NamedTemporaryFile ( suffix = '.gif' ) cmd = 'wget --progress=dot:mega -O %s "%s"' % ( tmp . name , url + query ) subprocess . call ( cmd , shell = True ) im = plt . imread ( tmp . name ) tmp . close ( ) if service == 'stsci' and xsize : im = scipy . misc . imresize ( im , size = ( xsize , xsize ) ) return im
5,377
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L241-L298
[ "def", "_attach", "(", "self", ",", "fcp", ",", "assigner_id", ",", "target_wwpn", ",", "target_lun", ",", "multipath", ",", "os_version", ",", "mount_point", ")", ":", "LOG", ".", "info", "(", "'Start to attach device to %s'", "%", "assigner_id", ")", "self", ".", "fcp_mgr", ".", "init_fcp", "(", "assigner_id", ")", "new", "=", "self", ".", "fcp_mgr", ".", "increase_fcp_usage", "(", "fcp", ",", "assigner_id", ")", "try", ":", "if", "new", ":", "self", ".", "_dedicate_fcp", "(", "fcp", ",", "assigner_id", ")", "self", ".", "_add_disk", "(", "fcp", ",", "assigner_id", ",", "target_wwpn", ",", "target_lun", ",", "multipath", ",", "os_version", ",", "mount_point", ")", "except", "exception", ".", "SDKBaseException", "as", "err", ":", "errmsg", "=", "'rollback attach because error:'", "+", "err", ".", "format_message", "(", ")", "LOG", ".", "error", "(", "errmsg", ")", "connections", "=", "self", ".", "fcp_mgr", ".", "decrease_fcp_usage", "(", "fcp", ",", "assigner_id", ")", "# if connections less than 1, undedicate the device", "if", "not", "connections", ":", "with", "zvmutils", ".", "ignore_errors", "(", ")", ":", "self", ".", "_undedicate_fcp", "(", "fcp", ",", "assigner_id", ")", "raise", "exception", ".", "SDKBaseException", "(", "msg", "=", "errmsg", ")", "# TODO: other exceptions?", "LOG", ".", "info", "(", "'Attaching device to %s is done.'", "%", "assigner_id", ")" ]
Draw horizontal and vertical slices through histogram
def draw_slices ( hist , func = np . sum , * * kwargs ) : from mpl_toolkits . axes_grid1 import make_axes_locatable kwargs . setdefault ( 'ls' , '-' ) ax = plt . gca ( ) data = hist # Slices vslice = func ( data , axis = 0 ) hslice = func ( data , axis = 1 ) npix = np . array ( data . shape ) #xlim,ylim = plt.array(zip([0,0],npix-1)) xlim = ax . get_xlim ( ) ylim = ax . get_ylim ( ) #extent = ax.get_extent() #xlim =extent[:2] #ylim = extent[2:] # Bin centers xbin = np . linspace ( xlim [ 0 ] , xlim [ 1 ] , len ( vslice ) ) #+0.5 ybin = np . linspace ( ylim [ 0 ] , ylim [ 1 ] , len ( hslice ) ) #+0.5 divider = make_axes_locatable ( ax ) #gh2 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[2, 1]) hax = divider . append_axes ( "right" , size = 1.2 , pad = 0.05 , sharey = ax , axes_class = axes_divider . LocatableAxes ) hax . axis [ "left" ] . toggle ( label = False , ticklabels = False ) #hax.plot(hslice, plt.arange(*ylim)+0.5,'-') # Bin center hax . plot ( hslice , ybin , * * kwargs ) # Bin center hax . xaxis . set_major_locator ( MaxNLocator ( 4 , prune = 'both' ) ) hax . set_ylim ( * ylim ) #gh1 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[0, 2]) vax = divider . append_axes ( "top" , size = 1.2 , pad = 0.05 , sharex = ax , axes_class = axes_divider . LocatableAxes ) vax . axis [ "bottom" ] . toggle ( label = False , ticklabels = False ) vax . plot ( xbin , vslice , * * kwargs ) vax . yaxis . set_major_locator ( MaxNLocator ( 4 , prune = 'lower' ) ) vax . set_xlim ( * xlim ) return vax , hax
5,378
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1035-L1077
[ "def", "NewFd", "(", "self", ",", "fd", ",", "URL", ",", "encoding", ",", "options", ")", ":", "ret", "=", "libxml2mod", ".", "xmlReaderNewFd", "(", "self", ".", "_o", ",", "fd", ",", "URL", ",", "encoding", ",", "options", ")", "return", "ret" ]
Plot a catalog of coordinates on a full - sky map .
def plotSkymapCatalog ( lon , lat , * * kwargs ) : fig = plt . figure ( ) ax = plt . subplot ( 111 , projection = projection ) drawSkymapCatalog ( ax , lon , lat , * * kwargs )
5,379
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1409-L1415
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Create closed path .
def makePath ( x_path , y_path , epsilon = 1.e-10 ) : x_path_closed = np . concatenate ( [ x_path , x_path [ : : - 1 ] ] ) y_path_closed = np . concatenate ( [ y_path , epsilon + y_path [ : : - 1 ] ] ) path = matplotlib . path . Path ( list ( zip ( x_path_closed , y_path_closed ) ) ) return path
5,380
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L1499-L1506
[ "def", "keep_sources", "(", "self", ",", "keep", ")", ":", "if", "self", ".", "unmixing_", "is", "None", "or", "self", ".", "mixing_", "is", "None", ":", "raise", "RuntimeError", "(", "\"No sources available (run do_mvarica first)\"", ")", "n_sources", "=", "self", ".", "mixing_", ".", "shape", "[", "0", "]", "self", ".", "remove_sources", "(", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "n_sources", ")", ",", "np", ".", "array", "(", "keep", ")", ")", ")", "return", "self" ]
Draw the maglim from the mask .
def drawMask ( self , ax = None , mask = None , mtype = 'maglim' ) : if not ax : ax = plt . gca ( ) if mask is None : mask = ugali . analysis . loglike . createMask ( self . config , roi = self . roi ) mask_map = hp . UNSEEN * np . ones ( hp . nside2npix ( self . nside ) ) if mtype . lower ( ) == 'maglim' : mask_map [ mask . roi . pixels ] = mask . mask_1 . mask_roi_sparse elif mtype . lower ( ) == 'fracdet' : mask_map [ mask . roi . pixels ] = mask . mask_1 . frac_roi_sparse else : raise Exception ( "Unrecognized type: %s" % mtype ) masked = ( mask_map == hp . UNSEEN ) | ( mask_map == 0 ) mask_map = np . ma . array ( mask_map , mask = masked , fill_value = np . nan ) im = drawHealpixMap ( mask_map , self . lon , self . lat , self . radius , coord = self . coord ) try : cbar = ax . cax . colorbar ( im ) except : cbar = plt . colorbar ( im ) cbar . ax . set_xticklabels ( cbar . ax . get_xticklabels ( ) , rotation = 90 ) ax . annotate ( mtype , * * self . label_kwargs ) return im
5,381
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L458-L480
[ "def", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", ":", "status", "=", "_libcudnn", ".", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", "cudnnCheckStatus", "(", "status", ")" ]
Parse configuration options out of an . ini configuration file .
def parse ( self , configManager , config ) : parser = ConfigParser . RawConfigParser ( ) configOptions = dict ( ) configFile = self . _getConfigFile ( config ) if configFile : parser . readfp ( configFile ) for section in parser . sections ( ) : if self . sections is None or section in self . sections : configOptions . update ( parser . items ( section ) ) return configOptions
5,382
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/ini.py#L46-L67
[ "def", "removeJob", "(", "self", ",", "jobBatchSystemID", ")", ":", "assert", "jobBatchSystemID", "in", "self", ".", "jobBatchSystemIDToIssuedJob", "jobNode", "=", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobBatchSystemID", "]", "if", "jobNode", ".", "preemptable", ":", "# len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,", "# so decrement this value before removing the job from the issuedJob map", "assert", "self", ".", "preemptableJobsIssued", ">", "0", "self", ".", "preemptableJobsIssued", "-=", "1", "del", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobBatchSystemID", "]", "# If service job", "if", "jobNode", ".", "jobStoreID", "in", "self", ".", "toilState", ".", "serviceJobStoreIDToPredecessorJob", ":", "# Decrement the number of services", "if", "jobNode", ".", "preemptable", ":", "self", ".", "preemptableServiceJobsIssued", "-=", "1", "else", ":", "self", ".", "serviceJobsIssued", "-=", "1", "return", "jobNode" ]
Create a chapter that contains a random number of paragraphs
def write_chapter ( self ) : self . paragraphs = [ ] self . paragraphs . append ( '\n' ) for x in range ( randint ( 0 , 50 ) ) : p = Paragraph ( self . model ) self . paragraphs . append ( p . get_paragraph ( ) ) self . paragraphs . append ( '\n' ) return self . paragraphs
5,383
https://github.com/accraze/python-markov-novel/blob/ff451639e93a3ac11fb0268b92bc0cffc00bfdbe/src/markov_novel/chapter.py#L20-L32
[ "def", "setKeyboardTransformAbsolute", "(", "self", ",", "eTrackingOrigin", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setKeyboardTransformAbsolute", "pmatTrackingOriginToKeyboardTransform", "=", "HmdMatrix34_t", "(", ")", "fn", "(", "eTrackingOrigin", ",", "byref", "(", "pmatTrackingOriginToKeyboardTransform", ")", ")", "return", "pmatTrackingOriginToKeyboardTransform" ]
Create CSS file .
def buildcss ( app , buildpath , imagefile ) : # set default values div = 'body' repeat = 'repeat-y' position = 'center' attachment = 'scroll' if app . config . sphinxmark_div != 'default' : div = app . config . sphinxmark_div if app . config . sphinxmark_repeat is False : repeat = 'no-repeat' if app . config . sphinxmark_fixed is True : attachment = 'fixed' border = app . config . sphinxmark_border if border == 'left' or border == 'right' : css = template ( 'border' , div = div , image = imagefile , side = border ) else : css = template ( 'watermark' , div = div , image = imagefile , repeat = repeat , position = position , attachment = attachment ) LOG . debug ( '[sphinxmark] Template: ' + css ) cssname = 'sphinxmark.css' cssfile = os . path . join ( buildpath , cssname ) with open ( cssfile , 'w' ) as f : f . write ( css ) return ( cssname )
5,384
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L34-L64
[ "def", "_generate_noise_system", "(", "dimensions_tr", ",", "spatial_sd", ",", "temporal_sd", ",", "spatial_noise_type", "=", "'gaussian'", ",", "temporal_noise_type", "=", "'gaussian'", ",", ")", ":", "def", "noise_volume", "(", "dimensions", ",", "noise_type", ",", ")", ":", "if", "noise_type", "==", "'rician'", ":", "# Generate the Rician noise (has an SD of 1)", "noise", "=", "stats", ".", "rice", ".", "rvs", "(", "b", "=", "0", ",", "loc", "=", "0", ",", "scale", "=", "1.527", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'exponential'", ":", "# Make an exponential distribution (has an SD of 1)", "noise", "=", "stats", ".", "expon", ".", "rvs", "(", "0", ",", "scale", "=", "1", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'gaussian'", ":", "noise", "=", "np", ".", "random", ".", "randn", "(", "np", ".", "prod", "(", "dimensions", ")", ")", ".", "reshape", "(", "dimensions", ")", "# Return the noise", "return", "noise", "# Get just the xyz coordinates", "dimensions", "=", "np", ".", "asarray", "(", "[", "dimensions_tr", "[", "0", "]", ",", "dimensions_tr", "[", "1", "]", ",", "dimensions_tr", "[", "2", "]", ",", "1", "]", ")", "# Generate noise", "spatial_noise", "=", "noise_volume", "(", "dimensions", ",", "spatial_noise_type", ")", "temporal_noise", "=", "noise_volume", "(", "dimensions_tr", ",", "temporal_noise_type", ")", "# Make the system noise have a specific spatial variability", "spatial_noise", "*=", "spatial_sd", "# Set the size of the noise", "temporal_noise", "*=", "temporal_sd", "# The mean in time of system noise needs to be zero, so subtract the", "# means of the temporal noise in time", "temporal_noise_mean", "=", "np", ".", "mean", "(", "temporal_noise", ",", "3", ")", ".", "reshape", "(", "dimensions", "[", "0", "]", ",", "dimensions", "[", "1", "]", ",", "dimensions", "[", "2", "]", ",", "1", ")", "temporal_noise", "=", "temporal_noise", "-", "temporal_noise_mean", "# Save the combination", 
"system_noise", "=", "spatial_noise", "+", "temporal_noise", "return", "system_noise" ]
Create PNG image from string .
def createimage ( app , srcdir , buildpath ) : text = app . config . sphinxmark_text # draw transparent background width = app . config . sphinxmark_text_width height = app . config . sphinxmark_text_spacing img = Image . new ( 'RGBA' , ( width , height ) , ( 255 , 255 , 255 , 0 ) ) d = ImageDraw . Draw ( img ) # set font fontfile = os . path . join ( srcdir , 'arial.ttf' ) font = ImageFont . truetype ( fontfile , app . config . sphinxmark_text_size ) # set x y location for text xsize , ysize = d . textsize ( text , font ) LOG . debug ( '[sphinxmark] x = ' + str ( xsize ) + '\ny = ' + str ( ysize ) ) x = ( width / 2 ) - ( xsize / 2 ) y = ( height / 2 ) - ( ysize / 2 ) # add text to image color = app . config . sphinxmark_text_color d . text ( ( x , y ) , text , font = font , fill = color ) # set opacity img . putalpha ( app . config . sphinxmark_text_opacity ) # rotate image img = img . rotate ( app . config . sphinxmark_text_rotation ) # save image imagefile = 'textmark_' + text + '.png' imagepath = os . path . join ( buildpath , imagefile ) img . save ( imagepath , 'PNG' ) LOG . debug ( '[sphinxmark] Image saved to: ' + imagepath ) return ( imagefile )
5,385
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L67-L103
[ "def", "find_bled112_devices", "(", "cls", ")", ":", "found_devs", "=", "[", "]", "ports", "=", "serial", ".", "tools", ".", "list_ports", ".", "comports", "(", ")", "for", "port", "in", "ports", ":", "if", "not", "hasattr", "(", "port", ",", "'pid'", ")", "or", "not", "hasattr", "(", "port", ",", "'vid'", ")", ":", "continue", "# Check if the device matches the BLED112's PID/VID combination", "if", "port", ".", "pid", "==", "1", "and", "port", ".", "vid", "==", "9304", ":", "found_devs", ".", "append", "(", "port", ".", "device", ")", "return", "found_devs" ]
Get image file .
def getimage ( app ) : # append source directory to TEMPLATE_PATH so template is found srcdir = os . path . abspath ( os . path . dirname ( __file__ ) ) TEMPLATE_PATH . append ( srcdir ) staticbase = '_static' buildpath = os . path . join ( app . outdir , staticbase ) try : os . makedirs ( buildpath ) except OSError : if not os . path . isdir ( buildpath ) : raise if app . config . sphinxmark_image == 'default' : imagefile = 'watermark-draft.png' imagepath = os . path . join ( srcdir , imagefile ) copy ( imagepath , buildpath ) LOG . debug ( '[sphinxmark] Using default image: ' + imagefile ) elif app . config . sphinxmark_image == 'text' : imagefile = createimage ( app , srcdir , buildpath ) LOG . debug ( '[sphinxmark] Image: ' + imagefile ) else : imagefile = app . config . sphinxmark_image if app . config . html_static_path : staticpath = app . config . html_static_path [ 0 ] else : staticpath = '_static' LOG . debug ( '[sphinxmark] static path: ' + staticpath ) imagepath = os . path . join ( app . confdir , staticpath , imagefile ) LOG . debug ( '[sphinxmark] Imagepath: ' + imagepath ) try : copy ( imagepath , buildpath ) except Exception : message = ( "Cannot find '%s'. Put watermark images in the " "'_static' directory or specify the location using " "'html_static_path'." % imagefile ) LOG . warning ( message ) LOG . warning ( 'Failed to add watermark.' ) return return ( buildpath , imagefile )
5,386
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L106-L149
[ "def", "_generate_noise_system", "(", "dimensions_tr", ",", "spatial_sd", ",", "temporal_sd", ",", "spatial_noise_type", "=", "'gaussian'", ",", "temporal_noise_type", "=", "'gaussian'", ",", ")", ":", "def", "noise_volume", "(", "dimensions", ",", "noise_type", ",", ")", ":", "if", "noise_type", "==", "'rician'", ":", "# Generate the Rician noise (has an SD of 1)", "noise", "=", "stats", ".", "rice", ".", "rvs", "(", "b", "=", "0", ",", "loc", "=", "0", ",", "scale", "=", "1.527", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'exponential'", ":", "# Make an exponential distribution (has an SD of 1)", "noise", "=", "stats", ".", "expon", ".", "rvs", "(", "0", ",", "scale", "=", "1", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'gaussian'", ":", "noise", "=", "np", ".", "random", ".", "randn", "(", "np", ".", "prod", "(", "dimensions", ")", ")", ".", "reshape", "(", "dimensions", ")", "# Return the noise", "return", "noise", "# Get just the xyz coordinates", "dimensions", "=", "np", ".", "asarray", "(", "[", "dimensions_tr", "[", "0", "]", ",", "dimensions_tr", "[", "1", "]", ",", "dimensions_tr", "[", "2", "]", ",", "1", "]", ")", "# Generate noise", "spatial_noise", "=", "noise_volume", "(", "dimensions", ",", "spatial_noise_type", ")", "temporal_noise", "=", "noise_volume", "(", "dimensions_tr", ",", "temporal_noise_type", ")", "# Make the system noise have a specific spatial variability", "spatial_noise", "*=", "spatial_sd", "# Set the size of the noise", "temporal_noise", "*=", "temporal_sd", "# The mean in time of system noise needs to be zero, so subtract the", "# means of the temporal noise in time", "temporal_noise_mean", "=", "np", ".", "mean", "(", "temporal_noise", ",", "3", ")", ".", "reshape", "(", "dimensions", "[", "0", "]", ",", "dimensions", "[", "1", "]", ",", "dimensions", "[", "2", "]", ",", "1", ")", "temporal_noise", "=", "temporal_noise", "-", "temporal_noise_mean", "# Save the combination", 
"system_noise", "=", "spatial_noise", "+", "temporal_noise", "return", "system_noise" ]
Add watermark .
def watermark ( app , env ) : if app . config . sphinxmark_enable is True : LOG . info ( 'adding watermark...' , nonl = True ) buildpath , imagefile = getimage ( app ) cssname = buildcss ( app , buildpath , imagefile ) app . add_css_file ( cssname ) LOG . info ( ' done' )
5,387
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L152-L159
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Configure setup for Sphinx extension .
def setup ( app ) : app . add_config_value ( 'sphinxmark_enable' , False , 'html' ) app . add_config_value ( 'sphinxmark_div' , 'default' , 'html' ) app . add_config_value ( 'sphinxmark_border' , None , 'html' ) app . add_config_value ( 'sphinxmark_repeat' , True , 'html' ) app . add_config_value ( 'sphinxmark_fixed' , False , 'html' ) app . add_config_value ( 'sphinxmark_image' , 'default' , 'html' ) app . add_config_value ( 'sphinxmark_text' , 'default' , 'html' ) app . add_config_value ( 'sphinxmark_text_color' , ( 255 , 0 , 0 ) , 'html' ) app . add_config_value ( 'sphinxmark_text_size' , 100 , 'html' ) app . add_config_value ( 'sphinxmark_text_width' , 1000 , 'html' ) app . add_config_value ( 'sphinxmark_text_opacity' , 20 , 'html' ) app . add_config_value ( 'sphinxmark_text_spacing' , 400 , 'html' ) app . add_config_value ( 'sphinxmark_text_rotation' , 0 , 'html' ) app . connect ( 'env-updated' , watermark ) return { 'version' : '0.1.18' , 'parallel_read_safe' : True , 'parallel_write_safe' : True , }
5,388
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L162-L187
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Uses Stirling s approximation for the log - gamma function suitable for large arguments .
def gammalnStirling ( z ) : return ( 0.5 * ( np . log ( 2. * np . pi ) - np . log ( z ) ) ) + ( z * ( np . log ( z + ( 1. / ( ( 12. * z ) - ( 1. / ( 10. * z ) ) ) ) ) - 1. ) )
5,389
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/bayesian_efficiency.py#L15-L20
[ "def", "Send", "(", "self", ",", "usb", ",", "timeout_ms", "=", "None", ")", ":", "usb", ".", "BulkWrite", "(", "self", ".", "Pack", "(", ")", ",", "timeout_ms", ")", "usb", ".", "BulkWrite", "(", "self", ".", "data", ",", "timeout_ms", ")" ]
Wrapping the isochrone and kernel simulate functions .
def satellite ( isochrone , kernel , stellar_mass , distance_modulus , * * kwargs ) : mag_1 , mag_2 = isochrone . simulate ( stellar_mass , distance_modulus ) lon , lat = kernel . simulate ( len ( mag_1 ) ) return mag_1 , mag_2 , lon , lat
5,390
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L811-L818
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "# Trivial JSON format check: first character must be an open brace.", "if", "file_object", ".", "read", "(", "1", ")", "!=", "b'{'", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'is not a valid JSON file, missing opening brace.'", ")", "file_object", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "file_entry", "=", "parser_mediator", ".", "GetFileEntry", "(", ")", "file_system", "=", "file_entry", ".", "GetFileSystem", "(", ")", "json_file_path", "=", "parser_mediator", ".", "GetDisplayName", "(", ")", "split_path", "=", "file_system", ".", "SplitPath", "(", "json_file_path", ")", "try", ":", "if", "'containers'", "in", "split_path", ":", "if", "'config.json'", "in", "split_path", ":", "self", ".", "_ParseContainerConfigJSON", "(", "parser_mediator", ",", "file_object", ")", "if", "json_file_path", ".", "endswith", "(", "'-json.log'", ")", ":", "self", ".", "_ParseContainerLogJSON", "(", "parser_mediator", ",", "file_object", ")", "elif", "'graph'", "in", "split_path", ":", "if", "'json'", "in", "split_path", ":", "self", ".", "_ParseLayerConfigJSON", "(", "parser_mediator", ",", "file_object", ")", "except", "ValueError", "as", "exception", ":", "if", "exception", "==", "'No JSON object could be decoded'", ":", "raise", "errors", ".", "UnableToParseFile", "(", "exception", ")", "else", ":", "raise" ]
An a priori detectability proxy .
def detectability ( self , * * kwargs ) : distance_modulus = kwargs . get ( 'distance_modulus' ) distance = mod2dist ( distance_modulus ) stellar_mass = kwargs . get ( 'stellar_mass' ) extension = kwargs . get ( 'extension' ) # Normalized to 10^3 Msolar at mod=18 norm = 10 ** 3 / mod2dist ( 18 ) ** 2 detect = stellar_mass / distance ** 2 detect /= norm
5,391
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L76-L88
[ "def", "drop_namespace_by_url", "(", "self", ",", "url", ":", "str", ")", "->", "None", ":", "namespace", "=", "self", ".", "get_namespace_by_url", "(", "url", ")", "self", ".", "session", ".", "query", "(", "NamespaceEntry", ")", ".", "filter", "(", "NamespaceEntry", ".", "namespace", "==", "namespace", ")", ".", "delete", "(", ")", "self", ".", "session", ".", "delete", "(", "namespace", ")", "self", ".", "session", ".", "commit", "(", ")" ]
Bundle it .
def _create_catalog ( self , catalog = None ) : if catalog is None : catalog = ugali . analysis . loglike . createCatalog ( self . config , self . roi ) cut = self . mask . restrictCatalogToObservableSpace ( catalog ) self . catalog = catalog . applyCut ( cut )
5,392
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L228-L235
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Subpixels for random position generation .
def _setup_subpix ( self , nside = 2 ** 16 ) : # Only setup once... if hasattr ( self , 'subpix' ) : return # Simulate over full ROI self . roi_radius = self . config [ 'coords' ] [ 'roi_radius' ] # Setup background spatial stuff logger . info ( "Setup subpixels..." ) self . nside_pixel = self . config [ 'coords' ] [ 'nside_pixel' ] self . nside_subpixel = self . nside_pixel * 2 ** 4 # Could be config parameter epsilon = np . degrees ( hp . max_pixrad ( self . nside_pixel ) ) # Pad roi radius to cover edge healpix subpix = ugali . utils . healpix . query_disc ( self . nside_subpixel , self . roi . vec , self . roi_radius + epsilon ) superpix = ugali . utils . healpix . superpixel ( subpix , self . nside_subpixel , self . nside_pixel ) self . subpix = subpix [ np . in1d ( superpix , self . roi . pixels ) ]
5,393
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L289-L306
[ "def", "getKeyName", "(", "username", ",", "date", ",", "blob_key", ")", ":", "sep", "=", "FileMetadata", ".", "__SEP", "return", "str", "(", "username", "+", "sep", "+", "str", "(", "date", ")", "+", "sep", "+", "blob_key", ")" ]
The purpose here is to create a more finely binned background CMD to sample from .
def _setup_cmd ( self , mode = 'cloud-in-cells' ) : # Only setup once... if hasattr ( self , 'bkg_lambda' ) : return logger . info ( "Setup color..." ) # In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2 # (Remember to convert from sr to deg^2) #solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius)))) solid_angle_roi = self . roi . area_pixel * len ( self . roi . pixels ) # Large CMD bins cause problems when simulating config = Config ( self . config ) config [ 'color' ] [ 'n_bins' ] *= 5 #10 config [ 'mag' ] [ 'n_bins' ] *= 1 #2 #config['mask']['minimum_solid_angle'] = 0 roi = ugali . analysis . loglike . createROI ( config , self . roi . lon , self . roi . lat ) mask = ugali . analysis . loglike . createMask ( config , roi ) self . bkg_centers_color = roi . centers_color self . bkg_centers_mag = roi . centers_mag # Background CMD has units: [objs / deg^2 / mag^2] cmd_background = mask . backgroundCMD ( self . catalog , mode ) self . bkg_lambda = cmd_background * solid_angle_roi * roi . delta_color * roi . delta_mag np . sum ( self . bkg_lambda ) # Clean up del config , roi , mask
5,394
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L308-L340
[ "def", "Close", "(", "self", ")", ":", "if", "self", ".", "connection", "is", "not", "None", ":", "try", ":", "self", ".", "connection", ".", "commit", "(", ")", "self", ".", "connection", ".", "close", "(", ")", "self", ".", "connection", "=", "None", "except", "Exception", ",", "e", ":", "pass" ]
Quick uniform background generation .
def toy_background ( self , mc_source_id = 2 , seed = None ) : logger . info ( "Running toy background simulation..." ) size = 20000 nstar = np . random . poisson ( size ) #np.random.seed(0) logger . info ( "Simulating %i background stars..." % nstar ) ### # Random points from roi pixels ### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar) ### pix = self.roi.pixels[idx] # Random points drawn from subpixels logger . info ( "Generating uniform positions..." ) idx = np . random . randint ( 0 , len ( self . subpix ) - 1 , size = nstar ) lon , lat = pix2ang ( self . nside_subpixel , self . subpix [ idx ] ) pix = ang2pix ( self . nside_pixel , lon , lat ) lon , lat = pix2ang ( self . nside_pixel , pix ) # Single color #mag_1 = 19.05*np.ones(len(pix)) #mag_2 = 19.10*np.ones(len(pix)) # Uniform in color logger . info ( "Generating uniform CMD..." ) mag_1 = np . random . uniform ( self . config [ 'mag' ] [ 'min' ] , self . config [ 'mag' ] [ 'max' ] , size = nstar ) color = np . random . uniform ( self . config [ 'color' ] [ 'min' ] , self . config [ 'color' ] [ 'max' ] , size = nstar ) mag_2 = mag_1 - color # There is probably a better way to do this step without creating the full HEALPix map mask = - 1. * np . ones ( hp . nside2npix ( self . nside_pixel ) ) mask [ self . roi . pixels ] = self . mask . mask_1 . mask_roi_sparse mag_lim_1 = mask [ pix ] mask = - 1. * np . ones ( hp . nside2npix ( self . nside_pixel ) ) mask [ self . roi . pixels ] = self . mask . mask_2 . mask_roi_sparse mag_lim_2 = mask [ pix ] #mag_err_1 = 1.0*np.ones(len(pix)) #mag_err_2 = 1.0*np.ones(len(pix)) mag_err_1 = self . photo_err_1 ( mag_lim_1 - mag_1 ) mag_err_2 = self . photo_err_2 ( mag_lim_2 - mag_2 ) mc_source_id = mc_source_id * np . ones ( len ( mag_1 ) ) select = ( mag_lim_1 > mag_1 ) & ( mag_lim_2 > mag_2 ) hdu = ugali . observation . catalog . makeHDU ( self . 
config , mag_1 [ select ] , mag_err_1 [ select ] , mag_2 [ select ] , mag_err_2 [ select ] , lon [ select ] , lat [ select ] , mc_source_id [ select ] ) catalog = ugali . observation . catalog . Catalog ( self . config , data = hdu . data ) return catalog
5,395
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L343-L397
[ "def", "get_data", "(", "self", ",", "start", "=", "None", ",", "length", "=", "None", ")", ":", "PointerToRawData_adj", "=", "self", ".", "pe", ".", "adjust_FileAlignment", "(", "self", ".", "PointerToRawData", ",", "self", ".", "pe", ".", "OPTIONAL_HEADER", ".", "FileAlignment", ")", "VirtualAddress_adj", "=", "self", ".", "pe", ".", "adjust_SectionAlignment", "(", "self", ".", "VirtualAddress", ",", "self", ".", "pe", ".", "OPTIONAL_HEADER", ".", "SectionAlignment", ",", "self", ".", "pe", ".", "OPTIONAL_HEADER", ".", "FileAlignment", ")", "if", "start", "is", "None", ":", "offset", "=", "PointerToRawData_adj", "else", ":", "offset", "=", "(", "start", "-", "VirtualAddress_adj", ")", "+", "PointerToRawData_adj", "if", "length", "is", "not", "None", ":", "end", "=", "offset", "+", "length", "else", ":", "end", "=", "offset", "+", "self", ".", "SizeOfRawData", "# PointerToRawData is not adjusted here as we might want to read any possible extra bytes", "# that might get cut off by aligning the start (and hence cutting something off the end)", "#", "if", "end", ">", "self", ".", "PointerToRawData", "+", "self", ".", "SizeOfRawData", ":", "end", "=", "self", ".", "PointerToRawData", "+", "self", ".", "SizeOfRawData", "return", "self", ".", "pe", ".", "__data__", "[", "offset", ":", "end", "]" ]
Create a simulated satellite . Returns a catalog object .
def satellite ( self , stellar_mass , distance_modulus , mc_source_id = 1 , seed = None , * * kwargs ) : if seed is not None : np . random . seed ( seed ) isochrone = kwargs . pop ( 'isochrone' , self . isochrone ) kernel = kwargs . pop ( 'kernel' , self . kernel ) for k , v in kwargs . items ( ) : if k in kernel . params . keys ( ) : setattr ( kernel , k , v ) mag_1 , mag_2 = isochrone . simulate ( stellar_mass , distance_modulus ) lon , lat = kernel . simulate ( len ( mag_1 ) ) logger . info ( "Simulating %i satellite stars..." % len ( mag_1 ) ) pix = ang2pix ( self . config [ 'coords' ] [ 'nside_pixel' ] , lon , lat ) # There is probably a better way to do this step without creating the full HEALPix map mask = - 1. * np . ones ( hp . nside2npix ( self . config [ 'coords' ] [ 'nside_pixel' ] ) ) mask [ self . roi . pixels ] = self . mask . mask_1 . mask_roi_sparse mag_lim_1 = mask [ pix ] mask = - 1. * np . ones ( hp . nside2npix ( self . config [ 'coords' ] [ 'nside_pixel' ] ) ) mask [ self . roi . pixels ] = self . mask . mask_2 . mask_roi_sparse mag_lim_2 = mask [ pix ] mag_err_1 = self . photo_err_1 ( mag_lim_1 - mag_1 ) mag_err_2 = self . photo_err_2 ( mag_lim_2 - mag_2 ) # Randomize magnitudes by their errors mag_obs_1 = mag_1 + np . random . normal ( size = len ( mag_1 ) ) * mag_err_1 mag_obs_2 = mag_2 + np . random . normal ( size = len ( mag_2 ) ) * mag_err_2 #mag_obs_1 = mag_1 #mag_obs_2 = mag_2 #select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2) select = ( mag_lim_1 > mag_obs_1 ) & ( mag_lim_2 > mag_obs_2 ) # Make sure objects lie within the original cmd (should also be done later...) #select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0) #return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut] logger . info ( "Clipping %i simulated satellite stars..." % ( ~ select ) . sum ( ) ) mc_source_id = mc_source_id * np . 
ones ( len ( mag_1 ) ) hdu = ugali . observation . catalog . makeHDU ( self . config , mag_obs_1 [ select ] , mag_err_1 [ select ] , mag_obs_2 [ select ] , mag_err_2 [ select ] , lon [ select ] , lat [ select ] , mc_source_id [ select ] ) catalog = ugali . observation . catalog . Catalog ( self . config , data = hdu . data ) return catalog
5,396
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L495-L544
[ "def", "chunk_upload_file", "(", "self", ",", "name", ",", "folder_id", ",", "file_path", ",", "progress_callback", "=", "None", ",", "chunk_size", "=", "1024", "*", "1024", "*", "1", ")", ":", "try", ":", "return", "self", ".", "__do_chunk_upload_file", "(", "name", ",", "folder_id", ",", "file_path", ",", "progress_callback", ",", "chunk_size", ")", "except", "BoxError", ",", "ex", ":", "if", "ex", ".", "status", "!=", "401", ":", "raise", "#tokens had been refreshed, so we start again the upload", "return", "self", ".", "__do_chunk_upload_file", "(", "name", ",", "folder_id", ",", "file_path", ",", "progress_callback", ",", "chunk_size", ")" ]
Create a catalog fits file object based on input data .
def makeHDU ( self , mag_1 , mag_err_1 , mag_2 , mag_err_2 , lon , lat , mc_source_id ) : if self . config [ 'catalog' ] [ 'coordsys' ] . lower ( ) == 'cel' and self . config [ 'coords' ] [ 'coordsys' ] . lower ( ) == 'gal' : lon , lat = ugali . utils . projector . gal2cel ( lon , lat ) elif self . config [ 'catalog' ] [ 'coordsys' ] . lower ( ) == 'gal' and self . config [ 'coords' ] [ 'coordsys' ] . lower ( ) == 'cel' : lon , lat = ugali . utils . projector . cel2gal ( lon , lat ) columns = [ pyfits . Column ( name = self . config [ 'catalog' ] [ 'objid_field' ] , format = 'D' , array = np . arange ( len ( lon ) ) ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'lon_field' ] , format = 'D' , array = lon ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'lat_field' ] , format = 'D' , array = lat ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'mag_1_field' ] , format = 'E' , array = mag_1 ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'mag_err_1_field' ] , format = 'E' , array = mag_err_1 ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'mag_2_field' ] , format = 'E' , array = mag_2 ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'mag_err_2_field' ] , format = 'E' , array = mag_err_2 ) , pyfits . Column ( name = self . config [ 'catalog' ] [ 'mc_source_id_field' ] , format = 'I' , array = mc_source_id ) , ] hdu = pyfits . new_table ( columns ) return hdu
5,397
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L628-L662
[ "async", "def", "set_room_temperatures_by_name", "(", "self", ",", "room_name", ",", "sleep_temp", "=", "None", ",", "comfort_temp", "=", "None", ",", "away_temp", "=", "None", ")", ":", "if", "sleep_temp", "is", "None", "and", "comfort_temp", "is", "None", "and", "away_temp", "is", "None", ":", "return", "for", "room_id", ",", "_room", "in", "self", ".", "rooms", ".", "items", "(", ")", ":", "if", "_room", ".", "name", "==", "room_name", ":", "await", "self", ".", "set_room_temperatures", "(", "room_id", ",", "sleep_temp", ",", "comfort_temp", ",", "away_temp", ")", "return", "_LOGGER", ".", "error", "(", "\"Could not find a room with name %s\"", ",", "room_name", ")" ]
Return a dict with swapped keys and values
def inverted_dict ( d ) : return dict ( ( force_hashable ( v ) , k ) for ( k , v ) in viewitems ( dict ( d ) ) )
5,398
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L167-L173
[ "def", "getOverlayTransformAbsolute", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformAbsolute", "peTrackingOrigin", "=", "ETrackingUniverseOrigin", "(", ")", "pmatTrackingOriginToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "peTrackingOrigin", ")", ",", "byref", "(", "pmatTrackingOriginToOverlayTransform", ")", ")", "return", "result", ",", "peTrackingOrigin", ",", "pmatTrackingOriginToOverlayTransform" ]
Return a dict where the keys are all the values listed in the values of the original dict
def inverted_dict_of_lists ( d ) : new_dict = { } for ( old_key , old_value_list ) in viewitems ( dict ( d ) ) : for new_key in listify ( old_value_list ) : new_dict [ new_key ] = old_key return new_dict
5,399
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L176-L186
[ "def", "from_offset", "(", "tu", ",", "file", ",", "offset", ")", ":", "return", "conf", ".", "lib", ".", "clang_getLocationForOffset", "(", "tu", ",", "file", ",", "offset", ")" ]