query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Sort a list of strings according to the provided sorted list of string prefixes
def sort_strings(strings, sort_order=None, reverse=False, case_sensitive=False, sort_order_first=True):
    """Sort a list of strings according to the provided sorted list of string prefixes.

    NOTE(review): despite the ``sort_order=None`` default, the body iterates over
    ``sort_order`` unconditionally, so callers must supply a sequence of prefix
    strings — TODO confirm intended handling of the None default.
    """
    if not case_sensitive:
        # Normalize both the prefixes and the strings so comparisons ignore case
        sort_order = tuple(s.lower() for s in sort_order)
        strings = tuple(s.lower() for s in strings)
    # Every string is matched against prefixes using the longest prefix's length
    prefix_len = max(len(s) for s in sort_order)

    def compare(a, b, prefix_len=prefix_len):
        # cmp-style comparator: negative when a sorts first, positive when b does.
        # The factor (-2 * reverse + 1) is +1 normally and -1 when reverse=True.
        if prefix_len:
            if a[:prefix_len] in sort_order:
                if b[:prefix_len] in sort_order:
                    # Both prefixes recognized: order by their position in sort_order
                    comparison = sort_order.index(a[:prefix_len]) - sort_order.index(b[:prefix_len])
                    # Reduce the index difference to its sign (-1, 0, +1)
                    comparison = int(comparison / abs(comparison or 1))
                    if comparison:
                        return comparison * (-2 * reverse + 1)
                elif sort_order_first:
                    # Only a has a recognized prefix, so a sorts first
                    return -1 * (-2 * reverse + 1)
            # b may be in sort_order list, so it should be first
            elif sort_order_first and b[:prefix_len] in sort_order:
                return -2 * reverse + 1
        # Fall back to ordinary lexicographic comparison
        return (-1 * (a < b) + 1 * (a > b)) * (-2 * reverse + 1)
    return sorted(strings, key=functools.cmp_to_key(compare))
5,400
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L189-L238
[ "def", "unpack", "(", "self", ",", "source", ":", "IO", ")", ":", "method_count", "=", "unpack", "(", "'>H'", ",", "source", ".", "read", "(", "2", ")", ")", "[", "0", "]", "for", "_", "in", "repeat", "(", "None", ",", "method_count", ")", ":", "method", "=", "Method", "(", "self", ".", "_cf", ")", "method", ".", "unpack", "(", "source", ")", "self", ".", "append", "(", "method", ")" ]
r Normalize field values by stripping whitespace from strings localizing datetimes to a timezone etc
def clean_field_dict(field_dict, cleaner=str.strip, time_zone=None):
    """Normalize field values: strip whitespace from strings, localize datetimes, etc.

    Args:
        field_dict (Mapping): field name -> value (e.g. a Django model's __dict__)
        cleaner (callable): applied to every string value (default: str.strip)
        time_zone (tzinfo): timezone used to localize date/datetime values
            (default: DEFAULT_TZ)

    Returns:
        dict: cleaned copy of field_dict; the '_state' bookkeeping key is dropped
    """
    d = {}
    if time_zone is None:
        tz = DEFAULT_TZ
    else:
        # FIX: tz was previously unbound (NameError) whenever a time_zone was supplied
        tz = time_zone
    for k, v in viewitems(field_dict):
        if k == '_state':  # skip Django ORM internal bookkeeping field
            continue
        if isinstance(v, basestring):
            d[k] = cleaner(str(v))
        elif isinstance(v, (datetime.datetime, datetime.date)):
            d[k] = tz.localize(v)
        else:
            d[k] = v
    return d
5,401
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L241-L260
[ "def", "readme_verify", "(", ")", ":", "expected", "=", "populate_readme", "(", "REVISION", ",", "RTD_VERSION", ")", "# Actually get the stored contents.", "with", "open", "(", "README_FILE", ",", "\"r\"", ")", "as", "file_obj", ":", "contents", "=", "file_obj", ".", "read", "(", ")", "if", "contents", "!=", "expected", ":", "err_msg", "=", "\"\\n\"", "+", "get_diff", "(", "contents", ",", "expected", ",", "\"README.rst.actual\"", ",", "\"README.rst.expected\"", ")", "raise", "ValueError", "(", "err_msg", ")", "else", ":", "print", "(", "\"README contents are as expected.\"", ")" ]
Iterate through a queryset in batches of length batch_len
def generate_tuple_batches(qs, batch_len=1):
    """Iterate through a queryset, yielding tuples of at most `batch_len` items."""
    chunk = []
    for record in qs:
        if len(chunk) >= batch_len:
            yield tuple(chunk)
            chunk = []
        chunk.append(record)
    # emit whatever remains after the iterable is exhausted
    if chunk:
        yield tuple(chunk)
5,402
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L491-L506
[ "def", "beacon", "(", "config", ")", ":", "ret", "=", "[", "]", "journal", "=", "_get_journal", "(", ")", "_config", "=", "{", "}", "list", "(", "map", "(", "_config", ".", "update", ",", "config", ")", ")", "while", "True", ":", "cur", "=", "journal", ".", "get_next", "(", ")", "if", "not", "cur", ":", "break", "for", "name", "in", "_config", ".", "get", "(", "'services'", ",", "{", "}", ")", ":", "n_flag", "=", "0", "for", "key", "in", "_config", "[", "'services'", "]", "[", "name", "]", ":", "if", "isinstance", "(", "key", ",", "salt", ".", "ext", ".", "six", ".", "string_types", ")", ":", "key", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "key", ")", "if", "key", "in", "cur", ":", "if", "_config", "[", "'services'", "]", "[", "name", "]", "[", "key", "]", "==", "cur", "[", "key", "]", ":", "n_flag", "+=", "1", "if", "n_flag", "==", "len", "(", "_config", "[", "'services'", "]", "[", "name", "]", ")", ":", "# Match!", "sub", "=", "salt", ".", "utils", ".", "data", ".", "simple_types_filter", "(", "cur", ")", "sub", ".", "update", "(", "{", "'tag'", ":", "name", "}", ")", "ret", ".", "append", "(", "sub", ")", "return", "ret" ]
Find the member of a set that means count or frequency or probability or number of occurrences .
def find_count_label(d):
    """Find the member of a set that means count/frequency/probability/number of occurrences.

    Checks exact membership first, then retries with lowercased candidate names.
    Returns None when nothing matches.
    """
    sentinel = object()
    found = next((name for name in COUNT_NAMES if name in d), sentinel)
    if found is not sentinel:
        return found
    found = next((name for name in COUNT_NAMES if str(name).lower() in d), sentinel)
    return None if found is sentinel else found
5,403
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L569-L578
[ "def", "error_messages", "(", "self", ",", "driver_id", "=", "None", ")", ":", "if", "driver_id", "is", "not", "None", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "return", "self", ".", "_error_messages", "(", "driver_id", ")", "error_table_keys", "=", "self", ".", "redis_client", ".", "keys", "(", "ray", ".", "gcs_utils", ".", "TablePrefix_ERROR_INFO_string", "+", "\"*\"", ")", "driver_ids", "=", "[", "key", "[", "len", "(", "ray", ".", "gcs_utils", ".", "TablePrefix_ERROR_INFO_string", ")", ":", "]", "for", "key", "in", "error_table_keys", "]", "return", "{", "binary_to_hex", "(", "driver_id", ")", ":", "self", ".", "_error_messages", "(", "ray", ".", "DriverID", "(", "driver_id", ")", ")", "for", "driver_id", "in", "driver_ids", "}" ]
Like fuzzy_get but assume the obj is dict - like and return the value without the key
def fuzzy_get_value(obj, approximate_key, default=None, **kwargs):
    """Like fuzzy_get, but assume obj is dict-like and return only the value (no key).

    An integer-like key indexes into the dict's ordered sequence of keys;
    anything else is delegated to fuzzy_get.
    """
    dict_obj = OrderedDict(obj)
    try:
        # Integer-like keys select the value at that position among the keys
        position = int(approximate_key)
        return dict_obj[list(dict_obj)[position]]
    except (ValueError, IndexError):
        pass
    return fuzzy_get(dict_obj, approximate_key, key_and_value=False, **kwargs)
5,404
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L732-L770
[ "def", "updateSeriesRegistrationStatus", "(", ")", ":", "from", ".", "models", "import", "Series", "if", "not", "getConstant", "(", "'general__enableCronTasks'", ")", ":", "return", "logger", ".", "info", "(", "'Checking status of Series that are open for registration.'", ")", "open_series", "=", "Series", ".", "objects", ".", "filter", "(", ")", ".", "filter", "(", "*", "*", "{", "'registrationOpen'", ":", "True", "}", ")", "for", "series", "in", "open_series", ":", "series", ".", "updateRegistrationStatus", "(", ")" ]
r Join a sequence into a tuple or a concatenated string
def joined_seq(seq, sep=None):
    r"""Join a sequence into a tuple, or into one concatenated string when `sep` is a string."""
    items = tuple(seq)
    if isinstance(sep, basestring):
        items = sep.join(str(element) for element in items)
    return items
5,405
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L811-L822
[ "def", "anti_clobber_dir_path", "(", "dir_path", ",", "suffix", "=", "'.d'", ")", ":", "dir_path", "=", "os", ".", "path", ".", "normpath", "(", "dir_path", ")", "parts", "=", "dir_path", ".", "split", "(", "os", ".", "sep", ")", "for", "index", "in", "range", "(", "len", "(", "parts", ")", ")", ":", "test_path", "=", "os", ".", "sep", ".", "join", "(", "parts", "[", ":", "index", "+", "1", "]", ")", "if", "os", ".", "path", ".", "isfile", "(", "test_path", ")", ":", "parts", "[", "index", "]", "+=", "suffix", "return", "os", ".", "sep", ".", "join", "(", "parts", ")", "return", "dir_path" ]
Produce dictionary of sequences from sequence of sequences optionally with a header row .
def dos_from_table(table, header=None):
    """Produce a dict of sequences (columns) from a sequence of sequences (rows).

    Args:
        table: sequence of rows; the first row is used as the header when none given
        header: optional header, either a sequence of column names or a single
            delimited string (tab, comma, or space separated)

    Returns:
        dict: column name -> list of column values
    """
    start_row = 0
    if not table:
        return table
    if not header:
        header = table[0]
        start_row = 1
    header_list = header
    if header and isinstance(header, basestring):
        # Try common delimiters until the column count matches the table width
        header_list = header.split('\t')
        if len(header_list) != len(table[0]):
            header_list = header.split(',')
        if len(header_list) != len(table[0]):
            header_list = header.split(' ')
    ans = {}
    # FIX: iterate over the parsed header_list, not the raw header; previously a
    # string header was enumerated character-by-character, producing bogus columns
    for i, k in enumerate(header_list):
        ans[k] = [row[i] for row in table[start_row:]]
    return ans
5,406
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L847-L869
[ "def", "tangent_bbox_intersection", "(", "first", ",", "second", ",", "intersections", ")", ":", "node_first1", "=", "first", ".", "nodes", "[", ":", ",", "0", "]", "node_first2", "=", "first", ".", "nodes", "[", ":", ",", "-", "1", "]", "node_second1", "=", "second", ".", "nodes", "[", ":", ",", "0", "]", "node_second2", "=", "second", ".", "nodes", "[", ":", ",", "-", "1", "]", "endpoint_check", "(", "first", ",", "node_first1", ",", "0.0", ",", "second", ",", "node_second1", ",", "0.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first1", ",", "0.0", ",", "second", ",", "node_second2", ",", "1.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first2", ",", "1.0", ",", "second", ",", "node_second1", ",", "0.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first2", ",", "1.0", ",", "second", ",", "node_second2", ",", "1.0", ",", "intersections", ")" ]
Like numpy . transpose but allows uneven row lengths
def transposed_lists(list_of_lists, default=None):
    """Like numpy transpose, but allows uneven row lengths.

    Missing trailing entries are padded with `default`; a default of None, [],
    or () means "skip" (short columns), and the string 'None' pads with None.

    Args:
        list_of_lists: sequence of rows (rows may differ in length)
        default: padding value for short rows (see above)

    Returns:
        list of lists: the transposed columns
    """
    # FIX: the original compared with `default is []` / `is tuple()`, which is
    # always False for fresh literals; equality is what was intended.
    if default is None or default == [] or default == tuple():
        pad = []
    elif default == 'None':
        pad = [None]
    else:
        pad = [default]
    # FIX: guard the empty input, which previously raised ValueError from max()
    if not list_of_lists:
        return []
    row_lengths = [len(row) for row in list_of_lists]
    width = max(row_lengths)
    transposed = []
    for col in range(width):
        new_row = []
        for i, row in enumerate(list_of_lists):
            if col < row_lengths[i]:
                new_row.append(row[col])
            else:
                new_row.extend(pad)
        transposed.append(new_row)
    return transposed
5,407
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L872-L905
[ "def", "_handle_processor_error", "(", "self", ",", "failure", ")", ":", "# Check if we're stopping/stopped and the errback of the processor", "# deferred is just the cancelling we initiated. If so, we skip", "# notifying via the _start_d deferred, as it will be 'callback'd at the", "# end of stop()", "if", "not", "(", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ")", ":", "if", "self", ".", "_start_d", ":", "# Make sure we're not already stopped", "self", ".", "_start_d", ".", "errback", "(", "failure", ")" ]
Compute an empirical histogram PMF or CDF in a list of lists
def hist_from_counts(counts, normalize=False, cumulative=False, to_str=False, sep=',', min_bin=None, max_bin=None):
    """Compute an empirical histogram, PMF or CDF from a sequence of counts.

    Args:
        counts: sequence of per-bin counts (index = bin label)
        normalize (bool): divide counts by their total to form a PMF
        cumulative (bool): accumulate counts to form a CDF
        to_str (bool): return a delimited-string table instead of a list
        sep (str): delimiter used when to_str is True
        min_bin, max_bin (int): bin-label range; default 0 .. len(counts) - 1

    Returns:
        list of tuples (bin_label, count, ...) — or a string when to_str is True.
    """
    # Wrap the counts in a single positional dict: {bin_index: count}
    counters = [dict((i, c) for i, c in enumerate(counts))]
    # Keep only integer-valued bin labels (ints or whole floats)
    intkeys_list = [[c for c in counts_dict
                     if (isinstance(c, int) or (isinstance(c, float) and int(c) == c))]
                    for counts_dict in counters]
    # NOTE(review): `or` treats an explicit 0 the same as None here
    min_bin, max_bin = min_bin or 0, max_bin or len(counts) - 1
    histograms = []
    # NOTE(review): the loop variable `counts` shadows the parameter from here on
    for intkeys, counts in zip(intkeys_list, counters):
        histograms += [OrderedDict()]
        if not intkeys:
            continue
        if normalize:
            # Convert raw counts into probabilities (PMF)
            N = sum(counts[c] for c in intkeys)
            for c in intkeys:
                counts[c] = float(counts[c]) / N
        if cumulative:
            # Running sum turns the PMF/counts into a CDF
            for i in range(min_bin, max_bin + 1):
                histograms[-1][i] = counts.get(i, 0) + histograms[-1].get(i - 1, 0)
        else:
            for i in range(min_bin, max_bin + 1):
                histograms[-1][i] = counts.get(i, 0)
    if not histograms:
        histograms = [OrderedDict()]
    # fill in the zero counts between the integer bins of the histogram
    aligned_histograms = []
    for i in range(min_bin, max_bin + 1):
        aligned_histograms += [tuple([i] + [hist.get(i, 0) for hist in histograms])]
    if to_str:
        # FIXME: add header row
        return str_from_table(aligned_histograms, sep=sep, max_rows=365 * 2 + 1)
    return aligned_histograms
5,408
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L972-L1011
[ "def", "getTotalAssociations", "(", "self", ",", "wifiInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Wifi", ".", "getServiceType", "(", "\"getTotalAssociations\"", ")", "+", "str", "(", "wifiInterfaceId", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "results", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetTotalAssociations\"", ",", "timeout", "=", "timeout", ")", "return", "int", "(", "results", "[", "\"NewTotalAssociations\"", "]", ")" ]
Similar to fuzzy_get but allows non - string keys and a list of possible keys
def get_similar(obj, labels, default=None, min_similarity=0.5):
    """Similar to fuzzy_get, but allows non-string keys and a list of possible keys.

    NOTE(review): this function raises NotImplementedError unconditionally;
    everything below the raise is unreachable sketch code kept for reference.
    """
    raise NotImplementedError("Unfinished implementation, needs to be in fuzzy_get where list of scores & keywords is sorted.")
    labels = listify(labels)

    def not_found(*args, **kwargs):
        # Sentinel callable: distinguishes "missing" from a legitimate value
        return 0
    # Similarity is on a 0-100 scale; clamp the sweep at the caller's minimum
    min_score = int(min_similarity * 100)
    for similarity_score in [100, 95, 90, 80, 70, 50, 30, 10, 5, 0]:
        if similarity_score <= min_score:
            similarity_score = min_score
        for label in labels:
            try:
                # Prefer the Mapping protocol when available
                result = obj.get(label, not_found)
            except AttributeError:
                try:
                    result = obj.__getitem__(label)
                except (IndexError, TypeError):
                    result = not_found
            if result is not not_found:
                return result
        if similarity_score == min_score:
            if result is not not_found:
                return result
5,409
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1094-L1126
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''", ".", "join", "(", "lines", ")", ")", "payload", ",", "checksum", "=", "data", "[", ":", "-", "3", "]", ",", "data", "[", "-", "3", ":", "]", "assert", "util", ".", "crc24", "(", "payload", ")", "==", "checksum", "return", "payload" ]
r Force the file or path str to end with the indicated extension
def update_file_ext(filename, ext='txt', sep='.'):
    r"""Force the file or path string to end with the indicated extension.

    A single leading separator on `ext` is tolerated ('.txt' == 'txt').
    """
    dirname, basename = os.path.split(filename)
    # Drop one leading separator from the extension, if present
    if ext and ext[0] == sep:
        ext = ext[1:]
    parts = basename.split(sep)
    # With multiple separators keep everything but the old extension;
    # otherwise keep only the stem before the first separator.
    keep = parts[:-1] if basename.count(sep) > 1 else parts[:1]
    return os.path.join(dirname, sep.join(keep + [ext]))
5,410
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1328-L1347
[ "def", "calc_temp", "(", "Data_ref", ",", "Data", ")", ":", "T", "=", "300", "*", "(", "(", "Data", ".", "A", "*", "Data_ref", ".", "Gamma", ")", "/", "(", "Data_ref", ".", "A", "*", "Data", ".", "Gamma", ")", ")", "Data", ".", "T", "=", "T", "return", "T" ]
Change encoding of text file
def transcode(infile, outfile=None, incoding="shift-jis", outcoding="utf-8"):
    """Change the character encoding of a text file.

    When no outfile is given, writes `<basename>.utf8` in the current directory.
    """
    if not outfile:
        outfile = os.path.basename(infile) + '.utf8'
    # Decode the whole input with incoding, then re-encode it with outcoding
    with codecs.open(infile, "rb", incoding) as source:
        content = source.read()
    with codecs.open(outfile, "wb", outcoding) as sink:
        sink.write(content)
5,411
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1404-L1410
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(", "color", ",", "six", ".", "string_types", ")", ":", "rgb", "=", "map", "(", "int", ",", "color", ".", "split", "(", "','", ")", ")", "else", ":", "rgb", "=", "color", ".", "Get", "(", ")", "logging", ".", "disable", "(", "logging", ".", "DEBUG", ")", "distances", "=", "[", "color_diff", "(", "rgb", ",", "x", ")", "for", "x", "in", "self", ".", "xlwt_colors", "]", "logging", ".", "disable", "(", "logging", ".", "NOTSET", ")", "result", "=", "distances", ".", "index", "(", "min", "(", "distances", ")", ")", "self", ".", "unused_colors", ".", "discard", "(", "self", ".", "xlwt_colors", "[", "result", "]", ")", "return", "result" ]
Convert a dict to an object or namespace
def dict2obj(d):
    """Convert a dict (or list/tuple of pairs) to an object/namespace, recursively."""
    # Anything that isn't mapping-like passes through unchanged
    if not isinstance(d, (Mapping, list, tuple)):
        return d
    try:
        d = dict(d)
    except (ValueError, TypeError):
        # Not coercible to a dict (e.g. a plain list of scalars)
        return d
    obj = Object()
    for key, value in viewitems(d):
        obj.__dict__[key] = dict2obj(value)
    return obj
5,412
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1602-L1627
[ "def", "get_tdata", "(", "t_format", ",", "files", ")", ":", "def", "median", "(", "arr", ")", ":", "return", "arr", ".", "min", "(", ")", "+", "(", "arr", ".", "max", "(", ")", "-", "arr", ".", "min", "(", ")", ")", "/", "2", "import", "re", "from", "pandas", "import", "Index", "t_pattern", "=", "t_format", "for", "fmt", ",", "patt", "in", "t_patterns", ".", "items", "(", ")", ":", "t_pattern", "=", "t_pattern", ".", "replace", "(", "fmt", ",", "patt", ")", "t_pattern", "=", "re", ".", "compile", "(", "t_pattern", ")", "time", "=", "list", "(", "range", "(", "len", "(", "files", ")", ")", ")", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", ":", "time", "[", "i", "]", "=", "median", "(", "np", ".", "array", "(", "list", "(", "map", "(", "lambda", "s", ":", "np", ".", "datetime64", "(", "dt", ".", "datetime", ".", "strptime", "(", "s", ",", "t_format", ")", ")", ",", "t_pattern", ".", "findall", "(", "f", ")", ")", ")", ")", ")", "ind", "=", "np", ".", "argsort", "(", "time", ")", "# sort according to time", "files", "=", "np", ".", "array", "(", "files", ")", "[", "ind", "]", "time", "=", "np", ".", "array", "(", "time", ")", "[", "ind", "]", "return", "to_datetime", "(", "Index", "(", "time", ",", "name", "=", "'time'", ")", ")", ",", "files" ]
Return the digits to either side of a single non - digit character as a 2 - tuple of integers
def int_pair(s, default=(0, None)):
    """Return the digits on either side of a single non-digit char as a 2-tuple of ints.

    Falls back to `default` when no leading digits are found, and to
    `default[1]` for the second element when only one number is present.
    """
    pieces = re.split(r'[^0-9]+', str(s).strip())
    if not pieces or not pieces[0]:
        return default
    first = int(pieces[0])
    if len(pieces) > 1 and pieces[1]:
        return (first, int(pieces[1]))
    return (first, default[1])
5,413
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1859-L1872
[ "def", "_getStickersTemplatesDirectory", "(", "self", ",", "resource_name", ")", ":", "templates_dir", "=", "queryResourceDirectory", "(", "\"stickers\"", ",", "resource_name", ")", ".", "directory", "if", "self", ".", "filter_by_type", ":", "templates_dir", "=", "templates_dir", "+", "\"/\"", "+", "self", ".", "filter_by_type", "return", "templates_dir" ]
r Coerce a string into a float
def make_float(s, default='', ignore_commas=True):
    r"""Coerce a string (or other value) into a float.

    Tries progressively more forgiving parsers: float(), float(str()),
    normalized scientific notation, then the leading digits; returns
    `default` when everything fails.
    """
    if ignore_commas and isinstance(s, basestring):
        s = s.replace(',', '')
    try:
        return float(s)
    except (IndexError, ValueError, AttributeError, TypeError):
        pass
    try:
        return float(str(s))
    except ValueError:
        pass
    try:
        return float(normalize_scientific_notation(str(s), ignore_commas))
    except ValueError:
        pass
    try:
        return float(first_digits(s))
    except ValueError:
        return default
5,414
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1903-L1941
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Coerce a string or nested list of strings into a flat list of strings .
def normalize_names(names):
    """Coerce a string or nested list of strings into a flat list of stripped strings."""
    # A single comma-delimited string becomes a list of names
    if isinstance(names, basestring):
        names = names.split(',')
    return [str(entry).strip() for entry in listify(names)]
5,415
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2023-L2028
[ "def", "_compute_ogg_page_crc", "(", "page", ")", ":", "page_zero_crc", "=", "page", "[", ":", "OGG_FIRST_PAGE_HEADER_CRC_OFFSET", "]", "+", "b\"\\00\"", "*", "OGG_FIRST_PAGE_HEADER_CRC", ".", "size", "+", "page", "[", "OGG_FIRST_PAGE_HEADER_CRC_OFFSET", "+", "OGG_FIRST_PAGE_HEADER_CRC", ".", "size", ":", "]", "return", "ogg_page_crc", "(", "page_zero_crc", ")" ]
r Make a string compatible with typical serial number requirements
def normalize_serial_number(sn, max_length=None, left_fill='0', right_fill=str(), blank=str(), valid_chars=' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', invalid_chars=None, strip_whitespace=True, join=False, na=rex.nones):
    r"""Make a string compatible with typical serial number requirements.

    Strips/removes invalid characters, truncates to max_length, blanks out
    "not available" values, and pads with left_fill/right_fill.

    NOTE(review): this appears to be Python-2-era code (integer `/` division in
    the fill arithmetic) — confirm before running under Python 3.
    """
    # All 9 kwargs have persistent default values stored as attributes of the function
    # instance: passing None reuses the stored value; passing a value updates it.
    if max_length is None:
        max_length = normalize_serial_number.max_length
    else:
        normalize_serial_number.max_length = max_length
    if left_fill is None:
        left_fill = normalize_serial_number.left_fill
    else:
        normalize_serial_number.left_fill = left_fill
    if right_fill is None:
        right_fill = normalize_serial_number.right_fill
    else:
        normalize_serial_number.right_fill = right_fill
    if blank is None:
        blank = normalize_serial_number.blank
    else:
        normalize_serial_number.blank = blank
    if valid_chars is None:
        valid_chars = normalize_serial_number.valid_chars
    else:
        normalize_serial_number.valid_chars = valid_chars
    if invalid_chars is None:
        invalid_chars = normalize_serial_number.invalid_chars
    else:
        normalize_serial_number.invalid_chars = invalid_chars
    if strip_whitespace is None:
        strip_whitespace = normalize_serial_number.strip_whitespace
    else:
        normalize_serial_number.strip_whitespace = strip_whitespace
    if join is None:
        join = normalize_serial_number.join
    else:
        normalize_serial_number.join = join
    if na is None:
        na = normalize_serial_number.na
    else:
        normalize_serial_number.na = na
    if invalid_chars is None:
        # Derive the invalid set as the complement of valid_chars within ASCII
        invalid_chars = (c for c in charlist.ascii_all if c not in valid_chars)
    invalid_chars = ''.join(invalid_chars)
    # Strip invalid characters from both ends before any other processing
    sn = str(sn).strip(invalid_chars)
    if strip_whitespace:
        sn = sn.strip()
    if invalid_chars:
        if join:
            # Delete interior invalid characters
            # NOTE(review): str.translate keys are normally ordinals; confirm this
            # char->'' mapping actually removes anything on the target Python version.
            sn = sn.translate(dict(zip(invalid_chars, [''] * len(invalid_chars))))
        else:
            # Keep only the final run of valid characters
            sn = multisplit(sn, invalid_chars)[-1]
    sn = sn[-max_length:]  # keep at most the trailing max_length characters
    if strip_whitespace:
        sn = sn.strip()
    if na:
        # Blank out values that mean "not available"
        # NOTE(review): when na is a container that doesn't contain sn, the elif
        # still calls na.match() — only safe if na is regex-like; confirm.
        if isinstance(na, (tuple, set, dict, list)) and sn in na:
            sn = ''
        elif na.match(sn):
            sn = ''
    if not sn and not (blank is False):
        return blank
    # NOTE(review): precedence gives max_length - len(sn)/len(fill), not
    # (max_length - len(sn))/len(fill) — harmless for 1-char fills; confirm intent.
    if left_fill:
        sn = left_fill * int(max_length - len(sn) / len(left_fill)) + sn
    if right_fill:
        sn = sn + right_fill * (max_length - len(sn) / len(right_fill))
    return sn
5,416
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2064-L2173
[ "def", "_ProcessMetadataFile", "(", "self", ",", "mediator", ",", "file_entry", ")", ":", "self", ".", "processing_status", "=", "definitions", ".", "STATUS_INDICATOR_EXTRACTING", "self", ".", "_event_extractor", ".", "ParseFileEntryMetadata", "(", "mediator", ",", "file_entry", ")", "for", "data_stream", "in", "file_entry", ".", "data_streams", ":", "if", "self", ".", "_abort", ":", "break", "self", ".", "last_activity_timestamp", "=", "time", ".", "time", "(", ")", "self", ".", "_event_extractor", ".", "ParseMetadataFile", "(", "mediator", ",", "file_entry", ",", "data_stream", ".", "name", ")" ]
Simple clumsy slow HTML tag stripper
def strip_HTML(s):
    """Simple, clumsy, slow HTML tag stripper; each tag is replaced by one space."""
    out = []
    inside_tag = False
    for ch in s:
        if ch == '<':
            inside_tag = True
        elif ch == '>':
            inside_tag = False
            out.append(' ')
        elif not inside_tag:
            out.append(ch)
    return ''.join(out)
5,417
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2256-L2268
[ "def", "record", "(", "self", ",", "event_type", ":", "str", ",", "event_data", ":", "Mapping", "=", "None", ",", "instance", ":", "Any", "=", "None", ",", "*", "*", "kwargs", ")", "->", "'Event'", ":", "if", "not", "self", ".", "is_setup", ":", "return", "if", "not", "self", ".", "can_handle", "(", "event_type", "=", "event_type", ")", ":", "return", "event", "=", "self", ".", "get_event", "(", "event_type", "=", "event_type", ",", "event_data", "=", "event_data", ",", "instance", "=", "instance", ",", "*", "*", "kwargs", ")", "self", ".", "record_event", "(", "event", ")", "return", "event" ]
Use the pypi tabulate package instead!
def tabulate(lol, headers, eol='\n'):
    """Render rows as a markdown-style table. Use the pypi tabulate package instead!"""
    # Header row
    yield '| ' + ' | '.join(headers) + ' |' + eol
    # Separator row of dashes sized to each header
    dashes = [len(h) * '-' for h in headers]
    yield '| ' + ':| '.join(dashes) + ':|' + eol
    # One line per data row
    for row in lol:
        cells = [str(value) for value in row]
        yield '| ' + ' | '.join(cells) + ' |' + eol
5,418
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2396-L2401
[ "def", "writeGlobalFileStream", "(", "self", ",", "cleanup", "=", "False", ")", ":", "# TODO: Make this work with FileID", "with", "self", ".", "jobStore", ".", "writeFileStream", "(", "None", "if", "not", "cleanup", "else", "self", ".", "jobGraph", ".", "jobStoreID", ")", "as", "(", "backingStream", ",", "fileStoreID", ")", ":", "# We have a string version of the file ID, and the backing stream.", "# We need to yield a stream the caller can write to, and a FileID", "# that accurately reflects the size of the data written to the", "# stream. We assume the stream is not seekable.", "# Make and keep a reference to the file ID, which is currently empty", "fileID", "=", "FileID", "(", "fileStoreID", ",", "0", ")", "# Wrap the stream to increment the file ID's size for each byte written", "wrappedStream", "=", "WriteWatchingStream", "(", "backingStream", ")", "# When the stream is written to, count the bytes", "def", "handle", "(", "numBytes", ")", ":", "fileID", ".", "size", "+=", "numBytes", "wrappedStream", ".", "onWrite", "(", "handle", ")", "yield", "wrappedStream", ",", "fileID" ]
Return an N - length list with elements values extrapolating as necessary .
def listify(values, N=1, delim=None):
    """Return an N-length list with elements `values`, extrapolating as necessary."""
    ans = [] if values is None else values
    # convert non-string non-list iterables into a list
    if hasattr(ans, '__iter__') and not isinstance(ans, basestring):
        ans = list(ans)
    elif isinstance(delim, basestring) and isinstance(ans, basestring):
        # split the string (if possible)
        try:
            ans = ans.split(delim)
        except (IndexError, ValueError, AttributeError, TypeError):
            ans = [ans]
    else:
        ans = [ans]
    # pad the end of the list if a length has been specified
    if ans:
        if len(ans) < N and N > 1:
            ans += [ans[-1]] * (N - len(ans))
    elif N > 1:
        ans = [[]] * N
    return ans
5,419
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2464-L2515
[ "def", "gen_yaml_category", "(", ")", ":", "for", "wroot", ",", "_", ",", "wfiles", "in", "os", ".", "walk", "(", "'./database/meta'", ")", ":", "for", "wfile", "in", "wfiles", ":", "if", "wfile", ".", "endswith", "(", "'.yaml'", ")", ":", "gen_category", "(", "os", ".", "path", ".", "join", "(", "wroot", ",", "wfile", ")", ",", "wfile", "[", "0", "]", ")" ]
Return the desired element in a list ignoring the rest .
def unlistify(n, depth=1, typ=list, get=None):
    """Return the desired element of a (possibly nested) list, ignoring the rest.

    Args:
        n: the value to unwrap
        depth (int): how many nesting levels to descend (None means 1)
        typ (type): container type to unwrap (default: list)
        get (int): index of the desired element at each level (default: 0)

    Returns:
        The element found at the requested depth/index, or `n` unchanged when
        it is not a `typ` container or is too short.
    """
    i = 0
    if depth is None:
        depth = 1
    index_desired = get or 0
    while i < depth and isinstance(n, typ):
        if not len(n):
            # FIX: an empty container previously made no progress in the loop,
            # spinning forever; return it unchanged instead
            return n
        if len(n) > index_desired:
            n = n[index_desired]
            i += 1
        else:
            return n
    return n
5,420
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2522-L2549
[ "def", "upload", "(", "config", ")", ":", "token", "=", "get_keeper_token", "(", "config", "[", "'keeper_url'", "]", ",", "config", "[", "'keeper_user'", "]", ",", "config", "[", "'keeper_password'", "]", ")", "build_resource", "=", "register_build", "(", "config", ",", "token", ")", "ltdconveyor", ".", "upload_dir", "(", "build_resource", "[", "'bucket_name'", "]", ",", "build_resource", "[", "'bucket_root_dir'", "]", ",", "config", "[", "'build_dir'", "]", ",", "aws_access_key_id", "=", "config", "[", "'aws_id'", "]", ",", "aws_secret_access_key", "=", "config", "[", "'aws_secret'", "]", ",", "surrogate_key", "=", "build_resource", "[", "'surrogate_key'", "]", ",", "cache_control", "=", "'max-age=31536000'", ",", "surrogate_control", "=", "None", ",", "upload_dir_redirect_objects", "=", "True", ")", "confirm_build", "(", "config", ",", "token", ",", "build_resource", ")" ]
r Strip whitespace from all dictionary keys to the depth indicated
def strip_keys(d, nones=False, depth=0):
    r"""Strip whitespace from all dictionary keys, recursing to the depth indicated.

    Args:
        d (Mapping): dictionary whose keys should be cleaned
        nones (bool): when True, drop entries whose stripped key is empty or 'None'
        depth (int): how many levels of nested Mappings to recurse into

    Returns:
        A new mapping of the same type as `d` with stripped keys.
    """
    # Rebuild the mapping with stripped keys, optionally filtering blank/'None' keys
    ans = type(d)((str(k).strip(), v)
                  for (k, v) in viewitems(OrderedDict(d))
                  if (not nones or (str(k).strip() and str(k).strip() != 'None')))
    if int(depth) < 1:
        return ans
    # strip_keys.MAX_DEPTH is a function attribute set elsewhere in the module
    if int(depth) > strip_keys.MAX_DEPTH:
        warnings.warn(RuntimeWarning("Maximum recursion depth allowance (%r) exceeded." % strip_keys.MAX_DEPTH))
    # Recurse into nested mappings, decrementing the remaining depth
    for k, v in viewitems(ans):
        if isinstance(v, Mapping):
            ans[k] = strip_keys(v, nones=nones, depth=int(depth) - 1)
    return ans
5,421
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2568-L2585
[ "def", "updateSeriesRegistrationStatus", "(", ")", ":", "from", ".", "models", "import", "Series", "if", "not", "getConstant", "(", "'general__enableCronTasks'", ")", ":", "return", "logger", ".", "info", "(", "'Checking status of Series that are open for registration.'", ")", "open_series", "=", "Series", ".", "objects", ".", "filter", "(", ")", ".", "filter", "(", "*", "*", "{", "'registrationOpen'", ":", "True", "}", ")", "for", "series", "in", "open_series", ":", "series", ".", "updateRegistrationStatus", "(", ")" ]
Dictionary of sequences from CSV file
def get_table_from_csv(filename='ssg_report_aarons_returns.csv', delimiter=',', dos=False):
    """Load a CSV file as a list of rows, or a dict of column sequences when `dos` is True."""
    rows = []
    with open(filename, 'rb') as f:
        for record in csv.reader(f, dialect='excel', delimiter=delimiter):
            rows.append(record)
    if dos:
        return dos_from_table(rows)
    return rows
5,422
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2596-L2605
[ "def", "set_axes_equal", "(", "ax", ")", ":", "bounds", "=", "[", "ax", ".", "get_xlim3d", "(", ")", ",", "ax", ".", "get_ylim3d", "(", ")", ",", "ax", ".", "get_zlim3d", "(", ")", "]", "ranges", "=", "[", "abs", "(", "bound", "[", "1", "]", "-", "bound", "[", "0", "]", ")", "for", "bound", "in", "bounds", "]", "centers", "=", "[", "np", ".", "mean", "(", "bound", ")", "for", "bound", "in", "bounds", "]", "radius", "=", "0.5", "*", "max", "(", "ranges", ")", "lower_limits", "=", "centers", "-", "radius", "upper_limits", "=", "centers", "+", "radius", "ax", ".", "set_xlim3d", "(", "[", "lower_limits", "[", "0", "]", ",", "upper_limits", "[", "0", "]", "]", ")", "ax", ".", "set_ylim3d", "(", "[", "lower_limits", "[", "1", "]", ",", "upper_limits", "[", "1", "]", "]", ")", "ax", ".", "set_zlim3d", "(", "[", "lower_limits", "[", "2", "]", ",", "upper_limits", "[", "2", "]", "]", ")" ]
Attempt to shorten a phrase by deleting words at the end of the phrase
def shorten(s, max_len=16):
    """Attempt to shorten a phrase by deleting words at the end of the phrase."""
    abbreviated = [abbreviate(word) for word in get_words(s)]
    short = s
    # Drop trailing words until the abbreviated phrase fits
    for n in range(len(abbreviated), 0, -1):
        short = ' '.join(abbreviated[:n])
        if len(short) <= max_len:
            break
    # Hard-truncate in case even one word is too long
    return short[:max_len]
5,423
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2627-L2641
[ "def", "_get_root", "(", "cls", ",", "order", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "\"Console\"", ":", "global", "_root_console", "if", "_root_console", "is", "None", ":", "_root_console", "=", "object", ".", "__new__", "(", "cls", ")", "self", "=", "_root_console", "# type: Console", "if", "order", "is", "not", "None", ":", "self", ".", "_order", "=", "order", "self", ".", "console_c", "=", "ffi", ".", "NULL", "self", ".", "_init_setup_console_data", "(", "self", ".", "_order", ")", "return", "self" ]
r Return string at most max_len characters or sequence elments appended with the ellipsis characters
def truncate ( s , max_len = 20 , ellipsis = '...' ) : if s is None : return None elif isinstance ( s , basestring ) : return s [ : min ( len ( s ) , max_len ) ] + ellipsis if len ( s ) > max_len else '' elif isinstance ( s , Mapping ) : truncated_str = str ( dict ( islice ( viewitems ( s ) , max_len ) ) ) else : truncated_str = str ( list ( islice ( s , max_len ) ) ) return truncated_str [ : - 1 ] + '...' if len ( s ) > max_len else truncated_str
5,424
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2656-L2676
[ "def", "invalidate", "(", "self", ")", ":", "for", "row", "in", "self", ".", "rows", ":", "for", "key", "in", "row", ".", "keys", ":", "key", ".", "state", "=", "0" ]
Return a list of all possible meanings of a phrase containing slashes
def slash_product ( string_or_seq , slash = '/' , space = ' ' ) : # Terminating case is a sequence of strings without any slashes if not isinstance ( string_or_seq , basestring ) : # If it's not a string and has no slashes, we're done if not any ( slash in s for s in string_or_seq ) : return list ( string_or_seq ) ans = [ ] for s in string_or_seq : # slash_product of a string will always return a flat list ans += slash_product ( s ) return slash_product ( ans ) # Another terminating case is a single string without any slashes if slash not in string_or_seq : return [ string_or_seq ] # The third case is a string with some slashes in it i = string_or_seq . index ( slash ) head , tail = string_or_seq [ : i ] . split ( space ) , string_or_seq [ i + 1 : ] . split ( space ) alternatives = head [ - 1 ] , tail [ 0 ] head , tail = space . join ( head [ : - 1 ] ) , space . join ( tail [ 1 : ] ) return slash_product ( [ space . join ( [ head , word , tail ] ) . strip ( space ) for word in alternatives ] )
5,425
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2916-L2964
[ "def", "frommembers", "(", "cls", ",", "members", "=", "(", ")", ")", ":", "return", "cls", ".", "fromint", "(", "sum", "(", "map", "(", "cls", ".", "_map", ".", "__getitem__", ",", "set", "(", "members", ")", ")", ")", ")" ]
Create a header a new image
def create_header ( coord , radius , proj = 'ZEA' , npix = 30 ) : gal = coord . name == 'galactic' values = [ [ "NAXIS" , 2 , ] , [ "NAXIS1" , npix , ] , [ "NAXIS2" , npix , ] , [ "CTYPE1" , 'GLON-%s' % proj if gal else 'RA---%s' % proj ] , [ "CTYPE2" , 'GLAT-%s' % proj if gal else 'DEC--%s' % proj ] , [ "CRPIX1" , npix / 2. + 0.5 , ] , [ "CRPIX2" , npix / 2. + 0.5 , ] , [ "CRVAL1" , coord . l . deg if gal else coord . ra . deg , ] , [ "CRVAL2" , coord . b . deg if gal else coord . dec . deg , ] , [ "CDELT1" , - 3. * radius / npix , ] , [ "CDELT2" , 3. * radius / npix , ] , ] if not gal : values += [ [ 'RADECSYS' , 'FK5' ] , [ 'EQUINOX' , 2000 ] , ] cards = [ pyfits . Card ( * i ) for i in values ] header = pyfits . Header ( cards = cards ) return header
5,426
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/position_angle.py#L19-L50
[ "def", "devices", "(", "self", ",", "timeout", "=", "None", ")", ":", "# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw", "# from Android system/core/adb/transport.c statename()", "re_device_info", "=", "re", ".", "compile", "(", "r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'", ")", "devices", "=", "[", "]", "lines", "=", "self", ".", "command_output", "(", "[", "\"devices\"", ",", "\"-l\"", "]", ",", "timeout", "=", "timeout", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "line", "==", "'List of devices attached '", ":", "continue", "match", "=", "re_device_info", ".", "match", "(", "line", ")", "if", "match", ":", "device", "=", "{", "'device_serial'", ":", "match", ".", "group", "(", "1", ")", ",", "'state'", ":", "match", ".", "group", "(", "2", ")", "}", "remainder", "=", "line", "[", "match", ".", "end", "(", "2", ")", ":", "]", ".", "strip", "(", ")", "if", "remainder", ":", "try", ":", "device", ".", "update", "(", "dict", "(", "[", "j", ".", "split", "(", "':'", ")", "for", "j", "in", "remainder", ".", "split", "(", "' '", ")", "]", ")", ")", "except", "ValueError", ":", "self", ".", "_logger", ".", "warning", "(", "'devices: Unable to parse '", "'remainder for device %s'", "%", "line", ")", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
Split string text into word tokens using the Penn Treebank rules
def word_tokenize ( text ) : for ( regexp , replacement ) in RULES1 : text = sub ( regexp , replacement , text ) # add extra space to make things easier text = " " + text + " " for ( regexp , replacement ) in RULES2 : text = sub ( regexp , replacement , text ) for regexp in CONTRACTIONS : text = sub ( regexp , r"\1 \2 " , text ) # split and return return text . split ( )
5,427
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/penn_treebank_tokenizer.py#L83-L96
[ "def", "_link_dimensioned_streams", "(", "self", ")", ":", "streams", "=", "[", "s", "for", "s", "in", "self", ".", "streams", "if", "any", "(", "k", "in", "self", ".", "dimensions", "for", "k", "in", "s", ".", "contents", ")", "]", "for", "s", "in", "streams", ":", "s", ".", "add_subscriber", "(", "self", ".", "_stream_update", ",", "1", ")" ]
get_postcodedata - fetch information for postcode .
def get_postcodedata ( self , postcode , nr , addition = "" , * * params ) : endpoint = 'rest/addresses/%s/%s' % ( postcode , nr ) if addition : endpoint += '/' + addition retValue = self . _API__request ( endpoint , params = params ) # then it should match the houseNumberAdditions if addition and addition . upper ( ) not in [ a . upper ( ) for a in retValue [ 'houseNumberAdditions' ] ] : raise PostcodeError ( "ERRHouseNumberAdditionInvalid" , { "exceptionId" : "ERRHouseNumberAdditionInvalid" , "exception" : "Invalid housenumber addition: '%s'" % retValue [ 'houseNumberAddition' ] , "validHouseNumberAdditions" : retValue [ 'houseNumberAdditions' ] } ) return retValue
5,428
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L14-L51
[ "def", "extract_files", "(", "files", ")", ":", "expanded_files", "=", "[", "]", "legal_extensions", "=", "[", "\".md\"", ",", "\".txt\"", ",", "\".rtf\"", ",", "\".html\"", ",", "\".tex\"", ",", "\".markdown\"", "]", "for", "f", "in", "files", ":", "# If it's a directory, recursively walk through it and find the files.", "if", "os", ".", "path", ".", "isdir", "(", "f", ")", ":", "for", "dir_", ",", "_", ",", "filenames", "in", "os", ".", "walk", "(", "f", ")", ":", "for", "filename", "in", "filenames", ":", "fn", ",", "file_extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "file_extension", "in", "legal_extensions", ":", "joined_file", "=", "os", ".", "path", ".", "join", "(", "dir_", ",", "filename", ")", "expanded_files", ".", "append", "(", "joined_file", ")", "# Otherwise add the file directly.", "else", ":", "expanded_files", ".", "append", "(", "f", ")", "return", "expanded_files" ]
get_signalcheck - perform a signal check .
def get_signalcheck ( self , sar , * * params ) : params = sar endpoint = 'rest/signal/check' # The 'sar'-request dictionary should be sent as valid JSON data, so # we need to convert it to JSON # when we construct the request in API.request retValue = self . _API__request ( endpoint , 'POST' , params = params , convJSON = True ) return retValue
5,429
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L53-L75
[ "def", "add_param", "(", "self", ",", "param_name", ",", "layer_index", ",", "blob_index", ")", ":", "blobs", "=", "self", ".", "layers", "[", "layer_index", "]", ".", "blobs", "self", ".", "dict_param", "[", "param_name", "]", "=", "mx", ".", "nd", ".", "array", "(", "caffe", ".", "io", ".", "blobproto_to_array", "(", "blobs", "[", "blob_index", "]", ")", ")" ]
request - Returns dict of response from postcode . nl API .
def __request ( self , endpoint , method = 'GET' , params = None , convJSON = False ) : url = '%s/%s' % ( self . api_url , endpoint ) method = method . lower ( ) params = params or { } if convJSON : params = json . dumps ( params ) func = getattr ( self . client , method ) request_args = { } if method == 'get' : request_args [ 'params' ] = params else : request_args [ 'data' ] = params try : # Normally some valid HTTP-response will be the case # if not some exception regarding the request / connection has # occurred # this will be one of the exceptions of the request module # if so, we will a PostcodeError exception and pass the request # exception message response = func ( url , * * request_args ) except requests . RequestException as e : raise PostcodeError ( "ERRrequest" , { "exception" : e . __doc__ } ) content = response . content . decode ( 'utf-8' ) content = json . loads ( content ) if response . status_code == 200 : return content # Errors, otherwise we did not get here ... if 'exceptionId' in content : raise PostcodeError ( content [ 'exceptionId' ] , content ) raise PostcodeError ( "UnknownExceptionFromPostcodeNl" )
5,430
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L122-L163
[ "def", "_CurrentAuditLog", "(", ")", ":", "now_sec", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", ".", "AsSecondsSinceEpoch", "(", ")", "rollover_seconds", "=", "AUDIT_ROLLOVER_TIME", ".", "seconds", "# This gives us a filename that only changes every", "# AUDIT_ROLLOVER_TIfilME seconds, but is still a valid timestamp.", "current_log", "=", "(", "now_sec", "//", "rollover_seconds", ")", "*", "rollover_seconds", "return", "_AuditLogBase", "(", ")", ".", "Add", "(", "str", "(", "current_log", ")", ")" ]
Fit a line to the x y data supplied and plot it along with teh raw samples
def regression_and_plot ( x , y = None ) : if y is None : y = x x = range ( len ( x ) ) if not isinstance ( x [ 0 ] , ( float , int , np . float64 , np . float32 ) ) : x = [ row [ 0 ] for row in x ] A = np . vstack ( [ np . array ( x ) , np . ones ( len ( x ) ) ] ) . T fit = np . linalg . lstsq ( A , y , rcond = None ) # if fit is None: # fit = [(1, 0), None, None, None] poly = fit [ 0 ] [ 0 ] , fit [ 0 ] [ - 1 ] poly = regressionplot ( x , y , poly ) return poly
5,431
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L125-L225
[ "def", "asdensity", "(", "self", ")", "->", "'Density'", ":", "matrix", "=", "bk", ".", "outer", "(", "self", ".", "tensor", ",", "bk", ".", "conj", "(", "self", ".", "tensor", ")", ")", "return", "Density", "(", "matrix", ",", "self", ".", "qubits", ",", "self", ".", "_memory", ")" ]
Scatter plot with colored markers depending on the discrete values in a category column
def scatmat ( df , category = None , colors = 'rgob' , num_plots = 4 , num_topics = 100 , num_columns = 4 , show = False , block = False , data_path = DATA_PATH , save = False , verbose = 1 ) : if category is None : category = list ( df . columns ) [ - 1 ] if isinstance ( category , ( str , bytes , int ) ) and category in df . columns : category = df [ category ] else : category = pd . Series ( category ) suffix = '{}x{}' . format ( * list ( df . shape ) ) # suffix = compose_suffix(len(df), num_topics, save) # save = bool(save) for i in range ( min ( num_plots * num_columns , num_topics ) / num_plots ) : scatter_matrix ( df [ df . columns [ i * num_columns : ( i + 1 ) * num_columns ] ] , marker = '+' , c = [ colors [ int ( x ) % len ( colors ) ] for x in category . values ] , figsize = ( 18 , 12 ) ) if save : name = 'scatmat_topics_{}-{}.jpg' . format ( i * num_columns , ( i + 1 ) * num_columns ) + suffix plt . savefig ( os . path . join ( data_path , name + '.jpg' ) ) if show : if block : plt . show ( ) else : plt . show ( block = False )
5,432
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L296-L323
[ "def", "DictKeys", "(", "keys", ")", ":", "req", "=", "[", "(", "Key", "(", "key", ")", ",", "Any", ")", "for", "key", "in", "keys", "]", "return", "Dict", "(", "dict", "(", "req", ")", ")" ]
3 - D Point cloud for plotting things like mesh models of horses ; )
def point_cloud ( df , columns = [ 0 , 1 , 2 ] ) : df = df if isinstance ( df , pd . DataFrame ) else pd . DataFrame ( df ) if not all ( c in df . columns for c in columns ) : columns = list ( df . columns ) [ : 3 ] fig = plt . figure ( ) ax = fig . add_subplot ( 111 , projection = '3d' ) # noqa Axes3D . scatter ( * [ df [ columns [ i ] ] for i in range ( 3 ) ] , zdir = 'z' , s = 20 , c = None , depthshade = True ) return ax
5,433
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L326-L335
[ "def", "flush", "(", "self", ")", ":", "chunks", "=", "[", "]", "chunks", ".", "append", "(", "self", ".", "_compress", "(", "b''", ",", "lib", ".", "BROTLI_OPERATION_FLUSH", ")", ")", "while", "lib", ".", "BrotliEncoderHasMoreOutput", "(", "self", ".", "_encoder", ")", "==", "lib", ".", "BROTLI_TRUE", ":", "chunks", ".", "append", "(", "self", ".", "_compress", "(", "b''", ",", "lib", ".", "BROTLI_OPERATION_FLUSH", ")", ")", "return", "b''", ".", "join", "(", "chunks", ")" ]
Display the last image drawn
def show ( self , block = False ) : try : plt . show ( block = block ) except ValueError : plt . show ( )
5,434
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L284-L289
[ "def", "searchEnterpriseGroups", "(", "self", ",", "searchFilter", "=", "\"\"", ",", "maxCount", "=", "100", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"filter\"", ":", "searchFilter", ",", "\"maxCount\"", ":", "maxCount", "}", "url", "=", "self", ".", "_url", "+", "\"/groups/searchEnterpriseGroups\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
save colormap to file
def save ( self , filename ) : plt . savefig ( filename , fig = self . fig , facecolor = 'black' , edgecolor = 'black' )
5,435
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L291-L293
[ "def", "AND", "(", "queryArr", ",", "exclude", "=", "None", ")", ":", "assert", "isinstance", "(", "queryArr", ",", "list", ")", ",", "\"provided argument as not a list\"", "assert", "len", "(", "queryArr", ")", ">", "0", ",", "\"queryArr had an empty list\"", "q", "=", "CombinedQuery", "(", ")", "q", ".", "setQueryParam", "(", "\"$and\"", ",", "[", "]", ")", "for", "item", "in", "queryArr", ":", "assert", "isinstance", "(", "item", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"item in the list was not a CombinedQuery or BaseQuery instance\"", "q", ".", "getQuery", "(", ")", "[", "\"$and\"", "]", ".", "append", "(", "item", ".", "getQuery", "(", ")", ")", "if", "exclude", "!=", "None", ":", "assert", "isinstance", "(", "exclude", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"exclude parameter was not a CombinedQuery or BaseQuery instance\"", "q", ".", "setQueryParam", "(", "\"$not\"", ",", "exclude", ".", "getQuery", "(", ")", ")", "return", "q" ]
Get the named parameter .
def getp ( self , name ) : name = self . _mapping . get ( name , name ) return self . params [ name ]
5,436
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/model.py#L98-L113
[ "def", "create_bundle", "(", "self", ",", "bundleId", ",", "data", "=", "None", ")", ":", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", "url", "=", "self", ".", "__get_base_bundle_url", "(", ")", "+", "\"/\"", "+", "bundleId", "if", "data", "is", "None", ":", "data", "=", "{", "}", "data", "[", "'sourceLanguage'", "]", "=", "'en'", "data", "[", "'targetLanguages'", "]", "=", "[", "]", "data", "[", "'notes'", "]", "=", "[", "]", "data", "[", "'metadata'", "]", "=", "{", "}", "data", "[", "'partner'", "]", "=", "''", "data", "[", "'segmentSeparatorPattern'", "]", "=", "''", "data", "[", "'noTranslationPattern'", "]", "=", "''", "json_data", "=", "json", ".", "dumps", "(", "data", ")", "response", "=", "self", ".", "__perform_rest_call", "(", "requestURL", "=", "url", ",", "restType", "=", "'PUT'", ",", "body", "=", "json_data", ",", "headers", "=", "headers", ")", "return", "response" ]
Take in a color look - up table and return the signal color evaluated for each object . Consider making the argument a Catalog object rather than magnitudes and uncertainties .
def readColorLUT ( infile , distance_modulus , mag_1 , mag_2 , mag_err_1 , mag_err_2 ) : reader = pyfits . open ( infile ) distance_modulus_array = reader [ 'DISTANCE_MODULUS' ] . data . field ( 'DISTANCE_MODULUS' ) if not numpy . any ( numpy . fabs ( distance_modulus_array - distance_modulus ) < 1.e-3 ) : logger . warning ( "Distance modulus %.2f not available in file %s" % ( distance_modulus , infile ) ) logger . warning ( ' available distance moduli:' + str ( distance_modulus_array ) ) return False distance_modulus_key = '%.2f' % ( distance_modulus_array [ numpy . argmin ( numpy . fabs ( distance_modulus_array - distance_modulus ) ) ] ) bins_mag_err = reader [ 'BINS_MAG_ERR' ] . data . field ( 'BINS_MAG_ERR' ) bins_mag_1 = reader [ 'BINS_MAG_1' ] . data . field ( 'BINS_MAG_1' ) bins_mag_2 = reader [ 'BINS_MAG_2' ] . data . field ( 'BINS_MAG_2' ) # Note that magnitude uncertainty is always assigned by rounding up, is this the right thing to do? index_mag_err_1 = numpy . clip ( numpy . digitize ( mag_err_1 , bins_mag_err ) - 1 , 0 , len ( bins_mag_err ) - 2 ) index_mag_err_2 = numpy . clip ( numpy . digitize ( mag_err_2 , bins_mag_err ) - 1 , 0 , len ( bins_mag_err ) - 2 ) u_color = numpy . zeros ( len ( mag_1 ) ) for index_mag_err_1_select in range ( 0 , len ( bins_mag_err ) - 1 ) : for index_mag_err_2_select in range ( 0 , len ( bins_mag_err ) - 1 ) : cut = numpy . logical_and ( index_mag_err_1 == index_mag_err_1_select , index_mag_err_2 == index_mag_err_2_select ) if numpy . sum ( cut ) < 1 : continue histo = reader [ distance_modulus_key ] . data . field ( '%i%i' % ( index_mag_err_1_select , index_mag_err_2_select ) ) u_color [ cut ] = ugali . utils . binning . take2D ( histo , mag_2 [ cut ] , mag_1 [ cut ] , bins_mag_2 , bins_mag_1 ) reader . close ( ) return u_color
5,437
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/color_lut.py#L335-L374
[ "def", "on_redis_error", "(", "self", ",", "fname", ",", "exc_type", ",", "exc_value", ")", ":", "if", "self", ".", "shared_client", ":", "Storage", ".", "storage", "=", "None", "else", ":", "self", ".", "storage", "=", "None", "if", "self", ".", "context", ".", "config", ".", "REDIS_STORAGE_IGNORE_ERRORS", "is", "True", ":", "logger", ".", "error", "(", "\"[REDIS_STORAGE] %s\"", "%", "exc_value", ")", "if", "fname", "==", "'_exists'", ":", "return", "False", "return", "None", "else", ":", "raise", "exc_value" ]
Based on django . util . functional . memoize . Automatically memoizes instace methods for the lifespan of an object . Only works with methods taking non - keword arguments . Note that the args to the function must be usable as dictionary keys . Also the first argument MUST be self . This decorator will not work for functions or class methods only object methods .
def auto_memoize ( func ) : @ wraps ( func ) def wrapper ( * args ) : inst = args [ 0 ] inst . _memoized_values = getattr ( inst , '_memoized_values' , { } ) key = ( func , args [ 1 : ] ) if key not in inst . _memoized_values : inst . _memoized_values [ key ] = func ( * args ) return inst . _memoized_values [ key ] return wrapper
5,438
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L15-L31
[ "def", "close", "(", "self", ")", ":", "self", ".", "stopped", ".", "set", "(", ")", "for", "event", "in", "self", ".", "to_be_stopped", ":", "event", ".", "set", "(", ")", "if", "self", ".", "_receiver_thread", "is", "not", "None", ":", "self", ".", "_receiver_thread", ".", "join", "(", ")", "self", ".", "_socket", ".", "close", "(", ")" ]
For a sorted list li returns the closest item to value
def best_fit ( li , value ) : index = min ( bisect_left ( li , value ) , len ( li ) - 1 ) if index in ( 0 , len ( li ) ) : return index if li [ index ] - value < value - li [ index - 1 ] : return index else : return index - 1
5,439
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L34-L45
[ "def", "request_control", "(", "self", ",", "device_id", ",", "access_mode", "=", "True", ")", ":", "if", "access_mode", ":", "if", "not", "request_control", "(", "self", ".", "corsair_sdk", ",", "device_id", ")", ":", "self", ".", "_raise_corsair_error", "(", ")", "return", "True", "else", ":", "self", ".", "reload", "(", ")" ]
Attempts to convert a PROJ4 projection object to an EPSG code and returns None if conversion fails
def proj4_to_epsg ( projection ) : def make_definition ( value ) : return { x . strip ( ) . lower ( ) for x in value . split ( '+' ) if x } # Use the EPSG in the definition if available match = EPSG_RE . search ( projection . srs ) if match : return int ( match . group ( 1 ) ) # Otherwise, try to look up the EPSG from the pyproj data file pyproj_data_dir = os . path . join ( os . path . dirname ( pyproj . __file__ ) , 'data' ) pyproj_epsg_file = os . path . join ( pyproj_data_dir , 'epsg' ) if os . path . exists ( pyproj_epsg_file ) : definition = make_definition ( projection . srs ) f = open ( pyproj_epsg_file , 'r' ) for line in f . readlines ( ) : match = PYPROJ_EPSG_FILE_RE . search ( line ) if match : file_definition = make_definition ( match . group ( 2 ) ) if definition == file_definition : return int ( match . group ( 1 ) ) return None
5,440
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L48-L71
[ "def", "get_dataframe", "(", "self", ",", "sort_key", "=", "\"wall_time\"", ",", "*", "*", "kwargs", ")", ":", "import", "pandas", "as", "pd", "frame", "=", "pd", ".", "DataFrame", "(", "columns", "=", "AbinitTimerSection", ".", "FIELDS", ")", "for", "osect", "in", "self", ".", "order_sections", "(", "sort_key", ")", ":", "frame", "=", "frame", ".", "append", "(", "osect", ".", "to_dict", "(", ")", ",", "ignore_index", "=", "True", ")", "# Monkey patch", "frame", ".", "info", "=", "self", ".", "info", "frame", ".", "cpu_time", "=", "self", ".", "cpu_time", "frame", ".", "wall_time", "=", "self", ".", "wall_time", "frame", ".", "mpi_nprocs", "=", "self", ".", "mpi_nprocs", "frame", ".", "omp_nthreads", "=", "self", ".", "omp_nthreads", "frame", ".", "mpi_rank", "=", "self", ".", "mpi_rank", "frame", ".", "fname", "=", "self", ".", "fname", "return", "frame" ]
Converts a well - known text string to a pyproj . Proj object
def wkt_to_proj4 ( wkt ) : srs = osgeo . osr . SpatialReference ( ) srs . ImportFromWkt ( wkt ) return pyproj . Proj ( str ( srs . ExportToProj4 ( ) ) )
5,441
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L74-L80
[ "def", "is_marginable", "(", "self", ")", ":", "return", "self", ".", "dimension_type", "not", "in", "{", "DT", ".", "CA", ",", "DT", ".", "MR", ",", "DT", ".", "MR_CAT", ",", "DT", ".", "LOGICAL", "}" ]
Converts a pyproj . Proj object to a well - known text string
def proj4_to_wkt ( projection ) : srs = osgeo . osr . SpatialReference ( ) srs . ImportFromProj4 ( projection . srs ) return srs . ExportToWkt ( )
5,442
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L83-L89
[ "def", "leave_moderator", "(", "self", ",", "subreddit", ")", ":", "self", ".", "evict", "(", "self", ".", "config", "[", "'my_mod_subreddits'", "]", ")", "return", "self", ".", "_leave_status", "(", "subreddit", ",", "self", ".", "config", "[", "'leavemoderator'", "]", ")" ]
Projects a shapely geometry object from the source to the target projection .
def project_geometry ( geometry , source , target ) : project = partial ( pyproj . transform , source , target ) return transform ( project , geometry )
5,443
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L92-L101
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "BaseAuthorDetail", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "context", "[", "'author'", "]", "=", "self", ".", "author", "return", "context" ]
Load this config from an existing config
def _load ( self , config ) : if isstring ( config ) : self . filename = config params = yaml . load ( open ( config ) ) elif isinstance ( config , Config ) : # This is the copy constructor... self . filename = config . filename params = copy . deepcopy ( config ) elif isinstance ( config , dict ) : params = copy . deepcopy ( config ) elif config is None : params = { } else : raise Exception ( 'Unrecognized input' ) return params
5,444
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L64-L89
[ "async", "def", "release", "(", "data", ")", ":", "global", "session", "if", "not", "feature_flags", ".", "use_protocol_api_v2", "(", ")", ":", "session", ".", "adapter", ".", "remove_instrument", "(", "'left'", ")", "session", ".", "adapter", ".", "remove_instrument", "(", "'right'", ")", "else", ":", "session", ".", "adapter", ".", "cache_instruments", "(", ")", "session", "=", "None", "return", "web", ".", "json_response", "(", "{", "\"message\"", ":", "\"calibration session released\"", "}", ")" ]
Enforce some structure to the config file
def _validate ( self ) : # This could be done with a default config # Check that specific keys exist sections = odict ( [ ( 'catalog' , [ 'dirname' , 'basename' , 'lon_field' , 'lat_field' , 'objid_field' , 'mag_1_band' , 'mag_1_field' , 'mag_err_1_field' , 'mag_2_band' , 'mag_2_field' , 'mag_err_2_field' , ] ) , ( 'mask' , [ ] ) , ( 'coords' , [ 'nside_catalog' , 'nside_mask' , 'nside_likelihood' , 'nside_pixel' , 'roi_radius' , 'roi_radius_annulus' , 'roi_radius_interior' , 'coordsys' , ] ) , ( 'likelihood' , [ ] ) , ( 'output' , [ ] ) , ( 'batch' , [ ] ) , ] ) keys = np . array ( list ( sections . keys ( ) ) ) found = np . in1d ( keys , list ( self . keys ( ) ) ) if not np . all ( found ) : msg = 'Missing sections: ' + str ( keys [ ~ found ] ) raise Exception ( msg ) for section , keys in sections . items ( ) : keys = np . array ( keys ) found = np . in1d ( keys , list ( self [ section ] . keys ( ) ) ) if not np . all ( found ) : msg = 'Missing keys in %s: ' % ( section ) + str ( keys [ ~ found ] ) raise Exception ( msg )
5,445
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L91-L124
[ "def", "Nu_vertical_cylinder", "(", "Pr", ",", "Gr", ",", "L", "=", "None", ",", "D", "=", "None", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "vertical_cylinder_correlations", ".", "items", "(", ")", ":", "if", "values", "[", "4", "]", "or", "all", "(", "(", "L", ",", "D", ")", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Popiel & Churchill'", "in", "methods", ":", "methods", ".", "remove", "(", "'Popiel & Churchill'", ")", "methods", ".", "insert", "(", "0", ",", "'Popiel & Churchill'", ")", "elif", "'McAdams, Weiss & Saunders'", "in", "methods", ":", "methods", ".", "remove", "(", "'McAdams, Weiss & Saunders'", ")", "methods", ".", "insert", "(", "0", ",", "'McAdams, Weiss & Saunders'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "vertical_cylinder_correlations", ":", "if", "vertical_cylinder_correlations", "[", "Method", "]", "[", "4", "]", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ",", "L", "=", "L", ",", "D", "=", "D", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
Join dirnames and filenames from config .
def _formatFilepaths ( self ) : likedir = self [ 'output' ] [ 'likedir' ] self . likefile = join ( likedir , self [ 'output' ] [ 'likefile' ] ) self . mergefile = join ( likedir , self [ 'output' ] [ 'mergefile' ] ) self . roifile = join ( likedir , self [ 'output' ] [ 'roifile' ] ) searchdir = self [ 'output' ] [ 'searchdir' ] self . labelfile = join ( searchdir , self [ 'output' ] [ 'labelfile' ] ) self . objectfile = join ( searchdir , self [ 'output' ] [ 'objectfile' ] ) self . assocfile = join ( searchdir , self [ 'output' ] [ 'assocfile' ] ) self . candfile = join ( searchdir , self [ 'output' ] [ 'candfile' ] ) mcmcdir = self [ 'output' ] [ 'mcmcdir' ] self . mcmcfile = join ( mcmcdir , self [ 'output' ] [ 'mcmcfile' ] )
5,446
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L129-L145
[ "def", "Nu_vertical_cylinder", "(", "Pr", ",", "Gr", ",", "L", "=", "None", ",", "D", "=", "None", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "vertical_cylinder_correlations", ".", "items", "(", ")", ":", "if", "values", "[", "4", "]", "or", "all", "(", "(", "L", ",", "D", ")", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Popiel & Churchill'", "in", "methods", ":", "methods", ".", "remove", "(", "'Popiel & Churchill'", ")", "methods", ".", "insert", "(", "0", ",", "'Popiel & Churchill'", ")", "elif", "'McAdams, Weiss & Saunders'", "in", "methods", ":", "methods", ".", "remove", "(", "'McAdams, Weiss & Saunders'", ")", "methods", ".", "insert", "(", "0", ",", "'McAdams, Weiss & Saunders'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "vertical_cylinder_correlations", ":", "if", "vertical_cylinder_correlations", "[", "Method", "]", "[", "4", "]", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ",", "L", "=", "L", ",", "D", "=", "D", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
Write a copy of this config object .
def write ( self , filename ) : ext = os . path . splitext ( filename ) [ 1 ] writer = open ( filename , 'w' ) if ext == '.py' : writer . write ( pprint . pformat ( self ) ) elif ext == '.yaml' : writer . write ( yaml . dump ( self ) ) else : writer . close ( ) raise Exception ( 'Unrecognized config format: %s' % ext ) writer . close ( )
5,447
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L147-L168
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Return the requested filenames .
def getFilenames ( self , pixels = None ) : logger . debug ( "Getting filenames..." ) if pixels is None : return self . filenames else : return self . filenames [ np . in1d ( self . filenames [ 'pix' ] , pixels ) ]
5,448
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L305-L321
[ "def", "_mmUpdateDutyCycles", "(", "self", ")", ":", "period", "=", "self", ".", "getDutyCyclePeriod", "(", ")", "unionSDRArray", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ")", "unionSDRArray", "[", "list", "(", "self", ".", "_mmTraces", "[", "\"unionSDR\"", "]", ".", "data", "[", "-", "1", "]", ")", "]", "=", "1", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", ",", "unionSDRArray", ",", "period", ")", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", ",", "self", ".", "_poolingActivation", ",", "period", ")" ]
Return the indices of the super - pixels which contain each of the sub - pixels .
def superpixel ( subpix , nside_subpix , nside_superpix ) : if nside_subpix == nside_superpix : return subpix theta , phi = hp . pix2ang ( nside_subpix , subpix ) return hp . ang2pix ( nside_superpix , theta , phi )
5,449
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L20-L26
[ "def", "handlePortfolio", "(", "self", ",", "msg", ")", ":", "# log handler msg", "self", ".", "log_msg", "(", "\"portfolio\"", ",", "msg", ")", "# contract identifier", "contract_tuple", "=", "self", ".", "contract_to_tuple", "(", "msg", ".", "contract", ")", "contractString", "=", "self", ".", "contractString", "(", "contract_tuple", ")", "# try creating the contract", "self", ".", "registerContract", "(", "msg", ".", "contract", ")", "# new account?", "if", "msg", ".", "accountName", "not", "in", "self", ".", "_portfolios", ".", "keys", "(", ")", ":", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "=", "{", "}", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "[", "contractString", "]", "=", "{", "\"symbol\"", ":", "contractString", ",", "\"position\"", ":", "int", "(", "msg", ".", "position", ")", ",", "\"marketPrice\"", ":", "float", "(", "msg", ".", "marketPrice", ")", ",", "\"marketValue\"", ":", "float", "(", "msg", ".", "marketValue", ")", ",", "\"averageCost\"", ":", "float", "(", "msg", ".", "averageCost", ")", ",", "\"unrealizedPNL\"", ":", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"realizedPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", ",", "\"totalPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", "+", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"account\"", ":", "msg", ".", "accountName", "}", "# fire callback", "self", ".", "ibCallback", "(", "caller", "=", "\"handlePortfolio\"", ",", "msg", "=", "msg", ")" ]
Upgrade or degrade resolution of a pixel list .
def ud_grade_ipix ( ipix , nside_in , nside_out , nest = False ) : if nside_in == nside_out : return ipix elif nside_in < nside_out : return u_grade_ipix ( ipix , nside_in , nside_out , nest ) elif nside_in > nside_out : return d_grade_ipix ( ipix , nside_in , nside_out , nest )
5,450
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L99-L126
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''", ".", "join", "(", "lines", ")", ")", "payload", ",", "checksum", "=", "data", "[", ":", "-", "3", "]", ",", "data", "[", "-", "3", ":", "]", "assert", "util", ".", "crc24", "(", "payload", ")", "==", "checksum", "return", "payload" ]
Find the indices of a set of pixels into another set of pixels . !!! ASSUMES SORTED PIXELS !!!
def index_pix_in_pixels ( pix , pixels , sort = False , outside = - 1 ) : # ADW: Not really safe to set index = -1 (accesses last entry); # -np.inf would be better, but breaks other code... # ADW: Are the pixels always sorted? Is there a quick way to check? if sort : pixels = np . sort ( pixels ) # Assumes that 'pixels' is pre-sorted, otherwise...??? index = np . searchsorted ( pixels , pix ) if np . isscalar ( index ) : if not np . in1d ( pix , pixels ) . any ( ) : index = outside else : # Find objects that are outside the pixels index [ ~ np . in1d ( pix , pixels ) ] = outside return index
5,451
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L217-L244
[ "def", "circuit_to_pdf_using_qcircuit_via_tex", "(", "circuit", ":", "circuits", ".", "Circuit", ",", "filepath", ":", "str", ",", "pdf_kwargs", "=", "None", ",", "qcircuit_kwargs", "=", "None", ",", "clean_ext", "=", "(", "'dvi'", ",", "'ps'", ")", ",", "documentclass", "=", "'article'", ")", ":", "pdf_kwargs", "=", "{", "'compiler'", ":", "'latexmk'", ",", "'compiler_args'", ":", "[", "'-pdfps'", "]", ",", "*", "*", "(", "{", "}", "if", "pdf_kwargs", "is", "None", "else", "pdf_kwargs", ")", "}", "qcircuit_kwargs", "=", "{", "}", "if", "qcircuit_kwargs", "is", "None", "else", "qcircuit_kwargs", "tex", "=", "circuit_to_latex_using_qcircuit", "(", "circuit", ",", "*", "*", "qcircuit_kwargs", ")", "doc", "=", "Document", "(", "documentclass", "=", "documentclass", ",", "document_options", "=", "'dvips'", ")", "doc", ".", "packages", ".", "append", "(", "Package", "(", "'amsmath'", ")", ")", "doc", ".", "packages", ".", "append", "(", "Package", "(", "'qcircuit'", ")", ")", "doc", ".", "append", "(", "NoEscape", "(", "tex", ")", ")", "doc", ".", "generate_pdf", "(", "filepath", ",", "*", "*", "pdf_kwargs", ")", "for", "ext", "in", "clean_ext", ":", "try", ":", "os", ".", "remove", "(", "filepath", "+", "'.'", "+", "ext", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise" ]
Find the indices of a set of angles into a set of pixels
def index_lonlat_in_pixels ( lon , lat , pixels , nside , sort = False , outside = - 1 ) : pix = ang2pix ( nside , lon , lat ) return index_pix_in_pixels ( pix , pixels , sort , outside )
5,452
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L246-L261
[ "def", "connect_default_driver_wrapper", "(", "cls", ",", "config_files", "=", "None", ")", ":", "driver_wrapper", "=", "cls", ".", "get_default_wrapper", "(", ")", "if", "not", "driver_wrapper", ".", "driver", ":", "config_files", "=", "DriverWrappersPool", ".", "initialize_config_files", "(", "config_files", ")", "driver_wrapper", ".", "configure", "(", "config_files", ")", "driver_wrapper", ".", "connect", "(", ")", "return", "driver_wrapper" ]
Mimic the healpy header keywords .
def header_odict ( nside , nest = False , coord = None , partial = True ) : hdr = odict ( [ ] ) hdr [ 'PIXTYPE' ] = odict ( [ ( 'name' , 'PIXTYPE' ) , ( 'value' , 'HEALPIX' ) , ( 'comment' , 'HEALPIX pixelisation' ) ] ) ordering = 'NEST' if nest else 'RING' hdr [ 'ORDERING' ] = odict ( [ ( 'name' , 'ORDERING' ) , ( 'value' , ordering ) , ( 'comment' , 'Pixel ordering scheme, either RING or NESTED' ) ] ) hdr [ 'NSIDE' ] = odict ( [ ( 'name' , 'NSIDE' ) , ( 'value' , nside ) , ( 'comment' , 'Resolution parameter of HEALPIX' ) ] ) if coord : hdr [ 'COORDSYS' ] = odict ( [ ( 'name' , 'COORDSYS' ) , ( 'value' , coord ) , ( 'comment' , 'Ecliptic, Galactic or Celestial (equatorial)' ) ] ) if not partial : hdr [ 'FIRSTPIX' ] = odict ( [ ( 'name' , 'FIRSTPIX' ) , ( 'value' , 0 ) , ( 'comment' , 'First pixel # (0 based)' ) ] ) hdr [ 'LASTPIX' ] = odict ( [ ( 'name' , 'LASTPIX' ) , ( 'value' , hp . nside2npix ( nside ) - 1 ) , ( 'comment' , 'Last pixel # (0 based)' ) ] ) hdr [ 'INDXSCHM' ] = odict ( [ ( 'name' , 'INDXSCHM' ) , ( 'value' , 'EXPLICIT' if partial else 'IMPLICIT' ) , ( 'comment' , 'Indexing: IMPLICIT or EXPLICIT' ) ] ) hdr [ 'OBJECT' ] = odict ( [ ( 'name' , 'OBJECT' ) , ( 'value' , 'PARTIAL' if partial else 'FULLSKY' ) , ( 'comment' , 'Sky coverage, either FULLSKY or PARTIAL' ) ] ) return hdr
5,453
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L330-L362
[ "def", "_mmUpdateDutyCycles", "(", "self", ")", ":", "period", "=", "self", ".", "getDutyCyclePeriod", "(", ")", "unionSDRArray", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ")", "unionSDRArray", "[", "list", "(", "self", ".", "_mmTraces", "[", "\"unionSDR\"", "]", ".", "data", "[", "-", "1", "]", ")", "]", "=", "1", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", ",", "unionSDRArray", ",", "period", ")", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", ",", "self", ".", "_poolingActivation", ",", "period", ")" ]
Partial HEALPix maps are used to efficiently store maps of the sky by only writing out the pixels that contain data .
def write_partial_map ( filename , data , nside , coord = None , nest = False , header = None , dtype = None , * * kwargs ) : # ADW: Do we want to make everything uppercase? if isinstance ( data , dict ) : names = list ( data . keys ( ) ) else : names = data . dtype . names if 'PIXEL' not in names : msg = "'PIXEL' column not found." raise ValueError ( msg ) hdr = header_odict ( nside = nside , coord = coord , nest = nest ) fitshdr = fitsio . FITSHDR ( list ( hdr . values ( ) ) ) if header is not None : for k , v in header . items ( ) : fitshdr . add_record ( { 'name' : k , 'value' : v } ) logger . info ( "Writing %s" % filename ) fitsio . write ( filename , data , extname = 'PIX_DATA' , header = fitshdr , clobber = True )
5,454
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L364-L404
[ "def", "on_end_validation", "(", "self", ",", "event", ")", ":", "self", ".", "Enable", "(", ")", "self", ".", "Show", "(", ")", "self", ".", "magic_gui_frame", ".", "Destroy", "(", ")" ]
Merge header information from likelihood files .
def merge_likelihood_headers ( filenames , outfile ) : filenames = np . atleast_1d ( filenames ) ext = 'PIX_DATA' nside = fitsio . read_header ( filenames [ 0 ] , ext = ext ) [ 'LKDNSIDE' ] keys = [ 'STELLAR' , 'NINSIDE' , 'NANNULUS' ] data_dict = odict ( PIXEL = [ ] ) for k in keys : data_dict [ k ] = [ ] for i , filename in enumerate ( filenames ) : logger . debug ( '(%i/%i) %s' % ( i + 1 , len ( filenames ) , filename ) ) header = fitsio . read_header ( filename , ext = ext ) data_dict [ 'PIXEL' ] . append ( header [ 'LKDPIX' ] ) for key in keys : data_dict [ key ] . append ( header [ key ] ) del header data_dict [ 'PIXEL' ] = np . array ( data_dict [ 'PIXEL' ] , dtype = int ) for key in keys : data_dict [ key ] = np . array ( data_dict [ key ] , dtype = 'f4' ) #import pdb; pdb.set_trace() write_partial_map ( outfile , data_dict , nside ) return data_dict
5,455
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L479-L518
[ "def", "process", "(", "self", ",", "metric", ")", ":", "for", "rmq_server", "in", "self", ".", "connections", ".", "keys", "(", ")", ":", "try", ":", "if", "(", "(", "self", ".", "connections", "[", "rmq_server", "]", "is", "None", "or", "self", ".", "connections", "[", "rmq_server", "]", ".", "is_open", "is", "False", ")", ")", ":", "self", ".", "_bind", "(", "rmq_server", ")", "channel", "=", "self", ".", "channels", "[", "rmq_server", "]", "channel", ".", "basic_publish", "(", "exchange", "=", "self", ".", "rmq_exchange", ",", "routing_key", "=", "''", ",", "body", "=", "\"%s\"", "%", "metric", ")", "except", "Exception", "as", "exception", ":", "self", ".", "log", ".", "error", "(", "\"Failed publishing to %s, attempting reconnect\"", ",", "rmq_server", ")", "self", ".", "log", ".", "debug", "(", "\"Caught exception: %s\"", ",", "exception", ")", "self", ".", "_unbind", "(", "rmq_server", ")", "self", ".", "_bind", "(", "rmq_server", ")" ]
Converts a number to float or int as appropriate
def _convert_number ( self , number ) : number = float ( number ) return int ( number ) if number . is_integer ( ) else float ( number )
5,456
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/api.py#L46-L50
[ "def", "trigger", "(", "cls", ",", "streams", ")", ":", "# Union of stream contents", "items", "=", "[", "stream", ".", "contents", ".", "items", "(", ")", "for", "stream", "in", "set", "(", "streams", ")", "]", "union", "=", "[", "kv", "for", "kvs", "in", "items", "for", "kv", "in", "kvs", "]", "klist", "=", "[", "k", "for", "k", ",", "_", "in", "union", "]", "key_clashes", "=", "set", "(", "[", "k", "for", "k", "in", "klist", "if", "klist", ".", "count", "(", "k", ")", ">", "1", "]", ")", "if", "key_clashes", ":", "clashes", "=", "[", "]", "dicts", "=", "[", "dict", "(", "kvs", ")", "for", "kvs", "in", "items", "]", "for", "clash", "in", "key_clashes", ":", "values", "=", "set", "(", "d", "[", "clash", "]", "for", "d", "in", "dicts", "if", "clash", "in", "d", ")", "if", "len", "(", "values", ")", ">", "1", ":", "clashes", ".", "append", "(", "(", "clash", ",", "values", ")", ")", "if", "clashes", ":", "msg", "=", "', '", ".", "join", "(", "[", "'%r has values %r'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "clashes", "]", ")", "print", "(", "'Parameter value clashes where %s'", "%", "msg", ")", "# Group subscribers by precedence while keeping the ordering", "# within each group", "subscriber_precedence", "=", "defaultdict", "(", "list", ")", "for", "stream", "in", "streams", ":", "stream", ".", "_on_trigger", "(", ")", "for", "precedence", ",", "subscriber", "in", "stream", ".", "_subscribers", ":", "subscriber_precedence", "[", "precedence", "]", ".", "append", "(", "subscriber", ")", "sorted_subscribers", "=", "sorted", "(", "subscriber_precedence", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "subscribers", "=", "util", ".", "unique_iterator", "(", "[", "s", "for", "_", ",", "subscribers", "in", "sorted_subscribers", "for", "s", "in", "subscribers", "]", ")", "with", "triggering_streams", "(", "streams", ")", ":", "for", "subscriber", "in", "subscribers", ":", "subscriber", "(", "*", "*", "dict", "(", 
"union", ")", ")", "for", "stream", "in", "streams", ":", "with", "util", ".", "disable_constant", "(", "stream", ")", ":", "if", "stream", ".", "transient", ":", "stream", ".", "reset", "(", ")" ]
Write the results output file
def do_results ( args ) : config , name , label , coord = args filenames = make_filenames ( config , label ) srcfile = filenames [ 'srcfile' ] samples = filenames [ 'samfile' ] if not exists ( srcfile ) : logger . warning ( "Couldn't find %s; skipping..." % srcfile ) return if not exists ( samples ) : logger . warning ( "Couldn't find %s; skipping..." % samples ) return logger . info ( "Writing %s..." % srcfile ) from ugali . analysis . results import write_results write_results ( srcfile , config , srcfile , samples )
5,457
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L42-L59
[ "def", "vapour_pressure", "(", "Temperature", ",", "element", ")", ":", "if", "element", "==", "\"Rb\"", ":", "Tmelt", "=", "39.30", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.857", "-", "4215.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.312", "-", "4040.0", "/", "Temperature", ")", "# Torr.", "elif", "element", "==", "\"Cs\"", ":", "Tmelt", "=", "28.5", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.711", "-", "3999.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.165", "-", "3830.0", "/", "Temperature", ")", "# Torr.", "else", ":", "s", "=", "str", "(", "element", ")", "s", "+=", "\" is not an element in the database for this function.\"", "raise", "ValueError", "(", "s", ")", "P", "=", "P", "*", "101325.0", "/", "760.0", "# Pascals.", "return", "P" ]
Write the membership output file
def do_membership ( args ) : config , name , label , coord = args filenames = make_filenames ( config , label ) srcfile = filenames [ 'srcfile' ] memfile = filenames [ 'memfile' ] logger . info ( "Writing %s..." % memfile ) from ugali . analysis . loglike import write_membership write_membership ( memfile , config , srcfile , section = 'source' )
5,458
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L61-L71
[ "def", "arp_suppression", "(", "self", ",", "*", "*", "kwargs", ")", ":", "name", "=", "kwargs", ".", "pop", "(", "'name'", ")", "enable", "=", "kwargs", ".", "pop", "(", "'enable'", ",", "True", ")", "get", "=", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "method_class", "=", "self", ".", "_interface", "arp_args", "=", "dict", "(", "name", "=", "name", ")", "if", "name", ":", "if", "not", "pynos", ".", "utilities", ".", "valid_vlan_id", "(", "name", ")", ":", "raise", "InvalidVlanId", "(", "\"`name` must be between `1` and `8191`\"", ")", "arp_suppression", "=", "getattr", "(", "method_class", ",", "'interface_vlan_interface_vlan_suppress_'", "'arp_suppress_arp_enable'", ")", "config", "=", "arp_suppression", "(", "*", "*", "arp_args", ")", "if", "get", ":", "return", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "if", "not", "enable", ":", "config", ".", "find", "(", "'.//*suppress-arp'", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "callback", "(", "config", ")" ]
Create plots of mcmc output
def do_plot ( args ) : import ugali . utils . plotting import pylab as plt config , name , label , coord = args filenames = make_filenames ( config , label ) srcfile = filenames [ 'srcfile' ] samfile = filenames [ 'samfile' ] memfile = filenames [ 'memfile' ] if not exists ( srcfile ) : logger . warning ( "Couldn't find %s; skipping..." % srcfile ) return if not exists ( samfile ) : logger . warning ( "Couldn't find %s; skipping..." % samfile ) return config = ugali . utils . config . Config ( config ) burn = config [ 'mcmc' ] [ 'nburn' ] * config [ 'mcmc' ] [ 'nwalkers' ] source = ugali . analysis . source . Source ( ) source . load ( srcfile , section = 'source' ) outfile = samfile . replace ( '.npy' , '.png' ) ugali . utils . plotting . plotTriangle ( srcfile , samfile , burn = burn ) logger . info ( " Writing %s..." % outfile ) plt . savefig ( outfile , bbox_inches = 'tight' , dpi = 60 ) plt . close ( ) plotter = ugali . utils . plotting . SourcePlotter ( source , config , radius = 0.5 ) data = fitsio . read ( memfile , trim_strings = True ) if exists ( memfile ) else None if data is not None : plt . figure ( ) kernel , isochrone = source . kernel , source . isochrone ugali . utils . plotting . plotMembership ( config , data , kernel , isochrone ) outfile = samfile . replace ( '.npy' , '_mem.png' ) logger . info ( " Writing %s..." % outfile ) plt . savefig ( outfile , bbox_inches = 'tight' , dpi = 60 ) plt . close ( ) plotter . plot6 ( data ) outfile = samfile . replace ( '.npy' , '_6panel.png' ) logger . info ( " Writing %s..." % outfile ) plt . savefig ( outfile , bbox_inches = 'tight' , dpi = 60 ) outfile = samfile . replace ( '.npy' , '_6panel.pdf' ) logger . info ( " Writing %s..." % outfile ) plt . savefig ( outfile , bbox_inches = 'tight' , dpi = 60 ) plt . close ( ) try : title = name plotter . plot4 ( ) outfile = samfile . replace ( '.npy' , '_4panel.png' ) logger . info ( " Writing %s..." % outfile ) plt . suptitle ( title ) plt . 
savefig ( outfile , bbox_inches = 'tight' , dpi = 60 ) plt . close ( ) except : logger . warning ( " Failed to create plotter.plot4()" )
5,459
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L73-L136
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "sim", "=", "self", ".", "Similarity", "(", ")", "total", "=", "0.0", "# Calculate similarity ratio for each attribute", "cname", "=", "self", ".", "__class__", ".", "__name__", "for", "aname", ",", "weight", "in", "self", ".", "attributes", ".", "items", "(", ")", ":", "attr1", "=", "getattr", "(", "self", ",", "aname", ",", "None", ")", "attr2", "=", "getattr", "(", "other", ",", "aname", ",", "None", ")", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ")", "# Similarity is ignored if None on both objects", "if", "attr1", "is", "None", "and", "attr2", "is", "None", ":", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "\"attributes are both None\"", ")", "continue", "# Similarity is 0 if either attribute is non-Comparable", "if", "not", "all", "(", "(", "isinstance", "(", "attr1", ",", "Comparable", ")", ",", "isinstance", "(", "attr2", ",", "Comparable", ")", ")", ")", ":", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "\"attributes not Comparable\"", ")", "total", "+=", "weight", "continue", "# Calculate similarity between the attributes", "attr_sim", "=", "(", "attr1", "%", "attr2", ")", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "attr_sim", ")", "# Add the similarity to the total", "sim", "+=", "attr_sim", "*", "weight", "total", "+=", "weight", "# Scale the similarity so the total is 1.0", "if", "total", ":", "sim", "*=", "(", "1.0", "/", "total", ")", "return", "sim" ]
Return our initialized dictionary arguments .
def parse ( self , * args ) : if isinstance ( self . dictionary , dict ) : return self . dictionary raise self . subparserException ( "Argument passed to Dictionary SubParser is not a dict: %s" % type ( self . dictionary ) )
5,460
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/dictionary.py#L39-L47
[ "def", "calculate_best_fit_vectors", "(", "L", ",", "E", ",", "V", ",", "n_planes", ")", ":", "U", ",", "XV", "=", "E", "[", ":", "]", ",", "[", "]", "# make a copy of E to prevent mutation", "for", "pole", "in", "L", ":", "XV", ".", "append", "(", "vclose", "(", "pole", ",", "V", ")", ")", "# get some points on the great circle", "for", "c", "in", "range", "(", "3", ")", ":", "U", "[", "c", "]", "=", "U", "[", "c", "]", "+", "XV", "[", "-", "1", "]", "[", "c", "]", "# iterate to find best agreement", "angle_tol", "=", "1.", "while", "angle_tol", ">", "0.1", ":", "angles", "=", "[", "]", "for", "k", "in", "range", "(", "n_planes", ")", ":", "for", "c", "in", "range", "(", "3", ")", ":", "U", "[", "c", "]", "=", "U", "[", "c", "]", "-", "XV", "[", "k", "]", "[", "c", "]", "R", "=", "np", ".", "sqrt", "(", "U", "[", "0", "]", "**", "2", "+", "U", "[", "1", "]", "**", "2", "+", "U", "[", "2", "]", "**", "2", ")", "for", "c", "in", "range", "(", "3", ")", ":", "V", "[", "c", "]", "=", "old_div", "(", "U", "[", "c", "]", ",", "R", ")", "XX", "=", "vclose", "(", "L", "[", "k", "]", ",", "V", ")", "ang", "=", "XX", "[", "0", "]", "*", "XV", "[", "k", "]", "[", "0", "]", "+", "XX", "[", "1", "]", "*", "XV", "[", "k", "]", "[", "1", "]", "+", "XX", "[", "2", "]", "*", "XV", "[", "k", "]", "[", "2", "]", "angles", ".", "append", "(", "np", ".", "arccos", "(", "ang", ")", "*", "180.", "/", "np", ".", "pi", ")", "for", "c", "in", "range", "(", "3", ")", ":", "XV", "[", "k", "]", "[", "c", "]", "=", "XX", "[", "c", "]", "U", "[", "c", "]", "=", "U", "[", "c", "]", "+", "XX", "[", "c", "]", "amax", "=", "-", "1", "for", "ang", "in", "angles", ":", "if", "ang", ">", "amax", ":", "amax", "=", "ang", "angle_tol", "=", "amax", "return", "XV" ]
Caches variants_dict and replace_dict in a single database hit .
def _caches_dicts ( self ) : qs = ( self . get_query_set ( ) if django . VERSION < ( 1 , 6 ) else self . get_queryset ( ) ) variants_dict = self . _get_variants_dict ( qs ) cache . set ( VARIANTS_DICT_CACHE_KEY , variants_dict ) replace_dict = self . _get_replace_dict ( qs ) cache . set ( REPLACE_DICT_CACHE_KEY , replace_dict ) return variants_dict , replace_dict
5,461
https://github.com/BertrandBordage/django-terms/blob/2555c2cf5abf14adef9a8e2dd22c4a9076396a10/terms/managers.py#L41-L55
[ "def", "getMaxISOPacketSize", "(", "self", ",", "endpoint", ")", ":", "result", "=", "libusb1", ".", "libusb_get_max_iso_packet_size", "(", "self", ".", "device_p", ",", "endpoint", ")", "mayRaiseUSBError", "(", "result", ")", "return", "result" ]
Translate a botocore . exceptions . ClientError into a dynamo3 error
def translate_exception ( exc , kwargs ) : error = exc . response [ 'Error' ] error . setdefault ( 'Message' , '' ) err_class = EXC . get ( error [ 'Code' ] , DynamoDBError ) return err_class ( exc . response [ 'ResponseMetadata' ] [ 'HTTPStatusCode' ] , exc_info = sys . exc_info ( ) , args = pformat ( kwargs ) , * * error )
5,462
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/exception.py#L50-L56
[ "def", "find_video_file", "(", "rtdc_dataset", ")", ":", "video", "=", "None", "if", "rtdc_dataset", ".", "_fdir", ".", "exists", "(", ")", ":", "# Cell images (video)", "videos", "=", "[", "v", ".", "name", "for", "v", "in", "rtdc_dataset", ".", "_fdir", ".", "rglob", "(", "\"*.avi\"", ")", "]", "# Filter videos according to measurement number", "meas_id", "=", "rtdc_dataset", ".", "_mid", "videos", "=", "[", "v", "for", "v", "in", "videos", "if", "v", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "==", "meas_id", "]", "videos", ".", "sort", "(", ")", "if", "len", "(", "videos", ")", "!=", "0", ":", "# Defaults to first avi file", "video", "=", "videos", "[", "0", "]", "# g/q video file names. q comes first.", "for", "v", "in", "videos", ":", "if", "v", ".", "endswith", "(", "\"imag.avi\"", ")", ":", "video", "=", "v", "break", "# add this here, because fRT-DC measurements also contain", "# videos ..._proc.avi", "elif", "v", ".", "endswith", "(", "\"imaq.avi\"", ")", ":", "video", "=", "v", "break", "if", "video", "is", "None", ":", "return", "None", "else", ":", "return", "rtdc_dataset", ".", "_fdir", "/", "video" ]
Raise this exception with the original traceback
def re_raise ( self ) : if self . exc_info is not None : six . reraise ( type ( self ) , self , self . exc_info [ 2 ] ) else : raise self
5,463
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/exception.py#L19-L24
[ "def", "updateSeriesRegistrationStatus", "(", ")", ":", "from", ".", "models", "import", "Series", "if", "not", "getConstant", "(", "'general__enableCronTasks'", ")", ":", "return", "logger", ".", "info", "(", "'Checking status of Series that are open for registration.'", ")", "open_series", "=", "Series", ".", "objects", ".", "filter", "(", ")", ".", "filter", "(", "*", "*", "{", "'registrationOpen'", ":", "True", "}", ")", "for", "series", "in", "open_series", ":", "series", ".", "updateRegistrationStatus", "(", ")" ]
r Yield text one line at a time from from a single file path files in a directory or a text string
def generate_lines ( text , ext = [ '.txt' , '.md' , '.rst' , '.asciidoc' , '.asc' ] ) : if isinstance ( text , basestring ) : if len ( text ) <= 256 : if os . path . isfile ( text ) and os . path . splitext ( text ) [ - 1 ] . lower ( ) in ext : return open ( text ) elif os . path . isdir ( text ) : return chain . from_iterable ( generate_lines ( stat [ 'path' ] ) for stat in find_files ( text , ext = ext ) ) else : return ( line for line in Split ( text = text ) ) else : return Split ( text = text ) return chain . from_iterable ( generate_lines ( obj ) for obj in text )
5,464
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/segmentation.py#L49-L66
[ "def", "solve", "(", "self", ",", "value", ",", "resource", ")", ":", "result", "=", "self", ".", "solve_value", "(", "value", ",", "resource", ")", "return", "self", ".", "coerce", "(", "result", ",", "resource", ")" ]
Add a timestamp and date to the data
def add ( self , now , num ) : if num == 0 : return self . points . append ( ( now , num ) )
5,465
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L27-L31
[ "def", "_add_item", "(", "self", ",", "item", ",", "indent_amt", ")", ":", "if", "self", ".", "_prev_item", "and", "self", ".", "_prev_item", ".", "is_string", "and", "item", ".", "is_string", ":", "# Place consecutive string literals on separate lines.", "self", ".", "_lines", ".", "append", "(", "self", ".", "_LineBreak", "(", ")", ")", "self", ".", "_lines", ".", "append", "(", "self", ".", "_Indent", "(", "indent_amt", ")", ")", "item_text", "=", "unicode", "(", "item", ")", "if", "self", ".", "_lines", "and", "self", ".", "_bracket_depth", ":", "# Adding the item into a container.", "self", ".", "_prevent_default_initializer_splitting", "(", "item", ",", "indent_amt", ")", "if", "item_text", "in", "'.,)]}'", ":", "self", ".", "_split_after_delimiter", "(", "item", ",", "indent_amt", ")", "elif", "self", ".", "_lines", "and", "not", "self", ".", "line_empty", "(", ")", ":", "# Adding the item outside of a container.", "if", "self", ".", "fits_on_current_line", "(", "len", "(", "item_text", ")", ")", ":", "self", ".", "_enforce_space", "(", "item", ")", "else", ":", "# Line break for the new item.", "self", ".", "_lines", ".", "append", "(", "self", ".", "_LineBreak", "(", ")", ")", "self", ".", "_lines", ".", "append", "(", "self", ".", "_Indent", "(", "indent_amt", ")", ")", "self", ".", "_lines", ".", "append", "(", "item", ")", "self", ".", "_prev_item", ",", "self", ".", "_prev_prev_item", "=", "item", ",", "self", ".", "_prev_item", "if", "item_text", "in", "'([{'", ":", "self", ".", "_bracket_depth", "+=", "1", "elif", "item_text", "in", "'}])'", ":", "self", ".", "_bracket_depth", "-=", "1", "assert", "self", ".", "_bracket_depth", ">=", "0" ]
Get the summation of all non - expired points
def value ( self ) : now = time . time ( ) cutoff = now - self . window while self . points and self . points [ 0 ] [ 0 ] < cutoff : self . points . pop ( 0 ) return sum ( [ p [ 1 ] for p in self . points ] )
5,466
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L34-L40
[ "def", "parse_compound_file", "(", "path", ",", "format", ")", ":", "context", "=", "FilePathContext", "(", "path", ")", "# YAML files do not need to explicitly specify format", "format", "=", "resolve_format", "(", "format", ",", "context", ".", "filepath", ")", "if", "format", "==", "'yaml'", ":", "logger", ".", "debug", "(", "'Parsing compound file {} as YAML'", ".", "format", "(", "context", ".", "filepath", ")", ")", "with", "context", ".", "open", "(", "'r'", ")", "as", "f", ":", "for", "compound", "in", "parse_compound_yaml_file", "(", "context", ",", "f", ")", ":", "yield", "compound", "elif", "format", "==", "'modelseed'", ":", "logger", ".", "debug", "(", "'Parsing compound file {} as ModelSEED TSV'", ".", "format", "(", "context", ".", "filepath", ")", ")", "with", "context", ".", "open", "(", "'r'", ")", "as", "f", ":", "for", "compound", "in", "modelseed", ".", "parse_compound_file", "(", "f", ",", "context", ")", ":", "yield", "compound", "elif", "format", "==", "'tsv'", ":", "logger", ".", "debug", "(", "'Parsing compound file {} as TSV'", ".", "format", "(", "context", ".", "filepath", ")", ")", "with", "context", ".", "open", "(", "'r'", ")", "as", "f", ":", "for", "compound", "in", "parse_compound_table_file", "(", "context", ",", "f", ")", ":", "yield", "compound", "else", ":", "raise", "ParseError", "(", "'Unable to detect format of compound file {}'", ".", "format", "(", "context", ".", "filepath", ")", ")" ]
Getter for a consumed capacity storage dict
def get_consumed ( self , key ) : if key not in self . _consumed : self . _consumed [ key ] = { 'read' : DecayingCapacityStore ( ) , 'write' : DecayingCapacityStore ( ) , } return self . _consumed [ key ]
5,467
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L96-L103
[ "def", "ParsedSections", "(", "file_val", ")", ":", "try", ":", "template_dict", "=", "{", "}", "cur_section", "=", "''", "for", "val", "in", "file_val", ".", "split", "(", "'\\n'", ")", ":", "val", "=", "val", ".", "strip", "(", ")", "if", "val", "!=", "''", ":", "section_match", "=", "re", ".", "match", "(", "r'\\[.+\\]'", ",", "val", ")", "if", "section_match", ":", "cur_section", "=", "section_match", ".", "group", "(", ")", "[", "1", ":", "-", "1", "]", "template_dict", "[", "cur_section", "]", "=", "{", "}", "else", ":", "option", ",", "value", "=", "val", ".", "split", "(", "'='", ",", "1", ")", "option", "=", "option", ".", "strip", "(", ")", "value", "=", "value", ".", "strip", "(", ")", "if", "option", ".", "startswith", "(", "'#'", ")", ":", "template_dict", "[", "cur_section", "]", "[", "val", "]", "=", "''", "else", ":", "template_dict", "[", "cur_section", "]", "[", "option", "]", "=", "value", "except", "Exception", ":", "# pragma: no cover", "template_dict", "=", "{", "}", "return", "template_dict" ]
Hook that runs in response to a returned capacity event
def on_capacity ( self , connection , command , query_kwargs , response , capacity ) : now = time . time ( ) args = ( connection , command , query_kwargs , response , capacity ) # Check total against the total_cap self . _wait ( args , now , self . total_cap , self . _total_consumed , capacity . total ) # Increment table consumed capacity & check it if capacity . tablename in self . table_caps : table_cap = self . table_caps [ capacity . tablename ] else : table_cap = self . default_cap consumed_history = self . get_consumed ( capacity . tablename ) if capacity . table_capacity is not None : self . _wait ( args , now , table_cap , consumed_history , capacity . table_capacity ) # The local index consumed capacity also counts against the table if capacity . local_index_capacity is not None : for consumed in six . itervalues ( capacity . local_index_capacity ) : self . _wait ( args , now , table_cap , consumed_history , consumed ) # Increment global indexes # check global indexes against the table+index cap or default gic = capacity . global_index_capacity if gic is not None : for index_name , consumed in six . iteritems ( gic ) : full_name = capacity . tablename + ':' + index_name if index_name in table_cap : index_cap = table_cap [ index_name ] elif full_name in self . table_caps : index_cap = self . table_caps [ full_name ] else : # If there's no specified capacity for the index, # use the cap on the table index_cap = table_cap consumed_history = self . get_consumed ( full_name ) self . _wait ( args , now , index_cap , consumed_history , consumed )
5,468
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L105-L143
[ "def", "bbox_rotate", "(", "bbox", ",", "angle", ",", "rows", ",", "cols", ",", "interpolation", ")", ":", "scale", "=", "cols", "/", "float", "(", "rows", ")", "x", "=", "np", ".", "array", "(", "[", "bbox", "[", "0", "]", ",", "bbox", "[", "2", "]", ",", "bbox", "[", "2", "]", ",", "bbox", "[", "0", "]", "]", ")", "y", "=", "np", ".", "array", "(", "[", "bbox", "[", "1", "]", ",", "bbox", "[", "1", "]", ",", "bbox", "[", "3", "]", ",", "bbox", "[", "3", "]", "]", ")", "x", "=", "x", "-", "0.5", "y", "=", "y", "-", "0.5", "angle", "=", "np", ".", "deg2rad", "(", "angle", ")", "x_t", "=", "(", "np", ".", "cos", "(", "angle", ")", "*", "x", "*", "scale", "+", "np", ".", "sin", "(", "angle", ")", "*", "y", ")", "/", "scale", "y_t", "=", "(", "-", "np", ".", "sin", "(", "angle", ")", "*", "x", "*", "scale", "+", "np", ".", "cos", "(", "angle", ")", "*", "y", ")", "x_t", "=", "x_t", "+", "0.5", "y_t", "=", "y_t", "+", "0.5", "return", "[", "min", "(", "x_t", ")", ",", "min", "(", "y_t", ")", ",", "max", "(", "x_t", ")", ",", "max", "(", "y_t", ")", "]" ]
Check the consumed capacity against the limit and sleep
def _wait ( self , args , now , cap , consumed_history , consumed_capacity ) : for key in [ 'read' , 'write' ] : if key in cap and cap [ key ] > 0 : consumed_history [ key ] . add ( now , consumed_capacity [ key ] ) consumed = consumed_history [ key ] . value if consumed > 0 and consumed >= cap [ key ] : seconds = math . ceil ( float ( consumed ) / cap [ key ] ) LOG . debug ( "Rate limited throughput exceeded. Sleeping " "for %d seconds." , seconds ) if callable ( self . callback ) : callback_args = args + ( seconds , ) if self . callback ( * callback_args ) : continue time . sleep ( seconds )
5,469
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L145-L159
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", "=", "None", ",", "spatialReferenceID", "=", "None", ",", "replaceParamFile", "=", "None", ")", ":", "yml_events", "=", "[", "]", "with", "open", "(", "path", ")", "as", "fo", ":", "yml_events", "=", "yaml", ".", "load", "(", "fo", ")", "for", "yml_event", "in", "yml_events", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "yml_event", ".", "subfolder", ")", ")", ":", "orm_event", "=", "yml_event", ".", "as_orm", "(", ")", "if", "not", "self", ".", "_similar_event_exists", "(", "orm_event", ".", "subfolder", ")", ":", "session", ".", "add", "(", "orm_event", ")", "self", ".", "events", ".", "append", "(", "orm_event", ")", "session", ".", "commit", "(", ")" ]
Send a case creation to SalesForces to create a ticket .
def create_case ( self , name , email , subject , description , businessImpact , priority , phone ) : if not ( '@' in parseaddr ( email ) [ 1 ] ) : raise ValueError ( 'invalid email: {}' . format ( email ) ) if '' == name or name is None : raise ValueError ( 'empty name' ) if '' == subject or subject is None : raise ValueError ( 'empty subject' ) if '' == description or description is None : raise ValueError ( 'empty description' ) if '' == businessImpact or businessImpact is None : raise ValueError ( 'empty business impact' ) if priority is None : raise ValueError ( 'Ensure the priority is from the set of ' 'known priorities' ) if '' == phone or phone is None : raise ValueError ( 'empty phone' ) try : r = requests . post ( self . url , data = { 'orgid' : self . orgId , 'recordType' : self . recordType , 'name' : name , 'email' : email , 'subject' : subject , 'description' : description , self . BUSINESS_IMPACT : businessImpact , 'priority' : priority , 'phone' : phone , 'external' : 1 } , timeout = self . timeout ) r . raise_for_status ( ) except Timeout : message = 'Request timed out: {url} timeout: {timeout}' message = message . format ( url = self . url , timeout = self . timeout ) log . error ( message ) raise ServerError ( message ) except RequestException as err : log . info ( 'cannot create case: {}' . format ( err ) ) raise ServerError ( 'cannot create case: {}' . format ( err ) )
5,470
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/support.py#L51-L105
[ "def", "_iter_avro_blocks", "(", "fo", ",", "header", ",", "codec", ",", "writer_schema", ",", "reader_schema", ")", ":", "sync_marker", "=", "header", "[", "'sync'", "]", "read_block", "=", "BLOCK_READERS", ".", "get", "(", "codec", ")", "if", "not", "read_block", ":", "raise", "ValueError", "(", "'Unrecognized codec: %r'", "%", "codec", ")", "while", "True", ":", "offset", "=", "fo", ".", "tell", "(", ")", "try", ":", "num_block_records", "=", "read_long", "(", "fo", ")", "except", "StopIteration", ":", "return", "block_bytes", "=", "read_block", "(", "fo", ")", "skip_sync", "(", "fo", ",", "sync_marker", ")", "size", "=", "fo", ".", "tell", "(", ")", "-", "offset", "yield", "Block", "(", "block_bytes", ",", "num_block_records", ",", "codec", ",", "reader_schema", ",", "writer_schema", ",", "offset", ",", "size", ")" ]
Get the sum of spending for this category up to and including the given month .
def get_cumulative_spend ( key ) : query = ( 'ROUND(SUM(total_ex_vat), 2) AS total ' 'FROM {table} ' 'WHERE date <= "{year}-{month:02}-01" ' 'AND lot="{lot}" ' 'AND customer_sector="{sector}" ' 'AND supplier_type="{sme_large}"' . format ( table = _RAW_SALES_TABLE , year = key . year , month = key . month , lot = key . lot , sector = key . sector , sme_large = key . sme_large ) ) logging . debug ( query ) result = scraperwiki . sqlite . select ( query ) logging . debug ( result ) value = result [ 0 ] [ 'total' ] return float ( result [ 0 ] [ 'total' ] ) if value is not None else 0.0
5,471
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/gcloud/aggregate.py#L130-L151
[ "def", "cf_decoder", "(", "variables", ",", "attributes", ",", "concat_characters", "=", "True", ",", "mask_and_scale", "=", "True", ",", "decode_times", "=", "True", ")", ":", "variables", ",", "attributes", ",", "_", "=", "decode_cf_variables", "(", "variables", ",", "attributes", ",", "concat_characters", ",", "mask_and_scale", ",", "decode_times", ")", "return", "variables", ",", "attributes" ]
Plot the ROI
def plot ( self , value = None , pixel = None ) : # DEPRECATED import ugali . utils . plotting map_roi = np . array ( hp . UNSEEN * np . ones ( hp . nside2npix ( self . config . params [ 'coords' ] [ 'nside_pixel' ] ) ) ) if value is None : #map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat) map_roi [ self . pixels ] = 1 map_roi [ self . pixels_annulus ] = 0 map_roi [ self . pixels_target ] = 2 elif value is not None and pixel is None : map_roi [ self . pixels ] = value elif value is not None and pixel is not None : map_roi [ pixel ] = value else : logger . error ( "Can't parse input" ) ugali . utils . plotting . zoomedHealpixMap ( 'Region of Interest' , map_roi , self . lon , self . lat , self . config . params [ 'coords' ] [ 'roi_radius' ] )
5,472
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L135-L160
[ "def", "Close", "(", "self", ")", ":", "if", "self", ".", "_connection", ":", "self", ".", "_cursor", "=", "None", "self", ".", "_connection", ".", "close", "(", ")", "self", ".", "_connection", "=", "None", "# TODO: move this to a central temp file manager and have it track errors.", "# https://github.com/log2timeline/dfvfs/issues/92", "try", ":", "os", ".", "remove", "(", "self", ".", "_temp_file_path", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "pass", "self", ".", "_temp_file_path", "=", "''" ]
Function for testing if coordintes in set of ROI pixels .
def inPixels ( self , lon , lat , pixels ) : nside = self . config . params [ 'coords' ] [ 'nside_pixel' ] return ugali . utils . healpix . in_pixels ( lon , lat , pixels , nside )
5,473
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L163-L166
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "__buffered", "is", "None", ":", "# Use floor division to force multiplier to an integer", "multiplier", "=", "self", ".", "__max_in_mem", "//", "self", ".", "__chunk_size", "self", ".", "__buffered", "=", "\"\"", "else", ":", "multiplier", "=", "1", "self", ".", "__buffered", "=", "self", ".", "__buffered", "[", "self", ".", "__chunk_size", ":", "]", "data", "=", "self", ".", "__file", ".", "read", "(", "self", ".", "__chunk_size", "*", "multiplier", ")", "# Data is a byte object in Python 3", "# Decode it in order to append to self.__buffered str later", "# Use the salt util in case it's already a string (Windows)", "data", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "data", ")", "if", "not", "data", ":", "self", ".", "__file", ".", "close", "(", ")", "raise", "StopIteration", "self", ".", "__buffered", "+=", "data", "return", "self", ".", "__buffered" ]
Return the catalog pixels spanned by this ROI .
def getCatalogPixels ( self ) : filenames = self . config . getFilenames ( ) nside_catalog = self . config . params [ 'coords' ] [ 'nside_catalog' ] nside_pixel = self . config . params [ 'coords' ] [ 'nside_pixel' ] # All possible catalog pixels spanned by the ROI superpix = ugali . utils . skymap . superpixel ( self . pixels , nside_pixel , nside_catalog ) superpix = np . unique ( superpix ) # Only catalog pixels that exist in catalog files pixels = np . intersect1d ( superpix , filenames [ 'pix' ] . compressed ( ) ) return pixels
5,474
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L196-L209
[ "def", "_do_logon", "(", "self", ")", ":", "if", "self", ".", "_userid", "is", "None", ":", "raise", "ClientAuthError", "(", "\"Userid is not provided.\"", ")", "if", "self", ".", "_password", "is", "None", ":", "if", "self", ".", "_get_password", ":", "self", ".", "_password", "=", "self", ".", "_get_password", "(", "self", ".", "_host", ",", "self", ".", "_userid", ")", "else", ":", "raise", "ClientAuthError", "(", "\"Password is not provided.\"", ")", "logon_uri", "=", "'/api/sessions'", "logon_body", "=", "{", "'userid'", ":", "self", ".", "_userid", ",", "'password'", ":", "self", ".", "_password", "}", "self", ".", "_headers", ".", "pop", "(", "'X-API-Session'", ",", "None", ")", "# Just in case", "self", ".", "_session", "=", "self", ".", "_new_session", "(", "self", ".", "retry_timeout_config", ")", "logon_res", "=", "self", ".", "post", "(", "logon_uri", ",", "logon_body", ",", "logon_required", "=", "False", ")", "self", ".", "_session_id", "=", "logon_res", "[", "'api-session'", "]", "self", ".", "_headers", "[", "'X-API-Session'", "]", "=", "self", ".", "_session_id" ]
Create the index schema
def schema ( self , hash_key ) : key_schema = [ hash_key . hash_schema ( ) ] if self . range_key is not None : key_schema . append ( self . range_key . range_schema ( ) ) schema_data = { 'IndexName' : self . name , 'KeySchema' : key_schema , 'Projection' : { 'ProjectionType' : self . projection_type , } } if self . include_fields is not None : schema_data [ 'Projection' ] [ 'NonKeyAttributes' ] = self . include_fields return schema_data
5,475
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L76-L98
[ "def", "_handle_eio_message", "(", "self", ",", "sid", ",", "data", ")", ":", "if", "sid", "in", "self", ".", "_binary_packet", ":", "pkt", "=", "self", ".", "_binary_packet", "[", "sid", "]", "if", "pkt", ".", "add_attachment", "(", "data", ")", ":", "del", "self", ".", "_binary_packet", "[", "sid", "]", "if", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_EVENT", ":", "self", ".", "_handle_event", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "else", ":", "self", ".", "_handle_ack", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "else", ":", "pkt", "=", "packet", ".", "Packet", "(", "encoded_packet", "=", "data", ")", "if", "pkt", ".", "packet_type", "==", "packet", ".", "CONNECT", ":", "self", ".", "_handle_connect", "(", "sid", ",", "pkt", ".", "namespace", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "DISCONNECT", ":", "self", ".", "_handle_disconnect", "(", "sid", ",", "pkt", ".", "namespace", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "EVENT", ":", "self", ".", "_handle_event", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "ACK", ":", "self", ".", "_handle_ack", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_EVENT", "or", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_ACK", ":", "self", ".", "_binary_packet", "[", "sid", "]", "=", "pkt", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "ERROR", ":", "raise", "ValueError", "(", "'Unexpected ERROR packet.'", ")", "else", ":", "raise", "ValueError", "(", "'Unknown packet type.'", ")" ]
Create an index that projects all attributes
def all ( cls , name , hash_key , range_key = None , throughput = None ) : return cls ( cls . ALL , name , hash_key , range_key , throughput = throughput )
5,476
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L174-L176
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "RevocationType", "]", ",", "version", ":", "int", ",", "currency", ":", "str", ",", "inline", ":", "str", ")", "->", "RevocationType", ":", "cert_data", "=", "Revocation", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "cert_data", "is", "None", ":", "raise", "MalformedDocumentError", "(", "\"Revokation\"", ")", "pubkey", "=", "cert_data", ".", "group", "(", "1", ")", "signature", "=", "cert_data", ".", "group", "(", "2", ")", "return", "cls", "(", "version", ",", "currency", ",", "pubkey", ",", "signature", ")" ]
Create an index that projects only key attributes
def keys ( cls , name , hash_key , range_key = None , throughput = None ) : return cls ( cls . KEYS , name , hash_key , range_key , throughput = throughput )
5,477
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L179-L182
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "RevocationType", "]", ",", "version", ":", "int", ",", "currency", ":", "str", ",", "inline", ":", "str", ")", "->", "RevocationType", ":", "cert_data", "=", "Revocation", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "cert_data", "is", "None", ":", "raise", "MalformedDocumentError", "(", "\"Revokation\"", ")", "pubkey", "=", "cert_data", ".", "group", "(", "1", ")", "signature", "=", "cert_data", ".", "group", "(", "2", ")", "return", "cls", "(", "version", ",", "currency", ",", "pubkey", ",", "signature", ")" ]
Construct the schema definition for this index
def schema ( self ) : schema_data = super ( GlobalIndex , self ) . schema ( self . hash_key ) schema_data [ 'ProvisionedThroughput' ] = self . throughput . schema ( ) return schema_data
5,478
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L191-L195
[ "def", "_handle_eio_message", "(", "self", ",", "sid", ",", "data", ")", ":", "if", "sid", "in", "self", ".", "_binary_packet", ":", "pkt", "=", "self", ".", "_binary_packet", "[", "sid", "]", "if", "pkt", ".", "add_attachment", "(", "data", ")", ":", "del", "self", ".", "_binary_packet", "[", "sid", "]", "if", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_EVENT", ":", "self", ".", "_handle_event", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "else", ":", "self", ".", "_handle_ack", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "else", ":", "pkt", "=", "packet", ".", "Packet", "(", "encoded_packet", "=", "data", ")", "if", "pkt", ".", "packet_type", "==", "packet", ".", "CONNECT", ":", "self", ".", "_handle_connect", "(", "sid", ",", "pkt", ".", "namespace", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "DISCONNECT", ":", "self", ".", "_handle_disconnect", "(", "sid", ",", "pkt", ".", "namespace", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "EVENT", ":", "self", ".", "_handle_event", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "ACK", ":", "self", ".", "_handle_ack", "(", "sid", ",", "pkt", ".", "namespace", ",", "pkt", ".", "id", ",", "pkt", ".", "data", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_EVENT", "or", "pkt", ".", "packet_type", "==", "packet", ".", "BINARY_ACK", ":", "self", ".", "_binary_packet", "[", "sid", "]", "=", "pkt", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "ERROR", ":", "raise", "ValueError", "(", "'Unexpected ERROR packet.'", ")", "else", ":", "raise", "ValueError", "(", "'Unknown packet type.'", ")" ]
Create a Table from returned Dynamo data
def from_response ( cls , response ) : hash_key = None range_key = None # KeySchema may not be in the response if the TableStatus is DELETING. if 'KeySchema' in response : attrs = dict ( ( ( d [ 'AttributeName' ] , DynamoKey ( d [ 'AttributeName' ] , d [ 'AttributeType' ] ) ) for d in response [ 'AttributeDefinitions' ] ) ) hash_key = attrs [ response [ 'KeySchema' ] [ 0 ] [ 'AttributeName' ] ] if len ( response [ 'KeySchema' ] ) > 1 : range_key = attrs [ response [ 'KeySchema' ] [ 1 ] [ 'AttributeName' ] ] indexes = [ ] for idx in response . get ( 'LocalSecondaryIndexes' , [ ] ) : indexes . append ( LocalIndex . from_response ( idx , attrs ) ) global_indexes = [ ] for idx in response . get ( 'GlobalSecondaryIndexes' , [ ] ) : global_indexes . append ( GlobalIndex . from_response ( idx , attrs ) ) table = cls ( name = response [ 'TableName' ] , hash_key = hash_key , range_key = range_key , indexes = indexes , global_indexes = global_indexes , throughput = Throughput . from_response ( response [ 'ProvisionedThroughput' ] ) , status = response [ 'TableStatus' ] , size = response [ 'TableSizeBytes' ] , ) table . response = response return table
5,479
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L296-L328
[ "def", "chat", "(", "ws", ")", ":", "lag_tolerance_secs", "=", "float", "(", "request", ".", "args", ".", "get", "(", "\"tolerance\"", ",", "0.1", ")", ")", "client", "=", "Client", "(", "ws", ",", "lag_tolerance_secs", "=", "lag_tolerance_secs", ")", "client", ".", "subscribe", "(", "request", ".", "args", ".", "get", "(", "\"channel\"", ")", ")", "gevent", ".", "spawn", "(", "client", ".", "heartbeat", ")", "client", ".", "publish", "(", ")" ]
Get the serialized Dynamo format for the update
def serialize ( self ) : if self . action == 'Create' : payload = self . extra [ 'index' ] . schema ( ) else : payload = { 'IndexName' : self . index_name , } if self . action == 'Update' : payload [ 'ProvisionedThroughput' ] = self . extra [ 'throughput' ] . schema ( ) return { self . action : payload }
5,480
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L388-L401
[ "def", "compare_root_path", "(", "path_cost1", ",", "path_cost2", ",", "bridge_id1", ",", "bridge_id2", ",", "port_id1", ",", "port_id2", ")", ":", "result", "=", "Stp", ".", "_cmp_value", "(", "path_cost1", ",", "path_cost2", ")", "if", "not", "result", ":", "result", "=", "Stp", ".", "_cmp_value", "(", "bridge_id1", ",", "bridge_id2", ")", "if", "not", "result", ":", "result", "=", "Stp", ".", "_cmp_value", "(", "port_id1", ",", "port_id2", ")", "return", "result" ]
Returns existing instance of messenger . If one does not exist it will be created and returned .
def instance ( cls , interval = 5 ) : if not cls . _instance : cls . _instance = _Messenger ( interval ) return cls . _instance
5,481
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L87-L101
[ "def", "_update_mappings", "(", "self", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'DB-Method'", ":", "'PUT'", "}", "url", "=", "'/v2/exchange/db/{}/{}/_mappings'", ".", "format", "(", "self", ".", "domain", ",", "self", ".", "data_type", ")", "r", "=", "self", ".", "tcex", ".", "session", ".", "post", "(", "url", ",", "json", "=", "self", ".", "mapping", ",", "headers", "=", "headers", ")", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'update mapping. status_code: {}, response: \"{}\".'", ".", "format", "(", "r", ".", "status_code", ",", "r", ".", "text", ")", ")" ]
Sends provided message to all listeners . Message is only added to queue and will be processed on next tick .
def send ( self , message , * args , * * kwargs ) : self . _messages . put ( ( message , args , kwargs ) , False )
5,482
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L117-L125
[ "def", "_check_registry_type", "(", "folder", "=", "None", ")", ":", "folder", "=", "_registry_folder", "(", "folder", ")", "default_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'registry_type.txt'", ")", "try", ":", "with", "open", "(", "default_file", ",", "\"r\"", ")", "as", "infile", ":", "data", "=", "infile", ".", "read", "(", ")", "data", "=", "data", ".", "strip", "(", ")", "ComponentRegistry", ".", "SetBackingStore", "(", "data", ")", "except", "IOError", ":", "pass" ]
Adds hander for specified message .
def subscribe ( self , message , handler ) : with self . _lock : ref = WeakCallable ( handler , self . _on_collect ) self . _subscribers [ message ] . append ( ref )
5,483
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L127-L140
[ "def", "BDEVolumeOpen", "(", "bde_volume", ",", "path_spec", ",", "file_object", ",", "key_chain", ")", ":", "password", "=", "key_chain", ".", "GetCredential", "(", "path_spec", ",", "'password'", ")", "if", "password", ":", "bde_volume", ".", "set_password", "(", "password", ")", "recovery_password", "=", "key_chain", ".", "GetCredential", "(", "path_spec", ",", "'recovery_password'", ")", "if", "recovery_password", ":", "bde_volume", ".", "set_recovery_password", "(", "recovery_password", ")", "startup_key", "=", "key_chain", ".", "GetCredential", "(", "path_spec", ",", "'startup_key'", ")", "if", "startup_key", ":", "bde_volume", ".", "read_startup_key", "(", "startup_key", ")", "bde_volume", ".", "open_file_object", "(", "file_object", ")" ]
Removes handler from message listeners .
def unsubscribe ( self , message , handler ) : with self . _lock : self . _subscribers [ message ] . remove ( WeakCallable ( handler ) )
5,484
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L143-L154
[ "def", "cn", "(", "shape", ",", "dtype", "=", "None", ",", "impl", "=", "'numpy'", ",", "*", "*", "kwargs", ")", ":", "cn_cls", "=", "tensor_space_impl", "(", "impl", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "cn_cls", ".", "default_dtype", "(", "ComplexNumbers", "(", ")", ")", "# Use args by keyword since the constructor may take other arguments", "# by position", "cn", "=", "cn_cls", "(", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "*", "*", "kwargs", ")", "if", "not", "cn", ".", "is_complex", ":", "raise", "ValueError", "(", "'data type {!r} not a complex floating-point type.'", "''", ".", "format", "(", "dtype", ")", ")", "return", "cn" ]
Event handler for timer that processes all queued messages .
def _execute ( self , sender , event_args ) : with self . _lock : while not self . _messages . empty ( ) : msg , args , kwargs = self . _messages . get ( False ) for subscriber in self . _subscribers [ msg ] : try : subscriber ( * args , * * kwargs ) except weakref . ReferenceError : # Reference to handler is lost and it is OK to silence it pass
5,485
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L156-L168
[ "def", "create", "(", "self", ",", "repo_name", ",", "scm", "=", "'git'", ",", "private", "=", "True", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "bitbucket", ".", "url", "(", "'CREATE_REPO'", ")", "return", "self", ".", "bitbucket", ".", "dispatch", "(", "'POST'", ",", "url", ",", "auth", "=", "self", ".", "bitbucket", ".", "auth", ",", "name", "=", "repo_name", ",", "scm", "=", "scm", ",", "is_private", "=", "private", ",", "*", "*", "kwargs", ")" ]
Emits this signal . As result all handlers will be invoked .
def emit ( self , * args , * * kwargs ) : self . _messanger . send ( self , * args , * * kwargs )
5,486
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L204-L208
[ "def", "checkIsConsistent", "(", "self", ")", ":", "if", "is_an_array", "(", "self", ".", "mask", ")", "and", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ":", "raise", "ConsistencyError", "(", "\"Shape mismatch mask={}, data={}\"", ".", "format", "(", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ")", ")" ]
Raises event that property value has changed for provided property name .
def RaisePropertyChanged ( self , property_name ) : args = PropertyChangedEventArgs ( property_name ) for handler in self . property_chaged_handlers : handler ( self , args )
5,487
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L350-L359
[ "def", "write", "(", "self", ",", "filename", "=", "None", ",", "io", "=", "None", ",", "coors", "=", "None", ",", "igs", "=", "None", ",", "out", "=", "None", ",", "float_format", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "name", "+", "'.mesh'", "if", "io", "is", "None", ":", "io", "=", "self", ".", "io", "if", "io", "is", "None", ":", "io", "=", "'auto'", "if", "io", "==", "'auto'", ":", "io", "=", "MeshIO", ".", "any_from_filename", "(", "filename", ")", "if", "coors", "is", "None", ":", "coors", "=", "self", ".", "coors", "if", "igs", "is", "None", ":", "igs", "=", "range", "(", "len", "(", "self", ".", "conns", ")", ")", "aux_mesh", "=", "Mesh", ".", "from_data", "(", "self", ".", "name", ",", "coors", ",", "self", ".", "ngroups", ",", "self", ".", "conns", ",", "self", ".", "mat_ids", ",", "self", ".", "descs", ",", "igs", ")", "io", ".", "set_float_format", "(", "float_format", ")", "io", ".", "write", "(", "filename", ",", "aux_mesh", ",", "out", ",", "*", "*", "kwargs", ")" ]
Like os . walk but takes level kwarg that indicates how deep the recursion will go .
def walk_level ( path , level = 1 ) : if level is None : level = float ( 'inf' ) path = expand_path ( path ) if os . path . isdir ( path ) : root_level = path . count ( os . path . sep ) for root , dirs , files in os . walk ( path ) : yield root , dirs , files if root . count ( os . path . sep ) >= root_level + level : del dirs [ : ] elif os . path . isfile ( path ) : yield os . path . dirname ( path ) , [ ] , [ os . path . basename ( path ) ] else : raise RuntimeError ( "Can't find a valid folder or file for path {0}" . format ( repr ( path ) ) )
5,488
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L45-L79
[ "def", "send_headers", "(", "self", ")", ":", "self", ".", "events", ".", "sync_emit", "(", "'headers'", ")", "self", ".", "_set_default_headers", "(", ")", "header_str", "=", "self", ".", "status_line", "+", "self", ".", "EOL", "+", "str", "(", "self", ".", "headers", ")", "self", ".", "stream", ".", "write", "(", "header_str", ".", "encode", "(", ")", ")", "self", ".", "events", ".", "sync_emit", "(", "'after_headers'", ")" ]
Use python builtin equivalents to unix stat command and return dict containing stat data about a file
def get_stat ( full_path ) : status = { } status [ 'size' ] = os . path . getsize ( full_path ) status [ 'accessed' ] = datetime . datetime . fromtimestamp ( os . path . getatime ( full_path ) ) status [ 'modified' ] = datetime . datetime . fromtimestamp ( os . path . getmtime ( full_path ) ) status [ 'changed_any' ] = datetime . datetime . fromtimestamp ( os . path . getctime ( full_path ) ) # first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read status [ 'mode' ] = os . stat ( full_path ) . st_mode status [ 'type' ] = get_type ( full_path ) return status
5,489
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L109-L119
[ "def", "_add_section", "(", "self", ",", "section", ")", ":", "section", ".", "rid", "=", "0", "plen", "=", "0", "while", "self", ".", "_merge", "and", "self", ".", "_sections", "and", "plen", "!=", "len", "(", "self", ".", "_sections", ")", ":", "plen", "=", "len", "(", "self", ".", "_sections", ")", "self", ".", "_sections", "=", "[", "s", "for", "s", "in", "self", ".", "_sections", "if", "not", "section", ".", "join", "(", "s", ")", "]", "self", ".", "_sections", ".", "append", "(", "section", ")" ]
Take a pre - existing maglim map and divide it into chunks consistent with the catalog pixels .
def split(config, dirname='split', force=False):
    """Take a pre-existing maglim map and divide it into chunks consistent
    with the catalog pixels.

    Parameters
    ----------
    config : anything accepted by the project's `Config` wrapper
    dirname : name of the output subdirectory created next to the
        configured mask directory
    force : if True, overwrite existing output files instead of skipping

    Writes one partial HEALPix map per catalog superpixel and band, with
    PIXEL/MAGLIM/FRACDET columns.
    """
    config = Config(config)
    filenames = config.getFilenames()

    # Output directory lives alongside the configured mask directory.
    basedir, basename = os.path.split(config['mask']['dirname'])
    outdir = mkdir(os.path.join(basedir, dirname))

    nside_catalog = config['coords']['nside_catalog']
    nside_pixel = config['coords']['nside_pixel']
    # NOTE(review): 'release' is never used below; kept so a missing config
    # key still fails fast here -- confirm before removing.
    release = config['data']['release'].lower()
    band1 = config['catalog']['mag_1_band']
    band2 = config['catalog']['mag_2_band']

    # Read the magnitude limit maps for both bands.
    maglimdir = config['maglim']['dirname']
    maglimfile_1 = join(maglimdir, config['maglim']['filename_1'])
    logger.info("Reading %s..." % maglimfile_1)
    maglim1 = read_map(maglimfile_1)

    maglimfile_2 = join(maglimdir, config['maglim']['filename_2'])
    logger.info("Reading %s..." % maglimfile_2)
    maglim2 = read_map(maglimfile_2)

    # Read the survey footprint (detection fraction per pixel).
    footfile = config['data']['footprint']
    logger.info("Reading %s..." % footfile)
    footprint = read_map(footfile)

    # Output mask filename templates, one per band ('%i' filled per superpixel).
    mask1 = os.path.basename(config['mask']['basename_1'])
    mask2 = os.path.basename(config['mask']['basename_2'])

    for band, maglim, base in [(band1, maglim1, mask1), (band2, maglim2, mask2)]:
        nside_maglim = hp.npix2nside(len(maglim))
        if nside_maglim != nside_pixel:
            # Deliberately a warning, not an error.
            msg = "Mask nside different from pixel nside"
            logger.warning(msg)

        # Pixels with a positive magnitude limit, grouped by catalog superpixel.
        pixels = np.nonzero(maglim > 0)[0]
        superpix = superpixel(pixels, nside_maglim, nside_catalog)
        healpix = np.unique(superpix)

        for hpx in healpix:
            outfile = join(outdir, base) % hpx
            if os.path.exists(outfile) and not force:
                logger.warning("Found %s; skipping..." % outfile)
                continue

            pix = pixels[superpix == hpx]
            # Fixed: was a bare print(hpx, len(pix)) debug leftover; route
            # through the module logger like the rest of this function.
            logger.debug("%s %s" % (hpx, len(pix)))
            logger.info('Writing %s...' % outfile)
            data = odict()
            data['PIXEL'] = pix
            data['MAGLIM'] = maglim[pix].astype('f4')
            data['FRACDET'] = footprint[pix].astype('f4')
            ugali.utils.healpix.write_partial_map(outfile, data, nside_pixel)
5,490
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/preprocess/maglims.py#L266-L332
[ "def", "system_status", "(", ")", ":", "# noqa: E501", "if", "(", "not", "hasAccess", "(", ")", ")", ":", "return", "redirectUnauthorized", "(", ")", "body", "=", "State", ".", "config", ".", "serialize", "(", "[", "\"driver\"", ",", "\"log\"", ",", "\"log-file\"", ",", "\"log-colorize\"", "]", ")", "body", ".", "update", "(", "{", "'debug'", ":", "State", ".", "options", ".", "debug", ",", "'sensitive'", ":", "State", ".", "options", ".", "sensitive", "}", ")", "return", "Response", "(", "status", "=", "200", ",", "body", "=", "body", ")" ]
Loop through pixels containing catalog objects and calculate the magnitude limit . This gets a bit convoluted due to all the different pixel resolutions ...
def run(self, field=None, simple=False, force=False):
    """Compute and write a magnitude-limit map for each catalog file and
    each requested field (both fields 1 and 2 when `field` is None),
    skipping outputs that already exist unless `force` is set.
    """
    fields = [1, 2] if field is None else [field]
    # Only the catalog entries that are not masked out.
    catalogs = self.filenames.compress(~self.filenames.mask['catalog']).data
    for filenames in catalogs:
        infile = filenames['catalog']
        for f in fields:
            outfile = filenames['mask_%i' % f]
            if os.path.exists(outfile) and not force:
                logger.info("Found %s; skipping..." % outfile)
                continue
            pixels, maglims = self.calculate(infile, f, simple)
            logger.info("Creating %s" % outfile)
            # Called for its side effect of creating the output directory.
            mkdir(os.path.dirname(outfile))
            data = odict()
            data['PIXEL'] = pixels
            data['MAGLIM'] = maglims.astype('f4')
            ugali.utils.healpix.write_partial_map(outfile, data, self.nside_pixel)
5,491
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/preprocess/maglims.py#L52-L75
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Filters the given variable set based on request parameters
def get_variable_set(self, variable_set, data):
    """Filter `variable_set` based on request parameters.

    `data['layers']` has the form '<op>:<id>,<id>,...' where op is one of
    show/include/hide/exclude (case-insensitive); unknown ops leave the
    set unchanged. When no layer parameters are given and the service is
    configured to render only the top layer, only the first variable is
    kept.
    """
    if data.get('dynamic_layers'):
        return []  # TODO: dynamic layers are not yet supported
    if data.get('layers'):
        operation, id_spec = data['layers'].split(':', 1)
        operation = operation.lower()
        wanted = [int(token) for token in id_spec.split(',')]
        if operation in ('show', 'include'):
            return [v for v in variable_set if v.index in wanted]
        if operation in ('hide', 'exclude'):
            return [v for v in variable_set if v.index not in wanted]
        return variable_set
    if self.service.render_top_layer_only:
        return [variable_set[0]]
    return variable_set
5,492
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L207-L224
[ "async", "def", "on_raw_422", "(", "self", ",", "message", ")", ":", "await", "self", ".", "_registration_completed", "(", "message", ")", "self", ".", "motd", "=", "None", "await", "self", ".", "on_connect", "(", ")" ]
Applies the correct time index to configurations
def apply_time_to_configurations(self, configurations, data):
    """Apply the request's time parameter (if any) to each configuration
    and return the same list.
    """
    time_value = data.get('time') or None
    # Only single time values are supported; for extents, use the first value.
    if time_value is not None and isinstance(time_value, (tuple, list)):
        time_value = time_value[0]
    if time_value:
        for config in configurations:
            config.set_time_index_from_datetime(time_value, best_fit=ALLOW_BEST_FIT_TIME_INDEX)
    return configurations
5,493
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L226-L241
[ "def", "Initialize", "(", "self", ")", ":", "super", "(", "AFF4MemoryStreamBase", ",", "self", ")", ".", "Initialize", "(", ")", "contents", "=", "b\"\"", "if", "\"r\"", "in", "self", ".", "mode", ":", "contents", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "CONTENT", ")", ".", "AsBytes", "(", ")", "try", ":", "if", "contents", "is", "not", "None", ":", "contents", "=", "zlib", ".", "decompress", "(", "contents", ")", "except", "zlib", ".", "error", ":", "pass", "self", ".", "fd", "=", "io", ".", "BytesIO", "(", "contents", ")", "self", ".", "size", "=", "len", "(", "contents", ")", "self", ".", "offset", "=", "0" ]
Returns default values for the get image form
def _get_form_defaults(self):
    """Return default values for the get-image form."""
    srs = str(self.service.projection)
    return {
        'response_format': 'html',
        'bbox': self.service.full_extent,
        'size': '400,400',
        'dpi': 200,
        # Two separate Proj instances, as in the original, so the two
        # entries remain independent objects.
        'image_projection': pyproj.Proj(srs),
        'bbox_projection': pyproj.Proj(srs),
        'image_format': 'png',
        'transparent': True,
    }
5,494
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L247-L259
[ "def", "connect", "(", "cls", ",", "dbname", ")", ":", "test_times_schema", "=", "\"\"\"\n CREATE TABLE IF NOT EXISTS test_times (\n file text,\n module text,\n class text,\n func text,\n elapsed float\n )\n \"\"\"", "setup_times_schema", "=", "\"\"\"\n CREATE TABLE IF NOT EXISTS setup_times (\n file text,\n module text,\n class text,\n func text,\n elapsed float\n )\n \"\"\"", "schemas", "=", "[", "test_times_schema", ",", "setup_times_schema", "]", "db_file", "=", "'{}.db'", ".", "format", "(", "dbname", ")", "cls", ".", "connection", "=", "sqlite3", ".", "connect", "(", "db_file", ")", "for", "s", "in", "schemas", ":", "cls", ".", "connection", ".", "execute", "(", "s", ")" ]
Render image interface
def get_render_configurations(self, request, **kwargs):
    """Build the render configurations for the image interface.

    Returns a (base ImageConfiguration, [RenderConfiguration, ...]) pair
    with the request's time parameter applied.
    """
    data = self.process_form_data(self._get_form_defaults(), kwargs)
    variables = self.get_variable_set(self.service.variable_set.order_by('index'), data)
    if data.get('transparent'):
        background = TRANSPARENT_BACKGROUND_COLOR
    else:
        background = DEFAULT_BACKGROUND_COLOR
    base_config = ImageConfiguration(
        extent=data['bbox'],
        size=data['size'],
        image_format=data['image_format'],
        background_color=background,
    )
    render_configs = [RenderConfiguration(v) for v in variables]
    return base_config, self.apply_time_to_configurations(render_configs, data)
5,495
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L284-L297
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "start", "=", "''", "tsmatch", "=", "compile", "(", "r'/(\\d+)-'", ")", ".", "search", "(", "imageUrl", ")", "if", "tsmatch", ":", "start", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "tsmatch", ".", "group", "(", "1", ")", ")", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "else", ":", "# There were only chapter 1, page 4 and 5 not matching when writing", "# this...", "start", "=", "'2015-04-11x'", "return", "start", "+", "\"-\"", "+", "pageUrl", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]" ]
Returns default values for the identify form
def _get_form_defaults(self):
    """Return default values for the identify form."""
    defaults = dict(
        response_format='html',
        geometry_type='esriGeometryPoint',
        projection=pyproj.Proj(str(self.service.projection)),
        return_geometry=True,
        maximum_allowable_offset=2,
        geometry_precision=3,
        return_z=False,
        return_m=False,
    )
    return defaults
5,496
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L303-L315
[ "def", "run", "(", "self", ")", ":", "self", ".", "run_plugins", "(", ")", "while", "True", ":", "# Reload plugins and config if either the config file or plugin", "# directory are modified.", "if", "self", ".", "_config_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_config_file_path", ")", "or", "self", ".", "_plugin_mod_time", "!=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "_plugin_path", ")", ":", "self", ".", "thread_manager", ".", "kill_all_threads", "(", ")", "self", ".", "output_dict", ".", "clear", "(", ")", "self", ".", "reload", "(", ")", "self", ".", "run_plugins", "(", ")", "self", ".", "output_to_bar", "(", "json", ".", "dumps", "(", "self", ".", "_remove_empty_output", "(", ")", ")", ")", "time", ".", "sleep", "(", "self", ".", "config", ".", "general", "[", "'interval'", "]", ")" ]
Retrieve a specific term and condition .
def get_terms(self, name, revision=None):
    """Retrieve a specific term and condition.

    `name` identifies the terms document; a truthy `revision` selects a
    specific revision, otherwise the server's default is returned.
    Raises ServerError when the response cannot be parsed.
    """
    endpoint = '{}terms/{}'.format(self.url, name)
    if revision:
        endpoint = '{}?revision={}'.format(endpoint, revision)
    json = make_request(endpoint, timeout=self.timeout, client=self._client)
    try:
        # The response is always a list with a single element.
        data = json[0]
        return Term(
            name=data['name'],
            title=data.get('title'),
            revision=data['revision'],
            created_on=datetime.datetime.strptime(
                data['created-on'], "%Y-%m-%dT%H:%M:%SZ"),
            content=data['content'],
        )
    except (KeyError, TypeError, ValueError, IndexError) as err:
        log.info('cannot process terms: invalid JSON response: {!r}'.format(json))
        raise ServerError('unable to get terms for {}: {}'.format(name, err))
5,497
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/terms.py#L38-L67
[ "def", "from_structures", "(", "cls", ",", "structures", ",", "authors", ",", "projects", "=", "None", ",", "references", "=", "''", ",", "remarks", "=", "None", ",", "data", "=", "None", ",", "histories", "=", "None", ",", "created_at", "=", "None", ")", ":", "data", "=", "[", "{", "}", "]", "*", "len", "(", "structures", ")", "if", "data", "is", "None", "else", "data", "histories", "=", "[", "[", "]", "]", "*", "len", "(", "structures", ")", "if", "histories", "is", "None", "else", "histories", "snl_list", "=", "[", "]", "for", "i", ",", "struct", "in", "enumerate", "(", "structures", ")", ":", "snl", "=", "StructureNL", "(", "struct", ",", "authors", ",", "projects", "=", "projects", ",", "references", "=", "references", ",", "remarks", "=", "remarks", ",", "data", "=", "data", "[", "i", "]", ",", "history", "=", "histories", "[", "i", "]", ",", "created_at", "=", "created_at", ")", "snl_list", ".", "append", "(", "snl", ")", "return", "snl_list" ]
Opens and returns the NetCDF dataset associated with a service or returns a previously - opened dataset
def open_dataset(self, service):
    """Return the NetCDF dataset handle, opening it on first use.

    NOTE(review): once cached, the same handle is returned regardless of
    the `service` argument -- looks like each instance serves a single
    service; confirm against callers.
    """
    if self.dataset:
        return self.dataset
    data_file = os.path.join(SERVICE_DATA_ROOT, service.data_path)
    self.dataset = netCDF4.Dataset(data_file, 'r')
    return self.dataset
5,498
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/views.py#L80-L86
[ "def", "delete", "(", "request", ",", "obj_id", "=", "None", ")", ":", "data", "=", "request", ".", "DELETE", "or", "json", ".", "loads", "(", "request", ".", "body", ")", "guids", "=", "data", ".", "get", "(", "'guids'", ")", ".", "split", "(", "','", ")", "objects", "=", "getObjectsFromGuids", "(", "guids", ")", "gallery", "=", "Gallery", ".", "objects", ".", "get", "(", "pk", "=", "obj_id", ")", "LOGGER", ".", "info", "(", "'{} removed {} from {}'", ".", "format", "(", "request", ".", "user", ".", "email", ",", "guids", ",", "gallery", ")", ")", "for", "o", "in", "objects", ":", "if", "isinstance", "(", "o", ",", "Image", ")", ":", "gallery", ".", "images", ".", "remove", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "Video", ")", ":", "gallery", ".", "videos", ".", "remove", "(", "o", ")", "res", "=", "Result", "(", ")", "return", "JsonResponse", "(", "res", ".", "asDict", "(", ")", ")" ]
Returns this bbox normalized to match the ratio of the given size .
def _normalize_bbox ( self , bbox , size ) : bbox_ratio = float ( bbox . width ) / float ( bbox . height ) size_ratio = float ( size [ 0 ] ) / float ( size [ 1 ] ) if round ( size_ratio , 4 ) == round ( bbox_ratio , 4 ) : return bbox else : if bbox . height * size_ratio >= bbox . width : diff = bbox . height * size_ratio - bbox . width return BBox ( ( bbox . xmin - diff / 2 , bbox . ymin , bbox . xmax + diff / 2 , bbox . ymax ) , bbox . projection ) else : diff = abs ( bbox . width / size_ratio - bbox . height ) return BBox ( ( bbox . xmin , bbox . ymin - diff / 2 , bbox . xmax , bbox . ymax + diff / 2 ) , bbox . projection )
5,499
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/views.py#L148-L162
[ "def", "_read_page_header", "(", "file_obj", ")", ":", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "page_header", "=", "parquet_thrift", ".", "PageHeader", "(", ")", "page_header", ".", "read", "(", "pin", ")", "return", "page_header" ]