query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Get number of items in a generator function .
def size_of_generator(generator, memory_efficient=True):
    """Count the number of items yielded by a generator (or any iterable).

    Note: the generator is consumed by this call.

    :param generator: the iterable to count.
    :param memory_efficient: when True, count items one at a time without
        materializing the sequence; when False, load everything into a
        list first (O(n) memory).
    :return: the number of items.
    """
    if memory_efficient:
        # sum over a generator expression counts without storing items
        return sum(1 for _ in generator)
    return len(list(generator))
6,100
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L443-L460
[ "def", "configure", "(", "self", ",", "organization", ",", "base_url", "=", "''", ",", "ttl", "=", "''", ",", "max_ttl", "=", "''", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'organization'", ":", "organization", ",", "'base_url'", ":", "base_url", ",", "'ttl'", ":", "ttl", ",", "'max_ttl'", ":", "max_ttl", ",", "}", "api_path", "=", "'/v1/auth/{mount_point}/config'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
validate function form OrValidator
def validate(self, value):
    """Validate ``value`` against every validator of this OrValidator.

    Validators that accept the value are recorded in
    ``self._used_validator``. If any validator failed, a combined
    ValidatorException is raised; otherwise the unchanged value is
    returned.
    """
    collected_errors = []
    self._used_validator = []
    for validator in self._validators:
        try:
            validator.validate(value)
        except ValidatorException as exc:
            collected_errors.append(exc)
        except Exception as exc:
            # wrap anything unexpected so callers see a uniform type
            collected_errors.append(ValidatorException("Unknown Error", exc))
        else:
            self._used_validator.append(validator)
    if collected_errors:
        raise ValidatorException.from_list(collected_errors)
    return value
6,101
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/validators/__init__.py#L598-L617
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Gets the total value of the bar according to it s time signature
def GetTotalValue(self):
    """Work out the total duration value of this bar from its time signature.

    Returns "" when no meter is attached. Otherwise encodes beats/type as
    a lilypond-style duration token ("1", "1.", "2", "2." or ".").
    """
    if not hasattr(self, "meter"):
        return ""
    duration = ""
    fraction = self.meter.beats / self.meter.type
    if fraction == 1:
        duration = "1"
    else:
        if fraction > 1:
            duration = "1."
        if fraction < 1:
            if fraction >= 0.5:
                # treat the half as a "2" and keep the remainder
                fraction -= 0.5
                duration = "2"
            if fraction == 0.25:
                # a leftover quarter dots the value
                duration += "."
    return duration
6,102
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py#L133-L151
[ "def", "__init_config_params", "(", "self", ",", "config", ")", ":", "if", "self", ".", "version", ">=", "(", "2", ",", "4", ")", ":", "params", "=", "config", ".", "get", "(", "'setParameter'", ",", "{", "}", ")", "# Set enableTestCommands by default but allow enableTestCommands:0.", "params", ".", "setdefault", "(", "'enableTestCommands'", ",", "1", ")", "# Reduce transactionLifetimeLimitSeconds for faster driver testing.", "if", "self", ".", "version", ">=", "(", "4", ",", "1", ")", "and", "not", "self", ".", "is_mongos", ":", "params", ".", "setdefault", "(", "'transactionLifetimeLimitSeconds'", ",", "3", ")", "# Increase transaction lock timeout to reduce the chance that tests", "# fail with LockTimeout: \"Unable to acquire lock {...} within 5ms\".", "if", "self", ".", "version", ">=", "(", "4", ",", "0", ")", "and", "not", "self", ".", "is_mongos", ":", "params", ".", "setdefault", "(", "'maxTransactionLockRequestTimeoutMillis'", ",", "25", ")", "config", "[", "'setParameter'", "]", "=", "params", "compressors", "=", "config", ".", "get", "(", "'networkMessageCompressors'", ")", "if", "compressors", "is", "None", ":", "if", "self", ".", "version", ">=", "(", "4", ",", "1", ",", "7", ")", ":", "# SERVER-38168 added zstd support in 4.1.7.", "config", "[", "'networkMessageCompressors'", "]", "=", "'zstd,zlib,snappy,noop'", "elif", "self", ".", "version", ">=", "(", "3", ",", "5", ",", "9", ")", ":", "# SERVER-27310 added zlib support in 3.5.9.", "config", "[", "'networkMessageCompressors'", "]", "=", "'zlib,snappy,noop'", "elif", "self", ".", "version", ">=", "(", "3", ",", "4", ")", ":", "config", "[", "'networkMessageCompressors'", "]", "=", "'snappy,noop'" ]
Key as in musical key, not index.
def GetLastKey(self, voice=1):
    """Find the most recent key signature (musical key, not index) for a voice.

    Searches backward through the given voice for a KeyNode; falls back to
    this node's own ``key`` attribute when the voice is missing or contains
    no key, and returns None when neither exists.

    :param voice: index of the voice (child) to search. Defaults to 1.
    :return: the key object, or None.
    """
    voice_obj = self.GetChild(voice)
    if voice_obj is not None:
        key = BackwardSearch(KeyNode, voice_obj, 1)
        if key is not None:
            return key
    # Single shared fallback: the original duplicated this hasattr/return
    # block in both the "no voice" and "no key found" branches.
    if hasattr(self, "key"):
        return self.key
    return None
6,103
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py#L165-L178
[ "def", "reset", "(", ")", "->", "None", ":", "from", "wdom", ".", "document", "import", "get_new_document", ",", "set_document", "from", "wdom", ".", "element", "import", "Element", "from", "wdom", ".", "server", "import", "_tornado", "from", "wdom", ".", "window", "import", "customElements", "set_document", "(", "get_new_document", "(", ")", ")", "_tornado", ".", "connections", ".", "clear", "(", ")", "_tornado", ".", "set_application", "(", "_tornado", ".", "Application", "(", ")", ")", "Element", ".", "_elements_with_id", ".", "clear", "(", ")", "Element", ".", "_element_buffer", ".", "clear", "(", ")", "customElements", ".", "reset", "(", ")" ]
simple method that puts in spaces every 10 characters
# Splits a long text string into lilypond "\markup { \column { \line ... } }"
# rows (roughly every 10 characters) so it wraps in the rendered output.
# If the input already contains \r or \n characters, those are used as the
# row boundaries instead of fixed-width chunks.
# NOTE(review): the fixed-width branch looks buggy — the inner loop
# `for i in range ( index )` re-copies characters 0..i*10 for every chunk
# (and reuses the outer loop variable `i`) instead of emitting the i-th
# 10-character slice; confirm against rendered output before relying on it.
# Left byte-identical: the statement ordering and shared mutable state make
# a behavior-preserving rewrite too risky without tests.
def SplitString ( value ) : string_length = len ( value ) chunks = int ( string_length / 10 ) string_list = list ( value ) lstring = "" if chunks > 1 : lstring = "\\markup { \n\r \column { " for i in range ( int ( chunks ) ) : lstring += "\n\r\r \\line { \"" index = i * 10 for i in range ( index ) : lstring += string_list [ i ] lstring += "\" \r\r}" lstring += "\n\r } \n }" if lstring == "" : indexes = [ i for i in range ( len ( string_list ) ) if string_list [ i ] == "\r" or string_list [ i ] == "\n" ] lstring = "\\markup { \n\r \column { " if len ( indexes ) == 0 : lstring += "\n\r\r \\line { \"" + "" . join ( string_list ) + "\" \n\r\r } \n\r } \n }" else : rows = [ ] row_1 = string_list [ : indexes [ 0 ] ] rows . append ( row_1 ) for i in range ( len ( indexes ) ) : start = indexes [ i ] if i != len ( indexes ) - 1 : end = indexes [ i + 1 ] else : end = len ( string_list ) row = string_list [ start : end ] rows . append ( row ) for row in rows : lstring += "\n\r\r \\line { \"" lstring += "" . join ( row ) lstring += "\" \r\r}" lstring += "\n\r } \n }" return lstring
6,104
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/helpers.py#L4-L46
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
little function that converts numbers to words . This could be more efficient and won't work if the number is bigger than 999 but it's for stave names and I doubt any part would have more than 10 staves let alone 999 .
def NumbersToWords(number):
    """Convert an integer in the range 0-999 to English words.

    Used for stave names, so the limited range is fine in practice. Words
    are concatenated without spaces ("threehundredandfortyfive"), matching
    the original output style. Numbers of four or more digits return "",
    as before.

    :param number: non-negative int, expected 0-999.
    :return: the number spelled out, or "" when it has more than 3 digits.
    """
    units = ['one', 'two', 'three', 'four', 'five',
             'six', 'seven', 'eight', 'nine']
    tens = ['ten', 'twenty', 'thirty', 'forty', 'fifty',
            'sixty', 'seventy', 'eighty', 'ninety']
    if number == 0:
        return "zero"
    digits = str(number)
    output = ""
    if len(digits) == 3:
        output += units[int(digits[0]) - 1] + "hundred"
        # Bug fix: the original compared string digits to int 0 ("0" != 0
        # is always True) and so indexed tens[-1]/units[-1] for zero digits,
        # e.g. NumbersToWords(100) produced "onehundredandninetynine".
        if digits[1] != '0':
            output += "and" + tens[int(digits[1]) - 1]
        if digits[2] != '0':
            output += units[int(digits[2]) - 1]
    elif len(digits) == 2:
        output += tens[int(digits[0]) - 1]
        if digits[1] != '0':
            output += units[int(digits[1]) - 1]
    elif len(digits) == 1:
        output += units[int(digits[0]) - 1]
    return output
6,105
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/helpers.py#L63-L108
[ "def", "_unbind_topics", "(", "self", ",", "topics", ")", ":", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "status", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "tracing", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "streaming", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "response", ")" ]
method to calculate the maximum total lilypond value for a measure without a time signature
def CheckTotals(self):
    """Ask every staff of this part to recompute its measure totals.

    Purely delegates to each child staff's CheckTotals; used for measures
    without an explicit time signature.
    """
    for staff_index in self.GetChildrenIndexes():
        self.getStaff(staff_index).CheckTotals()
6,106
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py#L60-L65
[ "def", "rows", "(", "self", ")", ":", "# We use DataFrames for serialization of IndexedRows from", "# Java, so we first convert the RDD of rows to a DataFrame", "# on the Scala/Java side. Then we map each Row in the", "# DataFrame back to an IndexedRow on this side.", "rows_df", "=", "callMLlibFunc", "(", "\"getIndexedRows\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "rows", "=", "rows_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "IndexedRow", "(", "row", "[", "0", "]", ",", "row", "[", "1", "]", ")", ")", "return", "rows" ]
method which checks the bar before the current for changes we need to make to its barlines
def CheckPreviousBarline(self, staff):
    """Fix up the ending marker on the second-to-last measure of a staff.

    If the measure before last ends with a volta/ending on its right
    barline and the last measure does not continue it on its left barline
    (or has no left barline at all), that ending is marked "discontinue".

    :param staff: index of the staff to inspect.
    """
    previous = self.getMeasureAtPosition(-2, staff)
    last = self.getMeasureAtPosition(-1, staff)
    if previous is None or last is None:
        return
    right_barline = previous.GetBarline("right")
    if right_barline is None or not hasattr(right_barline, "ending"):
        return
    left_barline = last.GetBarline("left")
    if left_barline is None or not hasattr(left_barline, "ending"):
        right_barline.ending.type = "discontinue"
6,107
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py#L78-L91
[ "def", "get_registration_id_info", "(", "self", ",", "registration_id", ")", ":", "response", "=", "self", ".", "registration_info_request", "(", "registration_id", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "response", ".", "json", "(", ")", "return", "None" ]
Selects the appropriate method to decode next bencode element and returns the result .
def __parse(self) -> object:
    """Decode the bencode element starting at self.idx and return it.

    Dispatches on the current byte: a digit starts a length-prefixed byte
    string, 'i' an integer, 'd' a dictionary, 'l' a list. Raises
    bencodepy.DecodingError on truncated input or an unknown token.
    """
    token = self.data[self.idx:self.idx + 1]
    if token.isdigit():
        # "<len>:<bytes>" string element
        str_len = int(self.__read_to(b':'))
        return self.__read(str_len)
    if token == b'i':
        # "i<number>e" integer element
        self.idx += 1
        return int(self.__read_to(b'e'))
    if token == b'd':
        return self.__parse_dict()
    if token == b'l':
        return self.__parse_list()
    if token == b'':
        raise bencodepy.DecodingError(
            'Unexpected End of File at index position of {0}.'.format(str(self.idx)))
    raise bencodepy.DecodingError(
        'Invalid token character ({0}) at position {1}.'.format(str(token), str(self.idx)))
6,108
https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L33-L50
[ "def", "ProcessResponse", "(", "self", ",", "client_id", ",", "response", ")", ":", "precondition", ".", "AssertType", "(", "client_id", ",", "Text", ")", "downsampled", "=", "rdf_client_stats", ".", "ClientStats", ".", "Downsampled", "(", "response", ")", "if", "data_store", ".", "AFF4Enabled", "(", ")", ":", "urn", "=", "rdf_client", ".", "ClientURN", "(", "client_id", ")", ".", "Add", "(", "\"stats\"", ")", "with", "aff4", ".", "FACTORY", ".", "Create", "(", "urn", ",", "aff4_stats", ".", "ClientStats", ",", "token", "=", "self", ".", "token", ",", "mode", "=", "\"w\"", ")", "as", "stats_fd", ":", "# Only keep the average of all values that fall within one minute.", "stats_fd", ".", "AddAttribute", "(", "stats_fd", ".", "Schema", ".", "STATS", ",", "downsampled", ")", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "data_store", ".", "REL_DB", ".", "WriteClientStats", "(", "client_id", ",", "downsampled", ")", "return", "downsampled" ]
Start of decode process . Returns final results .
def decode(self) -> Iterable:
    """Start the decode process and return the final result.

    Data whose top-level element is not a dict or list is wrapped in a
    tuple of successive elements.
    """
    first = self.data[0:1]
    if first in (b'd', b'l'):
        return self.__parse()
    return self.__wrap_with_tuple()
6,109
https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L52-L56
[ "def", "event_availability_array", "(", "events", ")", ":", "array", "=", "np", ".", "ones", "(", "(", "len", "(", "events", ")", ",", "len", "(", "events", ")", ")", ")", "for", "row", ",", "event", "in", "enumerate", "(", "events", ")", ":", "for", "col", ",", "other_event", "in", "enumerate", "(", "events", ")", ":", "if", "row", "!=", "col", ":", "tags", "=", "set", "(", "event", ".", "tags", ")", "events_share_tag", "=", "len", "(", "tags", ".", "intersection", "(", "other_event", ".", "tags", ")", ")", ">", "0", "if", "(", "other_event", "in", "event", ".", "unavailability", ")", "or", "events_share_tag", ":", "array", "[", "row", ",", "col", "]", "=", "0", "array", "[", "col", ",", "row", "]", "=", "0", "return", "array" ]
Returns a tuple of all nested bencode elements .
def __wrap_with_tuple(self) -> tuple:
    """Parse successive top-level elements until the data is exhausted
    and return them as a tuple."""
    elements = []
    total = len(self.data)
    while self.idx < total:
        elements.append(self.__parse())
    return tuple(elements)
6,110
https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L58-L64
[ "def", "prepare_hmet_lsm", "(", "self", ",", "lsm_data_var_map_array", ",", "hmet_ascii_output_folder", "=", "None", ",", "netcdf_file_path", "=", "None", ")", ":", "if", "self", ".", "l2g", "is", "None", ":", "raise", "ValueError", "(", "\"LSM converter not loaded ...\"", ")", "with", "tmp_chdir", "(", "self", ".", "project_manager", ".", "project_directory", ")", ":", "# GSSHA simulation does not work after HMET data is finished", "self", ".", "_update_simulation_end_from_lsm", "(", ")", "# HMET CARDS", "if", "netcdf_file_path", "is", "not", "None", ":", "self", ".", "l2g", ".", "lsm_data_to_subset_netcdf", "(", "netcdf_file_path", ",", "lsm_data_var_map_array", ")", "self", ".", "_update_card", "(", "\"HMET_NETCDF\"", ",", "netcdf_file_path", ",", "True", ")", "self", ".", "project_manager", ".", "deleteCard", "(", "'HMET_ASCII'", ",", "self", ".", "db_session", ")", "else", ":", "if", "\"{0}\"", "in", "hmet_ascii_output_folder", "and", "\"{1}\"", "in", "hmet_ascii_output_folder", ":", "hmet_ascii_output_folder", "=", "hmet_ascii_output_folder", ".", "format", "(", "self", ".", "simulation_start", ".", "strftime", "(", "\"%Y%m%d%H%M\"", ")", ",", "self", ".", "simulation_end", ".", "strftime", "(", "\"%Y%m%d%H%M\"", ")", ")", "self", ".", "l2g", ".", "lsm_data_to_arc_ascii", "(", "lsm_data_var_map_array", ",", "main_output_folder", "=", "os", ".", "path", ".", "join", "(", "self", ".", "gssha_directory", ",", "hmet_ascii_output_folder", ")", ")", "self", ".", "_update_card", "(", "\"HMET_ASCII\"", ",", "os", ".", "path", ".", "join", "(", "hmet_ascii_output_folder", ",", "'hmet_file_list.txt'", ")", ",", "True", ")", "self", ".", "project_manager", ".", "deleteCard", "(", "'HMET_NETCDF'", ",", "self", ".", "db_session", ")", "# UPDATE GMT CARD", "self", ".", "_update_gmt", "(", ")" ]
Returns an Ordered Dictionary of nested bencode elements .
def __parse_dict(self) -> OrderedDict:
    """Decode a 'd...e' bencode dictionary into an OrderedDict.

    self.idx sits on the leading 'd'; elements alternate key, value until
    the closing 'e'.
    """
    self.idx += 1  # step over 'd'
    result = OrderedDict()
    pending_key = None
    while self.data[self.idx:self.idx + 1] != b'e':
        if pending_key is None:
            pending_key = self.__parse()
        else:
            result[pending_key] = self.__parse()
            pending_key = None
    self.idx += 1  # step over the closing 'e'
    return result
6,111
https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L66-L78
[ "def", "check_update", "(", "from_currency", ",", "to_currency", ")", ":", "if", "from_currency", "not", "in", "ccache", ":", "# if currency never get converted before", "ccache", "[", "from_currency", "]", "=", "{", "}", "if", "ccache", "[", "from_currency", "]", ".", "get", "(", "to_currency", ")", "is", "None", ":", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "=", "{", "'last_update'", ":", "0", "}", "last_update", "=", "float", "(", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "[", "'last_update'", "]", ")", "if", "time", ".", "time", "(", ")", "-", "last_update", ">=", "30", "*", "60", ":", "# if last update is more than 30 min ago", "return", "True", "return", "False" ]
Returns a list of nested bencode elements .
def __parse_list(self) -> list:
    """Decode an 'l...e' bencode list; self.idx sits on the leading 'l'."""
    self.idx += 1  # step over 'l'
    items = []
    while self.data[self.idx:self.idx + 1] != b'e':
        items.append(self.__parse())
    self.idx += 1  # step over the closing 'e'
    return items
6,112
https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L80-L87
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "# Open Connection", "self", ".", "influx", "=", "InfluxDBClient", "(", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "username", ",", "self", ".", "password", ",", "self", ".", "database", ",", "self", ".", "ssl", ")", "# Log", "self", ".", "log", ".", "debug", "(", "\"InfluxdbHandler: Established connection to \"", "\"%s:%d/%s.\"", ",", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "database", ")", "except", "Exception", "as", "ex", ":", "# Log Error", "self", ".", "_throttle_error", "(", "\"InfluxdbHandler: Failed to connect to \"", "\"%s:%d/%s. %s\"", ",", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "database", ",", "ex", ")", "# Close Socket", "self", ".", "_close", "(", ")", "return" ]
Method to remove and return all children of current node
def PopAllChildren(self):
    """Remove every child of this node and return them in index order."""
    return [self.PopChild(index) for index in self.GetChildrenIndexes()]
6,113
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/BaseTree.py#L215-L226
[ "def", "get_lat_lon_time_from_nmea", "(", "nmea_file", ",", "local_time", "=", "True", ")", ":", "with", "open", "(", "nmea_file", ",", "\"r\"", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lines", "=", "[", "l", ".", "rstrip", "(", "\"\\n\\r\"", ")", "for", "l", "in", "lines", "]", "# Get initial date", "for", "l", "in", "lines", ":", "if", "\"GPRMC\"", "in", "l", ":", "data", "=", "pynmea2", ".", "parse", "(", "l", ")", "date", "=", "data", ".", "datetime", ".", "date", "(", ")", "break", "# Parse GPS trace", "points", "=", "[", "]", "for", "l", "in", "lines", ":", "if", "\"GPRMC\"", "in", "l", ":", "data", "=", "pynmea2", ".", "parse", "(", "l", ")", "date", "=", "data", ".", "datetime", ".", "date", "(", ")", "if", "\"$GPGGA\"", "in", "l", ":", "data", "=", "pynmea2", ".", "parse", "(", "l", ")", "timestamp", "=", "datetime", ".", "datetime", ".", "combine", "(", "date", ",", "data", ".", "timestamp", ")", "lat", ",", "lon", ",", "alt", "=", "data", ".", "latitude", ",", "data", ".", "longitude", ",", "data", ".", "altitude", "points", ".", "append", "(", "(", "timestamp", ",", "lat", ",", "lon", ",", "alt", ")", ")", "points", ".", "sort", "(", ")", "return", "points" ]
Shrinks input_file to output_file .
def _process_file(input_file, output_file, apikey):
    """Shrink input_file through the TinyPNG API and write it to output_file.

    On success the compressed bytes are written out. Fatal API errors raise
    StopProcessing; an internal server error raises RetryProcessing so the
    caller can retry. The API response is returned in all non-raising cases.
    """
    source_bytes = read_binary(input_file)
    result = shrink(source_bytes, apikey)
    if result.success and result.bytes:
        write_binary(output_file, result.bytes)
    elif result.errno in FATAL_ERRORS:
        # e.g. unauthorized / monthly quota exceeded — abort the whole run
        raise StopProcessing(result)
    elif result.errno == TinyPNGError.InternalServerError:
        raise RetryProcessing(result)
    return result
6,114
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L29-L49
[ "def", "set_cell", "(", "self", ",", "i", ",", "j", ",", "value", ")", ":", "bool_tests", "=", "[", "value", "in", "self", ".", "_possibles", "[", "i", "]", "[", "j", "]", ",", "value", "in", "self", ".", "_poss_rows", "[", "i", "]", ",", "value", "in", "self", ".", "_poss_cols", "[", "j", "]", ",", "value", "in", "self", ".", "_poss_box", "[", "(", "i", "//", "self", ".", "order", ")", "*", "self", ".", "order", "+", "(", "j", "//", "self", ".", "order", ")", "]", ",", "value", "not", "in", "self", ".", "row", "(", "i", ")", ",", "value", "not", "in", "self", ".", "col", "(", "j", ")", ",", "value", "not", "in", "self", ".", "box", "(", "i", ",", "j", ")", "]", "if", "all", "(", "bool_tests", ")", ":", "self", "[", "i", "]", "[", "j", "]", "=", "value", "else", ":", "raise", "SudokuHasNoSolutionError", "(", "\"This value cannot be set here!\"", ")" ]
Optimize and save png files form source to target directory .
# Walk every .png under `source`, shrink each one through the TinyPNG API and
# write the result to the mirrored path under `target`, driving `handler`
# callbacks (on_start/on_skip/on_pre_item/on_retry/on_post_item/on_stop/
# on_finish) along the way. Existing outputs are skipped unless `overwrite`
# is set. StopProcessing (fatal API errors) aborts the whole run;
# RetryProcessing (server errors) retries the same file up to 9 times with a
# sleep between attempts before moving on. The control flow is deliberately
# stateful (current_file / last_processed / attempts interact with the
# try/finally) — left byte-identical.
def process_directory ( source , target , apikey , handler , overwrite = False ) : handler . on_start ( ) attempts = defaultdict ( lambda : 0 ) input_files = files_with_exts ( source , suffix = '.png' ) next_ = lambda : next ( input_files , None ) current_file = next_ ( ) response = None last_processed = None while current_file : output_file = target_path ( source , target , current_file ) if os . path . exists ( output_file ) and not overwrite : handler . on_skip ( current_file , source = source ) current_file = next_ ( ) continue try : handler . on_pre_item ( current_file ) last_processed = current_file response = _process_file ( current_file , output_file , apikey ) current_file = next_ ( ) except StopProcessing as e : # Unauthorized or exceed number of allowed monthly calls response = e . response handler . on_stop ( response . errmsg ) break except RetryProcessing as e : # handle InternalServerError on tinypng side response = e . response if attempts [ current_file ] < 9 : handler . on_retry ( current_file ) time . sleep ( TINYPNG_SLEEP_SEC ) attempts [ current_file ] += 1 else : current_file = next_ ( ) finally : handler . on_post_item ( response , input_file = last_processed , source = source ) handler . on_finish ( output_dir = target )
6,115
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L52-L104
[ "def", "devices", "(", "self", ",", "timeout", "=", "None", ")", ":", "# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw", "# from Android system/core/adb/transport.c statename()", "re_device_info", "=", "re", ".", "compile", "(", "r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'", ")", "devices", "=", "[", "]", "lines", "=", "self", ".", "command_output", "(", "[", "\"devices\"", ",", "\"-l\"", "]", ",", "timeout", "=", "timeout", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "line", "==", "'List of devices attached '", ":", "continue", "match", "=", "re_device_info", ".", "match", "(", "line", ")", "if", "match", ":", "device", "=", "{", "'device_serial'", ":", "match", ".", "group", "(", "1", ")", ",", "'state'", ":", "match", ".", "group", "(", "2", ")", "}", "remainder", "=", "line", "[", "match", ".", "end", "(", "2", ")", ":", "]", ".", "strip", "(", ")", "if", "remainder", ":", "try", ":", "device", ".", "update", "(", "dict", "(", "[", "j", ".", "split", "(", "':'", ")", "for", "j", "in", "remainder", ".", "split", "(", "' '", ")", "]", ")", ")", "except", "ValueError", ":", "self", ".", "_logger", ".", "warning", "(", "'devices: Unable to parse '", "'remainder for device %s'", "%", "line", ")", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
Batch compression .
def _main(args):
    """CLI entry point for batch compression.

    Validates the API key and input/output directories, then hands off to
    process_directory with a ScreenHandler for progress output. Exits with
    status 1 on bad arguments; a KeyboardInterrupt still reports the
    output directory via the handler.
    """
    if not args.apikey:
        print("\nPlease provide TinyPNG API key")
        print("To obtain key visit https://api.tinypng.com/developers\n")
        sys.exit(1)
    input_dir = realpath(args.input)
    output_dir = realpath(args.output) if args.output else input_dir + "-output"
    if input_dir == output_dir:
        print("\nPlease specify different output directory\n")
        sys.exit(1)
    handler = ScreenHandler()
    try:
        process_directory(input_dir, output_dir, args.apikey, handler)
    except KeyboardInterrupt:
        handler.on_finish(output_dir=output_dir)
6,116
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L107-L138
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Returns an ENVI Py Engine Task object . See ENVI Py Engine Task for examples .
def task(self, task_name):
    """Return an ENVI Py Engine Task object for ``task_name``.

    The task URI is the engine name and task name joined with a colon;
    the engine's working directory is passed through.
    """
    task_uri = ':'.join((self._engine_name, task_name))
    return Task(uri=task_uri, cwd=self._cwd)
6,117
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/engine.py#L27-L34
[ "def", "restore_defaults_ratio", "(", "self", ")", ":", "# Set the flag to true because user ask to.", "self", ".", "is_restore_default", "=", "True", "# remove current default ratio", "for", "i", "in", "reversed", "(", "list", "(", "range", "(", "self", ".", "container_layout", ".", "count", "(", ")", ")", ")", ")", ":", "widget", "=", "self", ".", "container_layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", "if", "widget", "is", "not", "None", ":", "widget", ".", "setParent", "(", "None", ")", "# reload default ratio", "self", ".", "restore_default_values_page", "(", ")" ]
Returns a list of all tasks known to the engine .
def tasks(self):
    """Return a list of all tasks known to the engine.

    Runs the engine's built-in QueryTaskCatalog task and extracts the
    TASKS output parameter.
    """
    query = {'taskName': 'QueryTaskCatalog'}
    result = taskengine.execute(query, self._engine_name, cwd=self._cwd)
    return result['outputParameters']['TASKS']
6,118
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/engine.py#L37-L45
[ "def", "rgba_to_int", "(", "cls", ",", "red", ",", "green", ",", "blue", ",", "alpha", ")", ":", "red", "=", "unwrap", "(", "red", ")", "green", "=", "unwrap", "(", "green", ")", "blue", "=", "unwrap", "(", "blue", ")", "alpha", "=", "unwrap", "(", "alpha", ")", "if", "red", "is", "None", "or", "green", "is", "None", "or", "blue", "is", "None", ":", "return", "None", "if", "alpha", "is", "None", ":", "alpha", "=", "255", "r", "=", "red", "<<", "24", "g", "=", "green", "<<", "16", "b", "=", "blue", "<<", "8", "a", "=", "alpha", "<<", "0", "rgba_int", "=", "r", "+", "g", "+", "b", "+", "a", "if", "(", "rgba_int", ">", "(", "2", "**", "31", "-", "1", ")", ")", ":", "# convert to signed 32-bit int", "rgba_int", "=", "rgba_int", "-", "2", "**", "32", "return", "rgba_int" ]
Execute a query returning its result
def execute(query, auth=None, client=urllib_request.build_opener()):
    """Execute a query and return its result.

    Looks up an ``__execute__`` override on the query's type, falling back
    to the default execute method, and invokes it with the client and the
    normalized auth callable.
    """
    executor = getattr(type(query), '__execute__', _default_execute_method)
    return executor(query, client, _make_auth(auth))
6,119
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/query.py#L192-L218
[ "def", "_api_arguments", "(", "self", ")", ":", "# TC main >= 4.4 token will be passed to jobs.", "self", ".", "add_argument", "(", "'--tc_token'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token'", ")", "self", ".", "add_argument", "(", "'--tc_token_expires'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token Expiration Time'", ",", "type", "=", "int", ",", ")", "# TC Integrations Server or TC main < 4.4", "self", ".", "add_argument", "(", "'--api_access_id'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Access ID'", ",", "required", "=", "False", ")", "self", ".", "add_argument", "(", "'--api_secret_key'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Secret Key'", ",", "required", "=", "False", ")", "# Validate ThreatConnect SSL certificate", "self", ".", "add_argument", "(", "'--tc_verify'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Validate the ThreatConnect SSL Cert'", ")" ]
Execute a query asynchronously returning its result
def execute_async(query, auth=None, client=event_loop):
    """Execute a query asynchronously and return its result.

    Looks up an ``__execute_async__`` override on the query's type, falling
    back to Query.__execute_async__, and invokes it with the client and the
    normalized auth callable.
    """
    executor = getattr(type(query), '__execute_async__',
                       Query.__execute_async__)
    return executor(query, client, _make_auth(auth))
6,120
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/query.py#L221-L252
[ "def", "_handle_websocket_headers", "(", "self", ",", "handler", ":", "WebSocketHandler", ")", "->", "None", ":", "fields", "=", "(", "\"Host\"", ",", "\"Sec-Websocket-Key\"", ",", "\"Sec-Websocket-Version\"", ")", "if", "not", "all", "(", "map", "(", "lambda", "f", ":", "handler", ".", "request", ".", "headers", ".", "get", "(", "f", ")", ",", "fields", ")", ")", ":", "raise", "ValueError", "(", "\"Missing/Invalid WebSocket headers\"", ")" ]
Return a random integer N such that a < = N < = b .
def secure_randint(min_value, max_value, system_random=None):
    """Return a random integer N such that min_value <= N <= max_value,
    drawn from a cryptographically secure source.

    :param system_random: optional pre-built random.SystemRandom (or
        compatible) instance; a fresh SystemRandom is created when omitted.
    """
    rng = system_random if system_random else random.SystemRandom()
    return rng.randint(min_value, max_value)
6,121
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/entropy.py#L41-L49
[ "def", "_ParseKeysFromFindSpecs", "(", "self", ",", "parser_mediator", ",", "win_registry", ",", "find_specs", ")", ":", "searcher", "=", "dfwinreg_registry_searcher", ".", "WinRegistrySearcher", "(", "win_registry", ")", "for", "registry_key_path", "in", "iter", "(", "searcher", ".", "Find", "(", "find_specs", "=", "find_specs", ")", ")", ":", "if", "parser_mediator", ".", "abort", ":", "break", "registry_key", "=", "searcher", ".", "GetKeyByPath", "(", "registry_key_path", ")", "self", ".", "_ParseKey", "(", "parser_mediator", ",", "registry_key", ")" ]
merge two Mapping objects keeping the type of the first mapping
def _merge_maps(m1, m2):
    """Merge two Mapping objects, keeping the type of the first mapping.

    Entries from m2 are appended last, so they win on key collisions.
    """
    combined_items = list(m1.items()) + list(m2.items())
    return type(m1)(combined_items)
6,122
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L64-L66
[ "def", "get_repo_url", "(", "pypirc", ",", "repository", ")", ":", "pypirc", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "pypirc", ")", ")", "pypi_config", "=", "base", ".", "PyPIConfig", "(", "pypirc", ")", "repo_config", "=", "pypi_config", ".", "get_repo_config", "(", "repository", ")", "if", "repo_config", ":", "return", "repo_config", ".", "get_clean_url", "(", ")", "else", ":", "return", "base", ".", "RepositoryURL", "(", "repository", ")" ]
Create an HTTP basic authentication callable
def basic_auth(credentials):
    """Create an HTTP Basic authentication callable.

    :param credentials: a (username, password) pair.
    :return: a header-adding callable carrying the Authorization header.
    """
    joined = ':'.join(credentials)
    token = b64encode(joined.encode('ascii')).decode()
    return header_adder({'Authorization': 'Basic ' + token})
6,123
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L156-L170
[ "def", "parse_notebook_index", "(", "ntbkpth", ")", ":", "# Convert notebook to RST text in string", "rex", "=", "RSTExporter", "(", ")", "rsttxt", "=", "rex", ".", "from_filename", "(", "ntbkpth", ")", "[", "0", "]", "# Clean up trailing whitespace", "rsttxt", "=", "re", ".", "sub", "(", "r'\\n '", ",", "r''", ",", "rsttxt", ",", "re", ".", "M", "|", "re", ".", "S", ")", "pthidx", "=", "{", "}", "pthlst", "=", "[", "]", "lines", "=", "rsttxt", ".", "split", "(", "'\\n'", ")", "for", "l", "in", "lines", ":", "m", "=", "re", ".", "match", "(", "r'^-\\s+`([^<]+)\\s+<([^>]+).ipynb>`__'", ",", "l", ")", "if", "m", ":", "# List of subdirectories in order of appearance in index.rst", "pthlst", ".", "append", "(", "m", ".", "group", "(", "2", ")", ")", "# Dict mapping subdirectory name to description", "pthidx", "[", "m", ".", "group", "(", "2", ")", "]", "=", "m", ".", "group", "(", "1", ")", "return", "pthlst", ",", "pthidx" ]
Create a new request with added headers
def with_headers ( self , headers ) : return self . replace ( headers = _merge_maps ( self . headers , headers ) )
6,124
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L96-L104
[ "def", "filename_metadata", "(", "filename", ")", ":", "from", ".", ".", "segments", "import", "Segment", "name", "=", "Path", "(", "filename", ")", ".", "name", "try", ":", "obs", ",", "desc", ",", "start", ",", "dur", "=", "name", ".", "split", "(", "'-'", ")", "except", "ValueError", "as", "exc", ":", "exc", ".", "args", "=", "(", "'Failed to parse {!r} as LIGO-T050017-compatible '", "'filename'", ".", "format", "(", "name", ")", ",", ")", "raise", "start", "=", "float", "(", "start", ")", "dur", "=", "dur", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "while", "True", ":", "# recursively remove extension components", "try", ":", "dur", "=", "float", "(", "dur", ")", "except", "ValueError", ":", "if", "'.'", "not", "in", "dur", ":", "raise", "dur", "=", "dur", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "else", ":", "break", "return", "obs", ",", "desc", ",", "Segment", "(", "start", ",", "start", "+", "dur", ")" ]
Create a new request with added query parameters
def with_params ( self , params ) : return self . replace ( params = _merge_maps ( self . params , params ) )
6,125
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L116-L124
[ "def", "_get_external_accounts", "(", "self", ",", "locals", ")", ":", "users", "=", "dict", "(", ")", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "\"passwd -S -a\"", ")", "if", "out", "[", "'retcode'", "]", ":", "# System does not supports all accounts descriptions, just skipping.", "return", "users", "status", "=", "{", "'L'", ":", "'Locked'", ",", "'NP'", ":", "'No password'", ",", "'P'", ":", "'Usable password'", ",", "'LK'", ":", "'Locked'", "}", "for", "data", "in", "[", "elm", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "for", "elm", "in", "out", "[", "'stdout'", "]", ".", "split", "(", "os", ".", "linesep", ")", "if", "elm", ".", "strip", "(", ")", "]", ":", "if", "len", "(", "data", ")", "<", "2", ":", "continue", "name", ",", "login", "=", "data", "[", ":", "2", "]", "if", "name", "not", "in", "locals", ":", "users", "[", "name", "]", "=", "{", "'login'", ":", "login", ",", "'status'", ":", "status", ".", "get", "(", "login", ",", "'N/A'", ")", "}", "return", "users" ]
Determines if the n - th bit of passed bytes is 1 or 0 .
def _get_bit ( self , n , hash_bytes ) : if hash_bytes [ n // 8 ] >> int ( 8 - ( ( n % 8 ) + 1 ) ) & 1 == 1 : return True return False
6,126
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L88-L106
[ "def", "__validate_enrollment_periods", "(", "self", ",", "enrollments", ")", ":", "for", "a", ",", "b", "in", "itertools", ".", "combinations", "(", "enrollments", ",", "2", ")", ":", "max_start", "=", "max", "(", "a", ".", "start", ",", "b", ".", "start", ")", "min_end", "=", "min", "(", "a", ".", "end", ",", "b", ".", "end", ")", "if", "max_start", "<", "min_end", ":", "msg", "=", "\"invalid GrimoireLab enrollment dates. \"", "\"Organization dates overlap.\"", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "return", "enrollments" ]
Generates matrix that describes which blocks should be coloured .
def _generate_matrix ( self , hash_bytes ) : # Since the identicon needs to be symmetric, we'll need to work on half # the columns (rounded-up), and reflect where necessary. half_columns = self . columns // 2 + self . columns % 2 cells = self . rows * half_columns # Initialise the matrix (list of rows) that will be returned. matrix = [ [ False ] * self . columns for _ in range ( self . rows ) ] # Process the cells one by one. for cell in range ( cells ) : # If the bit from hash correpsonding to this cell is 1, mark the # cell as foreground one. Do not use first byte (since that one is # used for determining the foreground colour. if self . _get_bit ( cell , hash_bytes [ 1 : ] ) : # Determine the cell coordinates in matrix. column = cell // self . columns row = cell % self . rows # Mark the cell and its reflection. Central column may get # marked twice, but we don't care. matrix [ row ] [ column ] = True matrix [ row ] [ self . columns - column - 1 ] = True return matrix
6,127
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L108-L148
[ "def", "unregisterDataItem", "(", "self", ",", "path", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/unregisterItem\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"itempath\"", ":", "path", ",", "\"force\"", ":", "\"true\"", "}", "return", "self", ".", "_post", "(", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Generates an identicon image in requested image format out of the passed block matrix with the requested width height padding foreground colour background colour and image format .
def _generate_image ( self , matrix , width , height , padding , foreground , background , image_format ) : # Set-up a new image object, setting the background to provided value. image = Image . new ( "RGBA" , ( width + padding [ 2 ] + padding [ 3 ] , height + padding [ 0 ] + padding [ 1 ] ) , background ) # Set-up a draw image (for drawing the blocks). draw = ImageDraw . Draw ( image ) # Calculate the block widht and height. block_width = width // self . columns block_height = height // self . rows # Go through all the elements of a matrix, and draw the rectangles. for row , row_columns in enumerate ( matrix ) : for column , cell in enumerate ( row_columns ) : if cell : # Set-up the coordinates for a block. x1 = padding [ 2 ] + column * block_width y1 = padding [ 0 ] + row * block_height x2 = padding [ 2 ] + ( column + 1 ) * block_width - 1 y2 = padding [ 0 ] + ( row + 1 ) * block_height - 1 # Draw the rectangle. draw . rectangle ( ( x1 , y1 , x2 , y2 ) , fill = foreground ) # Set-up a stream where image will be saved. stream = BytesIO ( ) if image_format . upper ( ) == "JPEG" : image = image . convert ( mode = "RGB" ) # Save the image to stream. try : image . save ( stream , format = image_format , optimize = True ) except KeyError : raise ValueError ( "Pillow does not support requested image format: %s" % image_format ) image_raw = stream . getvalue ( ) stream . close ( ) # Return the resulting image. return image_raw
6,128
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L187-L261
[ "def", "aggregate", "(", "self", ",", "val1", ",", "val2", ")", ":", "assert", "val1", "is", "not", "None", "assert", "val2", "is", "not", "None", "return", "self", ".", "_aggregator", "(", "val1", ",", "val2", ")" ]
Generates an identicon image in the ASCII format . The image will just output the matrix used to generate the identicon .
def _generate_ascii ( self , matrix , foreground , background ) : return "\n" . join ( [ "" . join ( [ foreground if cell else background for cell in row ] ) for row in matrix ] )
6,129
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L263-L285
[ "def", "pause", "(", "self", ")", ":", "for", "child", "in", "chain", "(", "self", ".", "consumers", ".", "values", "(", ")", ",", "self", ".", "workers", ")", ":", "child", ".", "pause", "(", ")", "for", "child", "in", "chain", "(", "self", ".", "consumers", ".", "values", "(", ")", ",", "self", ".", "workers", ")", ":", "child", ".", "paused_event", ".", "wait", "(", ")" ]
Add the local timezone to value to make it aware .
def local_timezone ( value ) : if hasattr ( value , "tzinfo" ) and value . tzinfo is None : return value . replace ( tzinfo = dateutil . tz . tzlocal ( ) ) return value
6,130
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/util.py#L37-L41
[ "def", "evergreen", "(", "self", ",", "included_channel_ids", "=", "None", ",", "excluded_channel_ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "eqs", "=", "self", ".", "search", "(", "*", "*", "kwargs", ")", "eqs", "=", "eqs", ".", "filter", "(", "Evergreen", "(", ")", ")", "if", "included_channel_ids", ":", "eqs", "=", "eqs", ".", "filter", "(", "VideohubChannel", "(", "included_ids", "=", "included_channel_ids", ")", ")", "if", "excluded_channel_ids", ":", "eqs", "=", "eqs", ".", "filter", "(", "VideohubChannel", "(", "excluded_ids", "=", "excluded_channel_ids", ")", ")", "return", "eqs" ]
Convert a PPMP entity to JSON . Additional arguments are the same as accepted by json . dumps .
def dumps ( data , * * kwargs ) : def _encoder ( value ) : if isinstance ( value , datetime . datetime ) : return value . isoformat ( ) if hasattr ( value , "_data" ) : return value . _data raise TypeError ( 'Could not encode %r' % value ) return json . dumps ( data , default = _encoder , * * kwargs )
6,131
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/util.py#L68-L81
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fileobj", "is", "None", ":", "return", "if", "self", ".", "mode", "==", "WRITE", ":", "self", ".", "close_member", "(", ")", "self", ".", "fileobj", "=", "None", "elif", "self", ".", "mode", "==", "READ", ":", "self", ".", "fileobj", "=", "None", "if", "self", ".", "myfileobj", ":", "self", ".", "myfileobj", ".", "close", "(", ")", "self", ".", "myfileobj", "=", "None" ]
Optional helper method which works out the platform and calls the relevant setup method
def setup_lilypond ( path_to_lilypond_folder = "default" ) : options = { "win32" : setup_lilypond_windows , "darwin" : setup_lilypond_osx } if platform . startswith ( "linux" ) : setup_lilypond_linux ( ) else : options [ platform ] ( path_to_lilypond_folder )
6,132
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/helpers.py#L13-L26
[ "def", "listen_error_messages_raylet", "(", "worker", ",", "task_error_queue", ",", "threads_stopped", ")", ":", "worker", ".", "error_message_pubsub_client", "=", "worker", ".", "redis_client", ".", "pubsub", "(", "ignore_subscribe_messages", "=", "True", ")", "# Exports that are published after the call to", "# error_message_pubsub_client.subscribe and before the call to", "# error_message_pubsub_client.listen will still be processed in the loop.", "# Really we should just subscribe to the errors for this specific job.", "# However, currently all errors seem to be published on the same channel.", "error_pubsub_channel", "=", "str", "(", "ray", ".", "gcs_utils", ".", "TablePubsub", ".", "ERROR_INFO", ")", ".", "encode", "(", "\"ascii\"", ")", "worker", ".", "error_message_pubsub_client", ".", "subscribe", "(", "error_pubsub_channel", ")", "# worker.error_message_pubsub_client.psubscribe(\"*\")", "try", ":", "# Get the exports that occurred before the call to subscribe.", "error_messages", "=", "global_state", ".", "error_messages", "(", "worker", ".", "task_driver_id", ")", "for", "error_message", "in", "error_messages", ":", "logger", ".", "error", "(", "error_message", ")", "while", "True", ":", "# Exit if we received a signal that we should stop.", "if", "threads_stopped", ".", "is_set", "(", ")", ":", "return", "msg", "=", "worker", ".", "error_message_pubsub_client", ".", "get_message", "(", ")", "if", "msg", "is", "None", ":", "threads_stopped", ".", "wait", "(", "timeout", "=", "0.01", ")", "continue", "gcs_entry", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "msg", "[", "\"data\"", "]", ",", "0", ")", "assert", "gcs_entry", ".", "EntriesLength", "(", ")", "==", "1", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entry", ".", "Entries", "(", "0", ")", ",", "0", ")", "driver_id", "=", "error_data", ".", "DriverId", "(", ")", "if", "driver_id", "not", 
"in", "[", "worker", ".", "task_driver_id", ".", "binary", "(", ")", ",", "DriverID", ".", "nil", "(", ")", ".", "binary", "(", ")", "]", ":", "continue", "error_message", "=", "ray", ".", "utils", ".", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", "if", "(", "ray", ".", "utils", ".", "decode", "(", "error_data", ".", "Type", "(", ")", ")", "==", "ray_constants", ".", "TASK_PUSH_ERROR", ")", ":", "# Delay it a bit to see if we can suppress it", "task_error_queue", ".", "put", "(", "(", "error_message", ",", "time", ".", "time", "(", ")", ")", ")", "else", ":", "logger", ".", "error", "(", "error_message", ")", "finally", ":", "# Close the pubsub client to avoid leaking file descriptors.", "worker", ".", "error_message_pubsub_client", ".", "close", "(", ")" ]
Optional helper method which does the environment setup for lilypond in windows . If you ve ran this method you do not need and should not provide a lyscript when you instantiate this class . As this method is static you can run this method before you set up the LilypondRenderer instance .
def setup_lilypond_windows ( path = "default" ) : default = "C:/Program Files (x86)/LilyPond/usr/bin" path_variable = os . environ [ 'PATH' ] . split ( ";" ) if path == "default" : path_variable . append ( default ) else : path_variable . append ( path ) os . environ [ 'PATH' ] = ";" . join ( path_variable )
6,133
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/helpers.py#L29-L46
[ "def", "deletecols", "(", "X", ",", "cols", ")", ":", "if", "isinstance", "(", "cols", ",", "str", ")", ":", "cols", "=", "cols", ".", "split", "(", "','", ")", "retain", "=", "[", "n", "for", "n", "in", "X", ".", "dtype", ".", "names", "if", "n", "not", "in", "cols", "]", "if", "len", "(", "retain", ")", ">", "0", ":", "return", "X", "[", "retain", "]", "else", ":", "return", "None" ]
Convert a recursive dict to a plain ol dict .
def recursive_dict_to_dict ( rdict ) : d = { } for ( k , v ) in rdict . items ( ) : if isinstance ( v , defaultdict ) : d [ k ] = recursive_dict_to_dict ( v ) else : d [ k ] = v return d
6,134
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L17-L26
[ "def", "set_interrupt", "(", "self", ",", "enabled", ")", ":", "enable_reg", "=", "self", ".", "_readU8", "(", "TCS34725_ENABLE", ")", "if", "enabled", ":", "enable_reg", "|=", "TCS34725_ENABLE_AIEN", "else", ":", "enable_reg", "&=", "~", "TCS34725_ENABLE_AIEN", "self", ".", "_write8", "(", "TCS34725_ENABLE", ",", "enable_reg", ")", "time", ".", "sleep", "(", "1", ")" ]
Recursively inspect a dictionary and remove all empty values including empty strings lists and dictionaries .
def scrub_dict ( d ) : if type ( d ) is dict : return dict ( ( k , scrub_dict ( v ) ) for k , v in d . iteritems ( ) if v and scrub_dict ( v ) ) elif type ( d ) is list : return [ scrub_dict ( v ) for v in d if v and scrub_dict ( v ) ] else : return d
6,135
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L29-L42
[ "def", "not_storable", "(", "_type", ")", ":", "return", "Storable", "(", "_type", ",", "handlers", "=", "StorableHandler", "(", "poke", "=", "fake_poke", ",", "peek", "=", "fail_peek", "(", "_type", ")", ")", ")" ]
Recursively convert the object instance into a valid JSON type .
def _to_json_type ( obj , classkey = None ) : if isinstance ( obj , dict ) : data = { } for ( k , v ) in obj . items ( ) : data [ k ] = _to_json_type ( v , classkey ) return data elif hasattr ( obj , "_ast" ) : return _to_json_type ( obj . _ast ( ) ) elif hasattr ( obj , "__iter__" ) : return [ _to_json_type ( v , classkey ) for v in obj ] elif hasattr ( obj , "__dict__" ) : data = dict ( [ ( key , _to_json_type ( value , classkey ) ) for key , value in obj . __dict__ . iteritems ( ) if not callable ( value ) and not key . startswith ( '_' ) ] ) if classkey is not None and hasattr ( obj , "__class__" ) : data [ classkey ] = obj . __class__ . __name__ return data else : return obj
6,136
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L45-L67
[ "def", "catFiles", "(", "filesToCat", ",", "catFile", ")", ":", "if", "len", "(", "filesToCat", ")", "==", "0", ":", "#We must handle this case or the cat call will hang waiting for input", "open", "(", "catFile", ",", "'w'", ")", ".", "close", "(", ")", "return", "maxCat", "=", "25", "system", "(", "\"cat %s > %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]", "while", "len", "(", "filesToCat", ")", ">", "0", ":", "system", "(", "\"cat %s >> %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]" ]
Convert an instance of an object into a dict .
def to_dict ( obj ) : d = _to_json_type ( obj ) if isinstance ( d , dict ) : return scrub_dict ( d ) else : raise ValueError ( "The value provided must be an object." )
6,137
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L70-L77
[ "def", "_calculate_session_expiry", "(", "self", ",", "request", ",", "user_info", ")", ":", "access_token_expiry_timestamp", "=", "self", ".", "_get_access_token_expiry", "(", "request", ")", "id_token_expiry_timestamp", "=", "self", ".", "_get_id_token_expiry", "(", "user_info", ")", "now_in_seconds", "=", "int", "(", "time", ".", "time", "(", ")", ")", "# The session length is set to match whichever token expiration time is closer.", "earliest_expiration_timestamp", "=", "min", "(", "access_token_expiry_timestamp", ",", "id_token_expiry_timestamp", ")", "seconds_until_expiry", "=", "earliest_expiration_timestamp", "-", "now_in_seconds", "if", "seconds_until_expiry", "<=", "0", ":", "raise", "AuthError", "(", "'Session expiry time has already passed!'", ")", "return", "seconds_until_expiry" ]
print normal traceback information with some local arg values
def print_exc_plus ( stream = sys . stdout ) : # code of this mothod is mainly from <Python Cookbook> write = stream . write # assert the mothod exists flush = stream . flush tp , value , tb = sys . exc_info ( ) while tb . tb_next : tb = tb . tb_next stack = list ( ) f = tb . tb_frame while f : stack . append ( f ) f = f . f_back stack . reverse ( ) try : traceback . print_exc ( None , stream ) except BaseException as e : write ( u ( "FAILED PRINTING TRACE\n\n" ) ) write ( u ( str ( value ) ) ) write ( u ( '\n\n' ) ) finally : flush ( ) write ( u ( 'Locals by frame, innermost last\n' ) ) for frame in stack : write ( u ( '\nFrame %s in %s at line %s\n' % ( frame . f_code . co_name , frame . f_code . co_filename , frame . f_lineno ) ) ) for key , value , in frame . f_locals . items ( ) : write ( u ( '\t%20s = ' % key ) ) try : write ( u ( '%s\n' % value ) ) except BaseException : write ( u ( '<ERROR WHILE PRINTING VALUE>\n' ) ) flush ( )
6,138
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/tracemore.py#L22-L56
[ "def", "updateSeriesRegistrationStatus", "(", ")", ":", "from", ".", "models", "import", "Series", "if", "not", "getConstant", "(", "'general__enableCronTasks'", ")", ":", "return", "logger", ".", "info", "(", "'Checking status of Series that are open for registration.'", ")", "open_series", "=", "Series", ".", "objects", ".", "filter", "(", ")", ".", "filter", "(", "*", "*", "{", "'registrationOpen'", ":", "True", "}", ")", "for", "series", "in", "open_series", ":", "series", ".", "updateRegistrationStatus", "(", ")" ]
Revise consecutive empty space to single space .
def format_single_space_only ( text ) : return " " . join ( [ word for word in text . strip ( ) . split ( " " ) if len ( word ) >= 1 ] )
6,139
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L26-L37
[ "def", "multivariate_neg_logposterior", "(", "self", ",", "beta", ")", ":", "post", "=", "self", ".", "neg_loglik", "(", "beta", ")", "for", "k", "in", "range", "(", "0", ",", "self", ".", "z_no", ")", ":", "if", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "covariance_prior", "is", "True", ":", "post", "+=", "-", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "logpdf", "(", "self", ".", "custom_covariance", "(", "beta", ")", ")", "break", "else", ":", "post", "+=", "-", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "logpdf", "(", "beta", "[", "k", "]", ")", "return", "post" ]
Capitalize first letter for each words except function words .
def format_title ( text ) : text = text . strip ( ) # if empty string, return "" if len ( text ) == 0 : return text else : text = text . lower ( ) # lower all char # Change to in single space format words = [ word for word in text . strip ( ) . split ( " " ) if len ( word ) >= 1 ] # Capitalize all words except function word words_new = list ( ) for word in words : if word not in FUNCTION_WORD : word = word [ 0 ] . upper ( ) + word [ 1 : ] words_new . append ( word ) # Make sure first word always be capitalized words_new [ 0 ] = words_new [ 0 ] [ 0 ] . upper ( ) + words_new [ 0 ] [ 1 : ] return " " . join ( words_new )
6,140
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L40-L71
[ "def", "_parse_document", "(", "document", ":", "Path", ",", "system", ":", "System", "=", "None", ",", "profile", "=", "EProfile", ".", "FULL", ")", ":", "logger", ".", "debug", "(", "'parse document: {0}'", ".", "format", "(", "document", ")", ")", "stream", "=", "FileStream", "(", "str", "(", "document", ")", ",", "encoding", "=", "'utf-8'", ")", "system", "=", "FileSystem", ".", "_parse_stream", "(", "stream", ",", "system", ",", "document", ",", "profile", ")", "FileSystem", ".", "merge_annotations", "(", "system", ",", "document", ".", "stripext", "(", ")", "+", "'.yaml'", ")", "return", "system" ]
Capitalize first letter for each part of the name .
def format_person_name ( text ) : text = text . strip ( ) if len ( text ) == 0 : # if empty string, return it return text else : text = text . lower ( ) # lower all char # delete redundant empty space words = [ word for word in text . strip ( ) . split ( " " ) if len ( word ) >= 1 ] words = [ word [ 0 ] . upper ( ) + word [ 1 : ] for word in words ] return " " . join ( words )
6,141
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L74-L93
[ "def", "request", "(", "self", ",", "endpoint", ")", ":", "method", "=", "endpoint", ".", "method", "method", "=", "method", ".", "lower", "(", ")", "params", "=", "None", "try", ":", "params", "=", "getattr", "(", "endpoint", ",", "\"params\"", ")", "except", "AttributeError", ":", "# request does not have params", "params", "=", "{", "}", "headers", "=", "{", "}", "if", "hasattr", "(", "endpoint", ",", "\"HEADERS\"", ")", ":", "headers", "=", "getattr", "(", "endpoint", ",", "\"HEADERS\"", ")", "request_args", "=", "{", "}", "if", "method", "==", "'get'", ":", "request_args", "[", "'params'", "]", "=", "params", "elif", "hasattr", "(", "endpoint", ",", "\"data\"", ")", "and", "endpoint", ".", "data", ":", "request_args", "[", "'json'", "]", "=", "endpoint", ".", "data", "# if any parameter for request then merge them", "request_args", ".", "update", "(", "self", ".", "_request_params", ")", "# which API to access ?", "if", "not", "(", "hasattr", "(", "endpoint", ",", "\"STREAM\"", ")", "and", "getattr", "(", "endpoint", ",", "\"STREAM\"", ")", "is", "True", ")", ":", "url", "=", "\"{}/{}\"", ".", "format", "(", "TRADING_ENVIRONMENTS", "[", "self", ".", "environment", "]", "[", "\"api\"", "]", ",", "endpoint", ")", "response", "=", "self", ".", "__request", "(", "method", ",", "url", ",", "request_args", ",", "headers", "=", "headers", ")", "content", "=", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", "content", "=", "json", ".", "loads", "(", "content", ")", "# update endpoint", "endpoint", ".", "response", "=", "content", "endpoint", ".", "status_code", "=", "response", ".", "status_code", "return", "content", "else", ":", "url", "=", "\"{}/{}\"", ".", "format", "(", "TRADING_ENVIRONMENTS", "[", "self", ".", "environment", "]", "[", "\"stream\"", "]", ",", "endpoint", ")", "endpoint", ".", "response", "=", "self", ".", "__stream_request", "(", "method", ",", "url", ",", "request_args", ",", "headers", "=", "headers", ")", "return", 
"endpoint", ".", "response" ]
dump DictTree data to json files .
def dump ( self , path ) : try : with open ( path , "wb" ) as f : f . write ( self . __str__ ( ) . encode ( "utf-8" ) ) except : pass with open ( path , "wb" ) as f : pickle . dump ( self . __data__ , f )
6,142
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L117-L128
[ "def", "rate_limit", "(", "f", ")", ":", "def", "new_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "0", "while", "True", ":", "resp", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "status_code", "==", "200", ":", "errors", "=", "0", "return", "resp", "elif", "resp", ".", "status_code", "==", "401", ":", "# Hack to retain the original exception, but augment it with", "# additional context for the user to interpret it. In a Python", "# 3 only future we can raise a new exception of the same type", "# with a new message from the old error.", "try", ":", "resp", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", "as", "e", ":", "message", "=", "\"\\nThis is a protected or locked account, or\"", "+", "\" the credentials provided are no longer valid.\"", "e", ".", "args", "=", "(", "e", ".", "args", "[", "0", "]", "+", "message", ",", ")", "+", "e", ".", "args", "[", "1", ":", "]", "log", ".", "warning", "(", "\"401 Authentication required for %s\"", ",", "resp", ".", "url", ")", "raise", "elif", "resp", ".", "status_code", "==", "429", ":", "reset", "=", "int", "(", "resp", ".", "headers", "[", "'x-rate-limit-reset'", "]", ")", "now", "=", "time", ".", "time", "(", ")", "seconds", "=", "reset", "-", "now", "+", "10", "if", "seconds", "<", "1", ":", "seconds", "=", "10", "log", ".", "warning", "(", "\"rate limit exceeded: sleeping %s secs\"", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "elif", "resp", ".", "status_code", ">=", "500", ":", "errors", "+=", "1", "if", "errors", ">", "30", ":", "log", ".", "warning", "(", "\"too many errors from Twitter, giving up\"", ")", "resp", ".", "raise_for_status", "(", ")", "seconds", "=", "60", "*", "errors", "log", ".", "warning", "(", "\"%s from Twitter API, sleeping %s\"", ",", "resp", ".", "status_code", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "else", ":", "resp", ".", "raise_for_status", "(", ")", 
"return", "new_f" ]
load DictTree from json files .
def load ( cls , path ) : try : with open ( path , "rb" ) as f : return cls ( __data__ = json . loads ( f . read ( ) . decode ( "utf-8" ) ) ) except : pass with open ( path , "rb" ) as f : return cls ( __data__ = pickle . load ( f ) )
6,143
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L131-L142
[ "def", "delete_datapoint", "(", "self", ",", "datapoint", ")", ":", "datapoint", "=", "validate_type", "(", "datapoint", ",", "DataPoint", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}/{datapoint_id}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "datapoint_id", "=", "datapoint", ".", "get_id", "(", ")", ",", ")", ")" ]
Iterate values .
def values ( self ) : for key , value in self . __data__ . items ( ) : if key not in ( META , KEY ) : yield DictTree ( __data__ = value )
6,144
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L202-L208
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Iterate keys at specified depth .
def keys_at ( self , depth , counter = 1 ) : if depth < 1 : yield ROOT else : if counter == depth : for key in self . keys ( ) : yield key else : counter += 1 for dict_tree in self . values ( ) : for key in dict_tree . keys_at ( depth , counter ) : yield key
6,145
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L219-L233
[ "def", "end_headers", "(", "self", ")", ":", "if", "self", ".", "request_version", "!=", "'HTTP/0.9'", ":", "self", ".", "_headers_buffer", ".", "append", "(", "b\"\\r\\n\"", ")", "self", ".", "flush_headers", "(", ")" ]
Iterate values at specified depth .
def values_at ( self , depth ) : if depth < 1 : yield self else : for dict_tree in self . values ( ) : for value in dict_tree . values_at ( depth - 1 ) : yield value
6,146
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L235-L244
[ "def", "end_headers", "(", "self", ")", ":", "# Send them all at once", "for", "name", ",", "value", "in", "self", ".", "_headers", ".", "items", "(", ")", ":", "self", ".", "_handler", ".", "send_header", "(", "name", ",", "value", ")", "self", ".", "_handler", ".", "end_headers", "(", ")" ]
Iterate items at specified depth .
def items_at ( self , depth ) : if depth < 1 : yield ROOT , self elif depth == 1 : for key , value in self . items ( ) : yield key , value else : for dict_tree in self . values ( ) : for key , value in dict_tree . items_at ( depth - 1 ) : yield key , value
6,147
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L246-L258
[ "def", "end_headers", "(", "self", ")", ":", "if", "self", ".", "request_version", "!=", "'HTTP/0.9'", ":", "self", ".", "_headers_buffer", ".", "append", "(", "b\"\\r\\n\"", ")", "self", ".", "flush_headers", "(", ")" ]
Display the node stats info on specific depth in this dict .
def stats ( self , result = None , counter = 0 ) : if result is None : result = dict ( ) if counter == 0 : if len ( self ) : result [ 0 ] = { "depth" : 0 , "leaf" : 0 , "root" : 1 } else : result [ 0 ] = { "depth" : 0 , "leaf" : 1 , "root" : 0 } counter += 1 if len ( self ) : result . setdefault ( counter , { "depth" : counter , "leaf" : 0 , "root" : 0 } ) for dict_tree in self . values ( ) : if len ( dict_tree ) : # root result [ counter ] [ "root" ] += 1 else : # leaf result [ counter ] [ "leaf" ] += 1 dict_tree . stats ( result , counter ) return [ collections . OrderedDict ( [ ( "depth" , info [ "depth" ] ) , ( "leaf" , info [ "leaf" ] ) , ( "root" , info [ "root" ] ) , ] ) for info in sorted ( result . values ( ) , key = lambda x : x [ "depth" ] ) ]
6,148
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L271-L311
[ "def", "write", "(", "self", ",", "symbol", ",", "data", ")", ":", "# get the full set of date ranges that we have", "cursor", "=", "self", ".", "_collection", ".", "find", "(", ")", "for", "res", "in", "cursor", ":", "library", "=", "self", ".", "_arctic_lib", ".", "arctic", "[", "res", "[", "'library_name'", "]", "]", "dslice", "=", "self", ".", "_slice", "(", "data", ",", "to_dt", "(", "res", "[", "'start'", "]", ",", "mktz", "(", "'UTC'", ")", ")", ",", "to_dt", "(", "res", "[", "'end'", "]", ",", "mktz", "(", "'UTC'", ")", ")", ")", "if", "len", "(", "dslice", ")", "!=", "0", ":", "library", ".", "write", "(", "symbol", ",", "dslice", ")" ]
Puts data to the queue and returns a newly created Task
async def put ( self , data , * , pri = None , ttl = None , ttr = None , delay = None ) : opts = { } if pri is not None : opts [ 'pri' ] = pri if ttl is not None : opts [ 'ttl' ] = ttl if ttr is not None : opts [ 'ttr' ] = ttr if delay is not None : opts [ 'delay' ] = delay args = ( data , opts ) res = await self . conn . call ( self . __funcs [ 'put' ] , args ) return self . _create_task ( res . body )
6,149
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L74-L100
[ "def", "_get_license_description", "(", "license_code", ")", ":", "req", "=", "requests", ".", "get", "(", "\"{base_url}/licenses/{license_code}\"", ".", "format", "(", "base_url", "=", "BASE_URL", ",", "license_code", "=", "license_code", ")", ",", "headers", "=", "_HEADERS", ")", "if", "req", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "s", "=", "req", ".", "json", "(", ")", "[", "\"body\"", "]", "search_curly", "=", "re", ".", "search", "(", "r'\\{(.*)\\}'", ",", "s", ")", "search_square", "=", "re", ".", "search", "(", "r'\\[(.*)\\]'", ",", "s", ")", "license", "=", "\"\"", "replace_string", "=", "'{year} {name}'", ".", "format", "(", "year", "=", "date", ".", "today", "(", ")", ".", "year", ",", "name", "=", "_get_config_name", "(", ")", ")", "if", "search_curly", ":", "license", "=", "re", ".", "sub", "(", "r'\\{(.+)\\}'", ",", "replace_string", ",", "s", ")", "elif", "search_square", ":", "license", "=", "re", ".", "sub", "(", "r'\\[(.+)\\]'", ",", "replace_string", ",", "s", ")", "else", ":", "license", "=", "s", "return", "license", "else", ":", "print", "(", "Fore", ".", "RED", "+", "'No such license. Please check again.'", ")", ",", "print", "(", "Style", ".", "RESET_ALL", ")", ",", "sys", ".", "exit", "(", ")" ]
Takes task from the queue waiting the timeout if specified
async def take ( self , timeout = None ) : args = None if timeout is not None : args = ( timeout , ) res = await self . conn . call ( self . __funcs [ 'take' ] , args ) if len ( res . body ) > 0 : return self . _create_task ( res . body ) return None
6,150
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L102-L116
[ "def", "check_header", "(", "filename", ",", "is_newly_created", "=", "False", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "pyfile", ":", "buf", "=", "\"\"", "for", "lineno", "in", "range", "(", "1", ",", "7", ")", ":", "line", "=", "pyfile", ".", "readline", "(", ")", "# Skip shebang line", "if", "lineno", "==", "1", "and", "line", ".", "startswith", "(", "'#!'", ")", ":", "line", "=", "pyfile", ".", "readline", "(", ")", "# Check if the copyright year can be parsed as within the current century, or the current", "# year if it is a new file.", "if", "line", ".", "startswith", "(", "\"# Copyright\"", ")", ":", "year", "=", "line", "[", "12", ":", "16", "]", "if", "is_newly_created", ":", "if", "not", "year", "==", "_current_year", ":", "raise", "HeaderCheckFailure", "(", "'{}: copyright year must be {} (was {})'", ".", "format", "(", "filename", ",", "_current_year", ",", "year", ")", ")", "else", ":", "if", "not", "_current_century_regex", ".", "match", "(", "year", ")", ":", "raise", "HeaderCheckFailure", "(", "\"{}: copyright year must match '{}' (was {}): current year is {}\"", ".", "format", "(", "filename", ",", "_current_century_regex", ".", "pattern", ",", "year", ",", "_current_year", ")", ")", "line", "=", "\"# Copyright YYYY\"", "+", "line", "[", "16", ":", "]", "buf", "+=", "line", "if", "buf", "!=", "EXPECTED_HEADER", ":", "raise", "HeaderCheckFailure", "(", "'{}: failed to parse header at all'", ".", "format", "(", "filename", ")", ")", "except", "IOError", "as", "e", ":", "raise", "HeaderCheckFailure", "(", "'{}: error while reading input ({})'", ".", "format", "(", "filename", ",", "str", "(", "e", ")", ")", ")" ]
Get task without changing its state
async def peek ( self , task_id ) : args = ( task_id , ) res = await self . conn . call ( self . __funcs [ 'peek' ] , args ) return self . _create_task ( res . body )
6,151
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L156-L166
[ "def", "_error_messages", "(", "self", ",", "driver_id", ")", ":", "assert", "isinstance", "(", "driver_id", ",", "ray", ".", "DriverID", ")", "message", "=", "self", ".", "redis_client", ".", "execute_command", "(", "\"RAY.TABLE_LOOKUP\"", ",", "ray", ".", "gcs_utils", ".", "TablePrefix", ".", "ERROR_INFO", ",", "\"\"", ",", "driver_id", ".", "binary", "(", ")", ")", "# If there are no errors, return early.", "if", "message", "is", "None", ":", "return", "[", "]", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "message", ",", "0", ")", "error_messages", "=", "[", "]", "for", "i", "in", "range", "(", "gcs_entries", ".", "EntriesLength", "(", ")", ")", ":", "error_data", "=", "ray", ".", "gcs_utils", ".", "ErrorTableData", ".", "GetRootAsErrorTableData", "(", "gcs_entries", ".", "Entries", "(", "i", ")", ",", "0", ")", "assert", "driver_id", ".", "binary", "(", ")", "==", "error_data", ".", "DriverId", "(", ")", "error_message", "=", "{", "\"type\"", ":", "decode", "(", "error_data", ".", "Type", "(", ")", ")", ",", "\"message\"", ":", "decode", "(", "error_data", ".", "ErrorMessage", "(", ")", ")", ",", "\"timestamp\"", ":", "error_data", ".", "Timestamp", "(", ")", ",", "}", "error_messages", ".", "append", "(", "error_message", ")", "return", "error_messages" ]
Kick count tasks from queue
async def kick ( self , count ) : args = ( count , ) res = await self . conn . call ( self . __funcs [ 'kick' ] , args ) if self . conn . version < ( 1 , 7 ) : return res . body [ 0 ] [ 0 ] return res . body [ 0 ]
6,152
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L190-L201
[ "def", "_get_license_description", "(", "license_code", ")", ":", "req", "=", "requests", ".", "get", "(", "\"{base_url}/licenses/{license_code}\"", ".", "format", "(", "base_url", "=", "BASE_URL", ",", "license_code", "=", "license_code", ")", ",", "headers", "=", "_HEADERS", ")", "if", "req", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "s", "=", "req", ".", "json", "(", ")", "[", "\"body\"", "]", "search_curly", "=", "re", ".", "search", "(", "r'\\{(.*)\\}'", ",", "s", ")", "search_square", "=", "re", ".", "search", "(", "r'\\[(.*)\\]'", ",", "s", ")", "license", "=", "\"\"", "replace_string", "=", "'{year} {name}'", ".", "format", "(", "year", "=", "date", ".", "today", "(", ")", ".", "year", ",", "name", "=", "_get_config_name", "(", ")", ")", "if", "search_curly", ":", "license", "=", "re", ".", "sub", "(", "r'\\{(.+)\\}'", ",", "replace_string", ",", "s", ")", "elif", "search_square", ":", "license", "=", "re", ".", "sub", "(", "r'\\[(.+)\\]'", ",", "replace_string", ",", "s", ")", "else", ":", "license", "=", "s", "return", "license", "else", ":", "print", "(", "Fore", ".", "RED", "+", "'No such license. Please check again.'", ")", ",", "print", "(", "Style", ".", "RESET_ALL", ")", ",", "sys", ".", "exit", "(", ")" ]
parse the response body as JSON raise on errors
def _parse_content ( response ) : if response . status_code != 200 : raise ApiError ( f'unknown error: {response.content.decode()}' ) result = json . loads ( response . content ) if not result [ 'ok' ] : raise ApiError ( f'{result["error"]}: {result.get("detail")}' ) return result
6,153
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L19-L26
[ "def", "_init_itemid2name", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "args", ",", "'id2sym'", ")", ":", "return", "None", "fin_id2sym", "=", "self", ".", "args", ".", "id2sym", "if", "fin_id2sym", "is", "not", "None", "and", "os", ".", "path", ".", "exists", "(", "fin_id2sym", ")", ":", "id2sym", "=", "{", "}", "cmpl", "=", "re", ".", "compile", "(", "r'^\\s*(\\S+)[\\s,;]+(\\S+)'", ")", "with", "open", "(", "fin_id2sym", ")", "as", "ifstrm", ":", "for", "line", "in", "ifstrm", ":", "mtch", "=", "cmpl", ".", "search", "(", "line", ")", "if", "mtch", ":", "id2sym", "[", "mtch", ".", "group", "(", "1", ")", "]", "=", "mtch", ".", "group", "(", "2", ")", "return", "id2sym" ]
decorator factory for retrieval queries from query params
def paginated_retrieval ( methodname , itemtype ) : return compose ( reusable , basic_interaction , map_yield ( partial ( _params_as_get , methodname ) ) , )
6,154
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L49-L55
[ "def", "host_config", "(", "self", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "default_file_dir", "=", "join", "(", "expanduser", "(", "'~'", ")", ",", "'vent_files'", ")", "else", ":", "default_file_dir", "=", "'/opt/vent_files'", "status", "=", "self", ".", "ensure_dir", "(", "default_file_dir", ")", "if", "not", "isfile", "(", "self", ".", "cfg_file", ")", ":", "config", "=", "Template", "(", "template", "=", "self", ".", "cfg_file", ")", "sections", "=", "{", "'main'", ":", "{", "'files'", ":", "default_file_dir", "}", ",", "'network-mapping'", ":", "{", "}", ",", "'nvidia-docker-plugin'", ":", "{", "'port'", ":", "'3476'", "}", "}", "for", "s", "in", "sections", ":", "if", "sections", "[", "s", "]", ":", "for", "option", "in", "sections", "[", "s", "]", ":", "config", ".", "add_option", "(", "s", ",", "option", ",", "sections", "[", "s", "]", "[", "option", "]", ")", "else", ":", "config", ".", "add_section", "(", "s", ")", "config", ".", "write_config", "(", ")", "return", "status" ]
decorator factory for json POST queries
def json_post ( methodname , rtype , key ) : return compose ( reusable , map_return ( registry ( rtype ) , itemgetter ( key ) ) , basic_interaction , map_yield ( partial ( _json_as_post , methodname ) ) , oneyield , )
6,155
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L62-L70
[ "def", "read_frames", "(", "cls", ",", "reader", ")", ":", "rval", "=", "deque", "(", ")", "while", "True", ":", "frame_start_pos", "=", "reader", ".", "tell", "(", ")", "try", ":", "frame", "=", "Frame", ".", "_read_frame", "(", "reader", ")", "except", "Reader", ".", "BufferUnderflow", ":", "# No more data in the stream", "frame", "=", "None", "except", "Reader", ".", "ReaderError", "as", "e", ":", "# Some other format error", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "except", "struct", ".", "error", "as", "e", ":", "raise", "Frame", ".", "FormatError", ",", "str", "(", "e", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", "if", "frame", "is", "None", ":", "reader", ".", "seek", "(", "frame_start_pos", ")", "break", "rval", ".", "append", "(", "frame", ")", "return", "rval" ]
Return a ConfigParser object populated from the settings . cfg file .
def _read_config ( cfg_file ) : config = ConfigParser ( ) # maintain case of options config . optionxform = lambda option : option if not os . path . exists ( cfg_file ) : # Create an empty config config . add_section ( _MAIN_SECTION_NAME ) config . add_section ( _ENVIRONMENT_SECTION_NAME ) else : config . read ( cfg_file ) return config
6,156
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L145-L160
[ "def", "weld_aggregate", "(", "array", ",", "weld_type", ",", "operation", ")", ":", "obj_id", ",", "weld_obj", "=", "create_weld_object", "(", "array", ")", "weld_template", "=", "_weld_aggregate_code", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "array", "=", "obj_id", ",", "type", "=", "weld_type", ",", "operation", "=", "operation", ")", "return", "weld_obj" ]
Write a config object to the settings . cfg file .
def _write_config ( config , cfg_file ) : directory = os . path . dirname ( cfg_file ) if not os . path . exists ( directory ) : os . makedirs ( directory ) with open ( cfg_file , "w+" ) as output_file : config . write ( output_file )
6,157
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L163-L173
[ "def", "remove_async_sns_topic", "(", "self", ",", "lambda_name", ")", ":", "topic_name", "=", "get_topic_name", "(", "lambda_name", ")", "removed_arns", "=", "[", "]", "for", "sub", "in", "self", ".", "sns_client", ".", "list_subscriptions", "(", ")", "[", "'Subscriptions'", "]", ":", "if", "topic_name", "in", "sub", "[", "'TopicArn'", "]", ":", "self", ".", "sns_client", ".", "delete_topic", "(", "TopicArn", "=", "sub", "[", "'TopicArn'", "]", ")", "removed_arns", ".", "append", "(", "sub", "[", "'TopicArn'", "]", ")", "return", "removed_arns" ]
Return all environment values from the config files . Values stored in the user configuration file will take precedence over values stored in the system configuration file .
def get_environment ( ) : section = _ENVIRONMENT_SECTION_NAME # Read system sys_cfg = _read_config ( _SYSTEM_CONFIG_FILE ) sys_env = dict ( sys_cfg . items ( section ) ) if sys_cfg . has_section ( section ) else { } # Read user usr_cfg = _read_config ( _USER_CONFIG_FILE ) usr_env = dict ( usr_cfg . items ( section ) ) if usr_cfg . has_section ( section ) else { } # Merge user into system for k in usr_env . keys ( ) : sys_env [ k ] = usr_env [ k ] return sys_env
6,158
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L176-L199
[ "def", "to_weld_vec", "(", "weld_type", ",", "ndim", ")", ":", "for", "i", "in", "range", "(", "ndim", ")", ":", "weld_type", "=", "WeldVec", "(", "weld_type", ")", "return", "weld_type" ]
Set engine environment values in the config file .
def set_environment ( environment , system = False ) : config_filename = _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config ( config_filename ) section = _ENVIRONMENT_SECTION_NAME for key in environment . keys ( ) : config . set ( section , key , environment [ key ] ) _write_config ( config , config_filename )
6,159
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L202-L218
[ "def", "deletecols", "(", "X", ",", "cols", ")", ":", "if", "isinstance", "(", "cols", ",", "str", ")", ":", "cols", "=", "cols", ".", "split", "(", "','", ")", "retain", "=", "[", "n", "for", "n", "in", "X", ".", "dtype", ".", "names", "if", "n", "not", "in", "cols", "]", "if", "len", "(", "retain", ")", ">", "0", ":", "return", "X", "[", "retain", "]", "else", ":", "return", "None" ]
Remove the specified environment setting from the appropriate config file .
def remove_environment ( environment_var_name , system = False ) : config_filename = _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config ( config_filename ) section = _ENVIRONMENT_SECTION_NAME config . remove_option ( section , environment_var_name ) _write_config ( config , config_filename )
6,160
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L221-L235
[ "def", "uunion1d", "(", "arr1", ",", "arr2", ")", ":", "v", "=", "np", ".", "union1d", "(", "arr1", ",", "arr2", ")", "v", "=", "_validate_numpy_wrapper_units", "(", "v", ",", "[", "arr1", ",", "arr2", "]", ")", "return", "v" ]
Returns the value of the specified configuration property . Property values stored in the user configuration file take precedence over values stored in the system configuration file .
def get ( property_name ) : config = _read_config ( _USER_CONFIG_FILE ) section = _MAIN_SECTION_NAME try : property_value = config . get ( section , property_name ) except ( NoOptionError , NoSectionError ) as error : # Try the system config file try : config = _read_config ( _SYSTEM_CONFIG_FILE ) property_value = config . get ( section , property_name ) except ( NoOptionError , NoSectionError ) as error : raise NoConfigOptionError ( error ) return property_value
6,161
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L238-L261
[ "def", "to_weld_vec", "(", "weld_type", ",", "ndim", ")", ":", "for", "i", "in", "range", "(", "ndim", ")", ":", "weld_type", "=", "WeldVec", "(", "weld_type", ")", "return", "weld_type" ]
Sets the configuration property to the specified value .
def set ( property_name , value , system = False ) : config_filename = _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config ( config_filename ) section = _MAIN_SECTION_NAME config . set ( section , property_name , value ) _write_config ( config , config_filename )
6,162
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L264-L279
[ "def", "list_ranges", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "start_range", "=", "None", ",", "end_range", "=", "None", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'file_name'", ",", "file_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ",", "file_name", ")", "request", ".", "query", "=", "[", "(", "'comp'", ",", "'rangelist'", ")", ",", "(", "'timeout'", ",", "_int_to_str", "(", "timeout", ")", ")", ",", "]", "if", "start_range", "is", "not", "None", ":", "_validate_and_format_range_headers", "(", "request", ",", "start_range", ",", "end_range", ",", "start_range_required", "=", "False", ",", "end_range_required", "=", "False", ")", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_convert_xml_to_ranges", "(", "response", ")" ]
Register a new service with the local consul agent
def register ( self , id , name , address , port = None , tags = None , check = None ) : service = { } service [ 'ID' ] = id service [ 'Name' ] = name service [ 'Address' ] = address if port : service [ 'Port' ] = int ( port ) if tags : service [ 'Tags' ] = tags if check : service [ 'Check' ] = check r = requests . put ( self . url_register , json = service ) if r . status_code != 200 : raise consulRegistrationError ( 'PUT returned {}' . format ( r . status_code ) ) return r
6,163
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L88-L104
[ "def", "get", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ")", ":", "# Validate arguments - use an xor", "if", "not", "(", "id", "is", "None", ")", "^", "(", "name", "is", "None", ")", ":", "raise", "ValueError", "(", "\"Either id or name must be set (but not both!)\"", ")", "# If it's just ID provided, call the parent function", "if", "id", "is", "not", "None", ":", "return", "super", "(", "TaskQueueManager", ",", "self", ")", ".", "get", "(", "id", "=", "id", ")", "# Try getting the task queue by name", "return", "self", ".", "list", "(", "filters", "=", "{", "\"name\"", ":", "name", "}", ")", "[", "0", "]" ]
Deregister a service with the local consul agent
def deregister ( self , id ) : r = requests . put ( '{}/{}' . format ( self . url_deregister , id ) ) if r . status_code != 200 : raise consulDeregistrationError ( 'PUT returned {}' . format ( r . status_code ) ) return r
6,164
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L106-L112
[ "def", "get", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ")", ":", "# Validate arguments - use an xor", "if", "not", "(", "id", "is", "None", ")", "^", "(", "name", "is", "None", ")", ":", "raise", "ValueError", "(", "\"Either id or name must be set (but not both!)\"", ")", "# If it's just ID provided, call the parent function", "if", "id", "is", "not", "None", ":", "return", "super", "(", "TaskQueueManager", ",", "self", ")", ".", "get", "(", "id", "=", "id", ")", "# Try getting the task queue by name", "return", "self", ".", "list", "(", "filters", "=", "{", "\"name\"", ":", "name", "}", ")", "[", "0", "]" ]
Info about a given service
def info ( self , name ) : r = requests . get ( '{}/{}' . format ( self . url_service , name ) ) return r . json ( )
6,165
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L119-L122
[ "def", "mergeDQarray", "(", "maskname", ",", "dqarr", ")", ":", "maskarr", "=", "None", "if", "maskname", "is", "not", "None", ":", "if", "isinstance", "(", "maskname", ",", "str", ")", ":", "# working with file on disk (default case)", "if", "os", ".", "path", ".", "exists", "(", "maskname", ")", ":", "mask", "=", "fileutil", ".", "openImage", "(", "maskname", ",", "memmap", "=", "False", ")", "maskarr", "=", "mask", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "mask", ".", "close", "(", ")", "else", ":", "if", "isinstance", "(", "maskname", ",", "fits", ".", "HDUList", ")", ":", "# working with a virtual input file", "maskarr", "=", "maskname", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "else", ":", "maskarr", "=", "maskname", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "if", "maskarr", "is", "not", "None", ":", "# merge array with dqarr now", "np", ".", "bitwise_and", "(", "dqarr", ",", "maskarr", ",", "dqarr", ")" ]
star this repo
def star ( self ) -> snug . Query [ bool ] : req = snug . PUT ( BASE + f'/user/starred/{self.owner}/{self.name}' ) return ( yield req ) . status_code == 204
6,166
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/relations.py#L15-L18
[ "def", "serverinfo", "(", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "data", "=", "_wget", "(", "'serverinfo'", ",", "{", "}", ",", "url", ",", "timeout", "=", "timeout", ")", "if", "data", "[", "'res'", "]", "is", "False", ":", "return", "{", "'error'", ":", "data", "[", "'msg'", "]", "}", "ret", "=", "{", "}", "data", "[", "'msg'", "]", ".", "pop", "(", "0", ")", "for", "line", "in", "data", "[", "'msg'", "]", ":", "tmp", "=", "line", ".", "split", "(", "':'", ")", "ret", "[", "tmp", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "tmp", "[", "1", "]", ".", "strip", "(", ")", "return", "ret" ]
This quickly builds a time - stamped message . If ts is None the current time is used .
def device_message ( device , code , ts = None , origin = None , type = None , severity = None , title = None , description = None , hint = None , * * metaData ) : # pylint: disable=redefined-builtin, too-many-arguments if ts is None : ts = local_now ( ) payload = MessagePayload ( device = device ) payload . messages . append ( Message ( code = code , ts = ts , origin = origin , type = type , severity = severity , title = title , description = description , hint = hint , * * metaData ) ) return dumps ( payload )
6,167
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/message.py#L120-L148
[ "def", "get_all_usb_devices", "(", "idVendor", ",", "idProduct", ")", ":", "all_dev", "=", "list", "(", "usb", ".", "core", ".", "find", "(", "find_all", "=", "True", ",", "idVendor", "=", "idVendor", ",", "idProduct", "=", "idProduct", ")", ")", "for", "dev", "in", "all_dev", ":", "try", ":", "dev", ".", "detach_kernel_driver", "(", "0", ")", "except", "usb", ".", "USBError", ":", "pass", "return", "all_dev" ]
Dump object to file .
def _dump ( obj , abspath , serializer_type , dumper_func = None , compress = True , overwrite = False , verbose = False , * * kwargs ) : _check_serializer_type ( serializer_type ) if not inspect . isfunction ( dumper_func ) : raise TypeError ( "dumper_func has to be a function take object as input " "and return binary!" ) prt_console ( "\nDump to '%s' ..." % abspath , verbose ) if os . path . exists ( abspath ) : if not overwrite : prt_console ( " Stop! File exists and overwrite is not allowed" , verbose , ) return st = time . clock ( ) b_or_str = dumper_func ( obj , * * kwargs ) if serializer_type is "str" : b = b_or_str . encode ( "utf-8" ) else : b = b_or_str if compress : b = zlib . compress ( b ) with atomic_write ( abspath , overwrite = overwrite , mode = "wb" ) as f : f . write ( b ) elapsed = time . clock ( ) - st prt_console ( " Complete! Elapse %.6f sec." % elapsed , verbose ) if serializer_type is "str" : return b_or_str else : return b
6,168
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/obj_file_io.py#L80-L145
[ "def", "AND", "(", "queryArr", ",", "exclude", "=", "None", ")", ":", "assert", "isinstance", "(", "queryArr", ",", "list", ")", ",", "\"provided argument as not a list\"", "assert", "len", "(", "queryArr", ")", ">", "0", ",", "\"queryArr had an empty list\"", "q", "=", "CombinedQuery", "(", ")", "q", ".", "setQueryParam", "(", "\"$and\"", ",", "[", "]", ")", "for", "item", "in", "queryArr", ":", "assert", "isinstance", "(", "item", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"item in the list was not a CombinedQuery or BaseQuery instance\"", "q", ".", "getQuery", "(", ")", "[", "\"$and\"", "]", ".", "append", "(", "item", ".", "getQuery", "(", ")", ")", "if", "exclude", "!=", "None", ":", "assert", "isinstance", "(", "exclude", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"exclude parameter was not a CombinedQuery or BaseQuery instance\"", "q", ".", "setQueryParam", "(", "\"$not\"", ",", "exclude", ".", "getQuery", "(", ")", ")", "return", "q" ]
load object from file .
def _load ( abspath , serializer_type , loader_func = None , decompress = True , verbose = False , * * kwargs ) : _check_serializer_type ( serializer_type ) if not inspect . isfunction ( loader_func ) : raise TypeError ( "loader_func has to be a function take binary as input " "and return an object!" ) prt_console ( "\nLoad from '%s' ..." % abspath , verbose ) if not os . path . exists ( abspath ) : raise ValueError ( "'%s' doesn't exist." % abspath ) st = time . clock ( ) with open ( abspath , "rb" ) as f : b = f . read ( ) if decompress : b = zlib . decompress ( b ) if serializer_type is "str" : obj = loader_func ( b . decode ( "utf-8" ) , * * kwargs ) else : obj = loader_func ( b , * * kwargs ) elapsed = time . clock ( ) - st prt_console ( " Complete! Elapse %.6f sec." % elapsed , verbose ) return obj
6,169
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/obj_file_io.py#L148-L196
[ "def", "defBoundary", "(", "self", ")", ":", "self", ".", "BoroCnstNatAll", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "# Find the natural borrowing constraint conditional on next period's state", "for", "j", "in", "range", "(", "self", ".", "StateCount", ")", ":", "PermShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "1", "]", ")", "TranShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "2", "]", ")", "self", ".", "BoroCnstNatAll", "[", "j", "]", "=", "(", "self", ".", "solution_next", ".", "mNrmMin", "[", "j", "]", "-", "TranShkMinNext", ")", "*", "(", "self", ".", "PermGroFac_list", "[", "j", "]", "*", "PermShkMinNext", ")", "/", "self", ".", "Rfree_list", "[", "j", "]", "self", ".", "BoroCnstNat_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "mNrmMin_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "BoroCnstDependency", "=", "np", ".", "zeros", "(", "(", "self", ".", "StateCount", ",", "self", ".", "StateCount", ")", ")", "+", "np", ".", "nan", "# The natural borrowing constraint in each current state is the *highest*", "# among next-state-conditional natural borrowing constraints that could", "# occur from this current state.", "for", "i", "in", "range", "(", "self", ".", "StateCount", ")", ":", "possible_next_states", "=", "self", ".", "MrkvArray", "[", "i", ",", ":", "]", ">", "0", "self", ".", "BoroCnstNat_list", "[", "i", "]", "=", "np", ".", "max", "(", "self", ".", "BoroCnstNatAll", "[", "possible_next_states", "]", ")", "# Explicitly handle the \"None\" case: ", "if", "self", ".", "BoroCnstArt", "is", "None", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "else", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "np", ".", "max", "(", "[", "self", ".", 
"BoroCnstNat_list", "[", "i", "]", ",", "self", ".", "BoroCnstArt", "]", ")", "self", ".", "BoroCnstDependency", "[", "i", ",", ":", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "==", "self", ".", "BoroCnstNatAll" ]
Helper method for wrapping API requests mainly for catching errors in one place .
def _get_response ( self , method , endpoint , data = None ) : url = urljoin ( IVONA_REGION_ENDPOINTS [ self . region ] , endpoint ) response = getattr ( self . session , method ) ( url , json = data , ) if 'x-amzn-ErrorType' in response . headers : raise IvonaAPIException ( response . headers [ 'x-amzn-ErrorType' ] ) if response . status_code != requests . codes . ok : raise IvonaAPIException ( "Something wrong happened: {}" . format ( response . json ( ) ) ) return response
6,170
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L88-L116
[ "def", "toc", "(", "self", ")", ":", "elapsed", "=", "self", ".", "_time", "(", ")", "-", "self", ".", "tstart", "if", "self", ".", "verbose", ":", "self", ".", "write", "(", "'...toc(%r)=%.4fs\\n'", "%", "(", "self", ".", "label", ",", "elapsed", ")", ")", "self", ".", "flush", "(", ")", "return", "elapsed" ]
Returns a list of available voices via ListVoices endpoint
def get_available_voices ( self , language = None , gender = None ) : endpoint = 'ListVoices' data = dict ( ) if language : data . update ( { 'Voice' : { 'Language' : language } } ) if gender : data . update ( { 'Voice' : { 'Gender' : gender } } ) print ( data ) response = self . _get_response ( 'get' , endpoint , data ) return response . json ( ) [ 'Voices' ]
6,171
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L118-L143
[ "def", "_checksum", "(", "self", ",", "msg", ")", ":", "def", "carry_around_add", "(", "a", ",", "b", ")", ":", "c", "=", "a", "+", "b", "return", "(", "c", "&", "0xffff", ")", "+", "(", "c", ">>", "16", ")", "s", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "msg", ")", ",", "2", ")", ":", "w", "=", "(", "msg", "[", "i", "]", "<<", "8", ")", "+", "msg", "[", "i", "+", "1", "]", "s", "=", "carry_around_add", "(", "s", ",", "w", ")", "s", "=", "~", "s", "&", "0xffff", "return", "s" ]
Saves given text synthesized audio file via CreateSpeech endpoint
def text_to_speech ( self , text , file , voice_name = None , language = None ) : endpoint = 'CreateSpeech' data = { 'Input' : { 'Data' : text , } , 'OutputFormat' : { 'Codec' : self . codec . upper ( ) , } , 'Parameters' : { 'Rate' : self . rate , 'Volume' : self . volume , 'SentenceBreak' : self . sentence_break , 'ParagraphBreak' : self . paragraph_break , } , 'Voice' : { 'Name' : voice_name or self . voice_name , 'Language' : language or self . language , } , } response = self . _get_response ( 'post' , endpoint , data ) file . write ( response . content )
6,172
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L145-L184
[ "def", "deletecols", "(", "X", ",", "cols", ")", ":", "if", "isinstance", "(", "cols", ",", "str", ")", ":", "cols", "=", "cols", ".", "split", "(", "','", ")", "retain", "=", "[", "n", "for", "n", "in", "X", ".", "dtype", ".", "names", "if", "n", "not", "in", "cols", "]", "if", "len", "(", "retain", ")", ">", "0", ":", "return", "X", "[", "retain", "]", "else", ":", "return", "None" ]
Create an instance of ConfigCatClient and setup Auto Poll mode with custom options
def create_client_with_auto_poll ( api_key , poll_interval_seconds = 60 , max_init_wait_time_seconds = 5 , on_configuration_changed_callback = None , config_cache_class = None , base_url = None ) : if api_key is None : raise ConfigCatClientException ( 'API Key is required.' ) if poll_interval_seconds < 1 : poll_interval_seconds = 1 if max_init_wait_time_seconds < 0 : max_init_wait_time_seconds = 0 return ConfigCatClient ( api_key , poll_interval_seconds , max_init_wait_time_seconds , on_configuration_changed_callback , 0 , config_cache_class , base_url )
6,173
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L14-L39
[ "def", "_sumterm_prime", "(", "lexer", ")", ":", "tok", "=", "next", "(", "lexer", ")", "# '|' XORTERM SUMTERM'", "if", "isinstance", "(", "tok", ",", "OP_or", ")", ":", "xorterm", "=", "_xorterm", "(", "lexer", ")", "sumterm_prime", "=", "_sumterm_prime", "(", "lexer", ")", "if", "sumterm_prime", "is", "None", ":", "return", "xorterm", "else", ":", "return", "(", "'or'", ",", "xorterm", ",", "sumterm_prime", ")", "# null", "else", ":", "lexer", ".", "unpop_token", "(", "tok", ")", "return", "None" ]
Create an instance of ConfigCatClient and setup Lazy Load mode with custom options
def create_client_with_lazy_load ( api_key , cache_time_to_live_seconds = 60 , config_cache_class = None , base_url = None ) : if api_key is None : raise ConfigCatClientException ( 'API Key is required.' ) if cache_time_to_live_seconds < 1 : cache_time_to_live_seconds = 1 return ConfigCatClient ( api_key , 0 , 0 , None , cache_time_to_live_seconds , config_cache_class , base_url )
6,174
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L42-L60
[ "def", "unmatched", "(", "match", ")", ":", "start", ",", "end", "=", "match", ".", "span", "(", "0", ")", "return", "match", ".", "string", "[", ":", "start", "]", "+", "match", ".", "string", "[", "end", ":", "]" ]
Create an instance of ConfigCatClient and setup Manual Poll mode with custom options
def create_client_with_manual_poll ( api_key , config_cache_class = None , base_url = None ) : if api_key is None : raise ConfigCatClientException ( 'API Key is required.' ) return ConfigCatClient ( api_key , 0 , 0 , None , 0 , config_cache_class , base_url )
6,175
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L63-L77
[ "def", "_group_chunks_by_entities", "(", "self", ",", "chunks", ",", "entities", ")", ":", "for", "entity", "in", "entities", ":", "chunks_to_concat", "=", "chunks", ".", "get_overlaps", "(", "entity", "[", "'beginOffset'", "]", ",", "len", "(", "entity", "[", "'content'", "]", ")", ")", "if", "not", "chunks_to_concat", ":", "continue", "new_chunk_word", "=", "u''", ".", "join", "(", "[", "chunk", ".", "word", "for", "chunk", "in", "chunks_to_concat", "]", ")", "new_chunk", "=", "Chunk", "(", "new_chunk_word", ")", "chunks", ".", "swap", "(", "chunks_to_concat", ",", "new_chunk", ")", "return", "chunks" ]
decorator factory for NS queries
def basic_query ( returns ) : return compose ( reusable , map_send ( parse_request ) , map_yield ( prepare_params , snug . prefix_adder ( API_PREFIX ) ) , map_return ( loads ( returns ) ) , oneyield , )
6,176
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L43-L51
[ "def", "read_file", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "whole", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "whole", ".", "startswith", "(", "'---'", ")", ":", "# may has yaml meta info, so we try to split it out", "sp", "=", "re", ".", "split", "(", "r'-{3,}'", ",", "whole", ".", "lstrip", "(", "'-'", ")", ",", "maxsplit", "=", "1", ")", "if", "len", "(", "sp", ")", "==", "2", ":", "# do have yaml meta info, so we read it", "return", "yaml", ".", "load", "(", "sp", "[", "0", "]", ")", ",", "sp", "[", "1", "]", ".", "lstrip", "(", ")", "return", "{", "}", ",", "whole" ]
departures for a station
def departures ( station : str ) -> snug . Query [ t . List [ Departure ] ] : return snug . GET ( 'avt' , params = { 'station' : station } )
6,177
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L61-L63
[ "def", "bam_conversion", "(", "job", ",", "samfile", ",", "sample_type", ",", "univ_options", ",", "samtools_options", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "input_files", "=", "{", "sample_type", "+", "'.sam'", ":", "samfile", "}", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "True", ")", "bamfile", "=", "'/'", ".", "join", "(", "[", "work_dir", ",", "sample_type", "+", "'.bam'", "]", ")", "parameters", "=", "[", "'view'", ",", "'-bS'", ",", "'-o'", ",", "docker_path", "(", "bamfile", ")", ",", "input_files", "[", "sample_type", "+", "'.sam'", "]", "]", "docker_call", "(", "tool", "=", "'samtools'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "dockerhub", "=", "univ_options", "[", "'dockerhub'", "]", ",", "tool_version", "=", "samtools_options", "[", "'version'", "]", ")", "output_file", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "bamfile", ")", "# The samfile is no longer useful so delete it", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "samfile", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Ran sam2bam on %s:%s successfully'", "%", "(", "univ_options", "[", "'patient'", "]", ",", "sample_type", ")", ")", "return", "output_file" ]
journey recommendations from an origin to a destination station
def journey_options ( origin : str , destination : str , via : t . Optional [ str ] = None , before : t . Optional [ int ] = None , after : t . Optional [ int ] = None , time : t . Optional [ datetime ] = None , hsl : t . Optional [ bool ] = None , year_card : t . Optional [ bool ] = None ) -> ( snug . Query [ t . List [ Journey ] ] ) : return snug . GET ( 'treinplanner' , params = { 'fromStation' : origin , 'toStation' : destination , 'viaStation' : via , 'previousAdvices' : before , 'nextAdvices' : after , 'dateTime' : time , 'hslAllowed' : hsl , 'yearCard' : year_card , } )
6,178
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L67-L86
[ "def", "_filters_pb", "(", "self", ")", ":", "num_filters", "=", "len", "(", "self", ".", "_field_filters", ")", "if", "num_filters", "==", "0", ":", "return", "None", "elif", "num_filters", "==", "1", ":", "return", "_filter_pb", "(", "self", ".", "_field_filters", "[", "0", "]", ")", "else", ":", "composite_filter", "=", "query_pb2", ".", "StructuredQuery", ".", "CompositeFilter", "(", "op", "=", "enums", ".", "StructuredQuery", ".", "CompositeFilter", ".", "Operator", ".", "AND", ",", "filters", "=", "[", "_filter_pb", "(", "filter_", ")", "for", "filter_", "in", "self", ".", "_field_filters", "]", ",", ")", "return", "query_pb2", ".", "StructuredQuery", ".", "Filter", "(", "composite_filter", "=", "composite_filter", ")" ]
Generate fixed - length random string from your allowed character pool .
def rand_str ( length , allowed = CHARSET_ALPHA_DIGITS ) : res = list ( ) for _ in range ( length ) : res . append ( random . choice ( allowed ) ) return "" . join ( res )
6,179
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L25-L40
[ "def", "_ParseKeysFromFindSpecs", "(", "self", ",", "parser_mediator", ",", "win_registry", ",", "find_specs", ")", ":", "searcher", "=", "dfwinreg_registry_searcher", ".", "WinRegistrySearcher", "(", "win_registry", ")", "for", "registry_key_path", "in", "iter", "(", "searcher", ".", "Find", "(", "find_specs", "=", "find_specs", ")", ")", ":", "if", "parser_mediator", ".", "abort", ":", "break", "registry_key", "=", "searcher", ".", "GetKeyByPath", "(", "registry_key_path", ")", "self", ".", "_ParseKey", "(", "parser_mediator", ",", "registry_key", ")" ]
Gererate fixed - length random hexstring usually for md5 .
def rand_hexstr ( length , lower = True ) : if lower : return rand_str ( length , allowed = CHARSET_HEXSTR_LOWER ) else : return rand_str ( length , allowed = CHARSET_HEXSTR_UPPER )
6,180
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L43-L52
[ "def", "remove_selection", "(", "self", ",", "sel", ")", ":", "self", ".", "_selections", ".", "remove", "(", "sel", ")", "# <artist>.figure will be unset so we save them first.", "figures", "=", "{", "artist", ".", "figure", "for", "artist", "in", "[", "sel", ".", "annotation", "]", "+", "sel", ".", "extras", "}", "# ValueError is raised if the artist has already been removed.", "with", "suppress", "(", "ValueError", ")", ":", "sel", ".", "annotation", ".", "remove", "(", ")", "for", "artist", "in", "sel", ".", "extras", ":", "with", "suppress", "(", "ValueError", ")", ":", "artist", ".", "remove", "(", ")", "for", "cb", "in", "self", ".", "_callbacks", "[", "\"remove\"", "]", ":", "cb", "(", "sel", ")", "for", "figure", "in", "figures", ":", "figure", ".", "canvas", ".", "draw_idle", "(", ")" ]
Generate fixed - length random alpha only string .
def rand_alphastr ( length , lower = True , upper = True ) : if lower is True and upper is True : return rand_str ( length , allowed = string . ascii_letters ) if lower is True and upper is False : return rand_str ( length , allowed = string . ascii_lowercase ) if lower is False and upper is True : return rand_str ( length , allowed = string . ascii_uppercase ) else : raise Exception
6,181
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L55-L65
[ "def", "terminate", "(", "self", ",", "devices", ")", ":", "for", "device", "in", "devices", ":", "self", ".", "logger", ".", "info", "(", "'Terminating: %s'", ",", "device", ".", "id", ")", "try", ":", "device", ".", "delete", "(", ")", "except", "packet", ".", "baseapi", ".", "Error", ":", "raise", "PacketManagerException", "(", "'Unable to terminate instance \"{}\"'", ".", "format", "(", "device", ".", "id", ")", ")" ]
Random article text .
def rand_article ( num_p = ( 4 , 10 ) , num_s = ( 2 , 15 ) , num_w = ( 5 , 40 ) ) : article = list ( ) for _ in range ( random . randint ( * num_p ) ) : p = list ( ) for _ in range ( random . randint ( * num_s ) ) : s = list ( ) for _ in range ( random . randint ( * num_w ) ) : s . append ( rand_str ( random . randint ( 1 , 15 ) , string . ascii_lowercase ) ) p . append ( " " . join ( s ) ) article . append ( ". " . join ( p ) ) return "\n\n" . join ( article )
6,182
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L120-L138
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
this method resolves dependencies for the given key . call the method afther the item key was added to the list of avalable items
def _resolve_dep ( self , key ) : if key in self . future_values_key_dep : # there are some dependencies that can be resoled dep_list = self . future_values_key_dep [ key ] del self . future_values_key_dep [ key ] # remove dependencies also_finish = [ ] # iterate over the dependencies that can now be resoled for dep in dep_list : if self . __resolve_dep_helper ( dep , key ) is True : also_finish . append ( dep ) # maybe the resolving process leed to new deps that can be resolved for dep in also_finish : self . _resolve_dep ( dep )
6,183
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/tools/parser.py#L132-L148
[ "def", "setup_main_logger", "(", "file_logging", "=", "True", ",", "console", "=", "True", ",", "path", ":", "Optional", "[", "str", "]", "=", "None", ",", "level", "=", "logging", ".", "INFO", ")", ":", "if", "file_logging", "and", "console", ":", "log_config", "=", "LOGGING_CONFIGS", "[", "\"file_console\"", "]", "# type: ignore", "elif", "file_logging", ":", "log_config", "=", "LOGGING_CONFIGS", "[", "\"file_only\"", "]", "elif", "console", ":", "log_config", "=", "LOGGING_CONFIGS", "[", "\"console_only\"", "]", "else", ":", "log_config", "=", "LOGGING_CONFIGS", "[", "\"none\"", "]", "if", "path", ":", "log_config", "[", "\"handlers\"", "]", "[", "\"rotating\"", "]", "[", "\"filename\"", "]", "=", "path", "# type: ignore", "for", "_", ",", "handler_config", "in", "log_config", "[", "'handlers'", "]", ".", "items", "(", ")", ":", "# type: ignore", "handler_config", "[", "'level'", "]", "=", "level", "logging", ".", "config", ".", "dictConfig", "(", "log_config", ")", "# type: ignore", "def", "exception_hook", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ":", "if", "is_python34", "(", ")", ":", "# Python3.4 does not seem to handle logger.exception() well", "import", "traceback", "traceback", "=", "\"\"", ".", "join", "(", "traceback", ".", "format_tb", "(", "exc_traceback", ")", ")", "+", "exc_type", ".", "name", "logging", ".", "error", "(", "\"Uncaught exception\\n%s\"", ",", "traceback", ")", "else", ":", "logging", ".", "exception", "(", "\"Uncaught exception\"", ",", "exc_info", "=", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ")", "sys", ".", "excepthook", "=", "exception_hook" ]
get al list of all dependencies for the given item dep
def _get_all_refs ( self , dep , handled_refs = None ) : if handled_refs is None : handled_refs = [ dep ] else : if dep in handled_refs : return [ ] res = [ ] if dep in self . future_values_key_item : res . extend ( self . future_values_key_item [ dep ] [ "dependencies" ] . values ( ) ) add = [ ] for h_d in res : add . extend ( self . _get_all_refs ( h_d , handled_refs ) ) res . extend ( add ) return list ( set ( res ) )
6,184
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/tools/parser.py#L189-L206
[ "def", "point_stokes", "(", "self", ",", "context", ")", ":", "stokes", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "stokes", "[", ":", ",", ":", ",", "0", "]", "=", "1", "stokes", "[", ":", ",", ":", ",", "1", ":", "4", "]", "=", "0", "return", "stokes" ]
check for errors
def parse ( response ) : if response . status_code == 400 : try : msg = json . loads ( response . content ) [ 'message' ] except ( KeyError , ValueError ) : msg = '' raise ApiError ( msg ) return response
6,185
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/github/query.py#L68-L76
[ "def", "_read_single_point_data", "(", "self", ")", ":", "temp_dict", "=", "read_pattern", "(", "self", ".", "text", ",", "{", "\"final_energy\"", ":", "r\"\\s*SCF\\s+energy in the final basis set\\s+=\\s*([\\d\\-\\.]+)\"", "}", ")", "if", "temp_dict", ".", "get", "(", "'final_energy'", ")", "==", "None", ":", "self", ".", "data", "[", "'final_energy'", "]", "=", "None", "else", ":", "# -1 in case of pcm", "# Two lines will match the above; we want final calculation", "self", ".", "data", "[", "'final_energy'", "]", "=", "float", "(", "temp_dict", ".", "get", "(", "'final_energy'", ")", "[", "-", "1", "]", "[", "0", "]", ")" ]
Initiate a connection to a specific device .
def create ( cls , host = 'localhost' , port = 14999 , auto_reconnect = True , loop = None , protocol_class = AVR , update_callback = None ) : assert port >= 0 , 'Invalid port value: %r' % ( port ) conn = cls ( ) conn . host = host conn . port = port conn . _loop = loop or asyncio . get_event_loop ( ) conn . _retry_interval = 1 conn . _closed = False conn . _closing = False conn . _halted = False conn . _auto_reconnect = auto_reconnect def connection_lost ( ) : """Function callback for Protocoal class when connection is lost.""" if conn . _auto_reconnect and not conn . _closing : ensure_future ( conn . _reconnect ( ) , loop = conn . _loop ) conn . protocol = protocol_class ( connection_lost_callback = connection_lost , loop = conn . _loop , update_callback = update_callback ) yield from conn . _reconnect ( ) return conn
6,186
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L23-L76
[ "def", "get_info", "(", "self", ")", ":", "self", ".", "render", "(", ")", "info", "=", "super", "(", "Template", ",", "self", ")", ".", "get_info", "(", ")", "res", "=", "{", "}", "res", "[", "'name'", "]", "=", "self", ".", "get_name", "(", ")", "res", "[", "'mutation'", "]", "=", "{", "'current_index'", ":", "self", ".", "_current_index", ",", "'total_number'", ":", "self", ".", "num_mutations", "(", ")", "}", "res", "[", "'value'", "]", "=", "{", "'rendered'", ":", "{", "'base64'", ":", "b64encode", "(", "self", ".", "_current_rendered", ".", "tobytes", "(", ")", ")", ".", "decode", "(", ")", ",", "'length_in_bytes'", ":", "len", "(", "self", ".", "_current_rendered", ".", "tobytes", "(", ")", ")", ",", "}", "}", "res", "[", "'hash'", "]", "=", "self", ".", "hash", "(", ")", "res", "[", "'field'", "]", "=", "info", "return", "res" ]
Close the AVR device connection and don t try to reconnect .
def close ( self ) : self . log . warning ( 'Closing connection to AVR' ) self . _closing = True if self . protocol . transport : self . protocol . transport . close ( )
6,187
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L117-L122
[ "def", "apply_actions", "(", "self", ",", "actions", ")", ":", "modified", "=", "[", "]", "for", "a", "in", "actions", ":", "if", "\"dict\"", "in", "a", ":", "k", "=", "a", "[", "\"dict\"", "]", "modified", ".", "append", "(", "k", ")", "self", ".", "feffinp", "[", "k", "]", "=", "self", ".", "modify_object", "(", "a", "[", "\"action\"", "]", ",", "self", ".", "feffinp", "[", "k", "]", ")", "elif", "\"file\"", "in", "a", ":", "self", ".", "modify", "(", "a", "[", "\"action\"", "]", ",", "a", "[", "\"file\"", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Unrecognized format: {}\"", ".", "format", "(", "a", ")", ")", "if", "modified", ":", "feff", "=", "self", ".", "feffinp", "feff_input", "=", "\"\\n\\n\"", ".", "join", "(", "str", "(", "feff", "[", "k", "]", ")", "for", "k", "in", "[", "\"HEADER\"", ",", "\"PARAMETERS\"", ",", "\"POTENTIALS\"", ",", "\"ATOMS\"", "]", "if", "k", "in", "feff", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "feff", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "'.'", ",", "k", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "v", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "'.'", ",", "\"feff.inp\"", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "feff_input", ")" ]
Compress object to bytes .
def _compress_obj ( obj , level ) : return zlib . compress ( pickle . dumps ( obj , protocol = 2 ) , level )
6,188
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L34-L37
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Compress anything to bytes or string .
def compress ( obj , level = 6 , return_type = "bytes" ) : if isinstance ( obj , binary_type ) : b = _compress_bytes ( obj , level ) elif isinstance ( obj , string_types ) : b = _compress_str ( obj , level ) else : b = _compress_obj ( obj , level ) if return_type == "bytes" : return b elif return_type == "str" : return base64 . b64encode ( b ) . decode ( "utf-8" ) else : raise ValueError ( "'return_type' has to be one of 'bytes', 'str'!" )
6,189
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L52-L73
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
De - compress it to it s original .
def decompress ( obj , return_type = "bytes" ) : if isinstance ( obj , binary_type ) : b = zlib . decompress ( obj ) elif isinstance ( obj , string_types ) : b = zlib . decompress ( base64 . b64decode ( obj . encode ( "utf-8" ) ) ) else : raise TypeError ( "input cannot be anything other than str and bytes!" ) if return_type == "bytes" : return b elif return_type == "str" : return b . decode ( "utf-8" ) elif return_type == "obj" : return pickle . loads ( b ) else : raise ValueError ( "'return_type' has to be one of 'bytes', 'str' or 'obj'!" )
6,190
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L76-L99
[ "def", "read_td_job", "(", "job_id", ",", "engine", ",", "index_col", "=", "None", ",", "parse_dates", "=", "None", ")", ":", "# get job", "job", "=", "engine", ".", "connection", ".", "client", ".", "job", "(", "job_id", ")", "# result", "r", "=", "engine", ".", "get_result", "(", "job", ",", "wait", "=", "True", ")", "return", "r", ".", "to_dataframe", "(", "index_col", "=", "index_col", ",", "parse_dates", "=", "parse_dates", ")" ]
Build the Signature template for use with the Authorization header .
def build_signature_template ( key_id , algorithm , headers ) : param_map = { 'keyId' : key_id , 'algorithm' : algorithm , 'signature' : '%s' } if headers : headers = [ h . lower ( ) for h in headers ] param_map [ 'headers' ] = ' ' . join ( headers ) kv = map ( '{0[0]}="{0[1]}"' . format , param_map . items ( ) ) kv_string = ',' . join ( kv ) sig_string = 'Signature {0}' . format ( kv_string ) return sig_string
6,191
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/utils.py#L115-L135
[ "def", "on_exception", "(", "self", ",", "exception", ")", ":", "logger", ".", "error", "(", "'Exception from stream!'", ",", "exc_info", "=", "True", ")", "self", ".", "streaming_exception", "=", "exception" ]
Feed data into database .
def train ( self , data , key_id , key_lat , key_lng , clear_old = True ) : engine , t_point = self . engine , self . t_point if clear_old : try : t_point . drop ( engine ) except : pass t_point . create ( engine ) table_data = list ( ) for record in data : id = key_id ( record ) lat = key_lat ( record ) lng = key_lng ( record ) row = { "id" : id , "lat" : lat , "lng" : lng , "data" : record } table_data . append ( row ) ins = t_point . insert ( ) engine . execute ( ins , table_data ) index = Index ( 'idx_lat_lng' , t_point . c . lat , t_point . c . lng ) index . create ( engine )
6,192
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/geo_search.py#L55-L95
[ "def", "create_alarm_subscription", "(", "self", ",", "on_data", "=", "None", ",", "timeout", "=", "60", ")", ":", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'alarms'", ")", "# Represent subscription as a future", "subscription", "=", "AlarmSubscription", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_alarm_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ",", "processor", "=", "self", ".", "_processor", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
Find n nearest point within certain distance from a point .
def find_n_nearest ( self , lat , lng , n = 5 , radius = None ) : engine , t_point = self . engine , self . t_point if radius : # Use a simple box filter to minimize candidates # Define latitude longitude boundary dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos ( lat ) * 69.172 lat_degr_rad = abs ( radius * 1.05 / dist_btwn_lat_deg ) lon_degr_rad = abs ( radius * 1.05 / dist_btwn_lon_deg ) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad filters = [ t_point . c . lat >= lat_lower , t_point . c . lat <= lat_upper , t_point . c . lat >= lng_lower , t_point . c . lat >= lng_upper , ] else : radius = 999999.9 filters = [ ] s = select ( [ t_point ] ) . where ( and_ ( * filters ) ) heap = list ( ) for row in engine . execute ( s ) : dist = great_circle ( ( lat , lng ) , ( row . lat , row . lng ) ) if dist <= radius : heap . append ( ( dist , row . data ) ) # Use heap sort to find top-K nearest n_nearest = heapq . nsmallest ( n , heap , key = lambda x : x [ 0 ] ) return n_nearest
6,193
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/geo_search.py#L97-L141
[ "def", "load_config", "(", "logdir", ")", ":", "# pylint: disable=missing-raises-doc", "config_path", "=", "logdir", "and", "os", ".", "path", ".", "join", "(", "logdir", ",", "'config.yaml'", ")", "if", "not", "config_path", "or", "not", "tf", ".", "gfile", ".", "Exists", "(", "config_path", ")", ":", "message", "=", "(", "'Cannot resume an existing run since the logging directory does not '", "'contain a configuration file.'", ")", "raise", "IOError", "(", "message", ")", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "config_path", ",", "'r'", ")", "as", "file_", ":", "config", "=", "yaml", ".", "load", "(", "file_", ",", "Loader", "=", "yaml", ".", "Loader", ")", "message", "=", "'Resume run and write summaries and checkpoints to {}.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "config", ".", "logdir", ")", ")", "return", "config" ]
this method is especially troublesome i do not reccommend making any changes to it you may notice it uplicates code fro smappdragon there is no way around this as far as i can tell it really might screw up a lot of stuff stip tweets has been purposely omitted as it isnt supported in pysmap
def sample ( self , k ) : def new_get_iterators ( ) : tweet_parser = smappdragon . TweetParser ( ) it = iter ( self . get_collection_iterators ( ) ) sample = list ( itertools . islice ( it , k ) ) random . shuffle ( sample ) for i , item in enumerate ( it , start = k + 1 ) : j = random . randrange ( i ) if j < k : sample [ j ] = item for tweet in sample : if all ( [ collection . limit != 0 and collection . limit <= count for collection in self . collections ] ) : return elif all ( [ tweet_parser . tweet_passes_filter ( collection . filter , tweet ) and tweet_parser . tweet_passes_custom_filter_list ( collection . custom_filters , tweet ) for collection in self . collections ] ) : yield tweet cp = copy . deepcopy ( self ) cp . get_collection_iterators = new_get_iterators return cp
6,194
https://github.com/SMAPPNYU/pysmap/blob/eb871992f40c53125129535e871525d5623c8c2d/pysmap/twitterutil/smapp_dataset.py#L451-L478
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_done_event", ".", "wait", "(", "MAXINT", ")", "return", "self", ".", "_status", ",", "self", ".", "_exception" ]
Merge networks into a larger network .
def merge_networks ( output_file = "merged_network.txt" , * files ) : contacts = dict ( ) for network_file in files : with open ( network_file ) as network_file_handle : for line in network_file_handle : id_a , id_b , n_contacts = line . split ( "\t" ) pair = sorted ( ( id_a , id_b ) ) try : contacts [ pair ] += n_contacts except KeyError : contacts [ pair ] = n_contacts sorted_contacts = sorted ( contacts ) with open ( output_file , "w" ) as output_handle : for index_pair in sorted_contacts : id_a , id_b = index_pair n_contacts = contacts [ index_pair ] output_handle . write ( "{}\t{}\t{}\n" . format ( id_a , id_b , n_contacts ) )
6,195
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L351-L388
[ "def", "filter", "(", "self", ",", "predicate", ")", "->", "'Observable'", ":", "source", "=", "self", "def", "subscribe", "(", "on_next", ")", ":", "def", "_next", "(", "x", ")", ":", "if", "predicate", "(", "x", ")", ":", "on_next", "(", "x", ")", "return", "source", ".", "subscribe", "(", "_next", ")", "return", "Observable", "(", "subscribe", ")" ]
Merge chunk data from different networks
def merge_chunk_data ( output_file = "merged_idx_contig_hit_size_cov.txt" , * files ) : chunks = dict ( ) for chunk_file in files : with open ( chunk_file ) as chunk_file_handle : for line in chunk_file_handle : chunk_id , chunk_name , hit , size , cov = line . split ( "\t" ) try : chunks [ chunk_id ] [ "hit" ] += hit chunks [ chunk_id ] [ "cov" ] += cov except KeyError : chunks [ chunk_id ] = { "name" : chunk_name , "hit" : hit , "size" : size , "cov" : cov , } sorted_chunks = sorted ( chunks ) with open ( output_file , "w" ) as output_handle : for chunk_id in sorted_chunks : my_chunk = chunks [ chunk_id ] name , hit , size , cov = ( my_chunk [ "name" ] , my_chunk [ "hit" ] , my_chunk [ "size" ] , my_chunk [ "cov" ] , ) my_line = "{}\t{}\t{}\t{}\t{}" . format ( chunk_id , name , hit , size , cov ) output_handle . write ( my_line )
6,196
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L391-L435
[ "def", "trim_docstring", "(", "docstring", ")", ":", "if", "not", "docstring", ":", "return", "''", "# If you've got a line longer than this you have other problems...", "max_indent", "=", "1", "<<", "29", "# Convert tabs to spaces (following the normal Python rules)", "# and split into a list of lines:", "lines", "=", "docstring", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "# Determine minimum indentation (first line doesn't count):", "indent", "=", "max_indent", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "stripped", "=", "line", ".", "lstrip", "(", ")", "if", "stripped", ":", "indent", "=", "min", "(", "indent", ",", "len", "(", "line", ")", "-", "len", "(", "stripped", ")", ")", "# Remove indentation (first line is special):", "trimmed", "=", "[", "lines", "[", "0", "]", ".", "strip", "(", ")", "]", "if", "indent", "<", "max_indent", ":", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "trimmed", ".", "append", "(", "line", "[", "indent", ":", "]", ".", "rstrip", "(", ")", ")", "# Strip off trailing and leading blank lines:", "while", "trimmed", "and", "not", "trimmed", "[", "-", "1", "]", ":", "trimmed", ".", "pop", "(", ")", "while", "trimmed", "and", "not", "trimmed", "[", "0", "]", ":", "trimmed", ".", "pop", "(", "0", ")", "# Return a single string:", "return", "'\\n'", ".", "join", "(", "trimmed", ")" ]
Generate reads from ambiguous alignment file
def alignment_to_reads(
    sam_merged, output_dir, parameters=DEFAULT_PARAMETERS, save_memory=True, *bin_fasta
):
    """Collect read names from a merged BAM whose alignments fall on chunks
    belonging to the given bins.

    Contigs are cut into fixed-size chunks (``parameters["chunk_size"]``);
    an alignment is attributed to the chunk covering its start position, and
    kept only if its mapping quality exceeds
    ``parameters["mapq_threshold"]`` and the chunk belongs to one of the
    *bin_fasta* files.

    Parameters
    ----------
    sam_merged : str or file-like
        Path to the merged alignment file, opened with pysam in "rb" mode.
    output_dir : str
        Directory where ``<bin>.readnames`` files are written when
        ``save_memory`` is True.
    parameters : dict
        Must provide "chunk_size" and "mapq_threshold" (coerced to int).
    save_memory : bool
        If True, stream matching read names straight to per-bin
        ``.readnames`` files and return the collection of bin file names;
        if False, accumulate a ``{read_name: [bin_file, ...]}`` dict in
        memory and return it.
    *bin_fasta : str or file-like
        FASTA files (one per bin) whose record ids name the chunks of
        interest.
    """

    # Just in case file objects are sent as input
    def get_file_string(file_thing):
        try:
            file_string = file_thing.name
        except AttributeError:
            file_string = str(file_thing)
        return file_string

    # Global set of chunks against which reads are required to
    # map - we store them in a tuple that keeps track of the
    # original bin each chunk came from so we can reattribute the reads later
    bin_chunks = set()
    for bin_file in bin_fasta:
        for record in SeqIO.parse(bin_file, "fasta"):
            bin_chunks.add((get_file_string(bin_file), record.id))

    chunk_size = int(parameters["chunk_size"])
    mapq_threshold = int(parameters["mapq_threshold"])

    def read_name(read):
        # Read pairs share the part of the query name before the first space.
        return read.query_name.split()[0]

    # Since reading a huge BAM file can take up a
    # lot of time and resources, we only do it once
    # but that requires opening fastq files for writing
    # as matching reads get detected along the
    # bam and keeping track of which ones are
    # currently open.

    def get_base_name(bin_file):
        # Strip the final extension from the bin file name and build the
        # path of the corresponding ".readnames" output file.
        base_name = ".".join(os.path.basename(bin_file).split(".")[:-1])
        output_path = os.path.join(output_dir, "{}.readnames".format(base_name))
        return output_path

    if save_memory:
        # bin_file -> open output handle for matching read names
        opened_files = dict()
    else:
        # read_name -> list of bin files the read was attributed to
        read_names = dict()

    with pysam.AlignmentFile(sam_merged, "rb") as alignment_merged_handle:
        # groupby assumes the BAM is ordered so that alignments of the same
        # read are consecutive — presumably name-sorted; TODO confirm.
        for (my_read_name, alignment_pool) in itertools.groupby(
            alignment_merged_handle, read_name
        ):
            for my_alignment in alignment_pool:
                relative_position = my_alignment.reference_start
                contig_name = my_alignment.reference_name
                # Integer division maps the start position to its chunk index.
                chunk_position = relative_position // chunk_size
                # The 'chunk name' is used to detect matching positions
                chunk_name = "{}_{}".format(contig_name, chunk_position)
                # But such matching positions have to map acceptably
                quality_test = my_alignment.mapping_quality > mapq_threshold
                for bin_file in bin_fasta:
                    chunk_tuple = (bin_file, chunk_name)
                    if chunk_tuple in bin_chunks and quality_test:
                        if save_memory:
                            output_path = get_base_name(bin_file)
                            try:
                                output_handle = opened_files[bin_file]
                            except KeyError:
                                # First hit on this bin: open its output file
                                # and keep the handle for subsequent writes.
                                output_handle = open(output_path, "w")
                                opened_files[bin_file] = output_handle
                            # FASTQ-style "@<name>" line, one per matching read
                            output_handle.write("@{}\n".format(my_read_name))
                        else:
                            try:
                                read_names[my_read_name].append(bin_file)
                            except KeyError:
                                read_names[my_read_name] = [bin_file]

    # NOTE(review): when save_memory is False, `opened_files` is never bound,
    # so this loop raises NameError — confirm against upstream; callers
    # apparently always use save_memory=True. (Indentation reconstructed from
    # a flattened source; the loop may have been guarded in the original.)
    for file_handle in opened_files.values():
        file_handle.close()

    # Return unpaired file names for pair_unpaired_reads() to process
    if save_memory:
        return opened_files.keys()
    else:
        return read_names
6,197
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L438-L574
[ "def", "pop_messages", "(", "self", ",", "type", "=", "None", ")", ":", "key", "=", "self", ".", "_msg_key", "messages", "=", "[", "]", "if", "type", "is", "None", ":", "messages", "=", "self", ".", "pop", "(", "key", ",", "[", "]", ")", "else", ":", "keep_messages", "=", "[", "]", "for", "msg", "in", "self", ".", "get", "(", "key", ",", "[", "]", ")", ":", "if", "msg", ".", "type", "==", "type", ":", "messages", ".", "append", "(", "msg", ")", "else", ":", "keep_messages", ".", "append", "(", "msg", ")", "if", "not", "keep_messages", "and", "key", "in", "self", ":", "del", "self", "[", "key", "]", "else", ":", "self", "[", "key", "]", "=", "keep_messages", "if", "messages", ":", "self", ".", "save", "(", ")", "return", "messages" ]
Method which assigns handler to the tag encountered before the current or else sets it to None
def ResetHandler(self, name):
    """Rewind the current handler after *name* is encountered.

    If *name* is one of the tags seen so far, the handler is reset to the
    one registered for the nearest preceding tag (skipping the tag right
    before *name*'s slot); if no preceding tag has a registered handler,
    or if *name* is the only tag on the stack, the handler becomes None.
    Tags not present in ``self.tags`` leave the handler untouched.
    """
    if name not in self.tags:
        return
    if len(self.tags) > 1:
        # Default to None, then search backwards from two positions before
        # the end of the tag stack for the first tag with a handler.
        self.handler = None
        for index in range(len(self.tags) - 2, -1, -1):
            candidate = self.tags[index]
            if candidate in self.structure:
                self.handler = self.structure[candidate]
                break
    else:
        # Only one tag on the stack: nothing to fall back to.
        self.handler = None
6,198
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Input/MxmlParser.py#L205-L225
[ "def", "create_keyspace_simple", "(", "name", ",", "replication_factor", ",", "durable_writes", "=", "True", ",", "connections", "=", "None", ")", ":", "_create_keyspace", "(", "name", ",", "durable_writes", ",", "'SimpleStrategy'", ",", "{", "'replication_factor'", ":", "replication_factor", "}", ",", "connections", "=", "connections", ")" ]
Returns a connection to a mongo - clusters .
def get_cluster(self, label):
    """Return a connection to the cluster registered under *label*.

    Scans the configured clusters in order and opens a connection to the
    first one whose 'label' entry matches. Raises AttributeError when no
    cluster carries that label.
    """
    matching = next(
        (entry for entry in self._clusters if entry['label'] == label),
        None,
    )
    if matching is None:
        raise AttributeError('No such cluster %s.' % label)
    return self._get_connection(matching)
6,199
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L26-L42
[ "def", "setOverlayTexelAspect", "(", "self", ",", "ulOverlayHandle", ",", "fTexelAspect", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexelAspect", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fTexelAspect", ")", "return", "result" ]