idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
229,800
def _fixWindowsPath ( dll ) : if sys . platform [ : 3 ] != 'win' : return # Nothing to do here pathToDll = os . path . dirname ( dll ) currentWindowsPath = os . getenv ( 'PATH' ) if pathToDll not in currentWindowsPath : # We will prepend the path, to avoid conflicts between DLLs newPath = pathToDll + ';' + currentWindowsPath os . putenv ( 'PATH' , newPath )
When the path to the DLL is not in the Windows search path, Windows will not be able to find other DLLs in the same directory, so we have to add it to the path. This function takes care of that.
110
45
229,801
def list_to_bytes_list(strList):
    """Turn a sequence of strings into a ctypes ``c_char_p`` array.

    Elements are encoded via ``str_to_bytes``; bytes elements are kept as
    they are.  ``None`` and already-converted pointer arrays pass through
    unchanged.

    :raises TypeError: if *strList* is not a list, set or tuple.
    """
    # Handle None before calling len(): the original computed the array type
    # first, which made list_to_bytes_list(None) raise TypeError instead of
    # returning None as documented.
    if strList is None:
        return strList
    arrayType = c_char_p * len(strList)
    # already a pointer array -> nothing to do
    if isinstance(strList, arrayType):
        return strList
    if not isinstance(strList, (list, set, tuple)):
        raise TypeError("strList must be list, set or tuple, not "
                        + str(type(strList)))
    pList = arrayType()
    for i, elem in enumerate(strList):
        pList[i] = str_to_bytes(elem)
    return pList
This function turns an array of strings into a pointer array with pointers pointing to the encodings of those strings Possibly contained bytes are kept as they are .
147
31
229,802
def check_strings(strings, arrays):
    """Decorator factory: coerce selected positional arguments to bytes.

    *strings* and *arrays* are positional-argument indices (an int, a
    list/set/tuple of ints, or None).  In the decorated function, arguments
    at *strings* indices are passed through ``str_to_bytes`` and arguments
    at *arrays* indices through ``list_to_bytes_list``.

    :raises TypeError: when an index is not an int.
    :raises ValueError: when an index occurs in both *strings* and *arrays*.
    """
    # if given a single element, turn it into a list
    if isinstance(strings, int):
        strings = [strings]
    elif strings is None:
        strings = []
    # check if all entries are integers
    for i, k in enumerate(strings):
        if not isinstance(k, int):
            # NOTE(review): {1} is formatted with the value, not its type,
            # although the message reads as if it were a type — confirm intent.
            raise TypeError(('Wrong type for index at {0} ' + 'in strings. Must be int, not {1}!').format(i, k))
    # if given a single element, turn it into a list
    if isinstance(arrays, int):
        arrays = [arrays]
    elif arrays is None:
        arrays = []
    # check if all entries are integers
    for i, k in enumerate(arrays):
        if not isinstance(k, int):
            raise TypeError(('Wrong type for index at {0} ' + 'in arrays. Must be int, not {1}!').format(i, k))
    # check if some index occurs in both
    if set(strings).intersection(arrays):
        raise ValueError('One or more elements occur in both arrays and ' + ' strings. One parameter cannot be both list and string!')
    # create the checker that will check all arguments given by argsToCheck
    # and turn them into the right datatype.
    def checker(func):
        def check_and_call(*args):
            args = list(args)
            for i in strings:
                arg = args[i]
                args[i] = str_to_bytes(arg)
            for i in arrays:
                arg = args[i]
                args[i] = list_to_bytes_list(arg)
            return func(*args)
        return check_and_call
    return checker
Decorator function which can be used to automatically turn an incoming string into a bytes object and an incoming list to a pointer array if necessary .
379
29
229,803
def add(self, item):
    """Add *item* to this queue; the returned future raises ``Full`` when no space is available."""
    def to_add_result(f):
        if not f.result():
            raise Full("Queue is full!")
        return True
    return self.offer(item).continue_with(to_add_result)
Adds the specified item to this queue if there is available space .
52
13
229,804
def add_all(self, items):
    """Add every element of *items* to this queue."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(queue_add_all_codec, data_list=serialized)
Adds the elements in the specified collection to this queue .
95
11
229,805
def contains_all(self, items):
    """Return whether this queue contains every item in *items*."""
    check_not_none(items, "Items can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "item can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(queue_contains_all_codec, data_list=serialized)
Determines whether this queue contains all of the items in the specified collection or not .
96
18
229,806
def drain_to(self, list, max_size=-1):
    """Transfer available items from this queue into *list* and return the count.

    At most *max_size* items are transferred when it is non-negative
    (-1 means no limit).  NOTE(review): the parameter shadows the builtin
    ``list``; kept as-is for interface compatibility.
    """
    def drain_result(f):
        resp = f.result()
        # extend the caller-supplied collection with the drained items
        list.extend(resp)
        return len(resp)
    return self._encode_invoke(queue_drain_to_max_size_codec, max_size=max_size).continue_with(drain_result)
Transfers all available items to the given list and removes these items from this queue. If a max_size is specified, it transfers at most the given number of items. In case of a failure, an item can exist in both collections or in neither of them.
83
53
229,807
def put(self, item):
    """Add *item* to this queue, waiting until space becomes available when full."""
    check_not_none(item, "Value can't be None")
    serialized = self._to_data(item)
    return self._encode_invoke(queue_put_codec, value=serialized)
Adds the specified element into this queue . If there is no space it waits until necessary space becomes available .
59
21
229,808
def remove_all(self, items):
    """Remove every element of *items* from this queue."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(queue_compare_and_remove_all_codec, data_list=serialized)
Removes all of the elements of the specified collection from this queue .
100
14
229,809
def retain_all(self, items):
    """Keep only the elements of *items* in this queue; remove everything else."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(queue_compare_and_retain_all_codec, data_list=serialized)
Removes the items which are not contained in the specified collection . In other words only the items that are contained in the specified collection will be retained .
101
30
229,810
def get_and_add(self, delta):
    """Add *delta* to the counter and return the value it had before the update."""
    previous = self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=True)
    return previous
Adds the given value to the current value and returns the previous value .
42
14
229,811
def add_and_get(self, delta):
    """Add *delta* to the counter and return the resulting value."""
    updated = self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False)
    return updated
Adds the given value to the current value and returns the updated value .
42
14
229,812
def get_and_subtract(self, delta):
    """Subtract *delta* from the counter and return the value it had before."""
    previous = self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=True)
    return previous
Subtracts the given value from the current value and returns the previous value .
47
17
229,813
def subtract_and_get(self, delta):
    """Subtract *delta* from the counter and return the resulting value."""
    updated = self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=False)
    return updated
Subtracts the given value from the current value and returns the updated value .
45
17
229,814
def shutdown(self):
    """Shut down this HazelcastClient.

    Fires SHUTTING_DOWN, tears down the client subsystems in a fixed order
    (near caches, statistics, partition service, heartbeat, cluster,
    reactor), then fires SHUTDOWN.  Does nothing when the client is not live.
    """
    if self.lifecycle.is_live:
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTTING_DOWN)
        self.near_cache_manager.destroy_all_near_caches()
        self.statistics.shutdown()
        self.partition_service.shutdown()
        self.heartbeat.shutdown()
        self.cluster.shutdown()
        self.reactor.shutdown()
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTDOWN)
        self.logger.info("Client shutdown.", extra=self._logger_extras)
Shuts down this HazelcastClient .
143
8
229,815
def publish(self, message):
    """Publish *message* to all subscribers of this topic."""
    serialized = self._to_data(message)
    self._encode_invoke(topic_publish_codec, message=serialized)
Publishes the message to all subscribers of this topic
43
10
229,816
def remove_listener(self, registration_id):
    """Stop message delivery for the listener registered under *registration_id*."""
    encode = lambda i: topic_remove_message_listener_codec.encode_request(self.name, i)
    return self._stop_listening(registration_id, encode)
Stops receiving messages for the given message listener. If the given listener is already removed, this method does nothing.
52
21
229,817
def validate_serializer(serializer, _type):
    """Raise ValueError unless *serializer* is a subclass of *_type*."""
    if issubclass(serializer, _type):
        return
    raise ValueError("Serializer should be an instance of {}".format(_type.__name__))
Validates the serializer for given type .
51
9
229,818
def create_exception(error_codec):
    """Build a client-side exception from the given error codec."""
    code = error_codec.error_code
    if code in ERROR_CODE_TO_ERROR:
        return ERROR_CODE_TO_ERROR[code](error_codec.message)
    # Unknown error code: fall back to a generic HazelcastError carrying
    # the server-side stack trace.
    frames = ["\tat %s.%s(%s:%s)" % (x.declaring_class, x.method_name, x.file_name, x.line_number)
              for x in error_codec.stack_trace]
    stack_trace = "\n".join(frames)
    message = "Got exception from server:\n %s: %s\n %s" % (error_codec.class_name, error_codec.message, stack_trace)
    return HazelcastError(message)
Creates an exception with given error codec .
171
9
229,819
def capacity(self):
    """Return a Future resolving to the capacity of this Ringbuffer.

    The first call fetches the value remotely and caches it; subsequent
    calls return an ImmediateFuture with the cached value.
    """
    if not self._capacity:
        def cache_capacity(f):
            # memoize the fetched capacity for later calls
            self._capacity = f.result()
            return f.result()
        return self._encode_invoke(ringbuffer_capacity_codec).continue_with(cache_capacity)
    return ImmediateFuture(self._capacity)
Returns the capacity of this Ringbuffer .
73
8
229,820
def read_one(self, sequence):
    """Read the item at *sequence*; blocks when the sequence is one past the tail."""
    check_not_negative(sequence, "sequence can't be smaller than 0")
    return self._encode_invoke(ringbuffer_read_one_codec, sequence=sequence)
Reads one item from the Ringbuffer. If the sequence is one beyond the current tail, this call blocks until an item is added. Currently it isn't possible to control how long this call is going to block.
51
43
229,821
def read_many(self, start_sequence, min_count, max_count):
    """Read a batch of up to *max_count* items starting at *start_sequence*.

    Blocks while fewer than *min_count* items are available; may return
    fewer than *max_count* items.
    """
    check_not_negative(start_sequence, "sequence can't be smaller than 0")
    check_true(max_count >= min_count, "max count should be greater or equal to min count")
    check_true(min_count <= self.capacity().result(), "min count should be smaller or equal to capacity")
    # NOTE(review): the check is strict (<) while the message says "can't be
    # greater than" — confirm whether max_count == MAX_BATCH_SIZE is meant
    # to be allowed.
    check_true(max_count < MAX_BATCH_SIZE, "max count can't be greater than %d" % MAX_BATCH_SIZE)
    return self._encode_invoke(ringbuffer_read_many_codec, response_handler=self._read_many_response_handler, start_sequence=start_sequence, min_count=min_count, max_count=max_count, filter=None)
Reads a batch of items from the Ringbuffer . If the number of available items after the first read item is smaller than the max_count these items are returned . So it could be the number of items read is smaller than the max_count . If there are less items available than min_count then this call blocks . Reading a batch of items is likely to perform better because less overhead is involved .
191
82
229,822
def init(self, initial):
    """Try to initialize this IdGenerator so the next generated id is initial + 1.

    Returns False for non-positive *initial*; otherwise returns the result
    of the compare-and-set that claims the corresponding id block.
    """
    if initial <= 0:
        return False
    step = initial // BLOCK_SIZE
    with self._lock:
        # claim blocks 0..step by CAS-ing the shared counter from 0
        init = self._atomic_long.compare_and_set(0, step + 1).result()
        if init:
            self._local = step
            self._residue = (initial % BLOCK_SIZE) + 1
        return init
Try to initialize this IdGenerator instance with the given id . The first generated id will be 1 greater than id .
82
24
229,823
def new_id(self):
    """Generate and return a cluster-wide unique id.

    Ids are handed out in blocks of BLOCK_SIZE: ``_local`` selects the
    block, ``_residue`` the offset within it.  When the block is exhausted,
    a new one is claimed from the shared atomic counter and the call retries.
    """
    with self._lock:
        curr = self._residue
        self._residue += 1
        if self._residue >= BLOCK_SIZE:
            # block exhausted: claim the next block and retry
            increment = self._atomic_long.get_and_increment().result()
            self._local = increment
            self._residue = 0
            # NOTE(review): re-enters new_id while holding self._lock —
            # presumably the lock is reentrant (RLock); confirm.
            return self.new_id()
        return self._local * BLOCK_SIZE + curr
Generates and returns a cluster-wide unique id. Generated ids are guaranteed to be unique for the entire cluster as long as the cluster is live. If the cluster restarts, then id generation will start from 0.
100
45
229,824
def execute_on_key_owner(self, key, task):
    """Execute *task* on the cluster member that owns *key*'s partition."""
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    partition_id = self._client.partition_service.get_partition_id(key_data)
    uuid = self._get_uuid()
    # NOTE(review): partition_id is passed both positionally (invocation
    # routing) and as a keyword (codec parameter) — confirm this matches
    # _encode_invoke_on_partition's signature.
    return self._encode_invoke_on_partition(executor_service_submit_to_partition_codec, partition_id, uuid=uuid, callable=self._to_data(task), partition_id=partition_id)
Executes a task on the owner of the specified key .
141
12
229,825
def execute_on_member(self, member, task):
    """Run *task* on the given cluster *member*."""
    task_data = self._to_data(task)
    return self._execute_on_member(member.address, self._get_uuid(), task_data)
Executes a task on the specified member .
56
9
229,826
def execute_on_members(self, members, task):
    """Run *task* on each member in *members*; returns a combined future."""
    task_data = self._to_data(task)
    uuid = self._get_uuid()
    submitted = [self._execute_on_member(member.address, uuid, task_data) for member in members]
    return future.combine_futures(*submitted)
Executes a task on each of the specified members .
88
11
229,827
def execute_on_all_members(self, task):
    """Run *task* on every known cluster member."""
    members = self._client.cluster.get_member_list()
    return self.execute_on_members(members, task)
Executes a task on all of the known cluster members .
40
12
229,828
def add_listener(self, on_lifecycle_change):
    """Register a lifecycle-event callback and return its registration id."""
    registration_id = str(uuid.uuid4())  # renamed from 'id' (shadowed builtin)
    self._listeners[registration_id] = on_lifecycle_change
    return registration_id
Add a listener object to listen for lifecycle events .
46
11
229,829
def remove_listener(self, registration_id):
    """Deregister a lifecycle listener; True if it was registered."""
    if registration_id in self._listeners:
        del self._listeners[registration_id]
        return True
    return False
Removes a lifecycle listener .
35
7
229,830
def fire_lifecycle_event(self, new_state):
    """Transition to *new_state* and notify all registered listeners.

    Listener errors are logged and suppressed so one failing listener
    cannot prevent the others from being notified.
    """
    if new_state == LIFECYCLE_STATE_SHUTTING_DOWN:
        self.is_live = False
    self.state = new_state
    self.logger.info(self._git_info + "HazelcastClient is %s", new_state, extra=self._logger_extras)
    # Iterate a snapshot so listeners may (de)register during dispatch.
    for listener in list(self._listeners.values()):
        try:
            listener(new_state)
        except Exception:
            # was a bare except: — don't swallow SystemExit/KeyboardInterrupt
            self.logger.exception("Exception in lifecycle listener", extra=self._logger_extras)
Called when the instance's state changes.
137
8
229,831
def lock(self, lease_time=-1):
    """Acquire the lock, auto-releasing after *lease_time* when one is specified."""
    reference_id = self.reference_id_generator.get_and_increment()
    return self._encode_invoke(lock_lock_codec, invocation_timeout=MAX_SIZE,
                               lease_time=to_millis(lease_time),
                               thread_id=thread_id(), reference_id=reference_id)
Acquires the lock . If a lease time is specified lock will be released after this lease time .
83
21
229,832
def try_lock(self, timeout=0, lease_time=-1):
    """Try to acquire the lock, waiting up to *timeout*; optional lease auto-release."""
    reference_id = self.reference_id_generator.get_and_increment()
    return self._encode_invoke(lock_try_lock_codec, invocation_timeout=MAX_SIZE,
                               lease=to_millis(lease_time), thread_id=thread_id(),
                               timeout=to_millis(timeout), reference_id=reference_id)
Tries to acquire the lock. When the lock is not available, waits up to the given timeout before giving up.
99
13
229,833
def contains_key(self, key):
    """Return whether the multimap has an entry for *key*."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_contains_key_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
Determines whether this multimap contains an entry with the key .
84
14
229,834
def contains_entry(self, key, value):
    """Return whether the multimap contains the given key-value entry."""
    check_not_none(key, "key can't be None")
    check_not_none(value, "value can't be None")
    serialized_key = self._to_data(key)
    serialized_value = self._to_data(value)
    return self._encode_invoke_on_key(multi_map_contains_entry_codec, serialized_key,
                                      key=serialized_key, value=serialized_value,
                                      thread_id=thread_id())
Returns whether the multimap contains an entry with the value .
121
12
229,835
def get(self, key):
    """Return the list of values mapped to *key* (None when absent)."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_get_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
Returns the list of values associated with the key . None if this map does not contain this key .
79
20
229,836
def is_locked(self, key):
    """Return whether the lock for *key* is currently held."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_is_locked_codec, serialized_key,
                                      key=serialized_key)
Checks the lock for the specified key . If the lock is acquired returns true . Otherwise returns false .
73
21
229,837
def remove(self, key, value):
    """Remove the given key-value pair from the multimap.

    Both *key* and *value* must be non-None.
    """
    check_not_none(key, "key can't be None")
    # fix: the original re-checked `key` here with a "value" message and
    # never validated `value`.
    check_not_none(value, "value can't be None")
    key_data = self._to_data(key)
    value_data = self._to_data(value)
    return self._encode_invoke_on_key(multi_map_remove_entry_codec, key_data,
                                      key=key_data, value=value_data,
                                      thread_id=thread_id())
Removes the given key - value tuple from the multimap .
118
13
229,838
def remove_all(self, key):
    """Remove all entries for *key*; returns the removed value list."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_remove_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
Removes all the entries with the given key and returns the value list associated with this key .
81
19
229,839
def put(self, key, value):
    """Store the key-value pair in the multimap."""
    check_not_none(key, "key can't be None")
    check_not_none(value, "value can't be None")
    serialized_key = self._to_data(key)
    serialized_value = self._to_data(value)
    return self._encode_invoke_on_key(multi_map_put_codec, serialized_key,
                                      key=serialized_key, value=serialized_value,
                                      thread_id=thread_id())
Stores a key - value tuple in the multimap .
116
12
229,840
def value_count(self, key):
    """Return how many values are mapped to *key* in the multimap."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_value_count_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
Returns the number of values that match the given key in the multimap .
83
15
229,841
def alter(self, function):
    """Apply *function* to the stored reference."""
    check_not_none(function, "function can't be None")
    serialized = self._to_data(function)
    return self._encode_invoke(atomic_reference_alter_codec, function=serialized)
Alters the currently stored reference by applying a function on it .
54
13
229,842
def alter_and_get(self, function):
    """Apply *function* to the stored reference and return the new value."""
    check_not_none(function, "function can't be None")
    serialized = self._to_data(function)
    return self._encode_invoke(atomic_reference_alter_and_get_codec, function=serialized)
Alters the currently stored reference by applying a function on it and gets the result .
62
17
229,843
def contains(self, expected):
    """Return whether the stored reference equals *expected*."""
    serialized = self._to_data(expected)
    return self._encode_invoke(atomic_reference_contains_codec, expected=serialized)
Checks if the reference contains the value .
39
9
229,844
def get_and_alter(self, function):
    """Apply *function* to the stored reference and return the old value."""
    check_not_none(function, "function can't be None")
    serialized = self._to_data(function)
    return self._encode_invoke(atomic_reference_get_and_alter_codec, function=serialized)
Alters the currently stored reference by applying a function on it and gets the old value.
62
19
229,845
def get_and_set(self, new_value):
    """Set the reference to *new_value* and return the old value."""
    serialized = self._to_data(new_value)
    return self._encode_invoke(atomic_reference_get_and_set_codec, new_value=serialized)
Gets the old value and sets the new value .
52
11
229,846
def set(self, new_value):
    """Atomically set the reference to *new_value*."""
    serialized = self._to_data(new_value)
    return self._encode_invoke(atomic_reference_set_codec, new_value=serialized)
Atomically sets the given value .
44
8
229,847
def set_and_get(self, new_value):
    """Set the reference to *new_value* and return it."""
    serialized = self._to_data(new_value)
    return self._encode_invoke(atomic_reference_set_and_get_codec, new_value=serialized)
Sets and gets the value .
52
7
229,848
def get_type(self):
    """Return the serialization type id stored in this binary form."""
    if self.total_size() == 0:
        return CONSTANT_TYPE_NULL
    (type_id,) = unpack_from(FMT_BE_INT, self._buffer, TYPE_OFFSET)
    return type_id
Returns serialization type of binary form .
53
8
229,849
def has_partition_hash(self):
    """Return True when the buffer is long enough and holds a non-zero partition hash."""
    buf = self._buffer
    if buf is None or len(buf) < HEAP_DATA_OVERHEAD:
        return False
    return unpack_from(FMT_BE_INT, buf, PARTITION_HASH_OFFSET)[0] != 0
Determines whether this Data has partition hash or not .
67
12
229,850
def serializer_for(self, obj):
    """Find a serializer for *obj*.

    Lookup order as implemented: null serializer, default serializers,
    custom registered serializers, global serializer, internal Python
    serializer.

    :raises HazelcastSerializationError: when no serializer matches.
    """
    # 1-NULL serializer
    if obj is None:
        return self._null_serializer
    obj_type = type(obj)
    # 2-Default serializers, Dataserializable, Portable, primitives, arrays, String and some helper types(BigInteger etc)
    serializer = self.lookup_default_serializer(obj_type, obj)
    # 3-Custom registered types by user
    if serializer is None:
        serializer = self.lookup_custom_serializer(obj_type)
    # 5-Global serializer if registered by user
    # NOTE(review): the numbering suggests the global lookup was meant to
    # come after the internal one, but the code checks global first — confirm.
    if serializer is None:
        serializer = self.lookup_global_serializer(obj_type)
    # 4 Internal serializer
    if serializer is None:
        serializer = self.lookup_python_serializer(obj_type)
    if serializer is None:
        raise HazelcastSerializationError("There is no suitable serializer for:" + str(obj_type))
    return serializer
Searches for a serializer for the provided object. Serializers will be searched in this order:
207
20
229,851
def is_expired(self, max_idle_seconds):
    """Return True when this record passed its expiration time or idled too long."""
    now = current_time()
    if self.expiration_time is not None and self.expiration_time < now:
        return True
    return max_idle_seconds is not None and self.last_access_time + max_idle_seconds < now
Determines whether this record is expired or not .
71
11
229,852
def combine_futures(*futures):
    """Return a Future that completes when all *futures* complete.

    The combined result is the list of individual results in the same
    order as *futures* (resolves the original TODO: results were appended
    in completion order).  The first failure completes the combined future
    exceptionally.
    """
    expected = len(futures)
    results = [None] * expected  # one slot per input future preserves order
    completed = AtomicInteger()
    combined = Future()

    def make_callback(index):
        def done(f):
            if combined.done():
                return
            if f.is_success():
                results[index] = f.result()
                if completed.get_and_increment() + 1 == expected:
                    combined.set_result(results)
            else:
                combined.set_exception(f.exception(), f.traceback())
        return done

    for index, future in enumerate(futures):
        future.add_done_callback(make_callback(index))
    return combined
Combines set of Futures .
139
7
229,853
def set_result(self, result):
    """Complete this Future with *result* (None is stored as the NONE_RESULT sentinel)."""
    self._result = NONE_RESULT if result is None else result
    self._event.set()
    self._invoke_callbacks()
Sets the result of the Future .
50
8
229,854
def set_exception(self, exception, traceback=None):
    """Complete this Future exceptionally with *exception* and optional *traceback*."""
    if isinstance(exception, BaseException):
        self._exception = exception
        self._traceback = traceback
        self._event.set()
        self._invoke_callbacks()
    else:
        raise RuntimeError("Exception must be of BaseException type")
Sets the exception for this Future in case of errors .
73
12
229,855
def result(self):
    """Block until this Future completes and return its result.

    Re-raises the stored exception (with its traceback) on failure; maps
    the internal NONE_RESULT sentinel back to None.
    """
    self._reactor_check()
    self._event.wait()
    if self._exception:
        six.reraise(self._exception.__class__, self._exception, self._traceback)
    if self._result == NONE_RESULT:
        return None
    else:
        return self._result
Returns the result of the Future which makes the call synchronous if the result has not been computed yet .
78
21
229,856
def continue_with(self, continuation_func, *args):
    """Return a Future completed with continuation_func(self, *args) once self completes."""
    chained = Future()
    def on_done(completed):
        try:
            chained.set_result(continuation_func(completed, *args))
        except:
            exc_info = sys.exc_info()
            chained.set_exception(exc_info[1], exc_info[2])
    self.add_done_callback(on_done)
    return chained
Create a continuation that executes when the Future is completed .
87
11
229,857
def on_auth(self, f, connection, address):
    """Handle completion of a connection's authentication future.

    On success: registers the authenticated connection, clears it from the
    pending set and notifies connection-opened listeners, returning the
    connection.  On failure: clears the pending entry and re-raises the
    authentication error.
    """
    if f.is_success():
        self.logger.info("Authenticated with %s", f.result(), extra=self._logger_extras)
        with self._new_connection_mutex:
            self.connections[connection.endpoint] = f.result()
            try:
                self._pending_connections.pop(address)
            except KeyError:
                pass
        # notify listeners outside the mutex
        for on_connection_opened, _ in self._connection_listeners:
            if on_connection_opened:
                on_connection_opened(f.result())
        return f.result()
    else:
        self.logger.debug("Error opening %s", connection, extra=self._logger_extras)
        with self._new_connection_mutex:
            try:
                self._pending_connections.pop(address)
            except KeyError:
                pass
        six.reraise(f.exception().__class__, f.exception(), f.traceback())
Checks for authentication of a connection .
228
8
229,858
def close_connection(self, address, cause):
    """Close the connection with *address*.

    Returns True when a connection was found and closed, False otherwise.
    (The original implicitly returned None on success while returning
    False on failure; the try block is also narrowed to the dict lookup so
    a KeyError raised by close() is not silently treated as "not found".)
    """
    try:
        connection = self.connections[address]
    except KeyError:
        self.logger.warning("No connection with %s was found to close.", address, extra=self._logger_extras)
        return False
    connection.close(cause)
    return True
Closes the connection with given address .
65
8
229,859
def start(self):
    """Start sending periodic heartbeat operations.

    The inner callback re-arms the timer after each run and stops
    rescheduling once the client lifecycle is no longer live.
    """
    def _heartbeat():
        if not self._client.lifecycle.is_live:
            return
        self._heartbeat()
        # re-arm for the next interval
        self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
    # schedule the first tick
    self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
Starts sending periodic HeartBeat operations .
99
8
229,860
def send_message(self, message):
    """Flag and write *message* to this connection; raises IOError when not live."""
    if self.live():
        message.add_flag(BEGIN_END_FLAG)
        self.write(message.buffer)
    else:
        raise IOError("Connection is not live.")
Sends a message to this connection .
50
8
229,861
def receive_message(self):
    """Split buffered bytes into length-prefixed frames and dispatch them.

    Each frame starts with a little-endian int32 total length; an
    incomplete frame stays buffered until more data arrives.
    """
    # split frames
    while len(self._read_buffer) >= INT_SIZE_IN_BYTES:
        frame_length = struct.unpack_from(FMT_LE_INT, self._read_buffer, 0)[0]
        if frame_length > len(self._read_buffer):
            # wait for the rest of the frame
            return
        message = ClientMessage(memoryview(self._read_buffer)[:frame_length])
        self._read_buffer = self._read_buffer[frame_length:]
        self._builder.on_message(message)
Receives a message from this connection .
130
9
229,862
def init(self, permits):
    """Try to initialize this semaphore with the given non-negative permit count."""
    check_not_negative(permits, "Permits cannot be negative!")
    return self._encode_invoke(semaphore_init_codec, permits=permits)
Try to initialize this Semaphore instance with the given permit count .
46
14
229,863
def acquire(self, permits=1):
    """Acquire the given number of permits (default 1)."""
    check_not_negative(permits, "Permits cannot be negative!")
    return self._encode_invoke(semaphore_acquire_codec, permits=permits)
Acquires one or specified amount of permits if available and returns immediately reducing the number of available permits by one or given amount .
49
26
229,864
def reduce_permits(self, reduction):
    """Shrink the available permits by *reduction* without blocking."""
    check_not_negative(reduction, "Reduction cannot be negative!")
    return self._encode_invoke(semaphore_reduce_permits_codec, reduction=reduction)
Shrinks the number of available permits by the indicated reduction . This method differs from acquire in that it does not block waiting for permits to become available .
53
31
229,865
def release(self, permits=1):
    """Release the given number of permits (default 1)."""
    check_not_negative(permits, "Permits cannot be negative!")
    return self._encode_invoke(semaphore_release_codec, permits=permits)
Releases one or given number of permits increasing the number of available permits by one or that amount .
48
20
229,866
def try_acquire(self, permits=1, timeout=0):
    """Try to acquire *permits* permits, waiting up to *timeout*."""
    check_not_negative(permits, "Permits cannot be negative!")
    return self._encode_invoke(semaphore_try_acquire_codec, permits=permits,
                               timeout=to_millis(timeout))
Tries to acquire one or the given number of permits if they are available and returns immediately with the value true reducing the number of available permits by the given amount .
68
33
229,867
def add_membership_listener(self, member_added=None, member_removed=None, fire_for_existing=False):
    """Register membership callbacks on this config; returns self for chaining."""
    entry = (member_added, member_removed, fire_for_existing)
    self.membership_listeners.append(entry)
    return self
Helper method for adding membership listeners
61
6
229,868
def set_custom_serializer(self, _type, serializer):
    """Register *serializer* as the custom serializer for *_type*.

    Validates the type and that the serializer subclasses StreamSerializer
    before registering it.
    """
    validate_type(_type)
    validate_serializer(serializer, StreamSerializer)
    self._custom_serializers[_type] = serializer
Assign a serializer for the type .
50
9
229,869
def get(self, property):
    """Return the value of *property*.

    Lookup order: client config properties, then environment variables,
    then the property's default value.  The original used ``or`` chaining,
    which skipped legitimately falsy configured values (0, "", False);
    explicit None checks fix that.
    """
    value = self._properties.get(property.name)
    if value is not None:
        return value
    value = os.getenv(property.name)
    if value is not None:
        return value
    return property.default_value
Gets the value of the given property. First checks client config properties, then environment variables, and lastly falls back to the default value of the property.
36
30
229,870
def get_bool(self, property):
    """Return the property's value coerced to bool ("true", any case, is True)."""
    value = self.get(property)
    return value if isinstance(value, bool) else value.lower() == "true"
Gets the value of the given property as boolean .
39
11
229,871
def get_seconds(self, property):
    """Return the property's value converted to seconds via its time unit."""
    raw = self.get(property)
    return TimeUnit.to_seconds(raw, property.time_unit)
Gets the value of the given property in seconds . If the value of the given property is not a number throws TypeError .
31
26
229,872
def get_seconds_positive_or_default(self, property):
    """Return the property's value in seconds, or the default when non-positive."""
    seconds = self.get_seconds(property)
    if seconds > 0:
        return seconds
    return TimeUnit.to_seconds(property.default_value, property.time_unit)
Gets the value of the given property in seconds . If the value of the given property is not a number throws TypeError . If the value of the given property in seconds is not positive tries to return the default value in seconds .
52
47
229,873
def start(self):
    """Start the partition service's periodic refresh timer."""
    self.logger.debug("Starting partition service", extra=self._logger_extras)
    def partition_updater():
        self._do_refresh()
        # re-arm for the next refresh cycle
        self.timer = self._client.reactor.add_timer(PARTITION_UPDATE_INTERVAL, partition_updater)
    # schedule the first refresh cycle
    self.timer = self._client.reactor.add_timer(PARTITION_UPDATE_INTERVAL, partition_updater)
Starts the partition service .
105
6
229,874
def get_partition_owner(self, partition_id):
    """Return the owner of *partition_id* (None when unassigned).

    Triggers a partition-table refresh when the id is not yet known.
    """
    if partition_id not in self.partitions:
        # unknown partition: trigger assignment/refresh before the lookup
        self._do_refresh()
    return self.partitions.get(partition_id, None)
Gets the owner of the partition if it s set . Otherwise it will trigger partition assignment .
47
19
229,875
def get_partition_id(self, key):
    """Return the partition id owning *key* (0 when the partition count is unknown)."""
    key_data = self._client.serialization_service.to_data(key)
    partition_count = self.get_partition_count()
    if partition_count > 0:
        return hash_to_index(key_data.get_partition_hash(), partition_count)
    return 0
Returns the partition id for a Data key .
70
9
229,876
def murmur_hash3_x86_32(data, offset, size, seed=0x01000193):
    """MurmurHash3 x86 32-bit over ``data[offset:offset+size]``.

    Used to determine the partition for a key.  Returns a *signed* 32-bit
    integer (two's complement), matching the server-side Java hash.
    ``_fmix`` is the standard murmur3 finalization mix (defined elsewhere).
    """
    key = bytearray(data[offset: offset + size])
    length = len(key)
    nblocks = int(length / 4)
    h1 = seed
    c1 = 0xcc9e2d51
    c2 = 0x1b873593
    # body: process complete 4-byte blocks
    for block_start in range(0, nblocks * 4, 4):
        # ??? big endian?
        k1 = key[block_start + 3] << 24 | key[block_start + 2] << 16 | key[block_start + 1] << 8 | key[block_start + 0]
        k1 = c1 * k1 & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # inlined ROTL32
        k1 = (c2 * k1) & 0xFFFFFFFF
        h1 ^= k1
        h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF  # inlined _ROTL32
        h1 = (h1 * 5 + 0xe6546b64) & 0xFFFFFFFF
    # tail: remaining 1-3 bytes
    tail_index = nblocks * 4
    k1 = 0
    tail_size = length & 3
    if tail_size >= 3:
        k1 ^= key[tail_index + 2] << 16
    if tail_size >= 2:
        k1 ^= key[tail_index + 1] << 8
    if tail_size >= 1:
        k1 ^= key[tail_index + 0]
    if tail_size != 0:
        k1 = (k1 * c1) & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # _ROTL32
        k1 = (k1 * c2) & 0xFFFFFFFF
        h1 ^= k1
    result = _fmix(h1 ^ length)
    # reinterpret the unsigned 32-bit result as signed
    return -(result & 0x80000000) | (result & 0x7FFFFFFF)
murmur3 hash function to determine partition
442
8
229,877
def add(self, item):
    """Append *item* to the end of this list."""
    check_not_none(item, "Value can't be None")
    serialized = self._to_data(item)
    return self._encode_invoke(list_add_codec, value=serialized)
Adds the specified item to the end of this list .
59
11
229,878
def add_at(self, index, item):
    """Insert *item* at *index*, shifting subsequent elements right."""
    check_not_none(item, "Value can't be None")
    serialized = self._to_data(item)
    return self._encode_invoke(list_add_with_index_codec, index=index, value=serialized)
Adds the specified item at the specific position in this list . Element in this position and following elements are shifted to the right if any .
71
27
229,879
def add_all(self, items):
    """Append every element of *items* to this list."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(list_add_all_codec, value_list=serialized)
Adds all of the items in the specified collection to the end of this list . The order of new elements is determined by the specified collection s iterator .
95
30
229,880
def add_all_at(self, index, items):
    """Insert every element of *items* starting at *index*, shifting later elements right."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(list_add_all_with_index_codec, index=index, value_list=serialized)
Adds all of the elements in the specified collection into this list at the specified position . Elements in this positions and following elements are shifted to the right if any . The order of new elements is determined by the specified collection s iterator .
107
46
229,881
def contains_all(self, items):
    """Determines whether this list contains all items of the given collection."""
    check_not_none(items, "Items can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "item can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(list_contains_all_codec, values=serialized)
Determines whether this list contains all of the items in specified collection or not .
94
17
229,882
def index_of(self, item):
    """Returns the first index of the item in this list, or -1 if absent."""
    check_not_none(item, "Value can't be None")
    return self._encode_invoke(list_index_of_codec, value=self._to_data(item))
Returns the first index of the specified item's occurrence in this list . If the specified item is not present in this list returns - 1 .
63
26
229,883
def last_index_of(self, item):
    """Returns the last index of the item in this list, or -1 if absent."""
    check_not_none(item, "Value can't be None")
    return self._encode_invoke(list_last_index_of_codec, value=self._to_data(item))
Returns the last index of the specified item's occurrence in this list . If the specified item is not present in this list returns - 1 .
67
26
229,884
def remove(self, item):
    """Removes the first occurrence of the element from this list, if present."""
    check_not_none(item, "Value can't be None")
    return self._encode_invoke(list_remove_codec, value=self._to_data(item))
Removes the specified element s first occurrence from the list if it exists in this list .
59
18
229,885
def remove_all(self, items):
    """Removes from this list every element present in the given collection."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(list_compare_and_remove_all_codec, values=serialized)
Removes all of the elements that is present in the specified collection from this list .
98
17
229,886
def retain_all(self, items):
    """Keeps only the items contained in the given collection; everything else
    is removed from this list."""
    check_not_none(items, "Value can't be None")
    serialized = []
    for element in items:
        check_not_none(element, "Value can't be None")
        serialized.append(self._to_data(element))
    return self._encode_invoke(list_compare_and_retain_all_codec, values=serialized)
Retains only the items that are contained in the specified collection . It means items which are not present in the specified collection are removed from this list .
99
30
229,887
def set_at(self, index, item):
    """Replaces the element at ``index`` with the specified item."""
    check_not_none(item, "Value can't be None")
    return self._encode_invoke(list_set_codec, index=index, value=self._to_data(item))
Replaces the specified element with the element at the specified position in this list .
67
16
229,888
def add_index(self, attribute, ordered=False):
    """Adds an index for the given attribute so queries on it run faster."""
    return self._encode_invoke(map_add_index_codec,
                               ordered=ordered, attribute=attribute)
Adds an index to this map for the specified entries so that queries can run faster .
40
17
229,889
def add_interceptor(self, interceptor):
    """Adds an interceptor for this map; it will intercept operations and run
    user-defined methods."""
    interceptor_data = self._to_data(interceptor)
    return self._encode_invoke(map_add_interceptor_codec, interceptor=interceptor_data)
Adds an interceptor for this map . Added interceptor will intercept operations and execute user defined methods .
45
20
229,890
def entry_set(self, predicate=None):
    """Returns a list clone of the mappings in this map, optionally filtered
    by ``predicate``."""
    if not predicate:
        return self._encode_invoke(map_entry_set_codec)
    return self._encode_invoke(map_entries_with_predicate_codec,
                               predicate=self._to_data(predicate))
Returns a list clone of the mappings contained in this map .
76
13
229,891
def evict(self, key):
    """Evicts the specified key from this map."""
    check_not_none(key, "key can't be None")
    return self._evict_internal(self._to_data(key))
Evicts the specified key from this map .
50
9
229,892
def execute_on_entries(self, entry_processor, predicate=None):
    """Applies ``entry_processor`` to every entry in the map — or only to the
    entries matching ``predicate`` when one is given. Returns the results
    mapped by each key."""
    processor_data = self._to_data(entry_processor)
    if predicate:
        return self._encode_invoke(map_execute_with_predicate_codec,
                                   entry_processor=processor_data,
                                   predicate=self._to_data(predicate))
    return self._encode_invoke(map_execute_on_all_keys_codec,
                               entry_processor=processor_data)
Applies the user defined EntryProcessor to all the entries in the map or entries in the map which satisfies the predicate if provided . Returns the results mapped by each key in the map .
109
38
229,893
def execute_on_key(self, key, entry_processor):
    """Applies ``entry_processor`` to the entry mapped by ``key`` and returns
    the processor's result."""
    check_not_none(key, "key can't be None")
    return self._execute_on_key_internal(self._to_data(key), entry_processor)
Applies the user defined EntryProcessor to the entry mapped by the key . Returns the object which is the result of the EntryProcessor's process method .
65
31
229,894
def execute_on_keys(self, keys, entry_processor):
    """Applies ``entry_processor`` to the entries mapped by the given keys.

    Returns the results mapped by each key in the collection; an empty keys
    collection yields an immediately-resolved empty result.
    """
    # Validate the collection itself, consistent with get_all() and the other
    # bulk operations (previously a None collection raised a bare TypeError
    # from iteration instead of going through check_not_none).
    check_not_none(keys, "keys can't be None")
    # Short-circuit before serializing anything (the original checked this
    # only after the loop).
    if len(keys) == 0:
        return ImmediateFuture([])
    key_list = []
    for key in keys:
        check_not_none(key, "key can't be None")
        key_list.append(self._to_data(key))
    return self._encode_invoke(map_execute_on_keys_codec,
                               entry_processor=self._to_data(entry_processor),
                               keys=key_list)
Applies the user defined EntryProcessor to the entries mapped by the collection of keys . Returns the results mapped by each key in the collection .
117
29
229,895
def force_unlock(self, key):
    """Releases the lock for ``key`` regardless of the lock owner; never
    blocks and returns immediately."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    ref_id = self.reference_id_generator.get_and_increment()
    return self._encode_invoke_on_key(map_force_unlock_codec, serialized_key,
                                      key=serialized_key, reference_id=ref_id)
Releases the lock for the specified key regardless of the lock owner . It always successfully unlocks the key never blocks and returns immediately .
95
26
229,896
def get_all(self, keys):
    """Returns the entries for the given keys, grouped internally by the
    partition each key belongs to."""
    check_not_none(keys, "keys can't be None")
    if not keys:
        return ImmediateFuture({})
    partition_service = self._client.partition_service
    partition_to_keys = {}
    for key in keys:
        check_not_none(key, "key can't be None")
        serialized_key = self._to_data(key)
        owner_partition = partition_service.get_partition_id(serialized_key)
        # Bucket each key under its owning partition.
        partition_to_keys.setdefault(owner_partition, {})[key] = serialized_key
    return self._get_all_internal(partition_to_keys)
Returns the entries for the given keys .
168
8
229,897
def get_entry_view(self, key):
    """Returns the EntryView for the specified key."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(map_get_entry_view_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
Returns the EntryView for the specified key .
85
9
229,898
def is_locked(self, key):
    """Checks the lock for ``key``: True if it is acquired, False otherwise."""
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(map_is_locked_codec, serialized_key,
                                      key=serialized_key)
Checks the lock for the specified key . If the lock is acquired it returns true . Otherwise it returns false .
71
23
229,899
def key_set(self, predicate=None):
    """Returns a list clone of the keys in this map, optionally restricted to
    entries matching ``predicate``."""
    if not predicate:
        return self._encode_invoke(map_key_set_codec)
    return self._encode_invoke(map_key_set_with_predicate_codec,
                               predicate=self._to_data(predicate))
Returns a List clone of the keys contained in this map or the keys of the entries filtered with the predicate if provided .
77
24