desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def test_timer_cancellation(self, *args):
    """Verify that timer cancellation is honored."""
    connection = twistedreactor.TwistedConnection('1.2.3.4', cql_version='3.0.1')
    timeout = 0.1
    callback = TimerCallback(timeout)
    timer = connection.create_timer(timeout, callback.invoke)
    timer.cancel()
    # Sleep past the timeout so the (cancelled) timer would have fired.
    time.sleep(0.2)
    timer_manager = connection._loop._timers
    # A cancelled timer must leave no trace and never invoke its callback.
    self.assertFalse(timer_manager._queue)
    self.assertFalse(timer_manager._new_timers)
    self.assertFalse(callback.was_invoked())
def test_makeConnection(self):
    """Verify that the protocol class notifies the connection object that a
    successful connection was made."""
    self.obj_ut.makeConnection(self.tr)
    self.assertTrue(self.mock_connection.client_connection_made.called)
def test_receiving_data(self):
    """Verify that the dataReceived() callback writes the data to the
    connection object's buffer and calls handle_read()."""
    self.obj_ut.makeConnection(self.tr)
    self.obj_ut.dataReceived('foobar')
    self.assertTrue(self.mock_connection.handle_read.called)
    self.mock_connection._iobuf.write.assert_called_with('foobar')
def test_client_connection_failed(self):
    """Verify that connection failed causes the connection object to close."""
    exc = Exception('a test')
    self.obj_ut.clientConnectionFailed(None, Failure(exc))
    # The wrapped exception (not the Failure) must be passed to defunct().
    self.mock_connection.defunct.assert_called_with(exc)
def test_client_connection_lost(self):
    """Verify that connection lost causes the connection object to close."""
    exc = Exception('a test')
    self.obj_ut.clientConnectionLost(None, Failure(exc))
    # The wrapped exception (not the Failure) must be passed to defunct().
    self.mock_connection.defunct.assert_called_with(exc)
def test_connection_initialization(self):
    """Verify that __init__() works correctly."""
    # Construction must schedule add_connection via callFromThread and
    # start the reactor without installing signal handlers.
    self.mock_reactor_cft.assert_called_with(self.obj_ut.add_connection)
    self.obj_ut._loop._cleanup()
    self.mock_reactor_run.assert_called_with(installSignalHandlers=False)
@patch('twisted.internet.reactor.connectTCP')
def test_add_connection(self, mock_connectTCP):
    """Verify that add_connection() gives us a valid twisted connector."""
    self.obj_ut.add_connection()
    self.assertTrue((self.obj_ut.connector is not None))
    self.assertTrue(mock_connectTCP.called)
def test_client_connection_made(self):
    """Verify that _send_options_message() is called in client_connection_made()."""
    self.obj_ut._send_options_message = Mock()
    self.obj_ut.client_connection_made()
    self.obj_ut._send_options_message.assert_called_with()
@patch('twisted.internet.reactor.connectTCP')
def test_close(self, mock_connectTCP):
    """Verify that close() disconnects the connector and errors callbacks."""
    self.obj_ut.error_all_requests = Mock()
    self.obj_ut.add_connection()
    self.obj_ut.is_closed = False
    self.obj_ut.close()
    self.obj_ut.connector.disconnect.assert_called_with()
    self.assertTrue(self.obj_ut.connected_event.is_set())
    self.assertTrue(self.obj_ut.error_all_requests.called)
def test_handle_read__incomplete(self):
    """Verify that handle_read() processes incomplete messages properly."""
    self.obj_ut.process_msg = Mock()
    self.assertEqual(self.obj_ut._iobuf.getvalue(), '')
    # Write a partial frame header (5 of 9 bytes); nothing should be consumed.
    self.obj_ut._iobuf.write('\x84\x00\x00\x00\x00')
    self.obj_ut.handle_read()
    self.assertEqual(self.obj_ut._iobuf.getvalue(), '\x84\x00\x00\x00\x00')
    # Complete the header (body length 0x15 = 21), but supply no body yet.
    self.obj_ut._iobuf.write('\x00\x00\x00\x15')
    self.obj_ut.handle_read()
    self.assertEqual(self.obj_ut._iobuf.getvalue(), '\x84\x00\x00\x00\x00\x00\x00\x00\x15')
    # end_pos = 9-byte header + 21-byte body = 30; message not yet dispatched.
    self.assertEqual(self.obj_ut._current_frame.end_pos, 30)
    self.assertFalse(self.obj_ut.process_msg.called)
def test_handle_read__fullmessage(self):
    """Verify that handle_read() processes complete messages properly."""
    self.obj_ut.process_msg = Mock()
    self.assertEqual(self.obj_ut._iobuf.getvalue(), '')
    body = 'this is the drum roll'
    extra = 'NEXT'
    # One complete frame (header + 21-byte body) plus trailing bytes of the
    # next message; only the trailing bytes should remain in the buffer.
    self.obj_ut._iobuf.write((('\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body) + extra))
    self.obj_ut.handle_read()
    self.assertEqual(self.obj_ut._iobuf.getvalue(), extra)
    self.obj_ut.process_msg.assert_called_with(
        _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos=(9 + len(body))), body)
@patch('twisted.internet.reactor.connectTCP')
def test_push(self, mock_connectTCP):
    """Verify that push() calls transport.write(data)."""
    self.obj_ut.add_connection()
    self.obj_ut.push('123 pickup')
    # The write must be marshalled onto the reactor thread via callFromThread.
    self.mock_reactor_cft.assert_called_with(
        self.obj_ut.connector.transport.write, '123 pickup')
def test_multi_timer_validation(self, *args):
    """Verify that timer timeouts are honored appropriately."""
    c = self.make_connection()
    c.initialize_reactor()
    # Submit timers with ascending, descending, and shuffled timeouts.
    submit_and_wait_for_completion(self, c, 0, 100, 1, 100)
    submit_and_wait_for_completion(self, c, 100, 0, (-1), 100)
    submit_and_wait_for_completion(self, c, 0, 100, 1, 100, True)
def test_timer_cancellation(self, *args):
    """Verify that timer cancellation is honored."""
    connection = self.make_connection()
    timeout = 0.1
    callback = TimerCallback(timeout)
    timer = connection.create_timer(timeout, callback.invoke)
    timer.cancel()
    # Sleep past the timeout so the (cancelled) timer would have fired.
    time.sleep(0.2)
    timer_manager = connection._libevloop._timers
    # A cancelled timer must leave no trace and never invoke its callback.
    self.assertFalse(timer_manager._queue)
    self.assertFalse(timer_manager._new_timers)
    self.assertFalse(callback.was_invoked())
def test_prepare_message(self):
    """Test to check the appropriate calls are made.

    @since 3.9
    @jira_ticket PYTHON-713
    @expected_result the values are correctly written
    @test_category connection
    """
    message = PrepareMessage('a')
    io = Mock()
    message.send_body(io, 4)
    self._check_calls(io, [('\x00\x00\x00\x01',), ('a',)])
    io.reset_mock()
    # Protocol v5 appends a (empty) flags field after the query string.
    message.send_body(io, 5)
    self._check_calls(io, [('\x00\x00\x00\x01',), ('a',), ('\x00\x00\x00\x00',)])
def test_query_message(self):
    """Test to check the appropriate calls are made.

    @since 3.9
    @jira_ticket PYTHON-713
    @expected_result the values are correctly written
    @test_category connection
    """
    message = QueryMessage('a', 3)
    io = Mock()
    message.send_body(io, 4)
    self._check_calls(io, [('\x00\x00\x00\x01',), ('a',), ('\x00\x03',), ('\x00',)])
    io.reset_mock()
    # Protocol v5 widens the flags field from 1 to 4 bytes.
    message.send_body(io, 5)
    self._check_calls(io, [('\x00\x00\x00\x01',), ('a',), ('\x00\x03',), ('\x00\x00\x00\x00',)])
def test_prepare_flag(self):
    """Test to check the prepare flag is properly set.

    This should only happen for V5 at the moment.

    @since 3.9
    @jira_ticket PYTHON-713
    @expected_result the values are correctly written
    @test_category connection
    """
    message = PrepareMessage('a')
    io = Mock()
    for version in ProtocolVersion.SUPPORTED_VERSIONS:
        message.send_body(io, version)
        # Versions that carry prepare flags write one extra field.
        if ProtocolVersion.uses_prepare_flags(version):
            self.assertEqual(len(io.write.mock_calls), 3)
        else:
            self.assertEqual(len(io.write.mock_calls), 2)
        io.reset_mock()
def test_host_instantiations(self):
    """Ensure Host fails if not initialized properly."""
    self.assertRaises(ValueError, Host, None, None)
    self.assertRaises(ValueError, Host, '127.0.0.1', None)
    self.assertRaises(ValueError, Host, None, SimpleConvictionPolicy)
def test_host_equality(self):
    """Test host equality has correct logic."""
    a = Host('127.0.0.1', SimpleConvictionPolicy)
    b = Host('127.0.0.1', SimpleConvictionPolicy)
    c = Host('127.0.0.2', SimpleConvictionPolicy)
    self.assertEqual(a, b, 'Two Host instances should be equal when sharing.')
    self.assertNotEqual(a, c, 'Two Host instances should NOT be equal when using two different addresses.')
    self.assertNotEqual(b, c, 'Two Host instances should NOT be equal when using two different addresses.')
def extract_consistency(self, msg):
    """Given a message containing ``'consistency': 'value'``, pull out the
    consistency value as a string.

    :param msg: message with consistency value
    :return: the consistency string, or a falsy value when none is present
    """
    found = re.search("'consistency':\\s+'([\\w\\s]+)'", msg)
    if not found:
        # Mirror the short-circuit `match and match.group(1)` idiom:
        # propagate the falsy match object (None) itself.
        return found
    return found.group(1)
def test_timeout_consistency(self):
    """Verify that Timeout exception object translates consistency from
    input value to correct output string."""
    # None renders as the sentinel string 'Not Set'.
    consistency_str = self.extract_consistency(repr(Timeout('Timeout Message', consistency=None)))
    self.assertEqual(consistency_str, 'Not Set')
    # Every defined consistency level renders as its symbolic name.
    for c in ConsistencyLevel.value_to_name.keys():
        consistency_str = self.extract_consistency(repr(Timeout('Timeout Message', consistency=c)))
        self.assertEqual(consistency_str, ConsistencyLevel.value_to_name[c])
def test_unavailable_consistency(self):
    """Verify that Unavailable exception object translates consistency from
    input value to correct output string."""
    # None renders as the sentinel string 'Not Set'.
    consistency_str = self.extract_consistency(repr(Unavailable('Unavailable Message', consistency=None)))
    self.assertEqual(consistency_str, 'Not Set')
    # Every defined consistency level renders as its symbolic name.
    for c in ConsistencyLevel.value_to_name.keys():
        consistency_str = self.extract_consistency(repr(Unavailable('Timeout Message', consistency=c)))
        self.assertEqual(consistency_str, ConsistencyLevel.value_to_name[c])
def test_initialization_without_existing_connection(self):
    """Test that users can define models with UDTs without initializing
    connections.

    Written to reproduce PYTHON-649.
    """
    # Merely defining these classes must not require a live connection.
    class Value(UserType, ):
        t = columns.Text()

    class DummyUDT(Model, ):
        __keyspace__ = 'ks'
        primary_key = columns.Integer(primary_key=True)
        value = columns.UserDefinedType(Value)
def test_set_session_without_existing_connection(self):
    """Users can set the default session without having a default connection set."""
    mock_session = Mock(row_factory=dict_factory, encoder=Mock(mapping={}))
    connection.set_session(mock_session)
def test_get_session_fails_without_existing_connection(self):
    """Users can't get the default session without having a default connection set."""
    with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg):
        connection.get_session(connection=None)
def test_get_cluster_fails_without_existing_connection(self):
    """Users can't get the default cluster without having a default connection set."""
    with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg):
        connection.get_cluster(connection=None)
def add_callback(self, fn, *args, **kwargs):
    """Add a callback to our pending list of callbacks.

    If reverse was specified (``self.reverse``), callbacks receive
    decreasing priorities so they are invoked in the opposite order from
    which they were added.

    :param fn: the callback; queued together with its args, kwargs, and
        the wall-clock time it was added.
    """
    time_added = time.time()
    self.pending_callbacks.put((self.priority, (fn, args, kwargs, time_added)))
    # BUG FIX: the original tested `not reversed` -- the *builtin*
    # `reversed` function, which is always truthy, so the increment branch
    # could never run and priorities always decreased.  Use the instance's
    # `reverse` flag (set from the `reverse=reverse` constructor argument
    # seen at the call sites) instead.
    if not self.reverse:
        self.priority += 1
    else:
        self.priority -= 1
def test_results_ordering_forward(self):
    """Test ordering of ConcurrentExecutorListResults when queries complete
    in the order they were executed."""
    self.insert_and_validate_list_results(False, False)
def test_results_ordering_reverse(self):
    """Test ordering of ConcurrentExecutorListResults when queries complete
    in the reverse order they were executed."""
    self.insert_and_validate_list_results(True, False)
def test_results_ordering_forward_slowdown(self):
    """Test ordering of ConcurrentExecutorListResults when queries complete
    in the order they were executed, with slow queries mixed in."""
    self.insert_and_validate_list_results(False, True)
def test_results_ordering_reverse_slowdown(self):
    """Test ordering of ConcurrentExecutorListResults when queries complete
    in the reverse order they were executed, with slow queries mixed in."""
    self.insert_and_validate_list_results(True, True)
def test_results_ordering_forward_generator(self):
    """Test ordering of ConcurrentExecutorGenResults when queries complete
    in the order they were executed."""
    self.insert_and_validate_list_generator(False, False)
def test_results_ordering_reverse_generator(self):
    """Test ordering of ConcurrentExecutorGenResults when queries complete
    in the reverse order they were executed."""
    self.insert_and_validate_list_generator(True, False)
def test_results_ordering_forward_generator_slowdown(self):
    """Test ordering of ConcurrentExecutorGenResults when queries complete
    in the order they were executed, with slow queries mixed in."""
    self.insert_and_validate_list_generator(False, True)
def test_results_ordering_reverse_generator_slowdown(self):
    """Test ordering of ConcurrentExecutorGenResults when queries complete
    in the reverse order they were executed, with slow queries mixed in."""
    self.insert_and_validate_list_generator(True, True)
def insert_and_validate_list_results(self, reverse, slowdown):
    """Submit statements via ConcurrentExecutorListResults, then fire the
    registered future callbacks from a separate thread and validate that
    results come back in submission order.

    :param reverse: Execute the callbacks in the opposite order that they were submitted
    :param slowdown: Cause intermittent queries to perform slowly
    """
    our_handler = MockResponseResponseFuture(reverse=reverse)
    mock_session = Mock()
    statements_and_params = zip(cycle(['INSERT INTO test3rf.test (k, v) VALUES (%s, 0)']),
                                [(i,) for i in range(100)])
    mock_session.execute_async.return_value = our_handler
    t = TimedCallableInvoker(our_handler, slowdown=slowdown)
    t.start()
    results = execute_concurrent(mock_session, statements_and_params)
    # Wait until the invoker thread has drained every pending callback.
    while (not our_handler.pending_callbacks.empty()):
        time.sleep(0.01)
    t.stop()
    self.validate_result_ordering(results)
def insert_and_validate_list_generator(self, reverse, slowdown):
    """Submit statements via ConcurrentExecutorGenResults, then fire the
    registered future callbacks from a separate thread and validate that
    results come back in submission order.

    :param reverse: Execute the callbacks in the opposite order that they were submitted
    :param slowdown: Cause intermittent queries to perform slowly
    """
    our_handler = MockResponseResponseFuture(reverse=reverse)
    mock_session = Mock()
    statements_and_params = zip(cycle(['INSERT INTO test3rf.test (k, v) VALUES (%s, 0)']),
                                [(i,) for i in range(100)])
    mock_session.execute_async.return_value = our_handler
    t = TimedCallableInvoker(our_handler, slowdown=slowdown)
    t.start()
    try:
        # Generator variant: results are consumed lazily, so validation
        # happens while the invoker thread is still running.
        results = execute_concurrent(mock_session, statements_and_params, results_generator=True)
        self.validate_result_ordering(results)
    finally:
        t.stop()
def validate_result_ordering(self, results):
    """Validate that the timestamps embedded in the results are in order,
    indicating the results came back in the order they were submitted.

    :param results: iterable of (success, result) pairs
    """
    last_time_added = 0
    for (success, result) in results:
        self.assertTrue(success)
        current_time_added = list(result)[0]
        # Windows clocks have coarse resolution, so equal timestamps are
        # tolerated there; elsewhere strict ordering is required.
        if ('Windows' in platform.system()):
            self.assertLessEqual(last_time_added, current_time_added)
        else:
            self.assertLess(last_time_added, current_time_added)
        last_time_added = current_time_added
@mock_session_pools
def test_recursion_limited(self):
    """Verify that recursion is controlled when raise_on_first_error=False
    and something is wrong with the query.

    PYTHON-585
    """
    max_recursion = sys.getrecursionlimit()
    s = Session(Cluster(), [Host('127.0.0.1', SimpleConvictionPolicy)])
    # With raise_on_first_error=True the first failure propagates.
    self.assertRaises(TypeError, execute_concurrent_with_args, s, "doesn't matter",
                      ([('param',)] * max_recursion), raise_on_first_error=True)
    # With raise_on_first_error=False every failure is collected without
    # blowing the recursion limit.
    results = execute_concurrent_with_args(s, "doesn't matter",
                                           ([('param',)] * max_recursion), raise_on_first_error=False)
    self.assertEqual(len(results), max_recursion)
    for r in results:
        self.assertFalse(r[0])
        self.assertIsInstance(r[1], TypeError)
@classmethod
def setUpClass(cls):
    """Hook method for setting up class fixture before running tests in the class."""
    # Py2/Py3 compat: unittest renamed assertItemsEqual to assertCountEqual
    # in Python 3; alias it so tests can use the old name everywhere.
    if (not hasattr(cls, 'assertItemsEqual')):
        cls.assertItemsEqual = cls.assertCountEqual
def test_replication_strategy(self):
    """Basic code coverage testing that ensures different
    ReplicationStrategies can be initiated using parameters correctly."""
    rs = ReplicationStrategy()
    # Unsupported strategy names fall back to _UnknownStrategy.
    self.assertEqual(rs.create('OldNetworkTopologyStrategy', None),
                     _UnknownStrategy('OldNetworkTopologyStrategy', None))
    fake_options_map = {'options': 'map'}
    uks = rs.create('OldNetworkTopologyStrategy', fake_options_map)
    self.assertEqual(uks, _UnknownStrategy('OldNetworkTopologyStrategy', fake_options_map))
    # An unknown strategy produces an empty replica map.
    self.assertEqual(uks.make_token_replica_map({}, []), {})
    fake_options_map = {'dc1': '3'}
    self.assertIsInstance(rs.create('NetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy)
    self.assertEqual(rs.create('NetworkTopologyStrategy', fake_options_map).dc_replication_factors,
                     NetworkTopologyStrategy(fake_options_map).dc_replication_factors)
    # SimpleStrategy without a replication_factor option cannot be built.
    fake_options_map = {'options': 'map'}
    self.assertIsNone(rs.create('SimpleStrategy', fake_options_map))
    fake_options_map = {'options': 'map'}
    self.assertIsInstance(rs.create('LocalStrategy', fake_options_map), LocalStrategy)
    fake_options_map = {'options': 'map', 'replication_factor': 3}
    self.assertIsInstance(rs.create('SimpleStrategy', fake_options_map), SimpleStrategy)
    self.assertEqual(rs.create('SimpleStrategy', fake_options_map).replication_factor,
                     SimpleStrategy(fake_options_map).replication_factor)
    self.assertEqual(rs.create('xxxxxxxx', fake_options_map),
                     _UnknownStrategy('xxxxxxxx', fake_options_map))
    # The abstract base raises on its abstract methods.
    self.assertRaises(NotImplementedError, rs.make_token_replica_map, None, None)
    self.assertRaises(NotImplementedError, rs.export_for_schema)
def test_nts_token_performance(self):
    """Ensure that when rf exceeds the number of nodes available we don't
    needlessly iterate trying to construct tokens for nodes that don't exist.

    @since 3.7
    @jira_ticket PYTHON-379
    @expected_result timing with 1500 rf should be same/similar to 3rf if we have 3 nodes
    @test_category metadata
    """
    token_to_host_owner = {}
    ring = []
    dc1hostnum = 3
    current_token = 0
    vnodes_per_host = 500
    # Build a 3-host ring with 500 vnodes per host.
    for i in range(dc1hostnum):
        host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
        host.set_location_info('dc1', 'rack1')
        for vnode_num in range(vnodes_per_host):
            md5_token = MD5Token((current_token + vnode_num))
            token_to_host_owner[md5_token] = host
            ring.append(md5_token)
        current_token += 1000
    # Baseline: rf == number of hosts.
    nts = NetworkTopologyStrategy({'dc1': 3})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_base = (timeit.default_timer() - start_time)
    # Pathological: rf far larger than the number of hosts.
    nts = NetworkTopologyStrategy({'dc1': 1500})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_bad = (timeit.default_timer() - start_time)
    # The two timings should be within one second of each other.
    difference = (elapsed_bad - elapsed_base)
    self.assertTrue(((difference < 1) and (difference > (-1))))
def test_protect_name(self):
    """Test cassandra.metadata.protect_name output."""
    # Already-valid identifiers pass through unquoted.
    self.assertEqual(protect_name('tests'), 'tests')
    self.assertEqual(protect_name("test's"), '"test\'s"')
    self.assertEqual(protect_name("test's"), '"test\'s"')
    # Special characters and leading digits force double-quoting.
    self.assertEqual(protect_name('tests ?!@#$%^&*()'), '"tests ?!@#$%^&*()"')
    self.assertEqual(protect_name('1'), '"1"')
    self.assertEqual(protect_name('1test'), '"1test"')
def test_protect_names(self):
    """Test cassandra.metadata.protect_names output."""
    self.assertEqual(protect_names(['tests']), ['tests'])
    self.assertEqual(protect_names(['tests', "test's", 'tests ?!@#$%^&*()', '1']),
                     ['tests', '"test\'s"', '"tests ?!@#$%^&*()"', '"1"'])
def test_protect_value(self):
    """Test cassandra.metadata.protect_value output."""
    self.assertEqual(protect_value(True), 'true')
    self.assertEqual(protect_value(False), 'false')
    self.assertEqual(protect_value(3.14), '3.14')
    self.assertEqual(protect_value(3), '3')
    # Strings are single-quoted, with embedded quotes doubled.
    self.assertEqual(protect_value('test'), "'test'")
    self.assertEqual(protect_value("test's"), "'test''s'")
    self.assertEqual(protect_value(None), 'NULL')
def test_is_valid_name(self):
    """Test cassandra.metadata.is_valid_name output."""
    self.assertEqual(is_valid_name(None), False)
    self.assertEqual(is_valid_name('test'), True)
    # Uppercase and leading digits are invalid unquoted names.
    self.assertEqual(is_valid_name('Test'), False)
    self.assertEqual(is_valid_name('t_____1'), True)
    self.assertEqual(is_valid_name('test1'), True)
    self.assertEqual(is_valid_name('1test1'), False)
    # Reserved CQL keywords are never valid names.
    invalid_keywords = (cassandra.metadata.cql_keywords - cassandra.metadata.cql_keywords_unreserved)
    for keyword in invalid_keywords:
        self.assertEqual(is_valid_name(keyword), False)
def test_iterate_all_hosts_and_modify(self):
    """PYTHON-572: removing hosts while iterating all_hosts() must not
    raise (the iteration must not be over live internal state)."""
    metadata = Metadata()
    metadata.add_or_return_host(Host('dc1.1', SimpleConvictionPolicy))
    metadata.add_or_return_host(Host('dc1.2', SimpleConvictionPolicy))
    self.assertEqual(len(metadata.all_hosts()), 2)
    # Would raise if all_hosts() returned a view of the mutated collection.
    for host in metadata.all_hosts():
        metadata.remove_host(host)
    self.assertEqual(len(metadata.all_hosts()), 0)
def test_not_implemented(self):
    """Ensure the following methods throw NIE's. If not, come back and test them."""
    c = self.make_connection()
    self.assertRaises(NotImplementedError, c.close)
@mock.patch('cassandra.timestamps.time')
def _call_and_check_results(self, patched_time_module, system_time_expected_stamp_pairs,
                            timestamp_generator=None):
    """For each (system_time, expected_timestamp) pair, drive a
    :class:`cassandra.timestamps.MonotonicTimestampGenerator` with
    system_time as the underlying time.time() result, then assert the
    generator returns expected_timestamp.  Skips the check when
    expected_timestamp is None.
    """
    patched_time_module.time = mock.Mock()
    (system_times, expected_timestamps) = zip(*system_time_expected_stamp_pairs)
    # Each call to the patched time.time() pops the next scripted value.
    patched_time_module.time.side_effect = system_times
    tsg = (timestamp_generator or timestamps.MonotonicTimestampGenerator())
    for expected in expected_timestamps:
        actual = tsg()
        if (expected is not None):
            self.assertEqual(actual, expected)
    # The scripted clock must be fully consumed: one tick per expectation.
    with self.assertRaises(StopIteration):
        tsg()
def test_timestamps_during_and_after_same_system_time(self):
    """Test that MonotonicTimestampGenerator's output increases by 1 when
    the underlying system time is the same, then returns to normal when the
    system time increases again.

    @since 3.8.0
    @expected_result Timestamps should increase monotonically over repeated system time.
    @test_category timing
    """
    self._call_and_check_results(system_time_expected_stamp_pairs=(
        (15.0, (15 * 1000000.0)),
        (15.0, ((15 * 1000000.0) + 1)),
        (15.0, ((15 * 1000000.0) + 2)),
        (15.01, (15.01 * 1000000.0))))
def test_timestamps_during_and_after_backwards_system_time(self):
    """Test that MonotonicTimestampGenerator's output increases by 1 when
    the underlying system time goes backward, then returns to normal when
    the system time increases again.

    @since 3.8.0
    @expected_result Timestamps should increase monotonically over system time going backwards.
    @test_category timing
    """
    self._call_and_check_results(system_time_expected_stamp_pairs=(
        (15.0, (15 * 1000000.0)),
        (13.0, ((15 * 1000000.0) + 1)),
        (14.0, ((15 * 1000000.0) + 2)),
        (13.5, ((15 * 1000000.0) + 3)),
        (15.01, (15.01 * 1000000.0))))
def test_basic_log_content(self):
    """Tests there are logs.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result logs
    @test_category timing
    """
    tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=1e-06, warning_interval=1e-06)
    tsg._last_warn = 12
    # Forward-moving clock: no warning expected.
    tsg._next_timestamp(20, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
    # Clock moved backwards (16 < 20): a drift warning must be emitted.
    tsg._next_timestamp(16, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
    self.assertLastCallArgRegex(self.patched_timestamp_log.warn.call_args,
                                'Clock skew detected:.*\\b16\\b.*\\b4\\b.*\\b20\\b')
def test_disable_logging(self):
    """Tests there are no logs when there is a clock skew if logging is disabled.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result no logs
    @test_category timing
    """
    no_warn_tsg = timestamps.MonotonicTimestampGenerator(warn_on_drift=False)
    no_warn_tsg.last = 100
    # Clock skew present (99 < 100) but warnings are disabled.
    no_warn_tsg._next_timestamp(99, no_warn_tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
def test_warning_threshold_respected_no_logging(self):
    """Tests there are no logs if `warning_threshold` is not exceeded.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result no logs
    @test_category timing
    """
    tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=2e-06)
    (tsg.last, tsg._last_warn) = (100, 97)
    # Drift of 2us does not exceed the 2e-06s threshold.
    tsg._next_timestamp(98, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
def test_warning_threshold_respected_logs(self):
    """Tests there are logs if `warning_threshold` is exceeded.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result logs
    @test_category timing
    """
    tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=1e-06, warning_interval=1e-06)
    (tsg.last, tsg._last_warn) = (100, 97)
    # Drift of 2us exceeds the 1e-06s threshold: warning expected.
    tsg._next_timestamp(98, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
def test_warning_interval_respected_no_logging(self):
    """Tests there is only one log in the interval `warning_interval`.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result one log
    @test_category timing
    """
    tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=1e-06, warning_interval=2e-06)
    tsg.last = 100
    tsg._next_timestamp(70, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
    # Second skew falls within the warning interval: no additional log.
    tsg._next_timestamp(71, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
def test_warning_interval_respected_logs(self):
    """Tests there are logs again if the clock skew happens after
    `warning_interval`.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result logs
    @test_category timing
    """
    tsg = timestamps.MonotonicTimestampGenerator(warning_interval=1e-06, warning_threshold=1e-06)
    tsg.last = 100
    tsg._next_timestamp(70, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
    # Second skew occurs after the warning interval elapsed: logged again.
    tsg._next_timestamp(72, tsg.last)
    self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 2)
def test_should_generate_incrementing_timestamps_for_all_threads(self):
    """Tests when time is "stopped", values are assigned incrementally.

    @since 3.8.0
    @jira_ticket PYTHON-676
    @expected_result the returned values increase
    @test_category timing
    """
    lock = Lock()

    def request_time():
        # Worker: pull timestamps and record them under the shared lock.
        for _ in range(timestamp_to_generate):
            timestamp = tsg()
            with lock:
                generated_timestamps.append(timestamp)

    tsg = timestamps.MonotonicTimestampGenerator()
    fixed_time = 1
    num_threads = 5
    timestamp_to_generate = 1000
    generated_timestamps = []
    # Freeze time.time() so every timestamp must come from the increment path.
    with mock.patch('time.time', new=mock.Mock(return_value=fixed_time)):
        threads = []
        for _ in range(num_threads):
            threads.append(Thread(target=request_time))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertEqual(len(generated_timestamps), (num_threads * timestamp_to_generate))
        # With a frozen clock, sorted timestamps must be exactly consecutive.
        for (i, timestamp) in enumerate(sorted(generated_timestamps)):
            self.assertEqual(int((i + 1000000.0)), timestamp)
def test_wait_for_schema_agreement(self):
    """Basic test with all schema versions agreeing."""
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    # Agreement on the first check: no sleeping/retrying happened.
    self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_uses_preloaded_results_if_given(self):
    """wait_for_schema_agreement uses preloaded results if given for shared
    table queries."""
    preloaded_results = self._get_matching_schema_preloaded_results()
    self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
    self.assertEqual(self.time.clock, 0)
    # Preloaded results satisfied the check: no query hit the connection.
    self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_wait_for_schema_agreement_falls_back_to_querying_if_schemas_dont_match_preloaded_result(self):
    """wait_for_schema_agreement requeries if schema does not match using
    preloaded results."""
    preloaded_results = self._get_nonmatching_schema_preloaded_results()
    self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
    self.assertEqual(self.time.clock, 0)
    # Mismatching preloaded results force exactly one live query.
    self.assertEqual(self.connection.wait_for_responses.call_count, 1)
def test_wait_for_schema_agreement_fails(self):
    """Make sure the control connection sleeps and retries."""
    # Change a peer's schema version so agreement is never reached.
    self.connection.peer_results[1][1][2] = 'b'
    self.assertFalse(self.control_connection.wait_for_schema_agreement())
    # It must have retried (sleeping) until the max wait was exhausted.
    self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_wait_for_schema_agreement_skipping(self):
    """If rpc_address or schema_version isn't set, the host should be skipped."""
    # Peer with no schema_version.
    self.connection.peer_results[1].append(
        ['192.168.1.3', '10.0.0.3', None, 'dc1', 'rack1', ['3', '103', '203']])
    # Peer with no rpc_address.
    self.connection.peer_results[1].append(
        [None, None, 'b', 'dc1', 'rack1', ['4', '104', '204']])
    # Disagreeing schema version on a host that is marked down.
    self.connection.peer_results[1][1][3] = 'c'
    self.cluster.metadata.get_host('192.168.1.1').is_up = False
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_rpc_lookup(self):
    """If the rpc_address is 0.0.0.0, the "peer" column should be used instead."""
    self.connection.peer_results[1].append(
        ['0.0.0.0', PEER_IP, 'b', 'dc1', 'rack1', ['3', '103', '203']])
    host = Host('0.0.0.0', SimpleConvictionPolicy)
    self.cluster.metadata.hosts[PEER_IP] = host
    # While the peer host is down its divergent schema is ignored.
    host.is_up = False
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    self.assertEqual(self.time.clock, 0)
    # Once the peer host is up, its divergent schema blocks agreement.
    host.is_up = True
    self.assertFalse(self.control_connection.wait_for_schema_agreement())
    self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self):
    """refresh_nodes_and_tokens uses preloaded results if given for shared
    table queries."""
    preloaded_results = self._get_matching_schema_preloaded_results()
    self.control_connection._refresh_node_list_and_token_map(
        self.connection, preloaded_results=preloaded_results)
    meta = self.cluster.metadata
    self.assertEqual(meta.partitioner, 'Murmur3Partitioner')
    self.assertEqual(meta.cluster_name, 'foocluster')
    # Each preloaded host must appear in the token map with 3 tokens.
    self.assertEqual(sorted(meta.all_hosts()), sorted(meta.token_map.keys()))
    for token_list in meta.token_map.values():
        self.assertEqual(3, len(token_list))
    for host in meta.all_hosts():
        self.assertEqual(host.datacenter, 'dc1')
        self.assertEqual(host.rack, 'rack1')
    # Preloaded results satisfied the refresh: no live query was made.
    self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_refresh_nodes_and_tokens_no_partitioner(self):
    """Test handling of an unknown partitioner."""
    # Blank out the partitioner column in the local-table result.
    self.connection.local_results[1][0][4] = None
    self.control_connection.refresh_node_list_and_token_map()
    meta = self.cluster.metadata
    self.assertEqual(meta.partitioner, None)
    # Without a partitioner, no token map can be built.
    self.assertEqual(meta.token_map, {})
def test_event_delay_timing(self):
    """Submits a wide array of events and makes sure that each is scheduled
    to occur in the order they were received."""
    prior_delay = 0
    for _ in range(100):
        for change_type in ('CREATED', 'DROPPED', 'UPDATED'):
            event = {'change_type': change_type, 'keyspace': '1', 'table': 'table1'}
            self.time.sleep(0.001)
            self.cluster.scheduler.reset_mock()
            self.control_connection._handle_schema_change(event)
            # NOTE(review): bare expression, appears to be a leftover no-op.
            self.cluster.scheduler.mock_calls
            # First positional arg of the scheduler call is the delay.
            current_delay = self.cluster.scheduler.mock_calls[0][1][0]
            self.assertLess(prior_delay, current_delay)
            prior_delay = current_delay
def cql_encode_none(self, val):
    """Converts :const:`None` to the string 'NULL'."""
    return 'NULL'
def cql_encode_unicode(self, val):
    """Converts :class:`unicode` objects to UTF-8 encoded strings with quote
    escaping."""
    return cql_quote(val.encode('utf-8'))
def cql_encode_str(self, val):
    """Escapes quotes in :class:`str` objects."""
    return cql_quote(val)
def cql_encode_object(self, val):
    """Default encoder for all objects that do not have a specific encoder
    function registered. This function simply calls :meth:`str()` on the
    object."""
    return str(val)
def cql_encode_float(self, val):
    """Encode floats using repr to preserve precision.

    The non-finite values map to the CQL literals ``NaN``, ``Infinity``
    and ``-Infinity``.
    """
    # The NaN and infinity checks are mutually exclusive, so order is free.
    if math.isnan(val):
        return 'NaN'
    if math.isinf(val):
        if val > 0:
            return 'Infinity'
        return '-Infinity'
    return repr(val)
def cql_encode_datetime(self, val):
    """Converts a :class:`datetime.datetime` object to a (string) integer
    timestamp with millisecond precision."""
    # utctimetuple() drops sub-second precision; microseconds are re-added
    # below (as milliseconds) via getattr, which tolerates date-like objects
    # without a microsecond attribute.
    timestamp = calendar.timegm(val.utctimetuple())
    # NOTE(review): `long` is the Python 2 builtin; this line is not
    # Python 3 compatible as written -- confirm intended interpreter.
    return str(long(((timestamp * 1000.0) + (getattr(val, 'microsecond', 0) / 1000.0))))
def cql_encode_date(self, val):
    """Converts a :class:`datetime.date` object to a string with format
    ``YYYY-MM-DD``."""
    formatted = val.strftime('%Y-%m-%d')
    return "'%s'" % formatted
def cql_encode_time(self, val):
    """Converts a :class:`cassandra.util.Time` object to a string with
    format ``HH:MM:SS.mmmuuunnn``."""
    # Relies on the value's own __str__ for the textual form.
    return "'{0}'".format(val)
def cql_encode_date_ext(self, val):
    """Encodes a :class:`cassandra.util.Date` object as an integer"""
    # offset by 2**31 so the epoch maps to the middle of the unsigned range
    unsigned_days = val.days_from_epoch + (2 ** 31)
    return str(unsigned_days)
def cql_encode_sequence(self, val):
    """
    Converts a sequence to a string of the form ``(item1, item2, ...)``.
    This is suitable for ``IN`` value lists.
    """
    parts = [self.mapping.get(type(item), self.cql_encode_object)(item)
             for item in val]
    return '(%s)' % ', '.join(parts)
def cql_encode_map_collection(self, val):
    """
    Converts a dict into a string of the form
    ``{key1: val1, key2: val2, ...}``. This is suitable for ``map`` type
    columns.
    """
    def encode(item):
        # dispatch on the runtime type, falling back to the generic encoder
        return self.mapping.get(type(item), self.cql_encode_object)(item)

    pairs = ('%s: %s' % (encode(k), encode(v)) for k, v in six.iteritems(val))
    return '{%s}' % ', '.join(pairs)
def cql_encode_list_collection(self, val):
    """
    Converts a sequence to a string of the form ``[item1, item2, ...]``.
    This is suitable for ``list`` type columns.
    """
    encoded = [self.mapping.get(type(item), self.cql_encode_object)(item)
               for item in val]
    return '[%s]' % ', '.join(encoded)
def cql_encode_set_collection(self, val):
    """
    Converts a sequence to a string of the form ``{item1, item2, ...}``.
    This is suitable for ``set`` type columns.
    """
    encoded = [self.mapping.get(type(item), self.cql_encode_object)(item)
               for item in val]
    return '{%s}' % ', '.join(encoded)
def cql_encode_all_types(self, val):
    """
    Converts any type into a CQL string, defaulting to
    ``cql_encode_object`` if :attr:`~Encoder.mapping` does not contain an
    entry for the type.
    """
    encoder = self.mapping.get(type(val), self.cql_encode_object)
    return encoder(val)
@classmethod
def service_timeouts(cls):
    """
    cls._timeout_watcher runs in this loop forever. It is usually waiting
    for the next timeout on the cls._new_timer Event. When new timers are
    added, that event is set so that the watcher can wake up and possibly
    set an earlier timeout.
    """
    manager = cls._timers
    while True:
        deadline = manager.service_timeouts()
        if deadline:
            # sleep only until the earliest pending timeout
            wait = max(deadline - time.time(), 0)
        else:
            # nothing pending; park until a new timer wakes us
            wait = 10000
        cls._new_timer.wait(wait)
        cls._new_timer.clear()
def dataReceived(self, data):
    """
    Callback function that is called when data has been received on the
    connection. Reaches back to the Connection object and queues the data
    for processing.
    """
    conn = self.transport.connector.factory.conn
    conn._iobuf.write(data)
    conn.handle_read()
def connectionMade(self):
    """
    Callback function that is called when a connection has succeeded.
    Reaches back to the Connection object and confirms that the
    connection is ready.
    """
    conn = self.transport.connector.factory.conn
    conn.client_connection_made()
def buildProtocol(self, addr):
    """
    Twisted function that defines which kind of protocol to use in the
    ClientFactory.
    """
    protocol = TwistedConnectionProtocol()
    return protocol
def clientConnectionFailed(self, connector, reason):
    """Overridden twisted callback which is called when the connection attempt fails."""
    log.debug('Connect failed: %s', reason)
    # surface the underlying exception to the driver's connection object
    failure = reason.value
    self.conn.defunct(failure)
def clientConnectionLost(self, connector, reason):
    """
    Overridden twisted callback which is called when the connection goes
    away (cleanly or otherwise).

    It should be safe to call defunct() here instead of just close,
    because we can assume that if the connection was closed cleanly,
    there are no requests to error out. If this assumption turns out to
    be false, we can call close() instead of defunct() when "reason" is
    an appropriate type.
    """
    log.debug('Connect lost: %s', reason)
    failure = reason.value
    self.conn.defunct(failure)
def __init__(self, *args, **kwargs):
    """
    Initialization method.

    Note that we can't call reactor methods directly here because
    it's not thread-safe, so we schedule the reactor/connection
    stuff to be run from the event loop thread when it gets the
    chance.
    """
    Connection.__init__(self, *args, **kwargs)
    # Considered closed until the reactor thread completes the TCP connect.
    self.is_closed = True
    self.connector = None
    # reactor APIs are not thread-safe; hop onto the reactor thread to
    # actually open the connection.
    reactor.callFromThread(self.add_connection)
    # Start the shared event loop if it is not already running.
    self._loop.maybe_start()
def add_connection(self):
    """Convenience function to connect and store the resulting connector."""
    factory = TwistedConnectionClientFactory(self)
    self.connector = reactor.connectTCP(
        host=self.host, port=self.port,
        factory=factory, timeout=self.connect_timeout)
def client_connection_made(self):
    """Called by twisted protocol when a connection attempt has succeeded."""
    # flip the closed flag under the lock, then kick off option negotiation
    with self.lock:
        self.is_closed = False
    self._send_options_message()
def close(self):
    """Disconnect and error-out all requests."""
    with self.lock:
        # already closed by another thread; nothing to do
        if self.is_closed:
            return
        self.is_closed = True
    log.debug('Closing connection (%s) to %s', id(self), self.host)
    self.connector.disconnect()
    log.debug('Closed socket to %s', self.host)
    if not self.is_defunct:
        # a defunct connection has already errored its requests out
        message = 'Connection to %s was closed' % self.host
        self.error_all_requests(ConnectionShutdown(message))
        self.connected_event.set()
def handle_read(self):
    """Process the incoming data buffer."""
    # delegate to the shared Connection frame-parsing machinery
    self.process_io_buffer()
def push(self, data):
    """
    This function is called when outgoing data should be queued for
    sending.

    Note that we can't call transport.write() directly because it is not
    thread-safe, so we schedule it to run from within the event loop when
    it gets the chance.
    """
    write = self.connector.transport.write
    reactor.callFromThread(write, data)
@classmethod
def get_lower_supported(cls, previous_version):
    """Return the lower supported protocol version. Beta versions are omitted."""
    # walk versions from highest to lowest, skipping betas, and take the
    # first one strictly below previous_version; 0 means none exists
    for candidate in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True):
        if candidate not in ProtocolVersion.BETA_VERSIONS and candidate < previous_version:
            return candidate
    return 0
@property
def signature(self):
    """
    function signature string in the form 'name([type0[,type1[...]]])'

    can be used to uniquely identify overloaded function names within a
    keyspace
    """
    fn_name = self.name
    arg_types = self.argument_types
    return self.format_signature(fn_name, arg_types)
def __init__(self, value):
    """
    Initializer value can be:

    - integer_type: absolute nanoseconds in the day
    - datetime.time: built-in time
    - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]"
    """
    # dispatch on the value's type, preserving the original check order
    dispatch = (
        (six.integer_types, self._from_timestamp),
        (datetime.time, self._from_time),
        (six.string_types, self._from_timestring),
    )
    for accepted_types, initializer in dispatch:
        if isinstance(value, accepted_types):
            initializer(value)
            return
    raise TypeError('Time arguments must be a whole number, datetime.time, or string')
@property
def hour(self):
    """The hour component of this time (0-23)"""
    whole_hours = self.nanosecond_time // Time.HOUR
    return whole_hours
@property
def minute(self):
    """The minute component of this time (0-59)"""
    # total minutes in the day, reduced modulo one hour's worth
    return (self.nanosecond_time // Time.MINUTE) % 60
@property
def second(self):
    """The second component of this time (0-59)"""
    # total seconds in the day, reduced modulo one minute's worth
    return (self.nanosecond_time // Time.SECOND) % 60
@property
def nanosecond(self):
    """The fractional seconds component of the time, in nanoseconds"""
    fractional = self.nanosecond_time % Time.SECOND
    return fractional