Columns: src_fm_fc_ms_ff (string, lengths 43 to 86.8k), target (string, lengths 20 to 276k)
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nex...
@Test public void testFetchRequestWhenRecordTooLarge() { try { client.setNodeApiVersions(NodeApiVersions.create(Collections.singletonList( new ApiVersionsResponse.ApiVersion(ApiKeys.FETCH.id, (short) 2, (short) 2)))); makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("RecordTooLargeException ...
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetc...
@Test public void testFetchOnPausedPartition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); subscriptions.pause(tp1); assertFalse(fetcher.sendFetches() > 0); assertTrue(client.requests().isEmpty()); }
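The pair above pins down that paused partitions are excluded from fetch requests. The same behavior is reachable through the public consumer API; a minimal sketch, assuming a broker on localhost:9092 and a topic named "demo" (both hypothetical):

```java
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PauseResumeSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "pause-demo");
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            TopicPartition tp = new TopicPartition("demo", 0); // assumed topic
            consumer.assign(Collections.singleton(tp));
            consumer.pause(Collections.singleton(tp));
            // While paused, the Fetcher omits tp from fetch requests and poll()
            // returns no records for it, mirroring the test above.
            ConsumerRecords<byte[], byte[]> none = consumer.poll(100);
            System.out.println("records while paused: " + none.count()); // expect 0
            consumer.resume(Collections.singleton(tp));
        }
    }
}
```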
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscr...
@Test public void testUpdateFetchPositionsNoneCommittedNoResetStrategy() { Set<TopicPartition> tps = new HashSet<>(Arrays.asList(tp1, tp2)); subscriptionsNoAutoReset.assignFromUser(tps); try { fetcherNoAutoReset.updateFetchPositions(tps); fail("Should have thrown NoOffsetForPartitionException"); } catch (NoOffsetForPar...
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, ...
@Test public void testGetAllTopics() { client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(5000L); assertEquals(cluster.topics().size(), allTopics.size()); } @Test public void testGetAllTopicsDisconnect() { client.prepareResponse(...
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { R...
@Test(expected = InvalidTopicException.class) public void testGetTopicMetadataInvalidTopic() { client.prepareResponse(newMetadataResponse(topicName, Errors.INVALID_TOPIC_EXCEPTION)); fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); } @Test public void testGetTop...
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitio...
@Test public void testFetcherMetrics() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); MetricName partitionLagMetric = metrics.metricName(tp1 + ".records-lag", metricGroup); Map<MetricName, KafkaMetric> allMetri...
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, Offset...
@Test public void testGetOffsetsForTimesTimeout() { try { fetcher.getOffsetsByTimes(Collections.singletonMap(new TopicPartition(topicName, 2), 1000L), 100L); fail("Should throw timeout exception."); } catch (TimeoutException e) { } } @Test public void testGetOffsetsForTimes() { assertTrue(fetcher.getOffsetsByTimes(new ...
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalS...
@Test(expected = IllegalArgumentException.class) public void testRuntimeExceptionInComplete() { RequestFuture<Exception> future = new RequestFuture<>(); future.complete(new RuntimeException()); } @Test(expected = IllegalStateException.class) public void invokeCompleteAfterAlreadyComplete() { RequestFuture<Void> future ...
RequestFuture implements ConsumerNetworkClient.PollCondition { public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complet...
@Test(expected = IllegalStateException.class) public void invokeRaiseAfterAlreadyFailed() { RequestFuture<Void> future = new RequestFuture<>(); future.raise(new RuntimeException()); future.raise(new RuntimeException()); }
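The two rows above encode RequestFuture's single-shot contract: complete() rejects RuntimeException values outright, and any second transition (complete or raise) fails. A small sketch against the internal class, assuming the kafka-clients jar on the classpath:

```java
import org.apache.kafka.clients.consumer.internals.RequestFuture;

public class RequestFutureSketch {
    public static void main(String[] args) {
        RequestFuture<String> future = new RequestFuture<>();
        future.complete("ok");                // the first transition wins
        System.out.println(future.isDone());  // true
        try {
            future.raise(new RuntimeException("too late"));
        } catch (IllegalStateException e) {
            // "Invalid attempt to complete a request future which is already complete"
            System.out.println("second transition rejected: " + e.getMessage());
        }
    }
}
```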
ConsumerProtocol { public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userD...
@Test public void deserializeNewSubscriptionVersion() { short version = 100; Schema subscriptionSchemaV100 = new Schema( new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)), new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES), new Field("foo", Type.STRING)); Struct subscriptionV100 = new Struct...
ConsumerProtocol { public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = ...
@Test public void deserializeNewAssignmentVersion() { short version = 100; Schema assignmentSchemaV100 = new Schema( new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)), new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES), new Field("foo", Type.STRING)); Struc...
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscr...
@Test public void testNormalHeartbeat() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(sessionTimeoutMs); RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); assertEquals(1, consumerClient.pendingRequestCount()); assertFalse(future.is...
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.sub...
@Test public void testNormalJoinGroupFollower() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId...
ConsumerCoordinator extends AbstractCoordinator { public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinat...
@Test public void testLeaveGroupOnClose() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "lea...
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coor...
@Test public void testCommitOffsetSyncNotCoordinator() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NOT_COORDINATOR))); client.prepareResponse(groupCoordinatorResponse(node, E...
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entr...
@Test public void testRefreshOffset() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refres...
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedS...
@Test public void testProtocolMetadataOrder() { RoundRobinAssignor roundRobin = new RoundRobinAssignor(); RangeAssignor range = new RangeAssignor(); try (Metrics metrics = new Metrics(time)) { ConsumerCoordinator coordinator = buildCoordinator(metrics, Arrays.<PartitionAssignor>asList(roundRobin, range), ConsumerConfig...
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(no...
@Test public void send() { client.prepareResponse(heartbeatResponse(Errors.NONE)); RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); assertEquals(1, consumerClient.pendingRequestCount()); assertEquals(1, consumerClient.pendingRequestCount(node)); assertFalse(future.isDone()); consumerClient...
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, ...
@Test public void doNotBlockIfPollConditionIsSatisfied() { NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(0L), EasyMock.anyLong())).an...
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, ...
@Test public void wakeup() { RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); consumerClient.wakeup(); try { consumerClient.poll(0); fail(); } catch (WakeupException e) { } client.respond(heartbeatResponse(Errors.NONE)); consumerClient.poll(future); assertTrue(future.isDone()); }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, ...
@Test public void testAwaitForMetadataUpdateWithTimeout() { assertFalse(consumerClient.awaitMetadataUpdate(10L)); }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHea...
@Test public void testShouldHeartbeat() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep((long) ((float) interval * 1.1)); assertTrue(heartbeat.shouldHeartbeat(time.milliseconds())); }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextH...
@Test public void testTimeToNextHeartbeat() { heartbeat.sentHeartbeat(0); assertEquals(100, heartbeat.timeToNextHeartbeat(0)); assertEquals(0, heartbeat.timeToNextHeartbeat(100)); assertEquals(0, heartbeat.timeToNextHeartbeat(200)); }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long ...
@Test public void testSessionTimeoutExpired() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep(305); assertTrue(heartbeat.sessionTimeoutExpired(time.milliseconds())); }
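Taking the three Heartbeat pairs together: timeToNextHeartbeat() counts down one interval from the last send, shouldHeartbeat() fires when that countdown reaches zero, and sessionTimeoutExpired() trips once sessionTimeout elapses without a session reset. A sketch with assumed constructor values (only the 100ms interval is implied by testTimeToNextHeartbeat):

```java
import org.apache.kafka.clients.consumer.internals.Heartbeat;

public class HeartbeatSketch {
    public static void main(String[] args) {
        // sessionTimeout=300, interval=100, maxPollInterval=900, retryBackoff=10;
        // only the 100ms interval is pinned down by the test above, the rest are assumed.
        Heartbeat heartbeat = new Heartbeat(300L, 100L, 900L, 10L);
        heartbeat.sentHeartbeat(0L);
        System.out.println(heartbeat.timeToNextHeartbeat(0L));      // 100: a full interval remains
        System.out.println(heartbeat.timeToNextHeartbeat(100L));    // 0: due now
        System.out.println(heartbeat.shouldHeartbeat(100L));        // true, time remaining == 0
        System.out.println(heartbeat.sessionTimeoutExpired(305L));  // true: 300ms passed, no reset
    }
}
```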
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, ...
@Test public void testCoordinatorDiscoveryBackoff() { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.blackout(coordinatorNode, 50L); long initialTime = mockTime.milliseconds(); coordinator.ensureCoordinatorRead...
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.no...
@Test public void testLookupCoordinator() throws Exception { mockClient.setNode(null); RequestFuture<Void> noBrokersAvailableFuture = coordinator.lookupCoordinator(); assertTrue("Failed future expected", noBrokersAvailableFuture.failed()); mockClient.setNode(node); RequestFuture<Void> future = coordinator.lookupCoordin...
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, ...
@Test public void testWakeupAfterJoinGroupSent() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGro...
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (Serializati...
@Test public void testJsonSchemaMetadataTranslation() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(true, ...
SubscriptionState { public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collect...
@Test(expected = IllegalStateException.class) public void cantChangePositionForNonAssignedPartition() { state.position(tp0, 1); }
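position() goes through assignedState(tp), so calling it for a partition that was never assigned throws IllegalStateException; assigning first makes the call legal. A sketch, with "demo"/0 as a hypothetical partition:

```java
import java.util.Collections;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.consumer.internals.SubscriptionState;
import org.apache.kafka.common.TopicPartition;

public class SubscriptionStateSketch {
    public static void main(String[] args) {
        SubscriptionState state = new SubscriptionState(OffsetResetStrategy.EARLIEST);
        TopicPartition tp0 = new TopicPartition("demo", 0); // hypothetical partition
        try {
            state.position(tp0, 1L); // not assigned yet -> rejected
        } catch (IllegalStateException e) {
            System.out.println("expected: " + e.getMessage());
        }
        state.assignFromUser(Collections.singleton(tp0));
        state.seek(tp0, 1L);                      // seek establishes a valid position
        System.out.println(state.position(tp0));  // 1
    }
}
```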
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(O...
@Test(expected = IllegalStateException.class) public void cantSubscribeTopicAndPattern() { state.subscribe(singleton(topic), rebalanceListener); state.subscribe(Pattern.compile(".*"), rebalanceListener); } @Test(expected = IllegalStateException.class) public void cantSubscribePatternAndTopic() { state.subscribe(Pattern...
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = n...
@Test public void testOneConsumerNoTopic() { String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList()))); assertEqual...
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> v...
@Test public void testOsDefaultSocketBufferSizes() throws Exception { Map<String, Object> config = new HashMap<>(); config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, S...
KafkaConsumer implements Consumer<K, V> { public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, ...
@Test public void testSubscription() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.subscribe(singletonList(topic)); assertEquals(singleton(topic), consumer.subscription()); assertTrue(consumer.assignment().isEmpty()); consumer.subscribe(Collections.<String>emptyList()); assertTrue(consumer.subscrip...
KafkaConsumer implements Consumer<K, V> { @Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } } KafkaConsumer(Map<String, Object> config...
@Test public void testPause() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.assign(singletonList(tp0)); assertEquals(singleton(tp0), consumer.assignment()); assertTrue(consumer.paused().isEmpty()); consumer.pause(singleton(tp0)); assertEquals(singleton(tp0), consumer.paused()); consumer.resume(sing...
ConsumerRecord { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } ConsumerRecord(String topic, int partition, long offset, ...
@Test @SuppressWarnings("deprecation") public void testNullChecksumInConstructor() { String key = "key"; String value = "value"; long timestamp = 242341324L; ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 23L, timestamp, TimestampType.CREATE_TIME, null, key.length(), value.length(), key, value...
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.p...
@Test public void testOneConsumerNoTopic() { String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList()))); assertEqual...
Metadata { public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long ref...
@Test public void testMetadata() throws Exception { long time = 0; metadata.update(Cluster.empty(), Collections.<String>emptySet(), time); assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0); metadata.requestUpdate(); assertFalse("Still no update needed due to backoff", metadata.timeToNextUpdate(tim...
Metadata { public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); } Metadata(lon...
@Test public void testTimeToNextUpdate() { checkTimeToNextUpdate(100, 1000); checkTimeToNextUpdate(1000, 100); checkTimeToNextUpdate(0, 0); checkTimeToNextUpdate(0, 100); checkTimeToNextUpdate(100, 0); }
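The formula in timeToNextUpdate() returns the larger of the time to expiry (which drops to zero once an update is requested) and the time until the refresh backoff allows another attempt. A sketch with assumed refreshBackoffMs=100 and metadataExpireMs=1000, values that also match the arithmetic in the testFailedUpdate pair below:

```java
import java.util.Collections;
import org.apache.kafka.clients.Metadata;
import org.apache.kafka.common.Cluster;

public class MetadataBackoffSketch {
    public static void main(String[] args) {
        // refreshBackoffMs=100, metadataExpireMs=1000 (assumed values)
        Metadata metadata = new Metadata(100L, 1000L, true);
        metadata.update(Cluster.empty(), Collections.<String>emptySet(), 0L);
        // expiry at 0+1000, backoff until 0+100 -> max(1000-50, 100-50) = 950
        System.out.println(metadata.timeToNextUpdate(50L)); // 950
        metadata.requestUpdate();
        // needUpdate==true zeroes timeToExpire, but the backoff still applies
        System.out.println(metadata.timeToNextUpdate(50L)); // 50
    }
}
```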
Metadata { public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, Cluster...
@Test public void testFailedUpdate() { long time = 100; metadata.update(Cluster.empty(), Collections.<String>emptySet(), time); assertEquals(100, metadata.timeToNextUpdate(1000)); metadata.failedUpdate(1100); assertEquals(100, metadata.timeToNextUpdate(1100)); assertEquals(100, metadata.lastSuccessfulUpdate()); metadat...
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entr...
@Test public void testClusterListenerGetsNotifiedOfUpdate() { long time = 0; MockClusterResourceListener mockClusterListener = new MockClusterResourceListener(); ClusterResourceListeners listeners = new ClusterResourceListeners(); listeners.maybeAdd(mockClusterListener); metadata = new Metadata(refreshBackoffMs, metada...
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, ...
@Test @SuppressWarnings("deprecation") public void testNullChecksum() { long timestamp = 2340234L; int keySize = 3; int valueSize = 5; RecordMetadata metadata = new RecordMetadata(new TopicPartition("foo", 0), 15L, 3L, timestamp, null, keySize, valueSize); assertEquals(DefaultRecord.computePartialChecksum(timestamp, ke...
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } MockProducer(final Cluster cluster, ...
@Test public void shouldThrowOnInitTransactionIfProducerAlreadyInitializedForTransactions() { producer.initTransactions(); try { producer.initTransactions(); fail("Should have thrown as producer is already initialized"); } catch (IllegalStateException e) { } }
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } MockProducer(final C...
@Test(expected = IllegalStateException.class) public void shouldThrowOnBeginTransactionIfTransactionsNotInitialized() { producer.beginTransaction(); }
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId);...
@Test(expected = IllegalStateException.class) public void shouldThrowOnSendOffsetsToTransactionIfTransactionsNotInitialized() { producer.sendOffsetsToTransaction(null, null); }
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerG...
@Test(expected = IllegalStateException.class) public void shouldThrowOnCommitIfTransactionsNotInitialized() { producer.commitTransaction(); }
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = fals...
@Test(expected = IllegalStateException.class) public void shouldThrowOnAbortIfTransactionsNotInitialized() { producer.abortTransaction(); }
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, ...
@Test(expected = IllegalStateException.class) public void shouldThrowFenceProducerIfTransactionsNotInitialized() { producer.fenceProducer(); }
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, fin...
@Test public void shouldThrowOnCloseIfProducerIsClosed() { producer.close(); try { producer.close(); fail("Should have thrown as producer is already closed"); } catch (IllegalStateException e) { } }
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, ...
@Test public void shouldBeFlushedIfNoBufferedRecords() { assertTrue(producer.flushed()); }
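Together, the MockProducer pairs above describe a strict transactional lifecycle: initTransactions() exactly once, beginTransaction() before transactional work, and commit/abort to finish, with sends buffered as uncommitted until the commit (per commitTransaction() above, which moves uncommittedSends into sent). A sketch, assuming the convenience constructor MockProducer(autoComplete, keySerializer, valueSerializer):

```java
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerSketch {
    public static void main(String[] args) {
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.initTransactions();   // a second call would throw IllegalStateException
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("demo", "k", "v")); // buffered as uncommitted
        System.out.println(producer.history().size()); // 0: nothing visible until commit
        producer.commitTransaction();
        System.out.println(producer.history().size()); // 1: commit publishes the buffered send
        producer.close();              // closing twice would also throw
    }
}
```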
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateExceptio...
@Test(expected = IllegalStateException.class) public void testFailIfNotReadyForSendNoProducerId() { transactionManager.failIfNotReadyForSend(); } @Test public void testFailIfNotReadyForSendIdempotentProducer() { TransactionManager idempotentTransactionManager = new TransactionManager(); idempotentTransactionManager.fai...
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartiti...
@Test public void testMaybeAddPartitionToTransaction() { long pid = 13131L; short epoch = 1; TopicPartition partition = new TopicPartition("foo", 0); doInitTransactions(pid, epoch); transactionManager.beginTransaction(); transactionManager.maybeAddPartitionToTransaction(partition); assertTrue(transactionManager.hasPart...
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current seq...
@Test(expected = IllegalStateException.class) public void testInvalidSequenceIncrement() { TransactionManager transactionManager = new TransactionManager(); transactionManager.incrementSequenceNumber(tp0, 3333); }
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTrans...
@Test public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException { final long pid = 13131L; final short epoch = 1; doInitTransactions(pid, epoch); transactionManager.beginTransaction(); accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, n...
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size...
@Test public void testDelayedAllocation() throws Exception { BufferPool pool = new BufferPool(5 * 1024, 1024, metrics, time, metricGroup); ByteBuffer buffer = pool.allocate(1024, maxBlockTimeMs); CountDownLatch doDealloc = asyncDeallocate(pool, buffer); CountDownLatch allocation = asyncAllocate(pool, 5 * 1024); assertE...
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deal...
@Test public void testStressfulSituation() throws Exception { int numThreads = 10; final int iterations = 50000; final int poolableSize = 1024; final long totalMemory = numThreads / 2 * poolableSize; final BufferPool pool = new BufferPool(totalMemory, poolableSize, metrics, time, metricGroup); List<StressTestThread> th...
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long...
@Test public void testAbortIncompleteBatches() throws Exception { long lingerMs = Long.MAX_VALUE; int numRecords = 100; final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0); final RecordAccumulator accum = new RecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionT...
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if...
@Test public void testExpiredBatches() throws InterruptedException { long retryBackoffMs = 100L; long lingerMs = 3000L; int requestTimeout = 60; int batchSize = 1025; RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs, ret...
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; tr...
@Test(expected = UnsupportedVersionException.class) public void testIdempotenceWithOldMagic() throws InterruptedException { ApiVersions apiVersions = new ApiVersions(); int batchSize = 1025; apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) ...
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<Producer...
@Test public void testSplitAndReenqueue() throws ExecutionException, InterruptedException { long now = time.milliseconds(); RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10, 100L, metrics, time, new ApiVersions(), null); ByteBuffer buffer = ByteBuffer.allocate(4096); MemoryRecor...
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRec...
@Test public void testChecksumNullForMagicV2() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); assertNotNull(future); assertNull(future.checksumOrNull()); } @Tes...
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && re...
@Test public void testLargeLingerOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 100L, now - 2L, Long.MAX_VALUE, false)); } @Test public void testLargeFullOldNowExpire() { ProducerBatch batch = new ProducerBatch(ne...
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(top...
@Test public void testKeyPartitionIsStable() { int partition = partitioner.partition("test", null, keyBytes, null, null, cluster); assertEquals("Same key should yield same partition", partition, partitioner.partition("test", null, keyBytes, null, null, cluster)); } @Test public void testRoundRobinWithUnavailablePartiti...
Sender implements Runnable { public void run() { log.debug("Starting Kafka producer I/O thread."); while (running) { try { run(time.milliseconds()); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } log.debug("Beginning shutdown of Kafka producer I/O thread, sending remaining re...
@Test public void testSimple() throws Exception { long offset = 0; Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertEquals("We should have a single produce requ...
Sender implements Runnable { public static Sensor throttleTimeSensor(Metrics metrics) { String metricGrpName = SenderMetrics.METRIC_GROUP_NAME; Sensor produceThrottleTimeSensor = metrics.sensor("produce-throttle-time"); produceThrottleTimeSensor.add(metrics.metricName("produce-throttle-time-avg", metricGrpName, "The av...
@Test public void testQuotaMetrics() throws Exception { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(metrics); Cluster cluster = TestUtils.singletonCluster("test", 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata...
KafkaProducer implements Producer<K, V> { @Override public void close() { close(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Properties properties); KafkaProd...
@Test public void testConstructorWithSerializers() { Properties producerProps = new Properties(); producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()).close(); } @Test public void testSerializerClose() thro...
KafkaProducer implements Producer<K, V> { @Override public Future<RecordMetadata> send(ProducerRecord<K, V> record) { return send(record, null); } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Propertie...
@PrepareOnlyThisForTest(Metadata.class) @Test public void testMetadataFetchOnStaleMetadata() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), n...
KafkaProducer implements Producer<K, V> { @Override public List<PartitionInfo> partitionsFor(String topic) { try { return waitOnMetadata(topic, null, maxBlockTimeMs).cluster.partitionsForTopic(topic); } catch (InterruptedException e) { throw new InterruptException(e); } } KafkaProducer(Map<String, Object> configs); Ka...
@Test public void testTopicRefreshInMetadata() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, "600000"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new St...
KafkaAdminClient extends AdminClient { static <K, V> List<V> getOrCreateListValue(Map<K, List<V>> map, K key) { List<V> list = map.get(key); if (list != null) return list; list = new LinkedList<>(); map.put(key, list); return list; } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metada...
@Test public void testGetOrCreateListValue() { Map<String, List<String>> map = new HashMap<>(); List<String> fooList = KafkaAdminClient.getOrCreateListValue(map, "foo"); assertNotNull(fooList); fooList.add("a"); fooList.add("b"); List<String> fooList2 = KafkaAdminClient.getOrCreateListValue(map, "foo"); assertEquals(fo...
KafkaAdminClient extends AdminClient { static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) { long deltaMs = deadlineMs - now; if (deltaMs > Integer.MAX_VALUE) deltaMs = Integer.MAX_VALUE; else if (deltaMs < Integer.MIN_VALUE) deltaMs = Integer.MIN_VALUE; return (int) deltaMs; } private KafkaAdminClient(A...
@Test public void testCalcTimeoutMsRemainingAsInt() { assertEquals(0, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1000)); assertEquals(100, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1100)); assertEquals(Integer.MAX_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(0, Long.MAX_VALUE)); assertEquals(Int...
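calcTimeoutMsRemainingAsInt() is package-private, so for illustration here is a standalone copy of the clamping logic from the focal method above, exercising the saturation cases the test asserts:

```java
public class TimeoutClampSketch {
    // Standalone copy of KafkaAdminClient.calcTimeoutMsRemainingAsInt for illustration:
    // the long delta is saturated into the int range instead of overflowing on the cast.
    static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) {
        long deltaMs = deadlineMs - now;
        if (deltaMs > Integer.MAX_VALUE)
            deltaMs = Integer.MAX_VALUE;
        else if (deltaMs < Integer.MIN_VALUE)
            deltaMs = Integer.MIN_VALUE;
        return (int) deltaMs;
    }

    public static void main(String[] args) {
        System.out.println(calcTimeoutMsRemainingAsInt(1000, 1100));        // 100
        System.out.println(calcTimeoutMsRemainingAsInt(0, Long.MAX_VALUE)); // 2147483647, saturated
        System.out.println(calcTimeoutMsRemainingAsInt(Long.MAX_VALUE, 0)); // -2147483648, saturated
    }
}
```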
KafkaAdminClient extends AdminClient { static String prettyPrintException(Throwable throwable) { if (throwable == null) return "Null exception."; if (throwable.getMessage() != null) { return throwable.getClass().getSimpleName() + ": " + throwable.getMessage(); } return throwable.getClass().getSimpleName(); } private K...
@Test public void testPrettyPrintException() { assertEquals("Null exception.", KafkaAdminClient.prettyPrintException(null)); assertEquals("TimeoutException", KafkaAdminClient.prettyPrintException(new TimeoutException())); assertEquals("TimeoutException: The foobar timed out.", KafkaAdminClient.prettyPrintException(new ...
KafkaAdminClient extends AdminClient { static String generateClientId(AdminClientConfig config) { String clientId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG); if (!clientId.isEmpty()) return clientId; return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement(); } private KafkaAdminClient(AdminClientC...
@Test public void testGenerateClientId() { Set<String> ids = new HashSet<>(); for (int i = 0; i < 10; i++) { String id = KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "")); assertTrue("Got duplicate id " + id, !ids.contains(id)); ids.add(id); } assertEquals("myCustomId", KafkaAdminCli...
KafkaAdminClient extends AdminClient { @Override public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options) { final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(newTopics.size()); final Map<String, CreateTopicsRequest.TopicDetails> topicsMap = new ...
@Test public void testTimeoutWithoutMetadata() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().setNode(new Node(0, "localhost", 8121)); env.kafkaClient().prepareRes...
KafkaAdminClient extends AdminClient { @Override public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options) { final long now = time.milliseconds(); final KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>(); runnable.call(new Call("describeAcls", calcDeadlineM...
@Test public void testDescribeAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env...
KafkaAdminClient extends AdminClient { @Override public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) { final long now = time.milliseconds(); final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>(); final List<AclCreation> aclCreations = new ArrayList<>(); for (AclBi...
@Test public void testCreateAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.k...
KafkaAdminClient extends AdminClient { @Override public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) { final long now = time.milliseconds(); final Map<AclBindingFilter, KafkaFutureImpl<FilterResults>> futures = new HashMap<>(); final List<AclBindingFilter> filterList = ne...
@Test public void testDeleteAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.k...
JmxReporter implements MetricsReporter { public void close() { synchronized (LOCK) { for (KafkaMbean mbean : this.mbeans.values()) unregister(mbean); } } JmxReporter(); JmxReporter(String prefix); @Override void configure(Map<String, ?> configs); @Override void init(List<KafkaMetric> metrics); @Override void metricCha...
@Test public void testJmxRegistration() throws Exception { Metrics metrics = new Metrics(); try { metrics.addReporter(new JmxReporter()); Sensor sensor = metrics.sensor("kafka.requests"); sensor.add(metrics.metricName("pack.bean1.avg", "grp1"), new Avg()); sensor.add(metrics.metricName("pack.bean2.total", "grp2"), new ...
Metrics implements Closeable { public MetricName metricName(String name, String group, String description, Map<String, String> tags) { Map<String, String> combinedTag = new LinkedHashMap<>(config.tags()); combinedTag.putAll(tags); return new MetricName(name, group, description, combinedTag); } Metrics(); Metrics(Time ...
@Test public void testMetricName() { MetricName n1 = metrics.metricName("name", "group", "description", "key1", "value1", "key2", "value2"); Map<String, String> tags = new HashMap<String, String>(); tags.put("key1", "value1"); tags.put("key2", "value2"); MetricName n2 = metrics.metricName("name", "group", "description"...
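metricName() merges the registry's config-level tags with the per-call tags into one map, so the Map and varargs overloads yield equal MetricName instances. A quick sketch that also records a value through a sensor:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class MetricsSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        Map<String, String> tags = new HashMap<>();
        tags.put("key1", "value1");
        MetricName byMap = metrics.metricName("name", "group", "description", tags);
        MetricName byVarargs = metrics.metricName("name", "group", "description", "key1", "value1");
        System.out.println(byMap.equals(byVarargs)); // true: same name, group, tags

        Sensor sensor = metrics.sensor("demo");
        sensor.add(byMap, new Avg());
        sensor.record(42.0);
        System.out.println(metrics.metrics().get(byMap).value()); // 42.0
        metrics.close();
    }
}
```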
Metrics implements Closeable { public Sensor sensor(String name) { return this.sensor(name, Sensor.RecordingLevel.INFO); } Metrics(); Metrics(Time time); Metrics(MetricConfig defaultConfig, Time time); Metrics(MetricConfig defaultConfig); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time tim...
@Test(expected = IllegalArgumentException.class) public void testBadSensorHierarchy() { Sensor p = metrics.sensor("parent"); Sensor c1 = metrics.sensor("child1", p); Sensor c2 = metrics.sensor("child2", p); metrics.sensor("gc", c1, c2); }
Metrics implements Closeable { public void removeSensor(String name) { Sensor sensor = sensors.get(name); if (sensor != null) { List<Sensor> childSensors = null; synchronized (sensor) { synchronized (this) { if (sensors.remove(name, sensor)) { for (KafkaMetric metric : sensor.metrics()) removeMetric(metric.metricName()...
@Test public void testRemoveSensor() { int size = metrics.metrics().size(); Sensor parent1 = metrics.sensor("test.parent1"); parent1.add(metrics.metricName("test.parent1.count", "grp1"), new Count()); Sensor parent2 = metrics.sensor("test.parent2"); parent2.add(metrics.metricName("test.parent2.count", "grp1"), new Coun...
Metrics implements Closeable { public synchronized KafkaMetric removeMetric(MetricName metricName) { KafkaMetric metric = this.metrics.remove(metricName); if (metric != null) { for (MetricsReporter reporter : reporters) reporter.metricRemoval(metric); } return metric; } Metrics(); Metrics(Time time); Metrics(MetricCo...
@Test public void testRemoveMetric() { int size = metrics.metrics().size(); metrics.addMetric(metrics.metricName("test1", "grp1"), new Count()); metrics.addMetric(metrics.metricName("test2", "grp1"), new Count()); assertNotNull(metrics.removeMetric(metrics.metricName("test1", "grp1"))); assertNull(metrics.metrics().get...
Metrics implements Closeable { public MetricName metricInstance(MetricNameTemplate template, String... keyValue) { return metricInstance(template, getTags(keyValue)); } Metrics(); Metrics(Time time); Metrics(MetricConfig defaultConfig, Time time); Metrics(MetricConfig defaultConfig); Metrics(MetricConfig defaultCon...
@Test public void testMetricInstances() { MetricName n1 = metrics.metricInstance(SampleMetrics.METRIC1, "key1", "value1", "key2", "value2"); Map<String, String> tags = new HashMap<String, String>(); tags.put("key1", "value1"); tags.put("key2", "value2"); MetricName n2 = metrics.metricInstance(SampleMetrics.METRIC2, tag...
Sensor { public boolean shouldRecord() { return this.recordingLevel.shouldRecord(config.recordLevel().id); } Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time, long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel); String name(); void record(); boolean ...
@Test public void testRecordLevelEnum() { Sensor.RecordingLevel configLevel = Sensor.RecordingLevel.INFO; assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id)); assertFalse(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); configLevel = Sensor.RecordingLevel.DEBUG; assertTrue(Sensor.RecordingLeve...
Histogram { public Histogram(BinScheme binScheme) { this.hist = new float[binScheme.bins()]; this.count = 0.0f; this.binScheme = binScheme; } Histogram(BinScheme binScheme); void record(double value); double value(double quantile); float[] counts(); void clear(); @Override String toString(); }
@Test public void testHistogram() { BinScheme scheme = new ConstantBinScheme(12, -5, 5); Histogram hist = new Histogram(scheme); for (int i = -5; i < 5; i++) hist.record(i); for (int i = 0; i < 10; i++) assertEquals(scheme.fromBin(i + 1), hist.value(i / 10.0 + EPS), EPS); } @Test public void testHistogram() { BinScheme...
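A sketch of the Histogram API above: values recorded into a 12-bin constant scheme over [-5, 5), then read back by quantile. BinScheme and ConstantBinScheme are assumed to be the nested types of Histogram:

```java
import org.apache.kafka.common.metrics.stats.Histogram;
import org.apache.kafka.common.metrics.stats.Histogram.BinScheme;
import org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme;

public class HistogramSketch {
    public static void main(String[] args) {
        // 12 equal-width bins covering [-5, 5); the outermost bins absorb out-of-range values
        BinScheme scheme = new ConstantBinScheme(12, -5, 5);
        Histogram hist = new Histogram(scheme);
        for (int i = -5; i < 5; i++)
            hist.record(i);
        // value(q) walks the cumulative bin counts until quantile q is reached
        System.out.println(hist.value(0.5)); // roughly the middle of the recorded range
        System.out.println(hist);            // toString() dumps the bin counts
    }
}
```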
Selector implements Selectable, AutoCloseable { @Override public void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException { if (this.channels.containsKey(id)) throw new IllegalStateException("There is already a connection for id " + id); SocketChannel socketChanne...
@Test(expected = IOException.class) public void testNoRouteToHost() throws Exception { selector.connect("0", new InetSocketAddress("some.invalid.hostname.foo.bar.local", server.port), BUFFER_SIZE, BUFFER_SIZE); } @Test public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; String node = ...
Selector implements Selectable, AutoCloseable { @Override public void mute(String id) { KafkaChannel channel = channelOrFail(id, true); mute(channel); } Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, Stri...
@Test public void testMute() throws Exception { blockingConnect("0"); blockingConnect("1"); selector.send(createSend("0", "hello")); selector.send(createSend("1", "hi")); selector.mute("1"); while (selector.completedReceives().isEmpty()) selector.poll(5); assertEquals("We should have only one response", 1, selector.com...
SslTransportLayer implements TransportLayer { protected void startHandshake() throws IOException { this.netReadBuffer = ByteBuffer.allocate(netReadBufferSize()); this.netWriteBuffer = ByteBuffer.allocate(netWriteBufferSize()); this.appReadBuffer = ByteBuffer.allocate(applicationBufferSize()); netWriteBuffer.position(0)...
@Test public void testClientEndpointNotValidated() throws Exception { String node = "0"; clientCertStores = new CertStores(false, "non-existent.com"); serverCertStores = new CertStores(true, "localhost"); sslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores); sslClientConfigs = clientCertStores.getTru...
SslTransportLayer implements TransportLayer { @Override public void close() throws IOException { if (closing) return; closing = true; sslEngine.closeOutbound(); try { if (isConnected()) { if (!flush(netWriteBuffer)) { throw new IOException("Remaining data in the network buffer, can't send SSL close message."); } netWri...
@Test public void testListenerConfigOverride() throws Exception { String node = "0"; ListenerName clientListenerName = new ListenerName("client"); sslServerConfigs.put(SslConfigs.SSL_CLIENT_AUTH_CONFIG, "required"); sslServerConfigs.put(clientListenerName.configPrefix() + SslConfigs.SSL_CLIENT_AUTH_CONFIG, "none"); ser...
AclBinding { @Override public boolean equals(Object o) { if (!(o instanceof AclBinding)) return false; AclBinding other = (AclBinding) o; return resource.equals(other.resource) && entry.equals(other.entry); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final Access...
@Test public void testMatching() throws Exception { assertTrue(ACL1.equals(ACL1)); final AclBinding acl1Copy = new AclBinding( new Resource(ResourceType.TOPIC, "mytopic"), new AccessControlEntry("User:ANONYMOUS", "", AclOperation.ALL, AclPermissionType.ALLOW)); assertTrue(ACL1.equals(acl1Copy)); assertTrue(acl1Copy.equ...
AclBinding { public boolean isUnknown() { return resource.isUnknown() || entry.isUnknown(); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final AccessControlEntry entry(); AclBindingFilter toFilter(); @Override String toString(); @Override boolean equals(Object o);...
@Test public void testUnknowns() throws Exception { assertFalse(ACL1.isUnknown()); assertFalse(ACL2.isUnknown()); assertFalse(ACL3.isUnknown()); assertFalse(ANY_ANONYMOUS.isUnknown()); assertFalse(ANY_DENY.isUnknown()); assertFalse(ANY_MYTOPIC.isUnknown()); assertTrue(UNKNOWN_ACL.isUnknown()); }
AclBinding { public AclBindingFilter toFilter() { return new AclBindingFilter(resource.toFilter(), entry.toFilter()); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final AccessControlEntry entry(); AclBindingFilter toFilter(); @Override String toString(); @Override...
@Test public void testMatchesAtMostOne() throws Exception { assertEquals(null, ACL1.toFilter().findIndefiniteField()); assertEquals(null, ACL2.toFilter().findIndefiniteField()); assertEquals(null, ACL3.toFilter().findIndefiniteField()); assertFalse(ANY_ANONYMOUS.matchesAtMostOne()); assertFalse(ANY_DENY.matchesAtMostOn...
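A binding with a fully specified resource and entry is not unknown, and the filter derived from it via toFilter() has no indefinite field, so it can match at most one binding. A sketch, with class and package locations as in this Kafka version:

```java
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.Resource;
import org.apache.kafka.common.resource.ResourceType;

public class AclBindingSketch {
    public static void main(String[] args) {
        AclBinding acl = new AclBinding(
            new Resource(ResourceType.TOPIC, "mytopic"),
            new AccessControlEntry("User:ANONYMOUS", "", AclOperation.ALL, AclPermissionType.ALLOW));
        System.out.println(acl.isUnknown()); // false: both halves are fully specified
        AclBindingFilter filter = acl.toFilter();
        // A filter built from a concrete binding has no wildcard/indefinite component ...
        System.out.println(filter.findIndefiniteField()); // null
        System.out.println(filter.matchesAtMostOne());    // ... so it matches at most one binding
    }
}
```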
JaasContext { public static JaasContext load(JaasContext.Type contextType, ListenerName listenerName, Map<String, ?> configs) { String listenerContextName; String globalContextName; switch (contextType) { case CLIENT: if (listenerName != null) throw new IllegalArgumentException("listenerName should be null for CLIENT")...
@Test(expected = IllegalArgumentException.class) public void testLoadForServerWithWrongListenerName() throws IOException { writeConfiguration("Server", "test.LoginModule required;"); JaasContext.load(JaasContext.Type.SERVER, new ListenerName("plaintext"), Collections.<String, Object>emptyMap()); } @Test(expected = Ille...
SslFactory implements Configurable { @Override public void configure(Map<String, ?> configs) throws KafkaException { this.protocol = (String) configs.get(SslConfigs.SSL_PROTOCOL_CONFIG); this.provider = (String) configs.get(SslConfigs.SSL_PROVIDER_CONFIG); @SuppressWarnings("unchecked") List<String> cipherSuitesList = ...
@Test public void testSslFactoryWithoutPasswordConfiguration() throws Exception { File trustStoreFile = File.createTempFile("truststore", ".jks"); Map<String, Object> serverSslConfig = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server"); serverSslConfig.remove(SslConfigs.SSL_TRUSTSTORE_PASS...
KerberosName { public static KerberosName parse(String principalName) { Matcher match = NAME_PARSER.matcher(principalName); if (!match.matches()) { if (principalName.contains("@")) { throw new IllegalArgumentException("Malformed Kerberos name: " + principalName); } else { return new KerberosName(principalName, null, nu...
@Test public void testParse() throws IOException { List<String> rules = new ArrayList<>(Arrays.asList( "RULE:[1:$1](App\\..*)s/App\\.(.*)/$1/g", "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g", "DEFAULT" )); KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); KerberosName name = KerberosN...
ScramCredentialUtils { public static String credentialToString(ScramCredential credential) { return String.format("%s=%s,%s=%s,%s=%s,%s=%d", SALT, DatatypeConverter.printBase64Binary(credential.salt()), STORED_KEY, DatatypeConverter.printBase64Binary(credential.storedKey()), SERVER_KEY, DatatypeConverter.printBase64Bin...
@Test public void generateCredential() { ScramCredential credential1 = formatter.generateCredential("password", 4096); ScramCredential credential2 = formatter.generateCredential("password", 4096); assertNotEquals(ScramCredentialUtils.credentialToString(credential1), ScramCredentialUtils.credentialToString(credential2))...
ScramCredentialUtils { public static ScramCredential credentialFromString(String str) { Properties props = toProps(str); if (props.size() != 4 || !props.containsKey(SALT) || !props.containsKey(STORED_KEY) || !props.containsKey(SERVER_KEY) || !props.containsKey(ITERATIONS)) { throw new IllegalArgumentException("Credenti...
@Test(expected = IllegalArgumentException.class) public void invalidCredential() { ScramCredentialUtils.credentialFromString("abc"); }
ScramCredentialUtils { public static void createCache(CredentialCache cache, Collection<String> enabledMechanisms) { for (String mechanism : ScramMechanism.mechanismNames()) { if (enabledMechanisms.contains(mechanism)) cache.createCache(mechanism, ScramCredential.class); } } static String credentialToString(ScramCrede...
@Test public void scramCredentialCache() throws Exception { CredentialCache cache = new CredentialCache(); ScramCredentialUtils.createCache(cache, Arrays.asList("SCRAM-SHA-512", "PLAIN")); assertNotNull("Cache not created for enabled mechanism", cache.cache(ScramMechanism.SCRAM_SHA_512.mechanismName(), ScramCredential....
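Round-tripping a generated credential through its string form, plus the rejection path for malformed input (credentialFromString requires exactly the salt, stored_key, server_key, and iterations fields). The ScramFormatter constructor and the package locations are assumptions; the tests above only show formatter.generateCredential(...):

```java
import org.apache.kafka.common.security.scram.ScramCredential;
import org.apache.kafka.common.security.scram.ScramCredentialUtils;
import org.apache.kafka.common.security.scram.ScramFormatter;
import org.apache.kafka.common.security.scram.ScramMechanism;

public class ScramCredentialSketch {
    public static void main(String[] args) throws Exception {
        // Assumed constructor: ScramFormatter(ScramMechanism)
        ScramFormatter formatter = new ScramFormatter(ScramMechanism.SCRAM_SHA_256);
        ScramCredential credential = formatter.generateCredential("password", 4096);
        String serialized = ScramCredentialUtils.credentialToString(credential);
        ScramCredential roundTrip = ScramCredentialUtils.credentialFromString(serialized);
        System.out.println(roundTrip.iterations()); // 4096
        try {
            ScramCredentialUtils.credentialFromString("abc"); // missing the four required fields
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```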
KafkaPrincipal implements Principal { @Override public int hashCode() { int result = principalType.hashCode(); result = 31 * result + name.hashCode(); return result; } KafkaPrincipal(String principalType, String name); static KafkaPrincipal fromString(String str); @Override String toString(); @Override boolean equals(O...
@Test public void testEqualsAndHashCode() { String name = "KafkaUser"; KafkaPrincipal principal1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name); KafkaPrincipal principal2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name); Assert.assertEquals(principal1.hashCode(), principal2.hashCode()); Assert.assertEquals(pr...
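equals() and hashCode() are both derived from (principalType, name), so a principal parsed from the "User:name" form is interchangeable with a constructed one. A short sketch:

```java
import org.apache.kafka.common.security.auth.KafkaPrincipal;

public class KafkaPrincipalSketch {
    public static void main(String[] args) {
        KafkaPrincipal a = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "KafkaUser");
        KafkaPrincipal b = KafkaPrincipal.fromString("User:KafkaUser");
        // Both methods key on (principalType, name), so the two are interchangeable
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true
        System.out.println(a);                            // User:KafkaUser
    }
}
```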
ConfigDef { public ConfigDef define(ConfigKey key) { if (configKeys.containsKey(key.name)) { throw new ConfigException("Configuration " + key.name + " is defined twice."); } if (key.group != null && !groups.contains(key.group)) { groups.add(key.group); } configKeys.put(key.name, key); return this; } ConfigDef(); Confi...
@Test(expected = ConfigException.class) public void testInvalidDefault() { new ConfigDef().define("a", Type.INT, "hello", Importance.HIGH, "docs"); } @Test(expected = ConfigException.class) public void testDefinedTwice() { new ConfigDef().define("a", Type.STRING, Importance.HIGH, "docs").define("a", Type.INT, Importanc...
ConfigDef { public Map<String, Object> parse(Map<?, ?> props) { List<String> undefinedConfigKeys = undefinedDependentConfigs(); if (!undefinedConfigKeys.isEmpty()) { String joined = Utils.join(undefinedConfigKeys, ","); throw new ConfigException("Some configurations in are referred in the dependents, but not defined: "...
@Test public void testSslPasswords() { ConfigDef def = new ConfigDef(); SslConfigs.addClientSslSupport(def); Properties props = new Properties(); props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "key_password"); props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "keystore_password"); props.put(SslConfigs.SSL_TRUSTSTORE_PA...
ConfigDef { Map<String, Object> parseForValidate(Map<String, String> props, Map<String, ConfigValue> configValues) { Map<String, Object> parsed = new HashMap<>(); Set<String> configsWithNoParent = getConfigsWithNoParent(); for (String name: configsWithNoParent) { parseForValidate(name, props, parsed, configValues); } r...
@Test public void testParseForValidate() { Map<String, Object> expectedParsed = new HashMap<>(); expectedParsed.put("a", 1); expectedParsed.put("b", null); expectedParsed.put("c", null); expectedParsed.put("d", 10); Map<String, ConfigValue> expected = new HashMap<>(); String errorMessageB = "Missing required configurat...
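define() registers a key and rejects duplicate names; parse() then validates a property map against the registered keys, filling in defaults for optional ones. A sketch:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigException;

public class ConfigDefSketch {
    public static void main(String[] args) {
        ConfigDef def = new ConfigDef()
            .define("a", Type.INT, 5, Importance.HIGH, "an int with default 5")
            .define("b", Type.STRING, Importance.HIGH, "a required string");
        Map<String, String> props = new HashMap<>();
        props.put("b", "hello");
        Map<String, Object> parsed = def.parse(props);
        System.out.println(parsed.get("a")); // 5: the default kicks in
        System.out.println(parsed.get("b")); // hello
        try {
            def.define("a", Type.LONG, Importance.LOW, "dup"); // same name again
        } catch (ConfigException e) {
            System.out.println("rejected: " + e.getMessage()); // "Configuration a is defined twice."
        }
    }
}
```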