src_fm_fc_ms_ff
stringlengths
43
86.8k
target
stringlengths
20
276k
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); 
Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testFetchRequestWhenRecordTooLarge() { try { client.setNodeApiVersions(NodeApiVersions.create(Collections.singletonList( new ApiVersionsResponse.ApiVersion(ApiKeys.FETCH.id, (short) 2, (short) 2)))); makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("RecordTooLargeException should have been raised"); } catch (RecordTooLargeException e) { assertTrue(e.getMessage().startsWith("There are some messages at [Partition=Offset]: ")); assertEquals(0, subscriptions.position(tp1).longValue()); } } finally { client.setNodeApiVersions(NodeApiVersions.create()); } } @Test public void testFetchRequestInternalError() { makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("RecordTooLargeException should have been raised"); } catch (KafkaException e) { assertTrue(e.getMessage().startsWith("Failed to make progress reading messages")); assertEquals(0, subscriptions.position(tp1).longValue()); } }
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, 
boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testFetchOnPausedPartition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); subscriptions.pause(tp1); assertFalse(fetcher.sendFetches() > 0); assertTrue(client.requests().isEmpty()); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); 
@Override void close(); }
@Test public void testUpdateFetchPositionsNoneCommittedNoResetStrategy() { Set<TopicPartition> tps = new HashSet<>(Arrays.asList(tp1, tp2)); subscriptionsNoAutoReset.assignFromUser(tps); try { fetcherNoAutoReset.updateFetchPositions(tps); fail("Should have thrown NoOffsetForPartitionException"); } catch (NoOffsetForPartitionException e) { Set<TopicPartition> partitions = e.partitions(); assertEquals(tps, partitions); } } @Test public void testUpdateFetchPositionToCommitted() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(5)); fetcher.updateFetchPositions(singleton(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionResetToDefaultOffset() { subscriptions.assignFromUser(singleton(tp1)); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionResetToLatestOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } @Test public void testListOffsetsSendsIsolationLevel() { for (final IsolationLevel isolationLevel : IsolationLevel.values()) { Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, isolationLevel); 
subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { ListOffsetRequest request = (ListOffsetRequest) body; return request.isolationLevel() == isolationLevel; } }, listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } } @Test public void testUpdateFetchPositionResetToEarliestOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.EARLIEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionDisconnect() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L), true); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionOfPausedPartitionsRequiringOffsetReset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.pause(tp1); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); 
client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 10L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionOfPausedPartitionsWithoutACommittedOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.pause(tp1); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 0L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(0, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionOfPausedPartitionsWithoutAValidPosition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.pause(tp1); subscriptions.seek(tp1, 10); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); } @Test public void testUpdateFetchPositionOfPausedPartitionsWithAValidPosition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.seek(tp1, 10); subscriptions.pause(tp1); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testGetAllTopics() { client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(5000L); assertEquals(cluster.topics().size(), allTopics.size()); } @Test public void testGetAllTopicsDisconnect() { client.prepareResponse(null, true); client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(5000L); assertEquals(cluster.topics().size(), allTopics.size()); } @Test(expected = TimeoutException.class) public void testGetAllTopicsTimeout() { fetcher.getAllTopicMetadata(50L); } @Test public void testGetAllTopicsUnauthorized() { client.prepareResponse(newMetadataResponse(topicName, Errors.TOPIC_AUTHORIZATION_FAILED)); try { fetcher.getAllTopicMetadata(10L); fail(); } catch (TopicAuthorizationException e) { assertEquals(singleton(topicName), e.unauthorizedTopics()); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic 
metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test(expected = InvalidTopicException.class) public void testGetTopicMetadataInvalidTopic() { client.prepareResponse(newMetadataResponse(topicName, Errors.INVALID_TOPIC_EXCEPTION)); fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); } @Test public void testGetTopicMetadataUnknownTopic() { client.prepareResponse(newMetadataResponse(topicName, Errors.UNKNOWN_TOPIC_OR_PARTITION)); Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); assertNull(topicMetadata.get(topicName)); } @Test public void testGetTopicMetadataLeaderNotAvailable() { client.prepareResponse(newMetadataResponse(topicName, Errors.LEADER_NOT_AVAILABLE)); client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); assertTrue(topicMetadata.containsKey(topicName)); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> 
partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testFetcherMetrics() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); MetricName partitionLagMetric = metrics.metricName(tp1 + ".records-lag", metricGroup); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric); assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON); fetchRecords(tp1, MemoryRecords.EMPTY, Errors.NONE, 100L, 0); assertEquals(100, recordsFetchLagMax.value(), EPSILON); KafkaMetric partitionLag = allMetrics.get(partitionLagMetric); assertEquals(100, partitionLag.value(), EPSILON); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); fetchRecords(tp1, builder.build(), Errors.NONE, 200L, 0); assertEquals(197, recordsFetchLagMax.value(), EPSILON); assertEquals(197, partitionLag.value(), EPSILON); subscriptions.unsubscribe(); assertFalse(allMetrics.containsKey(partitionLagMetric)); } @Test public void testReadCommittedLagMetric() { Metrics metrics = new Metrics(); fetcher = createFetcher(subscriptions, metrics, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); MetricName partitionLagMetric = metrics.metricName(tp1 + ".records-lag", metricGroup); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric); assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON); fetchRecords(tp1, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0); assertEquals(50, 
recordsFetchLagMax.value(), EPSILON); KafkaMetric partitionLag = allMetrics.get(partitionLagMetric); assertEquals(50, partitionLag.value(), EPSILON); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); fetchRecords(tp1, builder.build(), Errors.NONE, 200L, 150L, 0); assertEquals(147, recordsFetchLagMax.value(), EPSILON); assertEquals(147, partitionLag.value(), EPSILON); subscriptions.unsubscribe(); assertFalse(allMetrics.containsKey(partitionLagMetric)); } @Test public void testFetchResponseMetrics() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg)); KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg)); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); MemoryRecords records = builder.build(); int expectedBytes = 0; for (Record record : records.records()) expectedBytes += record.sizeInBytes(); fetchRecords(tp1, records, Errors.NONE, 100L, 0); assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON); assertEquals(3, recordsCountAverage.value(), EPSILON); } @Test public void testFetchResponseMetricsPartialResponse() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 1); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg)); KafkaMetric recordsCountAverage = 
allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg)); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); MemoryRecords records = builder.build(); int expectedBytes = 0; for (Record record : records.records()) { if (record.offset() >= 1) expectedBytes += record.sizeInBytes(); } fetchRecords(tp1, records, Errors.NONE, 100L, 0); assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON); assertEquals(2, recordsCountAverage.value(), EPSILON); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
// Verifies that getOffsetsByTimes throws TimeoutException when no broker response arrives
// within the supplied timeout (100 ms here, no response prepared on the mock client).
@Test
public void testGetOffsetsForTimesTimeout() {
    try {
        fetcher.getOffsetsByTimes(Collections.singletonMap(new TopicPartition(topicName, 2), 1000L), 100L);
        fail("Should throw timeout exception.");
    } catch (TimeoutException e) {
        // expected — the lookup must not return silently
    }
}

// Exercises getOffsetsByTimes across a matrix of (first-attempt error, retry error) pairs via
// the testGetOffsetsForTimesWithError helper; also checks the empty-request fast path.
@Test
public void testGetOffsetsForTimes() {
    // An empty search map should produce an empty result without any network round-trip.
    assertTrue(fetcher.getOffsetsByTimes(new HashMap<TopicPartition, Long>(), 100L).isEmpty());
    // Helper args (presumably): error on first response, error on retry, returned timestamp,
    // returned offset, expected timestamp, expected offset — TODO confirm against helper definition.
    testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, -1L, 100L, null, 100L);
    testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, 10L, 100L, 10L, 100L);
    testGetOffsetsForTimesWithError(Errors.NOT_LEADER_FOR_PARTITION, Errors.INVALID_REQUEST, 10L, 100L, 10L, 100L);
    testGetOffsetsForTimesWithError(Errors.NONE, Errors.NOT_LEADER_FOR_PARTITION, 10L, 100L, 10L, 100L);
    testGetOffsetsForTimesWithError(Errors.NOT_LEADER_FOR_PARTITION, Errors.NONE, 10L, 100L, 10L, 100L);
    testGetOffsetsForTimesWithError(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.NONE, 10L, 100L, 10L, 100L);
    testGetOffsetsForTimesWithError(Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, Errors.NONE, 10L, 100L, null, 100L);
    testGetOffsetsForTimesWithError(Errors.BROKER_NOT_AVAILABLE, Errors.NONE, 10L, 100L, 10L, 100L);
}

// Verifies that a batched ListOffsets request whose partitions all fail with retriable errors
// eventually times out (timeout of 0) rather than returning a partial result.
@Test(expected = TimeoutException.class)
public void testBatchedListOffsetsMetadataErrors() {
    Map<TopicPartition, ListOffsetResponse.PartitionData> partitionData = new HashMap<>();
    partitionData.put(tp1, new ListOffsetResponse.PartitionData(Errors.NOT_LEADER_FOR_PARTITION, ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET));
    partitionData.put(tp2, new ListOffsetResponse.PartitionData(Errors.UNKNOWN_TOPIC_OR_PARTITION, ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET));
    client.prepareResponse(new ListOffsetResponse(0, partitionData));
    Map<TopicPartition, Long> offsetsToSearch = new HashMap<>();
    offsetsToSearch.put(tp1, ListOffsetRequest.EARLIEST_TIMESTAMP);
    offsetsToSearch.put(tp2, ListOffsetRequest.EARLIEST_TIMESTAMP);
    fetcher.getOffsetsByTimes(offsetsToSearch, 0);
}
RequestFuture implements ConsumerNetworkClient.PollCondition {
    /**
     * Transitions this future to the successful state with the given value and invokes any
     * registered success listeners.
     *
     * The result slot is claimed atomically, so only one of {@code complete}/{@code raise} can
     * ever win; a second invocation fails with {@link IllegalStateException}. A RuntimeException
     * value is rejected because exceptions must be delivered through {@code raise} instead.
     * The completion latch is counted down even on rejection so that waiters are released.
     *
     * @param value the successful result (must not be a RuntimeException)
     * @throws IllegalArgumentException if {@code value} is a RuntimeException
     * @throws IllegalStateException if the future was already completed
     */
    public void complete(T value) {
        try {
            if (value instanceof RuntimeException) {
                throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException");
            }
            boolean claimed = result.compareAndSet(INCOMPLETE_SENTINEL, value);
            if (!claimed) {
                throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
            }
            fireSuccess();
        } finally {
            // Always release threads blocked in awaitDone, even when completion was rejected.
            completedLatch.countDown();
        }
    }

    boolean isDone();
    boolean awaitDone(long timeout, TimeUnit unit);
    @SuppressWarnings("unchecked") T value();
    boolean succeeded();
    boolean failed();
    boolean isRetriable();
    RuntimeException exception();
    void complete(T value);
    void raise(RuntimeException e);
    void raise(Errors error);
    void addListener(RequestFutureListener<T> listener);
    RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter);
    void chain(final RequestFuture<T> future);
    static RequestFuture<T> failure(RuntimeException e);
    static RequestFuture<Void> voidSuccess();
    static RequestFuture<T> coordinatorNotAvailable();
    static RequestFuture<T> leaderNotAvailable();
    static RequestFuture<T> noBrokersAvailable();
    static RequestFuture<T> staleMetadata();
    @Override boolean shouldBlock();
}
// complete() must reject a RuntimeException value — exceptions go through raise() instead.
@Test(expected = IllegalArgumentException.class)
public void testRuntimeExceptionInComplete() {
    RequestFuture<Exception> future = new RequestFuture<>();
    future.complete(new RuntimeException());
}

// A future is single-shot: completing it a second time must fail.
@Test(expected = IllegalStateException.class)
public void invokeCompleteAfterAlreadyComplete() {
    RequestFuture<Void> future = new RequestFuture<>();
    future.complete(null);
    future.complete(null);
}
RequestFuture implements ConsumerNetworkClient.PollCondition {
    /**
     * Transitions this future to the failed state with the given exception and invokes any
     * registered failure listeners.
     *
     * Mirrors {@code complete}: the result slot is claimed atomically so only the first of
     * {@code complete}/{@code raise} wins, and the completion latch is always counted down so
     * blocked waiters are released.
     *
     * @param e the failure cause (must not be null)
     * @throws IllegalArgumentException if {@code e} is null
     * @throws IllegalStateException if the future was already completed
     */
    public void raise(RuntimeException e) {
        try {
            if (e == null) {
                throw new IllegalArgumentException("The exception passed to raise must not be null");
            }
            boolean claimed = result.compareAndSet(INCOMPLETE_SENTINEL, e);
            if (!claimed) {
                throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
            }
            fireFailure();
        } finally {
            // Always release threads blocked in awaitDone, even when the raise was rejected.
            completedLatch.countDown();
        }
    }

    boolean isDone();
    boolean awaitDone(long timeout, TimeUnit unit);
    @SuppressWarnings("unchecked") T value();
    boolean succeeded();
    boolean failed();
    boolean isRetriable();
    RuntimeException exception();
    void complete(T value);
    void raise(RuntimeException e);
    void raise(Errors error);
    void addListener(RequestFutureListener<T> listener);
    RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter);
    void chain(final RequestFuture<T> future);
    static RequestFuture<T> failure(RuntimeException e);
    static RequestFuture<Void> voidSuccess();
    static RequestFuture<T> coordinatorNotAvailable();
    static RequestFuture<T> leaderNotAvailable();
    static RequestFuture<T> noBrokersAvailable();
    static RequestFuture<T> staleMetadata();
    @Override boolean shouldBlock();
}
// A future is single-shot: raising on an already-failed future must throw.
@Test(expected = IllegalStateException.class)
public void invokeRaiseAfterAlreadyFailed() {
    RequestFuture<Void> future = new RequestFuture<>();
    future.raise(new RuntimeException());
    future.raise(new RuntimeException());
}
ConsumerProtocol {
    /**
     * Deserializes a consumer group subscription from its wire format.
     *
     * Reads the version header first, checks compatibility, and then reads the payload with the
     * V0 subscription schema. Because only the fields known to V0 are read, payloads written by
     * newer (forward-compatible) versions with extra trailing fields still deserialize.
     *
     * @param buffer wire-format bytes, positioned at the start of the header
     * @return the decoded subscription (topic list plus opaque user data)
     */
    public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) {
        Struct versionHeader = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer);
        Short protocolVersion = versionHeader.getShort(VERSION_KEY_NAME);
        checkVersionCompatibility(protocolVersion);
        // Read only the V0 fields; any extra fields from newer versions are left in the buffer.
        Struct payload = SUBSCRIPTION_V0.read(buffer);
        ByteBuffer userData = payload.getBytes(USER_DATA_KEY_NAME);
        List<String> subscribedTopics = new ArrayList<>();
        for (Object topic : payload.getArray(TOPICS_KEY_NAME)) {
            subscribedTopics.add((String) topic);
        }
        return new PartitionAssignor.Subscription(subscribedTopics, userData);
    }

    static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription);
    static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer);
    static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer);
    static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment);
    static final String PROTOCOL_TYPE;
    static final String VERSION_KEY_NAME;
    static final String TOPICS_KEY_NAME;
    static final String TOPIC_KEY_NAME;
    static final String PARTITIONS_KEY_NAME;
    static final String TOPIC_PARTITIONS_KEY_NAME;
    static final String USER_DATA_KEY_NAME;
    static final short CONSUMER_PROTOCOL_V0;
    static final Schema CONSUMER_PROTOCOL_HEADER_SCHEMA;
    static final Schema SUBSCRIPTION_V0;
    static final Schema TOPIC_ASSIGNMENT_V0;
    static final Schema ASSIGNMENT_V0;
}
// Forward-compatibility check: a subscription serialized with a hypothetical future schema
// (version 100, extra "foo" field appended) must still deserialize — the V0 reader consumes
// only the fields it knows and ignores the trailing addition.
@Test
public void deserializeNewSubscriptionVersion() {
    short version = 100;
    // Future schema = V0 fields plus one extra trailing string field.
    Schema subscriptionSchemaV100 = new Schema(
        new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)),
        new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
        new Field("foo", Type.STRING));
    Struct subscriptionV100 = new Struct(subscriptionSchemaV100);
    subscriptionV100.set(ConsumerProtocol.TOPICS_KEY_NAME, new Object[]{"topic"});
    subscriptionV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    subscriptionV100.set("foo", "bar");
    Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version);
    // Write header then payload, flip for reading, and deserialize with the current code.
    ByteBuffer buffer = ByteBuffer.allocate(subscriptionV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    subscriptionV100.writeTo(buffer);
    buffer.flip();
    Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer);
    assertEquals(Arrays.asList("topic"), subscription.topics());
}
ConsumerProtocol {
    /**
     * Deserializes a consumer group partition assignment from its wire format.
     *
     * Reads and validates the version header, then decodes the payload with the V0 assignment
     * schema, flattening the per-topic partition arrays into a single list of
     * {@link TopicPartition}s. Reading only V0 fields keeps this forward-compatible with newer
     * writers that append extra fields.
     *
     * @param buffer wire-format bytes, positioned at the start of the header
     * @return the decoded assignment (partition list plus opaque user data)
     */
    public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) {
        Struct versionHeader = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer);
        Short protocolVersion = versionHeader.getShort(VERSION_KEY_NAME);
        checkVersionCompatibility(protocolVersion);
        Struct payload = ASSIGNMENT_V0.read(buffer);
        ByteBuffer userData = payload.getBytes(USER_DATA_KEY_NAME);
        List<TopicPartition> assignedPartitions = new ArrayList<>();
        // The wire format groups partitions by topic; flatten into (topic, partition) pairs.
        for (Object topicEntryObj : payload.getArray(TOPIC_PARTITIONS_KEY_NAME)) {
            Struct topicEntry = (Struct) topicEntryObj;
            String topicName = topicEntry.getString(TOPIC_KEY_NAME);
            for (Object partitionId : topicEntry.getArray(PARTITIONS_KEY_NAME)) {
                assignedPartitions.add(new TopicPartition(topicName, (Integer) partitionId));
            }
        }
        return new PartitionAssignor.Assignment(assignedPartitions, userData);
    }

    static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription);
    static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer);
    static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer);
    static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment);
    static final String PROTOCOL_TYPE;
    static final String VERSION_KEY_NAME;
    static final String TOPICS_KEY_NAME;
    static final String TOPIC_KEY_NAME;
    static final String PARTITIONS_KEY_NAME;
    static final String TOPIC_PARTITIONS_KEY_NAME;
    static final String USER_DATA_KEY_NAME;
    static final short CONSUMER_PROTOCOL_V0;
    static final Schema CONSUMER_PROTOCOL_HEADER_SCHEMA;
    static final Schema SUBSCRIPTION_V0;
    static final Schema TOPIC_ASSIGNMENT_V0;
    static final Schema ASSIGNMENT_V0;
}
// Forward-compatibility check for assignments: a payload written with a hypothetical future
// schema (version 100, extra "foo" field) must still round-trip through the V0 reader.
@Test
public void deserializeNewAssignmentVersion() {
    short version = 100;
    // Future schema = V0 fields plus one extra trailing string field.
    Schema assignmentSchemaV100 = new Schema(
        new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)),
        new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
        new Field("foo", Type.STRING));
    Struct assignmentV100 = new Struct(assignmentSchemaV100);
    assignmentV100.set(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME,
        new Object[]{new Struct(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)
            .set(ConsumerProtocol.TOPIC_KEY_NAME, "foo")
            .set(ConsumerProtocol.PARTITIONS_KEY_NAME, new Object[]{1})});
    assignmentV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    assignmentV100.set("foo", "bar");
    Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version);
    // Write header then payload, flip for reading, and deserialize with the current code.
    ByteBuffer buffer = ByteBuffer.allocate(assignmentV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    assignmentV100.writeTo(buffer);
    buffer.flip();
    PartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(Arrays.asList(new TopicPartition("foo", 1))), toSet(assignment.partitions()));
}
ConsumerCoordinator extends AbstractCoordinator {
    // Drives the coordinator's periodic work from the consumer's poll loop: invokes queued
    // commit callbacks, ensures group membership (for auto-assigned subscriptions), sends
    // heartbeats, and triggers auto-commit when due.
    //
    // `now` is refreshed after each potentially-blocking call (ensureCoordinatorReady,
    // ensureActiveGroup, awaitMetadataUpdate) so that downstream timing decisions
    // (pollHeartbeat, maybeAutoCommitOffsetsAsync) use an up-to-date clock reading.
    //
    // @param now current time in milliseconds
    // @param remainingMs time budget for blocking operations in this call
    public void poll(long now, long remainingMs) {
        invokeCompletedOffsetCommitCallbacks();
        if (subscriptions.partitionsAutoAssigned()) {
            // Group-managed assignment: (re)discover the coordinator and (re)join if needed.
            if (coordinatorUnknown()) {
                ensureCoordinatorReady();
                now = time.milliseconds();
            }
            if (needRejoin()) {
                // Pattern subscriptions need fresh metadata before rejoining so that the
                // subscribed topic set reflects the current cluster state.
                if (subscriptions.hasPatternSubscription())
                    client.ensureFreshMetadata();
                ensureActiveGroup();
                now = time.milliseconds();
            }
        } else {
            // Manual assignment: only wait for a metadata update when one was requested and
            // no broker connection is currently usable; bail out if the wait fails.
            if (metadata.updateRequested() && !client.hasReadyNodes()) {
                boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs);
                if (!metadataUpdated && !client.hasReadyNodes())
                    return;
                now = time.milliseconds();
            }
        }
        pollHeartbeat(now);
        maybeAutoCommitOffsetsAsync(now);
    }

    ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose);
    @Override String protocolType();
    @Override List<ProtocolMetadata> metadata();
    void updatePatternSubscription(Cluster cluster);
    void poll(long now, long remainingMs);
    long timeToNextPoll(long now);
    @Override boolean needRejoin();
    void refreshCommittedOffsetsIfNeeded();
    Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions);
    void close(long timeoutMs);
    void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback);
    boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs);
    void maybeAutoCommitOffsetsNow();
}
// A heartbeat sent after the session interval should succeed once the broker replies NONE.
@Test
public void testNormalHeartbeat() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest();
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(0);
    assertTrue(future.isDone());
    assertTrue(future.succeeded());
}

// A GROUP_AUTHORIZATION_FAILED join response must surface as GroupAuthorizationException.
@Test(expected = GroupAuthorizationException.class)
public void testGroupReadUnauthorized() {
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupLeaderResponse(0, "memberId", Collections.<String, List<String>>emptyMap(), Errors.GROUP_AUTHORIZATION_FAILED));
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
}

// COORDINATOR_NOT_AVAILABLE on heartbeat fails the future and marks the coordinator unknown.
@Test
public void testCoordinatorNotAvailable() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest();
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());
    client.prepareResponse(heartbeatResponse(Errors.COORDINATOR_NOT_AVAILABLE));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(0);
    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.COORDINATOR_NOT_AVAILABLE.exception(), future.exception());
    assertTrue(coordinator.coordinatorUnknown());
}

// NOT_COORDINATOR on heartbeat likewise fails the future and invalidates the coordinator.
@Test
public void testNotCoordinator() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest();
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());
    client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(0);
    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.NOT_COORDINATOR.exception(), future.exception());
    assertTrue(coordinator.coordinatorUnknown());
}

// A disconnect while a heartbeat is in flight fails the future with DisconnectException
// and marks the coordinator unknown so it will be rediscovered.
@Test
public void testCoordinatorDisconnect() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest();
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());
    client.prepareResponse(heartbeatResponse(Errors.NONE), true);
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(0);
    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertTrue(future.exception() instanceof DisconnectException);
    assertTrue(coordinator.coordinatorUnknown());
}

// INVALID_GROUP_ID on join must surface as an ApiException to the caller.
@Test(expected = ApiException.class)
public void testJoinGroupInvalidGroupId() {
    final String consumerId = "leader";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    metadata.setTopics(singletonList(topic1));
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupLeaderResponse(0, consumerId, Collections.<String, List<String>>emptyMap(), Errors.INVALID_GROUP_ID));
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
}

// With auto-commit enabled and a dynamically assigned partition, poll() after the
// auto-commit interval should commit the seeked position.
@Test
public void testAutoCommitDynamicAssignment() {
    final String consumerId = "consumer";
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    subscriptions.seek(t1p, 100);
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    time.sleep(autoCommitIntervalMs);
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}

// Same as above, but with an auto-commit interval elapsing before the rebalance completes;
// the commit after the rebalance must still land at the seeked offset.
@Test
public void testAutoCommitDynamicAssignmentRebalance() {
    final String consumerId = "consumer";
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(autoCommitIntervalMs);
    consumerClient.poll(0);
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    subscriptions.seek(t1p, 100);
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    time.sleep(autoCommitIntervalMs);
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}

// Auto-commit must also work with manual (user) assignment — no group join involved.
@Test
public void testAutoCommitManualAssignment() {
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.assignFromUser(singleton(t1p));
    subscriptions.seek(t1p, 100);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    time.sleep(autoCommitIntervalMs);
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}

// With manual assignment and no known coordinator, auto-commit is deferred (no commit
// happens) until the coordinator is discovered, after which the commit goes through.
@Test
public void testAutoCommitManualAssignmentCoordinatorUnknown() {
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.assignFromUser(singleton(t1p));
    subscriptions.seek(t1p, 100);
    consumerClient.poll(0);
    time.sleep(autoCommitIntervalMs);
    consumerClient.poll(0);
    // Nothing committed yet — the coordinator is still unknown.
    assertNull(subscriptions.committed(t1p));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    time.sleep(retryBackoffMs);
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}
ConsumerCoordinator extends AbstractCoordinator {
    /**
     * Decides whether this consumer must rejoin the group.
     *
     * Manual (user) assignment never triggers a rejoin. For auto-assigned subscriptions a
     * rejoin is needed when the metadata snapshot taken at the last assignment no longer
     * matches the current one, when the subscription used at the last join has changed, or
     * when the superclass's own membership state requires it.
     *
     * @return true if the consumer should rejoin the group
     */
    @Override
    public boolean needRejoin() {
        if (!subscriptions.partitionsAutoAssigned()) {
            // Partitions were assigned manually — group membership is not in play.
            return false;
        }
        boolean metadataDrifted = assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot);
        if (metadataDrifted) {
            return true;
        }
        boolean subscriptionChanged = joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription());
        if (subscriptionChanged) {
            return true;
        }
        return super.needRejoin();
    }

    ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose);
    @Override String protocolType();
    @Override List<ProtocolMetadata> metadata();
    void updatePatternSubscription(Cluster cluster);
    void poll(long now, long remainingMs);
    long timeToNextPoll(long now);
    @Override boolean needRejoin();
    void refreshCommittedOffsetsIfNeeded();
    Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions);
    void close(long timeoutMs);
    void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback);
    boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs);
    void maybeAutoCommitOffsetsNow();
}
// A follower joining the group sends an empty SyncGroup assignment and receives its
// partitions from the leader; the rebalance listener sees one revoke and one assign.
@Test
public void testNormalJoinGroupFollower() {
    final String consumerId = "consumer";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            // Followers must send member id + generation with an empty group assignment.
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().isEmpty();
        }
    }, syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(singleton(t1p), subscriptions.assignedPartitions());
    assertEquals(singleton(topic1), subscriptions.groupSubscription());
    assertEquals(1, rebalanceListener.revokedCount);
    assertEquals(Collections.emptySet(), rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(singleton(t1p), rebalanceListener.assigned);
}

// Same follower flow, but with a pattern subscription: the assignment (two partitions)
// drives the effective subscription after a metadata update.
@Test
public void testPatternJoinGroupFollower() {
    final String consumerId = "consumer";
    subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener);
    metadata.setTopics(singletonList(topic1));
    metadata.update(TestUtils.singletonCluster(topic1, 1), Collections.<String>emptySet(), time.milliseconds());
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().isEmpty();
        }
    }, syncGroupResponse(Arrays.asList(t1p, t2p), Errors.NONE));
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(2, subscriptions.assignedPartitions().size());
    assertEquals(2, subscriptions.subscription().size());
    assertEquals(1, rebalanceListener.revokedCount);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(2, rebalanceListener.assigned.size());
}

// UNKNOWN_MEMBER_ID on SyncGroup forces a rejoin with a reset (unknown) member id.
@Test
public void testUnknownMemberIdOnSyncGroup() {
    final String consumerId = "consumer";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.UNKNOWN_MEMBER_ID));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            // The rejoin must use the sentinel UNKNOWN_MEMBER_ID, not the stale id.
            JoinGroupRequest joinRequest = (JoinGroupRequest) body;
            return joinRequest.memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID);
        }
    }, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}

// REBALANCE_IN_PROGRESS on SyncGroup retries the join/sync cycle until it succeeds.
@Test
public void testRebalanceInProgressOnSyncGroup() {
    final String consumerId = "consumer";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.REBALANCE_IN_PROGRESS));
    client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}

// ILLEGAL_GENERATION on SyncGroup also forces a full rejoin with a reset member id.
@Test
public void testIllegalGenerationOnSyncGroup() {
    final String consumerId = "consumer";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.ILLEGAL_GENERATION));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            JoinGroupRequest joinRequest = (JoinGroupRequest) body;
            return joinRequest.memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID);
        }
    }, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}

// A disconnect during the join triggers coordinator rediscovery followed by a successful
// retry of the join/sync cycle.
@Test
public void testDisconnectInJoin() {
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE), true);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    assertFalse(coordinator.needRejoin());
    assertEquals(singleton(t1p), subscriptions.assignedPartitions());
    assertEquals(1, rebalanceListener.revokedCount);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(singleton(t1p), rebalanceListener.assigned);
}
ConsumerCoordinator extends AbstractCoordinator {
    // Closes the coordinator within the given time budget: flushes a final auto-commit,
    // waits for pending async commits (which requires a reachable coordinator), and then
    // delegates to super.close() with whatever time remains.
    //
    // Wakeups are disabled first so the shutdown path cannot be interrupted by user wakeups.
    // `now` is re-read after each blocking step so the remaining budget passed to the next
    // step (and finally to super.close) stays accurate.
    //
    // @param timeoutMs maximum time in milliseconds to spend closing
    public void close(long timeoutMs) {
        client.disableWakeups();
        long now = time.milliseconds();
        long endTimeMs = now + timeoutMs;
        try {
            maybeAutoCommitOffsetsSync(timeoutMs);
            now = time.milliseconds();
            // Only wait on pending async commits if there is budget left; they need the
            // coordinator to be known to complete.
            if (pendingAsyncCommits.get() > 0 && endTimeMs > now) {
                ensureCoordinatorReady(now, endTimeMs - now);
                now = time.milliseconds();
            }
        } finally {
            // Always run the superclass close (e.g. leave-group handling), clamped to >= 0.
            super.close(Math.max(0, endTimeMs - now));
        }
    }

    ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose);
    @Override String protocolType();
    @Override List<ProtocolMetadata> metadata();
    void updatePatternSubscription(Cluster cluster);
    void poll(long now, long remainingMs);
    long timeToNextPoll(long now);
    @Override boolean needRejoin();
    void refreshCommittedOffsetsIfNeeded();
    Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions);
    void close(long timeoutMs);
    void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback);
    boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs);
    void maybeAutoCommitOffsetsNow();
}
// Closing a coordinator that is an active group member must send a LeaveGroup request
// carrying this member's id and group id, even with a zero close timeout.
@Test
public void testLeaveGroupOnClose() {
    final String consumerId = "consumer";
    subscriptions.subscribe(singleton(topic1), rebalanceListener);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.joinGroupIfNeeded();
    final AtomicBoolean received = new AtomicBoolean(false);
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            received.set(true);
            // Verify the leave request identifies this member and group.
            LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
            return leaveRequest.memberId().equals(consumerId) && leaveRequest.groupId().equals(groupId);
        }
    }, new LeaveGroupResponse(Errors.NONE));
    coordinator.close(0);
    assertTrue(received.get());
}
ConsumerCoordinator extends AbstractCoordinator {
    // Synchronously commits the given offsets, retrying retriable failures (with backoff)
    // until success, a non-retriable error, or the timeout elapses.
    //
    // Returns true on success, false when the timeout runs out (including while waiting for
    // coordinator discovery). Non-retriable failures are rethrown to the caller.
    //
    // @param offsets per-partition offsets (and metadata) to commit; empty map is a no-op
    // @param timeoutMs total time budget in milliseconds across all retries
    // @return true if the commit completed successfully within the timeout
    public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) {
        invokeCompletedOffsetCommitCallbacks();
        if (offsets.isEmpty())
            return true;
        long now = time.milliseconds();
        long startMs = now;
        long remainingMs = timeoutMs;
        do {
            // (Re)discover the coordinator if it was lost, charging the wait to the budget.
            if (coordinatorUnknown()) {
                if (!ensureCoordinatorReady(now, remainingMs))
                    return false;
                remainingMs = timeoutMs - (time.milliseconds() - startMs);
            }
            RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
            client.poll(future, remainingMs);
            if (future.succeeded()) {
                // Notify interceptors only after a confirmed commit.
                if (interceptors != null)
                    interceptors.onCommit(offsets);
                return true;
            }
            // Non-retriable errors (e.g. CommitFailedException) propagate to the caller.
            if (!future.isRetriable())
                throw future.exception();
            // Back off before retrying a retriable failure.
            time.sleep(retryBackoffMs);
            now = time.milliseconds();
            remainingMs = timeoutMs - (now - startMs);
        } while (remainingMs > 0);
        return false;
    }

    ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose);
    @Override String protocolType();
    @Override List<ProtocolMetadata> metadata();
    void updatePatternSubscription(Cluster cluster);
    void poll(long now, long remainingMs);
    long timeToNextPoll(long now);
    @Override boolean needRejoin();
    void refreshCommittedOffsetsIfNeeded();
    Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions);
    void close(long timeoutMs);
    void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback);
    boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs);
    void maybeAutoCommitOffsetsNow();
}
@Test public void testCommitOffsetSyncNotCoordinator() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NOT_COORDINATOR))); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); } @Test public void testCommitOffsetSyncCoordinatorNotAvailable() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.COORDINATOR_NOT_AVAILABLE))); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); } @Test public void testCommitOffsetSyncCoordinatorDisconnected() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)), true); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); } @Test(expected = KafkaException.class) public void testCommitUnknownTopicOrPartition() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new 
OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); } @Test(expected = OffsetMetadataTooLarge.class) public void testCommitOffsetMetadataTooLarge() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.OFFSET_METADATA_TOO_LARGE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); } @Test(expected = CommitFailedException.class) public void testCommitOffsetIllegalGeneration() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.ILLEGAL_GENERATION))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); } @Test(expected = CommitFailedException.class) public void testCommitOffsetUnknownMemberId() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN_MEMBER_ID))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); } @Test(expected = CommitFailedException.class) public void testCommitOffsetRebalanceInProgress() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.REBALANCE_IN_PROGRESS))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); } @Test(expected = KafkaException.class) public void testCommitOffsetSyncCallbackWithNonRetriableException() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); 
client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN)), false); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); } @Test(expected = IllegalArgumentException.class) public void testCommitSyncNegativeOffset() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(-1L)), Long.MAX_VALUE); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRefreshOffset() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); } @Test public void testRefreshOffsetLoadInProgress() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); } @Test public void testRefreshOffsetsGroupNotAuthorized() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED)); try { coordinator.refreshCommittedOffsetsIfNeeded(); fail("Expected group authorization error"); } catch (GroupAuthorizationException e) { assertEquals(groupId, e.groupId()); } } @Test(expected = KafkaException.class) public void testRefreshOffsetUnknownTopicOrPartition() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); } @Test public void 
testRefreshOffsetNotCoordinatorForConsumer() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); } @Test public void testRefreshOffsetWithNoFetchableOffsets() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", -1L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(null, subscriptions.committed(t1p)); }
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testProtocolMetadataOrder() { RoundRobinAssignor roundRobin = new RoundRobinAssignor(); RangeAssignor range = new RangeAssignor(); try (Metrics metrics = new Metrics(time)) { ConsumerCoordinator coordinator = buildCoordinator(metrics, Arrays.<PartitionAssignor>asList(roundRobin, range), ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true); List<ProtocolMetadata> metadata = coordinator.metadata(); assertEquals(2, metadata.size()); assertEquals(roundRobin.name(), metadata.get(0).name()); assertEquals(range.name(), metadata.get(1).name()); } try (Metrics metrics = new Metrics(time)) { ConsumerCoordinator coordinator = buildCoordinator(metrics, Arrays.<PartitionAssignor>asList(range, roundRobin), ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true); List<ProtocolMetadata> metadata = coordinator.metadata(); assertEquals(2, metadata.size()); assertEquals(range.name(), metadata.get(0).name()); assertEquals(roundRobin.name(), metadata.get(1).name()); } }
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void send() { client.prepareResponse(heartbeatResponse(Errors.NONE)); RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); assertEquals(1, consumerClient.pendingRequestCount()); assertEquals(1, consumerClient.pendingRequestCount(node)); assertFalse(future.isDone()); consumerClient.poll(future); assertTrue(future.isDone()); assertTrue(future.succeeded()); ClientResponse clientResponse = future.value(); HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody(); assertEquals(Errors.NONE, response.error()); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void doNotBlockIfPollConditionIsSatisfied() { NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(0L), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return false; } }); EasyMock.verify(mockNetworkClient); } @Test public void blockWhenPollConditionNotSatisfied() { long timeout = 4000L; NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000); EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(1); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(timeout), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(timeout, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return true; } }); EasyMock.verify(mockNetworkClient); } @Test public void blockOnlyForRetryBackoffIfNoInflightRequests() { long retryBackoffMs = 100L; NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, retryBackoffMs, 1000L); EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(0); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(retryBackoffMs), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return true; } }); 
EasyMock.verify(mockNetworkClient); }
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void wakeup() { RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); consumerClient.wakeup(); try { consumerClient.poll(0); fail(); } catch (WakeupException e) { } client.respond(heartbeatResponse(Errors.NONE)); consumerClient.poll(future); assertTrue(future.isDone()); }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void testAwaitForMetadataUpdateWithTimeout() { assertFalse(consumerClient.awaitMetadataUpdate(10L)); }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
@Test public void testShouldHeartbeat() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep((long) ((float) interval * 1.1)); assertTrue(heartbeat.shouldHeartbeat(time.milliseconds())); }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
@Test public void testTimeToNextHeartbeat() { heartbeat.sentHeartbeat(0); assertEquals(100, heartbeat.timeToNextHeartbeat(0)); assertEquals(0, heartbeat.timeToNextHeartbeat(100)); assertEquals(0, heartbeat.timeToNextHeartbeat(200)); }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
@Test public void testSessionTimeoutExpired() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep(305); assertTrue(heartbeat.sessionTimeoutExpired(time.milliseconds())); }
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testCoordinatorDiscoveryBackoff() { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.blackout(coordinatorNode, 50L); long initialTime = mockTime.milliseconds(); coordinator.ensureCoordinatorReady(); long endTime = mockTime.milliseconds(); assertTrue(endTime - initialTime >= RETRY_BACKOFF_MS); }
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testLookupCoordinator() throws Exception { mockClient.setNode(null); RequestFuture<Void> noBrokersAvailableFuture = coordinator.lookupCoordinator(); assertTrue("Failed future expected", noBrokersAvailableFuture.failed()); mockClient.setNode(node); RequestFuture<Void> future = coordinator.lookupCoordinator(); assertFalse("Request not sent", future.isDone()); assertTrue("New request sent while one is in progress", future == coordinator.lookupCoordinator()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); assertTrue("New request not sent after previous completed", future != coordinator.lookupCoordinator()); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterJoinGroupSent() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest && invocations == 1) throw new WakeupException(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterJoinGroupSentExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest && invocations == 1) throw new WakeupException(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, 
coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterJoinGroupReceived() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest) consumerClient.wakeup(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest) consumerClient.wakeup(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from 
ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterSyncGroupSent() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest && invocations == 1) throw new WakeupException(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if 
(isSyncGroupRequest && invocations == 1) throw new WakeupException(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterSyncGroupReceived() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest) consumerClient.wakeup(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); } @Test public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new 
MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest) consumerClient.wakeup(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
// Schema metadata (optional flag, default, name/version/doc/parameters) must appear in the envelope's schema field.
@Test
public void testJsonSchemaMetadataTranslation() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue());
    // optional schema + null value -> null payload node
    converted = parse(converter.fromConnectData(TOPIC, Schema.OPTIONAL_BOOLEAN_SCHEMA, null));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"boolean\", \"optional\": true }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isNull());
    // default value is carried in the schema
    converted = parse(converter.fromConnectData(TOPIC, SchemaBuilder.bool().defaultValue(true).build(), true));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false, \"default\": true }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue());
    // name/version/doc/parameters are all carried through
    converted = parse(converter.fromConnectData(TOPIC, SchemaBuilder.bool().required().name("bool").version(3).doc("the documentation").parameter("foo", "bar").build(), true));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false, \"name\": \"bool\", \"version\": 3, \"doc\": \"the documentation\", \"parameters\": { \"foo\": \"bar\" }}"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue());
}

// Primitive round-trips: each Connect primitive schema maps to its JSON type tag and payload representation.
@Test
public void booleanToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue());
}

@Test
public void byteToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT8_SCHEMA, (byte) 12));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int8\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue());
}

@Test
public void shortToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT16_SCHEMA, (short) 12));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int16\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue());
}

@Test
public void intToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT32_SCHEMA, 12));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int32\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue());
}

@Test
public void longToJson() {
    // Value exceeds int range to exercise the 64-bit path.
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT64_SCHEMA, 4398046511104L));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int64\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(4398046511104L, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).longValue());
}

@Test
public void floatToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.FLOAT32_SCHEMA, 12.34f));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"float\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12.34f, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).floatValue(), 0.001);
}

@Test
public void doubleToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.FLOAT64_SCHEMA, 12.34));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"double\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12.34, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).doubleValue(), 0.001);
}

@Test
public void bytesToJson() throws IOException {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, "test-string".getBytes()));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    // bytes payload is base64-decoded by binaryValue(); compare via ByteBuffer for content equality
    assertEquals(ByteBuffer.wrap("test-string".getBytes()), ByteBuffer.wrap(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).binaryValue()));
}

@Test
public void stringToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, "test-string"));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"string\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals("test-string", converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).textValue());
}

@Test
public void arrayToJson() {
    Schema int32Array = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
    JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, Arrays.asList(1, 2, 3)));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"array\", \"items\": { \"type\": \"int32\", \"optional\": false }, \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add(2).add(3), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}

// Maps with string keys serialize as a JSON object.
@Test
public void mapToJsonStringKeys() {
    Schema stringIntMap = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).build();
    Map<String, Integer> input = new HashMap<>();
    input.put("key1", 12);
    input.put("key2", 15);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, stringIntMap, input));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"map\", \"keys\": { \"type\" : \"string\", \"optional\": false }, \"values\": { \"type\" : \"int32\", \"optional\": false }, \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(JsonNodeFactory.instance.objectNode().put("key1", 12).put("key2", 15), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}

// Maps with non-string keys serialize as an array of [key, value] pairs, in unspecified order
// (hence the set comparison instead of a positional one).
@Test
public void mapToJsonNonStringKeys() {
    Schema intIntMap = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build();
    Map<Integer, Integer> input = new HashMap<>();
    input.put(1, 12);
    input.put(2, 15);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, intIntMap, input));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"map\", \"keys\": { \"type\" : \"int32\", \"optional\": false }, \"values\": { \"type\" : \"int32\", \"optional\": false }, \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isArray());
    ArrayNode payload = (ArrayNode) converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertEquals(2, payload.size());
    Set<JsonNode> payloadEntries = new HashSet<>();
    for (JsonNode elem : payload)
        payloadEntries.add(elem);
    assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add(1).add(12),
                                             JsonNodeFactory.instance.arrayNode().add(2).add(15))),
                 payloadEntries
    );
}

@Test
public void structToJson() {
    Schema schema = SchemaBuilder.struct().field("field1", Schema.BOOLEAN_SCHEMA).field("field2", Schema.STRING_SCHEMA).field("field3", Schema.STRING_SCHEMA).field("field4", Schema.BOOLEAN_SCHEMA).build();
    Struct input = new Struct(schema).put("field1", true).put("field2", "string2").put("field3", "string3").put("field4", false);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, input));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"struct\", \"optional\": false, \"fields\": [{ \"field\": \"field1\", \"type\": \"boolean\", \"optional\": false }, { \"field\": \"field2\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field3\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field4\", \"type\": \"boolean\", \"optional\": false }] }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(JsonNodeFactory.instance.objectNode()
                    .put("field1", true)
                    .put("field2", "string2")
                    .put("field3", "string3")
                    .put("field4", false),
                 converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}

// Logical types: Decimal serializes as bytes (unscaled big-endian two's complement) with scale in parameters.
@Test
public void decimalToJson() throws IOException {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Decimal.schema(2), new BigDecimal(new BigInteger("156"), 2)));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"2\" } }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertArrayEquals(new byte[]{0, -100}, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).binaryValue()); // 156 == 0x009C
}

// Date serializes as int32 days since epoch.
@Test
public void dateToJson() throws IOException {
    GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
    calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
    calendar.add(Calendar.DATE, 10000);
    java.util.Date date = calendar.getTime();
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Date.SCHEMA, date));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertTrue(payload.isInt());
    assertEquals(10000, payload.intValue());
}

// Time serializes as int32 milliseconds since midnight.
@Test
public void timeToJson() throws IOException {
    GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
    calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
    calendar.add(Calendar.MILLISECOND, 14400000);
    java.util.Date date = calendar.getTime();
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Time.SCHEMA, date));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertTrue(payload.isInt());
    assertEquals(14400000, payload.longValue());
}

// Timestamp serializes as int64 milliseconds since epoch; two adds avoid int overflow of 4e9.
@Test
public void timestampToJson() throws IOException {
    GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
    calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
    calendar.add(Calendar.MILLISECOND, 2000000000);
    calendar.add(Calendar.MILLISECOND, 2000000000);
    java.util.Date date = calendar.getTime();
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Timestamp.SCHEMA, date));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"int64\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertTrue(payload.isLong());
    assertEquals(4000000000L, payload.longValue());
}

// Schemaless conversion: the envelope's schema field is a JSON null.
@Test
public void nullSchemaAndPrimitiveToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, null, true));
    validateEnvelopeNullSchema(converted);
    assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull());
    assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue());
}

@Test
public void nullSchemaAndArrayToJson() {
    // Heterogeneous list is fine without a schema.
    JsonNode converted = parse(converter.fromConnectData(TOPIC, null, Arrays.asList(1, "string", true)));
    validateEnvelopeNullSchema(converted);
    assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull());
    assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add("string").add(true), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}

@Test
public void nullSchemaAndMapToJson() {
    // String-keyed map without schema still serializes as a JSON object.
    Map<String, Object> input = new HashMap<>();
    input.put("key1", 12);
    input.put("key2", "string");
    input.put("key3", true);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, null, input));
    validateEnvelopeNullSchema(converted);
    assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull());
    assertEquals(JsonNodeFactory.instance.objectNode().put("key1", 12).put("key2", "string").put("key3", true), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}

@Test
public void nullSchemaAndMapNonStringKeysToJson() {
    // Mixed-key map without schema falls back to the [key, value]-pair array encoding.
    Map<Object, Object> input = new HashMap<>();
    input.put("string", 12);
    input.put(52, "string");
    input.put(false, true);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, null, input));
    validateEnvelopeNullSchema(converted);
    assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull());
    assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isArray());
    ArrayNode payload = (ArrayNode) converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertEquals(3, payload.size());
    Set<JsonNode> payloadEntries = new HashSet<>();
    for (JsonNode elem : payload)
        payloadEntries.add(elem);
    assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12),
                                             JsonNodeFactory.instance.arrayNode().add(52).add("string"),
                                             JsonNodeFactory.instance.arrayNode().add(false).add(true))),
                 payloadEntries
    );
}

// A value that does not match its declared schema must be rejected.
@Test(expected = DataException.class)
public void mismatchSchemaJson() {
    converter.fromConnectData(TOPIC, Schema.FLOAT64_SCHEMA, true);
}

// Converted schemas are cached: equal schemas share one entry, distinct ones add entries.
@Test
public void testCacheSchemaToJsonConversion() {
    Cache<Schema, ObjectNode> cache = Whitebox.getInternalState(converter, "fromConnectSchemaCache");
    assertEquals(0, cache.size());
    converter.fromConnectData(TOPIC, SchemaBuilder.bool().build(), true);
    assertEquals(1, cache.size());
    // identical schema -> cache hit, no growth
    converter.fromConnectData(TOPIC, SchemaBuilder.bool().build(), true);
    assertEquals(1, cache.size());
    // different (optional) schema -> new entry
    converter.fromConnectData(TOPIC, SchemaBuilder.bool().optional().build(), true);
    assertEquals(2, cache.size());
}
SubscriptionState {
    /**
     * Updates the fetch/consumed position of an assigned partition.
     * assignedState() rejects partitions that are not currently assigned
     * (see cantChangePositionForNonAssignedPartition test).
     */
    public void position(TopicPartition tp, long offset) {
        assignedState(tp).position(offset);
    }
    SubscriptionState(OffsetResetStrategy defaultResetStrategy);
    void subscribe(Set<String> topics, ConsumerRebalanceListener listener);
    void subscribeFromPattern(Set<String> topics);
    void groupSubscribe(Collection<String> topics);
    void resetGroupSubscription();
    void assignFromUser(Set<TopicPartition> partitions);
    void assignFromSubscribed(Collection<TopicPartition> assignments);
    void subscribe(Pattern pattern, ConsumerRebalanceListener listener);
    boolean hasPatternSubscription();
    boolean hasNoSubscriptionOrUserAssignment();
    void unsubscribe();
    Pattern subscribedPattern();
    Set<String> subscription();
    Set<TopicPartition> pausedPartitions();
    Set<String> groupSubscription();
    void committed(TopicPartition tp, OffsetAndMetadata offset);
    OffsetAndMetadata committed(TopicPartition tp);
    void needRefreshCommits();
    boolean refreshCommitsNeeded();
    void commitsRefreshed();
    void seek(TopicPartition tp, long offset);
    Set<TopicPartition> assignedPartitions();
    List<TopicPartition> fetchablePartitions();
    boolean partitionsAutoAssigned();
    void position(TopicPartition tp, long offset);
    Long position(TopicPartition tp);
    Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel);
    void updateHighWatermark(TopicPartition tp, long highWatermark);
    void updateLastStableOffset(TopicPartition tp, long lastStableOffset);
    Map<TopicPartition, OffsetAndMetadata> allConsumed();
    void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy);
    void needOffsetReset(TopicPartition partition);
    boolean hasDefaultOffsetResetPolicy();
    boolean isOffsetResetNeeded(TopicPartition partition);
    OffsetResetStrategy resetStrategy(TopicPartition partition);
    boolean hasAllFetchPositions(Collection<TopicPartition> partitions);
    boolean hasAllFetchPositions();
    Set<TopicPartition> missingFetchPositions();
    boolean isAssigned(TopicPartition tp);
    boolean isPaused(TopicPartition tp);
    boolean isFetchable(TopicPartition tp);
    boolean hasValidPosition(TopicPartition tp);
    void pause(TopicPartition tp);
    void resume(TopicPartition tp);
    void movePartitionToEnd(TopicPartition tp);
    ConsumerRebalanceListener listener();
    void addListener(Listener listener);
    void fireOnAssignment(Set<TopicPartition> assignment);
}
@Test(expected = IllegalStateException.class) public void cantChangePositionForNonAssignedPartition() { state.position(tp0, 1); }
SubscriptionState {
    /**
     * Subscribes to the given set of topics with automatic partition assignment.
     * Rejects a null rebalance listener; switching the subscription type enforces
     * mutual exclusion with pattern subscription and manual assignment
     * (see cantSubscribeTopicAndPattern / cantSubscribePatternAndTopic tests).
     */
    public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) {
        if (listener == null)
            throw new IllegalArgumentException("RebalanceListener cannot be null");
        setSubscriptionType(SubscriptionType.AUTO_TOPICS);
        this.listener = listener;
        changeSubscription(topics);
    }
    SubscriptionState(OffsetResetStrategy defaultResetStrategy);
    void subscribe(Set<String> topics, ConsumerRebalanceListener listener);
    void subscribeFromPattern(Set<String> topics);
    void groupSubscribe(Collection<String> topics);
    void resetGroupSubscription();
    void assignFromUser(Set<TopicPartition> partitions);
    void assignFromSubscribed(Collection<TopicPartition> assignments);
    void subscribe(Pattern pattern, ConsumerRebalanceListener listener);
    boolean hasPatternSubscription();
    boolean hasNoSubscriptionOrUserAssignment();
    void unsubscribe();
    Pattern subscribedPattern();
    Set<String> subscription();
    Set<TopicPartition> pausedPartitions();
    Set<String> groupSubscription();
    void committed(TopicPartition tp, OffsetAndMetadata offset);
    OffsetAndMetadata committed(TopicPartition tp);
    void needRefreshCommits();
    boolean refreshCommitsNeeded();
    void commitsRefreshed();
    void seek(TopicPartition tp, long offset);
    Set<TopicPartition> assignedPartitions();
    List<TopicPartition> fetchablePartitions();
    boolean partitionsAutoAssigned();
    void position(TopicPartition tp, long offset);
    Long position(TopicPartition tp);
    Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel);
    void updateHighWatermark(TopicPartition tp, long highWatermark);
    void updateLastStableOffset(TopicPartition tp, long lastStableOffset);
    Map<TopicPartition, OffsetAndMetadata> allConsumed();
    void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy);
    void needOffsetReset(TopicPartition partition);
    boolean hasDefaultOffsetResetPolicy();
    boolean isOffsetResetNeeded(TopicPartition partition);
    OffsetResetStrategy resetStrategy(TopicPartition partition);
    boolean hasAllFetchPositions(Collection<TopicPartition> partitions);
    boolean hasAllFetchPositions();
    Set<TopicPartition> missingFetchPositions();
    boolean isAssigned(TopicPartition tp);
    boolean isPaused(TopicPartition tp);
    boolean isFetchable(TopicPartition tp);
    boolean hasValidPosition(TopicPartition tp);
    void pause(TopicPartition tp);
    void resume(TopicPartition tp);
    void movePartitionToEnd(TopicPartition tp);
    ConsumerRebalanceListener listener();
    void addListener(Listener listener);
    void fireOnAssignment(Set<TopicPartition> assignment);
}
@Test(expected = IllegalStateException.class) public void cantSubscribeTopicAndPattern() { state.subscribe(singleton(topic), rebalanceListener); state.subscribe(Pattern.compile(".*"), rebalanceListener); } @Test(expected = IllegalStateException.class) public void cantSubscribePatternAndTopic() { state.subscribe(Pattern.compile(".*"), rebalanceListener); state.subscribe(singleton(topic), rebalanceListener); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
// No topics at all: the lone consumer still gets a (empty) entry in the assignment map.
@Test
public void testOneConsumerNoTopic() {
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList())));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertTrue(assignment.get(consumerId).isEmpty());
}

// Subscribed topic has no partition metadata: consumer gets an empty assignment, no error.
@Test
public void testOneConsumerNonexistentTopic() {
    String topic = "topic";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertTrue(assignment.get(consumerId).isEmpty());
}

// Single consumer gets every partition of its topic.
@Test
public void testOneConsumerOneTopic() {
    String topic = "topic";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 3);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertAssignment(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId));
}

// Partitions of topics the consumer did not subscribe to are never assigned to it.
@Test
public void testOnlyAssignsPartitionsFromSubscribedTopics() {
    String topic = "topic";
    String otherTopic = "other";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 3);
    partitionsPerTopic.put(otherTopic, 3);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertAssignment(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId));
}

// Single consumer subscribed to two topics receives all partitions of both.
@Test
public void testOneConsumerMultipleTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 1);
    partitionsPerTopic.put(topic2, 2);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertAssignment(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId));
}

// More consumers than partitions: range assignment gives the single partition to the
// lexicographically first member; the other gets nothing.
@Test
public void testTwoConsumersOneTopicOnePartition() {
    String topic = "topic";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 1);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic)));
    consumers.put(consumer2, new Subscription(topics(topic)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic, 0)), assignment.get(consumer1));
    assertAssignment(Collections.<TopicPartition>emptyList(), assignment.get(consumer2));
}

// Even split: one partition each, in member-id order.
@Test
public void testTwoConsumersOneTopicTwoPartitions() {
    String topic = "topic";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 2);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic)));
    consumers.put(consumer2, new Subscription(topics(topic)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic, 0)), assignment.get(consumer1));
    assertAssignment(partitions(tp(topic, 1)), assignment.get(consumer2));
}

// Mixed subscriptions: topic2 has a single subscriber (consumer2) and goes entirely to it;
// topic1's three partitions are ranged across all three members.
@Test
public void testMultipleConsumersMixedTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    String consumer3 = "consumer3";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 2);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer3, new Subscription(topics(topic1)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic1, 0)), assignment.get(consumer1));
    assertAssignment(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2));
    assertAssignment(partitions(tp(topic1, 2)), assignment.get(consumer3));
}

// Odd split per topic (3 partitions, 2 consumers): the first member gets the extra
// partition of EACH topic — the known per-topic skew of range assignment.
@Test
public void testTwoConsumersTwoTopicsSixPartitions() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 3);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertAssignment(partitions(tp(topic1, 0), tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer1));
    assertAssignment(partitions(tp(topic1, 2), tp(topic2, 2)), assignment.get(consumer2));
}
KafkaConsumer implements Consumer<K, V> {
    /**
     * Closes the consumer, waiting up to the default timeout
     * (DEFAULT_CLOSE_TIMEOUT_MS) for outstanding work to complete.
     * Delegates to {@link #close(long, TimeUnit)}.
     */
    @Override
    public void close() {
        close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    }
    KafkaConsumer(Map<String, Object> configs);
    KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(Properties properties);
    KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs);
    Set<TopicPartition> assignment();
    Set<String> subscription();
    @Override // Subscribe to the given topics; partitions are assigned to the consumer automatically.
    void subscribe(Collection<String> topics, ConsumerRebalanceListener listener);
    @Override void subscribe(Collection<String> topics);
    @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener);
    void unsubscribe();
    @Override // Manually assign specific topic-partitions to consume; mutually exclusive with subscribe().
    void assign(Collection<TopicPartition> partitions);
    @Override // Fetches records from the brokers.
    ConsumerRecords<K, V> poll(long timeout);
    @Override void commitSync();
    @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets);
    @Override void commitAsync();
    @Override void commitAsync(OffsetCommitCallback callback);
    @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback);
    @Override // Sets the position the consumer starts consuming from.
    void seek(TopicPartition partition, long offset);
    void seekToBeginning(Collection<TopicPartition> partitions);
    void seekToEnd(Collection<TopicPartition> partitions);
    long position(TopicPartition partition);
    @Override OffsetAndMetadata committed(TopicPartition partition);
    @Override Map<MetricName, ? extends Metric> metrics();
    @Override List<PartitionInfo> partitionsFor(String topic);
    @Override Map<String, List<PartitionInfo>> listTopics();
    @Override void pause(Collection<TopicPartition> partitions);
    @Override // Resume the consumer.
    void resume(Collection<TopicPartition> partitions);
    @Override // Pause the consumer. (NOTE(review): original comment sat on paused(), which only reports the paused set — looks misplaced; verify.)
    Set<TopicPartition> paused();
    @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch);
    @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions);
    @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions);
    @Override void close();
    void close(long timeout, TimeUnit timeUnit);
    @Override void wakeup();
}
@Test public void testOsDefaultSocketBufferSizes() throws Exception { Map<String, Object> config = new HashMap<>(); config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>( config, new ByteArrayDeserializer(), new ByteArrayDeserializer()); consumer.close(); } @Test public void testInterceptorConstructorClose() throws Exception { try { Properties props = new Properties(); props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName()); KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>( props, new StringDeserializer(), new StringDeserializer()); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(0, MockConsumerInterceptor.CLOSE_COUNT.get()); consumer.close(); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); Assert.assertNull(MockConsumerInterceptor.CLUSTER_META.get()); } finally { MockConsumerInterceptor.resetCounters(); } } @Test public void closeShouldBeIdempotent() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.close(); consumer.close(); }
KafkaConsumer implements Consumer<K, V> {
    /**
     * Returns the topics the consumer is currently subscribed to, as a defensive,
     * unmodifiable snapshot (copied into a new HashSet so later subscription changes
     * are not visible through the returned set). acquire()/release() enforce the
     * consumer's single-threaded access check.
     */
    public Set<String> subscription() {
        acquire();
        try {
            return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription()));
        } finally {
            release();
        }
    }
    KafkaConsumer(Map<String, Object> configs);
    KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(Properties properties);
    KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs);
    Set<TopicPartition> assignment();
    Set<String> subscription();
    @Override // Subscribe to the given topics; partitions are assigned to the consumer automatically.
    void subscribe(Collection<String> topics, ConsumerRebalanceListener listener);
    @Override void subscribe(Collection<String> topics);
    @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener);
    void unsubscribe();
    @Override // Manually assign specific topic-partitions to consume; mutually exclusive with subscribe().
    void assign(Collection<TopicPartition> partitions);
    @Override // Fetches records from the brokers.
    ConsumerRecords<K, V> poll(long timeout);
    @Override void commitSync();
    @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets);
    @Override void commitAsync();
    @Override void commitAsync(OffsetCommitCallback callback);
    @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback);
    @Override // Sets the position the consumer starts consuming from.
    void seek(TopicPartition partition, long offset);
    void seekToBeginning(Collection<TopicPartition> partitions);
    void seekToEnd(Collection<TopicPartition> partitions);
    long position(TopicPartition partition);
    @Override OffsetAndMetadata committed(TopicPartition partition);
    @Override Map<MetricName, ? extends Metric> metrics();
    @Override List<PartitionInfo> partitionsFor(String topic);
    @Override Map<String, List<PartitionInfo>> listTopics();
    @Override void pause(Collection<TopicPartition> partitions);
    @Override // Resume the consumer.
    void resume(Collection<TopicPartition> partitions);
    @Override // Pause the consumer. (NOTE(review): original comment sat on paused(), which only reports the paused set — looks misplaced; verify.)
    Set<TopicPartition> paused();
    @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch);
    @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions);
    @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions);
    @Override void close();
    void close(long timeout, TimeUnit timeUnit);
    @Override void wakeup();
}
// Verifies that subscribe() and assign() populate disjoint state and that
// unsubscribe() clears both.
@Test
public void testSubscription() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer();
    // subscribe() fills the subscription set but not the manual assignment set
    consumer.subscribe(singletonList(topic));
    assertEquals(singleton(topic), consumer.subscription());
    assertTrue(consumer.assignment().isEmpty());
    // subscribing to an empty list clears the previous subscription
    consumer.subscribe(Collections.<String>emptyList());
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    // assign() is the manual counterpart: fills assignment, leaves subscription empty
    consumer.assign(singletonList(tp0));
    assertTrue(consumer.subscription().isEmpty());
    assertEquals(singleton(tp0), consumer.assignment());
    // unsubscribe() clears both subscription modes
    consumer.unsubscribe();
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    consumer.close();
}
// NOTE(review): flattened extraction of KafkaConsumer — the class header and
// type parameters are truncated here; only pause() carries its body.
KafkaConsumer implements Consumer<K, V> {
    /**
     * Suspends fetching from the given partitions; poll() will return no
     * records from them until resume() is called. Marks each partition
     * individually in the SubscriptionState.
     */
    @Override
    public void pause(Collection<TopicPartition> partitions) {
        acquire();
        try {
            for (TopicPartition partition: partitions) {
                log.debug("Pausing partition {}", partition);
                subscriptions.pause(partition);
            }
        } finally {
            release();
        }
    }
    KafkaConsumer(Map<String, Object> configs);
    KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(Properties properties);
    KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer);
    KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs);
    Set<TopicPartition> assignment();
    Set<String> subscription();
    @Override // Subscribe to the given topics; partitions are assigned to the consumer automatically
    void subscribe(Collection<String> topics, ConsumerRebalanceListener listener);
    @Override void subscribe(Collection<String> topics);
    @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener);
    void unsubscribe();
    @Override // Manually assign specific topic partitions to consume; mutually exclusive with subscribe()
    void assign(Collection<TopicPartition> partitions);
    @Override // Fetches records from the brokers
    ConsumerRecords<K, V> poll(long timeout);
    @Override void commitSync();
    @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets);
    @Override void commitAsync();
    @Override void commitAsync(OffsetCommitCallback callback);
    @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback);
    @Override // Set the position the consumer will start consuming from
    void seek(TopicPartition partition, long offset);
    void seekToBeginning(Collection<TopicPartition> partitions);
    void seekToEnd(Collection<TopicPartition> partitions);
    long position(TopicPartition partition);
    @Override OffsetAndMetadata committed(TopicPartition partition);
    @Override Map<MetricName, ? extends Metric> metrics();
    @Override List<PartitionInfo> partitionsFor(String topic);
    @Override Map<String, List<PartitionInfo>> listTopics();
    @Override void pause(Collection<TopicPartition> partitions);
    @Override // Resume fetching from previously paused partitions
    void resume(Collection<TopicPartition> partitions);
    @Override // Get the set of partitions currently paused
    Set<TopicPartition> paused();
    @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch);
    @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions);
    @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions);
    @Override void close();
    void close(long timeout, TimeUnit timeUnit);
    @Override void wakeup();
}
// Verifies pause()/resume() bookkeeping and that unsubscribe() clears the
// paused set as well.
@Test
public void testPause() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer();
    consumer.assign(singletonList(tp0));
    assertEquals(singleton(tp0), consumer.assignment());
    // nothing paused initially
    assertTrue(consumer.paused().isEmpty());
    consumer.pause(singleton(tp0));
    assertEquals(singleton(tp0), consumer.paused());
    consumer.resume(singleton(tp0));
    assertTrue(consumer.paused().isEmpty());
    // unsubscribing also drops any paused markers
    consumer.unsubscribe();
    assertTrue(consumer.paused().isEmpty());
    consumer.close();
}
// NOTE(review): flattened extraction of ConsumerRecord — class header/type
// parameters truncated; only checksum() carries its body.
ConsumerRecord {
    /**
     * Deprecated record checksum. When the record was constructed with a null
     * checksum (message format v2 has no per-record checksum), it is lazily
     * replaced by a partial checksum derived from the timestamp and the
     * serialized key/value sizes, then cached.
     * NOTE(review): the lazy write is unsynchronized — benign for a
     * single-threaded reader, but not safe for concurrent callers; confirm
     * records are never shared across threads.
     */
    @Deprecated
    public long checksum() {
        if (checksum == null)
            this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize);
        return this.checksum; // auto-unboxed; non-null after the lazy fill above
    }
    ConsumerRecord(String topic, int partition, long offset, K key, V value);
    ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, long checksum, int serializedKeySize, int serializedValueSize, K key, V value);
    ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, Long checksum, int serializedKeySize, int serializedValueSize, K key, V value, Headers headers);
    String topic(); int partition(); Headers headers(); K key(); V value(); long offset(); long timestamp(); TimestampType timestampType();
    @Deprecated long checksum();
    int serializedKeySize(); int serializedValueSize();
    @Override String toString();
    static final long NO_TIMESTAMP; static final int NULL_SIZE; static final int NULL_CHECKSUM;
}
// A null checksum passed to the constructor should be lazily replaced by the
// partial checksum computed from timestamp and serialized key/value sizes.
@Test
@SuppressWarnings("deprecation")
public void testNullChecksumInConstructor() {
    final String key = "key";
    final String value = "value";
    final long timestamp = 242341324L;
    ConsumerRecord<String, String> record =
            new ConsumerRecord<>("topic", 0, 23L, timestamp, TimestampType.CREATE_TIME,
                    null, key.length(), value.length(), key, value, new RecordHeaders());
    long expected = DefaultRecord.computePartialChecksum(timestamp, key.length(), value.length());
    assertEquals(expected, record.checksum());
}
// NOTE(review): flattened extraction of RoundRobinAssignor — only assign()
// carries its body here.
RoundRobinAssignor extends AbstractPartitionAssignor {
    /**
     * Distributes all partitions of the subscribed topics across members in a
     * round-robin fashion. Members are visited in sorted order so the result
     * is deterministic and identical on every consumer computing it.
     *
     * @param partitionsPerTopic partition counts per topic name
     * @param subscriptions      each member's topic subscription
     * @return per-member partition lists; every member gets an entry, possibly empty
     */
    @Override
    public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                    Map<String, Subscription> subscriptions) {
        Map<String, List<TopicPartition>> assignment = new HashMap<>();
        for (String memberId : subscriptions.keySet())
            assignment.put(memberId, new ArrayList<>()); // diamond operator, consistent with the HashMap above
        // Cycle through members endlessly; sorted order keeps the assignment stable.
        CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet()));
        for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
            final String topic = partition.topic();
            // Skip members not subscribed to this topic. This loop terminates
            // because allPartitionsSorted only yields partitions of topics
            // that at least one member subscribes to.
            while (!subscriptions.get(assigner.peek()).topics().contains(topic))
                assigner.next();
            assignment.get(assigner.next()).add(partition);
        }
        return assignment;
    }
    @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions);
    List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions);
    @Override String name();
}
// One member, no topics at all: member still gets an (empty) entry.
@Test
public void testOneConsumerNoTopic() {
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList())));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertTrue(assignment.get(consumerId).isEmpty());
}

// Subscribing to a topic the cluster does not know about yields an empty assignment.
@Test
public void testOneConsumerNonexistentTopic() {
    String topic = "topic";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertTrue(assignment.get(consumerId).isEmpty());
}

// A single member receives every partition of its topic.
@Test
public void testOneConsumerOneTopic() {
    String topic = "topic";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 3);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId));
}

// Partitions of topics the member did not subscribe to are never assigned.
@Test
public void testOnlyAssignsPartitionsFromSubscribedTopics() {
    String topic = "topic";
    String otherTopic = "other";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 3);
    partitionsPerTopic.put(otherTopic, 3);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId));
}

// A single member subscribed to several topics receives all of their partitions.
@Test
public void testOneConsumerMultipleTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumerId = "consumer";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 1);
    partitionsPerTopic.put(topic2, 2);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))));
    assertEquals(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId));
}

// Fewer partitions than members: the extra member gets an empty list, not a missing entry.
@Test
public void testTwoConsumersOneTopicOnePartition() {
    String topic = "topic";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 1);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic)));
    consumers.put(consumer2, new Subscription(topics(topic)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1));
    assertEquals(Collections.<TopicPartition>emptyList(), assignment.get(consumer2));
}

// Two members, two partitions: one partition each, in sorted member order.
@Test
public void testTwoConsumersOneTopicTwoPartitions() {
    String topic = "topic";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 2);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic)));
    consumers.put(consumer2, new Subscription(topics(topic)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic, 1)), assignment.get(consumer2));
}

// Mixed subscriptions: members not subscribed to a topic are skipped when its
// partitions are dealt out, so consumer2 picks up all of topic2.
@Test
public void testMultipleConsumersMixedTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    String consumer3 = "consumer3";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 2);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer3, new Subscription(topics(topic1)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic1, 0)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2));
    assertEquals(partitions(tp(topic1, 2)), assignment.get(consumer3));
}

// Even split across two members and two topics, alternating partitions.
@Test
public void testTwoConsumersTwoTopicsSixPartitions() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 3);
    partitionsPerTopic.put(topic2, 3);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic1, 0), tp(topic1, 2), tp(topic2, 1)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 2)), assignment.get(consumer2));
}
// NOTE(review): flattened extraction of Metadata — only the three-argument
// constructor carries its body here.
Metadata {
    /**
     * Convenience constructor: topic expiry disabled and no cluster-resource
     * listeners; delegates to the five-argument constructor.
     *
     * @param refreshBackoffMs      minimum time between metadata refresh attempts
     * @param metadataExpireMs      max age before metadata is considered stale
     * @param allowAutoTopicCreation whether metadata requests may auto-create topics
     */
    public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) {
        this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners());
    }
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation);
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners);
    synchronized Cluster fetch();
    synchronized void add(String topic);
    synchronized long timeToNextUpdate(long nowMs);
    synchronized int requestUpdate();
    synchronized boolean updateRequested();
    synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs);
    synchronized void setTopics(Collection<String> topics);
    synchronized Set<String> topics();
    synchronized boolean containsTopic(String topic);
    synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now);
    synchronized void failedUpdate(long now);
    synchronized int version();
    synchronized long lastSuccessfulUpdate();
    boolean allowAutoTopicCreation();
    synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics);
    synchronized boolean needMetadataForAllTopics();
    synchronized void addListener(Listener listener);
    synchronized void removeListener(Listener listener);
    static final long TOPIC_EXPIRY_MS;
}
// End-to-end check of backoff, blocking fetch threads, and staleness expiry.
@Test
public void testMetadata() throws Exception {
    long time = 0;
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0);
    // requestUpdate() alone is not enough — the refresh backoff still applies
    metadata.requestUpdate();
    assertFalse("Still no updated needed due to backoff", metadata.timeToNextUpdate(time) == 0);
    time += refreshBackoffMs;
    assertTrue("Update needed now that backoff time expired", metadata.timeToNextUpdate(time) == 0);
    String topic = "my-topic";
    // Two threads block in awaitUpdate until the topic appears in the cluster
    Thread t1 = asyncFetch(topic, 500);
    Thread t2 = asyncFetch(topic, 500);
    assertTrue("Awaiting update", t1.isAlive());
    assertTrue("Awaiting update", t2.isAlive());
    // Keep feeding updates (respecting backoff) until both fetchers complete
    while (t1.isAlive() || t2.isAlive()) {
        if (metadata.timeToNextUpdate(time) == 0) {
            metadata.update(TestUtils.singletonCluster(topic, 1), Collections.<String>emptySet(), time);
            time += refreshBackoffMs;
        }
        Thread.sleep(1);
    }
    t1.join();
    t2.join();
    assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0);
    // Once metadataExpireMs elapses the metadata counts as stale again
    time += metadataExpireMs;
    assertTrue("Update needed due to stale metadata.", metadata.timeToNextUpdate(time) == 0);
}
// NOTE(review): flattened extraction of Metadata — only timeToNextUpdate()
// carries its body here.
Metadata {
    /**
     * Milliseconds until the next metadata refresh may happen: 0 means an
     * update is allowed right now. The result is the larger of the remaining
     * staleness window and the remaining refresh backoff.
     */
    public synchronized long timeToNextUpdate(long nowMs) {
        // An explicitly requested update (needUpdate) is due immediately;
        // otherwise wait until metadataExpireMs has passed since the last
        // successful refresh.
        long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0);
        // Backoff counts from the last attempt, successful or failed; may be
        // negative, which Math.max below clamps away.
        long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs;
        return Math.max(timeToExpire, timeToAllowUpdate);
    }
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation);
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners);
    synchronized Cluster fetch();
    synchronized void add(String topic);
    synchronized long timeToNextUpdate(long nowMs);
    synchronized int requestUpdate();
    synchronized boolean updateRequested();
    synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs);
    synchronized void setTopics(Collection<String> topics);
    synchronized Set<String> topics();
    synchronized boolean containsTopic(String topic);
    synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now);
    synchronized void failedUpdate(long now);
    synchronized int version();
    synchronized long lastSuccessfulUpdate();
    boolean allowAutoTopicCreation();
    synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics);
    synchronized boolean needMetadataForAllTopics();
    synchronized void addListener(Listener listener);
    synchronized void removeListener(Listener listener);
    static final long TOPIC_EXPIRY_MS;
}
// Table-driven sweep of (refreshBackoffMs, metadataExpireMs) combinations,
// covering each value dominating the other as well as both zero.
@Test
public void testTimeToNextUpdate() {
    long[][] backoffAndExpiry = {
        {100, 1000},
        {1000, 100},
        {0, 0},
        {0, 100},
        {100, 0},
    };
    for (long[] params : backoffAndExpiry)
        checkTimeToNextUpdate(params[0], params[1]);
}
// NOTE(review): flattened extraction of Metadata — only failedUpdate()
// carries its body here.
Metadata {
    /**
     * Records a failed update attempt. Only lastRefreshMs is advanced, so the
     * refresh backoff applies before the next attempt; lastSuccessfulRefreshMs
     * is deliberately left untouched so the metadata still counts as stale.
     */
    public synchronized void failedUpdate(long now) {
        this.lastRefreshMs = now;
    }
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation);
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners);
    synchronized Cluster fetch();
    synchronized void add(String topic);
    synchronized long timeToNextUpdate(long nowMs);
    synchronized int requestUpdate();
    synchronized boolean updateRequested();
    synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs);
    synchronized void setTopics(Collection<String> topics);
    synchronized Set<String> topics();
    synchronized boolean containsTopic(String topic);
    synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now);
    synchronized void failedUpdate(long now);
    synchronized int version();
    synchronized long lastSuccessfulUpdate();
    boolean allowAutoTopicCreation();
    synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics);
    synchronized boolean needMetadataForAllTopics();
    synchronized void addListener(Listener listener);
    synchronized void removeListener(Listener listener);
    static final long TOPIC_EXPIRY_MS;
}
// failedUpdate() must restart the backoff clock without touching the
// last-successful-update timestamp.
@Test
public void testFailedUpdate() {
    long time = 100;
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    assertEquals(100, metadata.timeToNextUpdate(1000));
    metadata.failedUpdate(1100);
    // backoff restarts from the failed attempt...
    assertEquals(100, metadata.timeToNextUpdate(1100));
    // ...but the last successful update is still the original one
    assertEquals(100, metadata.lastSuccessfulUpdate());
    // same behavior when metadata-for-all-topics mode is enabled
    metadata.needMetadataForAllTopics(true);
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    assertEquals(100, metadata.timeToNextUpdate(1000));
}
// NOTE(review): flattened extraction of Metadata — only update() carries its
// body here.
Metadata {
    /**
     * Replaces the cached cluster metadata with a fresh snapshot: resets the
     * refresh bookkeeping, expires unused topics (when enabled), notifies
     * listeners, and wakes any threads blocked in awaitUpdate().
     *
     * @param cluster           the new cluster snapshot, must not be null
     * @param unavailableTopics topics reported unavailable by this response
     * @param now               current time in ms, used for refresh/expiry bookkeeping
     */
    public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) {
        Objects.requireNonNull(cluster, "cluster should not be null");
        this.needUpdate = false;
        this.lastRefreshMs = now;
        this.lastSuccessfulRefreshMs = now;
        this.version += 1;
        if (topicExpiryEnabled) {
            // Topics added with the sentinel expiry get a real deadline now;
            // topics whose deadline has passed are dropped from tracking.
            for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) {
                Map.Entry<String, Long> entry = it.next();
                long expireMs = entry.getValue();
                if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE)
                    entry.setValue(now + TOPIC_EXPIRY_MS);
                else if (expireMs <= now) {
                    it.remove();
                    log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now);
                }
            }
        }
        for (Listener listener: listeners)
            listener.onMetadataUpdate(cluster, unavailableTopics);
        // NOTE(review): previousClusterId is read from the INCOMING cluster, so
        // the comparison below can never observe a change — it looks like this
        // was meant to read this.cluster before the reassignment; confirm
        // against upstream before relying on the "Cluster ID" log line.
        String previousClusterId = cluster.clusterResource().clusterId();
        if (this.needMetadataForAllTopics) {
            // needUpdate was already cleared above; cleared again here.
            this.needUpdate = false;
            this.cluster = getClusterForCurrentTopics(cluster);
        } else {
            this.cluster = cluster;
        }
        if (!cluster.isBootstrapConfigured()) {
            String clusterId = cluster.clusterResource().clusterId();
            if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId))
                log.info("Cluster ID: {}", cluster.clusterResource().clusterId());
            // Bootstrap clusters carry no real cluster resource, so listeners
            // are only notified for genuine metadata responses.
            clusterResourceListeners.onUpdate(cluster.clusterResource());
        }
        // Wake threads blocked in awaitUpdate() waiting for a newer version.
        notifyAll();
        log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster);
    }
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation);
    Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners);
    synchronized Cluster fetch();
    synchronized void add(String topic);
    synchronized long timeToNextUpdate(long nowMs);
    synchronized int requestUpdate();
    synchronized boolean updateRequested();
    synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs);
    synchronized void setTopics(Collection<String> topics);
    synchronized Set<String> topics();
    synchronized boolean containsTopic(String topic);
    synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now);
    synchronized void failedUpdate(long now);
    synchronized int version();
    synchronized long lastSuccessfulUpdate();
    boolean allowAutoTopicCreation();
    synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics);
    synchronized boolean needMetadataForAllTopics();
    synchronized void addListener(Listener listener);
    synchronized void removeListener(Listener listener);
    static final long TOPIC_EXPIRY_MS;
}
// Cluster-resource listeners must be skipped for bootstrap clusters and
// invoked (with the cluster id) for real metadata updates.
@Test
public void testClusterListenerGetsNotifiedOfUpdate() {
    long time = 0;
    MockClusterResourceListener mockClusterListener = new MockClusterResourceListener();
    ClusterResourceListeners listeners = new ClusterResourceListeners();
    listeners.maybeAdd(mockClusterListener);
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, false, listeners);
    String hostName = "www.example.com";
    // First update uses a bootstrap cluster — listener must NOT fire
    Cluster cluster = Cluster.bootstrap(Arrays.asList(new InetSocketAddress(hostName, 9002)));
    metadata.update(cluster, Collections.<String>emptySet(), time);
    assertFalse("ClusterResourceListener should not called when metadata is updated with bootstrap Cluster",
            MockClusterResourceListener.IS_ON_UPDATE_CALLED.get());
    // Second update is a genuine cluster — listener fires and sees the cluster id
    metadata.update(new Cluster(
            "dummy",
            Arrays.asList(new Node(0, "host1", 1000)),
            Arrays.asList(
                new PartitionInfo("topic", 0, null, null, null),
                new PartitionInfo("topic1", 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet()),
        Collections.<String>emptySet(), 100);
    assertEquals("MockClusterResourceListener did not get cluster metadata correctly",
            "dummy", mockClusterListener.clusterResource().clusterId());
    assertTrue("MockClusterResourceListener should be called when metadata is updated with non-bootstrap Cluster",
            MockClusterResourceListener.IS_ON_UPDATE_CALLED.get());
}
// NOTE(review): flattened extraction of RecordMetadata — only checksum()
// carries its body here.
RecordMetadata {
    /**
     * Deprecated record checksum. When constructed with a null checksum
     * (message format v2 has no per-record checksum), a partial checksum is
     * lazily computed from the timestamp and serialized key/value sizes, then
     * cached. NOTE(review): the lazy write is unsynchronized — confirm
     * RecordMetadata instances are not read concurrently.
     */
    @Deprecated
    public long checksum() {
        if (checksum == null)
            this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize);
        return this.checksum; // auto-unboxed; non-null after the lazy fill above
    }
    RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, Long checksum, int serializedKeySize, int serializedValueSize);
    @Deprecated RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, long checksum, int serializedKeySize, int serializedValueSize);
    long offset(); long timestamp();
    @Deprecated long checksum();
    int serializedKeySize(); int serializedValueSize(); String topic(); int partition();
    @Override String toString();
    static final int UNKNOWN_PARTITION;
}
// A null checksum should be lazily replaced by the partial checksum derived
// from the timestamp and the serialized key/value sizes.
@Test
@SuppressWarnings("deprecation")
public void testNullChecksum() {
    final long timestamp = 2340234L;
    final int keySize = 3;
    final int valueSize = 5;
    RecordMetadata metadata =
            new RecordMetadata(new TopicPartition("foo", 0), 15L, 3L, timestamp, null, keySize, valueSize);
    long expected = DefaultRecord.computePartialChecksum(timestamp, keySize, valueSize);
    assertEquals(expected, metadata.checksum());
}
// NOTE(review): flattened extraction of MockProducer — class header/type
// parameters truncated; only initTransactions() carries its body.
MockProducer implements Producer<K, V> {
    /**
     * Marks the mock producer as transactional. May be called at most once;
     * a second call fails just like the real producer would.
     *
     * @throws IllegalStateException if transactions were already initialized
     */
    @Override
    public void initTransactions() {
        verifyProducerState();
        if (this.transactionInitialized) {
            throw new IllegalStateException("MockProducer has already been initialized for transactions.");
        }
        this.transactionInitialized = true;
    }
    MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer();
    @Override void initTransactions();
    @Override void beginTransaction();
    @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);
    @Override void commitTransaction();
    @Override void abortTransaction();
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record);
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);
    synchronized void flush();
    List<PartitionInfo> partitionsFor(String topic);
    Map<MetricName, Metric> metrics();
    @Override void close();
    @Override void close(long timeout, TimeUnit timeUnit);
    boolean closed();
    void fenceProducer();
    boolean transactionInitialized();
    boolean transactionInFlight();
    boolean transactionCommitted();
    boolean transactionAborted();
    boolean flushed();
    boolean sentOffsets();
    long commitCount();
    synchronized List<ProducerRecord<K, V>> history();
    synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory();
    synchronized void clear();
    synchronized boolean completeNext();
    synchronized boolean errorNext(RuntimeException e);
}
// initTransactions() may only be called once; a second call must be rejected.
@Test
public void shouldThrowOnInitTransactionIfProducerAlreadyInitializedForTransactions() {
    producer.initTransactions();
    try {
        producer.initTransactions();
        fail("Should have thrown as producer is already initialized");
    } catch (IllegalStateException expected) {
        // expected: transactions can be initialized at most once
    }
}
// NOTE(review): flattened extraction of MockProducer — class header/type
// parameters truncated; only beginTransaction() carries its body.
MockProducer implements Producer<K, V> {
    /**
     * Opens a new transaction, resetting the committed/aborted/sent-offsets
     * flags from any previous transaction.
     *
     * @throws IllegalStateException   if transactions were never initialized
     * @throws ProducerFencedException if the producer has been fenced
     */
    @Override
    public void beginTransaction() throws ProducerFencedException {
        verifyProducerState();
        verifyTransactionsInitialized();
        this.transactionInFlight = true;
        this.transactionCommitted = false;
        this.transactionAborted = false;
        this.sentOffsets = false;
    }
    MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer();
    @Override void initTransactions();
    @Override void beginTransaction();
    @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);
    @Override void commitTransaction();
    @Override void abortTransaction();
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record);
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);
    synchronized void flush();
    List<PartitionInfo> partitionsFor(String topic);
    Map<MetricName, Metric> metrics();
    @Override void close();
    @Override void close(long timeout, TimeUnit timeUnit);
    boolean closed();
    void fenceProducer();
    boolean transactionInitialized();
    boolean transactionInFlight();
    boolean transactionCommitted();
    boolean transactionAborted();
    boolean flushed();
    boolean sentOffsets();
    long commitCount();
    synchronized List<ProducerRecord<K, V>> history();
    synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory();
    synchronized void clear();
    synchronized boolean completeNext();
    synchronized boolean errorNext(RuntimeException e);
}
// beginTransaction() requires a prior initTransactions() call.
// Same try/fail/catch style as the other transaction tests in this class.
@Test
public void shouldThrowOnBeginTransactionIfTransactionsNotInitialized() {
    try {
        producer.beginTransaction();
        fail("beginTransaction should fail when transactions were never initialized");
    } catch (IllegalStateException expected) {
        // expected
    }
}
// NOTE(review): flattened extraction of MockProducer — class header/type
// parameters truncated; only sendOffsetsToTransaction() carries its body.
MockProducer implements Producer<K, V> {
    /**
     * Buffers consumer-group offsets inside the current transaction; they only
     * become visible via consumerGroupOffsetsHistory() after commitTransaction().
     *
     * @param offsets         offsets to attach; an empty map is a no-op
     * @param consumerGroupId target consumer group, must not be null
     * @throws IllegalStateException   if transactions were never initialized
     * @throws ProducerFencedException if the producer has been fenced
     */
    @Override
    public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException {
        verifyProducerState();
        verifyTransactionsInitialized();
        // NOTE(review): despite the name this presumably verifies a transaction
        // IS in flight (sending offsets requires an open transaction) — confirm helper.
        verifyNoTransactionInFlight();
        // Named requireNonNull gives a clearer NPE than the bare form.
        Objects.requireNonNull(consumerGroupId, "consumerGroupId");
        if (offsets.isEmpty()) { // idiomatic emptiness check (was offsets.size() == 0)
            return;
        }
        // Merge into any offsets already buffered for this group in the
        // current transaction; later offsets win per partition.
        Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId);
        if (uncommittedOffsets == null) {
            uncommittedOffsets = new HashMap<>();
            this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets);
        }
        uncommittedOffsets.putAll(offsets);
        this.sentOffsets = true;
    }
    MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer();
    @Override void initTransactions();
    @Override void beginTransaction();
    @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);
    @Override void commitTransaction();
    @Override void abortTransaction();
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record);
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);
    synchronized void flush();
    List<PartitionInfo> partitionsFor(String topic);
    Map<MetricName, Metric> metrics();
    @Override void close();
    @Override void close(long timeout, TimeUnit timeUnit);
    boolean closed();
    void fenceProducer();
    boolean transactionInitialized();
    boolean transactionInFlight();
    boolean transactionCommitted();
    boolean transactionAborted();
    boolean flushed();
    boolean sentOffsets();
    long commitCount();
    synchronized List<ProducerRecord<K, V>> history();
    synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory();
    synchronized void clear();
    synchronized boolean completeNext();
    synchronized boolean errorNext(RuntimeException e);
}
// sendOffsetsToTransaction() must fail with IllegalStateException (not NPE on
// the null arguments) when transactions were never initialized.
@Test
public void shouldThrowOnSendOffsetsToTransactionIfTransactionsNotInitialized() {
    try {
        producer.sendOffsetsToTransaction(null, null);
        fail("sendOffsetsToTransaction should fail when transactions were never initialized");
    } catch (IllegalStateException expected) {
        // expected
    }
}
// NOTE(review): flattened extraction of MockProducer — class header/type
// parameters truncated; only commitTransaction() carries its body.
MockProducer implements Producer<K, V> {
    /**
     * Commits the open transaction: flushes pending sends, publishes the
     * transaction's buffered records into history() and its buffered offsets
     * into consumerGroupOffsetsHistory(), then resets the per-transaction state.
     *
     * @throws IllegalStateException   if transactions were never initialized
     * @throws ProducerFencedException if the producer has been fenced
     */
    @Override
    public void commitTransaction() throws ProducerFencedException {
        verifyProducerState();
        verifyTransactionsInitialized();
        // NOTE(review): despite the name this presumably verifies a transaction
        // IS in flight (commit requires an open transaction) — confirm helper.
        verifyNoTransactionInFlight();
        flush();
        this.sent.addAll(this.uncommittedSends);
        if (!this.uncommittedConsumerGroupOffsets.isEmpty())
            this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets);
        // Replace (not clear) the offsets map so the reference just added to
        // consumerGroupOffsets stays intact.
        this.uncommittedSends.clear();
        this.uncommittedConsumerGroupOffsets = new HashMap<>();
        this.transactionCommitted = true;
        this.transactionAborted = false;
        this.transactionInFlight = false;
        ++this.commitCount;
    }
    MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer();
    @Override void initTransactions();
    @Override void beginTransaction();
    @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);
    @Override void commitTransaction();
    @Override void abortTransaction();
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record);
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);
    synchronized void flush();
    List<PartitionInfo> partitionsFor(String topic);
    Map<MetricName, Metric> metrics();
    @Override void close();
    @Override void close(long timeout, TimeUnit timeUnit);
    boolean closed();
    void fenceProducer();
    boolean transactionInitialized();
    boolean transactionInFlight();
    boolean transactionCommitted();
    boolean transactionAborted();
    boolean flushed();
    boolean sentOffsets();
    long commitCount();
    synchronized List<ProducerRecord<K, V>> history();
    synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory();
    synchronized void clear();
    synchronized boolean completeNext();
    synchronized boolean errorNext(RuntimeException e);
}
// commitTransaction() requires a prior initTransactions() call.
@Test
public void shouldThrowOnCommitIfTransactionsNotInitialized() {
    try {
        producer.commitTransaction();
        fail("commitTransaction should fail when transactions were never initialized");
    } catch (IllegalStateException expected) {
        // expected
    }
}
// NOTE(review): flattened extraction of MockProducer — class header/type
// parameters truncated; only abortTransaction() carries its body.
MockProducer implements Producer<K, V> {
    /**
     * Aborts the open transaction: discards all buffered sends and offsets so
     * they never reach history()/consumerGroupOffsetsHistory(), then resets the
     * per-transaction flags.
     *
     * @throws IllegalStateException   if transactions were never initialized
     * @throws ProducerFencedException if the producer has been fenced
     */
    @Override
    public void abortTransaction() throws ProducerFencedException {
        verifyProducerState();
        verifyTransactionsInitialized();
        // NOTE(review): despite the name this presumably verifies a transaction
        // IS in flight (abort requires an open transaction) — confirm helper.
        verifyNoTransactionInFlight();
        flush();
        this.uncommittedSends.clear();
        this.uncommittedConsumerGroupOffsets.clear();
        this.transactionCommitted = false;
        this.transactionAborted = true;
        this.transactionInFlight = false;
    }
    MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer);
    MockProducer();
    @Override void initTransactions();
    @Override void beginTransaction();
    @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);
    @Override void commitTransaction();
    @Override void abortTransaction();
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record);
    @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);
    synchronized void flush();
    List<PartitionInfo> partitionsFor(String topic);
    Map<MetricName, Metric> metrics();
    @Override void close();
    @Override void close(long timeout, TimeUnit timeUnit);
    boolean closed();
    void fenceProducer();
    boolean transactionInitialized();
    boolean transactionInFlight();
    boolean transactionCommitted();
    boolean transactionAborted();
    boolean flushed();
    boolean sentOffsets();
    long commitCount();
    synchronized List<ProducerRecord<K, V>> history();
    synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory();
    synchronized void clear();
    synchronized boolean completeNext();
    synchronized boolean errorNext(RuntimeException e);
}
// Aborting before initTransactions() must be rejected with IllegalStateException.
// Written in the explicit try/fail/catch style used elsewhere in this class.
@Test
public void shouldThrowOnAbortIfTransactionsNotInitialized() {
    try {
        producer.abortTransaction();
        fail("Should have thrown IllegalStateException as transactions were never initialized");
    } catch (IllegalStateException expected) {
        // expected: abort without initTransactions() is illegal
    }
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword, generic parameter declaration and all
// method bodies except fenceProducer() were stripped, so this fragment is not
// compilable as-is. Code left byte-identical; do not edit by hand.
// fenceProducer(): validates state then flips the producerFenced flag so later
// transactional calls can simulate a fenced producer.
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
// Fencing before initTransactions() must be rejected with IllegalStateException.
// Written in the explicit try/fail/catch style used elsewhere in this class.
@Test
public void shouldThrowFenceProducerIfTransactionsNotInitialized() {
    try {
        producer.fenceProducer();
        fail("Should have thrown IllegalStateException as transactions were never initialized");
    } catch (IllegalStateException expected) {
        // expected: fencing without initTransactions() is illegal
    }
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword, generic parameter declaration and all
// method bodies except close() were stripped, so this fragment is not
// compilable as-is. Code left byte-identical; do not edit by hand.
// close(): delegates to close(long, TimeUnit) with a zero timeout and null unit.
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
// A second close() on an already-closed producer must be rejected.
@Test
public void shouldThrowOnCloseIfProducerIsClosed() {
    producer.close();
    try {
        producer.close();
        fail("Should have thrown as producer is already closed");
    } catch (IllegalStateException expected) {
        // expected: double close is rejected; nothing further to verify
    }
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword, generic parameter declaration and all
// method bodies except flushed() were stripped, so this fragment is not
// compilable as-is. Code left byte-identical; do not edit by hand.
// flushed(): true when no pending completions remain to be flushed.
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
// A producer that has never buffered a record reports itself as flushed.
@Test
public void shouldBeFlushedIfNoBufferedRecords() {
    final boolean flushedWithNothingPending = producer.flushed();
    assertTrue(flushedWithNothingPending);
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent tests. The `class` keyword and all method bodies except
// failIfNotReadyForSend() were stripped, so this fragment is not compilable
// as-is. Code left byte-identical; do not edit by hand.
// failIfNotReadyForSend(): throws KafkaException if a prior request failed;
// for transactional managers additionally requires a producer id (via
// initTransactions) and an IN_TRANSACTION state before a send is allowed.
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
// Transactional manager without a producer id must refuse sends.
@Test(expected = IllegalStateException.class)
public void testFailIfNotReadyForSendNoProducerId() {
    transactionManager.failIfNotReadyForSend();
}

// A purely idempotent (non-transactional) manager never blocks a send.
@Test
public void testFailIfNotReadyForSendIdempotentProducer() {
    final TransactionManager idempotentManager = new TransactionManager();
    idempotentManager.failIfNotReadyForSend();
}

// Initialized but not inside beginTransaction(): sends are still illegal.
@Test(expected = IllegalStateException.class)
public void testFailIfNotReadyForSendNoOngoingTransaction() {
    final long producerId = 13131L;
    final short producerEpoch = 1;
    doInitTransactions(producerId, producerEpoch);
    transactionManager.failIfNotReadyForSend();
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent tests. The `class` keyword and all method bodies except
// maybeAddPartitionToTransaction() were stripped, so this fragment is not
// compilable as-is. Code left byte-identical; do not edit by hand.
// maybeAddPartitionToTransaction(): checks send readiness, then queues the
// partition in newPartitionsInTransaction unless it is already added/pending.
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
// testMaybeAddPartitionToTransaction: partition is pending after the first
// add, becomes "added" once the AddPartitionsToTxn round-trip completes via
// sender.run(), and a repeated add is then a no-op (no new pending work).
// The two expected-exception tests pin that adding a partition is illegal
// before initTransactions() and before beginTransaction() respectively.
@Test public void testMaybeAddPartitionToTransaction() { long pid = 13131L; short epoch = 1; TopicPartition partition = new TopicPartition("foo", 0); doInitTransactions(pid, epoch); transactionManager.beginTransaction(); transactionManager.maybeAddPartitionToTransaction(partition); assertTrue(transactionManager.hasPartitionsToAdd()); assertFalse(transactionManager.isPartitionAdded(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.NONE); sender.run(time.milliseconds()); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); transactionManager.maybeAddPartitionToTransaction(partition); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); } @Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeInitTransactions() { transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); } @Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeBeginTransaction() { long pid = 13131L; short epoch = 1; doInitTransactions(pid, epoch); transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); }
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// incrementSequenceNumber() were stripped, so this fragment is not compilable
// as-is. Code left byte-identical; do not edit by hand.
// incrementSequenceNumber(): bumps the tracked sequence for a partition,
// throwing IllegalStateException when the partition has no current sequence.
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
// Incrementing the sequence for a partition that has no current sequence
// must fail fast with IllegalStateException.
@Test(expected = IllegalStateException.class)
public void testInvalidSequenceIncrement() {
    final TransactionManager freshManager = new TransactionManager();
    freshManager.incrementSequenceNumber(tp0, 3333);
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// beginTransaction() were stripped, so this fragment is not compilable as-is.
// Code left byte-identical; do not edit by hand.
// beginTransaction(): requires a transactional manager, surfaces any pending
// error, then transitions the state machine to IN_TRANSACTION.
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
// Appends a record inside a transaction WITHOUT adding its partition to the
// transaction, then checks that drain() yields an empty batch list for the
// node — i.e. nothing is drained for partitions not registered with the
// transaction.
// NOTE(review): the method name says "RaiseError" but the visible assertions
// only check for an empty drain; presumably error raising happens inside
// drain/accumulator bookkeeping — confirm the intended name against upstream.
@Test public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException { final long pid = 13131L; final short epoch = 1; doInitTransactions(pid, epoch); transactionManager.beginTransaction(); accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT); Node node1 = new Node(0, "localhost", 1111); PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null); Cluster cluster = new Cluster(null, Arrays.asList(node1), Arrays.asList(part1), Collections.<String>emptySet(), Collections.<String>emptySet()); Set<Node> nodes = new HashSet<>(); nodes.add(node1); Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(drainedBatches.containsKey(node1.id())); assertTrue(drainedBatches.get(node1.id()).isEmpty()); }
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except allocate()
// were stripped (the fragment even spans a line break mid-statement), so it
// is not compilable as-is. Code left byte-identical; do not edit by hand.
// allocate(): hands out a pooled buffer immediately when one fits; otherwise
// frees pooled buffers to satisfy the request, or blocks on a Condition until
// enough memory is deallocated or maxTimeToBlockMs elapses (TimeoutException).
// On exit it signals the next waiter if memory remains, then unlocks.
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) 
this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
// Fill part of the pool, then request more than is currently free: the second
// allocation must block until the held buffer is returned, and complete
// promptly afterwards.
@Test
public void testDelayedAllocation() throws Exception {
    final BufferPool pool = new BufferPool(5 * 1024, 1024, metrics, time, metricGroup);
    final ByteBuffer held = pool.allocate(1024, maxBlockTimeMs);
    final CountDownLatch deallocTrigger = asyncDeallocate(pool, held);
    final CountDownLatch allocDone = asyncAllocate(pool, 5 * 1024);
    // Latch still at 1 => the 5K allocation is blocked waiting on memory.
    assertEquals("Allocation shouldn't have happened yet, waiting on memory.", 1L, allocDone.getCount());
    deallocTrigger.countDown();
    assertTrue("Allocation should succeed soon after de-allocation", allocDone.await(1, TimeUnit.SECONDS));
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// availableMemory() were stripped, so this fragment is not compilable as-is.
// Code left byte-identical; do not edit by hand.
// availableMemory(): unallocated memory plus the pooled free-list bytes,
// computed under the pool lock.
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
// Hammer the pool from many threads with only enough memory for half of them
// at once; every thread must finish all iterations successfully and all
// memory must be back in the pool afterwards.
@Test
public void testStressfulSituation() throws Exception {
    int numThreads = 10;
    final int iterations = 50000;
    final int poolableSize = 1024;
    final long totalMemory = numThreads / 2 * poolableSize;
    final BufferPool pool = new BufferPool(totalMemory, poolableSize, metrics, time, metricGroup);
    // Diamond operator (already used elsewhere in this file) and presized,
    // instead of the pre-Java-7 new ArrayList<StressTestThread>().
    List<StressTestThread> threads = new ArrayList<>(numThreads);
    for (int i = 0; i < numThreads; i++)
        threads.add(new StressTestThread(pool, iterations));
    for (StressTestThread thread : threads)
        thread.start();
    for (StressTestThread thread : threads)
        thread.join();
    for (StressTestThread thread : threads)
        assertTrue("Thread should have completed all iterations successfully.", thread.success.get());
    // No buffer may leak: the full budget is available again.
    assertEquals(totalMemory, pool.availableMemory());
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// abortIncompleteBatches() were stripped, so this fragment is not compilable
// as-is. Code left byte-identical; do not edit by hand.
// abortIncompleteBatches(): repeatedly aborts batches while appends are still
// in progress, aborts once more to catch stragglers, then clears the queues.
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
// Buffer records with an effectively infinite linger, drain only part of
// them, then verify abortIncompleteBatches() fails every record's callback
// (drained and undrained alike) with the forced-close message and leaves the
// accumulator empty.
@Test
public void testAbortIncompleteBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = new RecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024,
            CompressionType.NONE, lingerMs, 100L, metrics, time, new ApiVersions(), null);
    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            // assertEquals is null-safe and reports expected vs. actual on
            // failure, unlike the original assertTrue(msg.equals(...)).
            assertEquals("Producer is closed forcefully.", exception.getMessage());
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    // Only a strict subset of the records may have been drained.
    assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords);
    accum.abortIncompleteBatches();
    assertEquals(numRecords, numExceptionReceivedInCallback.get());
    assertFalse(accum.hasUndrained());
    assertFalse(accum.hasIncomplete());
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// expiredBatches() were stripped, so this fragment is not compilable as-is.
// Code left byte-identical; do not edit by hand.
// expiredBatches(): for each non-muted partition, walks batches oldest-first,
// removing and returning those that maybeExpire() declares expired; stops at
// the first non-expired batch per deque (later batches are newer).
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
// Exercises batch expiry end-to-end: (1) a full batch expires after
// requestTimeout but NOT while its partition is muted; (2) an in-flight-style
// batch that is re-enqueued only expires once requestTimeout + retryBackoffMs
// has fully elapsed (one extra millisecond is needed), again respecting
// muting. Left byte-identical: the assertions are tightly coupled to the
// MockTime sleeps and mute/unmute ordering.
@Test public void testExpiredBatches() throws InterruptedException { long retryBackoffMs = 100L; long lingerMs = 3000L; int requestTimeout = 60; int batchSize = 1025; RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time, new ApiVersions(), null); int appends = expectedNumAppends(batchSize); for (int i = 0; i < appends; i++) { accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); } accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); Set<Node> readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); time.sleep(requestTimeout + 1); accum.mutePartition(tp1); List<ProducerBatch> expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired", 1, expiredBatches.size()); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); time.sleep(lingerMs); assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); time.sleep(requestTimeout + 1); accum.mutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when metadata is still available and partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired when the partition is not muted", 
1, expiredBatches.size()); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); time.sleep(lingerMs); readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("There should be only one batch.", drained.get(node1.id()).size(), 1); time.sleep(1000L); accum.reenqueue(drained.get(node1.id()).get(0), time.milliseconds()); time.sleep(requestTimeout + retryBackoffMs); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired.", 0, expiredBatches.size()); time.sleep(1L); accum.mutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired when the partition is not muted.", 1, expiredBatches.size()); }
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent tests. The `class` keyword and all method bodies except append()
// were stripped (the fragment spans a line break mid-signature), so it is not
// compilable as-is. Code left byte-identical; do not edit by hand.
// append(): tries to append to the partition's last batch under the deque
// lock; on failure allocates a new buffer (may block up to maxTimeToBlock),
// re-checks under the lock (another thread may have created a batch), and
// otherwise creates a new ProducerBatch. The finally block returns the buffer
// if ownership was not transferred and decrements appendsInProgress.
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, 
long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
// A broker advertising only produce v0-v2 cannot support the idempotent
// producer's record format, so an append through a TransactionManager-backed
// accumulator must throw UnsupportedVersionException.
@Test(expected = UnsupportedVersionException.class)
public void testIdempotenceWithOldMagic() throws InterruptedException {
    ApiVersions apiVersions = new ApiVersions();
    int batchSize = 1025;
    apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize,
            CompressionType.NONE, 10, 100L, metrics, time, apiVersions, new TransactionManager());
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0);
}

// Sweeps the ratio of well- to poorly-compressing payloads and asserts that
// fewer than 10% of batches ever need splitting; the seed is reported in the
// failure message so a failing run can be reproduced.
@Test
public void testSplitFrequency() throws InterruptedException {
    long seed = System.currentTimeMillis();
    // Seed via the constructor instead of the redundant
    // `new Random(); random.setSeed(seed)` pair — identical behavior.
    final Random random = new Random(seed);
    final int batchSize = 1024;
    final int numMessages = 1000;
    RecordAccumulator accum = new RecordAccumulator(batchSize, 3 * 1024, CompressionType.GZIP, 10, 100L,
            metrics, time, new ApiVersions(), null);
    for (int goodCompRatioPercentage = 1; goodCompRatioPercentage < 100; goodCompRatioPercentage++) {
        int numSplit = 0;
        int numBatches = 0;
        CompressionRatioEstimator.resetEstimation(topic);
        for (int i = 0; i < numMessages; i++) {
            int dice = random.nextInt(100);
            byte[] value = (dice < goodCompRatioPercentage) ?
                    bytesWithGoodCompression(random) : bytesWithPoorCompression(random, 100);
            accum.append(tp1, 0L, null, value, Record.EMPTY_HEADERS, null, 0);
            BatchDrainedResult result = completeOrSplitBatches(accum, batchSize);
            numSplit += result.numSplit;
            numBatches += result.numBatches;
        }
        time.sleep(10);
        BatchDrainedResult result = completeOrSplitBatches(accum, batchSize);
        numSplit += result.numSplit;
        numBatches += result.numBatches;
        assertTrue(String.format("Total num batches = %d, split batches = %d, more than 10%% of the batch splits. "
                + "Random seed is " + seed, numBatches, numSplit), (double) numSplit / numBatches < 0.1f);
    }
}
// NOTE(review): dataset extraction artifact — class-under-test context for the
// adjacent test. The `class` keyword and all method bodies except
// splitAndReenqueue() were stripped, so this fragment is not compilable as-is.
// Code left byte-identical; do not edit by hand.
// splitAndReenqueue(): records the observed compression ratio, splits the
// oversized batch, and re-enqueues the pieces at the FRONT of the partition
// deque (pollLast + addFirst preserves original record order).
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
@Test public void testSplitAndReenqueue() throws ExecutionException, InterruptedException { long now = time.milliseconds(); RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10, 100L, metrics, time, new ApiVersions(), null); ByteBuffer buffer = ByteBuffer.allocate(4096); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); ProducerBatch batch = new ProducerBatch(tp1, builder, now, true); byte[] value = new byte[1024]; final AtomicInteger acked = new AtomicInteger(0); Callback cb = new Callback() { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { acked.incrementAndGet(); } }; Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); assertNotNull(future1); assertNotNull(future2); batch.close(); accum.reenqueue(batch, now); time.sleep(101L); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertTrue("The batch should be ready", result.readyNodes.size() > 0); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("Only node1 should be drained", 1, drained.size()); assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size()); accum.splitAndReenqueue(drained.get(node1.id()).get(0)); time.sleep(101L); drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertFalse(drained.isEmpty()); assertFalse(drained.get(node1.id()).isEmpty()); drained.get(node1.id()).get(0).done(acked.get(), 100L, null); assertEquals("The first message should have been acked.", 1, acked.get()); assertTrue(future1.isDone()); assertEquals(0, future1.get().offset()); drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, 
time.milliseconds()); assertFalse(drained.isEmpty()); assertFalse(drained.get(node1.id()).isEmpty()); drained.get(node1.id()).get(0).done(acked.get(), 100L, null); assertEquals("Both message should have been acked.", 2, acked.get()); assertTrue(future2.isDone()); assertEquals(1, future2.get().offset()); }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
@Test public void testChecksumNullForMagicV2() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); assertNotNull(future); assertNull(future.checksumOrNull()); } @Test public void testAppendedChecksumMagicV0AndV1() { for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now); byte[] key = "hi".getBytes(); byte[] value = "there".getBytes(); FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now); assertNotNull(future); byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME); long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value); assertEquals(expectedChecksum, future.checksumOrNull().longValue()); } }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
@Test public void testLargeLingerOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 100L, now - 2L, Long.MAX_VALUE, false)); } @Test public void testLargeFullOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 10240L, now - 2L, 10240L, true)); }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }
@Test public void testKeyPartitionIsStable() { int partition = partitioner.partition("test", null, keyBytes, null, null, cluster); assertEquals("Same key should yield same partition", partition, partitioner.partition("test", null, keyBytes, null, null, cluster)); } @Test public void testRoundRobinWithUnavailablePartitions() { int countForPart0 = 0; int countForPart2 = 0; for (int i = 1; i <= 100; i++) { int part = partitioner.partition("test", null, null, null, null, cluster); assertTrue("We should never choose a leader-less node in round robin", part == 0 || part == 2); if (part == 0) countForPart0++; else countForPart2++; } assertEquals("The distribution between two available partitions should be even", countForPart0, countForPart2); } @Test public void testRoundRobin() throws InterruptedException { final String topicA = "topicA"; final String topicB = "topicB"; List<PartitionInfo> allPartitions = asList(new PartitionInfo(topicA, 0, node0, nodes, nodes), new PartitionInfo(topicA, 1, node1, nodes, nodes), new PartitionInfo(topicA, 2, node2, nodes, nodes), new PartitionInfo(topicB, 0, node0, nodes, nodes) ); Cluster testCluster = new Cluster("clusterId", asList(node0, node1, node2), allPartitions, Collections.<String>emptySet(), Collections.<String>emptySet()); final Map<Integer, Integer> partitionCount = new HashMap<>(); for (int i = 0; i < 30; ++i) { int partition = partitioner.partition(topicA, null, null, null, null, testCluster); Integer count = partitionCount.get(partition); if (null == count) count = 0; partitionCount.put(partition, count + 1); if (i % 5 == 0) { partitioner.partition(topicB, null, null, null, null, testCluster); } } assertEquals(10, (int) partitionCount.get(0)); assertEquals(10, (int) partitionCount.get(1)); assertEquals(10, (int) partitionCount.get(2)); }
Sender implements Runnable { public void run() { log.debug("Starting Kafka producer I/O thread."); while (running) { try { run(time.milliseconds()); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } log.debug("Beginning shutdown of Kafka producer I/O thread, sending remaining records."); while (!forceClose && (this.accumulator.hasUndrained() || this.client.inFlightRequestCount() > 0)) { try { run(time.milliseconds()); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } if (forceClose) { this.accumulator.abortIncompleteBatches(); } try { this.client.close(); } catch (Exception e) { log.error("Failed to close network client", e); } log.debug("Shutdown of Kafka producer I/O thread has completed."); } Sender(KafkaClient client, Metadata metadata, RecordAccumulator accumulator, boolean guaranteeMessageOrder, int maxRequestSize, short acks, int retries, Metrics metrics, Time time, int requestTimeout, long retryBackoffMs, TransactionManager transactionManager, ApiVersions apiVersions); void run(); void initiateClose(); void forceClose(); void wakeup(); static Sensor throttleTimeSensor(Metrics metrics); }
@Test public void testSimple() throws Exception { long offset = 0; Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertEquals("We should have a single produce request in flight.", 1, client.inFlightRequestCount()); assertTrue(client.hasInFlightRequests()); client.respond(produceResponse(tp0, offset, Errors.NONE, 0)); sender.run(time.milliseconds()); assertEquals("All requests completed.", 0, client.inFlightRequestCount()); assertFalse(client.hasInFlightRequests()); sender.run(time.milliseconds()); assertTrue("Request should be completed", future.isDone()); assertEquals(offset, future.get().offset()); } @Test public void testMessageFormatDownConversion() throws Exception { long offset = 0; apiVersions.update("0", NodeApiVersions.create()); Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; apiVersions.update("0", NodeApiVersions.create(Collections.singleton( new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2)))); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { ProduceRequest request = (ProduceRequest) body; if (request.version() != 2) return false; MemoryRecords records = request.partitionRecordsOrFail().get(tp0); return records != null && records.sizeInBytes() > 0 && records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1); } }, produceResponse(tp0, offset, Errors.NONE, 0)); sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertTrue("Request should be completed", future.isDone()); assertEquals(offset, future.get().offset()); } @Test public void testDownConversionForMismatchedMagicValues() throws Exception { long offset = 0; apiVersions.update("0", NodeApiVersions.create()); Future<RecordMetadata> future1 = 
accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; apiVersions.update("0", NodeApiVersions.create(Collections.singleton( new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2)))); Future<RecordMetadata> future2 = accumulator.append(tp1, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; apiVersions.update("0", NodeApiVersions.create()); ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(Errors.NONE, offset, RecordBatch.NO_TIMESTAMP); Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = new HashMap<>(); partResp.put(tp0, resp); partResp.put(tp1, resp); ProduceResponse produceResponse = new ProduceResponse(partResp, 0); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { ProduceRequest request = (ProduceRequest) body; if (request.version() != 2) return false; Map<TopicPartition, MemoryRecords> recordsMap = request.partitionRecordsOrFail(); if (recordsMap.size() != 2) return false; for (MemoryRecords records : recordsMap.values()) { if (records == null || records.sizeInBytes() == 0 || !records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1)) return false; } return true; } }, produceResponse); sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertTrue("Request should be completed", future1.isDone()); assertTrue("Request should be completed", future2.isDone()); } @Test public void testRetries() throws Exception { int maxRetries = 1; Metrics m = new Metrics(); try { Sender sender = new Sender(client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT, 50, null, apiVersions); Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); sender.run(time.milliseconds()); String id = 
client.requests().peek().destination(); Node node = new Node(Integer.parseInt(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.hasInFlightRequests()); assertTrue("Client ready status should be true", client.isReady(node, 0L)); client.disconnect(id); assertEquals(0, client.inFlightRequestCount()); assertFalse(client.hasInFlightRequests()); assertFalse("Client ready status should be false", client.isReady(node, 0L)); sender.run(time.milliseconds()); sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.hasInFlightRequests()); long offset = 0; client.respond(produceResponse(tp0, offset, Errors.NONE, 0)); sender.run(time.milliseconds()); assertTrue("Request should have retried and completed", future.isDone()); assertEquals(offset, future.get().offset()); future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); for (int i = 0; i < maxRetries + 1; i++) { client.disconnect(client.requests().peek().destination()); sender.run(time.milliseconds()); sender.run(time.milliseconds()); sender.run(time.milliseconds()); } sender.run(time.milliseconds()); completedWithError(future, Errors.NETWORK_EXCEPTION); } finally { m.close(); } } @Test public void testSendInOrder() throws Exception { int maxRetries = 1; Metrics m = new Metrics(); try { Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT, 50, null, apiVersions); Cluster cluster1 = TestUtils.clusterWith(2, "test", 2); metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds()); TopicPartition tp2 = new TopicPartition("test", 1); accumulator.append(tp2, 0L, "key1".getBytes(), "value1".getBytes(), null, null, MAX_BLOCK_TIMEOUT); sender.run(time.milliseconds()); sender.run(time.milliseconds()); String id = 
client.requests().peek().destination(); assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey()); Node node = new Node(Integer.parseInt(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.hasInFlightRequests()); assertTrue("Client ready status should be true", client.isReady(node, 0L)); time.sleep(900); accumulator.append(tp2, 0L, "key2".getBytes(), "value2".getBytes(), null, null, MAX_BLOCK_TIMEOUT); Cluster cluster2 = TestUtils.singletonCluster("test", 2); metadata.update(cluster2, Collections.<String>emptySet(), time.milliseconds()); sender.run(time.milliseconds()); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.hasInFlightRequests()); } finally { m.close(); } } @Test public void testAppendInExpiryCallback() throws InterruptedException { int messagesPerBatch = 10; final AtomicInteger expiryCallbackCount = new AtomicInteger(0); final AtomicReference<Exception> unexpectedException = new AtomicReference<>(); final byte[] key = "key".getBytes(); final byte[] value = "value".getBytes(); final long maxBlockTimeMs = 1000; Callback callback = new Callback() { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { if (exception instanceof TimeoutException) { expiryCallbackCount.incrementAndGet(); try { accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); } catch (InterruptedException e) { throw new RuntimeException("Unexpected interruption", e); } } else if (exception != null) unexpectedException.compareAndSet(null, exception); } }; for (int i = 0; i < messagesPerBatch; i++) accumulator.append(tp1, 0L, key, value, null, callback, maxBlockTimeMs); time.sleep(10000); Node clusterNode = this.cluster.nodes().get(0); client.disconnect(clusterNode.idString()); client.blackout(clusterNode, 100); sender.run(time.milliseconds()); assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get()); 
assertNull("Unexpected exception", unexpectedException.get()); assertTrue(accumulator.batches().containsKey(tp1)); assertEquals(1, accumulator.batches().get(tp1).size()); assertEquals(messagesPerBatch, accumulator.batches().get(tp1).peekFirst().recordCount); } @Test public void testMetadataTopicExpiry() throws Exception { long offset = 0; metadata.update(Cluster.empty(), Collections.<String>emptySet(), time.milliseconds()); Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); assertTrue("Topic not added to metadata", metadata.containsTopic(tp0.topic())); metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds()); sender.run(time.milliseconds()); client.respond(produceResponse(tp0, offset++, Errors.NONE, 0)); sender.run(time.milliseconds()); assertEquals("Request completed.", 0, client.inFlightRequestCount()); assertFalse(client.hasInFlightRequests()); sender.run(time.milliseconds()); assertTrue("Request should be completed", future.isDone()); assertTrue("Topic not retained in metadata list", metadata.containsTopic(tp0.topic())); time.sleep(Metadata.TOPIC_EXPIRY_MS); metadata.update(Cluster.empty(), Collections.<String>emptySet(), time.milliseconds()); assertFalse("Unused topic has not been expired", metadata.containsTopic(tp0.topic())); future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); assertTrue("Topic not added to metadata", metadata.containsTopic(tp0.topic())); metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds()); sender.run(time.milliseconds()); client.respond(produceResponse(tp0, offset++, Errors.NONE, 0)); sender.run(time.milliseconds()); assertEquals("Request completed.", 0, client.inFlightRequestCount()); assertFalse(client.hasInFlightRequests()); 
sender.run(time.milliseconds()); assertTrue("Request should be completed", future.isDone()); } @Test public void testClusterAuthorizationExceptionInProduceRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); client.setNode(new Node(1, "localhost", 33343)); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent(); } }, produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0)); sender.run(time.milliseconds()); assertTrue(future.isDone()); try { future.get(); fail("Future should have raised ClusterAuthorizationException"); } catch (ExecutionException e) { assertTrue(e.getCause() instanceof ClusterAuthorizationException); } assertSendFailure(ClusterAuthorizationException.class); } @Test public void testSequenceNumberIncrement() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0)); setupWithTransactionState(transactionManager); client.setNode(new Node(1, "localhost", 33343)); int maxRetries = 10; Metrics m = new Metrics(); Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions); Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; client.prepareResponse(new 
MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { if (body instanceof ProduceRequest) { ProduceRequest request = (ProduceRequest) body; MemoryRecords records = request.partitionRecordsOrFail().get(tp0); Iterator<MutableRecordBatch> batchIterator = records.batches().iterator(); assertTrue(batchIterator.hasNext()); RecordBatch batch = batchIterator.next(); assertFalse(batchIterator.hasNext()); assertEquals(0, batch.baseSequence()); assertEquals(producerId, batch.producerId()); assertEquals(0, batch.producerEpoch()); return true; } return false; } }, produceResponse(tp0, 0, Errors.NONE, 0)); sender.run(time.milliseconds()); sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertTrue(responseFuture.isDone()); assertEquals((long) transactionManager.sequenceNumber(tp0), 1L); } @Test public void testAbortRetryWhenProducerIdChanges() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0)); setupWithTransactionState(transactionManager); client.setNode(new Node(1, "localhost", 33343)); int maxRetries = 10; Metrics m = new Metrics(); Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions); Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); sender.run(time.milliseconds()); String id = client.requests().peek().destination(); Node node = new Node(Integer.valueOf(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue("Client ready status should be true", client.isReady(node, 0L)); client.disconnect(id); assertEquals(0, client.inFlightRequestCount()); assertFalse("Client ready status 
should be false", client.isReady(node, 0L)); transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId + 1, (short) 0)); sender.run(time.milliseconds()); sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertEquals("Expected requests to be aborted after pid change", 0, client.inFlightRequestCount()); KafkaMetric recordErrors = m.metrics().get(m.metricName("record-error-rate", METRIC_GROUP, "")); assertTrue("Expected non-zero value for record send errors", recordErrors.value() > 0); assertTrue(responseFuture.isDone()); assertEquals((long) transactionManager.sequenceNumber(tp0), 0L); } @Test public void testResetWhenOutOfOrderSequenceReceived() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0)); setupWithTransactionState(transactionManager); client.setNode(new Node(1, "localhost", 33343)); int maxRetries = 10; Metrics m = new Metrics(); Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions); Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); sender.run(time.milliseconds()); assertEquals(1, client.inFlightRequestCount()); client.respond(produceResponse(tp0, 0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, 0)); sender.run(time.milliseconds()); assertTrue(responseFuture.isDone()); assertFalse("Expected transaction state to be reset upon receiving an OutOfOrderSequenceException", transactionManager.hasProducerId()); } @Test public void testTransactionalSplitBatchAndSend() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); TopicPartition tp = new 
TopicPartition("testSplitBatchAndSend", 1); TransactionManager txnManager = new TransactionManager("testSplitBatchAndSend", 60000, 100); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); txnManager.beginTransaction(); txnManager.maybeAddPartitionToTransaction(tp); client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE))); sender.run(time.milliseconds()); testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp); }
Sender implements Runnable { public static Sensor throttleTimeSensor(Metrics metrics) { String metricGrpName = SenderMetrics.METRIC_GROUP_NAME; Sensor produceThrottleTimeSensor = metrics.sensor("produce-throttle-time"); produceThrottleTimeSensor.add(metrics.metricName("produce-throttle-time-avg", metricGrpName, "The average throttle time in ms"), new Avg()); produceThrottleTimeSensor.add(metrics.metricName("produce-throttle-time-max", metricGrpName, "The maximum throttle time in ms"), new Max()); return produceThrottleTimeSensor; } Sender(KafkaClient client, Metadata metadata, RecordAccumulator accumulator, boolean guaranteeMessageOrder, int maxRequestSize, short acks, int retries, Metrics metrics, Time time, int requestTimeout, long retryBackoffMs, TransactionManager transactionManager, ApiVersions apiVersions); void run(); void initiateClose(); void forceClose(); void wakeup(); static Sensor throttleTimeSensor(Metrics metrics); }
@Test public void testQuotaMetrics() throws Exception { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(metrics); Cluster cluster = TestUtils.singletonCluster("test", 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor); short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion(); ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0)); selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer))); while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds()); selector.clear(); for (int i = 1; i <= 3; i++) { int throttleTimeMs = 100 * i; ProduceRequest.Builder builder = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap()); ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null); client.send(request, time.milliseconds()); client.poll(1, time.milliseconds()); ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs); buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId())); selector.completeReceive(new NetworkReceive(node.idString(), buffer)); client.poll(1, time.milliseconds()); selector.clear(); } Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric avgMetric = allMetrics.get(metrics.metricName("produce-throttle-time-avg", METRIC_GROUP, "")); KafkaMetric maxMetric = allMetrics.get(metrics.metricName("produce-throttle-time-max", METRIC_GROUP, "")); assertEquals(250, avgMetric.value(), EPS); assertEquals(400, maxMetric.value(), EPS); client.close(); }
KafkaProducer implements Producer<K, V> { @Override public void close() { close(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Properties properties); KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer); @SuppressWarnings({"unchecked", "deprecation"}) private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer); void initTransactions(); void beginTransaction(); void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); void commitTransaction(); void abortTransaction(); @Override Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override // 发送消息,将消息放入RecordAccumulator暂存 Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); @Override void flush(); @Override // 从Metadata中获取指定的topc分区信息 List<PartitionInfo> partitionsFor(String topic); @Override Map<MetricName, ? extends Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); }
// Lifecycle tests for KafkaProducer construction and close():
//  - testConstructorWithSerializers: constructing with explicit serializers then closing must not throw.
//  - testSerializerClose: both MockSerializers are init'd on construction and closed exactly once on close().
//  - testInterceptorConstructClose: a configured MockProducerInterceptor is initialized once on
//    construction and closed once on close(); counters are reset in the finally block.
//  - testPartitionerClose: same init/close contract for a configured MockPartitioner.
//  - testOsDefaultSocketBufferSizes: USE_DEFAULT_BUFFER_SIZE for send/receive buffers is accepted.
//  - closeShouldBeIdempotent: a second close() on the same producer must be a no-op, not an error.
// NOTE(review): bootstrap servers point at unused localhost ports — no broker connection is made.
@Test public void testConstructorWithSerializers() { Properties producerProps = new Properties(); producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()).close(); } @Test public void testSerializerClose() throws Exception { Map<String, Object> configs = new HashMap<>(); configs.put(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL); final int oldInitCount = MockSerializer.INIT_COUNT.get(); final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>( configs, new MockSerializer(), new MockSerializer()); assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); producer.close(); assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get()); } @Test public void testInterceptorConstructClose() throws Exception { try { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName()); props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something"); KafkaProducer<String, String> producer = new KafkaProducer<String, String>( props, new StringSerializer(), new StringSerializer()); assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); assertEquals(0, MockProducerInterceptor.CLOSE_COUNT.get()); Assert.assertNull(MockProducerInterceptor.CLUSTER_META.get()); producer.close(); assertEquals(1, 
MockProducerInterceptor.INIT_COUNT.get()); assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get()); } finally { MockProducerInterceptor.resetCounters(); } } @Test public void testPartitionerClose() throws Exception { try { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName()); KafkaProducer<String, String> producer = new KafkaProducer<String, String>( props, new StringSerializer(), new StringSerializer()); assertEquals(1, MockPartitioner.INIT_COUNT.get()); assertEquals(0, MockPartitioner.CLOSE_COUNT.get()); producer.close(); assertEquals(1, MockPartitioner.INIT_COUNT.get()); assertEquals(1, MockPartitioner.CLOSE_COUNT.get()); } finally { MockPartitioner.resetCounters(); } } @Test public void testOsDefaultSocketBufferSizes() throws Exception { Map<String, Object> config = new HashMap<>(); config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); config.put(ProducerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); config.put(ProducerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>( config, new ByteArraySerializer(), new ByteArraySerializer()); producer.close(); } @Test public void closeShouldBeIdempotent() { Properties producerProps = new Properties(); producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); Producer producer = new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()); producer.close(); producer.close(); }
// Condensed summary of KafkaProducer: the single-argument send(record) delegates to
// send(record, callback) with a null callback (fire-and-forget; result only via the Future).
// The trailing list enumerates the class's public constructors and methods (signatures only).
KafkaProducer implements Producer<K, V> { @Override public Future<RecordMetadata> send(ProducerRecord<K, V> record) { return send(record, null); } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Properties properties); KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer); @SuppressWarnings({"unchecked", "deprecation"}) private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer); void initTransactions(); void beginTransaction(); void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); void commitTransaction(); void abortTransaction(); @Override Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override // 发送消息,将消息放入RecordAccumulator暂存 Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); @Override void flush(); @Override // 从Metadata中获取指定的topc分区信息 List<PartitionInfo> partitionsFor(String topic); @Override Map<MetricName, ? extends Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); }
// PowerMock-driven send() tests against an injected Metadata mock:
//  - testMetadataFetchOnStaleMetadata: send() keeps polling metadata.fetch() while the cluster has
//    no partitions for the topic (refreshAttempts calls before a usable cluster appears); sending to
//    partition 2 fails with KafkaException while the cluster knows only partition 0, and succeeds
//    once the extended (3-partition) cluster is returned. The andThrow(...).anyTimes() expectations
//    assert no extra fetch() calls happen after success.
//  - testHeaders: serializers receive the record's headers; headers become read-only after send()
//    (adding afterwards throws IllegalStateException) and retain the pre-send value.
//  - testInterceptorPartitionSetOnTooLargeRecord: with MAX_REQUEST_SIZE 1, the interceptor's
//    onSendError must be invoked with a non-null TopicPartition and exception.
// NOTE(review): these tests reach into private fields ("metadata", "interceptors") via
// MemberModifier, so they are tightly coupled to KafkaProducer's field names.
@PrepareOnlyThisForTest(Metadata.class) @Test public void testMetadataFetchOnStaleMetadata() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); Metadata metadata = PowerMock.createNiceMock(Metadata.class); MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); String topic = "topic"; ProducerRecord<String, String> initialRecord = new ProducerRecord<>(topic, "value"); ProducerRecord<String, String> extendedRecord = new ProducerRecord<>(topic, 2, null, "value"); Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000)); final Cluster emptyCluster = new Cluster(null, nodes, Collections.<PartitionInfo>emptySet(), Collections.<String>emptySet(), Collections.<String>emptySet()); final Cluster initialCluster = new Cluster( "dummy", Collections.singletonList(new Node(0, "host1", 1000)), Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()); final Cluster extendedCluster = new Cluster( "dummy", Collections.singletonList(new Node(0, "host1", 1000)), Arrays.asList( new PartitionInfo(topic, 0, null, null, null), new PartitionInfo(topic, 1, null, null, null), new PartitionInfo(topic, 2, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()); final int refreshAttempts = 5; EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1); EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); PowerMock.replay(metadata); producer.send(initialRecord); PowerMock.verify(metadata); PowerMock.reset(metadata); EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); 
EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); PowerMock.replay(metadata); producer.send(initialRecord, null); PowerMock.verify(metadata); PowerMock.reset(metadata); EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); PowerMock.replay(metadata); try { producer.send(extendedRecord, null); fail("Expected KafkaException to be raised"); } catch (KafkaException e) { } PowerMock.verify(metadata); PowerMock.reset(metadata); EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); EasyMock.expect(metadata.fetch()).andReturn(extendedCluster).once(); EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); PowerMock.replay(metadata); producer.send(extendedRecord, null); PowerMock.verify(metadata); } @PrepareOnlyThisForTest(Metadata.class) @Test public void testHeaders() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); ExtendedSerializer keySerializer = PowerMock.createNiceMock(ExtendedSerializer.class); ExtendedSerializer valueSerializer = PowerMock.createNiceMock(ExtendedSerializer.class); KafkaProducer<String, String> producer = new KafkaProducer<>(props, keySerializer, valueSerializer); Metadata metadata = PowerMock.createNiceMock(Metadata.class); MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); String topic = "topic"; Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000)); final Cluster cluster = new Cluster( "dummy", Collections.singletonList(new Node(0, "host1", 1000)), Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), Collections.<String>emptySet(), 
Collections.<String>emptySet()); EasyMock.expect(metadata.fetch()).andReturn(cluster).anyTimes(); PowerMock.replay(metadata); String value = "value"; ProducerRecord<String, String> record = new ProducerRecord<>(topic, value); EasyMock.expect(keySerializer.serialize(topic, record.headers(), null)).andReturn(null).once(); EasyMock.expect(valueSerializer.serialize(topic, record.headers(), value)).andReturn(value.getBytes()).once(); PowerMock.replay(keySerializer); PowerMock.replay(valueSerializer); record.headers().add(new RecordHeader("test", "header2".getBytes())); producer.send(record, null); try { record.headers().add(new RecordHeader("test", "test".getBytes())); fail("Expected IllegalStateException to be raised"); } catch (IllegalStateException ise) { } assertTrue(Arrays.equals(record.headers().lastHeader("test").value(), "header2".getBytes())); PowerMock.verify(valueSerializer); PowerMock.verify(keySerializer); } @PrepareOnlyThisForTest(Metadata.class) @Test public void testInterceptorPartitionSetOnTooLargeRecord() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1"); String topic = "topic"; ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); Metadata metadata = PowerMock.createNiceMock(Metadata.class); MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); final Cluster cluster = new Cluster( "dummy", Collections.singletonList(new Node(0, "host1", 1000)), Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()); EasyMock.expect(metadata.fetch()).andReturn(cluster).once(); ProducerInterceptors interceptors = PowerMock.createMock(ProducerInterceptors.class); 
EasyMock.expect(interceptors.onSend(record)).andReturn(record); interceptors.onSendError(EasyMock.eq(record), EasyMock.<TopicPartition>notNull(), EasyMock.<Exception>notNull()); EasyMock.expectLastCall(); MemberModifier.field(KafkaProducer.class, "interceptors").set(producer, interceptors); PowerMock.replay(metadata); EasyMock.replay(interceptors); producer.send(record); EasyMock.verify(interceptors); }
// Condensed summary of KafkaProducer: partitionsFor(topic) blocks on waitOnMetadata (bounded by
// maxBlockTimeMs) and returns the cluster's partitions for the topic; an InterruptedException is
// rethrown as the unchecked InterruptException so callers need not declare it.
// The trailing list enumerates the class's public constructors and methods (signatures only).
KafkaProducer implements Producer<K, V> { @Override public List<PartitionInfo> partitionsFor(String topic) { try { return waitOnMetadata(topic, null, maxBlockTimeMs).cluster.partitionsForTopic(topic); } catch (InterruptedException e) { throw new InterruptException(e); } } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Properties properties); KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer); @SuppressWarnings({"unchecked", "deprecation"}) private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer); void initTransactions(); void beginTransaction(); void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); void commitTransaction(); void abortTransaction(); @Override Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override // 发送消息,将消息放入RecordAccumulator暂存 Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); @Override void flush(); @Override // 从Metadata中获取指定的topc分区信息 List<PartitionInfo> partitionsFor(String topic); @Override Map<MetricName, ? extends Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); }
// Regression test: a topic that partitionsFor() is blocking on must NOT be expired from Metadata
// while the wait is in progress. A background thread repeatedly answers metadata update requests
// with an empty cluster and advances MockTime past the expiry interval (60s) ten times; the main
// thread's partitionsFor() eventually hits TimeoutException, after which the topic must still be
// registered in metadata. NOTE(review): real Metadata + MockTime are injected into private fields,
// and the helper thread spins on updateRequested() with a 1s wall-clock escape hatch.
@Test public void testTopicRefreshInMetadata() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, "600000"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); long refreshBackoffMs = 500L; long metadataExpireMs = 60000L; final Metadata metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, true, new ClusterResourceListeners()); final Time time = new MockTime(); MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); MemberModifier.field(KafkaProducer.class, "time").set(producer, time); final String topic = "topic"; Thread t = new Thread() { @Override public void run() { long startTimeMs = System.currentTimeMillis(); for (int i = 0; i < 10; i++) { while (!metadata.updateRequested() && System.currentTimeMillis() - startTimeMs < 1000) yield(); metadata.update(Cluster.empty(), Collections.singleton(topic), time.milliseconds()); time.sleep(60 * 1000L); } } }; t.start(); try { producer.partitionsFor(topic); fail("Expect TimeoutException"); } catch (TimeoutException e) { } Assert.assertTrue("Topic should still exist in metadata", metadata.containsTopic(topic)); }
KafkaAdminClient extends AdminClient { static <K, V> List<V> getOrCreateListValue(Map<K, List<V>> map, K key) { List<V> list = map.get(key); if (list != null) return list; list = new LinkedList<>(); map.put(key, list); return list; } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }
// Exercises KafkaAdminClient.getOrCreateListValue: first access creates a mutable list,
// repeated access returns the same stored list (mutations included), and a distinct key
// gets its own, initially empty list.
@Test
public void testGetOrCreateListValue() {
    Map<String, List<String>> map = new HashMap<>();
    // First lookup for "foo" must materialize a fresh list.
    List<String> fooList = KafkaAdminClient.getOrCreateListValue(map, "foo");
    assertNotNull(fooList);
    fooList.add("a");
    fooList.add("b");
    // Looking up the same key again returns the stored list, with our additions visible.
    List<String> fooListAgain = KafkaAdminClient.getOrCreateListValue(map, "foo");
    assertEquals(fooList, fooListAgain);
    assertTrue(fooListAgain.contains("a"));
    assertTrue(fooListAgain.contains("b"));
    // An unrelated key yields its own empty list rather than sharing "foo"'s.
    List<String> barList = KafkaAdminClient.getOrCreateListValue(map, "bar");
    assertNotNull(barList);
    assertTrue(barList.isEmpty());
}
KafkaAdminClient extends AdminClient { static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) { long deltaMs = deadlineMs - now; if (deltaMs > Integer.MAX_VALUE) deltaMs = Integer.MAX_VALUE; else if (deltaMs < Integer.MIN_VALUE) deltaMs = Integer.MIN_VALUE; return (int) deltaMs; } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }
// Pins calcTimeoutMsRemainingAsInt's contract: zero at the deadline, the plain difference when it
// fits in an int, and saturation at Integer.MAX_VALUE / Integer.MIN_VALUE for extreme long deltas
// (which would otherwise wrap during the narrowing cast).
@Test public void testCalcTimeoutMsRemainingAsInt() { assertEquals(0, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1000)); assertEquals(100, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1100)); assertEquals(Integer.MAX_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(0, Long.MAX_VALUE)); assertEquals(Integer.MIN_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(Long.MAX_VALUE, 0)); }
// Condensed summary of KafkaAdminClient; the implemented member is prettyPrintException.
KafkaAdminClient extends AdminClient {
    /**
     * Renders a throwable as a short, log-friendly string: the literal {@code "Null exception."}
     * for {@code null}, otherwise the exception's simple class name, with {@code ": message"}
     * appended only when a message is present.
     *
     * @param throwable the throwable to describe; may be {@code null}
     * @return a one-line description, never {@code null}
     */
    static String prettyPrintException(Throwable throwable) {
        if (throwable == null)
            return "Null exception.";
        String simpleName = throwable.getClass().getSimpleName();
        String message = throwable.getMessage();
        // Messageless exceptions (e.g. new TimeoutException()) print as just the class name.
        return message == null ? simpleName : simpleName + ": " + message;
    }
    private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory);
    @Override void close(long duration, TimeUnit unit);
    @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options);
    @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options);
    @Override ListTopicsResult listTopics(final ListTopicsOptions options);
    @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options);
    @Override DescribeClusterResult describeCluster(DescribeClusterOptions options);
    @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options);
    @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options);
    @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options);
    @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options);
    @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options);
}
// Pins prettyPrintException's three cases: the fixed "Null exception." string for null, the bare
// class simple name for a messageless throwable, and "SimpleName: message" when a message exists.
@Test public void testPrettyPrintException() { assertEquals("Null exception.", KafkaAdminClient.prettyPrintException(null)); assertEquals("TimeoutException", KafkaAdminClient.prettyPrintException(new TimeoutException())); assertEquals("TimeoutException: The foobar timed out.", KafkaAdminClient.prettyPrintException(new TimeoutException("The foobar timed out."))); }
// Condensed summary of KafkaAdminClient; the implemented member is generateClientId.
KafkaAdminClient extends AdminClient {
    /**
     * Resolves the client id for an admin client: the configured {@code client.id} when one is
     * set, otherwise a generated {@code "adminclient-<n>"} id using the shared monotonically
     * increasing sequence, so generated ids are unique within the JVM.
     *
     * @param config the admin client configuration to read {@code client.id} from
     * @return the configured or generated client id
     */
    static String generateClientId(AdminClientConfig config) {
        String configuredId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG);
        // An empty string means "not configured" — fall back to a generated id.
        if (configuredId.isEmpty())
            return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement();
        return configuredId;
    }
    private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory);
    @Override void close(long duration, TimeUnit unit);
    @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options);
    @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options);
    @Override ListTopicsResult listTopics(final ListTopicsOptions options);
    @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options);
    @Override DescribeClusterResult describeCluster(DescribeClusterOptions options);
    @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options);
    @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options);
    @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options);
    @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options);
    @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options);
}
// Verifies generateClientId: with an empty client.id, ten successive calls must produce ten
// distinct generated ids (the shared sequence never repeats); with an explicit client.id, that
// exact value is returned unchanged.
@Test public void testGenerateClientId() { Set<String> ids = new HashSet<>(); for (int i = 0; i < 10; i++) { String id = KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "")); assertTrue("Got duplicate id " + id, !ids.contains(id)); ids.add(id); } assertEquals("myCustomId", KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "myCustomId"))); }
// Condensed summary of KafkaAdminClient; the implemented member is createTopics.
// Flow: de-duplicate requested topics into per-topic futures and TopicDetails, then issue a
// single "createTopics" Call against the controller node with the options' deadline. The
// response handler completes each topic's future from the per-topic ApiError (exceptionally on
// failure, with null on success), warns on topics the broker mentions that were never requested,
// and fails any still-pending future whose topic the response omitted. handleFailure fans the
// transport-level error out to every future. The result wraps defensive copies of the futures map.
KafkaAdminClient extends AdminClient { @Override public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options) { final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(newTopics.size()); final Map<String, CreateTopicsRequest.TopicDetails> topicsMap = new HashMap<>(newTopics.size()); for (NewTopic newTopic : newTopics) { if (topicFutures.get(newTopic.name()) == null) { topicFutures.put(newTopic.name(), new KafkaFutureImpl<Void>()); topicsMap.put(newTopic.name(), newTopic.convertToTopicDetails()); } } final long now = time.milliseconds(); runnable.call(new Call("createTopics", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) { @Override public AbstractRequest.Builder createRequest(int timeoutMs) { return new CreateTopicsRequest.Builder(topicsMap, timeoutMs, options.shouldValidateOnly()); } @Override public void handleResponse(AbstractResponse abstractResponse) { CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse; for (Map.Entry<String, ApiError> entry : response.errors().entrySet()) { KafkaFutureImpl<Void> future = topicFutures.get(entry.getKey()); if (future == null) { log.warn("Server response mentioned unknown topic {}", entry.getKey()); } else { ApiException exception = entry.getValue().exception(); if (exception != null) { future.completeExceptionally(exception); } else { future.complete(null); } } } for (Map.Entry<String, KafkaFutureImpl<Void>> entry : topicFutures.entrySet()) { KafkaFutureImpl<Void> future = entry.getValue(); if (!future.isDone()) { future.completeExceptionally(new ApiException("The server response did not " + "contain a reference to node " + entry.getKey())); } } } @Override void handleFailure(Throwable throwable) { completeAllExceptionally(topicFutures.values(), throwable); } }, now); return new CreateTopicsResult(new HashMap<String, KafkaFuture<Void>>(topicFutures)); } private KafkaAdminClient(AdminClientConfig config, String 
clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }
// createTopics() tests against the mock client environment:
//  - testTimeoutWithoutMetadata: with request.timeout.ms=10 and no metadata update prepared, the
//    request cannot be dispatched and the per-topic future must fail with TimeoutException —
//    even though a (never-delivered) success response is queued.
//  - testCreateTopics: with metadata prepared and the controller node set, a NONE-error response
//    completes the future normally (future.get() returns without throwing).
@Test public void testTimeoutWithoutMetadata() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().setNode(new Node(0, "localhost", 8121)); env.kafkaClient().prepareResponse(new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(Integer.valueOf(0), asList(new Integer[]{0, 1, 2})))), new CreateTopicsOptions().timeoutMs(1000)).all(); assertFutureError(future, TimeoutException.class); } } @Test public void testCreateTopics() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.kafkaClient().prepareResponse(new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(Integer.valueOf(0), asList(new Integer[]{0, 1, 2})))), new CreateTopicsOptions().timeoutMs(10000)).all(); future.get(); } }
// Condensed summary of KafkaAdminClient; the implemented member is describeAcls.
// Flow: a single future backs the result; a "describeAcls" Call is sent to the least-loaded node
// with the options' deadline. The response handler completes the future exceptionally from the
// response-level ApiError when present, otherwise with the returned ACL bindings; handleFailure
// propagates transport-level errors into the same future.
KafkaAdminClient extends AdminClient { @Override public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options) { final long now = time.milliseconds(); final KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>(); runnable.call(new Call("describeAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override AbstractRequest.Builder createRequest(int timeoutMs) { return new DescribeAclsRequest.Builder(filter); } @Override void handleResponse(AbstractResponse abstractResponse) { DescribeAclsResponse response = (DescribeAclsResponse) abstractResponse; if (response.error().isFailure()) { future.completeExceptionally(response.error().exception()); } else { future.complete(response.acls()); } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }, now); return new DescribeAclsResult(future); } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult 
describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }
// describeAcls() tests: a successful response yields exactly the returned bindings (ACL1, ACL2);
// an empty successful response yields an empty collection; a SECURITY_DISABLED response-level
// error surfaces as SecurityDisabledException on the result future.
@Test public void testDescribeAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, asList(ACL1, ACL2))); assertCollectionIs(env.adminClient().describeAcls(FILTER1).values().get(), ACL1, ACL2); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, Collections.<AclBinding>emptySet())); assertTrue(env.adminClient().describeAcls(FILTER2).values().get().isEmpty()); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, new ApiError(Errors.SECURITY_DISABLED, "Security is disabled"), Collections.<AclBinding>emptySet())); assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); } }
// Condensed summary of KafkaAdminClient; the implemented member is createAcls.
// Flow: de-duplicate the requested bindings into per-binding futures; bindings whose filter has an
// indefinite field fail immediately with InvalidRequestException and are excluded from the wire
// request. One "createAcls" Call goes to the least-loaded node; the handler pairs creation
// responses with requested creations in order, failing with UnknownServerException when the broker
// returns fewer results than requested. handleFailure fans the transport error to every future.
KafkaAdminClient extends AdminClient { @Override public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) { final long now = time.milliseconds(); final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>(); final List<AclCreation> aclCreations = new ArrayList<>(); for (AclBinding acl : acls) { if (futures.get(acl) == null) { KafkaFutureImpl<Void> future = new KafkaFutureImpl<>(); futures.put(acl, future); String indefinite = acl.toFilter().findIndefiniteField(); if (indefinite == null) { aclCreations.add(new AclCreation(acl)); } else { future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite)); } } } runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override AbstractRequest.Builder createRequest(int timeoutMs) { return new CreateAclsRequest.Builder(aclCreations); } @Override void handleResponse(AbstractResponse abstractResponse) { CreateAclsResponse response = (CreateAclsResponse) abstractResponse; List<AclCreationResponse> responses = response.aclCreationResponses(); Iterator<AclCreationResponse> iter = responses.iterator(); for (AclCreation aclCreation : aclCreations) { KafkaFutureImpl<Void> future = futures.get(aclCreation.acl()); if (!iter.hasNext()) { future.completeExceptionally(new UnknownServerException( "The broker reported no creation result for the given ACL.")); } else { AclCreationResponse creation = iter.next(); if (creation.error().isFailure()) { future.completeExceptionally(creation.error().exception()); } else { future.complete(null); } } } } @Override void handleFailure(Throwable throwable) { completeAllExceptionally(futures.values(), throwable); } }, now); return new CreateAclsResult(new HashMap<AclBinding, KafkaFuture<Void>>(futures)); } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory 
timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }
@Test public void testCreateAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.kafkaClient().prepareResponse(new CreateAclsResponse(0, asList(new AclCreationResponse(ApiError.NONE), new AclCreationResponse(ApiError.NONE)))); CreateAclsResult results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); for (KafkaFuture<Void> future : results.values().values()) future.get(); results.all().get(); env.kafkaClient().prepareResponse(new CreateAclsResponse(0, asList( new AclCreationResponse(new ApiError(Errors.SECURITY_DISABLED, "Security is disabled")), new AclCreationResponse(ApiError.NONE)) )); results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); assertFutureError(results.values().get(ACL1), SecurityDisabledException.class); results.values().get(ACL2).get(); assertFutureError(results.all(), SecurityDisabledException.class); } }
// NOTE(review): extraction artifact — the class header is missing the `class` keyword
// and the trailing members are bare signatures (their bodies live elsewhere in the file).
KafkaAdminClient extends AdminClient {

    /**
     * Deletes every ACL binding matching any of the given filters.
     *
     * One future per distinct filter (duplicates collapse onto the first occurrence);
     * each future resolves to the FilterResults describing the bindings the broker
     * deleted for that filter (with a per-binding error where applicable).
     */
    @Override
    public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
        final long now = time.milliseconds();
        final Map<AclBindingFilter, KafkaFutureImpl<FilterResults>> futures = new HashMap<>();
        final List<AclBindingFilter> filterList = new ArrayList<>();
        for (AclBindingFilter filter : filters) {
            // De-duplicate filters; the first occurrence owns the future.
            if (futures.get(filter) == null) {
                filterList.add(filter);
                futures.put(filter, new KafkaFutureImpl<FilterResults>());
            }
        }
        runnable.call(new Call("deleteAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {
            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DeleteAclsRequest.Builder(filterList);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DeleteAclsResponse response = (DeleteAclsResponse) abstractResponse;
                List<AclFilterResponse> responses = response.responses();
                Iterator<AclFilterResponse> iter = responses.iterator();
                // Responses are positional, one per filter in request order.
                for (AclBindingFilter filter : filterList) {
                    KafkaFutureImpl<FilterResults> future = futures.get(filter);
                    if (!iter.hasNext()) {
                        // Broker returned fewer results than filters requested.
                        future.completeExceptionally(new UnknownServerException(
                                "The broker reported no deletion result for the given filter."));
                    } else {
                        AclFilterResponse deletion = iter.next();
                        if (deletion.error().isFailure()) {
                            // A filter-level error fails that filter's whole future.
                            future.completeExceptionally(deletion.error().exception());
                        } else {
                            // Surface per-binding outcomes (deleted acl + optional error).
                            List<FilterResult> filterResults = new ArrayList<>();
                            for (AclDeletionResult deletionResult : deletion.deletions()) {
                                filterResults.add(new FilterResult(deletionResult.acl(), deletionResult.error().exception()));
                            }
                            future.complete(new FilterResults(filterResults));
                        }
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Whole call failed: fail every pending per-filter future.
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
        // Defensive copy so the result does not share the mutable map above.
        return new DeleteAclsResult(new HashMap<AclBindingFilter, KafkaFuture<FilterResults>>(futures));
    }

    // Bare member signatures retained from the extracted class outline.
    private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata, Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory);
    @Override void close(long duration, TimeUnit unit);
    @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options);
    @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options);
    @Override ListTopicsResult listTopics(final ListTopicsOptions options);
    @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options);
    @Override DescribeClusterResult describeCluster(DescribeClusterOptions options);
    @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options);
    @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options);
    @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options);
    @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options);
    @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options);
}
/**
 * deleteAcls: per-filter success resolves to the deleted bindings, a filter-level
 * SECURITY_DISABLED error fails only that filter's future, a per-binding error
 * still resolves the filter's future but fails the aggregate all(), and the
 * fully-successful case yields the union of deleted bindings from all().
 */
@Test
public void testDeleteAcls() throws Exception {
    try (MockKafkaAdminClientEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());

        // Scenario 1: FILTER1 deletes ACL1+ACL2; FILTER2 fails at the filter level.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
            new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(ACL2))),
            new AclFilterResponse(new ApiError(Errors.SECURITY_DISABLED, "No security"),
                Collections.<AclDeletionResult>emptySet()))));
        DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
        FilterResults filter1Results = filterResults.get(FILTER1).get();
        // Each deletion result carries the binding plus a nullable exception.
        assertEquals(null, filter1Results.values().get(0).exception());
        assertEquals(ACL1, filter1Results.values().get(0).binding());
        assertEquals(null, filter1Results.values().get(1).exception());
        assertEquals(ACL2, filter1Results.values().get(1).binding());
        assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
        assertFutureError(results.all(), SecurityDisabledException.class);

        // Scenario 2: FILTER1 succeeds overall but one of its deletions carries a
        // per-binding error; FILTER2 matches nothing. all() must still fail.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
            new AclFilterResponse(asList(new AclDeletionResult(ACL1),
                new AclDeletionResult(new ApiError(Errors.SECURITY_DISABLED, "No security"), ACL2))),
            new AclFilterResponse(Collections.<AclDeletionResult>emptySet()))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        assertTrue(results.values().get(FILTER2).get().values().isEmpty());
        assertFutureError(results.all(), SecurityDisabledException.class);

        // Scenario 3: every deletion succeeds; all() yields the combined bindings.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
            new AclFilterResponse(asList(new AclDeletionResult(ACL1))),
            new AclFilterResponse(asList(new AclDeletionResult(ACL2))))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
    }
}
JmxReporter implements MetricsReporter { public void close() { synchronized (LOCK) { for (KafkaMbean mbean : this.mbeans.values()) unregister(mbean); } } JmxReporter(); JmxReporter(String prefix); @Override void configure(Map<String, ?> configs); @Override void init(List<KafkaMetric> metrics); @Override void metricChange(KafkaMetric metric); @Override void metricRemoval(KafkaMetric metric); void close(); }
@Test public void testJmxRegistration() throws Exception { Metrics metrics = new Metrics(); try { metrics.addReporter(new JmxReporter()); Sensor sensor = metrics.sensor("kafka.requests"); sensor.add(metrics.metricName("pack.bean1.avg", "grp1"), new Avg()); sensor.add(metrics.metricName("pack.bean2.total", "grp2"), new Total()); Sensor sensor2 = metrics.sensor("kafka.blah"); sensor2.add(metrics.metricName("pack.bean1.some", "grp1"), new Total()); sensor2.add(metrics.metricName("pack.bean2.some", "grp1"), new Total()); } finally { metrics.close(); } }
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures.
Metrics implements Closeable {

    /**
     * Builds a MetricName whose tags are the registry's config-level tags merged
     * with the supplied per-metric tags; per-metric values override config values
     * on key clashes, and a LinkedHashMap preserves insertion order.
     */
    public MetricName metricName(String name, String group, String description, Map<String, String> tags) {
        final Map<String, String> merged = new LinkedHashMap<>(config.tags());
        merged.putAll(tags);
        return new MetricName(name, group, description, merged);
    }

    // Bare member signatures retained from the extracted class outline.
    Metrics();
    Metrics(Time time);
    Metrics(MetricConfig defaultConfig, Time time);
    Metrics(MetricConfig defaultConfig);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration);
    MetricName metricName(String name, String group, String description, Map<String, String> tags);
    MetricName metricName(String name, String group, String description);
    MetricName metricName(String name, String group);
    MetricName metricName(String name, String group, String description, String... keyValue);
    MetricName metricName(String name, String group, Map<String, String> tags);
    static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics);
    MetricConfig config();
    Sensor getSensor(String name);
    Sensor sensor(String name);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel);
    Sensor sensor(String name, Sensor... parents);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents);
    void removeSensor(String name);
    void addMetric(MetricName metricName, Measurable measurable);
    synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable);
    synchronized KafkaMetric removeMetric(MetricName metricName);
    synchronized void addReporter(MetricsReporter reporter);
    Map<MetricName, KafkaMetric> metrics();
    List<MetricsReporter> reporters();
    KafkaMetric metric(MetricName metricName);
    MetricName metricInstance(MetricNameTemplate template, String... keyValue);
    MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags);
    @Override void close();
}
@Test public void testMetricName() { MetricName n1 = metrics.metricName("name", "group", "description", "key1", "value1", "key2", "value2"); Map<String, String> tags = new HashMap<String, String>(); tags.put("key1", "value1"); tags.put("key2", "value2"); MetricName n2 = metrics.metricName("name", "group", "description", tags); assertEquals("metric names created in two different ways should be equal", n1, n2); try { metrics.metricName("name", "group", "description", "key1"); fail("Creating MetricName with an odd number of keyValue should fail"); } catch (IllegalArgumentException e) { } }
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures.
Metrics implements Closeable {

    /**
     * Gets or creates the sensor with the given unique name, recording at the
     * default INFO level. Delegates to the (name, recordingLevel) overload.
     */
    public Sensor sensor(String name) {
        return sensor(name, Sensor.RecordingLevel.INFO);
    }

    // Bare member signatures retained from the extracted class outline.
    Metrics();
    Metrics(Time time);
    Metrics(MetricConfig defaultConfig, Time time);
    Metrics(MetricConfig defaultConfig);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration);
    MetricName metricName(String name, String group, String description, Map<String, String> tags);
    MetricName metricName(String name, String group, String description);
    MetricName metricName(String name, String group);
    MetricName metricName(String name, String group, String description, String... keyValue);
    MetricName metricName(String name, String group, Map<String, String> tags);
    static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics);
    MetricConfig config();
    Sensor getSensor(String name);
    Sensor sensor(String name);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel);
    Sensor sensor(String name, Sensor... parents);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents);
    void removeSensor(String name);
    void addMetric(MetricName metricName, Measurable measurable);
    synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable);
    synchronized KafkaMetric removeMetric(MetricName metricName);
    synchronized void addReporter(MetricsReporter reporter);
    Map<MetricName, KafkaMetric> metrics();
    List<MetricsReporter> reporters();
    KafkaMetric metric(MetricName metricName);
    MetricName metricInstance(MetricNameTemplate template, String... keyValue);
    MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags);
    @Override void close();
}
@Test(expected = IllegalArgumentException.class) public void testBadSensorHierarchy() { Sensor p = metrics.sensor("parent"); Sensor c1 = metrics.sensor("child1", p); Sensor c2 = metrics.sensor("child2", p); metrics.sensor("gc", c1, c2); }
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures.
Metrics implements Closeable {

    /**
     * Removes the named sensor (if it exists), all of its metrics, and then,
     * recursively, every child sensor registered under it.
     *
     * Locking: the sensor's own monitor is taken before the registry's monitor;
     * child removal happens outside both locks to avoid re-entrant nesting while
     * recursing. NOTE(review): this lock order presumably mirrors the order used
     * by other sensor operations — verify before reordering.
     */
    public void removeSensor(String name) {
        Sensor sensor = sensors.get(name);
        if (sensor != null) {
            List<Sensor> childSensors = null;
            synchronized (sensor) {
                synchronized (this) {
                    // remove(name, sensor) only succeeds if the mapping is unchanged,
                    // guarding against a concurrent re-registration under the same name.
                    if (sensors.remove(name, sensor)) {
                        for (KafkaMetric metric : sensor.metrics())
                            removeMetric(metric.metricName());
                        log.debug("Removed sensor with name {}", name);
                        childSensors = childrenSensors.remove(sensor);
                    }
                }
            }
            // Recurse on children after releasing both locks.
            if (childSensors != null) {
                for (Sensor childSensor : childSensors)
                    removeSensor(childSensor.name());
            }
        }
    }

    // Bare member signatures retained from the extracted class outline.
    Metrics();
    Metrics(Time time);
    Metrics(MetricConfig defaultConfig, Time time);
    Metrics(MetricConfig defaultConfig);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration);
    MetricName metricName(String name, String group, String description, Map<String, String> tags);
    MetricName metricName(String name, String group, String description);
    MetricName metricName(String name, String group);
    MetricName metricName(String name, String group, String description, String... keyValue);
    MetricName metricName(String name, String group, Map<String, String> tags);
    static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics);
    MetricConfig config();
    Sensor getSensor(String name);
    Sensor sensor(String name);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel);
    Sensor sensor(String name, Sensor... parents);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents);
    void removeSensor(String name);
    void addMetric(MetricName metricName, Measurable measurable);
    synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable);
    synchronized KafkaMetric removeMetric(MetricName metricName);
    synchronized void addReporter(MetricsReporter reporter);
    Map<MetricName, KafkaMetric> metrics();
    List<MetricsReporter> reporters();
    KafkaMetric metric(MetricName metricName);
    MetricName metricInstance(MetricNameTemplate template, String... keyValue);
    MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags);
    @Override void close();
}
/**
 * removeSensor must drop the sensor, its metrics, its child-sensor bookkeeping,
 * and (for parents) its children — and after removing the whole hierarchy the
 * registry must be back to its initial metric count.
 */
@Test
public void testRemoveSensor() {
    int size = metrics.metrics().size();
    // Hierarchy: parent1, parent2 -> child1; parent2 -> child2 -> gchild2.
    Sensor parent1 = metrics.sensor("test.parent1");
    parent1.add(metrics.metricName("test.parent1.count", "grp1"), new Count());
    Sensor parent2 = metrics.sensor("test.parent2");
    parent2.add(metrics.metricName("test.parent2.count", "grp1"), new Count());
    Sensor child1 = metrics.sensor("test.child1", parent1, parent2);
    child1.add(metrics.metricName("test.child1.count", "grp1"), new Count());
    Sensor child2 = metrics.sensor("test.child2", parent2);
    child2.add(metrics.metricName("test.child2.count", "grp1"), new Count());
    Sensor grandChild1 = metrics.sensor("test.gchild2", child2);
    grandChild1.add(metrics.metricName("test.gchild2.count", "grp1"), new Count());

    // Removing a parent also removes its child (test.child1) and its metrics.
    Sensor sensor = metrics.getSensor("test.parent1");
    assertNotNull(sensor);
    metrics.removeSensor("test.parent1");
    assertNull(metrics.getSensor("test.parent1"));
    assertNull(metrics.metrics().get(metrics.metricName("test.parent1.count", "grp1")));
    assertNull(metrics.getSensor("test.child1"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.child1.count", "grp1")));

    // Removing a leaf (grandchild) removes only itself.
    sensor = metrics.getSensor("test.gchild2");
    assertNotNull(sensor);
    metrics.removeSensor("test.gchild2");
    assertNull(metrics.getSensor("test.gchild2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.gchild2.count", "grp1")));

    // Then the now-childless intermediate sensor.
    sensor = metrics.getSensor("test.child2");
    assertNotNull(sensor);
    metrics.removeSensor("test.child2");
    assertNull(metrics.getSensor("test.child2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.child2.count", "grp1")));

    // Finally the remaining parent; the registry returns to its initial size.
    sensor = metrics.getSensor("test.parent2");
    assertNotNull(sensor);
    metrics.removeSensor("test.parent2");
    assertNull(metrics.getSensor("test.parent2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.parent2.count", "grp1")));
    assertEquals(size, metrics.metrics().size());
}
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures.
Metrics implements Closeable {

    /**
     * Removes the metric with the given name from the registry and notifies
     * every registered reporter of the removal.
     *
     * @return the removed metric, or null if no metric was registered under that name
     */
    public synchronized KafkaMetric removeMetric(MetricName metricName) {
        final KafkaMetric removed = this.metrics.remove(metricName);
        if (removed == null)
            return null;
        for (MetricsReporter reporter : reporters)
            reporter.metricRemoval(removed);
        return removed;
    }

    // Bare member signatures retained from the extracted class outline.
    Metrics();
    Metrics(Time time);
    Metrics(MetricConfig defaultConfig, Time time);
    Metrics(MetricConfig defaultConfig);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration);
    MetricName metricName(String name, String group, String description, Map<String, String> tags);
    MetricName metricName(String name, String group, String description);
    MetricName metricName(String name, String group);
    MetricName metricName(String name, String group, String description, String... keyValue);
    MetricName metricName(String name, String group, Map<String, String> tags);
    static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics);
    MetricConfig config();
    Sensor getSensor(String name);
    Sensor sensor(String name);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel);
    Sensor sensor(String name, Sensor... parents);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents);
    void removeSensor(String name);
    void addMetric(MetricName metricName, Measurable measurable);
    synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable);
    synchronized KafkaMetric removeMetric(MetricName metricName);
    synchronized void addReporter(MetricsReporter reporter);
    Map<MetricName, KafkaMetric> metrics();
    List<MetricsReporter> reporters();
    KafkaMetric metric(MetricName metricName);
    MetricName metricInstance(MetricNameTemplate template, String... keyValue);
    MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags);
    @Override void close();
}
@Test public void testRemoveMetric() { int size = metrics.metrics().size(); metrics.addMetric(metrics.metricName("test1", "grp1"), new Count()); metrics.addMetric(metrics.metricName("test2", "grp1"), new Count()); assertNotNull(metrics.removeMetric(metrics.metricName("test1", "grp1"))); assertNull(metrics.metrics().get(metrics.metricName("test1", "grp1"))); assertNotNull(metrics.metrics().get(metrics.metricName("test2", "grp1"))); assertNotNull(metrics.removeMetric(metrics.metricName("test2", "grp1"))); assertNull(metrics.metrics().get(metrics.metricName("test2", "grp1"))); assertEquals(size, metrics.metrics().size()); }
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures.
Metrics implements Closeable {

    /**
     * Instantiates a MetricName from a template using alternating key/value
     * varargs; converts the varargs to a tag map and delegates to the Map overload.
     */
    public MetricName metricInstance(MetricNameTemplate template, String... keyValue) {
        final Map<String, String> tags = getTags(keyValue);
        return metricInstance(template, tags);
    }

    // Bare member signatures retained from the extracted class outline.
    Metrics();
    Metrics(Time time);
    Metrics(MetricConfig defaultConfig, Time time);
    Metrics(MetricConfig defaultConfig);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time);
    Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration);
    MetricName metricName(String name, String group, String description, Map<String, String> tags);
    MetricName metricName(String name, String group, String description);
    MetricName metricName(String name, String group);
    MetricName metricName(String name, String group, String description, String... keyValue);
    MetricName metricName(String name, String group, Map<String, String> tags);
    static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics);
    MetricConfig config();
    Sensor getSensor(String name);
    Sensor sensor(String name);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel);
    Sensor sensor(String name, Sensor... parents);
    Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents);
    synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents);
    void removeSensor(String name);
    void addMetric(MetricName metricName, Measurable measurable);
    synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable);
    synchronized KafkaMetric removeMetric(MetricName metricName);
    synchronized void addReporter(MetricsReporter reporter);
    Map<MetricName, KafkaMetric> metrics();
    List<MetricsReporter> reporters();
    KafkaMetric metric(MetricName metricName);
    MetricName metricInstance(MetricNameTemplate template, String... keyValue);
    MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags);
    @Override void close();
}
@Test public void testMetricInstances() { MetricName n1 = metrics.metricInstance(SampleMetrics.METRIC1, "key1", "value1", "key2", "value2"); Map<String, String> tags = new HashMap<String, String>(); tags.put("key1", "value1"); tags.put("key2", "value2"); MetricName n2 = metrics.metricInstance(SampleMetrics.METRIC2, tags); assertEquals("metric names created in two different ways should be equal", n1, n2); try { metrics.metricInstance(SampleMetrics.METRIC1, "key1"); fail("Creating MetricName with an odd number of keyValue should fail"); } catch (IllegalArgumentException e) { } Map<String, String> parentTagsWithValues = new HashMap<>(); parentTagsWithValues.put("parent-tag", "parent-tag-value"); Map<String, String> childTagsWithValues = new HashMap<>(); childTagsWithValues.put("child-tag", "child-tag-value"); try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), Arrays.asList((MetricsReporter) new JmxReporter()), time, true)) { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues); Map<String, String> filledOutTags = inheritedMetric.tags(); assertEquals("parent-tag should be set properly", filledOutTags.get("parent-tag"), "parent-tag-value"); assertEquals("child-tag should be set properly", filledOutTags.get("child-tag"), "child-tag-value"); try { inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, parentTagsWithValues); fail("Creating MetricName should fail if the child metrics are not defined at runtime"); } catch (IllegalArgumentException e) { } try { Map<String, String> runtimeTags = new HashMap<>(); runtimeTags.put("child-tag", "child-tag-value"); runtimeTags.put("tag-not-in-template", "unexpected-value"); inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, runtimeTags); fail("Creating MetricName should fail if there is a tag at runtime that is not in the template"); } catch (IllegalArgumentException e) { } } }
Sensor { public boolean shouldRecord() { return this.recordingLevel.shouldRecord(config.recordLevel().id); } Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time, long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel); String name(); void record(); boolean shouldRecord(); void record(double value); void record(double value, long timeMs); void record(double value, long timeMs, boolean checkQuotas); void checkQuotas(); void checkQuotas(long timeMs); void add(CompoundStat stat); synchronized void add(CompoundStat stat, MetricConfig config); void add(MetricName metricName, MeasurableStat stat); synchronized void add(MetricName metricName, MeasurableStat stat, MetricConfig config); boolean hasExpired(); }
@Test public void testRecordLevelEnum() { Sensor.RecordingLevel configLevel = Sensor.RecordingLevel.INFO; assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id)); assertFalse(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); configLevel = Sensor.RecordingLevel.DEBUG; assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id)); assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString()), Sensor.RecordingLevel.DEBUG); assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString()), Sensor.RecordingLevel.INFO); } @Test public void testShouldRecord() { MetricConfig debugConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.DEBUG); MetricConfig infoConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO); Sensor infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO); assertTrue(infoSensor.shouldRecord()); infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG); assertTrue(infoSensor.shouldRecord()); Sensor debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO); assertTrue(debugSensor.shouldRecord()); debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG); assertFalse(debugSensor.shouldRecord()); }
Histogram { public Histogram(BinScheme binScheme) { this.hist = new float[binScheme.bins()]; this.count = 0.0f; this.binScheme = binScheme; } Histogram(BinScheme binScheme); void record(double value); double value(double quantile); float[] counts(); void clear(); @Override String toString(); }
@Test public void testHistogram() { BinScheme scheme = new ConstantBinScheme(12, -5, 5); Histogram hist = new Histogram(scheme); for (int i = -5; i < 5; i++) hist.record(i); for (int i = 0; i < 10; i++) assertEquals(scheme.fromBin(i + 1), hist.value(i / 10.0 + EPS), EPS); } @Test public void testHistogram() { BinScheme scheme = new ConstantBinScheme(10, -5, 5); Histogram hist = new Histogram(scheme); for (int i = -5; i < 5; i++) hist.record(i); for (int i = 0; i < 10; i++) assertEquals(scheme.fromBin(i), hist.value(i / 10.0 + EPS), EPS); }
// NOTE(review): extraction artifact — class header lacks the `class` keyword and the
// trailing members are bare signatures. Original non-English comments translated.
Selector implements Selectable, AutoCloseable {

    /**
     * Begins a non-blocking connect to the given address and registers the
     * resulting KafkaChannel under the given id.
     *
     * The SocketChannel is configured non-blocking with keep-alive and
     * TCP_NODELAY; buffer sizes are applied only when not USE_DEFAULT_BUFFER_SIZE.
     * On any failure the socket (and, if already registered, the selection key)
     * is cleaned up before the exception propagates.
     */
    @Override
    public void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException {
        if (this.channels.containsKey(id))
            throw new IllegalStateException("There is already a connection for id " + id);

        SocketChannel socketChannel = SocketChannel.open();
        socketChannel.configureBlocking(false);
        Socket socket = socketChannel.socket();
        socket.setKeepAlive(true);
        if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
            socket.setSendBufferSize(sendBufferSize);
        if (receiveBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
            socket.setReceiveBufferSize(receiveBufferSize);
        socket.setTcpNoDelay(true);
        boolean connected;
        try {
            // Non-blocking connect: normally returns false and completes later;
            // may return true immediately (e.g. loopback).
            connected = socketChannel.connect(address);
        } catch (UnresolvedAddressException e) {
            socketChannel.close();
            throw new IOException("Can't resolve address: " + address, e);
        } catch (IOException e) {
            socketChannel.close();
            throw e;
        }
        SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_CONNECT);
        KafkaChannel channel;
        try {
            channel = channelBuilder.buildChannel(id, key, maxReceiveSize);
        } catch (Exception e) {
            // Close the socket first, then cancel the key even if close() throws.
            try {
                socketChannel.close();
            } finally {
                key.cancel();
            }
            throw new IOException("Channel could not be created for socket " + socketChannel, e);
        }
        key.attach(channel);
        this.channels.put(id, channel);

        if (connected) {
            // OP_CONNECT won't trigger for an already-connected channel, so track
            // it separately and clear the interest ops.
            log.debug("Immediately connected to node {}", channel.id());
            immediatelyConnectedKeys.add(key);
            key.interestOps(0);
        }
    }

    // Bare member signatures retained from the extracted class outline.
    Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, String metricGrpPrefix, // prefix for the metric group
        Map<String, String> metricTags, // tags used when creating MetricNames
        boolean metricsPerConnection, boolean recordTimePerConnection, ChannelBuilder channelBuilder);
    Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, String metricGrpPrefix, Map<String, String> metricTags, boolean metricsPerConnection, ChannelBuilder channelBuilder);
    Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder);
    @Override // creates a KafkaChannel and stores it in `channels`
    void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize);
    void register(String id, SocketChannel socketChannel);
    @Override void wakeup();
    @Override void close();
    void send(Send send);
    @Override // on poll, reads/writes according to the ready selection keys, invoking the channel's read and write
    void poll(long timeout);
    @Override List<Send> completedSends();
    @Override List<NetworkReceive> completedReceives();
    @Override Map<String, ChannelState> disconnected();
    @Override List<String> connected();
    @Override void mute(String id);
    @Override void unmute(String id);
    @Override void muteAll();
    @Override void unmuteAll();
    void close(String id);
    @Override boolean isChannelReady(String id);
    List<KafkaChannel> channels();
    KafkaChannel channel(String id);
    KafkaChannel closingChannel(String id);
    Set<SelectionKey> keys();
    static final long NO_IDLE_TIMEOUT_MS;
}
@Test(expected = IOException.class) public void testNoRouteToHost() throws Exception { selector.connect("0", new InetSocketAddress("some.invalid.hostname.foo.bar.local", server.port), BUFFER_SIZE, BUFFER_SIZE); } @Test public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; String node = "0"; int reqs = 50; InetSocketAddress addr = new InetSocketAddress("localhost", server.port); connect(node, addr); String requestPrefix = TestUtils.randomString(bufferSize); sendAndReceive(node, requestPrefix, 0, reqs); }
Selector implements Selectable, AutoCloseable { @Override public void mute(String id) { KafkaChannel channel = channelOrFail(id, true); mute(channel); } Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, String metricGrpPrefix, // group的前缀 Map<String, String> metricTags, // 创建MetricName时候使用的tags集合 boolean metricsPerConnection, boolean recordTimePerConnection, ChannelBuilder channelBuilder); Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, String metricGrpPrefix, Map<String, String> metricTags, boolean metricsPerConnection, ChannelBuilder channelBuilder); Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder); @Override // 创建 KafkaChannel添加到channels保存 void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize); void register(String id, SocketChannel socketChannel); @Override void wakeup(); @Override void close(); void send(Send send); @Override // 轮训的时候根据选在键读写,分别调用kafka通道的read和write void poll(long timeout); @Override List<Send> completedSends(); @Override List<NetworkReceive> completedReceives(); @Override Map<String, ChannelState> disconnected(); @Override List<String> connected(); @Override void mute(String id); @Override void unmute(String id); @Override void muteAll(); @Override void unmuteAll(); void close(String id); @Override boolean isChannelReady(String id); List<KafkaChannel> channels(); KafkaChannel channel(String id); KafkaChannel closingChannel(String id); Set<SelectionKey> keys(); static final long NO_IDLE_TIMEOUT_MS; }
/**
 * Verifies that muting a connection suppresses delivery of its responses:
 * with node "1" muted only node "0"'s response arrives; after unmuting,
 * node "1"'s pending response is delivered.
 */
@Test
public void testMute() throws Exception {
    blockingConnect("0");
    blockingConnect("1");
    // Send one request on each connection, then mute "1" before polling.
    selector.send(createSend("0", "hello"));
    selector.send(createSend("1", "hi"));
    selector.mute("1");
    // Poll until a response arrives; it must be from the unmuted node only.
    while (selector.completedReceives().isEmpty())
        selector.poll(5);
    assertEquals("We should have only one response", 1, selector.completedReceives().size());
    assertEquals("The response should not be from the muted node", "0", selector.completedReceives().get(0).source());
    selector.unmute("1");
    // do/while: poll at least once so the freshly unmuted channel is read.
    do {
        selector.poll(5);
    } while (selector.completedReceives().isEmpty());
    assertEquals("We should have only one response", 1, selector.completedReceives().size());
    assertEquals("The response should be from the previously muted node", "1", selector.completedReceives().get(0).source());
}
// Focal-method context for SslTransportLayer: startHandshake() plus the
// class's constructor/method signatures (bodies elided in this extract).
SslTransportLayer implements TransportLayer {

    /**
     * Allocates the network and application buffers (sizes come from the
     * netReadBufferSize()/netWriteBufferSize()/applicationBufferSize() helpers,
     * presumably derived from the SSL session — not visible here), resets the
     * handshake state flags and begins the TLS handshake on the SSLEngine.
     */
    protected void startHandshake() throws IOException {
        this.netReadBuffer = ByteBuffer.allocate(netReadBufferSize());
        this.netWriteBuffer = ByteBuffer.allocate(netWriteBufferSize());
        this.appReadBuffer = ByteBuffer.allocate(applicationBufferSize());
        // position == limit == 0: both network buffers start out empty so the
        // first poll cycle drives the initial wrap/unwrap.
        netWriteBuffer.position(0);
        netWriteBuffer.limit(0);
        netReadBuffer.position(0);
        netReadBuffer.limit(0);
        handshakeComplete = false;
        closing = false;
        sslEngine.beginHandshake();
        handshakeStatus = sslEngine.getHandshakeStatus();
    }

    SslTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine, boolean enableRenegotiation);
    static SslTransportLayer create(String channelId, SelectionKey key, SSLEngine sslEngine);
    @Override boolean ready();
    @Override boolean finishConnect();
    @Override void disconnect();
    @Override SocketChannel socketChannel();
    @Override boolean isOpen();
    @Override boolean isConnected();
    @Override void close();
    @Override boolean hasPendingWrites();
    @Override void handshake();
    @Override int read(ByteBuffer dst);
    @Override long read(ByteBuffer[] dsts);
    @Override long read(ByteBuffer[] dsts, int offset, int length);
    @Override int write(ByteBuffer src);
    @Override long write(ByteBuffer[] srcs, int offset, int length);
    @Override long write(ByteBuffer[] srcs);
    Principal peerPrincipal();
    SSLSession sslSession();
    @Override void addInterestOps(int ops);
    @Override void removeInterestOps(int ops);
    @Override boolean isMute();
    @Override long transferFrom(FileChannel fileChannel, long position, long count);
}
/**
 * Verifies that the client's certificate endpoint (hostname) is NOT validated
 * by the server even when the server enables HTTPS endpoint identification:
 * the client cert is issued for "non-existent.com" yet the connection succeeds.
 */
@Test
public void testClientEndpointNotValidated() throws Exception {
    String node = "0";
    // Client cert deliberately carries a hostname that will never match.
    clientCertStores = new CertStores(false, "non-existent.com");
    serverCertStores = new CertStores(true, "localhost");
    sslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores);
    sslClientConfigs = clientCertStores.getTrustingConfig(serverCertStores);
    // Server-side channel builder that forces HTTPS endpoint identification on
    // its SSLEngine before the handshake starts.
    SslChannelBuilder serverChannelBuilder = new SslChannelBuilder(Mode.SERVER) {
        @Override
        protected SslTransportLayer buildTransportLayer(SslFactory sslFactory, String id, SelectionKey key, String host) throws IOException {
            SocketChannel socketChannel = (SocketChannel) key.channel();
            SSLEngine sslEngine = sslFactory.createSslEngine(host, socketChannel.socket().getPort());
            SSLParameters sslParams = sslEngine.getSSLParameters();
            sslParams.setEndpointIdentificationAlgorithm("HTTPS");
            sslEngine.setSSLParameters(sslParams);
            TestSslTransportLayer transportLayer = new TestSslTransportLayer(id, key, sslEngine, BUFFER_SIZE, BUFFER_SIZE, BUFFER_SIZE);
            transportLayer.startHandshake();
            return transportLayer;
        }
    };
    serverChannelBuilder.configure(sslServerConfigs);
    server = new NioEchoServer(ListenerName.forSecurityProtocol(SecurityProtocol.SSL), SecurityProtocol.SSL, new TestSecurityConfig(sslServerConfigs), "localhost", serverChannelBuilder);
    server.start();
    createSelector(sslClientConfigs);
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    // Connection must become usable despite the mismatched client cert hostname.
    NetworkTestUtils.checkClientConnection(selector, node, 100, 10);
}
// Focal-method context for SslTransportLayer: close() plus the class's
// constructor/method signatures (bodies elided in this extract).
SslTransportLayer implements TransportLayer {

    /**
     * Initiates an orderly TLS shutdown: closes the engine's outbound side,
     * attempts to flush any buffered data and wrap/send the SSL close_notify,
     * then closes the socket and cancels the selection key. Failures while
     * sending the close message are logged (best effort), but the socket and
     * key are always cleaned up via the nested finally blocks.
     */
    @Override
    public void close() throws IOException {
        if (closing) return; // idempotent: only run the shutdown sequence once
        closing = true;
        sslEngine.closeOutbound();
        try {
            if (isConnected()) {
                // Pending data must drain first, otherwise close_notify would corrupt the stream.
                if (!flush(netWriteBuffer)) {
                    throw new IOException("Remaining data in the network buffer, can't send SSL close message.");
                }
                netWriteBuffer.clear();
                SSLEngineResult wrapResult = sslEngine.wrap(emptyBuf, netWriteBuffer);
                if (wrapResult.getStatus() != SSLEngineResult.Status.CLOSED) {
                    throw new IOException("Unexpected status returned by SSLEngine.wrap, expected CLOSED, received "
                            + wrapResult.getStatus() + ". Will not send close message to peer.");
                }
                netWriteBuffer.flip();
                flush(netWriteBuffer);
            }
        } catch (IOException ie) {
            // Best effort: a failed close_notify is not fatal, the socket is closed anyway.
            log.warn("Failed to send SSL Close message ", ie);
        } finally {
            try {
                socketChannel.socket().close();
                socketChannel.close();
            } finally {
                key.attach(null);
                key.cancel();
            }
        }
    }

    SslTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine, boolean enableRenegotiation);
    static SslTransportLayer create(String channelId, SelectionKey key, SSLEngine sslEngine);
    @Override boolean ready();
    @Override boolean finishConnect();
    @Override void disconnect();
    @Override SocketChannel socketChannel();
    @Override boolean isOpen();
    @Override boolean isConnected();
    @Override void close();
    @Override boolean hasPendingWrites();
    @Override void handshake();
    @Override int read(ByteBuffer dst);
    @Override long read(ByteBuffer[] dsts);
    @Override long read(ByteBuffer[] dsts, int offset, int length);
    @Override int write(ByteBuffer src);
    @Override long write(ByteBuffer[] srcs, int offset, int length);
    @Override long write(ByteBuffer[] srcs);
    Principal peerPrincipal();
    SSLSession sslSession();
    @Override void addInterestOps(int ops);
    @Override void removeInterestOps(int ops);
    @Override boolean isMute();
    @Override long transferFrom(FileChannel fileChannel, long position, long count);
}
/**
 * Verifies per-listener SSL config overrides: the global config requires
 * client auth, but the "client" listener overrides it to "none". A keystore-less
 * client therefore fails against the default listener and succeeds against the
 * overridden one.
 */
@Test
public void testListenerConfigOverride() throws Exception {
    String node = "0";
    ListenerName clientListenerName = new ListenerName("client");
    sslServerConfigs.put(SslConfigs.SSL_CLIENT_AUTH_CONFIG, "required");
    // Prefixed key overrides the global setting for the "client" listener only.
    sslServerConfigs.put(clientListenerName.configPrefix() + SslConfigs.SSL_CLIENT_AUTH_CONFIG, "none");
    server = createEchoServer(SecurityProtocol.SSL);
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    // 1) Client with a keystore connects fine to the default listener.
    createSelector(sslClientConfigs);
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(selector, node, 100, 10);
    selector.close();
    // 2) Strip the client keystore: default listener (client auth required) must reject.
    sslClientConfigs.remove(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
    sslClientConfigs.remove(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
    sslClientConfigs.remove(SslConfigs.SSL_KEY_PASSWORD_CONFIG);
    createSelector(sslClientConfigs);
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.waitForChannelClose(selector, node, ChannelState.AUTHENTICATE);
    selector.close();
    server.close();
    // 3) Same keystore-less client succeeds on the overridden "client" listener.
    server = createEchoServer(clientListenerName, SecurityProtocol.SSL);
    addr = new InetSocketAddress("localhost", server.port());
    createSelector(sslClientConfigs);
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(selector, node, 100, 10);
}

/**
 * Verifies that network-thread time is recorded (and reset on read) for the
 * SSL handshake, for sends, and for receives when the selector is created with
 * recordTimePerConnection = true.
 */
@Test
public void testNetworkThreadTimeRecorded() throws Exception {
    selector.close();
    // Re-create the selector with time recording enabled (last boolean arg).
    this.selector = new Selector(NetworkReceive.UNLIMITED, 5000, new Metrics(), Time.SYSTEM,
            "MetricGroup", new HashMap<String, String>(), false, true, channelBuilder);
    String node = "0";
    server = createEchoServer(SecurityProtocol.SSL);
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    String message = TestUtils.randomString(10 * 1024);
    NetworkTestUtils.waitForChannelReady(selector, node);
    KafkaChannel channel = selector.channel(node);
    // Handshake must have consumed some network-thread time; getter also resets it.
    assertTrue("SSL handshake time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
    assertEquals("Time not reset", 0, channel.getAndResetNetworkThreadTimeNanos());
    // Mute so only send time (not receive) is accumulated while sending.
    selector.mute(node);
    selector.send(new NetworkSend(node, ByteBuffer.wrap(message.getBytes())));
    while (selector.completedSends().isEmpty()) {
        selector.poll(100L);
    }
    assertTrue("Send time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
    assertEquals("Time not reset", 0, channel.getAndResetNetworkThreadTimeNanos());
    // Unmute and wait for the echoed response; receive time must now be recorded.
    selector.unmute(node);
    while (selector.completedReceives().isEmpty()) {
        selector.poll(100L);
    }
    assertTrue("Receive time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
}
// Focal-method context for AclBinding: equals() plus the class's
// constructor/method signatures (bodies elided in this extract).
AclBinding {
    /**
     * Two bindings are equal iff their resource and entry are both equal.
     * The instanceof check makes the comparison null-safe and type-safe.
     */
    @Override
    public boolean equals(Object o) {
        if (!(o instanceof AclBinding))
            return false;
        AclBinding other = (AclBinding) o;
        return resource.equals(other.resource) && entry.equals(other.entry);
    }
    AclBinding(Resource resource, AccessControlEntry entry);
    boolean isUnknown();
    Resource resource();
    final AccessControlEntry entry();
    AclBindingFilter toFilter();
    @Override String toString();
    @Override boolean equals(Object o);
    @Override int hashCode();
}
/**
 * Exercises AclBinding equality and AclBindingFilter matching: a filter may
 * match a binding without being equal to it, ANY matches everything, and the
 * narrower filters (principal / permission-type / resource) match only the
 * bindings sharing that attribute.
 */
@Test
public void testMatching() throws Exception {
    // Equality: reflexive, and structural against a freshly built copy.
    assertTrue(ACL1.equals(ACL1));
    final AclBinding acl1Copy = new AclBinding(
        new Resource(ResourceType.TOPIC, "mytopic"),
        new AccessControlEntry("User:ANONYMOUS", "", AclOperation.ALL, AclPermissionType.ALLOW));
    assertTrue(ACL1.equals(acl1Copy));
    assertTrue(acl1Copy.equals(ACL1));
    assertTrue(ACL2.equals(ACL2));
    assertFalse(ACL1.equals(ACL2));
    assertFalse(ACL2.equals(ACL1));
    // ANY matches every binding but equals none of them.
    assertTrue(AclBindingFilter.ANY.matches(ACL1));
    assertFalse(AclBindingFilter.ANY.equals(ACL1));
    assertTrue(AclBindingFilter.ANY.matches(ACL2));
    assertFalse(AclBindingFilter.ANY.equals(ACL2));
    assertTrue(AclBindingFilter.ANY.matches(ACL3));
    assertFalse(AclBindingFilter.ANY.equals(ACL3));
    assertTrue(AclBindingFilter.ANY.equals(AclBindingFilter.ANY));
    // Principal-scoped filter: matches only the ANONYMOUS bindings.
    assertTrue(ANY_ANONYMOUS.matches(ACL1));
    assertFalse(ANY_ANONYMOUS.equals(ACL1));
    assertFalse(ANY_ANONYMOUS.matches(ACL2));
    assertFalse(ANY_ANONYMOUS.equals(ACL2));
    assertTrue(ANY_ANONYMOUS.matches(ACL3));
    assertFalse(ANY_ANONYMOUS.equals(ACL3));
    // Permission-type filter: only the DENY binding matches.
    assertFalse(ANY_DENY.matches(ACL1));
    assertFalse(ANY_DENY.matches(ACL2));
    assertTrue(ANY_DENY.matches(ACL3));
    // Resource filter: only bindings on "mytopic" match.
    assertTrue(ANY_MYTOPIC.matches(ACL1));
    assertTrue(ANY_MYTOPIC.matches(ACL2));
    assertFalse(ANY_MYTOPIC.matches(ACL3));
    // Unknown bindings still participate in matching.
    assertTrue(ANY_ANONYMOUS.matches(UNKNOWN_ACL));
    assertTrue(ANY_DENY.matches(UNKNOWN_ACL));
    assertTrue(UNKNOWN_ACL.equals(UNKNOWN_ACL));
    assertFalse(ANY_MYTOPIC.matches(UNKNOWN_ACL));
}
// Focal-method context for AclBinding: isUnknown() plus the class's
// constructor/method signatures (bodies elided in this extract).
AclBinding {
    /**
     * A binding is unknown when either of its components is unknown:
     * check the resource first, then fall through to the entry.
     */
    public boolean isUnknown() {
        if (resource.isUnknown())
            return true;
        return entry.isUnknown();
    }
    AclBinding(Resource resource, AccessControlEntry entry);
    boolean isUnknown();
    Resource resource();
    final AccessControlEntry entry();
    AclBindingFilter toFilter();
    @Override String toString();
    @Override boolean equals(Object o);
    @Override int hashCode();
}
/**
 * Verifies isUnknown(): only the deliberately unknown binding reports true;
 * all concrete bindings and filters report false.
 */
@Test
public void testUnknowns() throws Exception {
    assertFalse(ACL1.isUnknown());
    assertFalse(ACL2.isUnknown());
    assertFalse(ACL3.isUnknown());
    assertFalse(ANY_ANONYMOUS.isUnknown());
    assertFalse(ANY_DENY.isUnknown());
    assertFalse(ANY_MYTOPIC.isUnknown());
    assertTrue(UNKNOWN_ACL.isUnknown());
}
// Focal-method context for AclBinding: toFilter() plus the class's
// constructor/method signatures (bodies elided in this extract).
AclBinding {
    /**
     * Converts this concrete binding into the equivalent filter by converting
     * each component to its own filter form.
     */
    public AclBindingFilter toFilter() {
        return new AclBindingFilter(this.resource.toFilter(), this.entry.toFilter());
    }
    AclBinding(Resource resource, AccessControlEntry entry);
    boolean isUnknown();
    Resource resource();
    final AccessControlEntry entry();
    AclBindingFilter toFilter();
    @Override String toString();
    @Override boolean equals(Object o);
    @Override int hashCode();
}
/**
 * Filters derived from concrete bindings are fully definite (no indefinite
 * field), while the wildcard-bearing filters cannot match at most one binding.
 */
@Test
public void testMatchesAtMostOne() throws Exception {
    assertEquals(null, ACL1.toFilter().findIndefiniteField());
    assertEquals(null, ACL2.toFilter().findIndefiniteField());
    assertEquals(null, ACL3.toFilter().findIndefiniteField());
    assertFalse(ANY_ANONYMOUS.matchesAtMostOne());
    assertFalse(ANY_DENY.matchesAtMostOne());
    assertFalse(ANY_MYTOPIC.matchesAtMostOne());
}
// Focal-method context for JaasContext: the public load() entry point plus
// the class's constructor/method signatures (bodies elided in this extract).
JaasContext {
    /**
     * Resolves the JAAS context names for the given context type and delegates
     * to the internal load(). CLIENT contexts must not carry a listener name;
     * SERVER contexts must, and additionally get a listener-prefixed context
     * name (e.g. "plaintext.KafkaServer"-style, lower-cased with Locale.ROOT
     * for locale-independent behavior).
     *
     * @throws IllegalArgumentException on a type/listenerName mismatch or an
     *         unexpected context type
     */
    public static JaasContext load(JaasContext.Type contextType, ListenerName listenerName,
                                   Map<String, ?> configs) {
        String listenerContextName;
        String globalContextName;
        switch (contextType) {
            case CLIENT:
                if (listenerName != null)
                    throw new IllegalArgumentException("listenerName should be null for CLIENT");
                globalContextName = GLOBAL_CONTEXT_NAME_CLIENT;
                listenerContextName = null;
                break;
            case SERVER:
                if (listenerName == null)
                    throw new IllegalArgumentException("listenerName should not be null for SERVER");
                globalContextName = GLOBAL_CONTEXT_NAME_SERVER;
                listenerContextName = listenerName.value().toLowerCase(Locale.ROOT) + "." + GLOBAL_CONTEXT_NAME_SERVER;
                break;
            default:
                throw new IllegalArgumentException("Unexpected context type " + contextType);
        }
        return load(contextType, listenerContextName, globalContextName, configs);
    }
    JaasContext(String name, Type type, Configuration configuration);
    static JaasContext load(JaasContext.Type contextType, ListenerName listenerName, Map<String, ?> configs);
    String name();
    Type type();
    Configuration configuration();
    List<AppConfigurationEntry> configurationEntries();
    String configEntryOption(String key, String loginModuleName);
}
/**
 * A SERVER context load must fail when the listener name has no matching
 * JAAS section ("plaintext" vs. the configured "Server").
 */
@Test(expected = IllegalArgumentException.class)
public void testLoadForServerWithWrongListenerName() throws IOException {
    writeConfiguration("Server", "test.LoginModule required;");
    JaasContext.load(JaasContext.Type.SERVER, new ListenerName("plaintext"), Collections.<String, Object>emptyMap());
}

/**
 * A CLIENT context load must reject a non-null listener name.
 */
@Test(expected = IllegalArgumentException.class)
public void testLoadForClientWithListenerName() {
    JaasContext.load(JaasContext.Type.CLIENT, new ListenerName("foo"), Collections.<String, Object>emptyMap());
}
// Focal-method context for SslFactory: configure() plus the class's
// constructor/method signatures (bodies elided in this extract).
SslFactory implements Configurable {
    /**
     * Reads all SSL settings from the config map (protocol, provider, cipher
     * suites, enabled protocols, endpoint identification, secure-random
     * implementation, client-auth mode, key/trust manager algorithms), loads
     * the keystore and truststore, and builds the SSLContext. Any failure while
     * obtaining the SecureRandom or creating the context is wrapped in a
     * KafkaException (the original cause is preserved).
     */
    @Override
    public void configure(Map<String, ?> configs) throws KafkaException {
        this.protocol = (String) configs.get(SslConfigs.SSL_PROTOCOL_CONFIG);
        this.provider = (String) configs.get(SslConfigs.SSL_PROVIDER_CONFIG);
        @SuppressWarnings("unchecked")
        List<String> cipherSuitesList = (List<String>) configs.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG);
        if (cipherSuitesList != null)
            this.cipherSuites = cipherSuitesList.toArray(new String[cipherSuitesList.size()]);
        @SuppressWarnings("unchecked")
        List<String> enabledProtocolsList = (List<String>) configs.get(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG);
        if (enabledProtocolsList != null)
            this.enabledProtocols = enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]);
        String endpointIdentification = (String) configs.get(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG);
        if (endpointIdentification != null)
            this.endpointIdentification = endpointIdentification;
        String secureRandomImplementation = (String) configs.get(SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG);
        if (secureRandomImplementation != null) {
            try {
                this.secureRandomImplementation = SecureRandom.getInstance(secureRandomImplementation);
            } catch (GeneralSecurityException e) {
                throw new KafkaException(e);
            }
        }
        // An explicit override (e.g. per-listener) takes precedence over the config map.
        String clientAuthConfig = clientAuthConfigOverride;
        if (clientAuthConfig == null)
            clientAuthConfig = (String) configs.get(SslConfigs.SSL_CLIENT_AUTH_CONFIG);
        if (clientAuthConfig != null) {
            // "required" -> needClientAuth, "requested" -> wantClientAuth, anything else -> neither.
            if (clientAuthConfig.equals("required"))
                this.needClientAuth = true;
            else if (clientAuthConfig.equals("requested"))
                this.wantClientAuth = true;
        }
        this.kmfAlgorithm = (String) configs.get(SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG);
        this.tmfAlgorithm = (String) configs.get(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG);
        createKeystore((String) configs.get(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG),
                (String) configs.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG),
                (Password) configs.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG),
                (Password) configs.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG));
        createTruststore((String) configs.get(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG),
                (String) configs.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG),
                (Password) configs.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG));
        try {
            this.sslContext = createSSLContext();
        } catch (Exception e) {
            throw new KafkaException(e);
        }
    }
    SslFactory(Mode mode);
    SslFactory(Mode mode, String clientAuthConfigOverride);
    @Override void configure(Map<String, ?> configs);
    SSLEngine createSslEngine(String peerHost, int peerPort);
    SSLContext sslContext();
}
/**
 * Regression test: a truststore may legitimately be configured without a
 * password (the password is only used for integrity checking), so
 * SslFactory.configure() must not throw when it is absent.
 */
@Test
public void testSslFactoryWithoutPasswordConfiguration() throws Exception {
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> serverSslConfig = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server");
    serverSslConfig.remove(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG);
    SslFactory sslFactory = new SslFactory(Mode.SERVER);
    // Previously wrapped in catch (Exception e) { fail(...) }, which discarded
    // the stack trace; letting the exception propagate fails the test with
    // full diagnostics and is the idiomatic JUnit form.
    sslFactory.configure(serverSslConfig);
}
// Focal-method context for KerberosName: parse() plus the class's
// constructor/method signatures (bodies elided in this extract).
KerberosName {
    /**
     * Parses a Kerberos principal of the form serviceName[/hostName]@realm.
     * If NAME_PARSER does not match: a string containing '@' is malformed
     * (IllegalArgumentException); otherwise the whole string is treated as a
     * bare service name with no host and no realm.
     */
    public static KerberosName parse(String principalName) {
        Matcher match = NAME_PARSER.matcher(principalName);
        if (!match.matches()) {
            if (principalName.contains("@")) {
                throw new IllegalArgumentException("Malformed Kerberos name: " + principalName);
            } else {
                return new KerberosName(principalName, null, null);
            }
        } else {
            // Groups 1/3/4 map to service, host, realm; exact group numbering
            // depends on NAME_PARSER, which is not visible here — TODO confirm.
            return new KerberosName(match.group(1), match.group(3), match.group(4));
        }
    }
    KerberosName(String serviceName, String hostName, String realm);
    static KerberosName parse(String principalName);
    @Override String toString();
    String serviceName();
    String hostName();
    String realm();
}
@Test public void testParse() throws IOException { List<String> rules = new ArrayList<>(Arrays.asList( "RULE:[1:$1](App\\..*)s/App\\.(.*)/$1/g", "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g", "DEFAULT" )); KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); KerberosName name = KerberosName.parse("App.service-name/example.com@REALM.COM"); assertEquals("App.service-name", name.serviceName()); assertEquals("example.com", name.hostName()); assertEquals("REALM.COM", name.realm()); assertEquals("service-name", shortNamer.shortName(name)); name = KerberosName.parse("App.service-name@REALM.COM"); assertEquals("App.service-name", name.serviceName()); assertNull(name.hostName()); assertEquals("REALM.COM", name.realm()); assertEquals("service-name", shortNamer.shortName(name)); name = KerberosName.parse("user/host@REALM.COM"); assertEquals("user", name.serviceName()); assertEquals("host", name.hostName()); assertEquals("REALM.COM", name.realm()); assertEquals("user", shortNamer.shortName(name)); } @Test public void testParse() throws IOException { List<String> rules = Arrays.asList( "RULE:[1:$1](App\\..*)s/App\\.(.*)/$1/g", "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g", "DEFAULT" ); KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); KerberosName name = KerberosName.parse("App.service-name/example.com@REALM.COM"); assertEquals("App.service-name", name.serviceName()); assertEquals("example.com", name.hostName()); assertEquals("REALM.COM", name.realm()); assertEquals("service-name", shortNamer.shortName(name)); name = KerberosName.parse("App.service-name@REALM.COM"); assertEquals("App.service-name", name.serviceName()); assertNull(name.hostName()); assertEquals("REALM.COM", name.realm()); assertEquals("service-name", shortNamer.shortName(name)); name = KerberosName.parse("user/host@REALM.COM"); assertEquals("user", name.serviceName()); assertEquals("host", name.hostName()); assertEquals("REALM.COM", 
name.realm()); assertEquals("user", shortNamer.shortName(name)); } @Test public void testToLowerCase() throws Exception { List<String> rules = Arrays.asList( "RULE:[1:$1]/L", "RULE:[2:$1](Test.*)s/ABC "RULE:[2:$1](ABC.*)s/ABC/XYZ/g/L", "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g/L", "RULE:[2:$1]/L", "DEFAULT" ); KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); KerberosName name = KerberosName.parse("User@REALM.COM"); assertEquals("user", shortNamer.shortName(name)); name = KerberosName.parse("TestABC/host@FOO.COM"); assertEquals("test", shortNamer.shortName(name)); name = KerberosName.parse("ABC_User_ABC/host@FOO.COM"); assertEquals("xyz_user_xyz", shortNamer.shortName(name)); name = KerberosName.parse("App.SERVICE-name/example.com@REALM.COM"); assertEquals("service-name", shortNamer.shortName(name)); name = KerberosName.parse("User/root@REALM.COM"); assertEquals("user", shortNamer.shortName(name)); } @Test public void testToUpperCase() throws Exception { List<String> rules = Arrays.asList( "RULE:[1:$1]/U", "RULE:[2:$1](Test.*)s/ABC "RULE:[2:$1](ABC.*)s/ABC/XYZ/g/U", "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g/U", "RULE:[2:$1]/U", "DEFAULT" ); KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); KerberosName name = KerberosName.parse("User@REALM.COM"); assertEquals("USER", shortNamer.shortName(name)); name = KerberosName.parse("TestABC/host@FOO.COM"); assertEquals("TEST", shortNamer.shortName(name)); name = KerberosName.parse("ABC_User_ABC/host@FOO.COM"); assertEquals("XYZ_USER_XYZ", shortNamer.shortName(name)); name = KerberosName.parse("App.SERVICE-name/example.com@REALM.COM"); assertEquals("SERVICE-NAME", shortNamer.shortName(name)); name = KerberosName.parse("User/root@REALM.COM"); assertEquals("USER", shortNamer.shortName(name)); }
// Focal-method context for ScramCredentialUtils: credentialToString() plus
// the class's method signatures (bodies elided in this extract).
ScramCredentialUtils {
    /**
     * Serializes a credential as "salt=...,stored_key=...,server_key=...,iterations=N"
     * (key names per the SALT/STORED_KEY/SERVER_KEY/ITERATIONS constants),
     * with the byte-array fields Base64-encoded. Inverse of credentialFromString.
     */
    public static String credentialToString(ScramCredential credential) {
        return String.format("%s=%s,%s=%s,%s=%s,%s=%d",
                SALT,
                DatatypeConverter.printBase64Binary(credential.salt()),
                STORED_KEY,
                DatatypeConverter.printBase64Binary(credential.storedKey()),
                SERVER_KEY,
                DatatypeConverter.printBase64Binary(credential.serverKey()),
                ITERATIONS,
                credential.iterations());
    }
    static String credentialToString(ScramCredential credential);
    static ScramCredential credentialFromString(String str);
    static void createCache(CredentialCache cache, Collection<String> enabledMechanisms);
}
/**
 * Salts are randomly generated, so two credentials produced from the same
 * password and iteration count must serialize to different strings.
 */
@Test
public void generateCredential() {
    final ScramCredential first = formatter.generateCredential("password", 4096);
    final ScramCredential second = formatter.generateCredential("password", 4096);
    final String firstEncoded = ScramCredentialUtils.credentialToString(first);
    final String secondEncoded = ScramCredentialUtils.credentialToString(second);
    assertNotEquals(firstEncoded, secondEncoded);
}
// Focal-method context for ScramCredentialUtils: credentialFromString() plus
// the class's method signatures (bodies elided in this extract).
ScramCredentialUtils {
    /**
     * Parses a credential string produced by credentialToString. Requires
     * exactly the four expected keys (salt, stored key, server key,
     * iterations); anything else is rejected.
     *
     * @throws IllegalArgumentException if the string is not a valid credential
     */
    public static ScramCredential credentialFromString(String str) {
        Properties props = toProps(str);
        // Exactly 4 properties AND all four required keys must be present.
        if (props.size() != 4 || !props.containsKey(SALT) || !props.containsKey(STORED_KEY) ||
                !props.containsKey(SERVER_KEY) || !props.containsKey(ITERATIONS)) {
            throw new IllegalArgumentException("Credentials not valid: " + str);
        }
        byte[] salt = DatatypeConverter.parseBase64Binary(props.getProperty(SALT));
        byte[] storedKey = DatatypeConverter.parseBase64Binary(props.getProperty(STORED_KEY));
        byte[] serverKey = DatatypeConverter.parseBase64Binary(props.getProperty(SERVER_KEY));
        int iterations = Integer.parseInt(props.getProperty(ITERATIONS));
        return new ScramCredential(salt, storedKey, serverKey, iterations);
    }
    static String credentialToString(ScramCredential credential);
    static ScramCredential credentialFromString(String str);
    static void createCache(CredentialCache cache, Collection<String> enabledMechanisms);
}
/**
 * A string without the four required credential fields must be rejected.
 */
@Test(expected = IllegalArgumentException.class)
public void invalidCredential() {
    ScramCredentialUtils.credentialFromString("abc");
}
// Focal-method context for ScramCredentialUtils: createCache() plus the
// class's method signatures (bodies elided in this extract).
ScramCredentialUtils {
    /**
     * Creates a ScramCredential cache for every SCRAM mechanism that appears
     * in enabledMechanisms; non-SCRAM entries (e.g. "PLAIN") are ignored
     * because iteration is over ScramMechanism.mechanismNames().
     */
    public static void createCache(CredentialCache cache, Collection<String> enabledMechanisms) {
        for (String mechanism : ScramMechanism.mechanismNames()) {
            if (enabledMechanisms.contains(mechanism))
                cache.createCache(mechanism, ScramCredential.class);
        }
    }
    static String credentialToString(ScramCredential credential);
    static ScramCredential credentialFromString(String str);
    static void createCache(CredentialCache cache, Collection<String> enabledMechanisms);
}
/**
 * Verifies createCache: caches exist only for enabled SCRAM mechanisms
 * (SHA-512 enabled, SHA-256 not; "PLAIN" is ignored), and a created cache
 * stores and retrieves credentials per user.
 */
@Test
public void scramCredentialCache() throws Exception {
    CredentialCache cache = new CredentialCache();
    ScramCredentialUtils.createCache(cache, Arrays.asList("SCRAM-SHA-512", "PLAIN"));
    assertNotNull("Cache not created for enabled mechanism",
            cache.cache(ScramMechanism.SCRAM_SHA_512.mechanismName(), ScramCredential.class));
    assertNull("Cache created for disabled mechanism",
            cache.cache(ScramMechanism.SCRAM_SHA_256.mechanismName(), ScramCredential.class));
    CredentialCache.Cache<ScramCredential> sha512Cache = cache.cache(ScramMechanism.SCRAM_SHA_512.mechanismName(), ScramCredential.class);
    ScramFormatter formatter = new ScramFormatter(ScramMechanism.SCRAM_SHA_512);
    ScramCredential credentialA = formatter.generateCredential("password", 4096);
    sha512Cache.put("userA", credentialA);
    assertEquals(credentialA, sha512Cache.get("userA"));
    assertNull("Invalid user credential", sha512Cache.get("userB"));
}
// Focal-method context for KafkaPrincipal: hashCode() plus the class's
// constructor/method signatures (bodies elided in this extract).
KafkaPrincipal implements Principal {
    /**
     * Standard 31-multiplier hash combining principalType and name,
     * consistent with equals() comparing the same two fields.
     */
    @Override
    public int hashCode() {
        int result = principalType.hashCode();
        result = 31 * result + name.hashCode();
        return result;
    }
    KafkaPrincipal(String principalType, String name);
    static KafkaPrincipal fromString(String str);
    @Override String toString();
    @Override boolean equals(Object o);
    @Override int hashCode();
    @Override String getName();
    String getPrincipalType();
    static final String SEPARATOR;
    static final String USER_TYPE;
    final static KafkaPrincipal ANONYMOUS;
}
/**
 * Two principals constructed from the same type/name pair must be equal and
 * must agree on hashCode (equals/hashCode contract).
 */
@Test
public void testEqualsAndHashCode() {
    final String userName = "KafkaUser";
    final KafkaPrincipal first = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, userName);
    final KafkaPrincipal second = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, userName);
    Assert.assertEquals(first.hashCode(), second.hashCode());
    Assert.assertEquals(first, second);
}
// Focal-method context for ConfigDef: the canonical define(ConfigKey) plus the
// class's many define(...) convenience overloads and other signatures
// (bodies elided in this extract).
ConfigDef {
    /**
     * Registers a config key. Rejects duplicate names, records the key's group
     * (preserving first-seen group order) and returns this for chaining.
     *
     * @throws ConfigException if a key with the same name was already defined
     */
    public ConfigDef define(ConfigKey key) {
        if (configKeys.containsKey(key.name)) {
            throw new ConfigException("Configuration " + key.name + " is defined twice.");
        }
        if (key.group != null && !groups.contains(key.group)) {
            groups.add(key.group);
        }
        configKeys.put(key.name, key);
        return this;
    }
    ConfigDef();
    ConfigDef(ConfigDef base);
    Set<String> names();
    ConfigDef define(ConfigKey key);
    // Overloads below progressively drop validator / default / recommender / dependents.
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation);
    ConfigDef define(String name, Type type, Importance importance, String documentation);
    ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance);
    Map<String, ConfigKey> configKeys();
    List<String> groups();
    ConfigDef withClientSslSupport();
    ConfigDef withClientSaslSupport();
    Map<String, Object> parse(Map<?, ?> props);
    List<ConfigValue> validate(Map<String, String> props);
    Map<String, ConfigValue> validateAll(Map<String, String> props);
    static Object parseType(String name, Object value, Type type);
    static String convertToString(Object parsedValue, Type type);
    String toHtmlTable();
    String toRst();
    String toEnrichedRst();
    void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child);
    static final Object NO_DEFAULT_VALUE;
}
/** A default value of the wrong type ("hello" for INT) must be rejected. */
@Test(expected = ConfigException.class)
public void testInvalidDefault() {
    new ConfigDef().define("a", Type.INT, "hello", Importance.HIGH, "docs");
}

/** Defining the same config name twice must be rejected. */
@Test(expected = ConfigException.class)
public void testDefinedTwice() {
    new ConfigDef().define("a", Type.STRING, Importance.HIGH, "docs").define("a", Type.INT, Importance.HIGH, "docs");
}

/** A default outside the declared numeric range must be rejected. */
@Test(expected = ConfigException.class)
public void testInvalidDefaultRange() {
    new ConfigDef().define("name", Type.INT, -1, Range.between(0, 10), Importance.HIGH, "docs");
}

/** A default not in the declared valid-string set must be rejected. */
@Test(expected = ConfigException.class)
public void testInvalidDefaultString() {
    new ConfigDef().define("name", Type.STRING, "bad", ValidString.in("valid", "values"), Importance.HIGH, "docs");
}
// Focal-method context for ConfigDef: parse() plus the class's constructor /
// define-overload / utility signatures (bodies elided in this extract).
ConfigDef {
    /**
     * Parses and validates the supplied properties against every defined key.
     * First checks that all keys referenced as dependents are themselves
     * defined; then produces a map containing a parsed value for EVERY defined
     * key (parseValue is told whether the property was explicitly supplied,
     * so defaults can be applied for absent keys).
     *
     * @throws ConfigException if dependents reference undefined configs
     */
    public Map<String, Object> parse(Map<?, ?> props) {
        List<String> undefinedConfigKeys = undefinedDependentConfigs();
        if (!undefinedConfigKeys.isEmpty()) {
            String joined = Utils.join(undefinedConfigKeys, ",");
            throw new ConfigException("Some configurations in are referred in the dependents, but not defined: " + joined);
        }
        Map<String, Object> values = new HashMap<>();
        for (ConfigKey key : configKeys.values())
            values.put(key.name, parseValue(key, props.get(key.name), props.containsKey(key.name)));
        return values;
    }
    ConfigDef();
    ConfigDef(ConfigDef base);
    Set<String> names();
    ConfigDef define(ConfigKey key);
    // Overloads below progressively drop validator / default / recommender / dependents.
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender);
    ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName);
    ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation);
    ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation);
    ConfigDef define(String name, Type type, Importance importance, String documentation);
    ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance);
    Map<String, ConfigKey> configKeys();
    List<String> groups();
    ConfigDef withClientSslSupport();
    ConfigDef withClientSaslSupport();
    Map<String, Object> parse(Map<?, ?> props);
    List<ConfigValue> validate(Map<String, String> props);
    Map<String, ConfigValue> validateAll(Map<String, String> props);
    static Object parseType(String name, Object value, Type type);
    static String convertToString(Object parsedValue, Type type);
    String toHtmlTable();
    String toRst();
    String toEnrichedRst();
    void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child);
    static final Object NO_DEFAULT_VALUE;
}
@Test
public void testSslPasswords() {
    // The three SSL password settings must parse into Password wrappers (so
    // equality works against new Password(...)) while toString() yields the
    // HIDDEN placeholder, keeping secrets out of logs.
    ConfigDef configDef = new ConfigDef();
    SslConfigs.addClientSslSupport(configDef);

    Properties properties = new Properties();
    properties.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "key_password");
    properties.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "keystore_password");
    properties.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "truststore_password");

    Map<String, Object> parsed = configDef.parse(properties);

    assertEquals(new Password("key_password"), parsed.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG));
    assertEquals(Password.HIDDEN, parsed.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG).toString());
    assertEquals(new Password("keystore_password"), parsed.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG));
    assertEquals(Password.HIDDEN, parsed.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG).toString());
    assertEquals(new Password("truststore_password"), parsed.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG));
    assertEquals(Password.HIDDEN, parsed.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).toString());
}
// Skeleton view of ConfigDef: only parseForValidate(Map, Map) carries a body
// here; the remaining lines are body-less member signatures documenting the
// class's public surface (define(...) overloads, parse/validate entry points,
// doc renderers, embed, and the NO_DEFAULT_VALUE sentinel).
//
// parseForValidate: starts parsing from the configs returned by
// getConfigsWithNoParent() — presumably the "root" configs that no other
// config lists as a dependent (TODO confirm against getConfigsWithNoParent) —
// delegating each to the per-name parseForValidate(name, props, parsed,
// configValues) overload, which is expected to recurse into dependents and
// record per-config errors into configValues. Returns the map of parsed
// values accumulated across all roots.
ConfigDef { Map<String, Object> parseForValidate(Map<String, String> props, Map<String, ConfigValue> configValues) { Map<String, Object> parsed = new HashMap<>(); Set<String> configsWithNoParent = getConfigsWithNoParent(); for (String name: configsWithNoParent) { parseForValidate(name, props, parsed, configValues); } return parsed; } ConfigDef(); ConfigDef(ConfigDef base); Set<String> names(); ConfigDef define(ConfigKey key); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Importance 
importance, String documentation, String group, int orderInGroup, Width width, String displayName); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation); ConfigDef define(String name, Type type, Importance importance, String documentation); ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance); Map<String, ConfigKey> configKeys(); List<String> groups(); ConfigDef withClientSslSupport(); ConfigDef withClientSaslSupport(); Map<String, Object> parse(Map<?, ?> props); List<ConfigValue> validate(Map<String, String> props); Map<String, ConfigValue> validateAll(Map<String, String> props); static Object parseType(String name, Object value, Type type); static String convertToString(Object parsedValue, Type type); String toHtmlTable(); String toRst(); String toEnrichedRst(); void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child); static final Object NO_DEFAULT_VALUE; }
@Test
public void testParseForValidate() {
    // parseForValidate must parse the supplied props, leave missing required
    // configs as null, and record a "missing required configuration" error per
    // visit of the config.
    Map<String, Object> expectedParsed = new HashMap<>();
    expectedParsed.put("a", 1);
    expectedParsed.put("b", null);
    expectedParsed.put("c", null);
    expectedParsed.put("d", 10);

    String errorMessageB = "Missing required configuration \"b\" which has no default value.";
    String errorMessageC = "Missing required configuration \"c\" which has no default value.";

    // "b" is a dependent of both "a" and "d", so it is visited twice and its
    // error message is expected to appear twice.
    ConfigValue configA = new ConfigValue("a", 1, Collections.<Object>emptyList(), Collections.<String>emptyList());
    ConfigValue configB = new ConfigValue("b", null, Collections.<Object>emptyList(), Arrays.asList(errorMessageB, errorMessageB));
    ConfigValue configC = new ConfigValue("c", null, Collections.<Object>emptyList(), Arrays.asList(errorMessageC));
    ConfigValue configD = new ConfigValue("d", 10, Collections.<Object>emptyList(), Collections.<String>emptyList());

    Map<String, ConfigValue> expected = new HashMap<>();
    expected.put("a", configA);
    expected.put("b", configB);
    expected.put("c", configC);
    expected.put("d", configD);

    ConfigDef def = new ConfigDef()
            .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false))
            .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true))
            .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true))
            .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false));

    Map<String, String> props = new HashMap<>();
    props.put("a", "1");
    props.put("d", "10");

    // Pre-seed one empty ConfigValue per defined key, as validateAll would.
    Map<String, ConfigValue> configValues = new HashMap<>();
    for (String name : def.configKeys().keySet()) {
        configValues.put(name, new ConfigValue(name));
    }

    Map<String, Object> parsed = def.parseForValidate(props, configValues);

    assertEquals(expectedParsed, parsed);
    assertEquals(expected, configValues);
}