language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/common/logging/internal/LoggerFactoryImpl.java
{ "start": 1316, "end": 1463 }
class ____, but also // scans the classloader hierarchy for programmatic configuration. Here we // just delegate to use the String
name
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/metrics/MetricsTrackingStateFactoryTest.java
{ "start": 3080, "end": 12356 }
class ____ { @Parameter public Tuple2<Boolean, Boolean> enableLatencyOrSizeTracking; @Parameters(name = "enable latency or size tracking: {0}") public static Collection<Tuple2<Boolean, Boolean>> enabled() { return Arrays.asList( Tuple2.of(true, true), Tuple2.of(true, false), Tuple2.of(false, true), Tuple2.of(false, false)); } private LatencyTrackingStateConfig getLatencyTrackingStateConfig() { UnregisteredMetricsGroup metricsGroup = new UnregisteredMetricsGroup(); return LatencyTrackingStateConfig.newBuilder() .setEnabled(enableLatencyOrSizeTracking.f0) .setMetricGroup(metricsGroup) .build(); } private SizeTrackingStateConfig getSizeTrackingStateConfig() { UnregisteredMetricsGroup metricsGroup = new UnregisteredMetricsGroup(); return SizeTrackingStateConfig.newBuilder() .setEnabled(enableLatencyOrSizeTracking.f1) .setMetricGroup(metricsGroup) .build(); } @TestTemplate @SuppressWarnings("unchecked") <K, N> void testTrackValueState() throws Exception { try (MockKeyedStateBackend<String> backend = createMock()) { ValueStateDescriptor<String> valueStateDescriptor = new ValueStateDescriptor<>("value", String.class); InternalValueState<K, N, String> valueState = backend.createOrUpdateInternalState( VoidNamespaceSerializer.INSTANCE, valueStateDescriptor); InternalKvState<K, ?, ?> latencyTrackingState = MetricsTrackingStateFactory.createStateAndWrapWithMetricsTrackingIfEnabled( valueState, null, valueStateDescriptor, getLatencyTrackingStateConfig(), getSizeTrackingStateConfig()); if (enableLatencyOrSizeTracking.f0 || enableLatencyOrSizeTracking.f1) { assertThat(latencyTrackingState).isInstanceOf(MetricsTrackingValueState.class); } else { assertThat(latencyTrackingState).isEqualTo(valueState); } } } @TestTemplate @SuppressWarnings("unchecked") <K, N> void testTrackListState() throws Exception { try (MockKeyedStateBackend<String> backend = createMock()) { ListStateDescriptor<String> listStateDescriptor = new ListStateDescriptor<>("list", String.class); InternalListState<K, 
N, String> listState = backend.createOrUpdateInternalState( VoidNamespaceSerializer.INSTANCE, listStateDescriptor); InternalKvState<K, N, ?> latencyTrackingState = MetricsTrackingStateFactory.createStateAndWrapWithMetricsTrackingIfEnabled( listState, null, listStateDescriptor, getLatencyTrackingStateConfig(), getSizeTrackingStateConfig()); if (enableLatencyOrSizeTracking.f0 || enableLatencyOrSizeTracking.f1) { assertThat(latencyTrackingState).isInstanceOf(MetricsTrackingListState.class); } else { assertThat(latencyTrackingState).isEqualTo(listState); } } } @TestTemplate @SuppressWarnings("unchecked") <K, N> void testTrackMapState() throws Exception { try (MockKeyedStateBackend<String> backend = createMock()) { MapStateDescriptor<String, Long> mapStateDescriptor = new MapStateDescriptor<>("map", String.class, Long.class); InternalMapState<K, N, String, Long> mapState = backend.createOrUpdateInternalState( VoidNamespaceSerializer.INSTANCE, mapStateDescriptor); InternalKvState<K, N, ?> latencyTrackingState = MetricsTrackingStateFactory.createStateAndWrapWithMetricsTrackingIfEnabled( mapState, null, mapStateDescriptor, getLatencyTrackingStateConfig(), getSizeTrackingStateConfig()); if (enableLatencyOrSizeTracking.f0 || enableLatencyOrSizeTracking.f1) { assertThat(latencyTrackingState).isInstanceOf(MetricsTrackingMapState.class); } else { assertThat(latencyTrackingState).isEqualTo(mapState); } } } @TestTemplate @SuppressWarnings("unchecked") <K, N> void testTrackReducingState() throws Exception { try (MockKeyedStateBackend<String> backend = createMock()) { ReducingStateDescriptor<Long> reducingStateDescriptor = new ReducingStateDescriptor<>("reducing", Long::sum, Long.class); InternalReducingState<K, N, Long> reducingState = backend.createOrUpdateInternalState( VoidNamespaceSerializer.INSTANCE, reducingStateDescriptor); InternalKvState<K, N, ?> latencyTrackingState = MetricsTrackingStateFactory.createStateAndWrapWithMetricsTrackingIfEnabled( reducingState, null, 
reducingStateDescriptor, getLatencyTrackingStateConfig(), getSizeTrackingStateConfig()); if (enableLatencyOrSizeTracking.f0 || enableLatencyOrSizeTracking.f1) { assertThat(latencyTrackingState).isInstanceOf(MetricsTrackingReducingState.class); } else { assertThat(latencyTrackingState).isEqualTo(reducingState); } } } @TestTemplate @SuppressWarnings("unchecked") <K, N> void testTrackAggregatingState() throws Exception { try (MockKeyedStateBackend<String> backend = createMock()) { AggregatingStateDescriptor<Long, Long, Long> aggregatingStateDescriptor = new AggregatingStateDescriptor<>( "aggregate", new AggregateFunction<Long, Long, Long>() { private static final long serialVersionUID = 1L; @Override public Long createAccumulator() { return 0L; } @Override public Long add(Long value, Long accumulator) { return value + accumulator; } @Override public Long getResult(Long accumulator) { return accumulator; } @Override public Long merge(Long a, Long b) { return a + b; } }, Long.class); InternalAggregatingState<K, N, Long, Long, Long> aggregatingState = backend.createOrUpdateInternalState( VoidNamespaceSerializer.INSTANCE, aggregatingStateDescriptor); InternalKvState<K, N, ?> latencyTrackingState = MetricsTrackingStateFactory.createStateAndWrapWithMetricsTrackingIfEnabled( aggregatingState, null, aggregatingStateDescriptor, getLatencyTrackingStateConfig(), getSizeTrackingStateConfig()); if (enableLatencyOrSizeTracking.f0 || enableLatencyOrSizeTracking.f1) { assertThat(latencyTrackingState) .isInstanceOf(MetricsTrackingAggregatingState.class); } else { assertThat(latencyTrackingState).isEqualTo(aggregatingState); } } } private MockKeyedStateBackend<String> createMock() { return new MockKeyedStateBackendBuilder<>( new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()), StringSerializer.INSTANCE, getClass().getClassLoader(), 1, KeyGroupRange.of(0, 0), new ExecutionConfig(), TtlTimeProvider.DEFAULT, getLatencyTrackingStateConfig(), 
getSizeTrackingStateConfig(), emptyList(), UncompressedStreamCompressionDecorator.INSTANCE, new CloseableRegistry(), MockKeyedStateBackend.MockSnapshotSupplier.EMPTY) .build(); } }
MetricsTrackingStateFactoryTest
java
quarkusio__quarkus
extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/config/MicrometerConfig.java
{ "start": 480, "end": 5907 }
interface ____ { /** * Micrometer metrics support. * <p> * Micrometer metrics support is enabled by default. */ @WithDefault("true") boolean enabled(); /** * Micrometer MeterRegistry discovery. * <p> * Micrometer MeterRegistry implementations discovered on the classpath * will be enabled automatically by default. */ @WithDefault("true") boolean registryEnabledDefault(); /** * Micrometer MeterBinder discovery. * In other words, enables the automatic metrics instrumentation. * <p> * Micrometer MeterBinder implementations discovered on the classpath * will be enabled automatically by default. In other words, automatic metrics instrumentation will be ON by default. * <p> * <code>quarkus.micrometer.binder.enable-all</code> overrides this property, meaning when this is set to * <code>false</code>, and <code>enable-all</code> is true, discovery of all MeterBinder will still happen. */ @WithDefault("true") boolean binderEnabledDefault(); /** Build / static runtime config for binders */ BinderConfig binder(); /** Build / static runtime config for exporters */ ExportConfig export(); /** * For MeterRegistry configurations with optional 'enabled' attributes, * determine whether the registry is enabled using {@link #registryEnabledDefault} * as the default value. */ default boolean checkRegistryEnabledWithDefault(CapabilityEnabled config) { if (enabled()) { Optional<Boolean> configValue = config.enabled(); return configValue.orElseGet(this::registryEnabledDefault); } return false; } /** * For MeterBinder configurations with optional 'enabled' attributes, * determine whether the binder is enabled using {@link #binderEnabledDefault} * as the default value. * * @deprecated use {@link #isEnabled(CapabilityEnabled)} instead. 
*/ @Deprecated default boolean checkBinderEnabledWithDefault(CapabilityEnabled config) { if (enabled()) { Optional<Boolean> configValue = config.enabled(); return configValue.orElseGet(this::binderEnabledDefault); } return false; } /** * Determines if a capability is enabled based on the {@link MicrometerConfig} configurations and the following rules: * <p> * <ul> * <li> * The {@link MicrometerConfig#enabled()} has precedence over all configurations, it means that if * <code>quarkus.micrometer.enabled</code> * is set to <code>false</code>, all metrics are disabled. * </li> * <li> * If the <code>quarkus.micrometer.binder.enable-all</code> is set to <code>true</code>, independently if the * parameter <code>aBoolean</code> resolve to <code>true</code> or <code>false</code> the metric will be enabled. * </li> * <li> * If the <code>quarkus.micrometer.binder.enable-all</code> is set to <code>false</code>, the parameter * <code>aBoolean</code> * will be used to determine if the metric is enabled or not. If <code>aBoolean</code> is empty, the metric will be * disabled. * </li> * </ul> * * @param aBoolean the optional boolean value to check if the capability is enabled * @return <code>true</code> if the capability is enabled, <code>false</code> otherwise. */ default boolean isEnabled(Optional<Boolean> aBoolean) { if (enabled()) { if (this.binder().enableAll()) { return true; } else { return aBoolean.orElse(false); } } return false; } /** * Determines if a capability is enabled based on the {@link MicrometerConfig} configurations and the following rules: * <p> * <ul> * <li> * The {@link MicrometerConfig#enabled()} has precedence over all configurations, it means that if * <code>quarkus.micrometer.enabled</code> * is set to <code>false</code>, all metrics are disabled. 
* </li> * <li> * If the <code>quarkus.micrometer.binder.enable-all</code> is set to <code>true</code>, independently if the * parameter <code>aBoolean</code> resolve to <code>true</code> or <code>false</code> the metric will be enabled. * </li> * <li> * If the <code>quarkus.micrometer.binder.enable-all</code> is set to <code>false</code>, the parameter <code>config</code> * will be used to determine if the metric is enabled or not. If <code>config.enabled()</code> is empty, the * {@link MicrometerConfig#binderEnabledDefault()} will be used to determine if the metric is enabled or not. * </li> * </ul> * * @param config the {@link CapabilityEnabled} to check if the capability is enabled * @return <code>true</code> if the capability is enabled, <code>false</code> otherwise. */ default boolean isEnabled(CapabilityEnabled config) { if (enabled()) { if (this.binder().enableAll()) { return true; } else { Optional<Boolean> configValue = config.enabled(); return configValue.orElseGet(this::binderEnabledDefault); } } return false; } /** Build / static runtime config for binders */ @ConfigGroup
MicrometerConfig
java
google__dagger
javatests/dagger/functional/membersinject/MembersWithInstanceNameTest.java
{ "start": 1844, "end": 2145 }
interface ____ { // As of writing, the method name is the one that matters, but name the // parameter the same anyway in case that changes. Builder instance(@BindsInstance BoundInstance instance); TestComponent build(); } } @Component(modules = TestModule.class)
Builder
java
apache__flink
flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
{ "start": 2751, "end": 3484 }
class ____ be extended to implement * distributed file systems, or local file systems. The abstraction by this file system is very * simple, and the set of available operations quite limited, to support the common denominator of a * wide range of file systems. For example, appending to or mutating existing files is not * supported. * * <p>Flink implements and supports some file system types directly (for example the default * machine-local file system). Other file system types are accessed by an implementation that * bridges to the suite of file systems supported by Hadoop (such as for example HDFS). * * <h2>Scope and Purpose</h2> * * <p>The purpose of this abstraction is used to expose a common and well defined
may
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/CheckedExceptionNotThrownTest.java
{ "start": 1035, "end": 1542 }
class ____ { private final BugCheckerRefactoringTestHelper helper = BugCheckerRefactoringTestHelper.newInstance(CheckedExceptionNotThrown.class, getClass()); private final CompilationTestHelper compilationHelper = CompilationTestHelper.newInstance(CheckedExceptionNotThrown.class, getClass()); @Test public void noExceptionThrown_entireThrowsBlockRemoved() { helper .addInputLines( "Test.java", """ public final
CheckedExceptionNotThrownTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java
{ "start": 1609, "end": 13116 }
class ____ implements ToXContentObject, Writeable { public static final ParseField SEARCH_AFTER = new ParseField("search_after"); private static final Object[] EMPTY_SORT_VALUES = new Object[0]; private Object[] sortValues = EMPTY_SORT_VALUES; public SearchAfterBuilder() {} /** * Read from a stream. */ public SearchAfterBuilder(StreamInput in) throws IOException { int size = in.readVInt(); sortValues = new Object[size]; for (int i = 0; i < size; i++) { sortValues[i] = in.readGenericValue(); } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(StreamOutput::writeGenericValue, sortValues); } public SearchAfterBuilder setSortValues(Object[] values) { if (values == null) { throw new NullPointerException("Values cannot be null."); } if (values.length == 0) { throw new IllegalArgumentException("Values must contains at least one value."); } for (int i = 0; i < values.length; i++) { if (values[i] == null) continue; if (values[i] instanceof String) continue; if (values[i] instanceof Text) continue; if (values[i] instanceof Long) continue; if (values[i] instanceof Integer) continue; if (values[i] instanceof Short) continue; if (values[i] instanceof Byte) continue; if (values[i] instanceof Double) continue; if (values[i] instanceof Float) continue; if (values[i] instanceof Boolean) continue; if (values[i] instanceof BigInteger) continue; throw new IllegalArgumentException("Can't handle " + SEARCH_AFTER + " field value of type [" + values[i].getClass() + "]"); } sortValues = new Object[values.length]; System.arraycopy(values, 0, sortValues, 0, values.length); return this; } public Object[] getSortValues() { return Arrays.copyOf(sortValues, sortValues.length); } public static FieldDoc buildFieldDoc(SortAndFormats sort, Object[] values, @Nullable String collapseField) { if (sort == null || sort.sort.getSort() == null || sort.sort.getSort().length == 0) { throw new IllegalArgumentException("Sort must contain at least one field."); } SortField[] 
sortFields = sort.sort.getSort(); if (sortFields.length != values.length) { throw new IllegalArgumentException( SEARCH_AFTER.getPreferredName() + " has " + values.length + " value(s) but sort has " + sort.sort.getSort().length + "." ); } if (collapseField != null && (sortFields.length > 1 || Objects.equals(sortFields[0].getField(), collapseField) == false)) { throw new IllegalArgumentException( "Cannot use [collapse] in conjunction with [" + SEARCH_AFTER.getPreferredName() + "] unless the search is sorted on the same field. Multiple sort fields are not allowed." ); } Object[] fieldValues = new Object[sortFields.length]; for (int i = 0; i < sortFields.length; i++) { SortField sortField = sortFields[i]; DocValueFormat format = sort.formats[i]; if (values[i] != null) { fieldValues[i] = convertValueFromSortField(values[i], sortField, format); } else { fieldValues[i] = null; } } /* * We set the doc id to Integer.MAX_VALUE in order to make sure that the search starts "after" the first document that is equal to * the field values. */ return new FieldDoc(Integer.MAX_VALUE, 0, fieldValues); } /** * Returns the inner {@link SortField.Type} expected for this sort field. 
*/ static SortField.Type extractSortType(SortField sortField) { if (sortField.getComparatorSource() instanceof IndexFieldData.XFieldComparatorSource) { return ((IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource()).reducedType(); } else if (sortField instanceof SortedSetSortField) { return SortField.Type.STRING; } else if (sortField instanceof SortedNumericSortField) { return ((SortedNumericSortField) sortField).getNumericType(); } else if ("LatLonPointSortField".equals(sortField.getClass().getSimpleName())) { // for geo distance sorting return SortField.Type.DOUBLE; } else { return sortField.getType(); } } static Object convertValueFromSortField(Object value, SortField sortField, DocValueFormat format) { SortField.Type sortType = extractSortType(sortField); return convertValueFromSortType(sortField.getField(), sortType, value, format); } private static Object convertValueFromSortType(String fieldName, SortField.Type sortType, Object value, DocValueFormat format) { try { switch (sortType) { case DOC: if (value instanceof Number valueNumber) { return (valueNumber).intValue(); } return Integer.parseInt(value.toString()); case INT: // As mixing INT and LONG sort in a single request is allowed, // we may get search_after values that are larger than Integer.MAX_VALUE // in this case convert them to Integer.MAX_VALUE if (value instanceof Number valueNumber) { if (valueNumber.longValue() > Integer.MAX_VALUE) { valueNumber = Integer.MAX_VALUE; } return (valueNumber).intValue(); } return Integer.parseInt(value.toString()); case SCORE, FLOAT: if (value instanceof Number) { return ((Number) value).floatValue(); } return Float.parseFloat(value.toString()); case DOUBLE: if (value instanceof Number) { return ((Number) value).doubleValue(); } return Double.parseDouble(value.toString()); case LONG: // for unsigned_long field type we want to pass search_after value through formatting if (value instanceof Number && format != DocValueFormat.UNSIGNED_LONG_SHIFTED) { 
return ((Number) value).longValue(); } return format.parseLong( value.toString(), false, () -> { throw new IllegalStateException("now() is not allowed in [search_after] key"); } ); case STRING_VAL: case STRING: if (value instanceof BytesRef bytesRef) { // _tsid is stored and ordered as BytesRef. We should not format it return bytesRef; } else { return format.parseBytesRef(value); } default: throw new IllegalArgumentException( "Comparator type [" + sortType.name() + "] for field [" + fieldName + "] is not supported." ); } } catch (NumberFormatException e) { throw new IllegalArgumentException( "Failed to parse " + SEARCH_AFTER.getPreferredName() + " value for field [" + fieldName + "].", e ); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); innerToXContent(builder); builder.endObject(); return builder; } public void innerToXContent(XContentBuilder builder) throws IOException { builder.array(SEARCH_AFTER.getPreferredName(), sortValues); } public static SearchAfterBuilder fromXContent(XContentParser parser) throws IOException { SearchAfterBuilder builder = new SearchAfterBuilder(); XContentParser.Token token = parser.currentToken(); List<Object> values = new ArrayList<>(); if (token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_NUMBER) { switch (parser.numberType()) { case INT -> values.add(parser.intValue()); case LONG -> values.add(parser.longValue()); case DOUBLE -> values.add(parser.doubleValue()); case FLOAT -> values.add(parser.floatValue()); case BIG_INTEGER -> values.add(parser.text()); default -> throw new IllegalArgumentException( "[search_after] does not accept numbers of type [" + parser.numberType() + "], got " + parser.text() ); } } else if (token == XContentParser.Token.VALUE_STRING) { values.add(parser.text()); } else if (token == XContentParser.Token.VALUE_BOOLEAN) { 
values.add(parser.booleanValue()); } else if (token == XContentParser.Token.VALUE_NULL) { values.add(null); } else { throw new ParsingException( parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + "] or [" + XContentParser.Token.VALUE_NUMBER + "] or [" + XContentParser.Token.VALUE_BOOLEAN + "] or [" + XContentParser.Token.VALUE_NULL + "] but found [" + token + "] inside search_after." ); } } } else { throw new ParsingException( parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_ARRAY + "] in [" + SEARCH_AFTER.getPreferredName() + "] but found [" + token + "] inside search_after", parser.getTokenLocation() ); } builder.setSortValues(values.toArray()); return builder; } @Override public boolean equals(Object other) { if ((other instanceof SearchAfterBuilder) == false) { return false; } boolean value = Arrays.equals(sortValues, ((SearchAfterBuilder) other).sortValues); return value; } @Override public int hashCode() { return Objects.hash(this.sortValues); } @Override public String toString() { return Strings.toString(this, true, true); } }
SearchAfterBuilder
java
apache__kafka
server/src/main/java/org/apache/kafka/server/quota/StrictControllerMutationQuota.java
{ "start": 1485, "end": 2783 }
class ____ extends AbstractControllerMutationQuota { private final Sensor quotaSensor; /** * Creates a new StrictControllerMutationQuota with the specified time source and quota sensor. * * @param time the Time object used for time-based calculations and quota tracking * @param quotaSensor the Sensor object that tracks quota usage for a specific user/clientId pair * @throws IllegalArgumentException if time or quotaSensor is null */ public StrictControllerMutationQuota(Time time, Sensor quotaSensor) { super(time); this.quotaSensor = Objects.requireNonNull(quotaSensor, "quotaSensor cannot be null"); } @Override public boolean isExceeded() { return lastThrottleTimeMs > 0; } @Override public void record(double permits) { var timeMs = time.milliseconds(); try { synchronized (quotaSensor) { quotaSensor.checkQuotas(timeMs); quotaSensor.record(permits, timeMs, false); } } catch (QuotaViolationException e) { updateThrottleTime(e, timeMs); throw new ThrottlingQuotaExceededException((int) lastThrottleTimeMs, Errors.THROTTLING_QUOTA_EXCEEDED.message()); } } }
StrictControllerMutationQuota
java
reactor__reactor-core
reactor-core/src/test/java/reactor/test/subscriber/AssertSubscriber.java
{ "start": 11798, "end": 12278 }
class ____: expected = " + clazz + ", actual = " + e, null); } } if (s > 1) { throw new AssertionError("Multiple errors: " + s, null); } return this; } public final AssertSubscriber<T> assertErrorMessage(String message) { assertNotComplete(); int s = errors.size(); if (s == 0) { assertionError("No error", null); } if (s == 1) { if (!Objects.equals(message, errors.get(0) .getMessage())) { assertionError("Error
incompatible
java
mockito__mockito
mockito-core/src/test/java/org/mockito/internal/stubbing/answers/InvocationInfoTest.java
{ "start": 7644, "end": 7873 }
class ____ { abstract void iAmAbstract(); } return TheAbstract.class.getDeclaredMethod("iAmAbstract"); } private Method iAmNotAbstract() throws NoSuchMethodException { abstract
TheAbstract
java
elastic__elasticsearch
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Add.java
{ "start": 642, "end": 1437 }
class ____ extends DateTimeArithmeticOperation implements BinaryComparisonInversible { public Add(Source source, Expression left, Expression right) { super(source, left, right, SqlBinaryArithmeticOperation.ADD); } @Override protected NodeInfo<Add> info() { return NodeInfo.create(this, Add::new, left(), right()); } @Override protected Add replaceChildren(Expression left, Expression right) { return new Add(source(), left, right); } @Override public Add swapLeftAndRight() { return new Add(source(), right(), left()); } @Override public ArithmeticOperationFactory binaryComparisonInverse() { return Sub::new; } @Override protected boolean isCommutative() { return true; } }
Add
java
google__dagger
javatests/dagger/functional/subcomponent/multibindings/MultibindingSubcomponents.java
{ "start": 6647, "end": 6864 }
interface ____ extends ProvidesBoundInParent, ProvidesBoundInParentAndChild, ProvidesBoundInChild, ProvidesSetOfRequiresMultibindings {} @Component(modules = ParentMultibindingModule.class)
Grandchild
java
apache__flink
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/common/VectorSearchTableFunctionTestBase.java
{ "start": 1978, "end": 23403 }
class ____ extends TableTestBase { private TableTestUtil util; protected abstract TableTestUtil getUtil(); @BeforeEach public void setup() { util = getUtil(); // Create test table util.tableEnv() .executeSql( "CREATE TABLE QueryTable (\n" + " a INT,\n" + " b BIGINT,\n" + " c STRING,\n" + " d ARRAY<FLOAT>,\n" + " rowtime TIMESTAMP(3),\n" + " proctime as PROCTIME(),\n" + " WATERMARK FOR rowtime AS rowtime - INTERVAL '1' SECOND\n" + ") with (\n" + " 'connector' = 'values',\n" + " 'bounded' = 'true' " + ")"); util.tableEnv() .executeSql( "CREATE TABLE VectorTable (\n" + " e INT,\n" + " f BIGINT,\n" + " g ARRAY<FLOAT>\n" + ") with (\n" + " 'connector' = 'values',\n" + " 'enable-vector-search' = 'true'," + " 'bounded' = 'true'" + ")"); util.tableEnv() .executeSql( "CREATE TABLE VectorTableWithProctime (\n" + " e INT,\n" + " f BIGINT,\n" + " g ARRAY<FLOAT>,\n" + " proctime as PROCTIME()\n" + ") with (\n" + " 'connector' = 'values',\n" + " 'enable-vector-search' = 'true',\n" + " 'bounded' = 'true'" + ")"); util.tableEnv() .executeSql( "CREATE TABLE VectorTableWithMetadata(\n" + " e INT,\n" + " f ARRAY<FLOAT> METADATA,\n" + " g ARRAY<FLOAT>,\n" + " h AS e + 1\n" + ") WITH (\n" + " 'connector' = 'values',\n" + " 'readable-metadata' = 'f:ARRAY<FLOAT>',\n" + " 'bounded' = 'true'" + ")"); } @Test void testSimple() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10" + ")\n" + ")"; util.verifyRelPlan(sql); } @Test void testLiteralValue() { String sql = "SELECT * FROM LATERAL TABLE(VECTOR_SEARCH(TABLE VectorTable, DESCRIPTOR(`g`), ARRAY[1.5, 2.0], 10))"; util.verifyRelPlan(sql); } @Test void testLiteralValueWithoutLateralKeyword() { String sql = "SELECT * FROM TABLE(VECTOR_SEARCH(TABLE VectorTable, DESCRIPTOR(`g`), ARRAY[1.5, 2.0], 10))"; util.verifyRelPlan(sql); } @Test void testNamedArgument() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " SEARCH_TABLE => 
TABLE VectorTable,\n" + " COLUMN_TO_QUERY => QueryTable.d,\n" + " COLUMN_TO_SEARCH => DESCRIPTOR(`g`),\n" + " TOP_K => 10" + " )\n" + ")"; util.verifyRelPlan(sql); } @Test void testOutOfOrderNamedArgument() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " COLUMN_TO_QUERY => QueryTable.d,\n" + " COLUMN_TO_SEARCH => DESCRIPTOR(`g`),\n" + " TOP_K => 10,\n" + " SEARCH_TABLE => TABLE VectorTable\n" + " )\n" + ")"; util.verifyRelPlan(sql); } @Test void testNamedArgumentWithRuntimeConfig() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " COLUMN_TO_QUERY => QueryTable.d,\n" + " COLUMN_TO_SEARCH => DESCRIPTOR(`g`),\n" + " TOP_K => 10,\n" + " CONFIG => MAP['async', 'true', 'timeout', '100s'],\n" + " SEARCH_TABLE => TABLE VectorTable\n" + " )\n" + ")"; util.verifyRelPlan(sql); } @Test void testNameConflicts() { util.tableEnv() .executeSql( "CREATE TABLE NameConflictTable(\n" + " a INT,\n" + " score ARRAY<FLOAT>,\n" + " score0 ARRAY<FLOAT>,\n" + " score1 ARRAY<FLOAT>\n" + ") WITH (\n" + " 'connector' = 'values',\n" + " 'bounded' = 'true'" + ")"); util.verifyRelPlan( "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE NameConflictTable, DESCRIPTOR(`score`), QueryTable.d, 10))"); } @Test void testDescriptorTypeIsNotExpected() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`f`), QueryTable.d, 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Expect search column `f` type is ARRAY<FLOAT> or ARRAY<DOUBLE>, but its type is BIGINT.")); } @Test void testDescriptorContainsMultipleColumns() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`f`, `g`), QueryTable.d, 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( 
FlinkAssertions.anyCauseMatches( ValidationException.class, "Expect parameter COLUMN_TO_SEARCH for VECTOR_SEARCH only contains one column, but multiple columns are found in operand DESCRIPTOR(`f`, `g`).")); } @Test void testQueryColumnIsNotArray() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.c, 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Can not cast the query column type STRING to target type FLOAT ARRAY. Please keep the query column type is same to the search column type.")); } @Test void testIllegalTopKValue1() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10.0" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Expect parameter top_k is an INTEGER NOT NULL literal in VECTOR_SEARCH, but it is 10.0 with type DECIMAL(3, 1) NOT NULL.")); } @Test void testIllegalTopKValue2() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, QueryTable.a" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Expect parameter top_k is an INTEGER NOT NULL literal in VECTOR_SEARCH, but it is QueryTable.a with type INT.")); } @Test void testIllegalTopKValue3() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 0" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Parameter top_k must be greater than 0, but was 0.")); } @Test void testSearchTableWithCalc() { // calc -> source util.verifyRelPlan( 
"SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTableWithProctime, DESCRIPTOR(`g`), QueryTable.d, 10))"); } @Test void testSearchTableWithProjection() { util.tableEnv() .executeSql( String.format( "CREATE FUNCTION add_one AS '%s'", JavaUserDefinedScalarFunctions.JavaFunc0.class.getName())); util.verifyRelPlan( "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " (SELECT add_one(e) as e1, g, proctime FROM VectorTableWithProctime), DESCRIPTOR(`g`), QueryTable.d, 10))"); } @Test void testSearchTableWithMetadataTable() { util.verifyRelPlan( "SELECT * FROM QueryTable, LATERAL TABLE(\n" + " VECTOR_SEARCH(\n" + " TABLE VectorTableWithMetadata,\n" + " DESCRIPTOR(`g`),\n" + " QueryTable.d,\n" + " 10" + " )\n" + ")"); } @Test void testSearchTableWithDescriptorUsingMetadata() { util.verifyRelPlan( "SELECT * FROM QueryTable, LATERAL TABLE(\n" + " VECTOR_SEARCH(\n" + " TABLE VectorTableWithMetadata,\n" + " DESCRIPTOR(`f`),\n" + " QueryTable.d,\n" + " 10" + " )\n" + ")"); } @Test void testSearchTableUsingUDFComputedColumn() { util.tableEnv() .executeSql( String.format("CREATE FUNCTION udf AS '%s'", TestArrayUDF.class.getName())); util.tableEnv() .executeSql( "CREATE TABLE VectorTableWithComputedColumn (\n" + " e INT NOT NULL,\n" + " f BIGINT,\n" + " g ARRAY<FLOAT>,\n" + " h as udf(e)\n" + ") with (\n" + " 'connector' = 'values',\n" + " 'bounded' = 'true'" + ")"); assertThatThrownBy( () -> util.verifyRelPlan( "SELECT * FROM QueryTable, LATERAL TABLE(\n" + " VECTOR_SEARCH(\n" + " TABLE VectorTableWithComputedColumn,\n" + " DESCRIPTOR(`h`),\n" + " QueryTable.d,\n" + " 10" + " )\n" + ")")) .satisfies( FlinkAssertions.anyCauseMatches( TableException.class, "VECTOR_SEARCH can not find column `h` in the search_table default_catalog.default_database.VectorTableWithComputedColumn physical output type. 
" + "Currently, Flink doesn't support to use computed column as the search column.")); } @Test void testSearchTableWithSnapshot() throws Exception { String catalogName = "ttc"; TestTimeTravelCatalog catalog = new TestTimeTravelCatalog(catalogName); Map<String, String> options = new HashMap<>(); options.put("connector", "values"); options.put("bounded", "true"); catalog.registerTableForTimeTravel( "t1", Schema.newBuilder() .column("f1", DataTypes.INT()) .column("f2", DataTypes.ARRAY(DataTypes.DOUBLE())) .build(), options, DateTimeTestUtil.toEpochMills( "2023-07-31 00:00:00", "yyyy-MM-dd HH:mm:ss", ZoneId.of("UTC"))); TableEnvironment tEnv = util.tableEnv(); tEnv.registerCatalog(catalogName, catalog); tEnv.useCatalog(catalogName); assertThatThrownBy( () -> util.verifyRelPlan( "SELECT * FROM (select *, proctime() pts from t1) qt, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " (SELECT * FROM t1 FOR SYSTEM_TIME AS OF qt.pts), DESCRIPTOR(`f2`), qt.f2, 10))")) .satisfies( FlinkAssertions.anyCauseMatches( RelOptPlanner.CannotPlanException.class, "VECTOR_SEARCH does not support FlinkLogicalSnapshot node in parameter search_table.")); } @Test void testSearchTableWithFilter() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " (SELECT * FROM VectorTable WHERE e > 0),\n" + " DESCRIPTOR(`g`),\n" + " QueryTable.d,\n" + " 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( RelOptPlanner.CannotPlanException.class, "VECTOR_SEARCH does not support filter on parameter search_table.")); } @Test void testSearchTableWithWatermark() { // watermark assigner -> calc -> scan if (util.isBounded()) { return; } util.tableEnv() .executeSql( "CREATE TABLE IllegalTable (\n" + " e INT,\n" + " f BIGINT,\n" + " g ARRAY<FLOAT>,\n" + " rowtime TIMESTAMP(3),\n" + " proctime as PROCTIME(),\n" + " WATERMARK FOR rowtime AS rowtime - INTERVAL '1' SECOND\n" + ") with (\n" + " 'connector' = 'values'\n" + ")"); String 
sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE IllegalTable, DESCRIPTOR(`g`), QueryTable.d, 10))"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( RelOptPlanner.CannotPlanException.class, "VECTOR_SEARCH does not support FlinkLogicalWatermarkAssigner node in parameter search_table.")); } @Test void testSearchTableNonExistColumn() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`z`), QueryTable.d, 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies(FlinkAssertions.anyCauseMatches("Unknown identifier 'z'")); } @Test public void testIllegalRuntimeConfigType() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10, 10" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Config param should be a MAP.")); } @Test public void testIllegalConfigValue1() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10, MAP['async', 'yes']" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( IllegalArgumentException.class, "Unrecognized option for boolean: yes. Expected either true or false(case insensitive)")); } @Test public void testIllegalConfigValue2() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10, MAP['async', 'true', 'max-concurrent-operations', '-1']" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyRelPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( ValidationException.class, "Invalid runtime config option 'max-concurrent-operations'. 
Its value should be positive integer but was -1.")); } @Test public void testPreferAsync() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10, MAP['async', 'true']" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyExecPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( TableException.class, "Require async mode")); } @Test public void testUsingRuntimeConfigToAdjustConnectorParameter() { String sql = "SELECT * FROM QueryTable, LATERAL TABLE(\n" + "VECTOR_SEARCH(\n" + " TABLE VectorTable, DESCRIPTOR(`g`), QueryTable.d, 10, MAP['enable-vector-search', 'false']" + ")\n" + ")"; assertThatThrownBy(() -> util.verifyExecPlan(sql)) .satisfies( FlinkAssertions.anyCauseMatches( IllegalArgumentException.class, "Require option enable-vector-search true.")); } public static
VectorSearchTableFunctionTestBase
java
spring-projects__spring-boot
module/spring-boot-opentelemetry/src/test/java/org/springframework/boot/opentelemetry/autoconfigure/logging/otlp/OtlpLoggingAutoConfigurationIntegrationTests.java
{ "start": 1786, "end": 4926 }
class ____ { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withPropertyValues("spring.application.name=otlp-logs-test", "management.opentelemetry.logging.export.otlp.headers.Authorization=Bearer my-token") .withConfiguration(AutoConfigurations.of(OpenTelemetrySdkAutoConfiguration.class, OpenTelemetryLoggingAutoConfiguration.class, OtlpLoggingAutoConfiguration.class)); private final MockWebServer mockWebServer = new MockWebServer(); @BeforeEach void setUp() throws IOException { this.mockWebServer.start(); } @AfterEach void tearDown() throws IOException { this.mockWebServer.close(); } @Test void httpLogRecordExporterShouldUseProtobufAndNoCompressionByDefault() { this.mockWebServer.enqueue(new MockResponse()); this.contextRunner .withPropertyValues("management.opentelemetry.logging.export.otlp.endpoint=http://localhost:%d/v1/logs" .formatted(this.mockWebServer.getPort())) .run((context) -> { logMessage(context); RecordedRequest request = this.mockWebServer.takeRequest(10, TimeUnit.SECONDS); assertThat(request).isNotNull(); assertThat(request.getRequestLine()).contains("/v1/logs"); assertThat(request.getHeader("Content-Type")).isEqualTo("application/x-protobuf"); assertThat(request.getHeader("Content-Encoding")).isNull(); assertThat(request.getBodySize()).isPositive(); try (Buffer body = request.getBody()) { assertLogMessage(body); } }); } @Test void httpLogRecordExporterCanBeConfiguredToUseGzipCompression() { this.mockWebServer.enqueue(new MockResponse()); this.contextRunner .withPropertyValues( "management.opentelemetry.logging.export.otlp.endpoint=http://localhost:%d/v1/logs" .formatted(this.mockWebServer.getPort()), "management.opentelemetry.logging.export.otlp.compression=gzip") .run((context) -> { logMessage(context); RecordedRequest request = this.mockWebServer.takeRequest(10, TimeUnit.SECONDS); assertThat(request).isNotNull(); assertThat(request.getRequestLine()).contains("/v1/logs"); 
assertThat(request.getHeader("Content-Type")).isEqualTo("application/x-protobuf"); assertThat(request.getHeader("Content-Encoding")).isEqualTo("gzip"); assertThat(request.getBodySize()).isPositive(); try (Buffer uncompressed = new Buffer(); Buffer body = request.getBody()) { uncompressed.writeAll(new GzipSource(body)); assertLogMessage(uncompressed); } }); } private static void logMessage(ApplicationContext context) { SdkLoggerProvider loggerProvider = context.getBean(SdkLoggerProvider.class); loggerProvider.get("test") .logRecordBuilder() .setSeverity(Severity.INFO) .setSeverityText("INFO") .setBody("Hello") .setTimestamp(Instant.now()) .emit(); } private static void assertLogMessage(Buffer body) { String string = body.readString(StandardCharsets.UTF_8); assertThat(string).contains("otlp-logs-test"); assertThat(string).contains("test"); assertThat(string).contains("INFO"); assertThat(string).contains("Hello"); } }
OtlpLoggingAutoConfigurationIntegrationTests
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/annotations/OnDelete.java
{ "start": 1077, "end": 1312 }
class ____ { * &#064;Id * long id; * ... * &#064;ElementCollection * &#064;OnDelete(action = CASCADE) * String&lt;String&gt; keywords; * } * * &#064;Entity * &#064;OnDelete(action = CASCADE) *
Publication
java
elastic__elasticsearch
x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/IndexStatus.java
{ "start": 310, "end": 765 }
enum ____ { CLOSED(false), UNHEALTHY(false), TOO_OLD(false), NEEDS_CREATION(true), NEEDS_VERSION_BUMP(true), UP_TO_DATE(false), NEEDS_MAPPINGS_UPDATE(true); /** * Whether a status is for informational purposes only or whether it should be acted upon and may change cluster state. */ public final boolean actionable; IndexStatus(boolean actionable) { this.actionable = actionable; } }
IndexStatus
java
grpc__grpc-java
api/src/test/java/io/grpc/NameResolverRegistryTest.java
{ "start": 1139, "end": 8386 }
class ____ { private final URI uri = URI.create("dns:///localhost"); private final NameResolver.Args args = NameResolver.Args.newBuilder() .setDefaultPort(8080) .setProxyDetector(mock(ProxyDetector.class)) .setSynchronizationContext(new SynchronizationContext(mock(UncaughtExceptionHandler.class))) .setServiceConfigParser(mock(ServiceConfigParser.class)) .setChannelLogger(mock(ChannelLogger.class)) .build(); @Test public void register_unavilableProviderThrows() { NameResolverRegistry reg = new NameResolverRegistry(); try { reg.register(new BaseProvider(false, 5)); fail("Should throw"); } catch (IllegalArgumentException e) { assertThat(e).hasMessageThat().contains("isAvailable() returned false"); } assertThat(reg.providers()).isEmpty(); } @Test public void deregister() { NameResolverRegistry reg = new NameResolverRegistry(); NameResolverProvider p1 = new BaseProvider(true, 5); NameResolverProvider p2 = new BaseProvider(true, 5); NameResolverProvider p3 = new BaseProvider(true, 5); String sameScheme = p1.getDefaultScheme(); reg.register(p1); reg.register(p2); reg.register(p3); assertThat(reg.providers().get(sameScheme)).isSameInstanceAs(p1); reg.deregister(p2); assertThat(reg.providers().get(sameScheme)).isSameInstanceAs(p1); reg.deregister(p1); assertThat(reg.providers().get(sameScheme)).isSameInstanceAs(p3); } @Test public void provider_sorted() { NameResolverRegistry reg = new NameResolverRegistry(); NameResolverProvider p1 = new BaseProvider(true, 5); NameResolverProvider p2 = new BaseProvider(true, 3); NameResolverProvider p3 = new BaseProvider(true, 8); NameResolverProvider p4 = new BaseProvider(true, 3); NameResolverProvider p5 = new BaseProvider(true, 8); String sameScheme = p1.getDefaultScheme(); reg.register(p1); reg.register(p2); reg.register(p3); reg.register(p4); reg.register(p5); assertThat(reg.providers().get(sameScheme)).isSameInstanceAs(p3); } @Test public void getDefaultScheme_noProvider() { NameResolver.Factory factory = new 
NameResolverRegistry().asFactory(); assertThat(factory.getDefaultScheme()).isEqualTo("unknown"); } @Test public void newNameResolver_providerReturnsNull() { NameResolverRegistry registry = new NameResolverRegistry(); registry.register( new BaseProvider(true, 5, "noScheme") { @Override public NameResolver newNameResolver(URI passedUri, NameResolver.Args passedArgs) { assertThat(passedUri).isSameInstanceAs(uri); assertThat(passedArgs).isSameInstanceAs(args); return null; } }); assertThat(registry.asFactory().newNameResolver(uri, args)).isNull(); assertThat(registry.asFactory().getDefaultScheme()).isEqualTo("noScheme"); } @Test public void newNameResolver_providerReturnsNonNull() { NameResolverRegistry registry = new NameResolverRegistry(); registry.register(new BaseProvider(true, 5, uri.getScheme()) { @Override public NameResolver newNameResolver(URI passedUri, NameResolver.Args passedArgs) { return null; } }); final NameResolver nr = new NameResolver() { @Override public String getServiceAuthority() { throw new UnsupportedOperationException(); } @Override public void start(Listener2 listener) { throw new UnsupportedOperationException(); } @Override public void shutdown() { throw new UnsupportedOperationException(); } }; registry.register( new BaseProvider(true, 4, uri.getScheme()) { @Override public NameResolver newNameResolver(URI passedUri, NameResolver.Args passedArgs) { return nr; } }); registry.register( new BaseProvider(true, 3, uri.getScheme()) { @Override public NameResolver newNameResolver(URI passedUri, NameResolver.Args passedArgs) { fail("Should not be called"); throw new AssertionError(); } }); assertThat(registry.asFactory().newNameResolver(uri, args)).isNull(); assertThat(registry.asFactory().getDefaultScheme()).isEqualTo(uri.getScheme()); } @Test public void newNameResolver_multipleScheme() { NameResolverRegistry registry = new NameResolverRegistry(); registry.register(new BaseProvider(true, 5, uri.getScheme()) { @Override public NameResolver 
newNameResolver(URI passedUri, NameResolver.Args passedArgs) { return null; } }); final NameResolver nr = new NameResolver() { @Override public String getServiceAuthority() { throw new UnsupportedOperationException(); } @Override public void start(Listener2 listener) { throw new UnsupportedOperationException(); } @Override public void shutdown() { throw new UnsupportedOperationException(); } }; registry.register( new BaseProvider(true, 4, "other") { @Override public NameResolver newNameResolver(URI passedUri, NameResolver.Args passedArgs) { return nr; } }); assertThat(registry.asFactory().newNameResolver(uri, args)).isNull(); assertThat(registry.asFactory().newNameResolver(URI.create("/0.0.0.0:80"), args)).isNull(); assertThat(registry.asFactory().newNameResolver(URI.create("///0.0.0.0:80"), args)).isNull(); assertThat(registry.asFactory().newNameResolver(URI.create("other:///0.0.0.0:80"), args)) .isSameInstanceAs(nr); assertThat(registry.asFactory().newNameResolver(URI.create("OTHER:///0.0.0.0:80"), args)) .isSameInstanceAs(nr); assertThat(registry.asFactory().getDefaultScheme()).isEqualTo("dns"); } @Test public void newNameResolver_noProvider() { NameResolver.Factory factory = new NameResolverRegistry().asFactory(); assertThat(factory.newNameResolver(uri, args)).isNull(); assertThat(factory.getDefaultScheme()).isEqualTo("unknown"); } @Test public void baseProviders() { Map<String, NameResolverProvider> providers = NameResolverRegistry.getDefaultRegistry().providers(); assertThat(providers).hasSize(1); // 2 name resolvers from core assertThat(providers.get("dns")) .isInstanceOf(io.grpc.internal.DnsNameResolverProvider.class); assertThat(NameResolverRegistry.getDefaultRegistry().asFactory().getDefaultScheme()) .isEqualTo("dns"); } @Test public void getClassesViaHardcoded_classesPresent() throws Exception { List<Class<?>> classes = NameResolverRegistry.getHardCodedClasses(); assertThat(classes).containsExactly(io.grpc.internal.DnsNameResolverProvider.class); } @Test 
public void provided() { for (NameResolverProvider current : InternalServiceProviders.getCandidatesViaServiceLoader( NameResolverProvider.class, getClass().getClassLoader())) { if (current instanceof DnsNameResolverProvider) { return; } } fail("DnsNameResolverProvider not registered"); } private static
NameResolverRegistryTest
java
spring-projects__spring-boot
smoke-test/spring-boot-smoke-test-testng/src/main/java/smoketest/testng/SampleTestNGApplication.java
{ "start": 1033, "end": 1616 }
class ____ { private static final Log logger = LogFactory.getLog(SampleTestNGApplication.class); @Bean protected ServletContextListener listener() { return new ServletContextListener() { @Override public void contextInitialized(ServletContextEvent sce) { logger.info("ServletContext initialized"); } @Override public void contextDestroyed(ServletContextEvent sce) { logger.info("ServletContext destroyed"); } }; } public static void main(String[] args) { SpringApplication.run(SampleTestNGApplication.class, args); } }
SampleTestNGApplication
java
quarkusio__quarkus
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/MultipleHttpAnnotationsTest.java
{ "start": 1082, "end": 1228 }
class ____ { @Path("hello") @GET @POST public String hello() { return "hello"; } } }
Resource
java
apache__maven
impl/maven-testing/src/main/java/org/apache/maven/api/plugin/testing/Basedir.java
{ "start": 1415, "end": 1517 }
class ____:</p> * <pre> * {@code * @MojoTest * @Basedir("src/test/resources/my-test-project") *
level
java
apache__kafka
core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
{ "start": 7176, "end": 178298 }
class ____ { private static final int DEFAULT_RECORD_LOCK_DURATION_MS = 30000; private static final int MAX_DELIVERY_COUNT = 5; private static final short MAX_IN_FLIGHT_MESSAGES = 200; private static final byte BATCH_OPTIMIZED = ShareAcquireMode.BATCH_OPTIMIZED.id(); private static final short MAX_FETCH_RECORDS = 500; private static final int DELAYED_SHARE_FETCH_MAX_WAIT_MS = 2000; private static final int DELAYED_SHARE_FETCH_TIMEOUT_MS = 3000; private static final int BATCH_SIZE = 500; private static final FetchParams FETCH_PARAMS = new FetchParams( FetchRequest.ORDINARY_CONSUMER_ID, -1, DELAYED_SHARE_FETCH_MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); private static final String TIMER_NAME_PREFIX = "share-partition-manager"; private static final String CONNECTION_ID = "id-1"; static final int DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL = 1000; static final long REMOTE_FETCH_MAX_WAIT_MS = 6000L; private MockTime time; private ReplicaManager mockReplicaManager; private BrokerTopicStats brokerTopicStats; private SharePartitionManager sharePartitionManager; private static final List<TopicIdPartition> EMPTY_PART_LIST = List.of(); private static final List<ShareFetchResponseData.AcquiredRecords> EMPTY_ACQUIRED_RECORDS = List.of(); @BeforeEach public void setUp() { time = new MockTime(); kafka.utils.TestUtils.clearYammerMetrics(); brokerTopicStats = new BrokerTopicStats(); mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); when(mockReplicaManager.getPartitionOrException((TopicPartition) any())).thenReturn(partition); } @AfterEach public void tearDown() throws Exception { if (sharePartitionManager != null) { sharePartitionManager.close(); } brokerTopicStats.close(); assertNoReaperThreadsPendingClose(); } @Test public void testNewContextReturnsFinalContextWithoutRequestData() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() 
.withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData1 = List.of(tp0, tp1); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context2.getClass()); } @Test public void testNewContextReturnsFinalContextWithRequestData() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData1 = List.of(tp0, tp1); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // Sending a Request with FINAL_EPOCH. This should return a FinalContext. 
List<TopicIdPartition> reqData2 = List.of(tp0, tp1); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context2.getClass()); } @Test public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequestData() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData1 = List.of(tp0, tp1); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH List<TopicIdPartition> reqData2 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); assertInstanceOf(FinalContext.class, sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID)); } @Test public void testNewContextThrowsErrorWhenShareSessionNotFoundOnFinalEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext("grp", EMPTY_PART_LIST, EMPTY_PART_LIST, 
Uuid.randomUuid().toString(), ShareRequestMetadata.FINAL_EPOCH, false, CONNECTION_ID)); } @Test public void testNewContextThrowsErrorWhenAcknowledgeDataPresentOnInitialEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); assertThrows(InvalidRequestException.class, () -> sharePartitionManager.newContext("grp", List.of(tp0, tp1), EMPTY_PART_LIST, Uuid.randomUuid().toString(), ShareRequestMetadata.INITIAL_EPOCH, true, CONNECTION_ID)); } @Test public void testNewContextThrowsErrorWhenShareSessionCacheIsFullOnInitialEpoch() { // Define a cache with max size 1 ShareSessionCache cache = new ShareSessionCache(1); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId1 = Uuid.randomUuid().toString(); String memberId2 = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData = List.of(tp0, tp1); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // Trying to create a new share session, but since cache is already full, it should throw an exception assertThrows(ShareSessionLimitReachedException.class, () -> sharePartitionManager.newContext("grp", reqData, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.INITIAL_EPOCH, 
false, "id-2")); } @Test public void testNewContextExistingSessionNewRequestWithInitialEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); List<TopicIdPartition> reqData = List.of(tp0, tp1); // Create a new share session with an initial share fetch request ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); assertEquals(1, cache.size()); // Sending another request with INITIAL_EPOCH and same share session key. This should return a new ShareSessionContext // and delete the older one. 
ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context1).isSubsequent()); assertEquals(1, cache.size()); } @Test public void testNewContext() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Map<Uuid, String> topicNames = new HashMap<>(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); topicNames.put(tpId0, "foo"); topicNames.put(tpId1, "bar"); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(tpId1, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(tpId1, new TopicPartition("bar", 1)); String groupId = "grp"; // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData2 = List.of(tp0, tp1); String memberId = Uuid.randomUuid().toString(); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); ((ShareSessionContext) context2).shareFetchData().forEach(topicIdPartition -> assertTrue(reqData2.contains(topicIdPartition))); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId, respData2); assertEquals(Errors.NONE, 
resp2.error()); assertEquals(respData2, resp2.responseData(topicNames)); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, memberId); // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, "id-2")); // Test trying to create a new session with a non-existent session key String memberId4 = Uuid.randomUuid().toString(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId4, 1, true, "id-3")); // Continue the first share session we created. ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, shareSessionKey2.memberId(), 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); ShareSessionContext shareSessionContext5 = (ShareSessionContext) context5; synchronized (shareSessionContext5.session()) { shareSessionContext5.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); assertTrue(reqData2.contains(topicIdPartition)); }); } ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, memberId, respData2); assertEquals(Errors.NONE, resp5.error()); assertEquals(0, resp5.responseData(topicNames).size()); // Test setting an invalid share session epoch. 
assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, shareSessionKey2.memberId(), 2, true, CONNECTION_ID); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData8 = new LinkedHashMap<>(); respData8.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData8.put(tp3, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); ShareFetchResponse resp8 = context8.updateAndGenerateResponseData(groupId, memberId, respData8); assertEquals(Errors.NONE, resp8.error()); // Close the session. 
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> releaseResponse = sharePartitionManager.releaseSession(groupId, memberId); assertTrue(releaseResponse.isDone()); assertFalse(releaseResponse.isCompletedExceptionally()); assertEquals(0, cache.size()); } @Test public void testAcknowledgeSessionUpdateThrowsOnInitialEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.acknowledgeSessionUpdate("grp", Uuid.randomUuid().toString(), ShareRequestMetadata.INITIAL_EPOCH)); } @Test public void testAcknowledgeSessionUpdateThrowsWhenShareSessionNotFound() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); // The share session corresponding to this memberId has not been created yet. This should throw an exception. 
assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.acknowledgeSessionUpdate("grp", Uuid.randomUuid().toString(), ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH))); } @Test public void testAcknowledgeSessionUpdateThrowsInvalidShareSessionEpochException() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // The expected epoch from the share session should be 1, but we are passing 2. This should throw an exception. 
assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.acknowledgeSessionUpdate("grp", memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)))); } @Test public void testAcknowledgeSessionUpdateSuccessOnSubsequentEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // The expected epoch from the share session should be 1, and we are passing the same. So, execution should be successful. 
assertDoesNotThrow( () -> sharePartitionManager.acknowledgeSessionUpdate("grp", memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH))); } @Test public void testAcknowledgeSessionUpdateSuccessOnFinalEpoch() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); // Create a new share session with an initial share fetch request ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, memberId, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); // The expected epoch from the share session should be 1, but we are passing the Final Epoch (-1). This should throw an exception. 
assertDoesNotThrow( () -> sharePartitionManager.acknowledgeSessionUpdate("grp", memberId, ShareRequestMetadata.FINAL_EPOCH)); } @Test public void testSubsequentShareSession() { sharePartitionManager = SharePartitionManagerBuilder.builder().build(); Map<Uuid, String> topicNames = new HashMap<>(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); topicNames.put(fooId, "foo"); topicNames.put(barId, "bar"); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); // Create a new share session with foo-0 and foo-1 List<TopicIdPartition> reqData1 = List.of(tp0, tp1); String groupId = "grp"; String memberId1 = Uuid.randomUuid().toString(); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData1 = new LinkedHashMap<>(); respData1.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(tp0.partition())); respData1.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(tp1.partition())); ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, memberId1, respData1); assertEquals(Errors.NONE, resp1.error()); assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent fetch request that removes foo-0 and adds bar-0 List<TopicIdPartition> reqData2 = List.of(tp2); List<TopicIdPartition> removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, memberId1, 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); Set<TopicIdPartition> expectedTopicIdPartitions2 = new 
HashSet<>(); expectedTopicIdPartitions2.add(tp1); expectedTopicIdPartitions2.add(tp2); Set<TopicIdPartition> actualTopicIdPartitions2 = new HashSet<>(); ShareSessionContext shareSessionContext = (ShareSessionContext) context2; shareSessionContext.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); actualTopicIdPartitions2.add(topicIdPartition); }); assertEquals(expectedTopicIdPartitions2, actualTopicIdPartitions2); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(tp1.partition())); respData2.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(tp2.partition())); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId1, respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(1, resp2.data().responses().size()); assertEquals(barId, resp2.data().responses().stream().findFirst().get().topicId()); assertEquals(1, resp2.data().responses().stream().findFirst().get().partitions().size()); assertEquals(0, resp2.data().responses().stream().findFirst().get().partitions().get(0).partitionIndex()); assertEquals(1, resp2.responseData(topicNames).size()); } @Test public void testZeroSizeShareSession() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Map<Uuid, String> topicNames = new HashMap<>(); Uuid fooId = Uuid.randomUuid(); topicNames.put(fooId, "foo"); TopicIdPartition foo0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session with foo-0 and foo-1 List<TopicIdPartition> reqData1 = List.of(foo0, 
foo1); String groupId = "grp"; String memberId1 = Uuid.randomUuid().toString(); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData1 = new LinkedHashMap<>(); respData1.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); respData1.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, memberId1, respData1); assertEquals(Errors.NONE, resp1.error()); assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share request that removes foo-0 and foo-1 // Verify that the previous share session was closed. List<TopicIdPartition> removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, removed2, memberId1, 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId1, respData2); assertTrue(resp2.responseData(topicNames).isEmpty()); assertEquals(1, cache.size()); } @Test public void testToForgetPartitions() { String groupId = "grp"; ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); TopicIdPartition foo = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); TopicIdPartition bar = new TopicIdPartition(barId, new TopicPartition("bar", 0)); String memberId1 = Uuid.randomUuid().toString(); 
List<TopicIdPartition> reqData1 = List.of(foo, bar); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertPartitionsPresent((ShareSessionContext) context1, List.of(foo, bar)); mockUpdateAndGenerateResponseData(context1, groupId, memberId1); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(foo), memberId1, 1, true, CONNECTION_ID); // So foo is removed but not the others. assertPartitionsPresent((ShareSessionContext) context2, List.of(bar)); mockUpdateAndGenerateResponseData(context2, groupId, memberId1); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(bar), memberId1, 2, true, CONNECTION_ID); assertPartitionsPresent((ShareSessionContext) context3, EMPTY_PART_LIST); } // This test simulates a share session where the topic ID changes broker side (the one handling the request) in both the metadata cache and the log // -- as though the topic is deleted and recreated. 
@Test public void testShareSessionUpdateTopicIdsBrokerSide() { String groupId = "grp"; ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); TopicIdPartition foo = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); TopicIdPartition bar = new TopicIdPartition(barId, new TopicPartition("bar", 1)); Map<Uuid, String> topicNames = new HashMap<>(); topicNames.put(fooId, "foo"); topicNames.put(barId, "bar"); // Create a new share session with foo-0 and bar-1 List<TopicIdPartition> reqData1 = List.of(foo, bar); String memberId1 = Uuid.randomUuid().toString(); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData1 = new LinkedHashMap<>(); respData1.put(bar, new ShareFetchResponseData.PartitionData().setPartitionIndex(bar.partition())); respData1.put(foo, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo.partition()).setErrorCode( Errors.UNKNOWN_TOPIC_OR_PARTITION.code())); ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, memberId1, respData1); assertEquals(Errors.NONE, resp1.error()); assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. 
ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId1, 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertTrue(((ShareSessionContext) context2).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); // Likely if the topic ID is different in the broker, it will be different in the log. Simulate the log check finding an inconsistent ID. respData2.put(foo, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo.partition()).setErrorCode( Errors.INCONSISTENT_TOPIC_ID.code())); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId1, respData2); assertEquals(Errors.NONE, resp2.error()); // We should have the inconsistent topic ID error on the partition assertEquals(Errors.INCONSISTENT_TOPIC_ID.code(), resp2.responseData(topicNames).get(foo).errorCode()); } @Test public void testGetErroneousAndValidTopicIdPartitions() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); TopicIdPartition tpNull1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(null, 0)); TopicIdPartition tpNull2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(null, 1)); String groupId = "grp"; // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData2 = List.of(tp0, tp1, tpNull1); String memberId2 = Uuid.randomUuid().toString(); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); 
assertFalse(((ShareSessionContext) context2).isSubsequent()); assertErroneousAndValidTopicIdPartitions(context2.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1), List.of(tp0, tp1)); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); respData2.put(tpNull1, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId2, respData2); assertEquals(Errors.NONE, resp2.error()); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, memberId2); // Check for throttled response ShareFetchResponse resp2Throttle = context2.throttleResponse(100); assertEquals(Errors.NONE, resp2Throttle.error()); assertEquals(100, resp2Throttle.throttleTimeMs()); // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, CONNECTION_ID)); // Test trying to create a new session with a non-existent session key assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, Uuid.randomUuid().toString(), 1, true, CONNECTION_ID)); // Continue the first share session we created. 
ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, shareSessionKey2.memberId(), 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); assertErroneousAndValidTopicIdPartitions(context5.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1), List.of(tp0, tp1)); ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, memberId2, respData2); assertEquals(Errors.NONE, resp5.error()); // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session List<TopicIdPartition> reqData7 = List.of(tpNull2); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, shareSessionKey2.memberId(), 2, true, CONNECTION_ID); // Check for throttled response ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1, tpNull2), List.of(tp0, tp1)); // Get the final share session. ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), EMPTY_PART_LIST, EMPTY_PART_LIST); // Check for throttled response ShareFetchResponse resp8 = context8.throttleResponse(100); assertEquals(Errors.NONE, resp8.error()); assertEquals(100, resp8.throttleTimeMs()); // Close the session. 
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> releaseResponse = sharePartitionManager.releaseSession(groupId, memberId2); assertTrue(releaseResponse.isDone()); assertFalse(releaseResponse.isCompletedExceptionally()); assertEquals(0, cache.size()); } @Test public void testShareFetchContextResponseSize() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Map<Uuid, String> topicNames = new HashMap<>(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); topicNames.put(tpId0, "foo"); topicNames.put(tpId1, "bar"); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(tpId1, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(tpId1, new TopicPartition("bar", 1)); String groupId = "grp"; // Create a new share session with an initial share fetch request List<TopicIdPartition> reqData2 = List.of(tp0, tp1); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); short version = ApiKeys.SHARE_FETCH.latestVersion(); String memberId2 = Uuid.randomUuid().toString(); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); int respSize2 = context2.responseSize(respData2, version); 
ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId2, respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(respData2, resp2.responseData(topicNames)); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + resp2.data().size(objectSerializationCache, version), respSize2); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, memberId2); // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, CONNECTION_ID)); // Test trying to create a new session with a non-existent session key String memberId4 = Uuid.randomUuid().toString(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId4, 1, true, CONNECTION_ID)); // Continue the first share session we created. List<TopicIdPartition> reqData5 = List.of(tp2); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, shareSessionKey2.memberId(), 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData5 = new LinkedHashMap<>(); respData5.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); int respSize5 = context5.responseSize(respData5, version); ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, memberId2, respData5); assertEquals(Errors.NONE, resp5.error()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + resp5.data().size(objectSerializationCache, version), respSize5); // Test setting an invalid share session epoch. 
assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, shareSessionKey2.memberId(), 5, true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, shareSessionKey2.memberId(), 2, true, CONNECTION_ID); int respSize7 = context7.responseSize(respData2, version); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData8 = new LinkedHashMap<>(); respData8.put(tp3, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); int respSize8 = context8.responseSize(respData8, version); ShareFetchResponse resp8 = context8.updateAndGenerateResponseData(groupId, memberId2, respData8); assertEquals(Errors.NONE, resp8.error()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. 
assertEquals(4 + resp8.data().size(objectSerializationCache, version), respSize8); } @Test public void testCachedTopicPartitionsWithNoTopicPartitions() { ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); List<TopicIdPartition> result = sharePartitionManager.cachedTopicIdPartitionsInShareSession("grp", Uuid.randomUuid().toString()); assertTrue(result.isEmpty()); } @Test public void testCachedTopicPartitionsForValidShareSessions() { ShareSessionCache cache = new ShareSessionCache(10); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(tpId1, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(tpId1, new TopicPartition("bar", 1)); String groupId = "grp"; String memberId1 = Uuid.randomUuid().toString(); String memberId2 = Uuid.randomUuid().toString(); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); when(sp0.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); when(sp1.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); when(sp2.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) 
.withPartitionCache(partitionCache) .build(); // Create a new share session with an initial share fetch request. List<TopicIdPartition> reqData1 = List.of(tp0, tp1); ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareSessionKey shareSessionKey1 = new ShareSessionKey(groupId, memberId1); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData1 = new LinkedHashMap<>(); respData1.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData1.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, memberId1, respData1); assertEquals(Errors.NONE, resp1.error()); assertEquals(Set.of(tp0, tp1), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. 
List<TopicIdPartition> reqData2 = List.of(tp2); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, memberId2, ShareRequestMetadata.INITIAL_EPOCH, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, memberId2); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, memberId2, respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(List.of(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Continue the first share session we created. List<TopicIdPartition> reqData3 = List.of(tp2); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, shareSessionKey1.memberId(), 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context3); assertTrue(((ShareSessionContext) context3).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData3 = new LinkedHashMap<>(); respData3.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); ShareFetchResponse resp3 = context3.updateAndGenerateResponseData(groupId, memberId1, respData3); assertEquals(Errors.NONE, resp3.error()); assertEquals(Set.of(tp0, tp1, tp2), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. 
List<TopicIdPartition> reqData4 = List.of(tp3); ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, List.of(tp2), shareSessionKey2.memberId(), 1, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context4); assertTrue(((ShareSessionContext) context4).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData4 = new LinkedHashMap<>(); respData4.put(tp3, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); ShareFetchResponse resp4 = context4.updateAndGenerateResponseData(groupId, memberId2, respData4); assertEquals(Errors.NONE, resp4.error()); assertEquals(List.of(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, memberId1, ShareRequestMetadata.FINAL_EPOCH, true, CONNECTION_ID); assertEquals(FinalContext.class, context5.getClass()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData5 = new LinkedHashMap<>(); ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, memberId1, respData5); assertEquals(Errors.NONE, resp5.error()); assertFalse(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Close the first session. sharePartitionManager.releaseSession(groupId, memberId1); assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . 
ShareFetchContext context6 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(tp3), shareSessionKey2.memberId(), 2, true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context6); assertTrue(((ShareSessionContext) context6).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData6 = new LinkedHashMap<>(); ShareFetchResponse resp6 = context6.updateAndGenerateResponseData(groupId, memberId2, respData6); assertEquals(Errors.NONE, resp6.error()); assertEquals(EMPTY_PART_LIST, sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); } @Test public void testSharePartitionKey() { SharePartitionKey sharePartitionKey1 = new SharePartitionKey("mock-group-1", new TopicIdPartition(new Uuid(0L, 1L), new TopicPartition("test", 0))); SharePartitionKey sharePartitionKey2 = new SharePartitionKey("mock-group-2", new TopicIdPartition(new Uuid(0L, 1L), new TopicPartition("test", 0))); SharePartitionKey sharePartitionKey3 = new SharePartitionKey("mock-group-1", new TopicIdPartition(new Uuid(1L, 1L), new TopicPartition("test-1", 0))); SharePartitionKey sharePartitionKey4 = new SharePartitionKey("mock-group-1", new TopicIdPartition(new Uuid(0L, 1L), new TopicPartition("test", 1))); SharePartitionKey sharePartitionKey5 = new SharePartitionKey("mock-group-1", new TopicIdPartition(new Uuid(0L, 0L), new TopicPartition("test-2", 0))); SharePartitionKey sharePartitionKey1Copy = new SharePartitionKey("mock-group-1", new TopicIdPartition(new Uuid(0L, 1L), new TopicPartition("test", 0))); assertEquals(sharePartitionKey1, sharePartitionKey1Copy); assertNotEquals(sharePartitionKey1, sharePartitionKey2); assertNotEquals(sharePartitionKey1, sharePartitionKey3); assertNotEquals(sharePartitionKey1, sharePartitionKey4); assertNotEquals(sharePartitionKey1, sharePartitionKey5); assertNotNull(sharePartitionKey1); } @Test public void testMultipleSequentialShareFetches() { String groupId = "grp"; Uuid memberId1 = 
Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); TopicIdPartition tp4 = new TopicIdPartition(fooId, new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(barId, new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(fooId, new TopicPartition("foo", 3)); List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6); mockFetchOffsetForTimestamp(mockReplicaManager); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp2, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp3, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp4, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp5, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp6, 1); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) .build(); doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), 
any(ReplicaQuota.class), anyBoolean()); CompletableFuture<Map<TopicIdPartition, PartitionData>> future = sharePartitionManager.fetchMessages( groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 1, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 3, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(2)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 10, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); assertTrue(future.isDone()); Mockito.verify(mockReplicaManager, times(3)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); // Should have 6 total fetches, 3 fetches for topic foo (though 4 partitions but 3 fetches) and 3 // fetches for topic bar (though 3 partitions but 3 fetches). 
validateBrokerTopicStatsMetrics( brokerTopicStats, new TopicMetrics(6, 0, 0, 0), Map.of("foo", new TopicMetrics(3, 0, 0, 0), "bar", new TopicMetrics(3, 0, 0, 0)) ); } @Test public void testReplicaManagerFetchShouldNotProceed() { String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); List<TopicIdPartition> topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) .build(); CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Map<TopicIdPartition, ShareFetchResponseData.PartitionData> result = future.join(); assertEquals(0, result.size()); // Should have 1 fetch recorded and no failed as the fetch did complete without error. 
validateBrokerTopicStatsMetrics( brokerTopicStats, new TopicMetrics(1, 0, 0, 0), Map.of("foo", new TopicMetrics(1, 0, 0, 0)) ); } @Test public void testReplicaManagerFetchShouldProceed() { String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); List<TopicIdPartition> topicIdPartitions = List.of(tp0); mockFetchOffsetForTimestamp(mockReplicaManager); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) .build(); doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); // Since the nextFetchOffset does not point to endOffset + 1, i.e. some of the records in the cachedState are AVAILABLE, // even though the maxInFlightMessages limit is exceeded, replicaManager.readFromLog should be called Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); // Should have 1 fetch recorded. 
assertEquals(1, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count()); assertEquals(1, brokerTopicStats.numTopics()); assertEquals(1, brokerTopicStats.topicStats(tp0.topic()).totalShareFetchRequestRate().count()); } @Test public void testCloseSharePartitionManager() throws Exception { Timer timer = Mockito.mock(SystemTimerReaper.class); ShareGroupMetrics shareGroupMetrics = Mockito.mock(ShareGroupMetrics.class); SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withTimer(timer) .withShareGroupMetrics(shareGroupMetrics) .build(); // Verify that 0 calls are made to timer.close() and shareGroupMetrics.close(). Mockito.verify(timer, times(0)).close(); Mockito.verify(shareGroupMetrics, times(0)).close(); // Closing the sharePartitionManager closes timer object in sharePartitionManager. sharePartitionManager.close(); // Verify that the timer object in sharePartitionManager is closed by checking the calls to timer.close() and shareGroupMetrics.close(). 
        Mockito.verify(timer, times(1)).close();
        Mockito.verify(shareGroupMetrics, times(1)).close();
    }

    // Releasing a session releases acquired records for each share partition cached in the
    // session; a release failure and a partition missing from the partition cache each get
    // their own per-partition error in the response map.
    @Test
    public void testReleaseSessionSuccess() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("baz", 4));

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);
        // sp1 releases cleanly; sp2 fails its release future.
        when(sp1.releaseAcquiredRecords(ArgumentMatchers.eq(memberId))).thenReturn(CompletableFuture.completedFuture(null));
        when(sp2.releaseAcquiredRecords(ArgumentMatchers.eq(memberId))).thenReturn(FutureUtils.failedFuture(
            new InvalidRecordStateException("Unable to release acquired records for the batch")
        ));

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);
        when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);

        // The session tracks all three partitions, but only tp1 and tp2 are in the partition cache.
        ImplicitLinkedHashCollection<CachedSharePartition> partitionMap = new ImplicitLinkedHashCollection<>(3);
        partitionMap.add(new CachedSharePartition(tp1));
        partitionMap.add(new CachedSharePartition(tp2));
        partitionMap.add(new CachedSharePartition(tp3));
        when(shareSession.partitionMap()).thenReturn(partitionMap);

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache)
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.releaseSession(groupId, memberId);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(3, result.size());
        assertTrue(result.containsKey(tp1));
        assertTrue(result.containsKey(tp2));
        assertTrue(result.containsKey(tp3));
        assertEquals(0, result.get(tp1).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp1).errorCode());
        assertEquals(2, result.get(tp2).partitionIndex());
        assertEquals(Errors.INVALID_RECORD_STATE.code(), result.get(tp2).errorCode());
        assertEquals("Unable to release acquired records for the batch", result.get(tp2).errorMessage());
        // tp3 was not a part of partitionCache.
        assertEquals(4, result.get(tp3).partitionIndex());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp3).errorCode());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp3).errorMessage());
        // Shouldn't have any metrics for fetch and acknowledge.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 0, 0),
            Map.of()
        );
    }

    // A groupId that does not match the cached session must fail the release future with
    // ShareSessionNotFoundException.
    @Test
    public void testReleaseSessionWithIncorrectGroupId() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);

        ImplicitLinkedHashCollection<CachedSharePartition> partitionMap = new ImplicitLinkedHashCollection<>(3);
        partitionMap.add(new CachedSharePartition(tp1));
        when(shareSession.partitionMap()).thenReturn(partitionMap);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache)
            .build();

        // Calling releaseSession with incorrect groupId.
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.releaseSession("grp-2", memberId);
        assertTrue(resultFuture.isDone());
        assertTrue(resultFuture.isCompletedExceptionally());
        Throwable exception = assertThrows(ExecutionException.class, resultFuture::get);
        assertInstanceOf(ShareSessionNotFoundException.class, exception.getCause());
    }

    // A memberId with no matching cached session must fail the release future with
    // ShareSessionNotFoundException.
    @Test
    public void testReleaseSessionWithIncorrectMemberId() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        // Member with random Uuid so that it does not match the memberId.
        when(cache.get(new ShareSessionKey(groupId, Uuid.randomUuid().toString()))).thenReturn(shareSession);

        ImplicitLinkedHashCollection<CachedSharePartition> partitionMap = new ImplicitLinkedHashCollection<>(3);
        partitionMap.add(new CachedSharePartition(tp1));
        when(shareSession.partitionMap()).thenReturn(partitionMap);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.releaseSession(groupId, memberId);
        assertTrue(resultFuture.isDone());
        assertTrue(resultFuture.isCompletedExceptionally());
        Throwable exception = assertThrows(ExecutionException.class, resultFuture::get);
        assertInstanceOf(ShareSessionNotFoundException.class, exception.getCause());
    }

    // A session whose partition map is empty should produce an empty response map.
    @Test
    public void testReleaseSessionWithEmptyTopicPartitions() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);
        when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);
        when(shareSession.partitionMap()).thenReturn(new ImplicitLinkedHashCollection<>());

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache)
            .build();

        // Empty list of TopicIdPartitions to releaseSession. This should return an empty map.
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.releaseSession(groupId, memberId);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(0, result.size());
    }

    // A null share session from the cache lookup should also yield an empty response map.
    @Test
    public void testReleaseSessionWithNullShareSession() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        ShareSessionCache cache = mock(ShareSessionCache.class);
        // Null share session in get response so empty topic partitions should be returned.
        when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(null);
        // Make the response not null for remove so can further check for the return value from topic partitions.
        when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(mock(ShareSession.class));

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.releaseSession(groupId, memberId);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(0, result.size());
    }

    // Acknowledging batches on a single cached share partition succeeds and records one
    // acknowledgement in the broker topic metrics (globally and for the topic).
    @Test
    public void testAcknowledgeSinglePartition() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        SharePartition sp = mock(SharePartition.class);

        when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp), sp);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(1, result.size());
        assertTrue(result.containsKey(tp));
        assertEquals(0, result.get(tp).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp).errorCode());

        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 1, 0),
            Map.of("foo", new TopicMetrics(0, 0, 1, 0))
        );
    }

    // Acknowledging batches across multiple share partitions succeeds per partition and the
    // per-AcknowledgeType record counts aggregate across partitions.
    @Test
    public void
testAcknowledgeMultiplePartition() throws Exception {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0));

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);
        SharePartition sp3 = mock(SharePartition.class);

        when(sp1.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null));
        when(sp2.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null));
        when(sp3.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);
        partitionCache.put(new SharePartitionKey(groupId, tp3), sp3);

        ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time);
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withShareGroupMetrics(shareGroupMetrics)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        // tp1 accepts (type 1), tp2 releases (type 2), tp3 rejects (type 3).
        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp1, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));
        acknowledgeTopics.put(tp2, List.of(
            new ShareAcknowledgementBatch(15, 26, List.of((byte) 2)),
            new ShareAcknowledgementBatch(34, 56, List.of((byte) 2))
        ));
        acknowledgeTopics.put(tp3, List.of(
            new ShareAcknowledgementBatch(4, 15, List.of((byte) 3)),
            new ShareAcknowledgementBatch(16, 21, List.of((byte) 3))
        ));

        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(3, result.size());
        assertTrue(result.containsKey(tp1));
        assertTrue(result.containsKey(tp2));
        assertTrue(result.containsKey(tp3));
        assertEquals(0, result.get(tp1).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp1).errorCode());
        assertEquals(0, result.get(tp2).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp2).errorCode());
        assertEquals(0, result.get(tp3).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp3).errorCode());

        // Record counts are the total offsets in the batches of each acknowledge type.
        assertEquals(42, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count());
        assertEquals(35, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count());
        assertEquals(18, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count());
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).meanRate() > 0);
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).meanRate() > 0);
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).meanRate() > 0);
        // Should have 3 successful acknowledgements overall and 1 successful acknowledgement per topic.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 3, 0),
            Map.of(tp1.topic(), new TopicMetrics(0, 0, 1, 0),
                tp2.topic(), new TopicMetrics(0, 0, 1, 0),
                tp3.topic(), new TopicMetrics(0, 0, 1, 0))
        );
        shareGroupMetrics.close();
    }

    // Acknowledging individual offsets (mixed acknowledge types within a single batch)
    // succeeds and the per-type counters reflect each offset's type.
    @Test
    public void testAcknowledgeIndividualOffsets() throws Exception {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);

        // ack1: single accept; ack2: one type per offset in [15, 20].
        List<ShareAcknowledgementBatch> ack1 = List.of(
            new ShareAcknowledgementBatch(12, 12, List.of((byte) 1)));
        List<ShareAcknowledgementBatch> ack2 = List.of(
            new ShareAcknowledgementBatch(15, 20, List.of((byte) 2, (byte) 3, (byte) 2, (byte) 2, (byte) 3, (byte) 2)));

        when(sp1.acknowledge(memberId, ack1)).thenReturn(CompletableFuture.completedFuture(null));
        when(sp2.acknowledge(memberId, ack2)).thenReturn(CompletableFuture.completedFuture(null));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);

        ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time);
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withShareGroupMetrics(shareGroupMetrics)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = Map.of(tp1, ack1, tp2, ack2);
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(2, result.size());
        assertTrue(result.containsKey(tp1));
        assertTrue(result.containsKey(tp2));
        assertEquals(0, result.get(tp1).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp1).errorCode());
        assertEquals(0, result.get(tp2).partitionIndex());
        assertEquals(Errors.NONE.code(), result.get(tp2).errorCode());

        assertEquals(1, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count());
        assertEquals(4, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count());
        assertEquals(2, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count());
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).meanRate() > 0);
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).meanRate() > 0);
        assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).meanRate() > 0);
        shareGroupMetrics.close();
    }

    // Acknowledging with a groupId that has no cached share partition yields
    // UNKNOWN_TOPIC_OR_PARTITION and records no per-type acknowledgement metrics.
    @Test
    public void testAcknowledgeIncorrectGroupId() {
        String groupId = "grp";
        String groupId2 = "grp2";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        SharePartition sp = mock(SharePartition.class);

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp), sp);

        ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time);
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .withShareGroupMetrics(shareGroupMetrics)
            .build();

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId2, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(1, result.size());
        assertTrue(result.containsKey(tp));
        assertEquals(0, result.get(tp).partitionIndex());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage());

        // No metric should be recorded as acknowledge failed.
        assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count());
        assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count());
        assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count());
        // Should have 1 acknowledge recorded and 1 failed.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 1, 1),
            Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1))
        );
    }

    // A failed acknowledge future from the share partition (wrong member) is surfaced as the
    // matching error code and message in the per-partition response.
    @Test
    public void testAcknowledgeIncorrectMemberId() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        SharePartition sp = mock(SharePartition.class);
        when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(FutureUtils.failedFuture(
            new InvalidRequestException("Member is not the owner of batch record")
        ));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp), sp);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));

        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(1, result.size());
        assertTrue(result.containsKey(tp));
        assertEquals(0, result.get(tp).partitionIndex());
        assertEquals(Errors.INVALID_REQUEST.code(), result.get(tp).errorCode());
        assertEquals("Member is not the owner of batch record", result.get(tp).errorMessage());

        // Should have 1 acknowledge recorded and 1 failed.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 1, 1),
            Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1))
        );
    }

    // Acknowledging a partition that is not in the partition cache at all yields
    // UNKNOWN_TOPIC_OR_PARTITION for that partition.
    @Test
    public void testAcknowledgeEmptyPartitionCacheMap() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo4", 3));
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp, List.of(
            new ShareAcknowledgementBatch(78, 90, List.of((byte) 2)),
            new ShareAcknowledgementBatch(94, 99, List.of((byte) 2))
        ));
        CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
            sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
        Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
        assertEquals(1, result.size());
        assertTrue(result.containsKey(tp));
        assertEquals(3, result.get(tp).partitionIndex());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode());
        assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage());

        // Should have 1 acknowledge recorded and 1 failed.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(0, 0, 1, 1),
            Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1))
        );
    }

    // An acknowledgement that makes a partition's records acquirable again should complete a
    // delayed share fetch that is watching that partition in the purgatory.
    @Test
    public void testAcknowledgeCompletesDelayedShareFetchRequest() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp1, tp2);

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);

        // mocked share partitions sp1 and sp2 can be acquired once there is an acknowledgement for it.
        doAnswer(invocation -> {
            when(sp1.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp1).acknowledge(ArgumentMatchers.eq(memberId), any());
        doAnswer(invocation -> {
            when(sp2.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp2).acknowledge(ArgumentMatchers.eq(memberId), any());

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);

        ShareFetch shareFetch = new ShareFetch(
            FETCH_PARAMS,
            groupId,
            Uuid.randomUuid().toString(),
            new CompletableFuture<>(),
            topicIdPartitions,
            BATCH_OPTIMIZED,
            BATCH_SIZE,
            100,
            brokerTopicStats);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0)));
        mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 2);

        // Initially you cannot acquire records for both sp1 and sp2.
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp1.canAcquireRecords()).thenReturn(false);
        when(sp2.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp2.canAcquireRecords()).thenReturn(false);

        when(sp1.acquire(anyString(), any(ShareAcquireMode.class), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty());
        when(sp2.acquire(anyString(), any(ShareAcquireMode.class), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty());

        List<DelayedOperationKey> delayedShareFetchWatchKeys = new ArrayList<>();
        topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition())));

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        LinkedHashMap<TopicIdPartition, SharePartition> sharePartitions = new LinkedHashMap<>();
        sharePartitions.put(tp1, sp1);
        sharePartitions.put(tp2, sp2);

        DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder()
            .withShareFetchData(shareFetch)
            .withReplicaManager(mockReplicaManager)
            .withSharePartitions(sharePartitions)
            .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM))
            .build();

        delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys);

        // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys.
        assertEquals(2, delayedShareFetchPurgatory.watched());

        doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp1, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));

        assertEquals(2, delayedShareFetchPurgatory.watched());
        // Acknowledgement request for sp1.
        sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);

        // Since sp1 is acknowledged, the delayedShareFetchPurgatory should have 1 watched key corresponding to sp2.
        assertEquals(1, delayedShareFetchPurgatory.watched());

        Mockito.verify(sp1, times(1)).nextFetchOffset();
        Mockito.verify(sp2, times(0)).nextFetchOffset();

        assertTrue(delayedShareFetch.lock().tryLock());
        delayedShareFetch.lock().unlock();
        // Should have 1 acknowledge recorded, as no acknowledgement request is sent for the other topic.
        assertEquals(1, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count());
        assertEquals(1, brokerTopicStats.numTopics());
        assertEquals(1, brokerTopicStats.topicStats(tp1.topic()).totalShareAcknowledgementRequestRate().count());
    }

    // Acknowledging a partition that no delayed share fetch is watching must NOT complete the
    // pending delayed fetch for the other partitions.
    @Test
    public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0));
        // The delayed fetch only watches tp1 and tp2; tp3 is acknowledged separately.
        List<TopicIdPartition> topicIdPartitions = List.of(tp1, tp2);

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);
        SharePartition sp3 = mock(SharePartition.class);

        // mocked share partitions sp1, sp2 and sp3 can be acquired once there is an acknowledgement for it.
        doAnswer(invocation -> {
            when(sp1.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp1).acknowledge(ArgumentMatchers.eq(memberId), any());
        doAnswer(invocation -> {
            when(sp2.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp2).acknowledge(ArgumentMatchers.eq(memberId), any());
        doAnswer(invocation -> {
            when(sp3.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp3).acknowledge(ArgumentMatchers.eq(memberId), any());

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);
        partitionCache.put(new SharePartitionKey(groupId, tp3), sp3);

        ShareFetch shareFetch = new ShareFetch(
            FETCH_PARAMS,
            groupId,
            Uuid.randomUuid().toString(),
            new CompletableFuture<>(),
            topicIdPartitions,
            BATCH_OPTIMIZED,
            BATCH_SIZE,
            100,
            brokerTopicStats);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        // Initially you cannot acquire records for all 3 share partitions.
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp1.canAcquireRecords()).thenReturn(false);
        when(sp2.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp2.canAcquireRecords()).thenReturn(false);
        when(sp3.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp3.canAcquireRecords()).thenReturn(false);

        List<DelayedOperationKey> delayedShareFetchWatchKeys = new ArrayList<>();
        topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition())));

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        LinkedHashMap<TopicIdPartition, SharePartition> sharePartitions = new LinkedHashMap<>();
        sharePartitions.put(tp1, sp1);
        sharePartitions.put(tp2, sp2);
        sharePartitions.put(tp3, sp3);

        DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder()
            .withShareFetchData(shareFetch)
            .withReplicaManager(mockReplicaManager)
            .withSharePartitions(sharePartitions)
            .build();

        delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys);

        // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys.
        assertEquals(2, delayedShareFetchPurgatory.watched());

        Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
        acknowledgeTopics.put(tp3, List.of(
            new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)),
            new ShareAcknowledgementBatch(24, 56, List.of((byte) 1))
        ));

        // Acknowledgement request for sp3.
        sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);

        // Since neither sp1 and sp2 have been acknowledged, the delayedShareFetchPurgatory should have 2 watched keys.
        assertEquals(2, delayedShareFetchPurgatory.watched());

        Mockito.verify(sp1, times(0)).nextFetchOffset();
        Mockito.verify(sp2, times(0)).nextFetchOffset();

        assertTrue(delayedShareFetch.lock().tryLock());
        delayedShareFetch.lock().unlock();
        // Should have 1 acknowledge recorded, as no acknowledgement request is sent for the other 2 topics.
        assertEquals(1, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count());
        assertEquals(1, brokerTopicStats.numTopics());
        assertEquals(1, brokerTopicStats.topicStats(tp3.topic()).totalShareAcknowledgementRequestRate().count());
    }

    // Releasing a session whose cached partitions overlap a pending delayed share fetch should
    // complete the delayed fetch for the overlapping partition.
    @Test
    public void testReleaseSessionCompletesDelayedShareFetchRequest() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp1, tp2);

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);
        SharePartition sp3 = mock(SharePartition.class);

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);

        // mocked share partitions sp1 and sp2 can be acquired once there is a release acquired records on session close request for it.
        doAnswer(invocation -> {
            when(sp1.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp1).releaseAcquiredRecords(ArgumentMatchers.eq(memberId));
        doAnswer(invocation -> {
            when(sp2.canAcquireRecords()).thenReturn(true);
            return CompletableFuture.completedFuture(Optional.empty());
        }).when(sp2).releaseAcquiredRecords(ArgumentMatchers.eq(memberId));
        when(sp3.releaseAcquiredRecords(ArgumentMatchers.eq(memberId))).thenReturn(CompletableFuture.completedFuture(null));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);
        partitionCache.put(new SharePartitionKey(groupId, tp3), sp3);

        ShareFetch shareFetch = new ShareFetch(
            FETCH_PARAMS,
            groupId,
            Uuid.randomUuid().toString(),
            new CompletableFuture<>(),
            topicIdPartitions,
            BATCH_OPTIMIZED,
            BATCH_SIZE,
            100,
            brokerTopicStats);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0)));
        mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1);

        // Initially you cannot acquire records for both sp1 and sp2.
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp1.canAcquireRecords()).thenReturn(false);
        when(sp2.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp2.canAcquireRecords()).thenReturn(false);

        List<DelayedOperationKey> delayedShareFetchWatchKeys = new ArrayList<>();
        topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition())));

        sharePartitionManager = spy(SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withCache(cache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .build());

        LinkedHashMap<TopicIdPartition, SharePartition> sharePartitions = new LinkedHashMap<>();
        sharePartitions.put(tp1, sp1);
        sharePartitions.put(tp2, sp2);

        DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder()
            .withShareFetchData(shareFetch)
            .withReplicaManager(mockReplicaManager)
            .withSharePartitions(sharePartitions)
            .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM))
            .build();

        delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys);

        // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys.
        assertEquals(2, delayedShareFetchPurgatory.watched());

        // The share session for this share group member returns tp1 and tp3, tp1 is common in both the delayed fetch request and the share session.
        when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId)).thenReturn(List.of(tp1, tp3));

        doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());
        when(sp1.acquire(anyString(), any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0));

        // Release acquired records on session close request for tp1 and tp3.
        sharePartitionManager.releaseSession(groupId, memberId);

        // Since sp1's request to release acquired records on session close is completed, the delayedShareFetchPurgatory
        // should have 1 watched key corresponding to sp2.
        assertEquals(1, delayedShareFetchPurgatory.watched());

        Mockito.verify(sp1, times(1)).nextFetchOffset();
        Mockito.verify(sp2, times(0)).nextFetchOffset();

        assertTrue(delayedShareFetch.lock().tryLock());
        delayedShareFetch.lock().unlock();
    }

    // Releasing a session whose cached partitions do not overlap a pending delayed share fetch
    // must leave the delayed fetch's watched keys untouched.
    @Test
    public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp1, tp2);

        SharePartition sp1 = mock(SharePartition.class);
        SharePartition sp2 = mock(SharePartition.class);
        SharePartition sp3 = mock(SharePartition.class);

        ShareSessionCache cache = mock(ShareSessionCache.class);
        ShareSession shareSession = mock(ShareSession.class);
        when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);

        // mocked share partitions sp1, sp2 and sp3 can be acquired once there is a release acquired records on session close for it.
doAnswer(invocation -> { when(sp1.canAcquireRecords()).thenReturn(true); return CompletableFuture.completedFuture(Optional.empty()); }).when(sp1).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); doAnswer(invocation -> { when(sp2.canAcquireRecords()).thenReturn(true); return CompletableFuture.completedFuture(Optional.empty()); }).when(sp2).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); doAnswer(invocation -> { when(sp3.canAcquireRecords()).thenReturn(true); return CompletableFuture.completedFuture(Optional.empty()); }).when(sp3).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), new CompletableFuture<>(), topicIdPartitions, BATCH_OPTIMIZED, BATCH_SIZE, 100, brokerTopicStats); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Initially you cannot acquire records for both all 3 share partitions. 
when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); when(sp3.maybeAcquireFetchLock(any())).thenReturn(true); when(sp3.canAcquireRecords()).thenReturn(false); List<DelayedOperationKey> delayedShareFetchWatchKeys = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = spy(SharePartitionManagerBuilder.builder() .withPartitionCache(partitionCache) .withCache(cache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .build()); LinkedHashMap<TopicIdPartition, SharePartition> sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp1, sp1); sharePartitions.put(tp2, sp2); sharePartitions.put(tp3, sp3); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) .build(); delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys); // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys. assertEquals(2, delayedShareFetchPurgatory.watched()); // The share session for this share group member returns tp1 and tp3. No topic partition is common in // both the delayed fetch request and the share session. when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId)).thenReturn(List.of(tp3)); // Release acquired records on session close for sp3. sharePartitionManager.releaseSession(groupId, memberId); // Since neither sp1 and sp2 are a part of the release acquired records request on session close, the // delayedShareFetchPurgatory should have 2 watched keys. 
        assertEquals(2, delayedShareFetchPurgatory.watched());
        Mockito.verify(sp1, times(0)).nextFetchOffset();
        Mockito.verify(sp2, times(0)).nextFetchOffset();
        assertTrue(delayedShareFetch.lock().tryLock());
        delayedShareFetch.lock().unlock();
    }

    /**
     * Verifies that a fetch request against a share partition whose initialization is still
     * pending completes with an empty response (rather than blocking), performs no replica
     * manager read, and that the partition load time metric is recorded only once the
     * initialization future completes.
     */
    @Test
    public void testPendingInitializationShouldCompleteFetchRequest() throws Exception {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        Uuid fooId = Uuid.randomUuid();
        TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        SharePartition sp0 = mock(SharePartition.class);
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);

        // Keep the initialization future pending, so fetch request is stuck.
        CompletableFuture<Void> pendingInitializationFuture = new CompletableFuture<>();
        when(sp0.maybeInitialize()).thenReturn(pendingInitializationFuture);
        when(sp0.loadStartTimeMs()).thenReturn(10L);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        // Fixed clock at 100ms; with loadStartTimeMs of 10ms the expected load time is 90ms.
        Time time = mock(Time.class);
        when(time.hiResClockMs()).thenReturn(100L);
        ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time);
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTime(time)
            .withShareGroupMetrics(shareGroupMetrics)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE,
                topicIdPartitions);
        // Verify that the fetch request is completed.
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        assertTrue(future.join().isEmpty());
        // Verify that replica manager fetch is not called.
        Mockito.verify(mockReplicaManager, times(0)).readFromLog(
            any(), any(), any(ReplicaQuota.class), anyBoolean());
        assertFalse(pendingInitializationFuture.isDone());
        assertEquals(0, shareGroupMetrics.partitionLoadTimeMs().count());

        // Complete the pending initialization future.
        pendingInitializationFuture.complete(null);
        // Verify the partition load time metrics.
        assertEquals(1, shareGroupMetrics.partitionLoadTimeMs().count());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().min());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().sum());
        // Should have 1 fetch recorded.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(1, 0, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(1, 0, 0, 0))
        );
        shareGroupMetrics.close();
    }

    /**
     * Verifies that the partition load time metric is recorded independently per partition:
     * each partition's load time is measured from its own loadStartTimeMs to the (fixed)
     * clock value at the moment its initialization future completes.
     */
    @Test
    public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception {
        String groupId = "grp";
        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1);

        SharePartition sp0 = mock(SharePartition.class);
        SharePartition sp1 = mock(SharePartition.class);
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);

        // Keep the initialization future pending, so fetch request is stuck.
        // Load start times of 10ms and 40ms against the fixed 100ms clock give expected
        // per-partition load times of 90ms and 60ms respectively.
        CompletableFuture<Void> pendingInitializationFuture1 = new CompletableFuture<>();
        when(sp0.maybeInitialize()).thenReturn(pendingInitializationFuture1);
        when(sp0.loadStartTimeMs()).thenReturn(10L);
        CompletableFuture<Void> pendingInitializationFuture2 = new CompletableFuture<>();
        when(sp1.maybeInitialize()).thenReturn(pendingInitializationFuture2);
        when(sp1.loadStartTimeMs()).thenReturn(40L);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        Time time = mock(Time.class);
        when(time.hiResClockMs()).thenReturn(100L);
        ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time);
        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTime(time)
            .withShareGroupMetrics(shareGroupMetrics)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE,
                topicIdPartitions);
        // Verify that the fetch request is completed.
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        assertFalse(pendingInitializationFuture1.isDone());
        assertFalse(pendingInitializationFuture2.isDone());
        assertEquals(0, shareGroupMetrics.partitionLoadTimeMs().count());

        // Complete the first pending initialization future.
        pendingInitializationFuture1.complete(null);
        // Verify the partition load time metrics for first partition.
        assertEquals(1, shareGroupMetrics.partitionLoadTimeMs().count());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().min());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().sum());

        // Complete the second pending initialization future.
        pendingInitializationFuture2.complete(null);
        // Verify the partition load time metrics for both partitions.
        assertEquals(2, shareGroupMetrics.partitionLoadTimeMs().count());
        assertEquals(60.0, shareGroupMetrics.partitionLoadTimeMs().min());
        assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max());
        assertEquals(150.0, shareGroupMetrics.partitionLoadTimeMs().sum());
        shareGroupMetrics.close();
    }

    /**
     * Verifies that concurrent fetch requests stuck on pending share partition initialization
     * are each parked in the purgatory, and that completing each initialization future triggers
     * a corresponding checkAndComplete on the purgatory without ever reading from the log.
     */
    @Test
    public void testDelayedInitializationShouldCompleteFetchRequest() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        Uuid fooId = Uuid.randomUuid();
        TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        SharePartition sp0 = mock(SharePartition.class);
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);

        // Keep the 2 initialization futures pending and 1 completed with leader not available exception.
        CompletableFuture<Void> pendingInitializationFuture1 = new CompletableFuture<>();
        CompletableFuture<Void> pendingInitializationFuture2 = new CompletableFuture<>();
        when(sp0.maybeInitialize()).
            thenReturn(pendingInitializationFuture1)
            .thenReturn(pendingInitializationFuture2)
            .thenReturn(CompletableFuture.failedFuture(new LeaderNotAvailableException("Leader not available")));

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> shareFetchPurgatorySpy = spy(new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true));
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, shareFetchPurgatorySpy);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        // Send 3 requests for share fetch for same share partition.
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future1 =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future2 =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future3 =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);

        Mockito.verify(sp0, times(3)).maybeInitialize();
        Mockito.verify(mockReplicaManager, times(3)).addDelayedShareFetchRequest(any(), any());
        Mockito.verify(shareFetchPurgatorySpy, times(3)).tryCompleteElseWatch(any(), any());
        Mockito.verify(shareFetchPurgatorySpy, times(0)).checkAndComplete(any());

        // All 3 requests should be pending.
        assertFalse(future1.isDone());
        assertFalse(future2.isDone());
        assertFalse(future3.isDone());

        // Complete one pending initialization future.
        pendingInitializationFuture1.complete(null);
        Mockito.verify(mockReplicaManager, times(1)).completeDelayedShareFetchRequest(any());
        Mockito.verify(shareFetchPurgatorySpy, times(1)).checkAndComplete(any());

        pendingInitializationFuture2.complete(null);
        Mockito.verify(mockReplicaManager, times(2)).completeDelayedShareFetchRequest(any());
        Mockito.verify(shareFetchPurgatorySpy, times(2)).checkAndComplete(any());

        // Verify that replica manager fetch is not called.
        Mockito.verify(mockReplicaManager, times(0)).readFromLog(
            any(), any(), any(ReplicaQuota.class), anyBoolean());

        // Should have 3 fetch recorded.
        assertEquals(3, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count());
        assertEquals(1, brokerTopicStats.numTopics());
        assertEquals(3, brokerTopicStats.topicStats(tp0.topic()).totalShareFetchRequestRate().count());
    }

    /**
     * Exercises each failure exception from SharePartition.maybeInitialize and verifies, per
     * exception type: the error surfaced to the client (if any), whether the share partition is
     * fenced, and whether it is evicted from the partition cache.
     */
    @Test
    public void testSharePartitionInitializationExceptions() throws Exception {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        Uuid fooId = Uuid.randomUuid();
        TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        SharePartition sp0 = mock(SharePartition.class);
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        // Return LeaderNotAvailableException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new LeaderNotAvailableException("Leader not available")));
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        // Exception for client should not occur for LeaderNotAvailableException, this exception is to communicate
        // between SharePartitionManager and SharePartition to retry the request as SharePartition is not yet ready.
        assertFalse(future.isCompletedExceptionally());
        assertTrue(future.join().isEmpty());
        Mockito.verify(sp0, times(0)).markFenced();
        // Verify that the share partition is still in the cache on LeaderNotAvailableException.
        assertEquals(1, partitionCache.size());

        // Return IllegalStateException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new IllegalStateException("Illegal state")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Illegal state");
        Mockito.verify(sp0, times(1)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // The last exception removes the share partition from the cache hence re-add the share partition to cache.
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Return CoordinatorNotAvailableException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new CoordinatorNotAvailableException("Coordinator not available")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.COORDINATOR_NOT_AVAILABLE, "Coordinator not available");
        Mockito.verify(sp0, times(2)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // The last exception removes the share partition from the cache hence re-add the share partition to cache.
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Return InvalidRequestException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new InvalidRequestException("Invalid request")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.INVALID_REQUEST, "Invalid request");
        Mockito.verify(sp0, times(3)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // The last exception removes the share partition from the cache hence re-add the share partition to cache.
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Return FencedStateEpochException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new FencedStateEpochException("Fenced state epoch")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced state epoch");
        Mockito.verify(sp0, times(4)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // The last exception removes the share partition from the cache hence re-add the share partition to cache.
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Return NotLeaderOrFollowerException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new NotLeaderOrFollowerException("Not leader or follower")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Not leader or follower");
        Mockito.verify(sp0, times(5)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // The last exception removes the share partition from the cache hence re-add the share partition to cache.
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Return RuntimeException to simulate initialization failure.
        when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new RuntimeException("Runtime exception")));
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing in delayed share fetch queue never ended.");
        validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Runtime exception");
        Mockito.verify(sp0, times(6)).markFenced();
        assertTrue(partitionCache.isEmpty());

        // Should have 7 fetch recorded and 6 failures as 1 fetch was waiting on initialization and
        // didn't error out.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(7, 6, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(7, 6, 0, 0))
        );
    }

    /**
     * Verifies that an exception thrown while creating the share partition instance itself
     * (from the partition cache) is surfaced to the client as UNKNOWN_SERVER_ERROR and
     * recorded as a failed fetch in the broker topic metrics.
     */
    @Test
    public void testShareFetchProcessingExceptions() throws Exception {
        String groupId = "grp";
        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        SharePartitionCache partitionCache = mock(SharePartitionCache.class);
        // Throw the exception for first fetch request. Return share partition for next.
        when(partitionCache.computeIfAbsent(any(), any()))
            .thenThrow(new RuntimeException("Error creating instance"));

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing for delayed share fetch request not finished.");
        validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Error creating instance");

        // Should have 1 fetch recorded and 1 failure.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(1, 1, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0))
        );
    }

    /**
     * Verifies that failures while resolving the underlying partition (storage exception or
     * the local replica not being the leader) error the fetch with the matching error code
     * and leave no share partition behind in the cache.
     */
    @Test
    public void testSharePartitionInitializationFailure() throws Exception {
        String groupId = "grp";
        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        // Send map to check no share partition is created.
        SharePartitionCache partitionCache = new SharePartitionCache();

        // Validate when partition is not the leader.
        Partition partition = mock(Partition.class);
        when(partition.isLeader()).thenReturn(false);

        ReplicaManager replicaManager = mock(ReplicaManager.class);
        // First check should throw KafkaStorageException, second check should return partition which
        // is not leader.
        when(replicaManager.getPartitionOrException(any(TopicPartition.class)))
            .thenThrow(new KafkaStorageException("Exception"))
            .thenReturn(partition);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withReplicaManager(replicaManager)
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        // Validate when exception is thrown.
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing for delayed share fetch request not finished.");
        validateShareFetchFutureException(future, tp0, Errors.KAFKA_STORAGE_ERROR, "Exception");
        assertTrue(partitionCache.isEmpty());

        // Validate when partition is not leader.
        future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        TestUtils.waitForCondition(
            future::isDone,
            DELAYED_SHARE_FETCH_TIMEOUT_MS,
            () -> "Processing for delayed share fetch request not finished.");
        validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER);
        assertTrue(partitionCache.isEmpty());

        // Should have 2 fetch recorded and 2 failures.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(2, 2, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0))
        );
    }

    /**
     * Verifies that a single fetch over multiple partitions reports a per-partition outcome:
     * tp0 fails at instantiation (not leader), tp1 succeeds, tp2 fails initialization with a
     * fenced state epoch — all three error codes appear in one response.
     */
    @Test
    public void testSharePartitionPartialInitializationFailure() throws Exception {
        String groupId = "grp";
        Uuid memberId1 = Uuid.randomUuid();
        // For tp0, share partition instantiation will fail.
        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        // For tp1, share fetch should succeed.
        TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1));
        // For tp2, share partition initialization will fail.
        TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1, tp2);

        // Mark partition0 as not the leader.
        Partition partition0 = mock(Partition.class);
        when(partition0.isLeader()).thenReturn(false);

        ReplicaManager replicaManager = mock(ReplicaManager.class);
        when(replicaManager.getPartitionOrException(any(TopicPartition.class)))
            .thenReturn(partition0);

        // Mock share partition for tp1, so it can succeed.
        SharePartition sp1 = mock(SharePartition.class);
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp1.canAcquireRecords()).thenReturn(true);
        when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null));
        when(sp1.acquire(anyString(), any(ShareAcquireMode.class), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0));

        // Fail initialization for tp2.
        SharePartition sp2 = mock(SharePartition.class);
        partitionCache.put(new SharePartitionKey(groupId, tp2), sp2);
        when(sp2.maybeInitialize()).thenReturn(CompletableFuture.failedFuture(new FencedStateEpochException("Fenced state epoch")));

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, replicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(replicaManager, delayedShareFetchPurgatory);

        when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0)));
        mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1);
        doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withReplicaManager(replicaManager)
            .withPartitionCache(partitionCache)
            .withBrokerTopicStats(brokerTopicStats)
            .withTimer(mockTimer)
            .build();

        // Validate when exception is thrown.
        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        assertTrue(future.isDone());
        assertFalse(future.isCompletedExceptionally());

        // Each partition reports its own outcome in the combined response.
        Map<TopicIdPartition, PartitionData> partitionDataMap = future.get();
        assertEquals(3, partitionDataMap.size());
        assertTrue(partitionDataMap.containsKey(tp0));
        assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code(), partitionDataMap.get(tp0).errorCode());
        assertTrue(partitionDataMap.containsKey(tp1));
        assertEquals(Errors.NONE.code(), partitionDataMap.get(tp1).errorCode());
        assertTrue(partitionDataMap.containsKey(tp2));
        assertEquals(Errors.FENCED_STATE_EPOCH.code(), partitionDataMap.get(tp2).errorCode());
        assertEquals("Fenced state epoch", partitionDataMap.get(tp2).errorMessage());

        Mockito.verify(replicaManager, times(1)).completeDelayedShareFetchRequest(
            new DelayedShareFetchGroupKey(groupId, tp2));
        Mockito.verify(replicaManager, times(1)).readFromLog(
            any(), any(), any(ReplicaQuota.class), anyBoolean());

        // Should have 1 fetch recorded and 1 failure as single topic has multiple partition fetch
        // and failure.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(1, 1, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0))
        );
    }

    /**
     * Verifies the cache-eviction contract for exceptions raised during the replica manager
     * log read: a generic RuntimeException keeps the share partition cached, while a
     * NotLeaderOrFollowerException evicts it.
     */
    @Test
    public void testReplicaManagerFetchException() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();
        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0);

        SharePartition sp0 = mock(SharePartition.class);
        when(sp0.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp0.canAcquireRecords()).thenReturn(true);
        when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null));
        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        doThrow(new RuntimeException("Exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Exception");
        // Verify that the share partition is still in the cache on exception.
        assertEquals(1, partitionCache.size());

        // Throw NotLeaderOrFollowerException from replica manager fetch which should evict instance from the cache.
        doThrow(new NotLeaderOrFollowerException("Leader exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());
        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Leader exception");
        assertTrue(partitionCache.isEmpty());

        // Should have 2 fetch recorded and 2 failures.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(2, 2, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0))
        );
    }

    /**
     * Verifies that a fenced-epoch failure during the log read evicts only the share partitions
     * that actually took part in the read: a partition whose fetch lock was never acquired
     * stays cached, and is evicted on a later read in which it does participate.
     */
    @Test
    public void testReplicaManagerFetchMultipleSharePartitionsException() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1);

        SharePartition sp0 = mock(SharePartition.class);
        when(sp0.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp0.canAcquireRecords()).thenReturn(true);
        when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null));

        SharePartition sp1 = mock(SharePartition.class);
        // Do not make the share partition acquirable hence it shouldn't be removed from the cache,
        // as it won't be part of replica manager readFromLog request.
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(false);
        when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null));

        SharePartitionCache partitionCache = new SharePartitionCache();
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        partitionCache.put(new SharePartitionKey(groupId, tp1), sp1);

        Timer mockTimer = systemTimerReaper();
        DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory = new DelayedOperationPurgatory<>(
            "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(),
            DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true);
        mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory);

        // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache.
        doThrow(new FencedStateEpochException("Fenced exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withPartitionCache(partitionCache)
            .withReplicaManager(mockReplicaManager)
            .withTimer(mockTimer)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> future =
            sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced exception");
        // Verify that tp1 is still in the cache on exception.
        assertEquals(1, partitionCache.size());
        assertEquals(sp1, partitionCache.get(new SharePartitionKey(groupId, tp1)));

        // Make sp1 acquirable and add sp0 back in partition cache. Both share partitions should be
        // removed from the cache.
        when(sp1.maybeAcquireFetchLock(any())).thenReturn(true);
        when(sp1.canAcquireRecords()).thenReturn(true);
        partitionCache.put(new SharePartitionKey(groupId, tp0), sp0);
        // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache.
        doThrow(new FencedStateEpochException("Fenced exception again")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean());

        future = sharePartitionManager.fetchMessages(groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        validateShareFetchFutureException(future, List.of(tp0, tp1), Errors.FENCED_STATE_EPOCH, "Fenced exception again");
        assertTrue(partitionCache.isEmpty());

        // Should have 4 fetch recorded (2 fetch and 2 topics) and 3 failures as sp1 was not acquired
        // in first fetch and shall have empty response. Similarly, tp0 should record 2 failures and
        // tp1 should record 1 failure.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(4, 3, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0), tp1.topic(), new TopicMetrics(2, 1, 0, 0))
        );
    }

    /**
     * Verifies that fetching registers a replica manager listener for each topic partition.
     */
    @Test
    public void testListenerRegistration() {
        String groupId = "grp";
        String memberId = Uuid.randomUuid().toString();

        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1);

        ReplicaManager mockReplicaManager = mock(ReplicaManager.class);
        Partition partition = mockPartition();
        when(mockReplicaManager.getPartitionOrException((TopicPartition) Mockito.any())).thenReturn(partition);

        sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withReplicaManager(mockReplicaManager)
            .withBrokerTopicStats(brokerTopicStats)
            .build();

        CompletableFuture<Map<TopicIdPartition, PartitionData>> future = sharePartitionManager.fetchMessages(
            groupId, memberId, FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions);
        assertTrue(future.isDone());

        // Validate that the listener is registered.
        verify(mockReplicaManager, times(2)).maybeAddListener(any(), any());

        // The share partition initialization should error out as further mocks are not provided, the
        // metrics should mark fetch as failed.
        validateBrokerTopicStatsMetrics(
            brokerTopicStats,
            new TopicMetrics(2, 2, 0, 0),
            Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0), tp1.topic(), new TopicMetrics(1, 1, 0, 0))
        );
    }

    /** Verifies the SharePartitionListener behaviour for the onFailed callback. */
    @Test
    public void testSharePartitionListenerOnFailed() {
        SharePartitionKey sharePartitionKey = new SharePartitionKey("grp",
            new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)));
        SharePartitionCache partitionCache = new SharePartitionCache();
        ReplicaManager mockReplicaManager = mock(ReplicaManager.class);

        SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache);
        testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onFailed);
    }

    /** Verifies the SharePartitionListener behaviour for the onDeleted callback. */
    @Test
    public void testSharePartitionListenerOnDeleted() {
        SharePartitionKey sharePartitionKey = new SharePartitionKey("grp",
            new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)));
        SharePartitionCache partitionCache = new SharePartitionCache();
        ReplicaManager mockReplicaManager = mock(ReplicaManager.class);

        SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache);
        testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onDeleted);
    }

    /** Verifies the SharePartitionListener behaviour for the onBecomingFollower callback. */
    @Test
    public void testSharePartitionListenerOnBecomingFollower() {
        SharePartitionKey sharePartitionKey = new SharePartitionKey("grp",
            new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)));
        SharePartitionCache partitionCache = new SharePartitionCache();
        ReplicaManager mockReplicaManager = mock(ReplicaManager.class);

        SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache);
        testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onBecomingFollower);
    }

    /**
     * Verifies that fetchMessages rotates the requested partition list before processing,
     * using the captured ShareFetch passed to processShareFetch.
     */
    @Test
    public void testFetchMessagesRotatePartitions() {
        String groupId = "grp";
        Uuid memberId1 = Uuid.randomUuid();

        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1));
        TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));
        TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 1));
        TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2));
        TopicIdPartition tp5 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2));
        TopicIdPartition tp6 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3));
        List<TopicIdPartition> topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6);

        sharePartitionManager = Mockito.spy(SharePartitionManagerBuilder.builder().withBrokerTopicStats(brokerTopicStats).build());
        // Capture the arguments passed to processShareFetch.
        ArgumentCaptor<ShareFetch> captor = ArgumentCaptor.forClass(ShareFetch.class);

        sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 0, MAX_FETCH_RECORDS, BATCH_SIZE,
            topicIdPartitions);
        verify(sharePartitionManager, times(1)).processShareFetch(captor.capture());
        // Verify the partitions rotation, no rotation.
        ShareFetch resultShareFetch = captor.getValue();
        validateRotatedListEquals(resultShareFetch.topicIdPartitions(), topicIdPartitions, 0);

        // Single rotation.
sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 1, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); verify(sharePartitionManager, times(2)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1. resultShareFetch = captor.getValue(); validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); // Rotation by 3, less that the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 3, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); verify(sharePartitionManager, times(3)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 3. resultShareFetch = captor.getValue(); validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 3); // Rotation by 12, more than the number of partitions. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, 12, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); verify(sharePartitionManager, times(4)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 5 (12 % 7). resultShareFetch = captor.getValue(); validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 5); // Rotation by Integer.MAX_VALUE, boundary test. sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_OPTIMIZED, Integer.MAX_VALUE, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); verify(sharePartitionManager, times(5)).processShareFetch(captor.capture()); // Verify the partitions rotation, rotate by 1 (2147483647 % 7). 
resultShareFetch = captor.getValue(); validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); } @Test public void testCreateIdleShareFetchTask() throws Exception { ReplicaManager replicaManager = mock(ReplicaManager.class); MockTimer mockTimer = new MockTimer(time); long maxWaitMs = 1000L; // Set up the mock to capture and add the timer task Mockito.doAnswer(invocation -> { TimerTask timerTask = invocation.getArgument(0); mockTimer.add(timerTask); return null; }).when(replicaManager).addShareFetchTimerRequest(Mockito.any(TimerTask.class)); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) .withTime(time) .withTimer(mockTimer) .build(); CompletableFuture<Void> future = sharePartitionManager.createIdleShareFetchTimerTask(maxWaitMs); // Future should not be completed immediately assertFalse(future.isDone()); mockTimer.advanceClock(maxWaitMs / 2); assertFalse(future.isDone()); mockTimer.advanceClock((maxWaitMs / 2) + 1); // Verify the future is completed after the wait time assertTrue(future.isDone()); assertFalse(future.isCompletedExceptionally()); } @Test public void testOnShareVersionToggle() { String groupId = "grp"; SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); SharePartition sp3 = mock(SharePartition.class); // Mock the share partitions corresponding to the topic partitions. 
SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.put( new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0))), sp0 ); partitionCache.put( new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0))), sp1 ); partitionCache.put( new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0))), sp2 ); partitionCache.put( new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo4", 0))), sp3 ); sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCache(partitionCache) .build(); assertEquals(4, partitionCache.size()); sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false); // Because we are toggling to a share version which does not support share groups, the cache inside share partitions must be cleared. assertEquals(0, partitionCache.size()); //Check if all share partitions have been fenced. Mockito.verify(sp0).markFenced(); Mockito.verify(sp1).markFenced(); Mockito.verify(sp2).markFenced(); Mockito.verify(sp3).markFenced(); } @Test public void testOnShareVersionToggleWhenEnabledFromConfig() { SharePartition sp0 = mock(SharePartition.class); // Mock the share partitions corresponding to the topic partitions. SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.put( new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))), sp0 ); sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCache(partitionCache) .build(); assertEquals(1, partitionCache.size()); sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, true); // Though share version is toggled to off, but it's enabled from config, hence the cache should not be cleared. 
assertEquals(1, partitionCache.size()); Mockito.verify(sp0, times(0)).markFenced(); } @Test public void testShareGroupListener() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); String memberId1 = Uuid.randomUuid().toString(); String memberId2 = Uuid.randomUuid().toString(); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); ShareSessionCache cache = new ShareSessionCache(10); cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID); cache.maybeCreateSession(groupId, memberId2, new ImplicitLinkedHashCollection<>(), "id-2"); SharePartitionCache partitionCache = new SharePartitionCache(); partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0); partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp1), k -> sp1); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .build(); assertEquals(2, cache.size()); assertEquals(2, partitionCache.size()); // Invoke listeners by simulating connection disconnect for memberId1. cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID); // Session cache should remove the memberId1. assertEquals(1, cache.size()); // Partition cache should not remove the share partitions as the group is not empty. assertEquals(2, partitionCache.size()); assertNotNull(cache.get(new ShareSessionKey(groupId, memberId2))); // Invoke listeners by simulating connection disconnect for memberId2. cache.connectionDisconnectListener().onDisconnect("id-2"); // Session cache should remove the memberId2. assertEquals(0, cache.size()); // Partition cache should remove the share partitions as the group is empty. 
assertEquals(0, partitionCache.size()); Mockito.verify(sp0, times(1)).markFenced(); Mockito.verify(sp1, times(1)).markFenced(); Mockito.verify(mockReplicaManager, times(2)).removeListener(any(), any()); } @Test public void testShareGroupListenerWithEmptyCache() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); String memberId1 = Uuid.randomUuid().toString(); SharePartition sp0 = mock(SharePartition.class); ShareSessionCache cache = new ShareSessionCache(10); cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID); SharePartitionCache partitionCache = spy(new SharePartitionCache()); partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .build(); assertEquals(1, cache.size()); assertEquals(1, partitionCache.size()); // Clean up share session and partition cache. sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false); assertEquals(0, cache.size()); assertEquals(0, partitionCache.size()); Mockito.verify(sp0, times(1)).markFenced(); Mockito.verify(mockReplicaManager, times(1)).removeListener(any(), any()); Mockito.verify(partitionCache, times(0)).topicIdPartitionsForGroup(groupId); // Invoke listeners by simulating connection disconnect for member. As the group is empty, // hence onGroupEmpty method should be invoked and should complete without any exception. cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID); // Verify that the listener is called for the group. 
Mockito.verify(partitionCache, times(1)).topicIdPartitionsForGroup(groupId); } private Timer systemTimerReaper() { return new SystemTimerReaper( TIMER_NAME_PREFIX + "-test-reaper", new SystemTimer(TIMER_NAME_PREFIX + "-test-timer")); } private void assertNoReaperThreadsPendingClose() throws InterruptedException { TestUtils.waitForCondition( () -> Thread.getAllStackTraces().keySet().stream().noneMatch(t -> t.getName().contains(TIMER_NAME_PREFIX)), "Found unexpected reaper threads with name containing: " + TIMER_NAME_PREFIX); } private void testSharePartitionListener( SharePartitionKey sharePartitionKey, SharePartitionCache partitionCache, ReplicaManager mockReplicaManager, Consumer<TopicPartition> listenerConsumer ) { // Add another share partition to the cache. TopicPartition tp = new TopicPartition("foo", 1); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); SharePartitionKey spk = new SharePartitionKey("grp", tpId); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); partitionCache.put(sharePartitionKey, sp0); partitionCache.put(spk, sp1); // Invoke listener for first share partition. listenerConsumer.accept(sharePartitionKey.topicIdPartition().topicPartition()); // Validate that the share partition is removed from the cache. assertEquals(1, partitionCache.size()); assertFalse(partitionCache.containsKey(sharePartitionKey)); verify(sp0, times(1)).markFenced(); verify(mockReplicaManager, times(1)).removeListener(any(), any()); // Invoke listener for non-matching share partition. listenerConsumer.accept(tp); // The non-matching share partition should not be removed as the listener is attached to a different topic partition. assertEquals(1, partitionCache.size()); verify(sp1, times(0)).markFenced(); // Verify the remove listener is not called for the second share partition. 
verify(mockReplicaManager, times(1)).removeListener(any(), any()); } private ShareFetchResponseData.PartitionData noErrorShareFetchResponse() { return new ShareFetchResponseData.PartitionData().setPartitionIndex(0); } private ShareFetchResponseData.PartitionData errorShareFetchResponse(Short errorCode) { return new ShareFetchResponseData.PartitionData().setPartitionIndex(0).setErrorCode(errorCode); } private void mockUpdateAndGenerateResponseData(ShareFetchContext context, String groupId, String memberId) { LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> data = new LinkedHashMap<>(); if (context.getClass() == ShareSessionContext.class) { ShareSessionContext shareSessionContext = (ShareSessionContext) context; if (!shareSessionContext.isSubsequent()) { shareSessionContext.shareFetchData().forEach(topicIdPartition -> data.put(topicIdPartition, topicIdPartition.topic() == null ? errorShareFetchResponse(Errors.UNKNOWN_TOPIC_ID.code()) : noErrorShareFetchResponse())); } else { synchronized (shareSessionContext.session()) { shareSessionContext.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); data.put(topicIdPartition, topicIdPartition.topic() == null ? 
errorShareFetchResponse(Errors.UNKNOWN_TOPIC_ID.code()) : noErrorShareFetchResponse()); }); } } } context.updateAndGenerateResponseData(groupId, memberId, data); } private void assertPartitionsPresent(ShareSessionContext context, List<TopicIdPartition> partitions) { Set<TopicIdPartition> partitionsInContext = new HashSet<>(); if (!context.isSubsequent()) { partitionsInContext.addAll(context.shareFetchData()); } else { context.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); partitionsInContext.add(topicIdPartition); }); } Set<TopicIdPartition> partitionsSet = new HashSet<>(partitions); assertEquals(partitionsSet, partitionsInContext); } private void assertErroneousAndValidTopicIdPartitions( ErroneousAndValidPartitionData erroneousAndValidPartitionData, List<TopicIdPartition> expectedErroneous, List<TopicIdPartition> expectedValid) { Set<TopicIdPartition> expectedErroneousSet = new HashSet<>(expectedErroneous); Set<TopicIdPartition> expectedValidSet = new HashSet<>(expectedValid); Set<TopicIdPartition> actualErroneousPartitions = new HashSet<>(); erroneousAndValidPartitionData.erroneous().forEach((topicIdPartition, partitionData) -> actualErroneousPartitions.add(topicIdPartition)); Set<TopicIdPartition> actualValidPartitions = new HashSet<>(erroneousAndValidPartitionData.validTopicIdPartitions()); assertEquals(expectedErroneousSet, actualErroneousPartitions); assertEquals(expectedValidSet, actualValidPartitions); } private Partition mockPartition() { Partition partition = mock(Partition.class); when(partition.isLeader()).thenReturn(true); when(partition.getLeaderEpoch()).thenReturn(1); return partition; } private void validateShareFetchFutureException(CompletableFuture<Map<TopicIdPartition, PartitionData>> future, TopicIdPartition topicIdPartition, Errors error) { 
validateShareFetchFutureException(future, List.of(topicIdPartition), error, null); } private void validateShareFetchFutureException(CompletableFuture<Map<TopicIdPartition, PartitionData>> future, TopicIdPartition topicIdPartition, Errors error, String message) { validateShareFetchFutureException(future, List.of(topicIdPartition), error, message); } private void validateShareFetchFutureException(CompletableFuture<Map<TopicIdPartition, PartitionData>> future, List<TopicIdPartition> topicIdPartitions, Errors error, String message) { assertFalse(future.isCompletedExceptionally()); Map<TopicIdPartition, ShareFetchResponseData.PartitionData> result = future.join(); assertEquals(topicIdPartitions.size(), result.size()); topicIdPartitions.forEach(topicIdPartition -> { assertTrue(result.containsKey(topicIdPartition)); assertEquals(topicIdPartition.partition(), result.get(topicIdPartition).partitionIndex()); assertEquals(error.code(), result.get(topicIdPartition).errorCode()); assertEquals(message, result.get(topicIdPartition).errorMessage()); }); } private void mockFetchOffsetForTimestamp(ReplicaManager replicaManager) { FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). 
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); } private void validateBrokerTopicStatsMetrics( BrokerTopicStats brokerTopicStats, TopicMetrics expectedAllTopicMetrics, Map<String, TopicMetrics> expectedTopicMetrics ) { if (expectedAllTopicMetrics != null) { assertEquals(expectedAllTopicMetrics.totalShareFetchRequestCount, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count()); assertEquals(expectedAllTopicMetrics.failedShareFetchRequestCount, brokerTopicStats.allTopicsStats().failedShareFetchRequestRate().count()); assertEquals(expectedAllTopicMetrics.totalShareAcknowledgementRequestCount, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count()); assertEquals(expectedAllTopicMetrics.failedShareAcknowledgementRequestCount, brokerTopicStats.allTopicsStats().failedShareAcknowledgementRequestRate().count()); } // Validate tracked topic metrics. assertEquals(expectedTopicMetrics.size(), brokerTopicStats.numTopics()); expectedTopicMetrics.forEach((topic, metrics) -> { BrokerTopicMetrics topicMetrics = brokerTopicStats.topicStats(topic); assertEquals(metrics.totalShareFetchRequestCount, topicMetrics.totalShareFetchRequestRate().count()); assertEquals(metrics.failedShareFetchRequestCount, topicMetrics.failedShareFetchRequestRate().count()); assertEquals(metrics.totalShareAcknowledgementRequestCount, topicMetrics.totalShareAcknowledgementRequestRate().count()); assertEquals(metrics.failedShareAcknowledgementRequestCount, topicMetrics.failedShareAcknowledgementRequestRate().count()); }); } static Seq<Tuple2<TopicIdPartition, LogReadResult>> buildLogReadResult(List<TopicIdPartition> topicIdPartitions) { List<Tuple2<TopicIdPartition, LogReadResult>> logReadResults = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( new FetchDataInfo(new 
LogOffsetMetadata(0, 0, 0), MemoryRecords.withRecords( Compression.NONE, new SimpleRecord("test-key".getBytes(), "test-value".getBytes()))), Optional.empty(), -1L, -1L, -1L, -1L, -1L, OptionalLong.empty(), OptionalInt.empty(), Errors.NONE )))); return CollectionConverters.asScala(logReadResults).toSeq(); } @SuppressWarnings("unchecked") static void mockReplicaManagerDelayedShareFetch(ReplicaManager replicaManager, DelayedOperationPurgatory<DelayedShareFetch> delayedShareFetchPurgatory) { doAnswer(invocationOnMock -> { Object[] args = invocationOnMock.getArguments(); DelayedShareFetchKey key = (DelayedShareFetchKey) args[0]; delayedShareFetchPurgatory.checkAndComplete(key); return null; }).when(replicaManager).completeDelayedShareFetchRequest(any(DelayedShareFetchKey.class)); doAnswer(invocationOnMock -> { Object[] args = invocationOnMock.getArguments(); DelayedShareFetch operation = (DelayedShareFetch) args[0]; List<DelayedOperationKey> keys = (List<DelayedOperationKey>) args[1]; delayedShareFetchPurgatory.tryCompleteElseWatch(operation, keys); return null; }).when(replicaManager).addDelayedShareFetchRequest(any(), any()); } private record TopicMetrics( long totalShareFetchRequestCount, long failedShareFetchRequestCount, long totalShareAcknowledgementRequestCount, long failedShareAcknowledgementRequestCount ) { } static
SharePartitionManagerTest
java
apache__spark
examples/src/main/java/org/apache/spark/examples/ml/JavaMaxAbsScalerExample.java
{ "start": 1456, "end": 2645 }
class ____ { public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaMaxAbsScalerExample") .getOrCreate(); // $example on$ List<Row> data = Arrays.asList( RowFactory.create(0, Vectors.dense(1.0, 0.1, -8.0)), RowFactory.create(1, Vectors.dense(2.0, 1.0, -4.0)), RowFactory.create(2, Vectors.dense(4.0, 10.0, 8.0)) ); StructType schema = new StructType(new StructField[]{ new StructField("id", DataTypes.IntegerType, false, Metadata.empty()), new StructField("features", new VectorUDT(), false, Metadata.empty()) }); Dataset<Row> dataFrame = spark.createDataFrame(data, schema); MaxAbsScaler scaler = new MaxAbsScaler() .setInputCol("features") .setOutputCol("scaledFeatures"); // Compute summary statistics and generate MaxAbsScalerModel MaxAbsScalerModel scalerModel = scaler.fit(dataFrame); // rescale each feature to range [-1, 1]. Dataset<Row> scaledData = scalerModel.transform(dataFrame); scaledData.select("features", "scaledFeatures").show(); // $example off$ spark.stop(); } }
JavaMaxAbsScalerExample
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/NonCanonicalTypeTest.java
{ "start": 5673, "end": 5968 }
class ____ { void test() { var c = boolean.class; } } """) .doTest(); } @Test public void method_noFinding() { compilationHelper .addSourceLines( "Super.java", """
Test
java
apache__hadoop
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/FileType.java
{ "start": 1041, "end": 1166 }
enum ____ { /** * Parquet file. */ PARQUET, /** * Non-parquet file. */ NON_PARQUET }
FileType
java
apache__camel
components/camel-jt400/src/test/java/org/apache/camel/component/jt400/Jt400TestSupport.java
{ "start": 1050, "end": 1228 }
class ____ JT400 component unit tests. It creates a mock connection pool, registers it under the ID * {@code "mockPool"} and releases it after the test runs. */ public abstract
for
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/deser/creators/ValueInstantiatorTest.java
{ "start": 3749, "end": 4595 }
class ____ extends InstantiatorBase { @Override public String getValueTypeDesc() { return MyMap.class.getName(); } @Override public boolean canCreateFromObjectWith() { return true; } @Override public CreatorProperty[] getFromObjectArguments(DeserializationConfig config) { return new CreatorProperty[] { CreatorProperty.construct(new PropertyName("name"), config.constructType(String.class), null, null, null, null, 0, null, PropertyMetadata.STD_REQUIRED) }; } @Override public Object createFromObjectWith(DeserializationContext ctxt, Object[] args) { return new MyMap((String) args[0]); } } static
CreatorMapInstantiator
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/processor/WireTapIgnoreInvalidEndpointTest.java
{ "start": 1003, "end": 1632 }
class ____ extends ContextTestSupport { @Test public void testIgnoreInvalid() throws Exception { getMockEndpoint("mock:result").expectedMessageCount(1); template.sendBody("direct:start", "Hello World"); assertMockEndpointsSatisfied(); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("direct:start") .wireTap("xxx:invalid").ignoreInvalidEndpoint() .to("mock:result"); } }; } }
WireTapIgnoreInvalidEndpointTest
java
spring-projects__spring-framework
spring-webflux/src/main/java/org/springframework/web/reactive/result/view/script/RenderingContext.java
{ "start": 1052, "end": 2421 }
class ____ { private final ApplicationContext applicationContext; private final Locale locale; private final Function<String, String> templateLoader; private final String url; /** * Create a new {@code RenderingContext}. * * @param applicationContext the application context * @param locale the locale of the rendered template * @param templateLoader a function that takes a template path as input and returns * the template content as a String * @param url the URL of the rendered template */ public RenderingContext(ApplicationContext applicationContext, Locale locale, Function<String, String> templateLoader, String url) { this.applicationContext = applicationContext; this.locale = locale; this.templateLoader = templateLoader; this.url = url; } /** * Return the application context. */ public ApplicationContext getApplicationContext() { return this.applicationContext; } /** * Return the locale of the rendered template. */ public Locale getLocale() { return this.locale; } /** * Return a function that takes a template path as input and returns the template * content as a String. */ public Function<String, String> getTemplateLoader() { return this.templateLoader; } /** * Return the URL of the rendered template. */ public String getUrl() { return this.url; } }
RenderingContext
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/LongLiteralLowerCaseSuffixTest.java
{ "start": 3792, "end": 4632 }
class ____ { // This constant string includes non-ASCII characters to make sure that we're not confusing // bytes and chars: @SuppressWarnings("unused") private static final String TEST_STRING = "Îñţérñåţîöñåļîžåţîờñ"; public void underscoredLowerCase() { // BUG: Diagnostic contains: value = 0_1__2L long value = 0_1__2l; } }\ """) .doTest(); } @Test public void negativeCase() { compilationHelper .addSourceLines( "LongLiteralLowerCaseSuffixNegativeCases.java", """ package com.google.errorprone.bugpatterns.testdata; /** * Negative cases for {@link LongLiteralLowerCaseSuffix} * * @author Simon Nickerson (sjnickerson@google.com) */ public
LongLiteralLowerCaseSuffixPositiveCase2
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/chararray/CharArrayAssert_doesNotHaveDuplicates_Test.java
{ "start": 908, "end": 1261 }
class ____ extends CharArrayAssertBaseTest { @Override protected CharArrayAssert invoke_api_method() { return assertions.doesNotHaveDuplicates(); } @Override protected void verify_internal_effects() { verify(arrays).assertDoesNotHaveDuplicates(getInfo(assertions), getActual(assertions)); } }
CharArrayAssert_doesNotHaveDuplicates_Test
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/metamodel/attributeInSuper/EmbeddableInSuperClassTest.java
{ "start": 706, "end": 1372 }
class ____ { @Test @JiraKey(value = "HHH-6475") public void ensureAttributeForEmbeddableIsGeneratedInMappedSuperClass(EntityManagerFactoryScope scope) { EmbeddableType<EmbeddableEntity> embeddableType = scope.getEntityManagerFactory().getMetamodel().embeddable( EmbeddableEntity.class ); Attribute<?, ?> attribute = embeddableType.getAttribute( "foo" ); assertNotNull( attribute ); ManagedType<AbstractEntity> managedType = scope.getEntityManagerFactory().getMetamodel().managedType( AbstractEntity.class ); assertNotNull( managedType ); attribute = managedType.getAttribute( "embedded" ); assertNotNull( attribute ); } }
EmbeddableInSuperClassTest
java
apache__maven
impl/maven-impl/src/main/java/org/apache/maven/impl/DefaultChecksumAlgorithmService.java
{ "start": 5907, "end": 7053 }
class ____ implements ChecksumAlgorithm { private final ChecksumAlgorithmFactory factory; DefaultChecksumAlgorithm(ChecksumAlgorithmFactory factory) { this.factory = factory; } @Override public String getName() { return factory.getName(); } @Override public String getFileExtension() { return factory.getFileExtension(); } @Override public ChecksumCalculator getCalculator() { return new DefaultChecksumCalculator(factory.getAlgorithm()); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } DefaultChecksumAlgorithm that = (DefaultChecksumAlgorithm) o; return Objects.equals(factory.getName(), that.factory.getName()); } @Override public int hashCode() { return Objects.hash(factory.getName()); } } private static
DefaultChecksumAlgorithm
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/time/PeriodFromTest.java
{ "start": 1042, "end": 2575 }
class ____ { private final CompilationTestHelper helper = CompilationTestHelper.newInstance(PeriodFrom.class, getClass()); @SuppressWarnings("PeriodFrom") @Test public void failures() { assertThrows(DateTimeException.class, () -> Period.from(Duration.ZERO)); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofNanos(1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofNanos(-1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofMillis(1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofMillis(-1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofSeconds(1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofSeconds(-1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofMinutes(1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofMinutes(-1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofDays(1))); assertThrows(DateTimeException.class, () -> Period.from(Duration.ofDays(-1))); TemporalAmount temporalAmount = Duration.ofDays(3); assertThrows(DateTimeException.class, () -> Period.from(temporalAmount)); } @Test public void periodFrom() { helper .addSourceLines( "TestClass.java", "import java.time.Duration;", "import java.time.Period;", "import java.time.temporal.TemporalAmount;", "public
PeriodFromTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java
{ "start": 21931, "end": 22159 }
interface ____ {} """) .addSourceLines( "threadsafety/Test.java", """ package threadsafety; import com.google.errorprone.annotations.ThreadSafe;
Super
java
google__guice
core/test/com/google/inject/spi/ProviderMethodsTest.java
{ "start": 17765, "end": 18319 }
class ____ cause errors assertContains( expected.getMessage(), "Long was bound multiple times.", "Integer was bound multiple times."); } } @Test public void testProvidesMethodsDefinedInSuperClass() { Injector injector = Guice.createInjector(new Sub1Module()); assertEquals(42, injector.getInstance(Integer.class).intValue()); assertEquals(42L, injector.getInstance(Long.class).longValue()); assertEquals(42D, injector.getInstance(Double.class).doubleValue(), 0.0); } private static
bindings
java
apache__hadoop
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractDelete.java
{ "start": 1044, "end": 1243 }
class ____ extends AbstractContractDeleteTest { @Override protected AbstractFSContract createContract(Configuration conf) { return new RawlocalFSContract(conf); } }
TestRawlocalContractDelete
java
quarkusio__quarkus
extensions/panache/hibernate-reactive-panache/deployment/src/test/java/io/quarkus/hibernate/reactive/panache/test/config/ConfigEnabledFalseTest.java
{ "start": 393, "end": 1209 }
class ____ { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withApplicationRoot(jar -> jar.addClass(MyEntity.class)) .withConfigurationResource("application.properties") // We shouldn't get any build error caused by Panache consuming build items that are not produced // See https://github.com/quarkusio/quarkus/issues/28842 .overrideConfigKey("quarkus.hibernate-orm.enabled", "false"); @Test public void startsWithoutError() { // Quarkus started without problem, even though the Panache extension is present. // Just check that Hibernate ORM is disabled. assertThat(Arc.container().instance(Mutiny.SessionFactory.class).get()) .isNull(); } }
ConfigEnabledFalseTest
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
{ "start": 5226, "end": 6182 }
class ____ extends Plugin implements ClusterPlugin, EnginePlugin, ActionPlugin { volatile int numIndexingCopies = 1; static final String NODE_ATTR_UNPROMOTABLE_ONLY = "unpromotableonly"; @Override public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() { return new ShardRoutingRoleStrategy() { @Override public ShardRouting.Role newReplicaRole() { return ShardRouting.Role.SEARCH_ONLY; } @Override public ShardRouting.Role newEmptyRole(int copyIndex) { assert 0 < numIndexingCopies; return copyIndex < numIndexingCopies ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY; } }; } // This is implemented in stateless, but for the tests we need to provide a simple implementation public static
TestPlugin
java
hibernate__hibernate-orm
tooling/metamodel-generator/src/main/java/org/hibernate/processor/validation/ProcessorSessionFactory.java
{ "start": 32091, "end": 38178 }
class ____ ? getHibernateEntityName((TypeElement) element) : null; } private TypeMirror getElementCollectionElementType(Element property) { final AnnotationMirror annotation = getAnnotation(property, "ElementCollection"); final TypeMirror classType = (TypeMirror) getAnnotationMember(annotation, "getElementCollectionClass"); return classType == null || classType.getKind() == TypeKind.VOID ? getCollectionElementType(property) : classType; } @Override protected String getSupertype(String entityName) { return asElement(findEntityClass(entityName).getSuperclass()) .getSimpleName().toString(); } @Override protected boolean isSubtype(String entityName, String subtypeEntityName) { return typeUtil.isSubtype( findEntityClass(entityName).asType(), findEntityClass(subtypeEntityName).asType()); } @Override boolean isClassDefined(String qualifiedName) { return findClassByQualifiedName(qualifiedName)!=null; } @Override boolean isFieldDefined(String qualifiedClassName, String fieldName) { final TypeElement type = findClassByQualifiedName(qualifiedClassName); return type != null && type.getEnclosedElements().stream() .anyMatch(element -> element.getKind() == ElementKind.FIELD && element.getSimpleName().contentEquals(fieldName)); } @Override boolean isConstructorDefined(String qualifiedClassName, List<Type> argumentTypes) { final TypeElement symbol = findClassByQualifiedName(qualifiedClassName); if (symbol==null) { return false; } for (Element cons: symbol.getEnclosedElements()) { if ( cons.getKind() == ElementKind.CONSTRUCTOR ) { final ExecutableElement constructor = (ExecutableElement) cons; final List<? 
extends VariableElement> parameters = constructor.getParameters(); if (parameters.size()==argumentTypes.size()) { boolean argumentsCheckOut = true; for (int i=0; i<argumentTypes.size(); i++) { final Type type = argumentTypes.get(i); final VariableElement param = parameters.get(i); if (param.asType().getKind().isPrimitive()) { final Class<?> primitive; try { primitive = toPrimitiveClass( type.getReturnedClass() ); } catch (Exception e) { continue; } if (!toPrimitiveClass(param).equals(primitive)) { argumentsCheckOut = false; break; } } else { final TypeElement typeClass; if ( type instanceof EntityType entityType ) { typeClass = findEntityClass(entityType.getAssociatedEntityName()); } //TODO: // else if (type instanceof CompositeCustomType) { // typeClass = ((Component) ((CompositeCustomType) type).getUserType()).type; // } else if (type instanceof BasicType) { final String className; //TODO: custom impl of getReturnedClassName() // for many more Hibernate types! try { className = type.getReturnedClassName(); } catch (Exception e) { continue; } typeClass = findClassByQualifiedName(className); } else { //TODO: what other Hibernate Types do we // need to consider here? continue; } if (typeClass != null && !typeUtil.isSubtype( typeClass.asType(), param.asType() ) ) { argumentsCheckOut = false; break; } } } if (argumentsCheckOut) { return true; //matching constructor found! } } } } return false; } private static Class<?> toPrimitiveClass(VariableElement param) { return switch ( param.asType().getKind() ) { case BOOLEAN -> boolean.class; case CHAR -> char.class; case INT -> int.class; case SHORT -> short.class; case BYTE -> byte.class; case LONG -> long.class; case FLOAT -> float.class; case DOUBLE -> double.class; default -> Object.class; }; } private TypeElement findClassByQualifiedName(String path) { return path == null ? 
null : elementUtil.getTypeElement(path); } private static AccessType getDefaultAccessType(TypeElement type) { //iterate up the superclass hierarchy while (type!=null) { for (Element member: type.getEnclosedElements()) { if (isId(member)) { return member instanceof ExecutableElement ? AccessType.PROPERTY : AccessType.FIELD; } } type = (TypeElement) asElement(type.getSuperclass()); } return AccessType.FIELD; } private static String propertyName(Element symbol) { String name = symbol.getSimpleName().toString(); if (symbol.getKind() == ElementKind.METHOD) { if (name.startsWith("get")) { name = name.substring(3); } else if (name.startsWith("is")) { name = name.substring(2); } return Introspector.decapitalize(name); } else { return name; } } private static boolean isPersistable(Element member, AccessType accessType) { if (isStatic(member) || isTransient(member)) { return false; } else if (member.getKind() == ElementKind.FIELD) { return accessType == AccessType.FIELD // || member.getAnnotation( accessAnnotation ) != null; || hasAnnotation(member, "Access"); } else if (member.getKind() == ElementKind.METHOD) { return isGetterMethod((ExecutableElement) member) && (accessType == AccessType.PROPERTY // || member.getAnnotation( accessAnnotation ) != null); || hasAnnotation(member, "Access")); } else { return false; } } private static TypeMirror memberType(Element member) { if (member instanceof ExecutableElement executableElement) { return executableElement.getReturnType(); } else if (member instanceof VariableElement) { return member.asType(); } else { throw new IllegalArgumentException("Not a member"); } } public static Element asElement(TypeMirror type) { if ( type == null ) { return null; } else { return switch ( type.getKind() ) { case DECLARED -> ((DeclaredType) type).asElement(); case TYPEVAR -> ((TypeVariable) type).asElement(); default -> null; }; } } }
names
java
quarkusio__quarkus
extensions/security/test-utils/src/main/java/io/quarkus/security/test/utils/IdentityMock.java
{ "start": 3162, "end": 4055 }
class ____ extends AbstractSecurityIdentityAssociation { @Inject IdentityMock identity; @Inject IdentityProviderManager identityProviderManager; @Override protected IdentityProviderManager getIdentityProviderManager() { return identityProviderManager; } @Override public Uni<SecurityIdentity> getDeferredIdentity() { if (applyAugmentors) { return identityProviderManager.authenticate(new IdentityMockAuthenticationRequest()); } return Uni.createFrom().item(identity); } @Override public SecurityIdentity getIdentity() { if (applyAugmentors) { return getDeferredIdentity().await().indefinitely(); } return identity; } } public static final
IdentityAssociationMock
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/serializer/features/NotWriteDefaultValueFieldTest.java
{ "start": 323, "end": 593 }
class ____ extends TestCase { public void test_not_write_default() throws Exception { assertEquals("{}", JSON.toJSONString(new Model(0))); assertEquals("{\"id\":1}", JSON.toJSONString(new Model(1))); } public static
NotWriteDefaultValueFieldTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/EnumOrdinalTest.java
{ "start": 831, "end": 1120 }
class ____ { private final CompilationTestHelper testHelper = CompilationTestHelper.newInstance(EnumOrdinal.class, getClass()); @Test public void positive_enumOrdinal() { testHelper .addSourceLines( "Test.java", """
EnumOrdinalTest
java
apache__logging-log4j2
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/ConsoleAppender.java
{ "start": 2998, "end": 7410 }
enum ____ { /** Standard output. */ SYSTEM_OUT { @Override public Charset getDefaultCharset() { // "sun.stdout.encoding" is only set when running from the console. return getCharset("sun.stdout.encoding", Charset.defaultCharset()); } }, /** Standard error output. */ SYSTEM_ERR { @Override public Charset getDefaultCharset() { // "sun.stderr.encoding" is only set when running from the console. return getCharset("sun.stderr.encoding", Charset.defaultCharset()); } }; public abstract Charset getDefaultCharset(); protected Charset getCharset(final String property, final Charset defaultCharset) { return new PropertiesUtil(PropertiesUtil.getSystemProperties()) .getCharsetProperty(property, defaultCharset); } } private ConsoleAppender( final String name, final Layout<? extends Serializable> layout, final Filter filter, final OutputStreamManager manager, final boolean ignoreExceptions, final Target target, final Property[] properties) { super(name, layout, filter, ignoreExceptions, true, properties, manager); this.target = target; } /** * Creates a Console Appender. * * @param layout The layout to use (required). * @param filter The Filter or null. * @param target The target ("SYSTEM_OUT" or "SYSTEM_ERR"). The default is "SYSTEM_OUT". * @param name The name of the Appender (required). * @param follow If true will follow changes to the underlying output stream. * @param ignoreExceptions If {@code "true"} (default) exceptions encountered when appending events are logged; otherwise they * are propagated to the caller. * @return The ConsoleAppender. * @deprecated Deprecated in 2.7; use {@link #newBuilder()}. */ @Deprecated public static ConsoleAppender createAppender( Layout<? extends Serializable> layout, final Filter filter, final String target, final String name, final String follow, final String ignoreExceptions) { return newBuilder() .setLayout(layout) .setFilter(filter) .setTarget(target == null ? 
DEFAULT_TARGET : Target.valueOf(target)) .setName(name) .setFollow(Boolean.parseBoolean(follow)) .setIgnoreExceptions(Booleans.parseBoolean(ignoreExceptions, true)) .build(); } /** * Creates a Console Appender. * * @param layout The layout to use (required). * @param filter The Filter or null. * @param target The target (SYSTEM_OUT or SYSTEM_ERR). The default is SYSTEM_OUT. * @param name The name of the Appender (required). * @param follow If true will follow changes to the underlying output stream. * @param direct If true will write directly to {@link java.io.FileDescriptor} and bypass * {@link System#out}/{@link System#err}. * @param ignoreExceptions If {@code "true"} (default) exceptions encountered when appending events are logged; otherwise they * are propagated to the caller. * @return The ConsoleAppender. * @deprecated Deprecated in 2.7; use {@link #newBuilder()}. */ @Deprecated public static ConsoleAppender createAppender( // @formatter:off Layout<? extends Serializable> layout, final Filter filter, Target target, final String name, final boolean follow, final boolean direct, final boolean ignoreExceptions) { return newBuilder() .setLayout(layout) .setFilter(filter) .setTarget(target == null ? DEFAULT_TARGET : target) .setName(name) .setFollow(follow) .setDirect(direct) .setIgnoreExceptions(ignoreExceptions) .build(); } public static ConsoleAppender createDefaultAppenderForLayout(final Layout<? extends Serializable> layout) { // this method cannot use the builder
Target
java
spring-projects__spring-framework
spring-beans/src/test/java/org/springframework/beans/BeanUtilsTests.java
{ "start": 29363, "end": 29460 }
interface ____ { String getId(); List<String> getLineItems(); } private static
OrderSummary
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/grant/MySqlGrantTest_16.java
{ "start": 969, "end": 2350 }
class ____ extends MysqlTest { public void test_0() throws Exception { String sql = "GRANT DROP ON mydb.* TO 'someuser'@'somehost';"; MySqlStatementParser parser = new MySqlStatementParser(sql); List<SQLStatement> statementList = parser.parseStatementList(); SQLStatement stmt = statementList.get(0); // print(statementList); assertEquals(1, statementList.size()); MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); stmt.accept(visitor); String output = SQLUtils.toMySqlString(stmt); assertEquals("GRANT DROP ON mydb.* TO 'someuser'@'somehost';", // output); // System.out.println("Tables : " + visitor.getTables()); // System.out.println("fields : " + visitor.getColumns()); // System.out.println("coditions : " + visitor.getConditions()); // System.out.println("orderBy : " + visitor.getOrderByColumns()); assertEquals(1, visitor.getTables().size()); assertEquals(0, visitor.getColumns().size()); assertEquals(0, visitor.getConditions().size()); // assertTrue(visitor.getTables().containsKey(new TableStat.Name("City"))); // assertTrue(visitor.getTables().containsKey(new TableStat.Name("t2"))); // assertTrue(visitor.getColumns().contains(new Column("t2", "id"))); } }
MySqlGrantTest_16
java
apache__flink
flink-tests/src/test/java/org/apache/flink/test/state/TaskManagerWideRocksDbMemorySharingITCase.java
{ "start": 2776, "end": 7757 }
class ____ extends TestLogger { private static final int PARALLELISM = 4; private static final int NUMBER_OF_JOBS = 5; private static final int NUMBER_OF_TASKS = NUMBER_OF_JOBS * PARALLELISM; private static final MemorySize SHARED_MEMORY = MemorySize.ofMebiBytes(NUMBER_OF_TASKS * 25); private MiniClusterWithClientResource cluster; @Rule public final SharedObjects sharedObjects = SharedObjects.create(); @Before public void init() throws Exception { cluster = new MiniClusterWithClientResource( new MiniClusterResourceConfiguration.Builder() .setConfiguration(getConfiguration()) .setNumberTaskManagers(1) .setNumberSlotsPerTaskManager(NUMBER_OF_TASKS) .build()); cluster.before(); } @After public void destroy() { cluster.after(); } @Test public void testBlockCache() throws Exception { List<Cache> createdCaches = new CopyOnWriteArrayList<>(); List<WriteBufferManager> createdWriteBufferManagers = new CopyOnWriteArrayList<>(); TestingRocksDBMemoryFactory memoryFactory = new TestingRocksDBMemoryFactory( sharedObjects.add(createdCaches), sharedObjects.add(createdWriteBufferManagers)); List<JobID> jobIDs = new ArrayList<>(NUMBER_OF_JOBS); try { for (int i = 0; i < NUMBER_OF_JOBS; i++) { jobIDs.add(cluster.getRestClusterClient().submitJob(dag(memoryFactory)).get()); } for (JobID jid : jobIDs) { waitForAllTaskRunning(cluster.getMiniCluster(), jid, false); } Assert.assertEquals(1, createdCaches.size()); Assert.assertEquals(1, createdWriteBufferManagers.size()); } finally { for (JobID jobID : jobIDs) { try { cluster.getRestClusterClient().cancel(jobID).get(); } catch (Exception e) { log.warn("Can not cancel job {}", jobID, e); } } } } private JobGraph dag(RocksDBMemoryFactory memoryFactory) { Configuration configuration = new Configuration(); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration); env.setParallelism(PARALLELISM); // currently we could not use config option to replace RocksDBMemoryFactory EmbeddedRocksDBStateBackend 
backend = new EmbeddedRocksDBStateBackend(true); backend.setRocksDBMemoryFactory(memoryFactory); // don't flush memtables by checkpoints env.enableCheckpointing(24 * 60 * 60 * 1000, CheckpointingMode.EXACTLY_ONCE); RestartStrategyUtils.configureNoRestartStrategy(env); DataStreamSource<Long> src = env.fromSequence(Long.MIN_VALUE, Long.MAX_VALUE); src.keyBy(number -> number) .map( new RichMapFunction<Long, Long>() { private ListState<byte[]> state; private int payloadSize; @Override public void open(OpenContext openContext) throws Exception { super.open(openContext); this.state = getRuntimeContext() .getListState( new ListStateDescriptor<>( "state", byte[].class)); // let each task to grow its state at a different speed // to increase the probability of reporting different memory usages // among different tasks this.payloadSize = 4 + new Random().nextInt(7); } @Override public Long map(Long value) throws Exception { state.add(new byte[payloadSize]); Thread.sleep(1L); return value; } }) .sinkTo(new DiscardingSink<>()); return StateBackendUtils.configureStateBackendAndGetJobGraph(env, backend); } private static Configuration getConfiguration() { Configuration configuration = new Configuration(); configuration.set(RocksDBOptions.FIX_PER_TM_MEMORY_SIZE, SHARED_MEMORY); configuration.set(RocksDBOptions.USE_MANAGED_MEMORY, false); return configuration; } private static
TaskManagerWideRocksDbMemorySharingITCase
java
spring-projects__spring-boot
module/spring-boot-couchbase/src/main/java/org/springframework/boot/couchbase/health/CouchbaseHealthIndicator.java
{ "start": 1157, "end": 1753 }
class ____ extends AbstractHealthIndicator { private final Cluster cluster; /** * Create an indicator with the specified {@link Cluster}. * @param cluster the Couchbase Cluster */ public CouchbaseHealthIndicator(Cluster cluster) { super("Couchbase health check failed"); Assert.notNull(cluster, "'cluster' must not be null"); this.cluster = cluster; } @Override protected void doHealthCheck(Health.Builder builder) throws Exception { DiagnosticsResult diagnostics = this.cluster.diagnostics(); new CouchbaseHealth(diagnostics).applyTo(builder); } }
CouchbaseHealthIndicator
java
elastic__elasticsearch
modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java
{ "start": 1236, "end": 2918 }
class ____ extends AbstractRepositoryS3RestTestCase { private static final String PREFIX = getIdentifierPrefix("RepositoryS3SessionCredentialsRestIT"); private static final String BUCKET = PREFIX + "bucket"; private static final String BASE_PATH = PREFIX + "base_path"; private static final String ACCESS_KEY = PREFIX + "access-key"; private static final String SECRET_KEY = PREFIX + "secret-key"; private static final String SESSION_TOKEN = PREFIX + "session-token"; private static final String CLIENT = "session_credentials_client"; private static final S3HttpFixture s3Fixture = new S3HttpFixture( true, BUCKET, BASE_PATH, fixedAccessKeyAndToken(ACCESS_KEY, SESSION_TOKEN, ANY_REGION, "s3") ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") .keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY) .keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY) .keystore("s3.client." + CLIENT + ".session_token", SESSION_TOKEN) .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress) .build(); @ClassRule public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); @Override protected String getTestRestCluster() { return cluster.getHttpAddresses(); } @Override protected String getBucketName() { return BUCKET; } @Override protected String getBasePath() { return BASE_PATH; } @Override protected String getClientName() { return CLIENT; } }
RepositoryS3SessionCredentialsRestIT
java
redisson__redisson
redisson/src/main/java/org/redisson/api/RGeoAsync.java
{ "start": 856, "end": 15509 }
interface ____<V> extends RScoredSortedSetAsync<V> { /** * Adds geospatial member. * * @param longitude - longitude of object * @param latitude - latitude of object * @param member - object itself * @return number of elements added to the sorted set, * not including elements already existing for which * the score was updated */ RFuture<Long> addAsync(double longitude, double latitude, V member); /** * Adds geospatial members. * * @param entries - objects * @return number of elements added to the sorted set, * not including elements already existing for which * the score was updated */ RFuture<Long> addAsync(GeoEntry... entries); /** * Adds geospatial member only if it's already exists. * <p> * Requires <b>Redis 6.2.0 and higher.</b> * * @param longitude - longitude of object * @param latitude - latitude of object * @param member - object itself * @return number of elements added to the sorted set */ RFuture<Boolean> addIfExistsAsync(double longitude, double latitude, V member); /** * Adds geospatial members only if it's already exists. * <p> * Requires <b>Redis 6.2.0 and higher.</b> * * @param entries - objects * @return number of elements added to the sorted set */ RFuture<Long> addIfExistsAsync(GeoEntry... entries); /** * Adds geospatial member only if has not been added before. * <p> * Requires <b>Redis 6.2.0 and higher.</b> * * @param longitude - longitude of object * @param latitude - latitude of object * @param member - object itself * @return number of elements added to the sorted set */ RFuture<Boolean> tryAddAsync(double longitude, double latitude, V member); /** * Adds geospatial members only if has not been added before. * <p> * Requires <b>Redis 6.2.0 and higher.</b> * * @param entries - objects * @return number of elements added to the sorted set */ RFuture<Long> tryAddAsync(GeoEntry... entries); /** * Returns distance between members in <code>GeoUnit</code> units. 
* * @param firstMember - first object * @param secondMember - second object * @param geoUnit - geo unit * @return distance */ RFuture<Double> distAsync(V firstMember, V secondMember, GeoUnit geoUnit); /** * Returns 11 characters Geohash string mapped by defined member. * * @param members - objects * @return hash mapped by object */ RFuture<Map<V, String>> hashAsync(V... members); /** * Returns geo-position mapped by defined member. * * @param members - objects * @return geo position mapped by object */ RFuture<Map<V, GeoPosition>> posAsync(V... members); /** * Returns the members of a sorted set, which are within the * borders of specified search conditions. * <p> * Usage examples: * <pre> * RFuture objects = geo.searchAsync(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS) * .order(GeoOrder.ASC) * .count(1))); * </pre> * <pre> * RFuture objects = geo.searchAsync(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS))); * </pre> * <p> * Requires <b>Redis 3.2.10 and higher.</b> * * @param args - search conditions object * @return list of memebers */ RFuture<List<V>> searchAsync(GeoSearchArgs args); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(double longitude, double latitude, double radius, GeoUnit geoUnit); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, int count); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /** * Returns the distance mapped by member of a sorted set, * which are within the borders of specified search conditions. 
* <p> * Usage examples: * <pre> * RFuture objects = geo.searchWithDistanceAsync(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS) * .order(GeoOrder.ASC) * .count(1))); * </pre> * <pre> * RFuture objects = geo.searchWithDistanceAsync(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS))); * </pre> * <p> * Requires <b>Redis 3.2.10 and higher.</b> * * @param args - search conditions object * @return distance mapped by object */ RFuture<Map<V, Double>> searchWithDistanceAsync(GeoSearchArgs args); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(double longitude, double latitude, double radius, GeoUnit geoUnit); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, int count); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /** * Returns the position mapped by member of a sorted set, * which are within the borders of specified search conditions. 
* <p> * Usage examples: * <pre> * List objects = geo.searchWithPosition(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS) * .order(GeoOrder.ASC) * .count(1))); * </pre> * <pre> * List objects = geo.searchWithPosition(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS))); * </pre> * <p> * Requires <b>Redis 3.2.10 and higher.</b> * * @param args - search conditions object * @return position mapped by object */ RFuture<Map<V, GeoPosition>> searchWithPositionAsync(GeoSearchArgs args); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(double longitude, double latitude, double radius, GeoUnit geoUnit); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, int count); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(V member, double radius, GeoUnit geoUnit); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(V member, double radius, GeoUnit geoUnit, int count); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchAsync() method instead * */ @Deprecated RFuture<List<V>> radiusAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> 
radiusWithDistanceAsync(V member, double radius, GeoUnit geoUnit); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(V member, double radius, GeoUnit geoUnit, int count); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchWithDistanceAsync() method instead * */ @Deprecated RFuture<Map<V, Double>> radiusWithDistanceAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(V member, double radius, GeoUnit geoUnit); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(V member, double radius, GeoUnit geoUnit, int count); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder); /* * Use searchWithPositionAsync() method instead * */ @Deprecated RFuture<Map<V, GeoPosition>> radiusWithPositionAsync(V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /** * Finds the members of a sorted set, * which are within the borders of specified search conditions. * <p> * Stores result to <code>destName</code>. 
* <p> * Usage examples: * <pre> * long count = geo.storeSearchTo(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS) * .order(GeoOrder.ASC) * .count(1))); * </pre> * <pre> * long count = geo.storeSearchTo(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS))); * </pre> * * @param args - search conditions object * @return length of result */ RFuture<Long> storeSearchToAsync(String destName, GeoSearchArgs args); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit, int count); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, V member, double radius, GeoUnit geoUnit); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, V member, double radius, GeoUnit geoUnit, int count); /* * Use storeSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreToAsync(String destName, V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /** * Finds the members of a sorted set, * which are within the borders of specified search conditions. * <p> * Stores result to <code>destName</code> sorted by distance. 
* <p> * Usage examples: * <pre> * long count = geo.storeSortedSearchTo(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS) * .order(GeoOrder.ASC) * .count(1))); * </pre> * <pre> * long count = geo.storeSortedSearchTo(GeoSearchArgs.from(15, 37) * .radius(200, GeoUnit.KILOMETERS))); * </pre> * * @param args - search conditions object * @return length of result */ RFuture<Long> storeSortedSearchToAsync(String destName, GeoSearchArgs args); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit, int count); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, double longitude, double latitude, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, V member, double radius, GeoUnit geoUnit); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, V member, double radius, GeoUnit geoUnit, int count); /* * Use storeSortedSearchToAsync() method instead * */ @Deprecated RFuture<Long> radiusStoreSortedToAsync(String destName, V member, double radius, GeoUnit geoUnit, GeoOrder geoOrder, int count); }
RGeoAsync
java
apache__dubbo
dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/filter/EchoFilterTest.java
{ "start": 1341, "end": 3117 }
class ____ { Filter echoFilter = new EchoFilter(); @SuppressWarnings("unchecked") @Test void testEcho() { Invocation invocation = createMockRpcInvocation(); Invoker<DemoService> invoker = createMockInvoker(invocation); given(invocation.getMethodName()).willReturn("$echo"); Result filterResult = echoFilter.invoke(invoker, invocation); assertEquals("hello", filterResult.getValue()); } @SuppressWarnings("unchecked") @Test void testNonEcho() { Invocation invocation = createMockRpcInvocation(); Invoker<DemoService> invoker = createMockInvoker(invocation); given(invocation.getMethodName()).willReturn("echo"); Result filterResult = echoFilter.invoke(invoker, invocation); assertEquals("High", filterResult.getValue()); } Invocation createMockRpcInvocation() { Invocation invocation = mock(RpcInvocation.class); given(invocation.getParameterTypes()).willReturn(new Class<?>[] {Enum.class}); given(invocation.getArguments()).willReturn(new Object[] {"hello"}); given(invocation.getObjectAttachments()).willReturn(null); return invocation; } Invoker<DemoService> createMockInvoker(Invocation invocation) { Invoker<DemoService> invoker = mock(Invoker.class); given(invoker.isAvailable()).willReturn(true); given(invoker.getInterface()).willReturn(DemoService.class); AppResponse result = new AppResponse(); result.setValue("High"); given(invoker.invoke(invocation)).willReturn(result); URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1"); given(invoker.getUrl()).willReturn(url); return invoker; } }
EchoFilterTest
java
alibaba__fastjson
src/test/java/data/media/Image.java
{ "start": 63, "end": 175 }
class ____ implements java.io.Serializable { private static final long serialVersionUID = 1L; public
Image
java
mybatis__mybatis-3
src/test/java/org/apache/ibatis/submitted/cache/CacheTest.java
{ "start": 12920, "end": 13090 }
interface ____ { } @CacheNamespaceRef(value = PersonMapper.class, name = "org.apache.ibatis.submitted.cache.PersonMapper") private
CustomCacheUnsupportedPropertyMapper
java
apache__camel
components/camel-freemarker/src/main/java/org/apache/camel/component/freemarker/FreemarkerEndpoint.java
{ "start": 1789, "end": 7249 }
class ____ extends ResourceEndpoint { @UriParam(defaultValue = "false") private boolean allowTemplateFromHeader; @UriParam private String encoding; @UriParam private int templateUpdateDelay; @UriParam private Configuration configuration; public FreemarkerEndpoint() { } public FreemarkerEndpoint(String uri, Component component, String resourceUri) { super(uri, component, resourceUri); } @Override public boolean isRemote() { return false; } @Override public ExchangePattern getExchangePattern() { return ExchangePattern.InOut; } @Override protected String createEndpointUri() { return "freemarker:" + getResourceUri(); } public boolean isAllowTemplateFromHeader() { return allowTemplateFromHeader; } /** * Whether to allow to use resource template from header or not (default false). * * Enabling this allows to specify dynamic templates via message header. However this can be seen as a potential * security vulnerability if the header is coming from a malicious user, so use this with care. */ public void setAllowTemplateFromHeader(boolean allowTemplateFromHeader) { this.allowTemplateFromHeader = allowTemplateFromHeader; } /** * Sets the encoding to be used for loading the template file. */ public void setEncoding(String encoding) { this.encoding = encoding; } public String getEncoding() { return encoding; } public int getTemplateUpdateDelay() { return templateUpdateDelay; } /** * Number of seconds the loaded template resource will remain in the cache. 
*/ public void setTemplateUpdateDelay(int templateUpdateDelay) { this.templateUpdateDelay = templateUpdateDelay; } public Configuration getConfiguration() { return configuration; } /** * Sets the Freemarker configuration to use */ public void setConfiguration(Configuration configuration) { this.configuration = configuration; } public FreemarkerEndpoint findOrCreateEndpoint(String uri, String newResourceUri) { String newUri = uri.replace(getResourceUri(), newResourceUri); log.debug("Getting endpoint with URI: {}", newUri); return getCamelContext().getEndpoint(newUri, FreemarkerEndpoint.class); } @Override public void clearContentCache() { configuration.clearTemplateCache(); } @Override protected void onExchange(Exchange exchange) throws Exception { String path = getResourceUri(); ObjectHelper.notNull(configuration, "configuration"); ObjectHelper.notNull(path, "resourceUri"); if (allowTemplateFromHeader) { String newResourceUri = exchange.getIn().getHeader(FreemarkerConstants.FREEMARKER_RESOURCE_URI, String.class); if (newResourceUri != null) { exchange.getIn().removeHeader(FreemarkerConstants.FREEMARKER_RESOURCE_URI); log.debug("{} set to {} creating new endpoint to handle exchange", FreemarkerConstants.FREEMARKER_RESOURCE_URI, newResourceUri); FreemarkerEndpoint newEndpoint = findOrCreateEndpoint(getEndpointUri(), newResourceUri); newEndpoint.onExchange(exchange); return; } } Reader reader = null; String content = null; if (allowTemplateFromHeader) { content = exchange.getIn().getHeader(FreemarkerConstants.FREEMARKER_TEMPLATE, String.class); } if (content != null) { // use content from header reader = new StringReader(content); // remove the header to avoid it being propagated in the routing exchange.getIn().removeHeader(FreemarkerConstants.FREEMARKER_TEMPLATE); } Object dataModel = null; if (allowTemplateFromHeader) { dataModel = exchange.getIn().getHeader(FreemarkerConstants.FREEMARKER_DATA_MODEL, Object.class); } if (dataModel == null) { dataModel = 
ExchangeHelper.createVariableMap(exchange, isAllowContextMapAll()); } // let freemarker parse and generate the result in buffer Template template; if (reader == null && ResourceHelper.hasScheme(path)) { // favour to use Camel to load via resource loader reader = new InputStreamReader(getResourceAsInputStream()); } if (reader != null) { log.debug("Freemarker is evaluating template read from header {} using context: {}", FreemarkerConstants.FREEMARKER_TEMPLATE, dataModel); template = new Template("temp", reader, new Configuration(Configuration.VERSION_2_3_34)); } else { log.debug("Freemarker is evaluating {} using context: {}", path, dataModel); if (getEncoding() != null) { template = configuration.getTemplate(path, getEncoding()); } else { template = configuration.getTemplate(path); } } StringWriter buffer = new StringWriter(); template.process(dataModel, buffer); buffer.flush(); // now lets store the result ExchangeHelper.setInOutBodyPatternAware(exchange, buffer.toString()); } }
FreemarkerEndpoint
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/tool/schema/extract/internal/InformationExtractorOracleImpl.java
{ "start": 241, "end": 682 }
class ____ extends InformationExtractorJdbcDatabaseMetaDataImpl { public InformationExtractorOracleImpl(ExtractionContext extractionContext) { super( extractionContext ); } @Override public boolean supportsBulkPrimaryKeyRetrieval() { return true; } @Override public boolean supportsBulkForeignKeyRetrieval() { return true; } // Unfortunately, there is no support for table wildcard for indexes }
InformationExtractorOracleImpl
java
apache__kafka
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java
{ "start": 991, "end": 2415 }
class ____ { private final Map<String, String> entries; /** * The type of an entity entry. */ public static final String USER = "user"; public static final String CLIENT_ID = "client-id"; public static final String IP = "ip"; public static boolean isValidEntityType(String entityType) { return Objects.equals(entityType, USER) || Objects.equals(entityType, CLIENT_ID) || Objects.equals(entityType, IP); } /** * Constructs a quota entity for the given types and names. If a name is null, * then it is mapped to the built-in default entity name. * * @param entries maps entity type to its name */ public ClientQuotaEntity(Map<String, String> entries) { this.entries = entries; } /** * @return map of entity type to its name */ public Map<String, String> entries() { return this.entries; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClientQuotaEntity that = (ClientQuotaEntity) o; return Objects.equals(entries, that.entries); } @Override public int hashCode() { return Objects.hash(entries); } @Override public String toString() { return "ClientQuotaEntity(entries=" + entries + ")"; } }
ClientQuotaEntity
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/search/runtime/LongScriptFieldTermsQueryTests.java
{ "start": 655, "end": 2826 }
class ____ extends AbstractLongScriptFieldQueryTestCase<LongScriptFieldTermsQuery> { @Override protected LongScriptFieldTermsQuery createTestInstance() { Set<Long> terms = new HashSet<>(); int count = between(1, 100); while (terms.size() < count) { terms.add(randomLong()); } return new LongScriptFieldTermsQuery(randomScript(), leafFactory, randomAlphaOfLength(5), terms); } @Override protected LongScriptFieldTermsQuery copy(LongScriptFieldTermsQuery orig) { return new LongScriptFieldTermsQuery(orig.script(), leafFactory, orig.fieldName(), orig.terms()); } @Override protected LongScriptFieldTermsQuery mutate(LongScriptFieldTermsQuery orig) { Script script = orig.script(); String fieldName = orig.fieldName(); Set<Long> terms = orig.terms(); switch (randomInt(2)) { case 0 -> script = randomValueOtherThan(script, this::randomScript); case 1 -> fieldName += "modified"; case 2 -> { terms = new HashSet<>(terms); while (false == terms.add(randomLong())) { // Random long was already in the set } } default -> fail(); } return new LongScriptFieldTermsQuery(script, leafFactory, fieldName, terms); } @Override public void testMatches() { LongScriptFieldTermsQuery query = new LongScriptFieldTermsQuery(randomScript(), leafFactory, "test", Set.of(1L, 2L, 3L)); assertTrue(query.matches(new long[] { 1 }, 1)); assertTrue(query.matches(new long[] { 2 }, 1)); assertTrue(query.matches(new long[] { 3 }, 1)); assertTrue(query.matches(new long[] { 1, 0 }, 2)); assertTrue(query.matches(new long[] { 0, 1 }, 2)); assertFalse(query.matches(new long[] { 0 }, 1)); assertFalse(query.matches(new long[] { 0, 1 }, 1)); } @Override protected void assertToString(LongScriptFieldTermsQuery query) { assertThat(query.toString(query.fieldName()), equalTo(query.terms().toString())); } }
LongScriptFieldTermsQueryTests
java
spring-projects__spring-security
config/src/test/java/org/springframework/security/config/annotation/method/configuration/Authz.java
{ "start": 2254, "end": 2375 }
class ____ extends AuthorizationDecision { public AuthzResult(boolean granted) { super(granted); } } }
AuthzResult
java
google__error-prone
core/src/main/java/com/google/errorprone/bugpatterns/EqualsIncompatibleType.java
{ "start": 8323, "end": 8977 }
class ____ { private String receiverTypeString; private String argumentTypeString; private TypeStringPair(Type receiverType, Type argumentType) { receiverTypeString = Signatures.prettyType(receiverType); argumentTypeString = Signatures.prettyType(argumentType); if (argumentTypeString.equals(receiverTypeString)) { receiverTypeString = receiverType.toString(); argumentTypeString = argumentType.toString(); } } private String getReceiverTypeString() { return receiverTypeString; } private String getArgumentTypeString() { return argumentTypeString; } } }
TypeStringPair
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java
{ "start": 1313, "end": 2248 }
class ____ extends BaseRestHandler { @Override public List<Route> routes() { return List.of(new Route(GET, "/_ingest/pipeline"), new Route(GET, "/_ingest/pipeline/{id}")); } @Override public String getName() { return "ingest_get_pipeline_action"; } @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { final var request = new GetPipelineRequest( getMasterNodeTimeout(restRequest), restRequest.paramAsBoolean("summary", false), Strings.splitStringByCommaToArray(restRequest.param("id")) ); return channel -> new RestCancellableNodeClient(client, restRequest.getHttpChannel()).execute( GetPipelineAction.INSTANCE, request, new RestToXContentListener<>(channel, GetPipelineResponse::status) ); } }
RestGetPipelineAction
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KeystoneEndpointBuilderFactory.java
{ "start": 13682, "end": 14009 }
class ____ extends AbstractEndpointBuilder implements KeystoneEndpointBuilder, AdvancedKeystoneEndpointBuilder { public KeystoneEndpointBuilderImpl(String path) { super(componentName, path); } } return new KeystoneEndpointBuilderImpl(path); } }
KeystoneEndpointBuilderImpl
java
google__error-prone
core/src/test/java/com/google/errorprone/matchers/AnnotationMatcherTest.java
{ "start": 9898, "end": 10514 }
class ____ { @SampleAnnotation1 public void foo() {} } """); assertCompiles( nodeWithAnnotationMatches( /* shouldMatch= */ true, new AnnotationMatcher<Tree>(AT_LEAST_ONE, isType("com.google.SampleAnnotation1")))); assertCompiles( nodeWithAnnotationMatches( /* shouldMatch= */ true, new AnnotationMatcher<Tree>(ALL, isType("com.google.SampleAnnotation1")))); } @Test public void shouldMatchAnnotationOnParameter() { writeFile( "A.java", """ package com.google; public
A
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/job/checkpoints/AbstractCheckpointStatsHandler.java
{ "start": 2193, "end": 2388 }
class ____ checkpoint handlers that will cache the {@link CheckpointStatsSnapshot} * object. * * @param <R> the response type * @param <M> the message parameters */ @Internal public abstract
for
java
spring-projects__spring-security
config/src/test/java/org/springframework/security/config/web/server/FormLoginTests.java
{ "start": 16596, "end": 17233 }
class ____ { private WebDriver driver; @FindBy(css = "button[type=submit]") private WebElement submit; public DefaultLogoutPage(WebDriver webDriver) { this.driver = webDriver; } public DefaultLogoutPage assertAt() { assertThat(this.driver.getTitle()).isEqualTo("Confirm Log Out?"); return this; } public DefaultLoginPage logout() { this.submit.click(); return DefaultLoginPage.create(this.driver); } static DefaultLogoutPage to(WebDriver driver) { driver.get("http://localhost/logout"); return PageFactory.initElements(driver, DefaultLogoutPage.class); } } public static
DefaultLogoutPage
java
mockito__mockito
mockito-core/src/test/java/org/mockito/internal/creation/instance/ConstructorInstantiatorTest.java
{ "start": 582, "end": 652 }
class ____ { SomeClass2(String x) {} } static
SomeClass2
java
alibaba__fastjson
src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectR.java
{ "start": 94, "end": 892 }
class ____ { private int a; private int b; private int c; private int d; private List<Integer> e; private int f; public int getA() { return a; } public void setA(int a) { this.a = a; } public int getB() { return b; } public void setB(int b) { this.b = b; } public int getC() { return c; } public void setC(int c) { this.c = c; } public int getD() { return d; } public void setD(int d) { this.d = d; } public List<Integer> getE() { return e; } public void setE(List<Integer> e) { this.e = e; } public int getF() { return f; } public void setF(int f) { this.f = f; } }
ObjectR
java
lettuce-io__lettuce-core
src/main/java/io/lettuce/core/dynamic/output/OutputRegistryCommandOutputFactoryResolver.java
{ "start": 730, "end": 3164 }
class ____ extends CommandOutputResolverSupport implements CommandOutputFactoryResolver { @SuppressWarnings("rawtypes") private static final ClassTypeInformation<CommandOutput> COMMAND_OUTPUT = ClassTypeInformation.from(CommandOutput.class); private final OutputRegistry outputRegistry; /** * Create a new {@link OutputRegistryCommandOutputFactoryResolver} given {@link OutputRegistry}. * * @param outputRegistry must not be {@code null}. */ public OutputRegistryCommandOutputFactoryResolver(OutputRegistry outputRegistry) { LettuceAssert.notNull(outputRegistry, "OutputRegistry must not be null"); this.outputRegistry = outputRegistry; } @Override public CommandOutputFactory resolveCommandOutput(OutputSelector outputSelector) { Map<OutputType, CommandOutputFactory> registry = outputRegistry.getRegistry(); List<OutputType> outputTypes = registry.keySet().stream().filter((outputType) -> !outputType.isStreaming()) .collect(Collectors.toList()); List<OutputType> candidates = getCandidates(outputTypes, outputSelector); if (candidates.isEmpty()) { return null; } return registry.get(candidates.get(0)); } @Override public CommandOutputFactory resolveStreamingCommandOutput(OutputSelector outputSelector) { Map<OutputType, CommandOutputFactory> registry = outputRegistry.getRegistry(); List<OutputType> outputTypes = registry.keySet().stream().filter(OutputType::isStreaming).collect(Collectors.toList()); List<OutputType> candidates = getCandidates(outputTypes, outputSelector); if (candidates.isEmpty()) { return null; } return registry.get(candidates.get(0)); } private List<OutputType> getCandidates(Collection<OutputType> outputTypes, OutputSelector outputSelector) { return outputTypes.stream().filter(outputType -> { if (COMMAND_OUTPUT.getType().isAssignableFrom(outputSelector.getOutputType().getRawClass())) { if (outputSelector.getOutputType().getRawClass().isAssignableFrom(outputType.getCommandOutputClass())) { return true; } } return isAssignableFrom(outputSelector, outputType); 
}).collect(Collectors.toList()); } }
OutputRegistryCommandOutputFactoryResolver
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/idclassgeneratedvalue/MultiplePK.java
{ "start": 251, "end": 1368 }
class ____ implements Serializable { private final Long id1; private final Long id2; private final Long id3; // AnnotationSourceProcessor (incorrectly) requires this to be transient; see HHH-4819 and HHH-4820 private final transient int cachedHashCode; private MultiplePK() { id1 = null; id2 = null; id3 = null; cachedHashCode = super.hashCode(); } public MultiplePK(Long id1, Long id2, Long id3) { this.id1 = id1; this.id2 = id2; this.id3 = id3; this.cachedHashCode = calculateHashCode(); } private int calculateHashCode() { int result = id1.hashCode(); result = 31 * result + id2.hashCode(); return result; } public Long getId1() { return id1; } public Long getId2() { return id2; } public Long getId3() { return id3; } @Override public boolean equals(Object o) { if ( this == o ) { return true; } if ( o == null || getClass() != o.getClass() ) { return false; } MultiplePK multiplePK = (MultiplePK) o; return id1.equals( multiplePK.id1 ) && id2.equals( multiplePK.id2 ) && id3.equals( multiplePK.id3); } @Override public int hashCode() { return cachedHashCode; } }
MultiplePK
java
mybatis__mybatis-3
src/test/java/org/apache/ibatis/submitted/duplicate_resource_loaded/Mapper.java
{ "start": 767, "end": 836 }
interface ____ { List<Map<String, Object>> selectAllBlogs(); }
Mapper
java
quarkusio__quarkus
extensions/cache/deployment/src/test/java/io/quarkus/cache/test/deployment/DeploymentExceptionsTest.java
{ "start": 5590, "end": 5720 }
class ____ extends KeyGen2 { public KeyGen3(String arg) { super(arg); } } private static
KeyGen3
java
apache__camel
components/camel-clickup/src/main/java/org/apache/camel/component/clickup/model/WebhookCreationResult.java
{ "start": 1075, "end": 2021 }
class ____ implements Serializable { @Serial private static final long serialVersionUID = 0L; @JsonProperty("id") private String id; @JsonProperty("webhook") private Webhook webhook; @JsonProperty("err") private String error; @JsonProperty("ECODE") private String errorCode; public String getId() { return id; } public Webhook getWebhook() { return webhook; } public String getError() { return error; } public String getErrorCode() { return errorCode; } public boolean isError() { return this.error != null; } @Override public String toString() { return "WebhookCreationResult{" + "id='" + id + '\'' + ", webhook=" + webhook + ", error='" + error + '\'' + ", errorCode='" + errorCode + '\'' + '}'; } }
WebhookCreationResult
java
apache__kafka
clients/src/test/java/org/apache/kafka/common/protocol/types/StructTest.java
{ "start": 1018, "end": 3718 }
class ____ { private static final Schema FLAT_STRUCT_SCHEMA = new Schema( new Field("int8", Type.INT8, ""), new Field("int16", Type.INT16, ""), new Field("int32", Type.INT32, ""), new Field("int64", Type.INT64, ""), new Field("boolean", Type.BOOLEAN, ""), new Field("float64", Type.FLOAT64, ""), new Field("string", Type.STRING, "")); private static final Schema ARRAY_SCHEMA = new Schema(new Field("array", new ArrayOf(new ArrayOf(Type.INT8)), "")); private static final Schema NESTED_CHILD_SCHEMA = new Schema( new Field("int8", Type.INT8, "")); private static final Schema NESTED_SCHEMA = new Schema( new Field("array", new ArrayOf(ARRAY_SCHEMA), ""), new Field("nested", NESTED_CHILD_SCHEMA, "")); @Test public void testEquals() { Struct struct1 = new Struct(FLAT_STRUCT_SCHEMA) .set("int8", (byte) 12) .set("int16", (short) 12) .set("int32", 12) .set("int64", (long) 12) .set("boolean", true) .set("float64", 0.5) .set("string", "foobar"); Struct struct2 = new Struct(FLAT_STRUCT_SCHEMA) .set("int8", (byte) 12) .set("int16", (short) 12) .set("int32", 12) .set("int64", (long) 12) .set("boolean", true) .set("float64", 0.5) .set("string", "foobar"); Struct struct3 = new Struct(FLAT_STRUCT_SCHEMA) .set("int8", (byte) 12) .set("int16", (short) 12) .set("int32", 12) .set("int64", (long) 12) .set("boolean", true) .set("float64", 0.5) .set("string", "mismatching string"); assertEquals(struct1, struct2); assertNotEquals(struct1, struct3); Object[] array = {(byte) 1, (byte) 2}; struct1 = new Struct(NESTED_SCHEMA) .set("array", array) .set("nested", new Struct(NESTED_CHILD_SCHEMA).set("int8", (byte) 12)); Object[] array2 = {(byte) 1, (byte) 2}; struct2 = new Struct(NESTED_SCHEMA) .set("array", array2) .set("nested", new Struct(NESTED_CHILD_SCHEMA).set("int8", (byte) 12)); Object[] array3 = {(byte) 1, (byte) 2, (byte) 3}; struct3 = new Struct(NESTED_SCHEMA) .set("array", array3) .set("nested", new Struct(NESTED_CHILD_SCHEMA).set("int8", (byte) 13)); assertEquals(struct1, struct2); 
assertNotEquals(struct1, struct3); } }
StructTest
java
google__guava
android/guava-testlib/src/com/google/common/testing/ArbitraryInstances.java
{ "start": 19930, "end": 20424 }
class ____ extends ByteSink implements Serializable { private static final NullByteSink INSTANCE = new NullByteSink(); @Override public OutputStream openStream() { return ByteStreams.nullOutputStream(); } } // Compare by toString() to satisfy 2 properties: // 1. compareTo(null) should throw NullPointerException // 2. the order is deterministic and easy to understand, for debugging purpose. @SuppressWarnings("ComparableType") private static final
NullByteSink
java
apache__kafka
storage/src/test/java/org/apache/kafka/tiered/storage/utils/TieredStorageTestUtils.java
{ "start": 3320, "end": 12495 }
class ____ { // Log cleanup interval is configured to be 500 ms. We need to wait at least that amount of time before // segments eligible for deletion gets physically removed. public static final Integer STORAGE_WAIT_TIMEOUT_SEC = 5; // The default value of log cleanup interval is 30 secs, and it increases the test execution time. private static final Integer LOG_CLEANUP_INTERVAL_MS = 500; private static final Integer RLM_TASK_INTERVAL_MS = 500; private static final Integer RLMM_INIT_RETRY_INTERVAL_MS = 300; public static TopicDescription describeTopic(TieredStorageTestContext context, String topic) throws ExecutionException, InterruptedException { return describeTopics(context, List.of(topic)).get(topic); } public static Map<String, TopicDescription> describeTopics(TieredStorageTestContext context, List<String> topics) throws ExecutionException, InterruptedException { return context.admin() .describeTopics(topics) .allTopicNames() .get(); } /** * Get the records found in the local tiered storage. * Snapshot does not sort the filesets by base offset. * @param context The test context. * @param topicPartition The topic-partition of the records. * @return The records found in the local tiered storage. 
*/ public static List<Record> tieredStorageRecords(TieredStorageTestContext context, TopicPartition topicPartition) { return context.takeTieredStorageSnapshot() .getFilesets(topicPartition) .stream() .map(fileset -> { try { return fileset.getRecords(); } catch (IOException e) { throw new RuntimeException(e); } }) .sorted(Comparator.comparingLong(records -> records.get(0).offset())) .flatMap(Collection::stream) .toList(); } @SuppressWarnings("removal") public static Properties createPropsForRemoteStorage(String testClassName, String storageDirPath, int brokerCount, int numRemoteLogMetadataPartitions, Properties overridingProps) { Assertions.assertTrue(STORAGE_WAIT_TIMEOUT_SEC > TimeUnit.MILLISECONDS.toSeconds(RLM_TASK_INTERVAL_MS), "STORAGE_WAIT_TIMEOUT_SEC should be greater than RLM_TASK_INTERVAL_MS"); // Configure the tiered storage in Kafka. Set an interval of 1 second for the remote log manager background // activity to ensure the tiered storage has enough room to be exercised within the lifetime of a test. // // The replication factor of the remote log metadata topic needs to be chosen so that in resiliency // tests, metadata can survive the loss of one replica for its topic-partitions. // // The second-tier storage system is mocked via the LocalTieredStorage instance which persists transferred // data files on the local file system. 
overridingProps.put(ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, "true"); overridingProps.put(ServerConfigs.UNSTABLE_FEATURE_VERSIONS_ENABLE_CONFIG, "true"); overridingProps.setProperty(REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); overridingProps.setProperty(REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, LocalTieredStorage.class.getName()); overridingProps.setProperty(REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, TopicBasedRemoteLogMetadataManager.class.getName()); overridingProps.setProperty(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, RLM_TASK_INTERVAL_MS.toString()); overridingProps.setProperty(REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, "PLAINTEXT"); overridingProps.setProperty(REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP, storageConfigPrefix(testClassName, "")); overridingProps.setProperty(REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP, metadataConfigPrefix(testClassName, "")); overridingProps.setProperty( metadataConfigPrefix(testClassName, TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP), String.valueOf(numRemoteLogMetadataPartitions)); overridingProps.setProperty( metadataConfigPrefix(testClassName, TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_PROP), String.valueOf(brokerCount)); // The below two configurations ensures inactive log segments are deleted fast enough so that // the integration tests can confirm a given log segment is present only in the second-tier storage. // Note that this does not impact the eligibility of a log segment to be offloaded to the // second-tier storage. overridingProps.setProperty(LOG_INITIAL_TASK_DELAY_MS_CONFIG, LOG_CLEANUP_INTERVAL_MS.toString()); overridingProps.setProperty(LOG_CLEANUP_INTERVAL_MS_CONFIG, LOG_CLEANUP_INTERVAL_MS.toString()); // The directory of the second-tier storage needs to be constant across all instances of storage managers // in every broker and throughout the test. Indeed, as brokers are restarted during the test. 
// You can override this property with a fixed path of your choice if you wish to use a non-temporary // directory to access its content after a test terminated. overridingProps.setProperty(storageConfigPrefix(testClassName, STORAGE_DIR_CONFIG), storageDirPath); // This configuration will remove all the remote files when close is called in remote storage manager. // Storage manager close is being called while the server is actively processing the socket requests, // so enabling this config can break the existing tests. // NOTE: When using TestUtils#tempDir(), the folder gets deleted when VM terminates. overridingProps.setProperty(storageConfigPrefix(testClassName, DELETE_ON_CLOSE_CONFIG), "false"); // Set a small number of retry interval for retrying RemoteLogMetadataManager resources initialization to speed up the test overridingProps.setProperty(metadataConfigPrefix(testClassName, REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_PROP), RLMM_INIT_RETRY_INTERVAL_MS.toString()); // Set 2 log dirs to make sure JBOD feature is working correctly overridingProps.setProperty(ServerLogConfigs.LOG_DIRS_CONFIG, TestUtils.tempDir().getAbsolutePath() + "," + TestUtils.tempDir().getAbsolutePath()); // Disable unnecessary log cleaner overridingProps.setProperty(CleanerConfig.LOG_CLEANER_ENABLE_PROP, "false"); return overridingProps; } public static Map<String, String> createTopicConfigForRemoteStorage(boolean enableRemoteStorage, int maxRecordBatchPerSegment) { Map<String, String> topicProps = new HashMap<>(); // Enables remote log storage for this topic. topicProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, String.valueOf(enableRemoteStorage)); // Ensure offset and time indexes are generated for every record. topicProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1"); // Leverage the use of the segment index size to create a log-segment accepting one and only one record. 
// The minimum size of the indexes is that of an entry, which is 8 for the offset index and 12 for the // time index. Hence, since the topic is configured to generate index entries for every record with, for // a "small" number of records (i.e. such that the average record size times the number of records is // much less than the segment size), the number of records which hold in a segment is the multiple of 12 // defined below. topicProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, String.valueOf(12 * maxRecordBatchPerSegment)); // To verify records physically absent from Kafka's storage can be consumed via the second tier storage, we // want to delete log segments as soon as possible. When tiered storage is active, an inactive log // segment is not eligible for deletion until it has been offloaded, which guarantees all segments // should be offloaded before deletion, and their consumption is possible thereafter. topicProps.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1"); return topicProps; } private static String storageConfigPrefix(String testClassName, String key) { return "rsm.config." + testClassName + "." + key; } private static String metadataConfigPrefix(String testClassName, String key) { return "rlmm.config." + testClassName + "." + key; } }
TieredStorageTestUtils
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/atomic/longadder/LongAdderAssert_isBetween_Longs_Test.java
{ "start": 922, "end": 1271 }
class ____ extends LongAdderAssertBaseTest { @Override protected LongAdderAssert invoke_api_method() { return assertions.isBetween(6L, 8L); } @Override protected void verify_internal_effects() { verify(longs).assertIsBetween(getInfo(assertions), getActual(assertions).longValue(), 6L, 8L); } }
LongAdderAssert_isBetween_Longs_Test
java
quarkusio__quarkus
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/scaling/SingleGrpcVerticleTest.java
{ "start": 501, "end": 1161 }
class ____ extends ScalingTestBase { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer( () -> ShrinkWrap.create(JavaArchive.class) .addPackage(GreeterGrpc.class.getPackage()) .addClass(ThreadReturningGreeterService.class)) .withConfigurationResource("single-instance-config.properties"); @Test public void shouldUseMultipleThreads() throws InterruptedException, TimeoutException, ExecutionException { Set<String> threads = getThreadsUsedFor100Requests(); assertThat(threads).hasSize(1); } }
SingleGrpcVerticleTest
java
apache__flink
flink-core/src/test/java/org/apache/flink/api/java/typeutils/ListTypeInfoTest.java
{ "start": 1021, "end": 1391 }
class ____ extends TypeInformationTestBase<ListTypeInfo<?>> { @Override protected ListTypeInfo<?>[] getTestData() { return new ListTypeInfo<?>[] { new ListTypeInfo<>(BasicTypeInfo.STRING_TYPE_INFO), new ListTypeInfo<>(BasicTypeInfo.BOOLEAN_TYPE_INFO), new ListTypeInfo<>(Object.class), }; } }
ListTypeInfoTest
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/iterative/concurrent/BlockingBackChannel.java
{ "start": 1291, "end": 2502 }
class ____ { /** Buffer to send back the superstep results. */ private final SerializedUpdateBuffer buffer; /** A one element queue used for blocking hand over of the buffer. */ private final BlockingQueue<SerializedUpdateBuffer> queue; public BlockingBackChannel(SerializedUpdateBuffer buffer) { this.buffer = buffer; queue = new ArrayBlockingQueue<SerializedUpdateBuffer>(1); } /** * Called by iteration head after it has sent all input for the current superstep through the * data channel (blocks iteration head). */ public DataInputView getReadEndAfterSuperstepEnded() { try { return queue.take().switchBuffers(); } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } } /** Called by iteration tail to save the output of the current superstep. */ public DataOutputView getWriteEnd() { return buffer; } /** * Called by iteration tail to signal that all input of a superstep has been processed (unblocks * iteration head). */ public void notifyOfEndOfSuperstep() { queue.offer(buffer); } }
BlockingBackChannel
java
spring-projects__spring-boot
module/spring-boot-micrometer-metrics-test/src/test/java/org/springframework/boot/micrometer/metrics/test/autoconfigure/MetricsContextCustomizerFactoryTests.java
{ "start": 6548, "end": 6629 }
class ____ { } @AutoConfigureMetrics(export = false) static
MetricsExportDefault
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/impl/SimpleRegistryWrapTest.java
{ "start": 1052, "end": 2032 }
class ____ { private final SimpleRegistry registry = new SimpleRegistry() { @Override public Object wrap(Object value) { return "wrap" + value; } @Override public Object unwrap(Object value) { return "unwrap" + value; } }; @BeforeEach public void setUp() { registry.bind("a", "123"); registry.bind("b", "456"); } @Test public void testLookupByName() { assertEquals("unwrapwrap123", registry.lookupByName("a")); } @Test public void testLookupByNameAndType() { assertEquals("unwrapwrap456", registry.lookupByNameAndType("b", String.class)); } @Test public void testLookupByType() { Map<?, ?> map = registry.findByTypeWithName(String.class); assertEquals(2, map.size()); assertEquals("unwrapwrap123", map.get("a")); assertEquals("unwrapwrap456", map.get("b")); } }
SimpleRegistryWrapTest
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/internal/floatarrays/FloatArrays_assertHasSizeLessThan_Test.java
{ "start": 1037, "end": 1828 }
class ____ extends FloatArraysBaseTest { @Test void should_fail_if_actual_is_null() { assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertHasSizeLessThan(someInfo(), null, 6)) .withMessage(actualIsNull()); } @Test void should_fail_if_size_of_actual_is_not_less_than_boundary() { assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertHasSizeLessThan(someInfo(), actual, 1)) .withMessage(shouldHaveSizeLessThan(actual, actual.length, 1).create()); } @Test void should_pass_if_size_of_actual_is_less_than_boundary() { arrays.assertHasSizeLessThan(someInfo(), actual, 4); } }
FloatArrays_assertHasSizeLessThan_Test
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
{ "start": 2164, "end": 16995 }
class ____ extends ContainerLaunchContext { ContainerLaunchContextProto proto = ContainerLaunchContextProto.getDefaultInstance(); ContainerLaunchContextProto.Builder builder = null; boolean viaProto = false; private Map<String, LocalResource> localResources = null; private ByteBuffer tokens = null; private ByteBuffer tokensConf = null; private Map<String, ByteBuffer> serviceData = null; private Map<String, String> environment = null; private List<String> commands = null; private Map<ApplicationAccessType, String> applicationACLS = null; private ContainerRetryContext containerRetryContext = null; public ContainerLaunchContextPBImpl() { builder = ContainerLaunchContextProto.newBuilder(); } public ContainerLaunchContextPBImpl(ContainerLaunchContextProto proto) { this.proto = proto; viaProto = true; } public ContainerLaunchContextProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } @Override public int hashCode() { return getProto().hashCode(); } @Override public boolean equals(Object other) { if (other == null) return false; if (other.getClass().isAssignableFrom(this.getClass())) { return this.getProto().equals(this.getClass().cast(other).getProto()); } return false; } @Override public String toString() { return TextFormat.shortDebugString(getProto()); } protected final ByteBuffer convertFromProtoFormat(ByteString byteString) { return ProtoUtils.convertFromProtoFormat(byteString); } protected final ByteString convertToProtoFormat(ByteBuffer byteBuffer) { return ProtoUtils.convertToProtoFormat(byteBuffer); } private void mergeLocalToBuilder() { if (this.localResources != null) { addLocalResourcesToProto(); } if (this.tokens != null) { builder.setTokens(convertToProtoFormat(this.tokens)); } if (this.tokensConf != null) { builder.setTokensConf(convertToProtoFormat(this.tokensConf)); } if (this.serviceData != null) { addServiceDataToProto(); } if (this.environment != null) { addEnvToProto(); } if 
(this.commands != null) { addCommandsToProto(); } if (this.applicationACLS != null) { addApplicationACLs(); } if (this.containerRetryContext != null) { builder.setContainerRetryContext( convertToProtoFormat(this.containerRetryContext)); } } private void mergeLocalToProto() { if (viaProto) maybeInitBuilder(); mergeLocalToBuilder(); proto = builder.build(); viaProto = true; } private void maybeInitBuilder() { if (viaProto || builder == null) { builder = ContainerLaunchContextProto.newBuilder(proto); } viaProto = false; } @Override public List<String> getCommands() { initCommands(); return this.commands; } private void initCommands() { if (this.commands != null) { return; } ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder; List<String> list = p.getCommandList(); this.commands = new ArrayList<String>(); for (String c : list) { this.commands.add(c); } } @Override public void setCommands(final List<String> commands) { if (commands == null) return; initCommands(); this.commands.clear(); this.commands.addAll(commands); } private void addCommandsToProto() { maybeInitBuilder(); builder.clearCommand(); if (this.commands == null) return; builder.addAllCommand(this.commands); } @Override public Map<String, LocalResource> getLocalResources() { initLocalResources(); return this.localResources; } private void initLocalResources() { if (this.localResources != null) { return; } ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; List<StringLocalResourceMapProto> list = p.getLocalResourcesList(); this.localResources = new HashMap<String, LocalResource>(); for (StringLocalResourceMapProto c : list) { this.localResources.put(c.getKey(), convertFromProtoFormat(c.getValue())); } } @Override public void setLocalResources( final Map<String, LocalResource> localResources) { if (localResources == null) return; checkLocalResources(localResources); initLocalResources(); this.localResources.clear(); this.localResources.putAll(localResources); } private void checkLocalResources(Map<String, LocalResource> localResources) { for (Map.Entry<String, LocalResource> rsrcEntry : localResources .entrySet()) { if (rsrcEntry.getValue() == null || rsrcEntry.getValue().getResource() == null) { throw new NullPointerException( "Null resource URL for local resource " + rsrcEntry.getKey() + " : " + rsrcEntry.getValue()); } else if (rsrcEntry.getValue().getType() == null) { throw new NullPointerException( "Null resource type for local resource " + rsrcEntry.getKey() + " : " + rsrcEntry.getValue()); } else if (rsrcEntry.getValue().getVisibility() == null) { throw new NullPointerException( "Null resource visibility for local resource " + rsrcEntry.getKey() + " : " + rsrcEntry.getValue()); } } } private void addLocalResourcesToProto() { maybeInitBuilder(); builder.clearLocalResources(); if (localResources == null) return; Iterable<StringLocalResourceMapProto> iterable = new Iterable<StringLocalResourceMapProto>() { @Override public Iterator<StringLocalResourceMapProto> iterator() { return new Iterator<StringLocalResourceMapProto>() { Iterator<String> keyIter = localResources.keySet().iterator(); @Override public void remove() { throw new UnsupportedOperationException(); } @Override public StringLocalResourceMapProto next() { String key = keyIter.next(); return StringLocalResourceMapProto.newBuilder().setKey(key). 
setValue(convertToProtoFormat(localResources.get(key))).build(); } @Override public boolean hasNext() { return keyIter.hasNext(); } }; } }; builder.addAllLocalResources(iterable); } @Override public ByteBuffer getTokens() { ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder; if (this.tokens != null) { return this.tokens; } if (!p.hasTokens()) { return null; } this.tokens = convertFromProtoFormat(p.getTokens()); return this.tokens; } @Override public void setTokens(ByteBuffer tokens) { maybeInitBuilder(); if (tokens == null) { builder.clearTokens(); } this.tokens = tokens; } @Override public ByteBuffer getTokensConf() { ContainerLaunchContextProtoOrBuilder p = viaProto ? proto : builder; if (this.tokensConf != null) { return this.tokensConf; } if (!p.hasTokensConf()) { return null; } this.tokensConf = convertFromProtoFormat(p.getTokensConf()); return this.tokensConf; } @Override public void setTokensConf(ByteBuffer tokensConf) { maybeInitBuilder(); if (tokensConf == null) { builder.clearTokensConf(); } this.tokensConf = tokensConf; } @Override public Map<String, ByteBuffer> getServiceData() { initServiceData(); return this.serviceData; } private void initServiceData() { if (this.serviceData != null) { return; } ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; List<StringBytesMapProto> list = p.getServiceDataList(); this.serviceData = new HashMap<String, ByteBuffer>(); for (StringBytesMapProto c : list) { this.serviceData.put(c.getKey(), convertFromProtoFormat(c.getValue())); } } @Override public void setServiceData(final Map<String, ByteBuffer> serviceData) { if (serviceData == null) return; initServiceData(); this.serviceData.putAll(serviceData); } private void addServiceDataToProto() { maybeInitBuilder(); builder.clearServiceData(); if (serviceData == null) return; Iterable<StringBytesMapProto> iterable = new Iterable<StringBytesMapProto>() { @Override public Iterator<StringBytesMapProto> iterator() { return new Iterator<StringBytesMapProto>() { Iterator<String> keyIter = serviceData.keySet().iterator(); @Override public void remove() { throw new UnsupportedOperationException(); } @Override public StringBytesMapProto next() { String key = keyIter.next(); return StringBytesMapProto.newBuilder().setKey(key).setValue( convertToProtoFormat(serviceData.get(key))).build(); } @Override public boolean hasNext() { return keyIter.hasNext(); } }; } }; builder.addAllServiceData(iterable); } @Override public Map<String, String> getEnvironment() { initEnv(); return this.environment; } private void initEnv() { if (this.environment != null) { return; } ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; List<StringStringMapProto> list = p.getEnvironmentList(); this.environment = new HashMap<String, String>(); for (StringStringMapProto c : list) { this.environment.put(StringInterner.weakIntern(c.getKey()), StringInterner.weakIntern(c.getValue())); } } @Override public void setEnvironment(final Map<String, String> env) { if (env == null) return; initEnv(); this.environment.clear(); for (Map.Entry<String, String> e : env.entrySet()) { this.environment.put(StringInterner.weakIntern(e.getKey()), StringInterner.weakIntern(e.getValue())); } } private void addEnvToProto() { maybeInitBuilder(); builder.clearEnvironment(); if (environment == null) return; Iterable<StringStringMapProto> iterable = new Iterable<StringStringMapProto>() { @Override public Iterator<StringStringMapProto> iterator() { return new Iterator<StringStringMapProto>() { Iterator<String> keyIter = environment.keySet().iterator(); @Override public void remove() { throw new UnsupportedOperationException(); } @Override public StringStringMapProto next() { String key = keyIter.next(); String value = environment.get(key); if (value == null) { value = ""; } return StringStringMapProto.newBuilder().setKey(key) .setValue((value)).build(); } @Override public boolean hasNext() { return keyIter.hasNext(); } }; } }; builder.addAllEnvironment(iterable); } @Override public Map<ApplicationAccessType, String> getApplicationACLs() { initApplicationACLs(); return this.applicationACLS; } private void initApplicationACLs() { if (this.applicationACLS != null) { return; } ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; List<ApplicationACLMapProto> list = p.getApplicationACLsList(); this.applicationACLS = new HashMap<ApplicationAccessType, String>(list .size()); for (ApplicationACLMapProto aclProto : list) { this.applicationACLS.put(ProtoUtils.convertFromProtoFormat(aclProto .getAccessType()), StringInterner.weakIntern(aclProto.getAcl())); } } private void addApplicationACLs() { maybeInitBuilder(); builder.clearApplicationACLs(); if (applicationACLS == null) { return; } Iterable<? extends ApplicationACLMapProto> values = new Iterable<ApplicationACLMapProto>() { @Override public Iterator<ApplicationACLMapProto> iterator() { return new Iterator<ApplicationACLMapProto>() { Iterator<ApplicationAccessType> aclsIterator = applicationACLS .keySet().iterator(); @Override public boolean hasNext() { return aclsIterator.hasNext(); } @Override public ApplicationACLMapProto next() { ApplicationAccessType key = aclsIterator.next(); return ApplicationACLMapProto.newBuilder().setAcl( applicationACLS.get(key)).setAccessType( ProtoUtils.convertToProtoFormat(key)).build(); } @Override public void remove() { throw new UnsupportedOperationException(); } }; } }; this.builder.addAllApplicationACLs(values); } @Override public void setApplicationACLs( final Map<ApplicationAccessType, String> appACLs) { if (appACLs == null) return; initApplicationACLs(); this.applicationACLS.clear(); for (Map.Entry<ApplicationAccessType, String> e : appACLs.entrySet()) { this.applicationACLS.put(e.getKey(), StringInterner.weakIntern(e.getValue())); } } public ContainerRetryContext getContainerRetryContext() { ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; if (this.containerRetryContext != null) { return this.containerRetryContext; } if (!p.hasContainerRetryContext()) { return null; } this.containerRetryContext = convertFromProtoFormat( p.getContainerRetryContext()); return this.containerRetryContext; } public void setContainerRetryContext(ContainerRetryContext retryContext) { maybeInitBuilder(); if (retryContext == null) { builder.clearContainerRetryContext(); } this.containerRetryContext = retryContext; } private LocalResourcePBImpl convertFromProtoFormat(LocalResourceProto p) { return new LocalResourcePBImpl(p); } private LocalResourceProto convertToProtoFormat(LocalResource t) { return ((LocalResourcePBImpl)t).getProto(); } private ContainerRetryContextPBImpl convertFromProtoFormat( ContainerRetryContextProto p) { return new ContainerRetryContextPBImpl(p); } private ContainerRetryContextProto convertToProtoFormat( ContainerRetryContext t) { return ((ContainerRetryContextPBImpl)t).getProto(); } }
ContainerLaunchContextPBImpl
java
quarkusio__quarkus
extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/inheritance/classdenyall/ClassDenyAllBaseResourceWithoutPathExtParentRes_SecurityOnParent.java
{ "start": 76, "end": 220 }
class ____ extends ClassDenyAllParentResourceWithPath_SecurityOnParent { }
ClassDenyAllBaseResourceWithoutPathExtParentRes_SecurityOnParent
java
google__guice
core/src/com/google/inject/util/Modules.java
{ "start": 6716, "end": 7399 }
class ____ implements OverriddenModuleBuilder { private final ImmutableSet<Module> baseModules; // TODO(diamondm) checkArgument(!baseModules.isEmpty())? private RealOverriddenModuleBuilder(Iterable<? extends Module> baseModules) { this.baseModules = ImmutableSet.copyOf(baseModules); } @Override public Module with(Module... overrides) { return with(Arrays.asList(overrides)); } @Override public Module with() { return with(Arrays.asList()); } @Override public Module with(Iterable<? extends Module> overrides) { return new OverrideModule(overrides, baseModules); } } static
RealOverriddenModuleBuilder
java
lettuce-io__lettuce-core
src/main/java/io/lettuce/core/cluster/RedisClusterClient.java
{ "start": 7763, "end": 57793 }
class ____ extends AbstractRedisClient { private static final InternalLogger logger = InternalLoggerFactory.getInstance(RedisClusterClient.class); private final ClusterTopologyRefresh refresh; private final ClusterTopologyRefreshScheduler topologyRefreshScheduler = new ClusterTopologyRefreshScheduler( this::getClusterClientOptions, this::getPartitions, this::refreshPartitionsAsync, getResources()); private final Iterable<RedisURI> initialUris; private volatile Partitions partitions; /** * Non-private constructor to make {@link RedisClusterClient} proxyable. */ protected RedisClusterClient() { super(null); this.initialUris = Collections.emptyList(); this.refresh = createTopologyRefresh(); } /** * Initialize the client with a list of cluster URI's. All uris are tried in sequence for connecting initially to the * cluster. If any uri is successful for connection, the others are not tried anymore. The initial uri is needed to discover * the cluster structure for distributing the requests. * * @param clientResources the client resources. If {@code null}, the client will create a new dedicated instance of client * resources and keep track of them. * @param redisURIs iterable of initial {@link RedisURI cluster URIs}. Must not be {@code null} and not empty. 
*/ protected RedisClusterClient(ClientResources clientResources, Iterable<RedisURI> redisURIs) { super(clientResources); assertNotEmpty(redisURIs); assertSameOptions(redisURIs); this.initialUris = Collections.unmodifiableList(LettuceLists.newList(redisURIs)); this.refresh = createTopologyRefresh(); setOptions(ClusterClientOptions.create()); } private static void assertSameOptions(Iterable<RedisURI> redisURIs) { Boolean ssl = null; Boolean startTls = null; Boolean verifyPeer = null; for (RedisURI redisURI : redisURIs) { if (ssl == null) { ssl = redisURI.isSsl(); } if (startTls == null) { startTls = redisURI.isStartTls(); } if (verifyPeer == null) { verifyPeer = redisURI.isVerifyPeer(); } if (ssl.booleanValue() != redisURI.isSsl()) { throw new IllegalArgumentException( "RedisURI " + redisURI + " SSL is not consistent with the other seed URI SSL settings"); } if (startTls.booleanValue() != redisURI.isStartTls()) { throw new IllegalArgumentException( "RedisURI " + redisURI + " StartTLS is not consistent with the other seed URI StartTLS settings"); } if (verifyPeer.booleanValue() != redisURI.isVerifyPeer()) { throw new IllegalArgumentException( "RedisURI " + redisURI + " VerifyPeer is not consistent with the other seed URI VerifyPeer settings"); } } } /** * Create a new client that connects to the supplied {@link RedisURI uri} with default {@link ClientResources}. You can * connect to different Redis servers but you must supply a {@link RedisURI} on connecting. * * @param redisURI the Redis URI, must not be {@code null} * @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(RedisURI redisURI) { assertNotNull(redisURI); return create(Collections.singleton(redisURI)); } /** * Create a new client that connects to the supplied {@link RedisURI uri} with default {@link ClientResources}. You can * connect to different Redis servers but you must supply a {@link RedisURI} on connecting. 
* * @param redisURIs one or more Redis URI, must not be {@code null} and not empty. * @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(Iterable<RedisURI> redisURIs) { assertNotEmpty(redisURIs); assertSameOptions(redisURIs); return new RedisClusterClient(null, redisURIs); } /** * Create a new client that connects to the supplied uri with default {@link ClientResources}. You can connect to different * Redis servers but you must supply a {@link RedisURI} on connecting. * * @param uri the Redis URI, must not be empty or {@code null}. * @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(String uri) { LettuceAssert.notEmpty(uri, "URI must not be empty"); return create(RedisClusterURIUtil.toRedisURIs(URI.create(uri))); } /** * Create a new client that connects to the supplied {@link RedisURI uri} with shared {@link ClientResources}. You need to * shut down the {@link ClientResources} upon shutting down your application.You can connect to different Redis servers but * you must supply a {@link RedisURI} on connecting. * * @param clientResources the client resources, must not be {@code null} * @param redisURI the Redis URI, must not be {@code null} * @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(ClientResources clientResources, RedisURI redisURI) { assertNotNull(clientResources); assertNotNull(redisURI); return create(clientResources, Collections.singleton(redisURI)); } /** * Create a new client that connects to the supplied uri with shared {@link ClientResources}.You need to shut down the * {@link ClientResources} upon shutting down your application. You can connect to different Redis servers but you must * supply a {@link RedisURI} on connecting. * * @param clientResources the client resources, must not be {@code null} * @param uri the Redis URI, must not be empty or {@code null}. 
* @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(ClientResources clientResources, String uri) { assertNotNull(clientResources); LettuceAssert.notEmpty(uri, "URI must not be empty"); return create(clientResources, RedisClusterURIUtil.toRedisURIs(URI.create(uri))); } /** * Create a new client that connects to the supplied {@link RedisURI uri} with shared {@link ClientResources}. You need to * shut down the {@link ClientResources} upon shutting down your application.You can connect to different Redis servers but * you must supply a {@link RedisURI} on connecting. * * @param clientResources the client resources, must not be {@code null} * @param redisURIs one or more Redis URI, must not be {@code null} and not empty * @return a new instance of {@link RedisClusterClient} */ public static RedisClusterClient create(ClientResources clientResources, Iterable<RedisURI> redisURIs) { assertNotNull(clientResources); assertNotEmpty(redisURIs); assertSameOptions(redisURIs); return new RedisClusterClient(clientResources, redisURIs); } /** * Set the {@link ClusterClientOptions} for the client. * * @param clientOptions client options for the client and connections that are created after setting the options */ public void setOptions(ClusterClientOptions clientOptions) { super.setOptions(clientOptions); } /** * Retrieve the cluster view. Partitions are shared amongst all connections opened by this client instance. * * @return the partitions. */ public Partitions getPartitions() { if (partitions == null) { get(initializePartitions(), e -> new RedisException("Cannot obtain initial Redis Cluster topology", e)); } return partitions; } /** * Returns the seed {@link RedisURI} for the topology refreshing. This method is called before each topology refresh to * provide an {@link Iterable} of {@link RedisURI} that is used to perform the next topology refresh. * <p> * Subclasses of {@link RedisClusterClient} may override that method. 
* * @return {@link Iterable} of {@link RedisURI} for the next topology refresh. */ protected Iterable<RedisURI> getTopologyRefreshSource() { boolean initialSeedNodes = !useDynamicRefreshSources(); Iterable<RedisURI> seed; if (initialSeedNodes || partitions == null || partitions.isEmpty()) { seed = this.initialUris; } else { List<RedisURI> uris = new ArrayList<>(); for (RedisClusterNode partition : TopologyComparators.sortByUri(partitions)) { uris.add(partition.getUri()); } seed = uris; } return seed; } /** * Connect to a Redis Cluster and treat keys and values as UTF-8 strings. * <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the lowest latency</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * <li>Pub/sub commands are sent to the node that handles the slot derived from the pub/sub channel</li> * </ul> * * @return A new stateful Redis Cluster connection */ public StatefulRedisClusterConnection<String, String> connect() { return connect(newStringStringCodec()); } /** * Connect to a Redis Cluster. Use the supplied {@link RedisCodec codec} to encode/decode keys and values. 
* <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the lowest latency</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * <li>Pub/sub commands are sent to the node that handles the slot derived from the pub/sub channel</li> * </ul> * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return A new stateful Redis Cluster connection */ public <K, V> StatefulRedisClusterConnection<K, V> connect(RedisCodec<K, V> codec) { assertInitialPartitions(); return getConnection(connectClusterAsync(codec)); } /** * Connect asynchronously to a Redis Cluster. Use the supplied {@link RedisCodec codec} to encode/decode keys and values. * Connecting asynchronously requires an initialized topology. Call {@link #getPartitions()} first, otherwise the connect * will fail with a{@link IllegalStateException}. * <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the lowest latency</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * <li>Pub/sub commands are sent to the node that handles the slot derived from the pub/sub channel</li> * </ul> * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return a {@link CompletableFuture} that is notified with the connection progress. 
* @since 5.1 */ public <K, V> CompletableFuture<StatefulRedisClusterConnection<K, V>> connectAsync(RedisCodec<K, V> codec) { return transformAsyncConnectionException(connectClusterAsync(codec), getInitialUris()); } /** * Connect to a Redis Cluster using pub/sub connections and treat keys and values as UTF-8 strings. * <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the least number of clients</li> * <li>Pub/sub commands are sent to the node with the least number of clients</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * </ul> * * @return A new stateful Redis Cluster connection */ public StatefulRedisClusterPubSubConnection<String, String> connectPubSub() { return connectPubSub(newStringStringCodec()); } /** * Connect to a Redis Cluster using pub/sub connections. Use the supplied {@link RedisCodec codec} to encode/decode keys and * values. 
* <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the least number of clients</li> * <li>Pub/sub commands are sent to the node with the least number of clients</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * </ul> * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return A new stateful Redis Cluster connection */ public <K, V> StatefulRedisClusterPubSubConnection<K, V> connectPubSub(RedisCodec<K, V> codec) { assertInitialPartitions(); return getConnection(connectClusterPubSubAsync(codec)); } /** * Connect asynchronously to a Redis Cluster using pub/sub connections. Use the supplied {@link RedisCodec codec} to * encode/decode keys and values. Connecting asynchronously requires an initialized topology. Call {@link #getPartitions()} * first, otherwise the connect will fail with a{@link IllegalStateException}. * <p> * What to expect from this connection: * </p> * <ul> * <li>A <i>default</i> connection is created to the node with the least number of clients</li> * <li>Pub/sub commands are sent to the node with the least number of clients</li> * <li>Keyless commands are send to the default connection</li> * <li>Single-key keyspace commands are routed to the appropriate node</li> * <li>Multi-key keyspace commands require the same slot-hash and are routed to the appropriate node</li> * </ul> * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return a {@link CompletableFuture} that is notified with the connection progress. 
* @since 5.1 */ public <K, V> CompletableFuture<StatefulRedisClusterPubSubConnection<K, V>> connectPubSubAsync(RedisCodec<K, V> codec) { return transformAsyncConnectionException(connectClusterPubSubAsync(codec), getInitialUris()); } StatefulRedisConnection<String, String> connectToNode(SocketAddress socketAddress) { return connectToNode(newStringStringCodec(), socketAddress.toString(), null, Mono.just(socketAddress)); } /** * Create a connection to a redis socket address. * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param nodeId the nodeId * @param clusterWriter global cluster writer * @param socketAddressSupplier supplier for the socket address * @param <K> Key type * @param <V> Value type * @return A new connection */ <K, V> StatefulRedisConnection<K, V> connectToNode(RedisCodec<K, V> codec, String nodeId, RedisChannelWriter clusterWriter, Mono<SocketAddress> socketAddressSupplier) { return getConnection(connectToNodeAsync(codec, nodeId, clusterWriter, socketAddressSupplier)); } /** * Create a connection to a redis socket address. 
* * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param nodeId the nodeId * @param clusterWriter global cluster writer * @param socketAddressSupplier supplier for the socket address * @param <K> Key type * @param <V> Value type * @return A new connection */ <K, V> ConnectionFuture<StatefulRedisConnection<K, V>> connectToNodeAsync(RedisCodec<K, V> codec, String nodeId, RedisChannelWriter clusterWriter, Mono<SocketAddress> socketAddressSupplier) { assertNotNull(codec); assertNotEmpty(initialUris); LettuceAssert.notNull(socketAddressSupplier, "SocketAddressSupplier must not be null"); ClusterNodeEndpoint endpoint = new ClusterNodeEndpoint(getClusterClientOptions(), getResources(), clusterWriter); RedisChannelWriter writer = endpoint; if (CommandExpiryWriter.isSupported(getClusterClientOptions())) { writer = CommandExpiryWriter.buildCommandExpiryWriter(writer, getClusterClientOptions(), getResources()); } if (CommandListenerWriter.isSupported(getCommandListeners())) { writer = new CommandListenerWriter(writer, getCommandListeners()); } StatefulRedisConnectionImpl<K, V> connection = newStatefulRedisConnection(writer, endpoint, codec, getFirstUri().getTimeout(), getClusterClientOptions().getJsonParser()); connection.setAuthenticationHandler( createHandler(connection, getFirstUri().getCredentialsProvider(), false, getOptions())); ConnectionFuture<StatefulRedisConnection<K, V>> connectionFuture = connectStatefulAsync(connection, endpoint, getFirstUri(), socketAddressSupplier, () -> new CommandHandler(getClusterClientOptions(), getResources(), endpoint)); return connectionFuture.whenComplete((conn, throwable) -> { if (throwable != null) { connection.closeAsync(); } }); } /** * Create a new instance of {@link StatefulRedisConnectionImpl} or a subclass. * <p> * Subclasses of {@link RedisClusterClient} may override that method. 
* * @param channelWriter the channel writer * @param pushHandler the handler for push notifications * @param codec codec * @param timeout default timeout * @param parser the JSON parser to be used * @param <K> Key-Type * @param <V> Value Type * @return new instance of StatefulRedisConnectionImpl */ protected <K, V> StatefulRedisConnectionImpl<K, V> newStatefulRedisConnection(RedisChannelWriter channelWriter, PushHandler pushHandler, RedisCodec<K, V> codec, Duration timeout, Supplier<JsonParser> parser) { return new StatefulRedisConnectionImpl<>(channelWriter, pushHandler, codec, timeout, parser); } /** * Create a new instance of {@link StatefulRedisConnectionImpl} or a subclass. * <p> * Subclasses of {@link RedisClusterClient} may override that method. * * @param channelWriter the channel writer * @param pushHandler the handler for push notifications * @param codec codec * @param timeout default timeout * @param <K> Key-Type * @param <V> Value Type * @return new instance of StatefulRedisConnectionImpl */ protected <K, V> StatefulRedisConnectionImpl<K, V> newStatefulRedisConnection(RedisChannelWriter channelWriter, PushHandler pushHandler, RedisCodec<K, V> codec, Duration timeout) { return new StatefulRedisConnectionImpl<>(channelWriter, pushHandler, codec, timeout); } /** * Create a pub/sub connection to a redis socket address. 
* * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param nodeId the nodeId * @param socketAddressSupplier supplier for the socket address * @param <K> Key type * @param <V> Value type * @return A new connection */ <K, V> ConnectionFuture<StatefulRedisPubSubConnection<K, V>> connectPubSubToNodeAsync(RedisCodec<K, V> codec, String nodeId, Mono<SocketAddress> socketAddressSupplier) { assertNotNull(codec); assertNotEmpty(initialUris); LettuceAssert.notNull(socketAddressSupplier, "SocketAddressSupplier must not be null"); logger.debug("connectPubSubToNode(" + nodeId + ")"); PubSubEndpoint<K, V> endpoint = new PubSubEndpoint<>(getClusterClientOptions(), getResources()); RedisChannelWriter writer = endpoint; if (CommandExpiryWriter.isSupported(getClusterClientOptions())) { writer = CommandExpiryWriter.buildCommandExpiryWriter(writer, getClusterClientOptions(), getResources()); } if (CommandListenerWriter.isSupported(getCommandListeners())) { writer = new CommandListenerWriter(writer, getCommandListeners()); } StatefulRedisPubSubConnectionImpl<K, V> connection = new StatefulRedisPubSubConnectionImpl<>(endpoint, writer, codec, getFirstUri().getTimeout()); connection.setAuthenticationHandler( createHandler(connection, getFirstUri().getCredentialsProvider(), true, getOptions())); ConnectionFuture<StatefulRedisPubSubConnection<K, V>> connectionFuture = connectStatefulAsync(connection, endpoint, getFirstUri(), socketAddressSupplier, () -> new PubSubCommandHandler<>(getClusterClientOptions(), getResources(), codec, endpoint)); return connectionFuture.whenComplete((conn, throwable) -> { if (throwable != null) { connection.closeAsync(); } }); } /** * Create a clustered pub/sub connection with command distributor. 
* * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return a new connection */ private <K, V> CompletableFuture<StatefulRedisClusterConnection<K, V>> connectClusterAsync(RedisCodec<K, V> codec) { if (partitions == null) { return Futures.failed(new IllegalStateException( "Partitions not initialized. Initialize via RedisClusterClient.getPartitions().")); } topologyRefreshScheduler.activateTopologyRefreshIfNeeded(); logger.debug("connectCluster(" + initialUris + ")"); DefaultEndpoint endpoint = new DefaultEndpoint(getClusterClientOptions(), getResources()); RedisChannelWriter writer = endpoint; if (CommandExpiryWriter.isSupported(getClusterClientOptions())) { writer = CommandExpiryWriter.buildCommandExpiryWriter(writer, getClusterClientOptions(), getResources()); } if (CommandListenerWriter.isSupported(getCommandListeners())) { writer = new CommandListenerWriter(writer, getCommandListeners()); } ClusterDistributionChannelWriter clusterWriter = new ClusterDistributionChannelWriter(writer, getClusterClientOptions(), topologyRefreshScheduler); PooledClusterConnectionProvider<K, V> pooledClusterConnectionProvider = new PooledClusterConnectionProvider<>(this, clusterWriter, codec, topologyRefreshScheduler); clusterWriter.setClusterConnectionProvider(pooledClusterConnectionProvider); StatefulRedisClusterConnectionImpl<K, V> connection = newStatefulRedisClusterConnection(clusterWriter, pooledClusterConnectionProvider, codec, getFirstUri().getTimeout(), getClusterClientOptions().getJsonParser()); connection.setReadFrom(ReadFrom.UPSTREAM); connection.setPartitions(partitions); Supplier<CommandHandler> commandHandlerSupplier = () -> new CommandHandler(getClusterClientOptions(), getResources(), endpoint); Mono<SocketAddress> socketAddressSupplier = getSocketAddressSupplier(connection::getPartitions, TopologyComparators::sortByClientCount); Mono<StatefulRedisClusterConnectionImpl<K, V>> 
connectionMono = Mono .defer(() -> connect(socketAddressSupplier, endpoint, connection, commandHandlerSupplier)); for (int i = 1; i < getConnectionAttempts(); i++) { connectionMono = connectionMono .onErrorResume(t -> connect(socketAddressSupplier, endpoint, connection, commandHandlerSupplier)); } return connectionMono .doOnNext( c -> connection.registerCloseables(closeableResources, clusterWriter, pooledClusterConnectionProvider)) .map(it -> (StatefulRedisClusterConnection<K, V>) it).toFuture(); } /** * Create a new instance of {@link StatefulRedisClusterConnectionImpl} or a subclass. * <p> * Subclasses of {@link RedisClusterClient} may override that method. * * @param channelWriter the channel writer * @param pushHandler the handler for push notifications * @param codec codec * @param timeout default timeout * @param parser the Json parser to be used * @param <K> Key-Type * @param <V> Value Type * @return new instance of StatefulRedisClusterConnectionImpl */ protected <V, K> StatefulRedisClusterConnectionImpl<K, V> newStatefulRedisClusterConnection( RedisChannelWriter channelWriter, ClusterPushHandler pushHandler, RedisCodec<K, V> codec, Duration timeout, Supplier<JsonParser> parser) { return new StatefulRedisClusterConnectionImpl(channelWriter, pushHandler, codec, timeout, parser); } /** * Create a new instance of {@link StatefulRedisClusterConnectionImpl} or a subclass. * <p> * Subclasses of {@link RedisClusterClient} may override that method. 
* * @param channelWriter the channel writer * @param pushHandler the handler for push notifications * @param codec codec * @param timeout default timeout * @param <K> Key-Type * @param <V> Value Type * @return new instance of StatefulRedisClusterConnectionImpl */ protected <V, K> StatefulRedisClusterConnectionImpl<K, V> newStatefulRedisClusterConnection( RedisChannelWriter channelWriter, ClusterPushHandler pushHandler, RedisCodec<K, V> codec, Duration timeout) { return new StatefulRedisClusterConnectionImpl(channelWriter, pushHandler, codec, timeout); } private <T, K, V> Mono<T> connect(Mono<SocketAddress> socketAddressSupplier, DefaultEndpoint endpoint, StatefulRedisClusterConnectionImpl<K, V> connection, Supplier<CommandHandler> commandHandlerSupplier) { ConnectionFuture<T> future = connectStatefulAsync(connection, endpoint, getFirstUri(), socketAddressSupplier, commandHandlerSupplier); return Mono.fromCompletionStage(future).doOnError(t -> logger.warn(t.getMessage())); } private <T, K, V> Mono<T> connect(Mono<SocketAddress> socketAddressSupplier, DefaultEndpoint endpoint, StatefulRedisConnectionImpl<K, V> connection, Supplier<CommandHandler> commandHandlerSupplier) { ConnectionFuture<T> future = connectStatefulAsync(connection, endpoint, getFirstUri(), socketAddressSupplier, commandHandlerSupplier); return Mono.fromCompletionStage(future).doOnError(t -> logger.warn(t.getMessage())); } /** * Create a clustered connection with command distributor. * * @param codec Use this codec to encode/decode keys and values, must not be {@code null} * @param <K> Key type * @param <V> Value type * @return a new connection */ private <K, V> CompletableFuture<StatefulRedisClusterPubSubConnection<K, V>> connectClusterPubSubAsync( RedisCodec<K, V> codec) { if (partitions == null) { return Futures.failed(new IllegalStateException( "Partitions not initialized. 
Initialize via RedisClusterClient.getPartitions().")); } topologyRefreshScheduler.activateTopologyRefreshIfNeeded(); logger.debug("connectClusterPubSub(" + initialUris + ")"); PubSubClusterEndpoint<K, V> endpoint = new PubSubClusterEndpoint<>(getClusterClientOptions(), getResources()); RedisChannelWriter writer = endpoint; if (CommandExpiryWriter.isSupported(getClusterClientOptions())) { writer = CommandExpiryWriter.buildCommandExpiryWriter(writer, getClusterClientOptions(), getResources()); } if (CommandListenerWriter.isSupported(getCommandListeners())) { writer = new CommandListenerWriter(writer, getCommandListeners()); } ClusterDistributionChannelWriter clusterWriter = new ClusterDistributionChannelWriter(writer, getClusterClientOptions(), topologyRefreshScheduler); ClusterPubSubConnectionProvider<K, V> pooledClusterConnectionProvider = new ClusterPubSubConnectionProvider<>(this, clusterWriter, codec, endpoint.getUpstreamListener(), topologyRefreshScheduler); StatefulRedisClusterPubSubConnectionImpl<K, V> connection = new StatefulRedisClusterPubSubConnectionImpl<>(endpoint, pooledClusterConnectionProvider, clusterWriter, codec, getFirstUri().getTimeout()); clusterWriter.setClusterConnectionProvider(pooledClusterConnectionProvider); connection.setPartitions(partitions); connection.setAuthenticationHandler( createHandler(connection, getFirstUri().getCredentialsProvider(), true, getOptions())); Supplier<CommandHandler> commandHandlerSupplier = () -> new PubSubCommandHandler<>(getClusterClientOptions(), getResources(), codec, endpoint); Mono<SocketAddress> socketAddressSupplier = getSocketAddressSupplier(connection::getPartitions, TopologyComparators::sortByClientCount); Mono<StatefulRedisClusterPubSubConnectionImpl<K, V>> connectionMono = Mono .defer(() -> connect(socketAddressSupplier, endpoint, connection, commandHandlerSupplier)); for (int i = 1; i < getConnectionAttempts(); i++) { connectionMono = connectionMono .onErrorResume(t -> 
connect(socketAddressSupplier, endpoint, connection, commandHandlerSupplier)); } return connectionMono .doOnNext( c -> connection.registerCloseables(closeableResources, clusterWriter, pooledClusterConnectionProvider)) .map(it -> (StatefulRedisClusterPubSubConnection<K, V>) it).toFuture(); } private int getConnectionAttempts() { return Math.max(1, partitions.size()); } /** * Initiates a channel connection considering {@link ClientOptions} initialization options, authentication and client name * options. */ @SuppressWarnings("unchecked") private <K, V, T extends StatefulRedisClusterConnectionImpl<K, V>, S> ConnectionFuture<S> connectStatefulAsync(T connection, DefaultEndpoint endpoint, RedisURI connectionSettings, Mono<SocketAddress> socketAddressSupplier, Supplier<CommandHandler> commandHandlerSupplier) { ConnectionBuilder connectionBuilder = createConnectionBuilder(connection, connection.getConnectionState(), endpoint, connectionSettings, socketAddressSupplier, commandHandlerSupplier); ConnectionFuture<RedisChannelHandler<K, V>> future = initializeChannelAsync(connectionBuilder); return future.thenApply(channelHandler -> (S) connection); } /** * Initiates a channel connection considering {@link ClientOptions} initialization options, authentication and client name * options. 
*/ @SuppressWarnings("unchecked") private <K, V, T extends StatefulRedisConnectionImpl<K, V>, S> ConnectionFuture<S> connectStatefulAsync(T connection, DefaultEndpoint endpoint, RedisURI connectionSettings, Mono<SocketAddress> socketAddressSupplier, Supplier<CommandHandler> commandHandlerSupplier) { ConnectionBuilder connectionBuilder = createConnectionBuilder(connection, connection.getConnectionState(), endpoint, connectionSettings, socketAddressSupplier, commandHandlerSupplier); ConnectionFuture<RedisChannelHandler<K, V>> future = initializeChannelAsync(connectionBuilder); return future.thenApply(channelHandler -> (S) connection); } private <K, V> ConnectionBuilder createConnectionBuilder(RedisChannelHandler<K, V> connection, ConnectionState state, DefaultEndpoint endpoint, RedisURI connectionSettings, Mono<SocketAddress> socketAddressSupplier, Supplier<CommandHandler> commandHandlerSupplier) { ConnectionBuilder connectionBuilder; if (connectionSettings.isSsl()) { SslConnectionBuilder sslConnectionBuilder = SslConnectionBuilder.sslConnectionBuilder(); sslConnectionBuilder.ssl(connectionSettings); connectionBuilder = sslConnectionBuilder; } else { connectionBuilder = ConnectionBuilder.connectionBuilder(); } state.apply(connectionSettings); connectionBuilder.connectionInitializer(createHandshake(state)); connectionBuilder.reconnectionListener(new ReconnectEventListener(topologyRefreshScheduler)); connectionBuilder.clientOptions(getClusterClientOptions()); connectionBuilder.connection(connection); connectionBuilder.clientResources(getResources()); connectionBuilder.endpoint(endpoint); connectionBuilder.commandHandler(commandHandlerSupplier); connectionBuilder(socketAddressSupplier, connectionBuilder, connection.getConnectionEvents(), connectionSettings); return connectionBuilder; } /** * Refresh partitions and re-initialize the routing table. * * @deprecated since 6.0. Renamed to {@link #refreshPartitions()}. 
*/ @Deprecated public void reloadPartitions() { refreshPartitions(); } /** * Refresh partitions and re-initialize the routing table. * * @since 6.0 */ public void refreshPartitions() { get(refreshPartitionsAsync().toCompletableFuture(), e -> new RedisException("Cannot reload Redis Cluster topology", e)); } /** * Asynchronously reload partitions and re-initialize the distribution table. * * @return a {@link CompletionStage} that signals completion. * @since 6.0 */ public CompletionStage<Void> refreshPartitionsAsync() { List<RedisURI> sources = new ArrayList<>(); Iterable<RedisURI> topologyRefreshSource = getTopologyRefreshSource(); for (RedisURI redisURI : topologyRefreshSource) { sources.add(redisURI); } EventRecorder.RecordableEvent event = EventRecorder.getInstance().start(new TopologyRefreshEvent(sources)); if (partitions == null) { return initializePartitions().thenAccept(Partitions::updateCache) .whenComplete((unused, throwable) -> event.record()); } return loadPartitionsAsync().thenAccept(loadedPartitions -> { if (TopologyComparators.isChanged(getPartitions(), loadedPartitions)) { logger.debug("Using a new cluster topology"); List<RedisClusterNode> before = new ArrayList<>(getPartitions()); List<RedisClusterNode> after = new ArrayList<>(loadedPartitions); getResources().eventBus().publish(new ClusterTopologyChangedEvent(before, after)); } this.partitions.reload(loadedPartitions.getPartitions()); updatePartitionsInConnections(); }).whenComplete((unused, throwable) -> event.record()); } /** * Suspend periodic topology refresh if it was activated previously. Suspending cancels the periodic schedule without * interrupting any running topology refresh. Suspension is in place until obtaining a new {@link #connect connection}. * * @since 6.3 */ public void suspendTopologyRefresh() { topologyRefreshScheduler.suspendTopologyRefresh(); } /** * Return whether a scheduled or adaptive topology refresh is in progress. 
* * @return {@code true} if a topology refresh is in progress. * @since 6.3 */ public boolean isTopologyRefreshInProgress() { return topologyRefreshScheduler.isTopologyRefreshInProgress(); } protected void updatePartitionsInConnections() { forEachClusterConnection(input -> { input.setPartitions(partitions); }); forEachClusterPubSubConnection(input -> { input.setPartitions(partitions); }); } protected CompletableFuture<Partitions> initializePartitions() { return loadPartitionsAsync().thenApply(it -> this.partitions = it); } private void assertInitialPartitions() { if (partitions == null) { get(initializePartitions(), e -> new RedisConnectionException("Unable to establish a connection to Redis Cluster", e)); } } /** * Retrieve partitions. Nodes within {@link Partitions} are ordered by latency. Lower latency nodes come first. * * @return Partitions */ protected Partitions loadPartitions() { return get(loadPartitionsAsync(), Function.identity()); } private static <T> T get(CompletableFuture<T> future, Function<RedisException, RedisException> mapper) { try { return future.get(); } catch (ExecutionException e) { if (e.getCause() instanceof RedisException) { throw mapper.apply((RedisException) e.getCause()); } throw Exceptions.bubble(e); } catch (Exception e) { throw Exceptions.bubble(e); } } /** * Retrieve partitions. Nodes within {@link Partitions} are ordered by latency. Lower latency nodes come first. * * @return future that emits {@link Partitions} upon a successful topology lookup. 
* @since 6.0 */ protected CompletableFuture<Partitions> loadPartitionsAsync() { Iterable<RedisURI> topologyRefreshSource = getTopologyRefreshSource(); CompletableFuture<Partitions> future = new CompletableFuture<>(); fetchPartitions(topologyRefreshSource).whenComplete((nodes, throwable) -> { if (throwable == null) { future.complete(nodes); return; } // Attempt recovery using initial seed nodes if (useDynamicRefreshSources() && topologyRefreshSource != initialUris) { fetchPartitions(initialUris).whenComplete((nextNodes, nextThrowable) -> { if (nextThrowable != null) { Throwable exception = Exceptions.unwrap(nextThrowable); exception.addSuppressed(Exceptions.unwrap(throwable)); future.completeExceptionally(exception); } else { future.complete(nextNodes); } }); } else { future.completeExceptionally(Exceptions.unwrap(throwable)); } }); Predicate<RedisClusterNode> nodeFilter = getClusterClientOptions().getNodeFilter(); if (nodeFilter != ClusterClientOptions.DEFAULT_NODE_FILTER) { return future.thenApply(partitions -> { List<RedisClusterNode> toRemove = new ArrayList<>(); for (RedisClusterNode partition : partitions) { if (!nodeFilter.test(partition)) { toRemove.add(partition); } } partitions.removeAll(toRemove); return partitions; }); } return future; } private CompletionStage<Partitions> fetchPartitions(Iterable<RedisURI> topologyRefreshSource) { CompletionStage<Map<RedisURI, Partitions>> topology = refresh.loadViews(topologyRefreshSource, getClusterClientOptions().getSocketOptions().getConnectTimeout(), useDynamicRefreshSources()); return topology.thenApply(partitions -> { if (partitions.isEmpty()) { throw new RedisException(String.format("Cannot retrieve initial cluster partitions from initial URIs %s", topologyRefreshSource)); } Partitions loadedPartitions = determinePartitions(this.partitions, partitions); RedisURI viewedBy = getViewedBy(partitions, loadedPartitions); for (RedisClusterNode partition : loadedPartitions) { if (viewedBy != null) { RedisURI uri = 
partition.getUri(); RedisClusterURIUtil.applyUriConnectionSettings(viewedBy, uri); } } topologyRefreshScheduler.activateTopologyRefreshIfNeeded(); return loadedPartitions; }); } /** * Determines a {@link Partitions topology view} based on the current and the obtain topology views. * * @param current the current topology view. May be {@code null} if {@link RedisClusterClient} has no topology view yet. * @param topologyViews the obtain topology views * @return the {@link Partitions topology view} to use. */ protected Partitions determinePartitions(Partitions current, Map<RedisURI, Partitions> topologyViews) { if (current == null) { return PartitionsConsensus.HEALTHY_MAJORITY.getPartitions(null, topologyViews); } return PartitionsConsensus.KNOWN_MAJORITY.getPartitions(current, topologyViews); } /** * Sets the new cluster topology. The partitions are not applied to existing connections. * * @param partitions partitions object */ public void setPartitions(Partitions partitions) { this.partitions = partitions; } /** * Shutdown this client and close all open connections asynchronously. The client should be discarded after calling * shutdown. * * @param quietPeriod the quiet period as described in the documentation * @param timeout the maximum amount of time to wait until the executor is shutdown regardless if a task was submitted * during the quiet period * @param timeUnit the unit of {@code quietPeriod} and {@code timeout} * @since 4.4 */ @Override public CompletableFuture<Void> shutdownAsync(long quietPeriod, long timeout, TimeUnit timeUnit) { suspendTopologyRefresh(); return super.shutdownAsync(quietPeriod, timeout, timeUnit); } // ------------------------------------------------------------------------- // Implementation hooks and helper methods // ------------------------------------------------------------------------- /** * Returns the first {@link RedisURI} configured with this {@link RedisClusterClient} instance. * * @return the first {@link RedisURI}. 
*/ protected RedisURI getFirstUri() { assertNotEmpty(initialUris); Iterator<RedisURI> iterator = initialUris.iterator(); return iterator.next(); } /** * Returns a {@link Supplier} for {@link SocketAddress connection points}. * * @param sortFunction Sort function to enforce a specific order. The sort function must not change the order or the input * parameter but create a new collection with the desired order, must not be {@code null}. * @return {@link Supplier} for {@link SocketAddress connection points}. */ protected Mono<SocketAddress> getSocketAddressSupplier(Supplier<Partitions> partitionsSupplier, Function<Partitions, Collection<RedisClusterNode>> sortFunction) { LettuceAssert.notNull(sortFunction, "Sort function must not be null"); RoundRobinSocketAddressSupplier socketAddressSupplier = new RoundRobinSocketAddressSupplier(partitionsSupplier, sortFunction, getResources()); return Mono.defer(() -> { if (partitions.isEmpty()) { return Mono.fromCallable(() -> { SocketAddress socketAddress = getResources().socketAddressResolver().resolve(getFirstUri()); logger.debug("Resolved SocketAddress {} using {}", socketAddress, getFirstUri()); return socketAddress; }); } return Mono.fromCallable(socketAddressSupplier::get); }); } /** * Returns an {@link Iterable} of the initial {@link RedisURI URIs}. * * @return the initial {@link RedisURI URIs} */ protected Iterable<RedisURI> getInitialUris() { return initialUris; } /** * Apply a {@link Consumer} of {@link StatefulRedisClusterConnectionImpl} to all active connections. * * @param function the {@link Consumer}. */ protected void forEachClusterConnection(Consumer<StatefulRedisClusterConnectionImpl<?, ?>> function) { forEachCloseable(input -> input instanceof StatefulRedisClusterConnectionImpl, function); } /** * Apply a {@link Consumer} of {@link StatefulRedisClusterPubSubConnectionImpl} to all active connections. * * @param function the {@link Consumer}. 
*/ protected void forEachClusterPubSubConnection(Consumer<StatefulRedisClusterPubSubConnectionImpl<?, ?>> function) { forEachCloseable(input -> input instanceof StatefulRedisClusterPubSubConnectionImpl, function); } /** * Apply a {@link Consumer} of {@link Closeable} to all active connections. * * @param <T> * @param function the {@link Consumer}. */ @SuppressWarnings("unchecked") protected <T extends Closeable> void forEachCloseable(Predicate<? super Closeable> selector, Consumer<T> function) { for (Closeable c : closeableResources) { if (selector.test(c)) { function.accept((T) c); } } } /** * Template method to create {@link ClusterTopologyRefresh}. Can be overriden by subclasses. * * @return * @since 6.0.3 */ protected ClusterTopologyRefresh createTopologyRefresh() { return ClusterTopologyRefresh.create(new NodeConnectionFactoryImpl(), getResources()); } /** * Returns {@code true} if {@link ClusterTopologyRefreshOptions#useDynamicRefreshSources() dynamic refresh sources} are * enabled. * <p> * Subclasses of {@link RedisClusterClient} may override that method. * * @return {@code true} if dynamic refresh sources are used. * @see ClusterTopologyRefreshOptions#useDynamicRefreshSources() */ protected boolean useDynamicRefreshSources() { ClusterTopologyRefreshOptions topologyRefreshOptions = getClusterClientOptions().getTopologyRefreshOptions(); return topologyRefreshOptions.useDynamicRefreshSources(); } /** * Returns a {@link String} {@link RedisCodec codec}. * * @return a {@link String} {@link RedisCodec codec}. 
* @see StringCodec#UTF8 */ protected RedisCodec<String, String> newStringStringCodec() { return StringCodec.UTF8; } /** * Resolve a {@link RedisURI} from a map of cluster views by {@link Partitions} as key * * @param map the map * @param partitions the key * @return a {@link RedisURI} or null */ private static RedisURI getViewedBy(Map<RedisURI, Partitions> map, Partitions partitions) { for (Map.Entry<RedisURI, Partitions> entry : map.entrySet()) { if (entry.getValue() == partitions) { return entry.getKey(); } } return null; } ClusterClientOptions getClusterClientOptions() { return (ClusterClientOptions) getOptions(); } protected static <T> CompletableFuture<T> transformAsyncConnectionException(CompletionStage<T> future, Iterable<RedisURI> target) { return ConnectionFuture.from(null, future.toCompletableFuture()).thenCompose((v, e) -> { if (e != null) { return Futures.failed(RedisConnectionException.create(target.toString(), e)); } return CompletableFuture.completedFuture(v); }).toCompletableFuture(); } private static <K, V> void assertNotNull(RedisCodec<K, V> codec) { LettuceAssert.notNull(codec, "RedisCodec must not be null"); } private static void assertNotEmpty(Iterable<RedisURI> redisURIs) { LettuceAssert.notNull(redisURIs, "RedisURIs must not be null"); LettuceAssert.isTrue(redisURIs.iterator().hasNext(), "RedisURIs must not be empty"); } private static RedisURI assertNotNull(RedisURI redisURI) { LettuceAssert.notNull(redisURI, "RedisURI must not be null"); return redisURI; } private static void assertNotNull(ClientResources clientResources) { LettuceAssert.notNull(clientResources, "ClientResources must not be null"); } private
RedisClusterClient
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/execution/librarycache/LibraryCacheManager.java
{ "start": 2273, "end": 2357 }
class ____. */ void shutdown(); /** Handle to retrieve a user code
loaders
java
elastic__elasticsearch
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java
{ "start": 435, "end": 756 }
class ____ extends ActionType<DeletePrivilegesResponse> { public static final DeletePrivilegesAction INSTANCE = new DeletePrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/delete"; private DeletePrivilegesAction() { super(NAME); } }
DeletePrivilegesAction
java
apache__camel
components/camel-azure/camel-azure-storage-blob/src/test/java/org/apache/camel/component/azure/storage/blob/operations/BlobOperationsTest.java
{ "start": 2970, "end": 9807 }
class ____ extends CamelTestSupport { private BlobConfiguration configuration; @Mock private BlobClientWrapper client; @BeforeEach public void setup() { configuration = new BlobConfiguration(); configuration.setAccountName("cameldev"); configuration.setContainerName("awesome2"); } @Test void testGetBlob() throws IOException { // mocking final Map<String, Object> mockedResults = new HashMap<>(); mockedResults.put("inputStream", new ByteArrayInputStream("testInput".getBytes(Charset.defaultCharset()))); mockedResults.put("properties", createBlobProperties()); when(client.openInputStream(any(), any())).thenReturn(mockedResults); final Exchange exchange = new DefaultExchange(context); // first: test with no exchange provided final BlobOperations operations = new BlobOperations(configuration, client); final BlobOperationResponse response = operations.getBlob(null); assertNotNull(response); assertNotNull(response.getBody()); assertNotNull(response.getHeaders()); assertNotNull(response.getHeaders().get(BlobConstants.CREATION_TIME)); assertEquals("testInput", new BufferedReader(new InputStreamReader((InputStream) response.getBody())).readLine()); // second: test with exchange provided configuration.setBlobType(BlobType.blockblob); final BlobOperationResponse response2 = operations.getBlob(exchange); assertNotNull(response2); assertNotNull(response2.getBody()); assertNotNull(response2.getHeaders()); assertNotNull(response2.getHeaders().get(BlobConstants.CREATION_TIME)); // third: test with exchange provided but with outputstream set // mocking final ResponseBase<BlobDownloadHeaders, Void> mockedResults2 = new ResponseBase<>( null, 200, new HttpHeaders().set("x-test-header", "123"), null, new BlobDownloadHeaders().setETag("tag1")); when(client.downloadWithResponse(any(), any(), any(), any(), anyBoolean(), any())).thenReturn(mockedResults2); exchange.getIn().setBody(new ByteArrayOutputStream()); final BlobOperationResponse response3 = operations.getBlob(exchange); 
assertNotNull(response3); assertNotNull(response3.getBody()); assertNotNull(response3.getHeaders()); assertEquals("tag1", response3.getHeaders().get(BlobConstants.E_TAG)); } @Test void testUploadBlockBlob() throws Exception { // mocking final BlockBlobItem blockBlobItem = new BlockBlobItem("testTag", OffsetDateTime.now(), null, false, null); final HttpHeaders httpHeaders = new HttpHeaders().set("x-test-header", "123"); when(client.uploadBlockBlob(any(), anyLong(), any(), any(), any(), any(), any(), any())) .thenReturn(new ResponseBase<>(null, 200, httpHeaders, blockBlobItem, null)); final Exchange exchange = new DefaultExchange(context); exchange.getIn().setBody(new ByteArrayInputStream("test".getBytes(Charset.defaultCharset()))); // test upload with input stream final BlobOperations operations = new BlobOperations(configuration, client); final BlobOperationResponse operationResponse = operations.uploadBlockBlob(exchange); assertNotNull(operationResponse); assertTrue((boolean) operationResponse.getBody()); assertNotNull(operationResponse.getHeaders()); assertEquals("testTag", operationResponse.getHeaders().get(BlobConstants.E_TAG)); assertEquals("123", ((HttpHeaders) operationResponse.getHeaders().get(BlobConstants.RAW_HTTP_HEADERS)) .get("x-test-header").getValue()); } @Test void testUploadBlockBlobWithLease() throws Exception { final BlockBlobItem blockBlobItem = new BlockBlobItem("testTag", OffsetDateTime.now(), null, false, null); final HttpHeaders httpHeaders = new HttpHeaders().set("x-test-header", "123"); when(client.uploadBlockBlob(any(), anyLong(), any(), any(), any(), any(), any(), any())) .thenReturn(new ResponseBase<>(null, 200, httpHeaders, blockBlobItem, null)); final BlobLeaseClient leaseClient = mock(BlobLeaseClient.class); when(leaseClient.acquireLease(anyInt())).thenReturn("leaseId"); when(client.getLeaseClient()).thenReturn(leaseClient); final Exchange exchange = new DefaultExchange(context); exchange.getIn().setBody(new 
ByteArrayInputStream("test".getBytes(Charset.defaultCharset()))); final BlobOperations operations = new BlobOperations(configuration, client); configuration.setLeaseBlob(true); configuration.setLeaseDurationInSeconds(-1); final BlobOperationResponse operationResponse = operations.uploadBlockBlob(exchange); assertNotNull(operationResponse); assertTrue((boolean) operationResponse.getBody()); assertNotNull(operationResponse.getHeaders()); assertEquals("testTag", operationResponse.getHeaders().get(BlobConstants.E_TAG)); assertEquals("123", ((HttpHeaders) operationResponse.getHeaders().get(BlobConstants.RAW_HTTP_HEADERS)) .get("x-test-header").getValue()); verify(client, times(1)).getLeaseClient(); verify(leaseClient, times(1)).acquireLease(-1); } @Test void testStageBlockBlobList() throws Exception { final HttpHeaders httpHeaders = new HttpHeaders().set("x-test-header", "123"); when(client.stageBlockBlob(anyString(), any(), anyLong(), any(), any(), any())).thenReturn(httpHeaders); final Exchange exchange = new DefaultExchange(context); exchange.getIn().setBody("test"); exchange.getIn().setHeader(BlobConstants.COMMIT_BLOCK_LIST_LATER, true); // test final BlobOperations operations = new BlobOperations(configuration, client); // in case of invalid payload assertThrows(IllegalArgumentException.class, () -> operations.stageBlockBlobList(exchange)); // in case of correct payload exchange.getIn().setBody(BlobBlock.createBlobBlock("1", new ByteArrayInputStream("test".getBytes()))); // test again final BlobOperationResponse response = operations.stageBlockBlobList(exchange); assertNotNull(response); assertTrue((boolean) response.getBody()); } private BlobProperties createBlobProperties() { return new BlobProperties( OffsetDateTime.now(), null, null, 0L, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); } }
BlobOperationsTest
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/naming/ListCaseTest.java
{ "start": 606, "end": 681 }
class ____ { public List<String> values; } public static
Model
java
spring-projects__spring-security
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/authentication/OAuth2ClientAuthenticationContext.java
{ "start": 2595, "end": 3453 }
class ____ extends AbstractBuilder<OAuth2ClientAuthenticationContext, Builder> { private Builder(OAuth2ClientAuthenticationToken authentication) { super(authentication); } /** * Sets the {@link RegisteredClient registered client}. * @param registeredClient the {@link RegisteredClient} * @return the {@link Builder} for further configuration */ public Builder registeredClient(RegisteredClient registeredClient) { return put(RegisteredClient.class, registeredClient); } /** * Builds a new {@link OAuth2ClientAuthenticationContext}. * @return the {@link OAuth2ClientAuthenticationContext} */ @Override public OAuth2ClientAuthenticationContext build() { Assert.notNull(get(RegisteredClient.class), "registeredClient cannot be null"); return new OAuth2ClientAuthenticationContext(getContext()); } } }
Builder
java
mockito__mockito
mockito-core/src/main/java/org/mockito/Mock.java
{ "start": 712, "end": 1050 }
class ____ readable.</li> * <li>Makes the verification error easier to read because the <b>field name</b> is used to identify the mock.</li> * <li>Automatically detects static mocks of type {@link MockedStatic} and infers the static mock type of the type parameter.</li> * </ul> * * <pre class="code"><code class="java"> * public
more
java
assertj__assertj-core
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/optional/OptionalAssert_contains_usingDefaultComparator_Test.java
{ "start": 1104, "end": 2243 }
class ____ { private final Comparator<String> STRING_COMPARATOR = Comparator.comparing(String::toLowerCase); @Test void should_succeed_if_default_equal_content() { // GIVEN OptionalAssert<String> optionalAssert = assertThat(Optional.of("hello")); String expected = "hello"; // set to different strategy optionalAssert.usingValueComparator(STRING_COMPARATOR).contains(expected); // WHEN/THEN // go back to default strategy optionalAssert.usingDefaultValueComparator().contains(expected); } @Test void should_fail_if_different_capitalisation() { // GIVEN Optional<String> actual = Optional.of("hello"); String expected = "HellO"; OptionalAssert<String> optionalAssert = assertThat(actual); optionalAssert.usingValueComparator(STRING_COMPARATOR).contains(expected); // WHEN // go back to default strategy var assertionError = expectAssertionError(() -> optionalAssert.usingDefaultValueComparator().contains(expected)); // THEN then(assertionError).hasMessageContainingAll(actual.toString(), expected); } }
OptionalAssert_contains_usingDefaultComparator_Test
java
spring-projects__spring-framework
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/MethodValidationTests.java
{ "start": 4267, "end": 17184 }
class ____ { private static final Person mockPerson = mock(Person.class); private static final Errors mockErrors = mock(Errors.class); private final MockHttpServletRequest request = new MockHttpServletRequest(); private final MockHttpServletResponse response = new MockHttpServletResponse(); private RequestMappingHandlerAdapter handlerAdapter; private InvocationCountingValidator jakartaValidator; private final TestConstraintValidator testConstraintValidator = new TestConstraintValidator(); @BeforeEach void setup() throws Exception { LocaleContextHolder.setDefaultLocale(Locale.UK); LocalValidatorFactoryBean validatorBean = new LocalValidatorFactoryBean(); validatorBean.setConstraintValidatorFactory(new TestConstraintValidatorFactory(this.testConstraintValidator)); validatorBean.afterPropertiesSet(); this.jakartaValidator = new InvocationCountingValidator(validatorBean); this.handlerAdapter = initHandlerAdapter(this.jakartaValidator); this.request.setMethod("POST"); this.request.setContentType(MediaType.APPLICATION_FORM_URLENCODED_VALUE); this.request.addHeader("Accept", "text/plain"); this.request.setAttribute(HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE, new HashMap<>(0)); } private static RequestMappingHandlerAdapter initHandlerAdapter(Validator validator) { ConfigurableWebBindingInitializer bindingInitializer = new ConfigurableWebBindingInitializer(); bindingInitializer.setValidator(validator); GenericWebApplicationContext context = new GenericWebApplicationContext(); context.refresh(); RequestMappingHandlerAdapter handlerAdapter = new RequestMappingHandlerAdapter(); handlerAdapter.setWebBindingInitializer(bindingInitializer); handlerAdapter.setApplicationContext(context); handlerAdapter.setBeanFactory(context.getBeanFactory()); handlerAdapter.setMessageConverters( List.of(new StringHttpMessageConverter(), new JacksonJsonHttpMessageConverter())); handlerAdapter.afterPropertiesSet(); return handlerAdapter; } @AfterEach void reset() { 
LocaleContextHolder.setDefaultLocale(null); } @Test void modelAttribute() { HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(mockPerson)); this.request.addParameter("name", "name=Faustino1234"); MethodArgumentNotValidException ex = catchThrowableOfType(MethodArgumentNotValidException.class, () -> this.handlerAdapter.handle(this.request, this.response, hm)); assertThat(this.jakartaValidator.getValidationCount()).isEqualTo(1); assertThat(this.jakartaValidator.getMethodValidationCount()).as("Method validation unexpected").isEqualTo(0); assertBeanResult(ex.getBindingResult(), "student", List.of(""" Field error in object 'student' on field 'name': rejected value [name=Faustino1234]; \ codes [Size.student.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [student.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]""" )); } @Test void modelAttributeWithBindingResult() throws Exception { HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(mockPerson, mockErrors)); this.request.addParameter("name", "name=Faustino1234"); this.handlerAdapter.handle(this.request, this.response, hm); assertThat(this.jakartaValidator.getValidationCount()).isEqualTo(1); assertThat(this.jakartaValidator.getMethodValidationCount()).as("Method validation unexpected").isEqualTo(0); assertThat(response.getContentAsString()).isEqualTo(""" org.springframework.validation.BeanPropertyBindingResult: 1 errors Field error in object 'student' on field 'name': rejected value [name=Faustino1234]; \ codes [Size.student.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [student.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]"""); } @Test void modelAttributeWithBindingResultAndRequestHeader() { 
HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(mockPerson, mockErrors, "")); this.request.addParameter("name", "name=Faustino1234"); this.request.addHeader("myHeader", "123"); HandlerMethodValidationException ex = catchThrowableOfType(HandlerMethodValidationException.class, () -> this.handlerAdapter.handle(this.request, this.response, hm)); assertThat(this.jakartaValidator.getValidationCount()).isEqualTo(1); assertThat(this.jakartaValidator.getMethodValidationCount()).isEqualTo(1); assertThat(ex.getParameterValidationResults()).hasSize(2); assertBeanResult(ex.getBeanResults().get(0), "student", List.of(""" Field error in object 'student' on field 'name': rejected value [name=Faustino1234]; \ codes [Size.student.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [student.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]""" )); assertValueResult(ex.getValueResults().get(0), 2, "123", List.of(""" org.springframework.validation.beanvalidation.MethodValidationAdapter$ViolationMessageSourceResolvable: \ codes [Size.validController#handle.myHeader,Size.myHeader,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [validController#handle.myHeader,myHeader]; arguments []; default message [myHeader],10,5]; \ default message [size must be between 5 and 10]""" )); } @Test void validatedWithMethodValidation() throws Exception { // 1 for @Validated argument validation + 1 for method validation of @RequestHeader this.jakartaValidator.setMaxInvocationsExpected(2); HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handleValidated(mockPerson, mockErrors, "")); this.request.addParameter("name", "name=Faustino1234"); this.request.addHeader("myHeader", "12345"); this.handlerAdapter.handle(this.request, this.response, hm); 
assertThat(jakartaValidator.getValidationCount()).isEqualTo(2); assertThat(jakartaValidator.getMethodValidationCount()).isEqualTo(1); assertThat(response.getContentAsString()).isEqualTo(""" org.springframework.validation.BeanPropertyBindingResult: 1 errors Field error in object 'person' on field 'name': rejected value [name=Faustino1234]; \ codes [Size.person.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [person.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]"""); } @Test void validateList() { HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(List.of(mockPerson, mockPerson))); this.request.setContentType(MediaType.APPLICATION_JSON_VALUE); this.request.setContent("[{\"name\":\"Faustino1234\"},{\"name\":\"Cayetana6789\"}]".getBytes(UTF_8)); HandlerMethodValidationException ex = catchThrowableOfType(HandlerMethodValidationException.class, () -> this.handlerAdapter.handle(this.request, this.response, hm)); assertThat(this.jakartaValidator.getValidationCount()).isEqualTo(1); assertThat(this.jakartaValidator.getMethodValidationCount()).isEqualTo(1); assertThat(ex.getParameterValidationResults()).hasSize(2); assertBeanResult(ex.getBeanResults().get(0), "personList", List.of(""" Field error in object 'personList' on field 'name': rejected value [Faustino1234]; \ codes [Size.personList.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [personList.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]""" )); assertBeanResult(ex.getBeanResults().get(1), "personList", List.of(""" Field error in object 'personList' on field 'name': rejected value [Cayetana6789]; \ codes [Size.personList.name,Size.name,Size.java.lang.String,Size]; \ arguments 
[org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [personList.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]""" )); } @Test void jakartaAndSpringValidator() throws Exception { HandlerMethod hm = handlerMethod(new InitBinderController(), ibc -> ibc.handle(mockPerson, mockErrors, "")); this.request.addParameter("name", "name=Faustino1234"); this.request.addHeader("myHeader", "12345"); this.handlerAdapter.handle(this.request, this.response, hm); assertThat(jakartaValidator.getValidationCount()).isEqualTo(1); assertThat(jakartaValidator.getMethodValidationCount()).isEqualTo(1); assertThat(response.getContentAsString()).isEqualTo(""" org.springframework.validation.BeanPropertyBindingResult: 2 errors Field error in object 'person' on field 'name': rejected value [name=Faustino1234]; \ codes [TOO_LONG.person.name,TOO_LONG.name,TOO_LONG.java.lang.String,TOO_LONG]; \ arguments []; default message [length must be 10 or under] Field error in object 'person' on field 'name': rejected value [name=Faustino1234]; \ codes [Size.person.name,Size.name,Size.java.lang.String,Size]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [person.name,name]; arguments []; default message [name],10,1]; \ default message [size must be between 1 and 10]"""); } @Test void springValidator() throws Exception { HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(mockPerson, mockErrors)); this.request.addParameter("name", "name=Faustino1234"); RequestMappingHandlerAdapter springValidatorHandlerAdapter = initHandlerAdapter(new PersonValidator()); springValidatorHandlerAdapter.handle(this.request, this.response, hm); assertThat(response.getContentAsString()).isEqualTo(""" org.springframework.validation.BeanPropertyBindingResult: 1 errors Field error in object 'student' on field 'name': rejected value [name=Faustino1234]; \ codes 
[TOO_LONG.student.name,TOO_LONG.name,TOO_LONG.java.lang.String,TOO_LONG]; \ arguments []; default message [length must be 10 or under]"""); } @Test // gh-34105 void typeConstraint() { this.testConstraintValidator.setReject(true); HandlerMethod hm = handlerMethod(new ValidController(), c -> c.handle(mockPerson, "")); this.request.addHeader("header", "12345"); this.request.setContentType("application/json"); this.request.setContent("{\"name\":\"Faustino\"}".getBytes(UTF_8)); HandlerMethodValidationException ex = catchThrowableOfType(HandlerMethodValidationException.class, () -> this.handlerAdapter.handle(this.request, this.response, hm)); List<ParameterValidationResult> results = ex.getParameterValidationResults(); assertThat(results).hasSize(1); ParameterValidationResult result = results.get(0); assertThat(result).isInstanceOf(ParameterErrors.class); assertBeanResult((Errors) result, "person", List.of(""" Error in object 'person': codes [TestConstraint.person,TestConstraint]; \ arguments [org.springframework.context.support.DefaultMessageSourceResolvable: \ codes [person]; arguments []; default message []]; default message [Fail message]\ """ )); } @SuppressWarnings("unchecked") private static <T> HandlerMethod handlerMethod(T controller, Consumer<T> mockCallConsumer) { Method method = ResolvableMethod.on((Class<T>) controller.getClass()).mockCall(mockCallConsumer).method(); return new HandlerMethod(controller, method).createWithValidateFlags(); } @SuppressWarnings("SameParameterValue") private static void assertBeanResult(Errors errors, String objectName, List<String> fieldErrors) { assertThat(errors.getObjectName()).isEqualTo(objectName); assertThat(errors.getAllErrors()) .extracting(ObjectError::toString) .containsExactlyInAnyOrderElementsOf(fieldErrors); } @SuppressWarnings("SameParameterValue") private static void assertValueResult( ParameterValidationResult result, int parameterIndex, Object argument, List<String> errors) { 
assertThat(result.getMethodParameter().getParameterIndex()).isEqualTo(parameterIndex); assertThat(result.getArgument()).isEqualTo(argument); assertThat(result.getResolvableErrors()) .extracting(MessageSourceResolvable::toString) .containsExactlyInAnyOrderElementsOf(errors); } @TestConstraint @SuppressWarnings("unused") private record Person(@Size(min = 1, max = 10) @JsonProperty("name") String name) { @Override public String name() { return this.name; } } @SuppressWarnings({"unused", "SameParameterValue", "UnusedReturnValue"}) @RestController static
MethodValidationTests
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesPersistentVolumesClaimsEndpointBuilderFactory.java
{ "start": 1685, "end": 12189 }
interface ____ extends EndpointProducerBuilder { default AdvancedKubernetesPersistentVolumesClaimsEndpointBuilder advanced() { return (AdvancedKubernetesPersistentVolumesClaimsEndpointBuilder) this; } /** * The Kubernetes API Version to use. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param apiVersion the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder apiVersion(String apiVersion) { doSetProperty("apiVersion", apiVersion); return this; } /** * The dns domain, used for ServiceCall EIP. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param dnsDomain the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder dnsDomain(String dnsDomain) { doSetProperty("dnsDomain", dnsDomain); return this; } /** * Default KubernetesClient to use if provided. * * The option is a: * <code>io.fabric8.kubernetes.client.KubernetesClient</code> type. * * Group: producer * * @param kubernetesClient the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) { doSetProperty("kubernetesClient", kubernetesClient); return this; } /** * Default KubernetesClient to use if provided. * * The option will be converted to a * <code>io.fabric8.kubernetes.client.KubernetesClient</code> type. * * Group: producer * * @param kubernetesClient the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder kubernetesClient(String kubernetesClient) { doSetProperty("kubernetesClient", kubernetesClient); return this; } /** * The namespace. * * The option is a: <code>java.lang.String</code> type. 
* * Group: producer * * @param namespace the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder namespace(String namespace) { doSetProperty("namespace", namespace); return this; } /** * Producer operation to do on Kubernetes. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param operation the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder operation(String operation) { doSetProperty("operation", operation); return this; } /** * The port name, used for ServiceCall EIP. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param portName the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder portName(String portName) { doSetProperty("portName", portName); return this; } /** * The port protocol, used for ServiceCall EIP. * * The option is a: <code>java.lang.String</code> type. * * Default: tcp * Group: producer * * @param portProtocol the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder portProtocol(String portProtocol) { doSetProperty("portProtocol", portProtocol); return this; } /** * The CA Cert Data. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param caCertData the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder caCertData(String caCertData) { doSetProperty("caCertData", caCertData); return this; } /** * The CA Cert File. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param caCertFile the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder caCertFile(String caCertFile) { doSetProperty("caCertFile", caCertFile); return this; } /** * The Client Cert Data. * * The option is a: <code>java.lang.String</code> type. 
* * Group: security * * @param clientCertData the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientCertData(String clientCertData) { doSetProperty("clientCertData", clientCertData); return this; } /** * The Client Cert File. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param clientCertFile the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientCertFile(String clientCertFile) { doSetProperty("clientCertFile", clientCertFile); return this; } /** * The Key Algorithm used by the client. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param clientKeyAlgo the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientKeyAlgo(String clientKeyAlgo) { doSetProperty("clientKeyAlgo", clientKeyAlgo); return this; } /** * The Client Key data. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param clientKeyData the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientKeyData(String clientKeyData) { doSetProperty("clientKeyData", clientKeyData); return this; } /** * The Client Key file. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param clientKeyFile the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientKeyFile(String clientKeyFile) { doSetProperty("clientKeyFile", clientKeyFile); return this; } /** * The Client Key Passphrase. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param clientKeyPassphrase the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder clientKeyPassphrase(String clientKeyPassphrase) { doSetProperty("clientKeyPassphrase", clientKeyPassphrase); return this; } /** * The Auth Token. 
* * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param oauthToken the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder oauthToken(String oauthToken) { doSetProperty("oauthToken", oauthToken); return this; } /** * Password to connect to Kubernetes. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param password the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder password(String password) { doSetProperty("password", password); return this; } /** * Define if the certs we used are trusted anyway or not. * * The option is a: <code>java.lang.Boolean</code> type. * * Default: false * Group: security * * @param trustCerts the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder trustCerts(Boolean trustCerts) { doSetProperty("trustCerts", trustCerts); return this; } /** * Define if the certs we used are trusted anyway or not. * * The option will be converted to a <code>java.lang.Boolean</code> * type. * * Default: false * Group: security * * @param trustCerts the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder trustCerts(String trustCerts) { doSetProperty("trustCerts", trustCerts); return this; } /** * Username to connect to Kubernetes. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param username the value to set * @return the dsl builder */ default KubernetesPersistentVolumesClaimsEndpointBuilder username(String username) { doSetProperty("username", username); return this; } } /** * Advanced builder for endpoint for the Kubernetes Persistent Volume Claim component. */ public
KubernetesPersistentVolumesClaimsEndpointBuilder
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/annotations/PropertyRef.java
{ "start": 727, "end": 988 }
class ____ { * &#64;Id Integer id; * &#64;OneToOne * &#64;PropertyRef("socialSecurityNumber") * Employee employee; * } * </pre> * Generally more useful with composite keys: * <pre> * &#64;Embeddable *
TaxDetails
java
quarkusio__quarkus
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/validation/MultipleDelegateInjectionPointsTest.java
{ "start": 1163, "end": 1498 }
class ____ implements Converter<String> { @Inject @Delegate Converter<String> delegate1; @Inject @Delegate Converter<String> delegate2; @Override public String convert(String value) { return null; } } }
DecoratorWithMultipleDelegateInjetionPoints
java
google__error-prone
core/src/test/java/com/google/errorprone/refaster/testdata/input/IfTemplateExample.java
{ "start": 783, "end": 1293 }
class ____ { public String example() { String foo; if (Math.random() < 0.5) { foo = "bar"; } else { foo = "baz"; } Comparator<String> comparator; if (true) { comparator = new Comparator<String>() { @Override public int compare(String a, String b) { return a.length() - b.length(); } }; } else { comparator = String.CASE_INSENSITIVE_ORDER; } System.out.println(comparator); return foo; } }
IfTemplateExample
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/search/vectors/AbstractIVFKnnVectorQueryTestCase.java
{ "start": 46373, "end": 48042 }
class ____ extends Query { private final FixedBitSet docs; ThrowingBitSetQuery(FixedBitSet docs) { this.docs = docs; } @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { BitSetIterator bitSetIterator = new BitSetIterator(docs, docs.approximateCardinality()) { @Override public BitSet getBitSet() { throw new UnsupportedOperationException("reusing BitSet is not supported"); } }; final var scorer = new ConstantScoreScorer(score(), scoreMode, bitSetIterator); return new DefaultScorerSupplier(scorer); } @Override public boolean isCacheable(LeafReaderContext ctx) { return false; } }; } @Override public void visit(QueryVisitor visitor) {} @Override public String toString(String field) { return "throwingBitSetQuery"; } @Override public boolean equals(Object other) { return sameClassAs(other) && docs.equals(((ThrowingBitSetQuery) other).docs); } @Override public int hashCode() { return 31 * classHash() + docs.hashCode(); } } }
ThrowingBitSetQuery
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/language/simple/SimpleTest.java
{ "start": 101659, "end": 102015 }
class ____ { private final int id; private final String name; public OrderLine(int id, String name) { this.id = id; this.name = name; } public int getId() { return id; } public String getName() { return name; } } public static
OrderLine
java
quarkusio__quarkus
integration-tests/oidc-wiremock/src/main/java/io/quarkus/it/keycloak/SecurityEventListener.java
{ "start": 374, "end": 935 }
class ____ { public void event(@Observes AuthenticationFailureEvent event) { RoutingContext vertxContext = (RoutingContext) event.getEventProperties() .get(RoutingContext.class.getName()); AuthenticationFailedException ex = (AuthenticationFailedException) event.getAuthenticationFailure(); if ("expired".equals(ex.getAttribute(OidcConstants.ACCESS_TOKEN_VALUE))) { vertxContext.response().setStatusCode(401); vertxContext.response().end("Token: expired"); } } }
SecurityEventListener
java
apache__flink
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/FirstValueAggFunctionWithOrderTest.java
{ "start": 1916, "end": 1982 }
class ____ `accumulate` method with * order argument. */ final
tests