language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
apache__flink
flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/aggregate/AbstractPythonStreamAggregateOperator.java
{ "start": 2961, "end": 12386 }
class ____ extends AbstractOneInputPythonFunctionOperator<RowData, RowData> { private static final long serialVersionUID = 1L; @VisibleForTesting static final byte NORMAL_RECORD = 0; @VisibleForTesting static final byte TRIGGER_TIMER = 1; private final PythonAggregateFunctionInfo[] aggregateFunctions; private final DataViewSpec[][] dataViewSpecs; /** The input logical type. */ protected final RowType inputType; /** The output logical type. */ protected final RowType outputType; /** The array of the key indexes. */ private final int[] grouping; /** The index of a count aggregate used to calculate the number of accumulated rows. */ private final int indexOfCountStar; /** Generate retract messages if true. */ private final boolean generateUpdateBefore; /** The maximum NUMBER of the states cached in Python side. */ private final int stateCacheSize; /** The maximum number of cached entries in a single Python MapState. */ private final int mapStateReadCacheSize; private final int mapStateWriteCacheSize; private transient Object keyForTimerService; /** The user-defined function input logical type. */ protected transient RowType userDefinedFunctionInputType; /** The user-defined function output logical type. */ protected transient RowType userDefinedFunctionOutputType; /** The TypeSerializer for udf execution results. */ transient TypeSerializer<RowData> udfOutputTypeSerializer; /** The TypeSerializer for udf input elements. */ transient TypeSerializer<RowData> udfInputTypeSerializer; /** Reusable InputStream used to holding the execution results to be deserialized. */ protected transient ByteArrayInputStreamWithPos bais; /** InputStream Wrapper. */ protected transient DataInputViewStreamWrapper baisWrapper; /** Reusable OutputStream used to holding the serialized input elements. */ protected transient ByteArrayOutputStreamWithPos baos; /** OutputStream Wrapper. */ protected transient DataOutputViewStreamWrapper baosWrapper; /** The collector used to collect records. 
*/ protected transient StreamRecordRowDataWrappingCollector rowDataWrapper; public AbstractPythonStreamAggregateOperator( Configuration config, RowType inputType, RowType outputType, PythonAggregateFunctionInfo[] aggregateFunctions, DataViewSpec[][] dataViewSpecs, int[] grouping, int indexOfCountStar, boolean generateUpdateBefore) { super(config); this.inputType = Preconditions.checkNotNull(inputType); this.outputType = Preconditions.checkNotNull(outputType); this.aggregateFunctions = aggregateFunctions; this.dataViewSpecs = dataViewSpecs; this.grouping = grouping; this.indexOfCountStar = indexOfCountStar; this.generateUpdateBefore = generateUpdateBefore; this.stateCacheSize = config.get(PythonOptions.STATE_CACHE_SIZE); this.mapStateReadCacheSize = config.get(PythonOptions.MAP_STATE_READ_CACHE_SIZE); this.mapStateWriteCacheSize = config.get(PythonOptions.MAP_STATE_WRITE_CACHE_SIZE); } @Override @SuppressWarnings("unchecked") public void open() throws Exception { bais = new ByteArrayInputStreamWithPos(); baisWrapper = new DataInputViewStreamWrapper(bais); baos = new ByteArrayOutputStreamWithPos(); baosWrapper = new DataOutputViewStreamWrapper(baos); userDefinedFunctionInputType = createUserDefinedFunctionInputType(); udfInputTypeSerializer = PythonTypeUtils.toInternalSerializer(userDefinedFunctionInputType); userDefinedFunctionOutputType = createUserDefinedFunctionOutputType(); udfOutputTypeSerializer = PythonTypeUtils.toInternalSerializer(userDefinedFunctionOutputType); rowDataWrapper = new StreamRecordRowDataWrappingCollector(output); super.open(); } @Override public void processElement(StreamRecord<RowData> element) throws Exception { RowData value = element.getValue(); processElementInternal(value); elementCount++; checkInvokeFinishBundleByCount(); emitResults(); } @Override public PythonFunctionRunner createPythonFunctionRunner() throws Exception { return BeamTablePythonFunctionRunner.stateful( getContainingTask().getEnvironment(), 
getRuntimeContext().getTaskInfo().getTaskName(), createPythonEnvironmentManager(), getFunctionUrn(), getUserDefinedFunctionsProto(), getFlinkMetricContainer(), getKeyedStateBackend(), getKeySerializer(), getWindowSerializer(), getContainingTask().getEnvironment().getMemoryManager(), getOperatorConfig() .getManagedMemoryFractionOperatorUseCaseOfSlot( ManagedMemoryUseCase.PYTHON, getContainingTask().getJobConfiguration(), getContainingTask() .getEnvironment() .getTaskManagerInfo() .getConfiguration(), getContainingTask() .getEnvironment() .getUserCodeClassLoader() .asClassLoader()), createInputCoderInfoDescriptor(userDefinedFunctionInputType), createOutputCoderInfoDescriptor(userDefinedFunctionOutputType)); } /** * As the beam state gRPC service will access the KeyedStateBackend in parallel with this * operator, we must override this method to prevent changing the current key of the * KeyedStateBackend while the beam service is handling requests. */ @Override public void setCurrentKey(Object key) { keyForTimerService = key; } @Override public Object getCurrentKey() { return keyForTimerService; } @Override public PythonEnv getPythonEnv() { return aggregateFunctions[0].getPythonFunction().getPythonEnv(); } @VisibleForTesting TypeSerializer getKeySerializer() { return PythonTypeUtils.toInternalSerializer(getKeyType()); } protected RowType getKeyType() { return (RowType) Projection.of(grouping).project(inputType); } TypeSerializer getWindowSerializer() { return null; } /** * Gets the proto representation of the Python user-defined aggregate functions to be executed. 
*/ protected FlinkFnApi.UserDefinedAggregateFunctions getUserDefinedFunctionsProto() { FlinkFnApi.UserDefinedAggregateFunctions.Builder builder = FlinkFnApi.UserDefinedAggregateFunctions.newBuilder(); builder.setMetricEnabled(config.get(PYTHON_METRIC_ENABLED)); builder.setProfileEnabled(config.get(PYTHON_PROFILE_ENABLED)); builder.addAllGrouping(Arrays.stream(grouping).boxed().collect(Collectors.toList())); builder.setGenerateUpdateBefore(generateUpdateBefore); builder.setIndexOfCountStar(indexOfCountStar); builder.setKeyType(toProtoType(getKeyType())); builder.setStateCacheSize(stateCacheSize); builder.setMapStateReadCacheSize(mapStateReadCacheSize); builder.setMapStateWriteCacheSize(mapStateWriteCacheSize); for (int i = 0; i < aggregateFunctions.length; i++) { DataViewSpec[] specs = null; if (i < dataViewSpecs.length) { specs = dataViewSpecs[i]; } builder.addUdfs( ProtoUtils.createUserDefinedAggregateFunctionProto( aggregateFunctions[i], specs)); } builder.addAllJobParameters( getRuntimeContext().getGlobalJobParameters().entrySet().stream() .map( entry -> FlinkFnApi.JobParameter.newBuilder() .setKey(entry.getKey()) .setValue(entry.getValue()) .build()) .collect(Collectors.toList())); return builder.build(); } public abstract String getFunctionUrn(); public abstract void processElementInternal(RowData value) throws Exception; public abstract RowType createUserDefinedFunctionInputType(); public abstract RowType createUserDefinedFunctionOutputType(); public FlinkFnApi.CoderInfoDescriptor createInputCoderInfoDescriptor(RowType runnerInputType) { return createRowTypeCoderInfoDescriptorProto( runnerInputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false); } public FlinkFnApi.CoderInfoDescriptor createOutputCoderInfoDescriptor(RowType runnerOutType) { return createRowTypeCoderInfoDescriptorProto( runnerOutType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false); } }
AbstractPythonStreamAggregateOperator
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/nestedsource/parameter/LetterEntity.java
{ "start": 246, "end": 1274 }
class ____ { private String fontType; private int fontSize; private String letterHeading; private String letterBody; private String letterSignature; public String getFontType() { return fontType; } public void setFontType(String fontType) { this.fontType = fontType; } public int getFontSize() { return fontSize; } public void setFontSize(int fontSize) { this.fontSize = fontSize; } public String getLetterHeading() { return letterHeading; } public void setLetterHeading(String letterHeading) { this.letterHeading = letterHeading; } public String getLetterBody() { return letterBody; } public void setLetterBody(String letterBody) { this.letterBody = letterBody; } public String getLetterSignature() { return letterSignature; } public void setLetterSignature(String letterSignature) { this.letterSignature = letterSignature; } }
LetterEntity
java
google__guice
core/src/com/google/inject/Key.java
{ "start": 13782, "end": 14898 }
class ____ implements AnnotationStrategy { final Annotation annotation; AnnotationInstanceStrategy(Annotation annotation) { this.annotation = checkNotNull(annotation, "annotation"); } @Override public boolean hasAttributes() { return true; } @Override public AnnotationStrategy withoutAttributes() { return new AnnotationTypeStrategy(getAnnotationType(), annotation); } @Override public Annotation getAnnotation() { return annotation; } @Override public Class<? extends Annotation> getAnnotationType() { return annotation.annotationType(); } @Override public boolean equals(Object o) { if (!(o instanceof AnnotationInstanceStrategy)) { return false; } AnnotationInstanceStrategy other = (AnnotationInstanceStrategy) o; return annotation.equals(other.annotation); } @Override public int hashCode() { return annotation.hashCode(); } @Override public String toString() { return annotation.toString(); } } static
AnnotationInstanceStrategy
java
quarkusio__quarkus
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/generatedresolvers/HierarchyTest.java
{ "start": 683, "end": 2176 }
class ____ { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withApplicationRoot((jar) -> jar .addClasses(Foo.class, Bar.class) .addAsResource(new StringAsset("{foo.name}"), "templates/test.html")); @Inject Template test; @Inject Engine engine; @Test public void testGeneratedResolvers() { List<ValueResolver> resolvers = engine.getValueResolvers(); ValueResolver fooResolver = null; ValueResolver barResolver = null; for (ValueResolver valueResolver : resolvers) { if (valueResolver.getClass().getName().endsWith(ValueResolverGenerator.SUFFIX) && valueResolver.getClass().getName().contains("Foo")) { fooResolver = valueResolver; } if (valueResolver.getClass().getName().endsWith(ValueResolverGenerator.SUFFIX) && valueResolver.getClass().getName().contains("Bar")) { barResolver = valueResolver; } } assertNotNull(fooResolver); assertNotNull(barResolver); assertTrue(barResolver.getPriority() > fooResolver.getPriority(), "Bar resolver priority " + barResolver.getPriority() + " is not higher than Foo resolver priority " + fooResolver.getPriority()); assertEquals("bar", test.data("foo", new Bar()).render()); } @TemplateData public static
HierarchyTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java
{ "start": 4557, "end": 8532 }
class ____ extends Scorer { private final AbstractLongFieldScript script; private final TwoPhaseIterator twoPhase; private final DocIdSetIterator disi; private final float weight; protected DistanceScorer(AbstractLongFieldScript script, int maxDoc, float boost) { this.script = script; twoPhase = new TwoPhaseIterator(DocIdSetIterator.all(maxDoc)) { @Override public boolean matches() { return GeoPointScriptFieldDistanceFeatureQuery.this.matches(script, approximation.docID()); } @Override public float matchCost() { return MATCH_COST; } }; disi = TwoPhaseIterator.asDocIdSetIterator(twoPhase); this.weight = boost; } @Override public int docID() { return disi.docID(); } @Override public float score() throws IOException { if (script.count() == 0) { return 0; } return GeoPointScriptFieldDistanceFeatureQuery.this.score(weight, getDistance(script)); } @Override public float getMaxScore(int upTo) { return weight; } @Override public DocIdSetIterator iterator() { return disi; } @Override public TwoPhaseIterator twoPhaseIterator() { return twoPhase; } } private double getDistance(AbstractLongFieldScript script) { double minDistance = Double.POSITIVE_INFINITY; for (int i = 0; i < script.count(); i++) { minDistance = Math.min(minDistance, getDistanceFromEncoded(script.values()[i])); } return minDistance; } private double getDistanceFromEncoded(long encoded) { int latitudeBits = (int) (encoded >> 32); int longitudeBits = (int) (encoded & 0xFFFFFFFF); double lat = GeoEncodingUtils.decodeLatitude(latitudeBits); double lon = GeoEncodingUtils.decodeLongitude(longitudeBits); return SloppyMath.haversinMeters(originLat, originLon, lat, lon); } long valueWithMinAbsoluteDistance(AbstractLongFieldScript script) { double minDistance = Double.POSITIVE_INFINITY; long minDistanceValue = Long.MAX_VALUE; for (int i = 0; i < script.count(); i++) { double distance = getDistanceFromEncoded(script.values()[i]); if (distance < minDistance) { minDistance = distance; minDistanceValue = 
script.values()[i]; } } return minDistanceValue; } float score(float weight, double distance) { return (float) (weight * (pivotDistance / (pivotDistance + distance))); } @Override public String toString(String field) { StringBuilder b = new StringBuilder(); if (false == fieldName().equals(field)) { b.append(fieldName()).append(":"); } b.append(getClass().getSimpleName()); b.append("(lat=").append(originLat); b.append(",lon=").append(originLon); b.append(",pivot=").append(pivotDistance).append(")"); return b.toString(); } @Override public int hashCode() { return Objects.hash(super.hashCode(), originLat, originLon, pivotDistance); } @Override public boolean equals(Object obj) { if (false == super.equals(obj)) { return false; } GeoPointScriptFieldDistanceFeatureQuery other = (GeoPointScriptFieldDistanceFeatureQuery) obj; return originLon == other.originLon && originLat == other.originLat && pivotDistance == other.pivotDistance; } @Override public void visit(QueryVisitor visitor) { if (visitor.acceptField(fieldName())) { visitor.visitLeaf(this); } } }
DistanceScorer
java
spring-projects__spring-framework
spring-context/src/test/java/org/springframework/validation/beanvalidation/BeanValidationBeanRegistrationAotProcessorTests.java
{ "start": 8383, "end": 8437 }
interface ____ { Exists[] value(); } } static
List
java
quarkusio__quarkus
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/applicationfieldaccess/PublicFieldAccessAssociationsTest.java
{ "start": 6307, "end": 11168 }
enum ____ { ONE_TO_ONE { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.oneToOne = containedEntity; } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { // No expectations regarding laziness on ToOne associations assertThat(entity.oneToOne).isEqualTo(containedEntity); consumeValue(entity.oneToOne); } }, MANY_TO_ONE { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.manyToOne = containedEntity; } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { // No expectations regarding laziness on ToOne associations assertThat(entity.manyToOne).isEqualTo(containedEntity); consumeValue(entity.manyToOne); } }, ONE_TO_MANY { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.oneToMany.add(containedEntity); } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { assertThat((Object) entity.oneToMany).returns(false, Hibernate::isInitialized); assertThat(entity.oneToMany).containsExactly(containedEntity); assertThat((Object) entity.oneToMany).returns(true, Hibernate::isInitialized); } }, MANY_TO_MANY { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.manyToMany.add(containedEntity); } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { assertThat((Object) entity.manyToMany).returns(false, Hibernate::isInitialized); assertThat(entity.manyToMany).containsExactly(containedEntity); assertThat((Object) entity.manyToMany).returns(true, Hibernate::isInitialized); } }, ONE_TO_ONE_MAPPED_BY { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.oneToOneMappedBy = containedEntity; containedEntity.oneToOne = entity; } @Override public void 
assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { // No expectations regarding laziness on ToOne associations assertThat(entity.oneToOneMappedBy).isEqualTo(containedEntity); consumeValue(entity.oneToOneMappedBy); } }, ONE_TO_MANY_MAPPED_BY { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.oneToManyMappedBy.add(containedEntity); containedEntity.manyToOne = entity; } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { assertThat((Object) entity.oneToManyMappedBy).returns(false, Hibernate::isInitialized); assertThat(entity.oneToManyMappedBy).containsExactly(containedEntity); assertThat((Object) entity.oneToManyMappedBy).returns(true, Hibernate::isInitialized); } }, MANY_TO_MANY_MAPPED_BY { @Override public void setValue(ContainingEntity entity, ContainedEntity containedEntity) { entity.manyToManyMappedBy.add(containedEntity); containedEntity.manyToMany.add(entity); } @Override public void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity) { assertThat((Object) entity.manyToManyMappedBy).returns(false, Hibernate::isInitialized); assertThat(entity.manyToManyMappedBy).containsExactly(containedEntity); assertThat((Object) entity.manyToManyMappedBy).returns(true, Hibernate::isInitialized); } }; protected void consumeValue(ContainedEntity entity) { assertThat(entity.value).isEqualTo(CONTAINED_VALUE); } public abstract void setValue(ContainingEntity entity, ContainedEntity containedEntity); public abstract void assertValueAndLaziness(ContainingEntity entity, ContainedEntity containedEntity); } }
FieldAccessEnhancedDelegate
java
apache__camel
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/MongodbComponentBuilderFactory.java
{ "start": 1856, "end": 5961 }
interface ____ extends ComponentBuilder<MongoDbComponent> { /** * Shared client used for connection. All endpoints generated from the * component will share this connection client. * * The option is a: * &lt;code&gt;com.mongodb.client.MongoClient&lt;/code&gt; type. * * Group: common * * @param mongoConnection the value to set * @return the dsl builder */ default MongodbComponentBuilder mongoConnection(com.mongodb.client.MongoClient mongoConnection) { doSetProperty("mongoConnection", mongoConnection); return this; } /** * Allows for bridging the consumer to the Camel routing Error Handler, * which mean any exceptions (if possible) occurred while the Camel * consumer is trying to pickup incoming messages, or the likes, will * now be processed as a message and handled by the routing Error * Handler. Important: This is only possible if the 3rd party component * allows Camel to be alerted if an exception was thrown. Some * components handle this internally only, and therefore * bridgeErrorHandler is not possible. In other situations we may * improve the Camel component to hook into the 3rd party component and * make this possible for future releases. By default the consumer will * use the org.apache.camel.spi.ExceptionHandler to deal with * exceptions, that will be logged at WARN or ERROR level and ignored. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: consumer * * @param bridgeErrorHandler the value to set * @return the dsl builder */ default MongodbComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) { doSetProperty("bridgeErrorHandler", bridgeErrorHandler); return this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. 
By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: producer * * @param lazyStartProducer the value to set * @return the dsl builder */ default MongodbComponentBuilder lazyStartProducer(boolean lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * Whether autowiring is enabled. This is used for automatic autowiring * options (the option must be marked as autowired) by looking up in the * registry to find if there is a single instance of matching type, * which then gets configured on the component. This can be used for * automatic configuring JDBC data sources, JMS connection factories, * AWS Clients, etc. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: true * Group: advanced * * @param autowiredEnabled the value to set * @return the dsl builder */ default MongodbComponentBuilder autowiredEnabled(boolean autowiredEnabled) { doSetProperty("autowiredEnabled", autowiredEnabled); return this; } }
MongodbComponentBuilder
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/bug/Issue5845.java
{ "start": 243, "end": 1559 }
class ____ extends TestCase { public void test_for_issue() throws Exception { String sql = "delete from table01 t where t.id=1"; List<DbType> dbTypes = new ArrayList<>(); for (DbType dbType : DbType.values()) { try { String mergeSql = ParameterizedOutputVisitorUtils.parameterize(sql, dbType); System.out.println(dbType + "==" + mergeSql); dbTypes.add(dbType); } catch (Exception e) { System.out.println(dbType + "==" + e.getMessage()); } } for (DbType dbT : dbTypes) { System.out.println("DbType." + dbT + ","); } for (DbType dbType : new DbType[]{DbType.db2, DbType.postgresql, DbType.oracle, DbType.mysql, DbType.mariadb, DbType.oceanbase, DbType.edb, DbType.elastic_search, DbType.drds, DbType.oceanbase_oracle, DbType.greenplum, DbType.gaussdb, DbType.tidb, DbType.goldendb, }) { String mergeSql = ParameterizedOutputVisitorUtils.parameterize(sql, dbType); System.out.println(dbType + "==" + mergeSql); dbTypes.add(dbType); } } }
Issue5845
java
google__dagger
dagger-compiler/main/java/dagger/internal/codegen/validation/BindsInstanceParameterValidator.java
{ "start": 1176, "end": 1565 }
class ____ extends BindsInstanceElementValidator<XExecutableParameterElement> { @Inject BindsInstanceParameterValidator(InjectionAnnotations injectionAnnotations) { super(injectionAnnotations); } @Override protected ElementValidator elementValidator(XExecutableParameterElement parameter) { return new Validator(parameter); } private
BindsInstanceParameterValidator
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_2541/Issue2541Mapper.java
{ "start": 302, "end": 555 }
interface ____ { Issue2541Mapper INSTANCE = Mappers.getMapper( Issue2541Mapper.class ); Target map(Source source); default <T> Optional<T> toOptional(@Nullable T value) { return Optional.ofNullable( value ); }
Issue2541Mapper
java
spring-projects__spring-framework
spring-messaging/src/test/java/org/springframework/messaging/simp/stomp/StompClientSupportTests.java
{ "start": 935, "end": 2459 }
class ____ { private final StompClientSupport stompClient = new StompClientSupport() {}; @Test void defaultHeartbeatValidation() { trySetDefaultHeartbeat(new long[] {-1, 0}); trySetDefaultHeartbeat(new long[] {0, -1}); } private void trySetDefaultHeartbeat(long[] heartbeat) { assertThatIllegalArgumentException().isThrownBy(() -> this.stompClient.setDefaultHeartbeat(heartbeat)); } @Test void defaultHeartbeatValue() { assertThat(this.stompClient.getDefaultHeartbeat()).isEqualTo(new long[] {10000, 10000}); } @Test void isDefaultHeartbeatEnabled() { assertThat(this.stompClient.getDefaultHeartbeat()).isEqualTo(new long[] {10000, 10000}); assertThat(this.stompClient.isDefaultHeartbeatEnabled()).isTrue(); this.stompClient.setDefaultHeartbeat(new long[] {0, 0}); assertThat(this.stompClient.isDefaultHeartbeatEnabled()).isFalse(); } @Test void processConnectHeadersDefault() { StompHeaders connectHeaders = this.stompClient.processConnectHeaders(null); assertThat(connectHeaders).isNotNull(); assertThat(connectHeaders.getHeartbeat()).isEqualTo(new long[] {10000, 10000}); } @Test void processConnectHeadersWithExplicitHeartbeat() { StompHeaders connectHeaders = new StompHeaders(); connectHeaders.setHeartbeat(new long[] {15000, 15000}); connectHeaders = this.stompClient.processConnectHeaders(connectHeaders); assertThat(connectHeaders).isNotNull(); assertThat(connectHeaders.getHeartbeat()).isEqualTo(new long[] {15000, 15000}); } }
StompClientSupportTests
java
apache__flink
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/ml/MLPredictRunner.java
{ "start": 1686, "end": 3174 }
class ____ extends AbstractFunctionRunner { private final GeneratedCollector<ListenableCollector<RowData>> generatedCollector; protected transient ListenableCollector<RowData> collector; public MLPredictRunner( GeneratedFunction<FlatMapFunction<RowData, RowData>> generatedFetcher, GeneratedCollector<ListenableCollector<RowData>> generatedCollector) { super(generatedFetcher); this.generatedCollector = generatedCollector; } @Override public void open(OpenContext openContext) throws Exception { super.open(openContext); this.collector = generatedCollector.newInstance(getRuntimeContext().getUserCodeClassLoader()); FunctionUtils.setFunctionRuntimeContext(collector, getRuntimeContext()); FunctionUtils.openFunction(collector, openContext); } @Override public void processElement( RowData in, ProcessFunction<RowData, RowData>.Context ctx, Collector<RowData> out) throws Exception { prepareCollector(in, out); fetcher.flatMap(in, collector); } @Override public void close() throws Exception { super.close(); if (collector != null) { FunctionUtils.closeFunction(collector); } } public void prepareCollector(RowData in, Collector<RowData> out) { collector.setCollector(out); collector.setInput(in); collector.reset(); } }
MLPredictRunner
java
reactor__reactor-core
reactor-core/src/test/java/reactor/core/publisher/FluxRepeatPredicateTest.java
{ "start": 1149, "end": 5210 }
class ____ { @Test public void predicateNull() { assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> { Flux.never() .repeat(null); }); } @Test public void nMinusOne() { Flux<Integer> source = Flux.just(1, 2, 3); assertThatIllegalArgumentException() .isThrownBy(() -> source.repeat(-1, () -> true)) .withMessage("numRepeat >= 0 required"); } @Test public void nZero() { StepVerifier.create(Flux.just(1, 2, 3) .repeat(0, () -> true)) .expectNext(1, 2, 3) .verifyComplete(); } @Test public void nOne() { StepVerifier.create(Flux.just(1, 2, 3) .repeat(1, () -> true)) .expectNext(1, 2, 3) .expectNext(1, 2, 3) .verifyComplete(); } @Test public void nTwo() { StepVerifier.create(Flux.just(1, 2, 3) .repeat(2, () -> true)) .expectNext(1, 2, 3) .expectNext(1, 2, 3) .expectNext(1, 2, 3) .verifyComplete(); } @Test public void normal() { int[] times = {1}; AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux.range(1, 5) .repeat(() -> times[0]-- > 0) .subscribe(ts); ts.assertValues(1, 2, 3, 4, 5, 1, 2, 3, 4, 5) .assertNoError() .assertComplete(); } @Test public void normalBackpressured() { int[] times = {1}; AssertSubscriber<Integer> ts = AssertSubscriber.create(0); Flux.range(1, 5) .repeat(() -> times[0]-- > 0) .subscribe(ts); ts.assertNoValues() .assertNoError() .assertNotComplete(); ts.request(2); ts.assertValues(1, 2) .assertNoError() .assertNotComplete(); ts.request(5); ts.assertValues(1, 2, 3, 4, 5, 1, 2) .assertNoError() .assertNotComplete(); ts.request(10); ts.assertValues(1, 2, 3, 4, 5, 1, 2, 3, 4, 5) .assertNoError() .assertComplete(); } @Test public void dontRepeat() { AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux.range(1, 5) .repeat(() -> false) .subscribe(ts); ts.assertValues(1, 2, 3, 4, 5) .assertNoError() .assertComplete(); } @Test public void predicateThrows() { AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux.range(1, 5) .repeat(() -> { throw new RuntimeException("forced failure"); }) .subscribe(ts); 
ts.assertValues(1, 2, 3, 4, 5) .assertError(RuntimeException.class) .assertErrorMessage("forced failure") .assertNotComplete(); } @Test public void alwaysTrueWithNSimilarToSimpleN() { List<Integer> expected = Flux.just(1, 2, 3).repeat(3).collectList().block(); List<Integer> result = Flux.just(1, 2, 3).repeat(3, () -> true).collectList().block(); assertThat(result).containsExactlyElementsOf(expected); } @Test public void alwaysFalseWithNSimilarToSimpleZero() { List<Integer> expected = Flux.just(1, 2, 3).repeat(0).collectList().block(); List<Integer> result = Flux.just(1, 2, 3).repeat(3, () -> false).collectList().block(); assertThat(result).containsExactlyElementsOf(expected); } @Test public void scanOperator(){ Flux<Integer> parent = Flux.just(1); FluxRepeatPredicate<Integer> test = new FluxRepeatPredicate<>(parent, () -> true); assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent); assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC); } @Test public void scanSubscriber(){ Flux<Integer> source = Flux.just(1); CoreSubscriber<Integer> actual = new LambdaSubscriber<>(null, e -> {}, null, null); FluxRepeatPredicate.RepeatPredicateSubscriber<Integer> test = new FluxRepeatPredicate.RepeatPredicateSubscriber<>(source, actual, () -> true); Subscription parent = Operators.emptySubscription(); test.onSubscribe(parent); assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent); assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual); assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC); } }
FluxRepeatPredicateTest
java
google__dagger
javatests/dagger/functional/membersinject/MembersInjectGenericParent.java
{ "start": 676, "end": 736 }
class ____<T> { @Inject T t; }
MembersInjectGenericParent
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/customsql/CustomSqlGeneratedTest.java
{ "start": 1076, "end": 2444 }
class ____ { @Test public void testCustomSqlWithGenerated(SessionFactoryScope scope) { Custom c = new Custom(); c.name = "name"; c.text = "text"; scope.inTransaction(s->{ s.persist(c); s.flush(); Custom cc = s.find(Custom.class, c.id); assertEquals(cc.text, "TEXT"); assertEquals(cc.name, "NAME"); cc.name = "eman"; cc.text = "more text"; s.flush(); cc = s.find(Custom.class, c.id); assertThat(cc.text ).isEqualTo( "MORE TEXT"); assertThat( cc.name ).isEqualTo( "EMAN" ); s.remove(cc); s.flush(); cc = s.find(Custom.class, c.id); assertEquals(cc.text, "DELETED"); assertEquals(cc.name, "DELETED"); }); } @Entity @Table(name = "CustomPrimary") @SecondaryTable(name = "CustomSecondary") @SQLInsert(sql="insert into CustomPrimary (name, revision, id) values (upper(?),?,?)") @SQLInsert(table = "CustomSecondary", sql="insert into CustomSecondary (text, id) values (upper(?),?)") @SQLUpdate(sql="update CustomPrimary set name = upper(?), revision = ? where id = ? and revision = ?") @SQLUpdate(table = "CustomSecondary", sql="update CustomSecondary set text = upper(?) where id = ?") @SQLDelete(sql="update CustomPrimary set name = 'DELETED' where id = ? and revision = ?") @SQLDelete(table = "CustomSecondary", sql="update CustomSecondary set text = 'DELETED' where id = ?") static
CustomSqlGeneratedTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/cluster/ClusterStatePublicationEvent.java
{ "start": 866, "end": 6704 }
class ____ { /** * Sentinel value so that we can assert each field is set once and only once on each successful event, and at most once on each failure. */ private static final long NOT_SET = -1L; private final BatchSummary summary; private final ClusterState oldState; private final ClusterState newState; private final Task task; private final long computationTimeMillis; private final long publicationStartTimeMillis; private volatile long publicationContextConstructionElapsedMillis = NOT_SET; private volatile long publicationCommitElapsedMillis = NOT_SET; private volatile long publicationCompletionElapsedMillis = NOT_SET; private volatile long masterApplyElapsedMillis = NOT_SET; public ClusterStatePublicationEvent( BatchSummary summary, ClusterState oldState, ClusterState newState, Task task, long computationTimeMillis, long publicationStartTimeMillis ) { this.summary = summary; this.oldState = oldState; this.newState = newState; this.task = task; this.computationTimeMillis = computationTimeMillis; this.publicationStartTimeMillis = publicationStartTimeMillis; } public BatchSummary getSummary() { return summary; } public ClusterState getOldState() { return oldState; } public ClusterState getNewState() { return newState; } public Task getTask() { return task; } public long getComputationTimeMillis() { return computationTimeMillis; } public long getPublicationStartTimeMillis() { return publicationStartTimeMillis; } public void setPublicationContextConstructionElapsedMillis(long millis) { assert millis >= 0; assert publicationContextConstructionElapsedMillis == NOT_SET; publicationContextConstructionElapsedMillis = millis; } public void setPublicationCommitElapsedMillis(long millis) { assert millis >= 0; assert publicationCommitElapsedMillis == NOT_SET; publicationCommitElapsedMillis = millis; } public void setPublicationCompletionElapsedMillis(long millis) { assert millis >= 0; assert publicationCompletionElapsedMillis == NOT_SET; publicationCompletionElapsedMillis = 
millis; } public void setMasterApplyElapsedMillis(long millis) { assert millis >= 0; assert masterApplyElapsedMillis == NOT_SET; masterApplyElapsedMillis = millis; } /** * @return how long in milliseconds it took to construct the publication context, which includes computing a cluster state diff and * serializing the cluster states for future transmission. */ public long getPublicationContextConstructionElapsedMillis() { assert publicationContextConstructionElapsedMillis != NOT_SET; return publicationContextConstructionElapsedMillis; } /** * @return how long in milliseconds it took to commit the publication, i.e. the elapsed time from the publication start until the master * receives publish responses from a majority of master nodes indicating that the state has been received and persisted there. */ public long getPublicationCommitElapsedMillis() { assert publicationCommitElapsedMillis != NOT_SET; return publicationCommitElapsedMillis; } /** * @return how long in milliseconds it took to complete the publication, i.e. the elapsed time from the publication start until all * nodes except the master have applied the cluster state. */ public long getPublicationCompletionElapsedMillis() { assert publicationCompletionElapsedMillis != NOT_SET; return publicationCompletionElapsedMillis; } /** * @return how long in milliseconds it took for the master to apply the cluster state, which happens after publication completion. */ public long getMasterApplyElapsedMillis() { assert masterApplyElapsedMillis != NOT_SET; return masterApplyElapsedMillis; } /** * @return how long in milliseconds it took to construct the publication context, which includes computing a cluster state diff and * serializing the cluster states for future transmission, or zero if not set. */ public long maybeGetPublicationContextConstructionElapsedMillis() { return ifSet(publicationContextConstructionElapsedMillis); } /** * @return how long in milliseconds it took to commit the publication, i.e. 
the elapsed time from the publication start until the master * receives publish responses from a majority of master nodes indicating that the state has been received and persisted there, * or zero if not set. */ public long maybeGetPublicationCommitElapsedMillis() { return ifSet(publicationCommitElapsedMillis); } /** * @return how long in milliseconds it took to complete the publication, i.e. the elapsed time from the publication start until all * nodes except the master have applied the cluster state, or zero if not set. */ public long maybeGetPublicationCompletionElapsedMillis() { return ifSet(publicationCompletionElapsedMillis); } /** * @return how long in milliseconds it took for the master to apply the cluster state, which happens after publication completion, or * zero if not set. */ public long maybeGetMasterApplyElapsedMillis() { return ifSet(masterApplyElapsedMillis); } private static long ifSet(long millis) { assert millis == NOT_SET || millis >= 0; return millis == NOT_SET ? 0 : millis; } }
ClusterStatePublicationEvent
java
apache__dubbo
dubbo-common/src/test/java/org/apache/dubbo/common/extension/ext11_no_adaptive/NoAdaptiveExt.java
{ "start": 961, "end": 1017 }
interface ____ { String echo(String s); }
NoAdaptiveExt
java
grpc__grpc-java
xds/src/main/java/io/grpc/xds/WrrLocalityLoadBalancer.java
{ "start": 1668, "end": 5498 }
class ____ extends LoadBalancer { private final XdsLogger logger; private final Helper helper; private final GracefulSwitchLoadBalancer switchLb; private final LoadBalancerRegistry lbRegistry; WrrLocalityLoadBalancer(Helper helper) { this(helper, LoadBalancerRegistry.getDefaultRegistry()); } WrrLocalityLoadBalancer(Helper helper, LoadBalancerRegistry lbRegistry) { this.helper = checkNotNull(helper, "helper"); this.lbRegistry = lbRegistry; switchLb = new GracefulSwitchLoadBalancer(helper); logger = XdsLogger.withLogId( InternalLogId.allocate("xds-wrr-locality-lb", helper.getAuthority())); logger.log(XdsLogLevel.INFO, "Created"); } @Override public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) { logger.log(XdsLogLevel.DEBUG, "Received resolution result: {0}", resolvedAddresses); // The configuration with the child policy is combined with the locality weights // to produce the weighted target LB config. WrrLocalityConfig wrrLocalityConfig = (WrrLocalityConfig) resolvedAddresses.getLoadBalancingPolicyConfig(); // A map of locality weights is built up from the locality weight attributes in each address. 
Map<String, Integer> localityWeights = new HashMap<>(); for (EquivalentAddressGroup eag : resolvedAddresses.getAddresses()) { Attributes eagAttrs = eag.getAttributes(); String locality = eagAttrs.get(EquivalentAddressGroup.ATTR_LOCALITY_NAME); Integer localityWeight = eagAttrs.get(XdsAttributes.ATTR_LOCALITY_WEIGHT); if (locality == null) { Status unavailableStatus = Status.UNAVAILABLE.withDescription( "wrr_locality error: no locality provided"); helper.updateBalancingState(TRANSIENT_FAILURE, new FixedResultPicker(PickResult.withError(unavailableStatus))); return unavailableStatus; } if (localityWeight == null) { Status unavailableStatus = Status.UNAVAILABLE.withDescription( "wrr_locality error: no weight provided for locality " + locality); helper.updateBalancingState(TRANSIENT_FAILURE, new FixedResultPicker(PickResult.withError(unavailableStatus))); return unavailableStatus; } if (!localityWeights.containsKey(locality)) { localityWeights.put(locality, localityWeight); } else if (!localityWeights.get(locality).equals(localityWeight)) { logger.log(XdsLogLevel.WARNING, "Locality {0} has both weights {1} and {2}, using weight {1}", locality, localityWeights.get(locality), localityWeight); } } // Weighted target LB expects a WeightedPolicySelection for each locality as it will create a // child LB for each. 
Map<String, WeightedPolicySelection> weightedPolicySelections = new HashMap<>(); for (String locality : localityWeights.keySet()) { weightedPolicySelections.put(locality, new WeightedPolicySelection(localityWeights.get(locality), wrrLocalityConfig.childConfig)); } Object switchConfig = GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig( lbRegistry.getProvider(WEIGHTED_TARGET_POLICY_NAME), new WeightedTargetConfig(weightedPolicySelections)); return switchLb.acceptResolvedAddresses( resolvedAddresses.toBuilder() .setLoadBalancingPolicyConfig(switchConfig) .build()); } @Override public void handleNameResolutionError(Status error) { logger.log(XdsLogLevel.WARNING, "Received name resolution error: {0}", error); switchLb.handleNameResolutionError(error); } @Override public void shutdown() { switchLb.shutdown(); } /** * The LB config for {@link WrrLocalityLoadBalancer}. */ static final
WrrLocalityLoadBalancer
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/iterative/concurrent/BrokerTest.java
{ "start": 2664, "end": 3425 }
class ____ implements Callable<StringPair> { private final Random random; private final Broker<String> broker; private final String key; private final String value; IterationHead(Broker<String> broker, Integer key, String value) { this.broker = broker; this.key = String.valueOf(key); this.value = value; random = new Random(); } @Override public StringPair call() throws Exception { Thread.sleep(random.nextInt(10)); // System.out.println("Head " + key + " hands in " + value); broker.handIn(key, value); Thread.sleep(random.nextInt(10)); return null; } }
IterationHead
java
netty__netty
codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java
{ "start": 1010, "end": 5537 }
class ____ extends AbstractMemoryHttpData implements Attribute { public MemoryAttribute(String name) { this(name, HttpConstants.DEFAULT_CHARSET); } public MemoryAttribute(String name, long definedSize) { this(name, definedSize, HttpConstants.DEFAULT_CHARSET); } public MemoryAttribute(String name, Charset charset) { super(name, charset, 0); } public MemoryAttribute(String name, long definedSize, Charset charset) { super(name, charset, definedSize); } public MemoryAttribute(String name, String value) throws IOException { this(name, value, HttpConstants.DEFAULT_CHARSET); // Attribute have no default size } public MemoryAttribute(String name, String value, Charset charset) throws IOException { super(name, charset, 0); // Attribute have no default size setValue(value); } @Override public HttpDataType getHttpDataType() { return HttpDataType.Attribute; } @Override public String getValue() { return getByteBuf().toString(getCharset()); } @Override public void setValue(String value) throws IOException { ObjectUtil.checkNotNull(value, "value"); byte [] bytes = value.getBytes(getCharset()); checkSize(bytes.length); ByteBuf buffer = wrappedBuffer(bytes); if (definedSize > 0) { definedSize = buffer.readableBytes(); } setContent(buffer); } @Override public void addContent(ByteBuf buffer, boolean last) throws IOException { int localsize = buffer.readableBytes(); try { checkSize(size + localsize); } catch (IOException e) { buffer.release(); throw e; } if (definedSize > 0 && definedSize < size + localsize) { definedSize = size + localsize; } super.addContent(buffer, last); } @Override public int hashCode() { return getName().hashCode(); } @Override public boolean equals(Object o) { if (!(o instanceof Attribute)) { return false; } Attribute attribute = (Attribute) o; return getName().equalsIgnoreCase(attribute.getName()); } @Override public int compareTo(InterfaceHttpData other) { if (!(other instanceof Attribute)) { throw new ClassCastException("Cannot compare " + getHttpDataType() 
+ " with " + other.getHttpDataType()); } return compareTo((Attribute) other); } public int compareTo(Attribute o) { return getName().compareToIgnoreCase(o.getName()); } @Override public String toString() { return getName() + '=' + getValue(); } @Override public Attribute copy() { final ByteBuf content = content(); return replace(content != null ? content.copy() : null); } @Override public Attribute duplicate() { final ByteBuf content = content(); return replace(content != null ? content.duplicate() : null); } @Override public Attribute retainedDuplicate() { ByteBuf content = content(); if (content != null) { content = content.retainedDuplicate(); boolean success = false; try { Attribute duplicate = replace(content); success = true; return duplicate; } finally { if (!success) { content.release(); } } } else { return replace(null); } } @Override public Attribute replace(ByteBuf content) { MemoryAttribute attr = new MemoryAttribute(getName()); attr.setCharset(getCharset()); if (content != null) { try { attr.setContent(content); } catch (IOException e) { throw new ChannelException(e); } } attr.setCompleted(isCompleted()); return attr; } @Override public Attribute retain() { super.retain(); return this; } @Override public Attribute retain(int increment) { super.retain(increment); return this; } @Override public Attribute touch() { super.touch(); return this; } @Override public Attribute touch(Object hint) { super.touch(hint); return this; } }
MemoryAttribute
java
elastic__elasticsearch
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java
{ "start": 984, "end": 1320 }
class ____ extends ActionType<AcknowledgedResponse> { public static final DeleteTrainedModelAction INSTANCE = new DeleteTrainedModelAction(); public static final String NAME = "cluster:admin/xpack/ml/inference/delete"; private DeleteTrainedModelAction() { super(NAME); } public static
DeleteTrainedModelAction
java
spring-projects__spring-framework
spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java
{ "start": 111332, "end": 111733 }
interface ____ { @AliasFor(annotation = ImplicitAliasesContextConfiguration.class, attribute = "xmlFiles") String xml() default ""; @AliasFor(annotation = ImplicitAliasesContextConfiguration.class, attribute = "groovyScripts") String groovy() default ""; } @ImplicitAliasesContextConfiguration @Retention(RetentionPolicy.RUNTIME) @
SingleLocationTransitiveImplicitAliasesContextConfiguration
java
spring-projects__spring-boot
loader/spring-boot-loader/src/test/java/org/springframework/boot/loader/zip/ZipStringTests.java
{ "start": 6766, "end": 6861 }
enum ____ { STRING, CHAR_SEQUENCE, DATA_BLOCK, SINGLE_BYTE_READ_DATA_BLOCK } }
HashSourceType
java
spring-projects__spring-framework
spring-core-test/src/main/java/org/springframework/core/test/tools/SourceFile.java
{ "start": 6270, "end": 7770 }
class ____ = builder.addSource(new StringReader(makeRecordsLookLikeClasses(content))); } Assert.state(javaSource.getClasses().size() == 1, "Source must define a single class"); JavaClass javaClass = javaSource.getClasses().get(0); return (javaSource.getPackage() != null) ? (javaSource.getPackageName() + "." + javaClass.getName()) : javaClass.getName(); } catch (Exception ex) { throw new IllegalStateException( "Unable to parse source file content:\n\n" + content, ex); } } private static String makeRecordsLookLikeClasses(String content) { Pattern pattern = Pattern.compile("record\\s(\\S+)\\("); Matcher matcher = pattern.matcher(content); if (matcher.find()) { StringBuilder result = new StringBuilder(); result.append(content.substring(0, matcher.start()) + "class"); result.append(content.substring(matcher.start() + 6, matcher.end() - 1)); int parenthesesCount = 1; for (int i = matcher.end(); i < content.length(); i++) { char ch = content.charAt(i); if (parenthesesCount > 0) { if (ch == '(') { parenthesesCount++; } else if (ch == ')') { parenthesesCount--; } } else { result.append(ch); } } return makeRecordsLookLikeClasses(result.toString()); } return content; } /** * Use {@code assertThat(sourceFile)} rather than calling this method * directly. */ @Override public SourceFileAssert assertThat() { return new SourceFileAssert(this); } }
javaSource
java
redisson__redisson
redisson/src/main/java/org/redisson/api/CacheRx.java
{ "start": 1114, "end": 11097 }
interface ____<K, V> { /** * This method retrieves an entry from the cache. * * If the cache uses the read-through pattern, and the method would return null * because the entry is not present in the cache, then the cache's {@link CacheLoader} * will try to load the entry. * * @param key the key whose value should be returned * @return the element, or null if the entry does not exist. * @throws IllegalStateException if the cache is in a closed state * @throws NullPointerException if the key is null * @throws CacheException if there is a problem retrieving the entry from the cache */ Maybe<V> get(K key); /** * This method accepts a set of requested keys and retrieves a collection of entries from the * {@link CacheRx}, returning them as a {@link Map} of the associated values. * * If the cache uses the read-through pattern, and the method would return null for a key * because an entry is not present in the cache, the Cache's {@link CacheLoader} will try to * load the entry. If a key's entry cannot be loaded, the key will not appear in the Map. * * @param keys The keys whose values should be returned. * @return A Map of entries associated with the given keys. If a key is not found * in the cache, it will not be in the Map. * @throws NullPointerException if keys is null or contains a null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem retrieving the entries from the cache */ Single<Map<K, V>> getAll(Set<? extends K> keys); /** * This method returns a Boolean true/false value, depending on whether the * {@link CacheRx} has a mapping for a key k such that key.equals(k). * * * @param key the key with a possible mapping in the cache. 
* @return true if such a mapping exists * @throws NullPointerException if key is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> containsKey(K key); /** * This method places the given value V in the cache and associates it with the given key K. * * If the {@link CacheRx} already has a mapping for the key, the previous * value is replaced by the given value V. * This occurs if and only if {@link #containsKey(Object) c.containsKey(k)} * would return true.) * * @param key the key to place in the cache * @param value the value to associate with the given key * @return void * @throws NullPointerException if the key or value is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Completable put(K key, V value); /** * This method places the given key and value in the cache. * Any value already in the cache is returned and replaced by the new given value. * This occurs if and only if {@link #containsKey(Object) c.containsKey(k)} * would return true.) * If there was no value already in the cache, the method returns null. * * @param key the key to place in the cache * @param value the value to associate with the given key * @return the previous value in the cache, or null if none already existed * @throws NullPointerException if the key or value is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Maybe<V> getAndPut(K key, V value); /** * This method copies all of the entries from the given Map to the {@link CacheRx}. * * This method is equivalent to calling * {@link #put(Object, Object) put(k, v)} on this cache one time for each mapping * from key k to value v in the given Map. * * Individual puts may occur in any order. 
* * If entries in the cache corresponding to entries in the Map, or the Map itself, is * changed or removed during this operation, then the behavior of this method is * not defined. * * If default consistency mode is enabled, then each put is atomic but not * the entire putAll operation. Listeners can observe individual updates. * * @param map the Map that contains the entries to be copied to the cache * @return void * @throws NullPointerException if the map is null or contains null keys or values. * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache. */ Completable putAll(java.util.Map<? extends K, ? extends V> map); /** * This method places the given key and value in the cache atomically, if the key is * not already associated with a value in the cache. * * @param key the key to place in the cache * @param value the value to associate with the given key * @return true if the value was successfully placed in the cache * @throws NullPointerException if the key or value is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> putIfAbsent(K key, V value); /** * This method deletes the mapping for a given key from the cache, if it is present. * * This occurs if and only if there is a mapping from key k to * value v such that * (key==null ? k==null : key.equals(k)). * * This method returns true if the removal was successful, * or false if there was no such mapping. 
* * * @param key the key whose mapping will be deleted * @return returns true if successful, or false if there was no mapping * @throws NullPointerException if the key is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> remove(K key); /** * This method atomically removes a key's mapping only if it is currently mapped to the * provided value. * * @param key the key whose mapping will be deleted * @param oldValue the value that should be mapped to the given key * @return returns true if successful, or false if there was no such mapping * @throws NullPointerException if the key is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> remove(K key, V oldValue); /** * This method atomically removes the entry for a key only if it is currently mapped to some * value. * * @param key the given key * @return the value if it existed, or null if it did not * @throws NullPointerException if the key is null. * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Maybe<V> getAndRemove(K key); /** * This method atomically replaces an entry only if the key is currently mapped to a * given value. * * @param key the key associated with the given oldValue * @param oldValue the value that should be associated with the key * @param newValue the value that will be associated with the key * @return true if the value was replaced, or false if not * @throws NullPointerException if the key or values are null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> replace(K key, V oldValue, V newValue); /** * This method atomically replaces an entry only if the key is currently mapped to some * value. 
* * @param key the key mapped to the given value * @param value the value mapped to the given key * @return true if the value was replaced, or false if not * @throws NullPointerException if the key or value is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Single<Boolean> replace(K key, V value); /** * This method atomically replaces a given key's value if and only if the key is currently * mapped to a value. * * @param key the key associated with the given value * @param value the value associated with the given key * @return the previous value mapped to the given key, or * null if there was no such mapping. * @throws NullPointerException if the key or value is null * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Maybe<V> getAndReplace(K key, V value); /** * This method deletes the entries for the given keys. * * The order in which the individual entries are removed is undefined. * * For every entry in the key set, the following are called: * • any registered {@link CacheEntryRemovedListener}s • if the cache is a write-through cache, the {@link CacheWriter} * If the key set is empty, the {@link CacheWriter} is not called. * * @param keys the keys to remove * @return void * @throws NullPointerException if keys is null or if it contains a null key * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Completable removeAll(Set<? extends K> keys); /** * This method empties the cache's contents, without notifying listeners or * {@link CacheWriter}s. * * @return void * @throws IllegalStateException if the cache is in a closed state * @throws CacheException if there is a problem with the cache */ Completable clear(); }
CacheRx
java
apache__camel
core/camel-management/src/test/java/org/apache/camel/management/ManagedRoundRobinLoadBalancerTest.java
{ "start": 1370, "end": 3152 }
class ____ extends ManagementTestSupport { @Test public void testManageRoundRobinLoadBalancer() throws Exception { MockEndpoint foo = getMockEndpoint("mock:foo"); foo.expectedMessageCount(1); MockEndpoint bar = getMockEndpoint("mock:bar"); bar.expectedMessageCount(1); template.sendBodyAndHeader("direct:start", "Hello World", "foo", "123"); template.sendBodyAndHeader("direct:start", "Bye World", "foo", "123"); assertMockEndpointsSatisfied(); // get the stats for the route MBeanServer mbeanServer = getMBeanServer(); // get the object name for the delayer ObjectName on = getCamelObjectName(TYPE_PROCESSOR, "mysend"); // should be on route1 String routeId = (String) mbeanServer.getAttribute(on, "RouteId"); assertEquals("route1", routeId); String camelId = (String) mbeanServer.getAttribute(on, "CamelId"); assertEquals(context.getManagementName(), camelId); String state = (String) mbeanServer.getAttribute(on, "State"); assertEquals(ServiceStatus.Started.name(), state); Integer size = (Integer) mbeanServer.getAttribute(on, "Size"); assertEquals(2, size.intValue()); String last = (String) mbeanServer.getAttribute(on, "LastChosenProcessorId"); assertEquals("bar", last); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { from("direct:start") .loadBalance().roundRobin().id("mysend") .to("mock:foo").id("foo").to("mock:bar").id("bar"); } }; } }
ManagedRoundRobinLoadBalancerTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryTestMethodPrefixTest.java
{ "start": 1404, "end": 1680 }
class ____ { @Test public void foo() {} } """) .doTest(); } @Test public void negative() { helper .addInputLines( "T.java", """ import org.junit.Test;
T
java
apache__dubbo
dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/context/annotation/EnableConfigurationBeanBinding.java
{ "start": 1650, "end": 3599 }
interface ____ { /** * The default value for {@link #multiple()} * * @since 1.0.6 */ boolean DEFAULT_MULTIPLE = false; /** * The default value for {@link #ignoreUnknownFields()} * * @since 1.0.6 */ boolean DEFAULT_IGNORE_UNKNOWN_FIELDS = true; /** * The default value for {@link #ignoreInvalidFields()} * * @since 1.0.6 */ boolean DEFAULT_IGNORE_INVALID_FIELDS = true; /** * The name prefix of the properties that are valid to bind to the type of configuration. * * @return the name prefix of the properties to bind */ String prefix(); /** * @return The binding type of configuration. */ Class<?> type(); /** * It indicates whether {@link #prefix()} binding to multiple Spring Beans. * * @return the default value is <code>false</code> * @see #DEFAULT_MULTIPLE */ boolean multiple() default DEFAULT_MULTIPLE; /** * Set whether to ignore unknown fields, that is, whether to ignore bind * parameters that do not have corresponding fields in the target object. * <p>Default is "true". Turn this off to enforce that all bind parameters * must have a matching field in the target object. * * @return the default value is <code>true</code> * @see #DEFAULT_IGNORE_UNKNOWN_FIELDS */ boolean ignoreUnknownFields() default DEFAULT_IGNORE_UNKNOWN_FIELDS; /** * Set whether to ignore invalid fields, that is, whether to ignore bind * parameters that have corresponding fields in the target object which are * not accessible (for example because of null values in the nested path). * <p>Default is "true". * * @return the default value is <code>true</code> * @see #DEFAULT_IGNORE_INVALID_FIELDS */ boolean ignoreInvalidFields() default DEFAULT_IGNORE_INVALID_FIELDS; }
EnableConfigurationBeanBinding
java
elastic__elasticsearch
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java
{ "start": 1405, "end": 19018 }
class ____ { private StringUtils() {} public static final String EMPTY = ""; public static final String NEW_LINE = "\n"; public static final String SQL_WILDCARD = "%"; public static final String WILDCARD = "*"; public static final String EXCLUSION = "-"; private static final String[] INTEGER_ORDINALS = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" }; private static final String INVALID_REGEX_SEQUENCE = "Invalid sequence - escape character is not followed by special wildcard char"; // CamelCase to camel_case (and isNaN to is_nan) public static String camelCaseToUnderscore(String string) { if (Strings.hasText(string) == false) { return EMPTY; } StringBuilder sb = new StringBuilder(); String s = string.trim(); boolean previousCharWasUp = false; for (int i = 0; i < s.length(); i++) { char ch = s.charAt(i); if (Character.isAlphabetic(ch)) { if (Character.isUpperCase(ch)) { // append `_` when encountering a capital after a small letter, but only if not the last letter. if (i > 0 && i < s.length() - 1 && previousCharWasUp == false) { sb.append("_"); } previousCharWasUp = true; } else { previousCharWasUp = (ch == '_'); } } else { previousCharWasUp = true; } sb.append(ch); } return sb.toString().toUpperCase(Locale.ROOT); } // CAMEL_CASE to camelCase public static String underscoreToLowerCamelCase(String string) { if (Strings.hasText(string) == false) { return EMPTY; } StringBuilder sb = new StringBuilder(); String s = string.trim().toLowerCase(Locale.ROOT); boolean previousCharWasUnderscore = false; for (int i = 0; i < s.length(); i++) { char ch = s.charAt(i); if (ch == '_') { previousCharWasUnderscore = true; } else { if (previousCharWasUnderscore) { sb.append(Character.toUpperCase(ch)); previousCharWasUnderscore = false; } else { sb.append(ch); } } } return sb.toString(); } // % -> .* // _ -> . 
// escape character - can be 0 (in which case no regex gets escaped) or // should be followed by % or _ (otherwise an exception is thrown) public static String likeToJavaPattern(String pattern, char escape) { StringBuilder regex = new StringBuilder(pattern.length() + 4); boolean escaped = false; regex.append('^'); for (int i = 0; i < pattern.length(); i++) { char curr = pattern.charAt(i); if (escaped == false && (curr == escape) && escape != 0) { escaped = true; if (i + 1 == pattern.length()) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } } else { switch (curr) { case '%' -> regex.append(escaped ? SQL_WILDCARD : ".*"); case '_' -> regex.append(escaped ? "_" : "."); default -> { if (escaped) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } // escape special regex characters switch (curr) { case '\\', '^', '$', '.', '*', '?', '+', '|', '(', ')', '[', ']', '{', '}' -> regex.append('\\'); } regex.append(curr); } } escaped = false; } } regex.append('$'); return regex.toString(); } // * -> .* // ? -> . // escape character - can be 0 (in which case no regex gets escaped) or // should be followed by * or ? or the escape character itself (otherwise an exception is thrown). // Using * or ? as escape characters should be avoided because it will make it impossible to enter them as literals public static String wildcardToJavaPattern(String pattern, char escape) { StringBuilder regex = new StringBuilder(pattern.length() + 4); boolean escaped = false; regex.append('^'); for (int i = 0; i < pattern.length(); i++) { char curr = pattern.charAt(i); if (escaped == false && (curr == escape) && escape != 0) { escaped = true; if (i + 1 == pattern.length()) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } } else { switch (curr) { case '*' -> regex.append(escaped ? "\\*" : ".*"); case '?' -> regex.append(escaped ? "\\?" 
: "."); default -> { if (escaped && escape != curr) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } // escape special regex characters switch (curr) { case '\\', '^', '$', '.', '*', '?', '+', '|', '(', ')', '[', ']', '{', '}' -> regex.append('\\'); } regex.append(curr); } } escaped = false; } } regex.append('$'); return regex.toString(); } /** * Translates a Lucene wildcard pattern to a Lucene RegExp one. * Note: all RegExp "optional" characters are escaped too (allowing the use of the {@code RegExp.ALL} flag). * @param wildcard Lucene wildcard pattern * @return Lucene RegExp pattern */ public static String luceneWildcardToRegExp(String wildcard) { StringBuilder regex = new StringBuilder(); for (int i = 0, wcLen = wildcard.length(); i < wcLen; i++) { char c = wildcard.charAt(i); // this will work chunking through Unicode as long as all values matched are ASCII switch (c) { case WildcardQuery.WILDCARD_STRING -> regex.append(".*"); case WildcardQuery.WILDCARD_CHAR -> regex.append("."); case WildcardQuery.WILDCARD_ESCAPE -> { if (i + 1 < wcLen) { // consume the wildcard escaping, consider the next char char next = wildcard.charAt(i + 1); i++; switch (next) { case WildcardQuery.WILDCARD_STRING, WildcardQuery.WILDCARD_CHAR, WildcardQuery.WILDCARD_ESCAPE -> // escape `*`, `.`, `\`, since these are special chars in RegExp as well regex.append("\\"); // default: unnecessary escaping -- just ignore the escaping } regex.append(next); } else { // "else fallthru, lenient parsing with a trailing \" -- according to WildcardQuery#toAutomaton regex.append("\\\\"); } } // reserved RegExp characters case '"', '$', '(', ')', '+', '.', '[', ']', '^', '{', '|', '}' -> regex.append("\\").append(c); // reserved optional RegExp characters case '#', '&', '<', '>' -> regex.append("\\").append(c); default -> regex.append(c); } } return regex.toString(); } /** * Translates a like pattern to a Lucene wildcard. 
* This methods pays attention to the custom escape char which gets converted into \ (used by Lucene). * <pre> * % -&gt; * * _ -&gt; ? * escape character - can be 0 (in which case no regex gets escaped) or should be followed by * % or _ (otherwise an exception is thrown) * </pre> */ public static String likeToLuceneWildcard(String pattern, char escape) { StringBuilder wildcard = new StringBuilder(pattern.length() + 4); boolean escaped = false; for (int i = 0; i < pattern.length(); i++) { char curr = pattern.charAt(i); if (escaped == false && (curr == escape) && escape != 0) { if (i + 1 == pattern.length()) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } escaped = true; } else { switch (curr) { case '%' -> wildcard.append(escaped ? SQL_WILDCARD : WILDCARD); case '_' -> wildcard.append(escaped ? "_" : "?"); default -> { if (escaped) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } // escape special regex characters switch (curr) { case '\\', '*', '?' -> wildcard.append('\\'); } wildcard.append(curr); } } escaped = false; } } return wildcard.toString(); } /** * Translates a like pattern to pattern for ES index name expression resolver. * * Note the resolver only supports * (not ?) and has no notion of escaping. This is not really an issue since we don't allow * * anyway in the pattern. */ public static String likeToIndexWildcard(String pattern, char escape) { StringBuilder wildcard = new StringBuilder(pattern.length() + 4); boolean escaped = false; for (int i = 0; i < pattern.length(); i++) { char curr = pattern.charAt(i); if (escaped == false && (curr == escape) && escape != 0) { if (i + 1 == pattern.length()) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } escaped = true; } else { switch (curr) { case '%' -> wildcard.append(escaped ? SQL_WILDCARD : WILDCARD); case '_' -> wildcard.append(escaped ? 
"_" : "*"); default -> { if (escaped) { throw new InvalidArgumentException(INVALID_REGEX_SEQUENCE); } // the resolver doesn't support escaping... wildcard.append(curr); } } escaped = false; } } return wildcard.toString(); } public static String likeToUnescaped(String pattern, char escape) { StringBuilder wildcard = new StringBuilder(pattern.length()); boolean escaped = false; for (int i = 0; i < pattern.length(); i++) { char curr = pattern.charAt(i); if (escaped == false && curr == escape && escape != 0) { escaped = true; } else { if (escaped && (curr == '%' || curr == '_' || curr == escape)) { wildcard.append(curr); } else { if (escaped) { wildcard.append(escape); } wildcard.append(curr); } escaped = false; } } // corner-case when the escape char is the last char if (escaped) { wildcard.append(escape); } return wildcard.toString(); } public static String toString(SearchSourceBuilder source) { try (XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true)) { source.toXContent(builder, ToXContent.EMPTY_PARAMS); return Strings.toString(builder); } catch (IOException e) { throw new RuntimeException("error rendering", e); } } public static List<String> findSimilar(String match, Iterable<String> potentialMatches) { LevenshteinDistance ld = new LevenshteinDistance(); List<Tuple<Float, String>> scoredMatches = new ArrayList<>(); for (String potentialMatch : potentialMatches) { float distance = ld.getDistance(match, potentialMatch); if (distance >= 0.5f) { scoredMatches.add(new Tuple<>(distance, potentialMatch)); } } CollectionUtil.timSort(scoredMatches, (a, b) -> b.v1().compareTo(a.v1())); return scoredMatches.stream().map(a -> a.v2()).collect(toList()); } public static double parseDouble(String string) throws InvalidArgumentException { double value; try { value = Double.parseDouble(string); } catch (NumberFormatException nfe) { throw new InvalidArgumentException(nfe, "Cannot parse number [{}]", string); } if (Double.isInfinite(value)) { 
throw new InvalidArgumentException("Number [{}] is too large", string); } if (Double.isNaN(value)) { throw new InvalidArgumentException("[{}] cannot be parsed as a number (NaN)", string); } return value; } public static long parseLong(String string) throws InvalidArgumentException { try { return Long.parseLong(string); } catch (NumberFormatException nfe) { try { BigInteger bi = new BigInteger(string); try { bi.longValueExact(); } catch (ArithmeticException ae) { throw new InvalidArgumentException("Number [{}] is too large", string); } } catch (NumberFormatException ex) { // parsing fails, go through } throw new InvalidArgumentException("Cannot parse number [{}]", string); } } public static Number parseIntegral(String string) throws InvalidArgumentException { BigInteger bi; try { bi = new BigInteger(string); } catch (NumberFormatException ex) { throw new InvalidArgumentException(ex, "Cannot parse number [{}]", string); } if (bi.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) { if (isUnsignedLong(bi) == false) { throw new InvalidArgumentException("Number [{}] is too large", string); } return bi; } if (bi.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { throw new InvalidArgumentException("Magnitude of negative number [{}] is too large", string); } // try to downsize to int if possible (since that's the most common type) if (bi.intValue() == bi.longValue()) { // ternary operator would always promote to Long return bi.intValueExact(); } else { return bi.longValueExact(); } } public static BytesRef parseIP(String string) { var inetAddress = InetAddresses.forString(string); return new BytesRef(InetAddressPoint.encode(inetAddress)); } public static String ordinal(int i) { return switch (i % 100) { case 11, 12, 13 -> i + "th"; default -> i + INTEGER_ORDINALS[i % 10]; }; } public static Tuple<String, String> splitQualifiedIndex(String indexName) { String[] split = RemoteClusterAware.splitIndexName(indexName); return Tuple.tuple(split[0], split[1]); } public static 
String qualifyAndJoinIndices(String cluster, String[] indices) { StringJoiner sj = new StringJoiner(","); for (String index : indices) { sj.add(cluster != null ? buildRemoteIndexName(cluster, index) : index); } return sj.toString(); } public static boolean isQualified(String indexWildcard) { return RemoteClusterAware.isRemoteIndexName(indexWildcard); } public static boolean isInteger(String value) { for (char c : value.trim().toCharArray()) { if (isDigit(c) == false) { return false; } } return true; } private static boolean isLetter(char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } private static boolean isDigit(char c) { return c >= '0' && c <= '9'; } private static boolean isUnderscore(char c) { return c == '_'; } private static boolean isLetterOrDigitOrUnderscore(char c) { return isLetter(c) || isDigit(c) || isUnderscore(c); } private static boolean isLetterOrUnderscore(char c) { return isLetter(c) || isUnderscore(c); } public static boolean isValidParamName(String value) { // A valid name starts with a letter or _ if (isLetterOrUnderscore(value.charAt(0)) == false) { return false; } // contain only letter, digit or _ for (char c : value.toCharArray()) { if (isLetterOrDigitOrUnderscore(c) == false) { return false; } } return true; } }
StringUtils
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java
{ "start": 718, "end": 1875 }
class ____ extends AbstractRankDocWireSerializingTestCase<RankDoc> { protected RankDoc createTestRankDoc() { RankDoc rankDoc = new RankDoc(randomNonNegativeInt(), randomFloat(), randomIntBetween(0, 1)); rankDoc.rank = randomNonNegativeInt(); return rankDoc; } @Override protected List<NamedWriteableRegistry.Entry> getAdditionalNamedWriteables() { return Collections.emptyList(); } @Override protected Writeable.Reader<RankDoc> instanceReader() { return RankDoc::new; } @Override protected RankDoc mutateInstance(RankDoc instance) throws IOException { RankDoc mutated = new RankDoc(instance.doc, instance.score, instance.shardIndex); mutated.rank = instance.rank; if (frequently()) { mutated.doc = randomNonNegativeInt(); } if (frequently()) { mutated.score = randomFloat(); } if (frequently()) { mutated.shardIndex = randomNonNegativeInt(); } if (frequently()) { mutated.rank = randomNonNegativeInt(); } return mutated; } }
RankDocTests
java
elastic__elasticsearch
distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
{ "start": 1529, "end": 9368 }
class ____ extends Exception { private final Path jvmOptionsFile; Path jvmOptionsFile() { return jvmOptionsFile; } private final SortedMap<Integer, String> invalidLines; SortedMap<Integer, String> invalidLines() { return invalidLines; } JvmOptionsFileParserException(final Path jvmOptionsFile, final SortedMap<Integer, String> invalidLines) { this.jvmOptionsFile = jvmOptionsFile; this.invalidLines = invalidLines; } } /** * Determines the jvm options that should be passed to the Elasticsearch Java process. * * <p> This method works by joining the options found in {@link SystemJvmOptions}, the {@code jvm.options} file, * files in the {@code jvm.options.d} directory, and the options given by the {@code ES_JAVA_OPTS} environment * variable. * * @param args the start-up arguments * @param processInfo information about the CLI process. * @param tmpDir the directory that should be passed to {@code -Djava.io.tmpdir} * @param machineDependentHeap the heap configurator to use * @return the list of options to put on the Java command line * @throws InterruptedException if the java subprocess is interrupted * @throws IOException if there is a problem reading any of the files * @throws UserException if there is a problem parsing the `jvm.options` file or `jvm.options.d` files */ public static List<String> determineJvmOptions( ServerArgs args, ProcessInfo processInfo, Path tmpDir, MachineDependentHeap machineDependentHeap ) throws InterruptedException, IOException, UserException { final JvmOptionsParser parser = new JvmOptionsParser(); final Map<String, String> substitutions = new HashMap<>(); substitutions.put("ES_TMPDIR", tmpDir.toString()); substitutions.put("ES_PATH_CONF", args.configDir().toString()); final String envOptions = processInfo.envVars().get("ES_JAVA_OPTS"); try { return Collections.unmodifiableList( parser.jvmOptions(args, args.configDir(), tmpDir, envOptions, substitutions, processInfo.sysprops(), machineDependentHeap) ); } catch (final 
JvmOptionsFileParserException e) { final String errorMessage = String.format( Locale.ROOT, "encountered [%d] error%s parsing [%s]%s", e.invalidLines().size(), e.invalidLines().size() == 1 ? "" : "s", e.jvmOptionsFile(), System.lineSeparator() ); StringBuilder msg = new StringBuilder(errorMessage); int count = 0; for (final Map.Entry<Integer, String> entry : e.invalidLines().entrySet()) { count++; final String message = String.format( Locale.ROOT, "[%d]: encountered improperly formatted JVM option in [%s] on line number [%d]: [%s]%s", count, e.jvmOptionsFile(), entry.getKey(), entry.getValue(), System.lineSeparator() ); msg.append(message); } throw new UserException(ExitCodes.CONFIG, msg.toString()); } } private List<String> jvmOptions( ServerArgs args, final Path config, Path tmpDir, final String esJavaOpts, final Map<String, String> substitutions, final Map<String, String> cliSysprops, final MachineDependentHeap machineDependentHeap ) throws InterruptedException, IOException, JvmOptionsFileParserException, UserException { final List<String> jvmOptions = readJvmOptionsFiles(config); if (esJavaOpts != null) { jvmOptions.addAll(Arrays.stream(esJavaOpts.split("\\s+")).filter(Predicate.not(String::isBlank)).toList()); } final List<String> substitutedJvmOptions = substitutePlaceholders(jvmOptions, Collections.unmodifiableMap(substitutions)); final SystemMemoryInfo memoryInfo = new OverridableSystemMemoryInfo(substitutedJvmOptions, new DefaultSystemMemoryInfo()); substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(args.nodeSettings(), memoryInfo, substitutedJvmOptions)); final List<String> ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions, args.nodeSettings()); final List<String> systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings(), cliSysprops); final List<String> apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir); final List<String> finalJvmOptions = new 
ArrayList<>( systemJvmOptions.size() + substitutedJvmOptions.size() + ergonomicJvmOptions.size() + apmOptions.size() ); finalJvmOptions.addAll(systemJvmOptions); // add the system JVM options first so that they can be overridden finalJvmOptions.addAll(substitutedJvmOptions); finalJvmOptions.addAll(ergonomicJvmOptions); finalJvmOptions.addAll(apmOptions); return finalJvmOptions; } List<String> readJvmOptionsFiles(final Path config) throws IOException, JvmOptionsFileParserException { final ArrayList<Path> jvmOptionsFiles = new ArrayList<>(); jvmOptionsFiles.add(config.resolve("jvm.options")); final Path jvmOptionsDirectory = config.resolve("jvm.options.d"); if (Files.isDirectory(jvmOptionsDirectory)) { try (DirectoryStream<Path> jvmOptionsDirectoryStream = Files.newDirectoryStream(config.resolve("jvm.options.d"), "*.options")) { // collect the matching JVM options files after sorting them by Path::compareTo StreamSupport.stream(jvmOptionsDirectoryStream.spliterator(), false).sorted().forEach(jvmOptionsFiles::add); } } final List<String> jvmOptions = new ArrayList<>(); for (final Path jvmOptionsFile : jvmOptionsFiles) { final SortedMap<Integer, String> invalidLines = new TreeMap<>(); try ( InputStream is = Files.newInputStream(jvmOptionsFile); Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8); BufferedReader br = new BufferedReader(reader) ) { parse(Runtime.version().feature(), br, jvmOptions::add, invalidLines::put); } if (invalidLines.isEmpty() == false) { throw new JvmOptionsFileParserException(jvmOptionsFile, invalidLines); } } return jvmOptions; } static List<String> substitutePlaceholders(final List<String> jvmOptions, final Map<String, String> substitutions) { final Map<String, String> placeholderSubstitutions = substitutions.entrySet() .stream() .collect(Collectors.toMap(e -> "${" + e.getKey() + "}", Map.Entry::getValue)); return jvmOptions.stream().map(jvmOption -> { String actualJvmOption = jvmOption; int start = jvmOption.indexOf("${"); if 
(start >= 0 && jvmOption.indexOf('}', start) > 0) { for (final Map.Entry<String, String> placeholderSubstitution : placeholderSubstitutions.entrySet()) { actualJvmOption = actualJvmOption.replace(placeholderSubstitution.getKey(), placeholderSubstitution.getValue()); } } return actualJvmOption; }).collect(Collectors.toList()); } /** * Callback for valid JVM options. */
JvmOptionsFileParserException
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/LocalDateTimeJdbcType.java
{ "start": 592, "end": 1156 }
class ____ extends AbstractJavaTimeJdbcType<LocalDateTime> { public static LocalDateTimeJdbcType INSTANCE = new LocalDateTimeJdbcType(); public LocalDateTimeJdbcType() { super( LocalDateTime.class ); } @Override public int getJdbcTypeCode() { return SqlTypes.LOCAL_DATE_TIME; } @Override public int getDdlTypeCode() { return SqlTypes.TIMESTAMP; } @Override public <T> JdbcLiteralFormatter<T> getJdbcLiteralFormatter(JavaType<T> javaType) { return new JdbcLiteralFormatterTemporal<>( javaType, TemporalType.TIMESTAMP ); } }
LocalDateTimeJdbcType
java
spring-projects__spring-framework
spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportTests.java
{ "start": 10459, "end": 10591 }
class ____ { @Bean TestBean m() { return new TestBean(); } } @Configuration static
WithMultipleArgumentsToImportAnnotation
java
mockito__mockito
mockito-core/src/test/java/org/mockito/internal/verification/checkers/AtLeastXNumberOfInvocationsCheckerTest.java
{ "start": 912, "end": 3751 }
class ____ { @Test public void shouldMarkActualInvocationsAsVerifiedInOrder() { InOrderContext context = new InOrderContextImpl(); // given Invocation invocation = new InvocationBuilder().simpleMethod().toInvocation(); Invocation invocationTwo = new InvocationBuilder().differentMethod().toInvocation(); // when checkAtLeastNumberOfInvocations( asList(invocation, invocationTwo), new InvocationMatcher(invocation), 1, context); // then assertThat(invocation.isVerified()).isTrue(); } @Test public void shouldReportTooFewInvocationsInOrder() { InOrderContext context = new InOrderContextImpl(); // given Invocation invocation = new InvocationBuilder().simpleMethod().toInvocation(); Invocation invocationTwo = new InvocationBuilder().differentMethod().toInvocation(); // when assertThatThrownBy( () -> checkAtLeastNumberOfInvocations( asList(invocation, invocationTwo), new InvocationMatcher(invocation), 2, context)) .isInstanceOf(VerificationInOrderFailure.class) .hasMessageContainingAll( "iMethods.simpleMethod();", "Wanted *at least* 2 times", "But was 1 time"); } @Test public void shouldMarkActualInvocationsAsVerified() { // given Invocation invocation = new InvocationBuilder().simpleMethod().toInvocation(); Invocation invocationTwo = new InvocationBuilder().differentMethod().toInvocation(); // when checkAtLeastNumberOfInvocations( asList(invocation, invocationTwo), new InvocationMatcher(invocation), 1); // then assertThat(invocation.isVerified()).isTrue(); } @Test public void shouldReportTooFewInvocations() { // given Invocation invocation = new InvocationBuilder().simpleMethod().toInvocation(); Invocation invocationTwo = new InvocationBuilder().differentMethod().toInvocation(); // when assertThatThrownBy( () -> { checkAtLeastNumberOfInvocations( asList(invocation, invocationTwo), new InvocationMatcher(invocation), 2); }) .isInstanceOf(TooFewActualInvocations.class) .hasMessageContainingAll( "iMethods.simpleMethod();", "Wanted *at least* 2 times", "But was 1 time"); } }
AtLeastXNumberOfInvocationsCheckerTest
java
elastic__elasticsearch
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java
{ "start": 901, "end": 3800 }
class ____ extends BaseRestHandler { @Override public List<Route> routes() { return List.of(new Route(POST, "/_snapshot/{repository}/_analyze")); } @Override public String getName() { return "repository_analyze"; } @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { RepositoryAnalyzeAction.Request analyzeRepositoryRequest = new RepositoryAnalyzeAction.Request(request.param("repository")); analyzeRepositoryRequest.blobCount(request.paramAsInt("blob_count", analyzeRepositoryRequest.getBlobCount())); analyzeRepositoryRequest.concurrency(request.paramAsInt("concurrency", analyzeRepositoryRequest.getConcurrency())); analyzeRepositoryRequest.registerOperationCount( request.paramAsInt("register_operation_count", analyzeRepositoryRequest.getRegisterOperationCount()) ); analyzeRepositoryRequest.readNodeCount(request.paramAsInt("read_node_count", analyzeRepositoryRequest.getReadNodeCount())); analyzeRepositoryRequest.earlyReadNodeCount( request.paramAsInt("early_read_node_count", analyzeRepositoryRequest.getEarlyReadNodeCount()) ); analyzeRepositoryRequest.seed(request.paramAsLong("seed", analyzeRepositoryRequest.getSeed())); analyzeRepositoryRequest.rareActionProbability( request.paramAsDouble("rare_action_probability", analyzeRepositoryRequest.getRareActionProbability()) ); analyzeRepositoryRequest.maxBlobSize(request.paramAsSize("max_blob_size", analyzeRepositoryRequest.getMaxBlobSize())); analyzeRepositoryRequest.maxTotalDataSize( request.paramAsSize("max_total_data_size", analyzeRepositoryRequest.getMaxTotalDataSize()) ); analyzeRepositoryRequest.timeout(request.paramAsTime("timeout", analyzeRepositoryRequest.getTimeout())); analyzeRepositoryRequest.detailed(request.paramAsBoolean("detailed", analyzeRepositoryRequest.getDetailed())); analyzeRepositoryRequest.abortWritePermitted( request.paramAsBoolean("rarely_abort_writes", analyzeRepositoryRequest.isAbortWritePermitted()) ); RestCancellableNodeClient cancelClient = 
new RestCancellableNodeClient(client, request.getHttpChannel()); return channel -> cancelClient.execute( RepositoryAnalyzeAction.INSTANCE, analyzeRepositoryRequest, new RestToXContentListener<>(channel) { @Override public RestResponse buildResponse(RepositoryAnalyzeAction.Response response, XContentBuilder builder) throws Exception { builder.humanReadable(request.paramAsBoolean("human", true)); return super.buildResponse(response, builder); } } ); } }
RestRepositoryAnalyzeAction
java
spring-projects__spring-data-jpa
spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/AbstractStringBasedJpaQuery.java
{ "start": 9689, "end": 9844 }
interface ____ { QueryProvider getSorted(EntityQuery query, Sort sort, ReturnedType returnedType); } /** * No-op query rewriter. */
QuerySortRewriter
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/parser/deser/DefaultObjectDeserializerTest8.java
{ "start": 212, "end": 557 }
class ____ extends TestCase { public <T> void test_1() throws Exception { VO<T> vo = JSON.parseObject("{\"value\":[{\"id\":123}]}", new TypeReference<VO<T>>() { }); Assert.assertNotNull(vo.getValue()[0]); Assert.assertTrue(vo.getValue()[0] instanceof Map); } public static
DefaultObjectDeserializerTest8
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/boot/spi/BootstrapContext.java
{ "start": 3655, "end": 6900 }
class ____ capabilities. */ ClassLoaderAccess getClassLoaderAccess(); /** * Access to the shared {@link ClassmateContext} object used * throughout the bootstrap process. */ @Incubating ClassmateContext getClassmateContext(); /** * Access to the {@link ArchiveDescriptorFactory} used for scanning. * * @return The {@link ArchiveDescriptorFactory} */ ArchiveDescriptorFactory getArchiveDescriptorFactory(); /** * Access to the options to be used for scanning. * * @return The scan options */ ScanOptions getScanOptions(); /** * Access to the environment for scanning. * * @apiNote Consider this temporary; see discussion on {@link ScanEnvironment}. * * @return The scan environment */ ScanEnvironment getScanEnvironment(); /** * Access to the {@link org.hibernate.boot.archive.scan.spi.Scanner} to be used * for scanning. * <p> * Can be: * <ul> * <li>An instance of {@link org.hibernate.boot.archive.scan.spi.Scanner}, * <li>a {@code Class} reference to the {@code Scanner} implementor, or * <li>a string naming the {@code Scanner} implementor. * </ul> * * @return The scanner */ Object getScanner(); /** * Access to the Jandex index passed by call to * {@link org.hibernate.boot.MetadataBuilder#applyIndexView(Object)}, if any. * * @return The Jandex index * * @deprecated Set via the {@code hibernate-models} setting {@code hibernate.models.jandex.index} instead */ @Deprecated Object getJandexView(); /** * Access to any SQL functions explicitly registered with the * {@link org.hibernate.boot.MetadataBuilder}. * This does not include {@code Dialect}-registered functions. * <p> * Should never return {@code null}. * * @return The {@link SqmFunctionDescriptor}s registered via {@code MetadataBuilder} */ Map<String, SqmFunctionDescriptor> getSqlFunctions(); /** * Access to any {@link AuxiliaryDatabaseObject}s explicitly registered with * the {@link org.hibernate.boot.MetadataBuilder}. * This does not include {@link AuxiliaryDatabaseObject}s defined in mappings. 
* <p> * Should never return {@code null}. * * @return The {@link AuxiliaryDatabaseObject}s registered via {@code MetadataBuilder} */ Collection<AuxiliaryDatabaseObject> getAuxiliaryDatabaseObjectList(); /** * Access to collected {@link jakarta.persistence.AttributeConverter} definitions. * <p> * Should never return {@code null}. * * @return The {@link ConverterDescriptor}s registered via {@code MetadataBuilder} */ Collection<ConverterDescriptor<?, ?>> getAttributeConverters(); /** * Access to all explicit cache region mappings. * <p> * Should never return {@code null}. * * @return Explicit cache region mappings */ Collection<CacheRegionDefinition> getCacheRegionDefinitions(); /** * @see ManagedTypeRepresentationResolver */ ManagedTypeRepresentationResolver getRepresentationStrategySelector(); /** * Releases the "bootstrap only" resources held by this {@code BootstrapContext}. */ void release(); /** * To support Envers. */ void registerAdHocBasicType(BasicType<?> basicType); /** * To support Envers. */ <T> BasicType<T> resolveAdHocBasicType(String key); }
loading
java
elastic__elasticsearch
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalReservedSecurityStateHandlerProvider.java
{ "start": 822, "end": 1966 }
class ____ implements ReservedStateHandlerProvider { protected final LocalStateSecurity plugin; public LocalReservedSecurityStateHandlerProvider() { throw new IllegalStateException("Provider must be constructed using PluginsService"); } public LocalReservedSecurityStateHandlerProvider(LocalStateSecurity plugin) { this.plugin = plugin; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; LocalReservedSecurityStateHandlerProvider that = (LocalReservedSecurityStateHandlerProvider) o; return plugin.equals(that.plugin); } @Override public int hashCode() { return Objects.hash(plugin); } @Override public Collection<ReservedProjectStateHandler<?>> projectHandlers() { for (Plugin subPlugin : plugin.plugins()) { if (subPlugin instanceof Security security) { return security.reservedProjectStateHandlers(); } } return Collections.emptyList(); } }
LocalReservedSecurityStateHandlerProvider
java
quarkusio__quarkus
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/bindings/stereotype/InterceptorBindingOnStereotypeTest.java
{ "start": 2959, "end": 3805 }
class ____ { static int aroundConstruct = 0; static int postConstruct = 0; static int aroundInvoke = 0; static int preDestroy = 0; @AroundConstruct Object aroundConstruct(InvocationContext ctx) throws Exception { aroundConstruct++; return ctx.proceed(); } @PostConstruct Object postConstruct(InvocationContext ctx) throws Exception { postConstruct++; return ctx.proceed(); } @AroundInvoke Object aroundInvoke(InvocationContext ctx) throws Exception { aroundInvoke++; return ctx.proceed(); } @PreDestroy Object preDestroy(InvocationContext ctx) throws Exception { preDestroy++; return ctx.proceed(); } } }
MyInterceptor
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/collectionincompatibletype/CollectionIncompatibleTypeTest.java
{ "start": 2235, "end": 6371 }
class ____ { /* Tests for API coverage */ public void collection(Collection<Integer> collection1, Collection<String> collection2) { // BUG: Diagnostic contains: Argument '"bad"' should not be passed to this method // its type String is not compatible with its collection's type argument Integer collection1.contains("bad"); // BUG: Diagnostic contains: collection1.remove("bad"); // BUG: Diagnostic contains: Argument 'collection2' should not be passed to this method // its type Collection<String> has a type argument String that is not compatible with its // collection's type argument Integer collection1.containsAll(collection2); // BUG: Diagnostic contains: collection1.removeAll(collection2); // BUG: Diagnostic contains: collection1.retainAll(collection2); } public void collectionSubtype(ArrayList<Integer> arrayList1, ArrayList<String> arrayList2) { // BUG: Diagnostic contains: Argument '"bad"' should not be passed to this method // its type String is not compatible with its collection's type argument Integer arrayList1.contains("bad"); // BUG: Diagnostic contains: arrayList1.remove("bad"); // BUG: Diagnostic contains: Argument 'arrayList2' should not be passed to this method // its type ArrayList<String> has a type argument String that is not compatible with its // collection's type argument Integer arrayList1.containsAll(arrayList2); // BUG: Diagnostic contains: arrayList1.removeAll(arrayList2); // BUG: Diagnostic contains: arrayList1.retainAll(arrayList2); } public boolean deque(Deque<Integer> deque) { // BUG: Diagnostic contains: boolean result = deque.removeFirstOccurrence("bad"); // BUG: Diagnostic contains: return result && deque.removeLastOccurrence("bad"); } public boolean dequeSubtype(LinkedList<Integer> linkedList) { // BUG: Diagnostic contains: boolean result = linkedList.removeFirstOccurrence("bad"); // BUG: Diagnostic contains: return result && linkedList.removeLastOccurrence("bad"); } public String dictionary(Dictionary<Integer, String> dictionary) { // 
BUG: Diagnostic contains: String result = dictionary.get("bad"); // BUG: Diagnostic contains: return result + dictionary.remove("bad"); } public String dictionarySubtype(Hashtable<Integer, String> hashtable) { // BUG: Diagnostic contains: String result = hashtable.get("bad"); // BUG: Diagnostic contains: return result + hashtable.remove("bad"); } public int list() { List<String> list = new ArrayList<String>(); // BUG: Diagnostic contains: int result = list.indexOf(1); // BUG: Diagnostic contains: return result + list.lastIndexOf(1); } public void listSubtype() { ArrayList<String> arrayList = new ArrayList<>(); // BUG: Diagnostic contains: int result = arrayList.indexOf(1); // BUG: Diagnostic contains: result = arrayList.lastIndexOf(1); } public boolean map() { Map<Integer, String> map = new HashMap<>(); // BUG: Diagnostic contains: String result = map.get("bad"); // BUG: Diagnostic contains: result = map.getOrDefault("bad", "soBad"); // BUG: Diagnostic contains: boolean result2 = map.containsKey("bad"); // BUG: Diagnostic contains: result2 = map.containsValue(1); // BUG: Diagnostic contains: result = map.remove("bad"); return false; } public boolean mapSubtype() { ConcurrentNavigableMap<Integer, String> concurrentNavigableMap = new ConcurrentSkipListMap<>(); // BUG: Diagnostic contains: String result = concurrentNavigableMap.get("bad"); // BUG: Diagnostic contains: boolean result2 = concurrentNavigableMap.containsKey("bad"); // BUG: Diagnostic contains: result2 = concurrentNavigableMap.containsValue(1); // BUG: Diagnostic contains: result = concurrentNavigableMap.remove("bad"); return false; } public int stack(Stack<Integer> stack) { // BUG: Diagnostic contains: return stack.search("bad"); } private static
CollectionIncompatibleTypePositiveCases
java
apache__flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/workflow/RefreshWorkflow.java
{ "start": 1009, "end": 1320 }
interface ____ provide the related information to operate * the refresh workflow of {@link CatalogMaterializedTable}, the operation of refresh workflow * include create, modify, drop, etc. * * @see CreateRefreshWorkflow * @see ModifyRefreshWorkflow * @see DeleteRefreshWorkflow */ @PublicEvolving public
that
java
quarkusio__quarkus
independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/resource/basic/resource/ResponseInfoResource.java
{ "start": 435, "end": 1173 }
class ____ { private static Logger LOG = Logger.getLogger(ResponseInfoResource.class); @Path("/simple") @GET public boolean get(@QueryParam("abs") String abs) { LOG.debug("abs query: " + abs); URI base; if (abs == null) { base = PortProviderUtil.createURI("/new/one"); } else { base = PortProviderUtil.createURI("/" + abs + "/new/one"); } Response response = Response.temporaryRedirect(URI.create("new/one")).build(); URI uri = (URI) response.getMetadata().getFirst(HttpHeaders.LOCATION); LOG.debug("Location uri: " + uri); Assertions.assertEquals(base.getPath(), uri.getPath()); return true; } }
ResponseInfoResource
java
apache__avro
lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java
{ "start": 57315, "end": 58218 }
class ____ extends ResolvingTest { private static final String ENUM_WRITER = "{ \"type\": \"enum\", \"name\":\"E\", \"symbols\": [\"A\", \"B\"] }"; private static final String ENUM_READER = "{ \"type\": \"enum\", \"name\":\"E\", \"symbols\": [\"A\",\"B\",\"C\",\"D\",\"E\"] }"; public ExtendedEnumResolveTest() throws IOException { super("ExtendedEnum", ENUM_READER, ENUM_WRITER); } @Override void genSourceData() { Random r = newRandom(); Schema eSchema = writeSchema.getField("f").schema(); sourceData = new GenericRecord[count]; for (int i = 0; i < sourceData.length; i++) { GenericRecord rec = new GenericData.Record(writeSchema); int tag = r.nextInt(2); rec.put("f", GenericData.get().createEnum(eSchema.getEnumSymbols().get(tag), eSchema)); sourceData[i] = rec; } } } static
ExtendedEnumResolveTest
java
elastic__elasticsearch
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java
{ "start": 718, "end": 1185 }
class ____ extends NamedDateTimeFunction { public MonthName(Source source, Expression field, ZoneId zoneId) { super(source, field, zoneId, NameExtractor.MONTH_NAME); } @Override protected NodeCtor2<Expression, ZoneId, BaseDateTimeFunction> ctorForInfo() { return MonthName::new; } @Override protected MonthName replaceChild(Expression newChild) { return new MonthName(source(), newChild, zoneId()); } }
MonthName
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java
{ "start": 21665, "end": 21703 }
class ____ for test. * */ public
easy
java
spring-cloud__spring-cloud-gateway
spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/ReactiveLoadBalancerClientFilterTests.java
{ "start": 3728, "end": 19716 }
class ____ { private ServerWebExchange exchange; private GatewayLoadBalancerProperties properties; @Mock private GatewayFilterChain chain; @Mock private LoadBalancerClientFactory clientFactory; @Mock private LoadBalancerProperties loadBalancerProperties; @InjectMocks private ReactiveLoadBalancerClientFilter filter; @BeforeEach void setup() { properties = new GatewayLoadBalancerProperties(); exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/mypath").build()); } @Test void shouldNotFilterWhenGatewayRequestUrlIsMissing() { filter.filter(exchange, chain); verify(chain).filter(exchange); verifyNoMoreInteractions(chain); verifyNoInteractions(clientFactory); } @Test void shouldNotFilterWhenGatewayRequestUrlSchemeIsNotLb() { URI uri = UriComponentsBuilder.fromUriString("http://myservice").build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri); filter.filter(exchange, chain); verify(chain).filter(exchange); verifyNoMoreInteractions(chain); verifyNoInteractions(clientFactory); } @Test void shouldThrowExceptionWhenNoServiceInstanceIsFound() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); assertThatExceptionOfType(NotFoundException.class).isThrownBy(() -> { URI uri = UriComponentsBuilder.fromUriString("lb://myservice").build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri); filter.filter(exchange, chain).block(); }); } @SuppressWarnings("unchecked") @Test void shouldFilter() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); URI url = UriComponentsBuilder.fromUriString("lb://myservice").build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, url); ServiceInstance serviceInstance = new DefaultServiceInstance("myservice1", "myservice", "localhost", 8080, true); RoundRobinLoadBalancer loadBalancer = new RoundRobinLoadBalancer( ServiceInstanceListSuppliers.toProvider("myservice", serviceInstance), "myservice", -1); 
when(clientFactory.getInstance("myservice", ReactorServiceInstanceLoadBalancer.class)).thenReturn(loadBalancer); when(chain.filter(exchange)).thenReturn(Mono.empty()); filter.filter(exchange, chain).block(); assertThat((LinkedHashSet<URI>) exchange.getAttribute(GATEWAY_ORIGINAL_REQUEST_URL_ATTR)).contains(url); verify(clientFactory).getInstance("myservice", ReactorServiceInstanceLoadBalancer.class); verify(clientFactory).getInstances("myservice", LoadBalancerLifecycle.class); verifyNoMoreInteractions(clientFactory); assertThat((URI) exchange.getAttribute(GATEWAY_REQUEST_URL_ATTR)) .isEqualTo(URI.create("https://localhost:8080/mypath")); verify(chain).filter(exchange); verifyNoMoreInteractions(chain); } @Test void happyPath() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost/get?a=b").build(); URI lbUri = URI.create("lb://service1?a=b"); ServerWebExchange webExchange = testFilter(request, lbUri); URI uri = webExchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); assertThat(uri).hasScheme("http").hasHost("service1-host1").hasParameter("a", "b"); } @Test void noQueryParams() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost/get").build(); ServerWebExchange webExchange = testFilter(request, URI.create("lb://service1")); URI uri = webExchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); assertThat(uri).hasScheme("http").hasHost("service1-host1"); } @Test void encodedParameters() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); URI url = UriComponentsBuilder.fromUriString("http://localhost/get?a=b&c=d[]") .buildAndExpand() .encode() .toUri(); MockServerHttpRequest request = MockServerHttpRequest.method(HttpMethod.GET, url).build(); URI lbUrl = 
UriComponentsBuilder.fromUriString("lb://service1?a=b&c=d[]").buildAndExpand().encode().toUri(); // prove that it is encoded assertThat(lbUrl.getRawQuery()).isEqualTo("a=b&c=d%5B%5D"); assertThat(lbUrl).hasParameter("c", "d[]"); ServerWebExchange webExchange = testFilter(request, lbUrl); URI uri = webExchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); assertThat(uri).hasScheme("http").hasHost("service1-host1").hasParameter("a", "b").hasParameter("c", "d[]"); // prove that it is not double encoded assertThat(uri.getRawQuery()).isEqualTo("a=b&c=d%5B%5D"); } @Test void unencodedParameters() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); URI url = URI.create("http://localhost/get?a=b&c=d[]"); MockServerHttpRequest request = MockServerHttpRequest.method(HttpMethod.GET, url).build(); URI lbUrl = URI.create("lb://service1?a=b&c=d[]"); // prove that it is unencoded assertThat(lbUrl.getRawQuery()).isEqualTo("a=b&c=d[]"); ServerWebExchange webExchange = testFilter(request, lbUrl); URI uri = webExchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); assertThat(uri).hasScheme("http").hasHost("service1-host1").hasParameter("a", "b").hasParameter("c", "d[]"); // prove that it is NOT encoded assertThat(uri.getRawQuery()).isEqualTo("a=b&c=d[]"); } @Test void happyPathWithAttributeRatherThanScheme() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); MockServerHttpRequest request = MockServerHttpRequest.get("ws://localhost/get?a=b").build(); URI lbUri = URI.create("ws://service1?a=b"); exchange = MockServerWebExchange.from(request); exchange.getAttributes().put(GATEWAY_SCHEME_PREFIX_ATTR, "lb"); ServerWebExchange webExchange = testFilter(exchange, lbUri); URI uri = webExchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); assertThat(uri).hasScheme("ws").hasHost("service1-host1").hasParameter("a", "b"); } @Test void shouldNotFilterWhenGatewaySchemePrefixAttrIsNotLb() { URI uri = 
UriComponentsBuilder.fromUriString("http://myservice").build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri); exchange.getAttributes().put(GATEWAY_SCHEME_PREFIX_ATTR, "xx"); filter.filter(exchange, chain); verify(chain).filter(exchange); verifyNoMoreInteractions(chain); verifyNoInteractions(clientFactory); } @Test void shouldThrow4O4ExceptionWhenNoServiceInstanceIsFound() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); URI uri = UriComponentsBuilder.fromUriString("lb://service1").build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri); RoundRobinLoadBalancer loadBalancer = new RoundRobinLoadBalancer( ServiceInstanceListSuppliers.toProvider("service1"), "service1", -1); when(clientFactory.getInstance("service1", ReactorServiceInstanceLoadBalancer.class)).thenReturn(loadBalancer); properties.setUse404(true); ReactiveLoadBalancerClientFilter filter = new ReactiveLoadBalancerClientFilter(clientFactory, properties); when(chain.filter(exchange)).thenReturn(Mono.empty()); try { filter.filter(exchange, chain).block(); } catch (NotFoundException exception) { assertThat(exception.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND); } } @SuppressWarnings("unchecked") @Test void shouldOverrideSchemeUsingIsSecure() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); URI url = UriComponentsBuilder.fromUriString("lb://myservice").build().toUri(); ServerWebExchange exchange = MockServerWebExchange .from(MockServerHttpRequest.get("https://localhost:9999/mypath").build()); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, url); ServiceInstance serviceInstance = new DefaultServiceInstance("myservice1", "myservice", "localhost", 8080, false); when(clientFactory.getInstance("myservice", ReactorServiceInstanceLoadBalancer.class)).thenReturn( new 
RoundRobinLoadBalancer(ServiceInstanceListSuppliers.toProvider("myservice", serviceInstance), "myservice", -1)); when(chain.filter(exchange)).thenReturn(Mono.empty()); filter.filter(exchange, chain).block(); assertThat((LinkedHashSet<URI>) exchange.getAttribute(GATEWAY_ORIGINAL_REQUEST_URL_ATTR)).contains(url); assertThat((URI) exchange.getAttribute(GATEWAY_REQUEST_URL_ATTR)) .isEqualTo(URI.create("http://localhost:8080/mypath")); verify(chain).filter(exchange); verifyNoMoreInteractions(chain); } @SuppressWarnings({ "rawtypes" }) @Test void shouldPassRequestToLoadBalancer() { String hint = "test"; when(loadBalancerProperties.getHint()).thenReturn(buildHints(hint)); when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost/get?a=b").build(); URI lbUri = URI.create("lb://service1?a=b"); ServerWebExchange serverWebExchange = mock(ServerWebExchange.class); when(serverWebExchange.getAttribute(GATEWAY_REQUEST_URL_ATTR)).thenReturn(lbUri); when(serverWebExchange.getAttributes()).thenReturn(new HashMap<>(Map.of("myattr", "myattrval"))); when(serverWebExchange.getRequiredAttribute(GATEWAY_ORIGINAL_REQUEST_URL_ATTR)) .thenReturn(new LinkedHashSet<>()); when(serverWebExchange.getRequest()).thenReturn(request); RoundRobinLoadBalancer loadBalancer = mock(RoundRobinLoadBalancer.class); when(loadBalancer.choose(any(Request.class))).thenReturn(Mono .just(new DefaultResponse(new DefaultServiceInstance("myservice1", "service1", "localhost", 8080, false)))); when(clientFactory.getInstance("service1", ReactorServiceInstanceLoadBalancer.class)).thenReturn(loadBalancer); when(chain.filter(any())).thenReturn(Mono.empty()); filter.filter(serverWebExchange, chain); verify(loadBalancer).choose(argThat((Request passedRequest) -> { RequestDataContext context = (RequestDataContext) passedRequest.getContext(); return context.getClientRequest().getUrl().equals(request.getURI()) && 
"myattrval".equals(context.getClientRequest().getAttributes().get("myattr")) && context.getHint().equals(hint); })); } @SuppressWarnings({ "unchecked", "rawtypes" }) @Test void loadBalancerLifecycleCallbacksExecutedForSuccess() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); LoadBalancerLifecycle lifecycleProcessor = mock(LoadBalancerLifecycle.class); ServiceInstance serviceInstance = new DefaultServiceInstance("myservice1", "myservice", "localhost", 8080, false); ServerWebExchange serverWebExchange = mockExchange(serviceInstance, lifecycleProcessor, false); filter.filter(serverWebExchange, chain).subscribe(); verify(lifecycleProcessor).onStart(any(Request.class)); verify(lifecycleProcessor).onStartRequest(any(Request.class), any(Response.class)); verify(lifecycleProcessor) .onComplete(argThat(completionContext -> CompletionContext.Status.SUCCESS.equals(completionContext.status()) && completionContext.getLoadBalancerResponse().getServer().equals(serviceInstance) && HttpMethod.GET.equals( ((RequestDataContext) completionContext.getLoadBalancerRequest().getContext()).method()))); } @SuppressWarnings({ "unchecked", "rawtypes" }) @Test void loadBalancerLifecycleCallbacksExecutedForDiscard() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); LoadBalancerLifecycle lifecycleProcessor = mock(LoadBalancerLifecycle.class); ServiceInstance serviceInstance = null; ServerWebExchange serverWebExchange = mockExchange(serviceInstance, lifecycleProcessor, false); filter.filter(serverWebExchange, chain).subscribe(); verify(lifecycleProcessor).onStart(any(Request.class)); verify(lifecycleProcessor) .onComplete(argThat(completionContext -> CompletionContext.Status.DISCARD.equals(completionContext.status()) && HttpMethod.GET.equals( ((RequestDataContext) completionContext.getLoadBalancerRequest().getContext()).method()))); } @SuppressWarnings({ "unchecked", "rawtypes" }) @Test void 
loadBalancerLifecycleCallbacksExecutedForFailed() { when(clientFactory.getProperties(any())).thenReturn(loadBalancerProperties); LoadBalancerLifecycle lifecycleProcessor = mock(LoadBalancerLifecycle.class); ServiceInstance serviceInstance = new DefaultServiceInstance("myservice1", "myservice", "localhost", 8080, false); ServerWebExchange serverWebExchange = mockExchange(serviceInstance, lifecycleProcessor, true); filter.filter(serverWebExchange, chain).subscribe(); verify(lifecycleProcessor).onStart(any(Request.class)); verify(lifecycleProcessor).onStartRequest(any(Request.class), any(Response.class)); verify(lifecycleProcessor) .onComplete(argThat(completionContext -> CompletionContext.Status.FAILED.equals(completionContext.status()) && HttpMethod.GET.equals( ((RequestDataContext) completionContext.getLoadBalancerRequest().getContext()).method()))); } @SuppressWarnings({ "rawtypes", "unchecked" }) private ServerWebExchange mockExchange(ServiceInstance serviceInstance, LoadBalancerLifecycle lifecycleProcessor, boolean shouldThrowException) { Response response; when(lifecycleProcessor.supports(any(Class.class), any(Class.class), any(Class.class))).thenReturn(true); MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost/get?a=b").build(); URI lbUri = URI.create("lb://service1?a=b"); ServerWebExchange serverWebExchange = MockServerWebExchange.from(request); if (serviceInstance == null) { response = new EmptyResponse(); } else { response = new DefaultResponse(serviceInstance); } serverWebExchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, lbUri); serverWebExchange.getAttributes().put(GATEWAY_ORIGINAL_REQUEST_URL_ATTR, new LinkedHashSet<>()); serverWebExchange.getAttributes().put(GATEWAY_LOADBALANCER_RESPONSE_ATTR, response); RoundRobinLoadBalancer loadBalancer = mock(RoundRobinLoadBalancer.class); when(loadBalancer.choose(any(Request.class))).thenReturn(Mono.just(response)); when(clientFactory.getInstance("service1", 
ReactorServiceInstanceLoadBalancer.class)).thenReturn(loadBalancer); Map<String, LoadBalancerLifecycle> lifecycleProcessors = new HashMap<>(); lifecycleProcessors.put("service1", lifecycleProcessor); when(clientFactory.getInstances("service1", LoadBalancerLifecycle.class)).thenReturn(lifecycleProcessors); if (shouldThrowException) { when(chain.filter(any())).thenReturn(Mono.error(new UnsupportedOperationException())); } else { when(chain.filter(any())).thenReturn(Mono.empty()); } return serverWebExchange; } private Map<String, String> buildHints(String hint) { Map<String, String> hints = new HashMap<>(); hints.put("default", hint); return hints; } private ServerWebExchange testFilter(MockServerHttpRequest request, URI uri) { return testFilter(MockServerWebExchange.from(request), uri); } private ServerWebExchange testFilter(ServerWebExchange exchange, URI uri) { exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri); ArgumentCaptor<ServerWebExchange> captor = ArgumentCaptor.forClass(ServerWebExchange.class); when(chain.filter(captor.capture())).thenReturn(Mono.empty()); RoundRobinLoadBalancer loadBalancer = new RoundRobinLoadBalancer( ServiceInstanceListSuppliers.toProvider("service1", new DefaultServiceInstance("service1_1", "service1", "service1-host1", 8081, false)), "service1", -1); when(clientFactory.getInstance("service1", ReactorServiceInstanceLoadBalancer.class)).thenReturn(loadBalancer); ReactiveLoadBalancerClientFilter filter = new ReactiveLoadBalancerClientFilter(clientFactory, properties); filter.filter(exchange, chain).block(); return captor.getValue(); } }
ReactiveLoadBalancerClientFilterTests
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Timestream2EndpointBuilderFactory.java
{ "start": 1453, "end": 1589 }
interface ____ { /** * Builder for endpoint for the AWS Timestream component. */ public
Timestream2EndpointBuilderFactory
java
apache__kafka
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java
{ "start": 4516, "end": 86442 }
class ____ { private ConsumerGroup createConsumerGroup(String groupId) { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); return new ConsumerGroup( snapshotRegistry, groupId ); } @Test public void testGetOrCreateMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; // Create a member. member = consumerGroup.getOrMaybeCreateMember("member-id", true); assertEquals("member-id", member.memberId()); // Add member to the group. consumerGroup.updateMember(member); // Get that member back. member = consumerGroup.getOrMaybeCreateMember("member-id", false); assertEquals("member-id", member.memberId()); assertThrows(UnknownMemberIdException.class, () -> consumerGroup.getOrMaybeCreateMember("does-not-exist", false)); } @Test public void testUpdateMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = consumerGroup.getOrMaybeCreateMember("member", true); member = new ConsumerGroupMember.Builder(member) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .build(); consumerGroup.updateMember(member); assertEquals(member, consumerGroup.getOrMaybeCreateMember("member", false)); } @Test public void testNoStaticMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); // Create a new member which is not static consumerGroup.getOrMaybeCreateMember("member", true); assertNull(consumerGroup.staticMember("instance-id")); } @Test public void testGetStaticMemberByInstanceId() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = consumerGroup.getOrMaybeCreateMember("member", true); member = new ConsumerGroupMember.Builder(member) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .setInstanceId("instance") .build(); consumerGroup.updateMember(member); assertEquals(member, consumerGroup.staticMember("instance")); assertEquals(member, consumerGroup.getOrMaybeCreateMember("member", false)); 
assertEquals(member.memberId(), consumerGroup.staticMemberId("instance")); } @Test public void testRemoveMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member = consumerGroup.getOrMaybeCreateMember("member", true); consumerGroup.updateMember(member); assertTrue(consumerGroup.hasMember("member")); consumerGroup.removeMember("member"); assertFalse(consumerGroup.hasMember("member")); } @Test public void testRemoveStaticMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member = new ConsumerGroupMember.Builder("member") .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .setInstanceId("instance") .build(); consumerGroup.updateMember(member); assertTrue(consumerGroup.hasMember("member")); consumerGroup.removeMember("member"); assertFalse(consumerGroup.hasMember("member")); assertNull(consumerGroup.staticMember("instance")); assertNull(consumerGroup.staticMemberId("instance")); } @Test public void testUpdatingMemberUpdatesPartitionEpoch() { Uuid fooTopicId = Uuid.randomUuid(); Uuid barTopicId = Uuid.randomUuid(); Uuid zarTopicId = Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = new ConsumerGroupMember.Builder("member") .setMemberEpoch(10) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3))) .setPartitionsPendingRevocation(mkAssignment( mkTopicAssignment(barTopicId, 4, 5, 6))) .build(); consumerGroup.updateMember(member); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 1)); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 2)); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 3)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 4)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 5)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 6)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 
7)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 8)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 9)); member = new ConsumerGroupMember.Builder(member) .setMemberEpoch(11) .setAssignedPartitions(mkAssignment( mkTopicAssignment(barTopicId, 1, 2, 3))) .setPartitionsPendingRevocation(mkAssignment( mkTopicAssignment(zarTopicId, 4, 5, 6))) .build(); consumerGroup.updateMember(member); assertEquals(11, consumerGroup.currentPartitionEpoch(barTopicId, 1)); assertEquals(11, consumerGroup.currentPartitionEpoch(barTopicId, 2)); assertEquals(11, consumerGroup.currentPartitionEpoch(barTopicId, 3)); assertEquals(11, consumerGroup.currentPartitionEpoch(zarTopicId, 4)); assertEquals(11, consumerGroup.currentPartitionEpoch(zarTopicId, 5)); assertEquals(11, consumerGroup.currentPartitionEpoch(zarTopicId, 6)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 7)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 8)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 9)); } @Test public void testUpdatingMemberUpdatesPartitionEpochWhenPartitionIsReassignedBeforeBeingRevoked() { Uuid fooTopicId = Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = new ConsumerGroupMember.Builder("member") .setMemberEpoch(10) .setAssignedPartitions(Map.of()) .setPartitionsPendingRevocation(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build(); consumerGroup.updateMember(member); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 1)); member = new ConsumerGroupMember.Builder(member) .setMemberEpoch(11) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .setPartitionsPendingRevocation(Map.of()) .build(); consumerGroup.updateMember(member); assertEquals(11, consumerGroup.currentPartitionEpoch(fooTopicId, 1)); } @Test public void testUpdatingMemberUpdatesPartitionEpochWhenPartitionIsNotReleased() { Uuid fooTopicId = 
Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember m1 = new ConsumerGroupMember.Builder("m1") .setMemberEpoch(10) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build(); consumerGroup.updateMember(m1); ConsumerGroupMember m2 = new ConsumerGroupMember.Builder("m2") .setMemberEpoch(10) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build(); // m2 should not be able to acquire foo-1 because the partition is // still owned by another member. assertThrows(IllegalStateException.class, () -> consumerGroup.updateMember(m2)); } @Test public void testRemovePartitionEpochs() { Uuid fooTopicId = Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); // Removing should fail because there is no epoch set. assertThrows(IllegalStateException.class, () -> consumerGroup.removePartitionEpochs( mkAssignment( mkTopicAssignment(fooTopicId, 1) ), 10 )); ConsumerGroupMember m1 = new ConsumerGroupMember.Builder("m1") .setMemberEpoch(10) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build(); consumerGroup.updateMember(m1); // Removing should fail because the expected epoch is incorrect. assertThrows(IllegalStateException.class, () -> consumerGroup.removePartitionEpochs( mkAssignment( mkTopicAssignment(fooTopicId, 1) ), 11 )); } @Test public void testAddPartitionEpochs() { Uuid fooTopicId = Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); consumerGroup.addPartitionEpochs( mkAssignment( mkTopicAssignment(fooTopicId, 1) ), 10 ); // Changing the epoch should fail because the owner of the partition // should remove it first. 
assertThrows(IllegalStateException.class, () -> consumerGroup.addPartitionEpochs( mkAssignment( mkTopicAssignment(fooTopicId, 1) ), 11 )); } @Test public void testDeletingMemberRemovesPartitionEpoch() { Uuid fooTopicId = Uuid.randomUuid(); Uuid barTopicId = Uuid.randomUuid(); Uuid zarTopicId = Uuid.randomUuid(); ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = new ConsumerGroupMember.Builder("member") .setMemberEpoch(10) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3))) .setPartitionsPendingRevocation(mkAssignment( mkTopicAssignment(barTopicId, 4, 5, 6))) .build(); consumerGroup.updateMember(member); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 1)); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 2)); assertEquals(10, consumerGroup.currentPartitionEpoch(fooTopicId, 3)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 4)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 5)); assertEquals(10, consumerGroup.currentPartitionEpoch(barTopicId, 6)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 7)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 8)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 9)); consumerGroup.removeMember(member.memberId()); assertEquals(-1, consumerGroup.currentPartitionEpoch(barTopicId, 1)); assertEquals(-1, consumerGroup.currentPartitionEpoch(barTopicId, 2)); assertEquals(-1, consumerGroup.currentPartitionEpoch(barTopicId, 3)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 4)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 5)); assertEquals(-1, consumerGroup.currentPartitionEpoch(zarTopicId, 6)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 7)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 8)); assertEquals(-1, consumerGroup.currentPartitionEpoch(fooTopicId, 9)); } @Test 
/**
 * A member in UNRELEASED_PARTITIONS state must wait as long as one of its
 * target partitions is still pending revocation by another member.
 */
public void testWaitingOnUnreleasedPartition() {
    Uuid fooTopicId = Uuid.randomUuid();
    Uuid barTopicId = Uuid.randomUuid();
    Uuid zarTopicId = Uuid.randomUuid();
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    ConsumerGroup consumerGroup = createConsumerGroup("foo");

    consumerGroup.updateTargetAssignment(memberId1, new Assignment(mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2, 3),
        mkTopicAssignment(zarTopicId, 7, 8, 9)
    )));

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1)
        .setMemberEpoch(10)
        .setState(MemberState.UNRELEASED_PARTITIONS)
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 1, 2, 3)))
        .setPartitionsPendingRevocation(mkAssignment(
            mkTopicAssignment(barTopicId, 4, 5, 6)))
        .build();
    consumerGroup.updateMember(member1);

    // No other member holds one of member1's target partitions yet.
    assertFalse(consumerGroup.waitingOnUnreleasedPartition(member1));

    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2)
        .setMemberEpoch(10)
        .setPartitionsPendingRevocation(mkAssignment(
            mkTopicAssignment(zarTopicId, 7)))
        .build();
    consumerGroup.updateMember(member2);

    // zar-7 is in member1's target assignment and is pending revocation by
    // member2, so member1 is now waiting on an unreleased partition.
    assertTrue(consumerGroup.waitingOnUnreleasedPartition(member1));
}

/**
 * Verifies the EMPTY -> ASSIGNING -> RECONCILING -> STABLE -> EMPTY group
 * state transitions as members join, reconcile, and leave.
 */
@Test
public void testGroupState() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY, consumerGroup.state());

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
        .setState(MemberState.STABLE)
        .setMemberEpoch(1)
        .setPreviousMemberEpoch(0)
        .build();

    consumerGroup.updateMember(member1);
    consumerGroup.setGroupEpoch(1);

    assertEquals(MemberState.STABLE, member1.state());
    assertEquals(ConsumerGroup.ConsumerGroupState.ASSIGNING, consumerGroup.state());

    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2")
        .setState(MemberState.STABLE)
        .setMemberEpoch(1)
        .setPreviousMemberEpoch(0)
        .build();

    consumerGroup.updateMember(member2);
    consumerGroup.setGroupEpoch(2);

    assertEquals(MemberState.STABLE, member2.state());
assertEquals(ConsumerGroup.ConsumerGroupState.ASSIGNING, consumerGroup.state());

// Once the target assignment catches up with the group epoch, the group
// moves to RECONCILING until every member is stable at the new epoch.
consumerGroup.setTargetAssignmentEpoch(2);
assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, consumerGroup.state());

member1 = new ConsumerGroupMember.Builder(member1)
    .setState(MemberState.STABLE)
    .setMemberEpoch(2)
    .setPreviousMemberEpoch(1)
    .build();

consumerGroup.updateMember(member1);

assertEquals(MemberState.STABLE, member1.state());
assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, consumerGroup.state());

// Member 2 is not stable so the group stays in reconciling state.
member2 = new ConsumerGroupMember.Builder(member2)
    .setState(MemberState.UNREVOKED_PARTITIONS)
    .setMemberEpoch(2)
    .setPreviousMemberEpoch(1)
    .build();

consumerGroup.updateMember(member2);

assertEquals(MemberState.UNREVOKED_PARTITIONS, member2.state());
assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, consumerGroup.state());

member2 = new ConsumerGroupMember.Builder(member2)
    .setState(MemberState.STABLE)
    .setMemberEpoch(2)
    .setPreviousMemberEpoch(1)
    .build();

consumerGroup.updateMember(member2);

assertEquals(MemberState.STABLE, member2.state());
assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, consumerGroup.state());

// Removing all members takes the group back to EMPTY.
consumerGroup.removeMember("member1");
consumerGroup.removeMember("member2");

assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY, consumerGroup.state());
}

/**
 * Group.GroupType.parse is case-insensitive and falls back to UNKNOWN for
 * unrecognised names.
 */
@Test
public void testGroupTypeFromString() {
    assertEquals(Group.GroupType.CLASSIC, Group.GroupType.parse("classic"));
    // Test case insensitivity.
    assertEquals(Group.GroupType.CONSUMER, Group.GroupType.parse("Consumer"));
    // Test with invalid group type.
assertEquals(Group.GroupType.UNKNOWN, Group.GroupType.parse("Invalid"));
}

/**
 * preferredServerAssignor / computePreferredServerAssignor pick the assignor
 * named by the most members, either from the committed group state or with a
 * not-yet-applied member update/removal taken into account.
 */
@Test
public void testPreferredServerAssignor() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
        .setServerAssignorName("range")
        .build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2")
        .setServerAssignorName("range")
        .build();
    ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3")
        .setServerAssignorName("uniform")
        .build();

    // The group is empty so the preferred assignor should be empty.
    assertEquals(
        Optional.empty(),
        consumerGroup.preferredServerAssignor()
    );

    // Member 1 has got an updated assignor but this is not reflected in the group yet so
    // we pass the updated member. The assignor should be range.
    assertEquals(
        Optional.of("range"),
        consumerGroup.computePreferredServerAssignor(null, member1)
    );

    // Update the group with member 1.
    consumerGroup.updateMember(member1);

    // Member 1 is in the group so the assignor should be range.
    assertEquals(
        Optional.of("range"),
        consumerGroup.preferredServerAssignor()
    );

    // Member 1 has been removed but this is not reflected in the group yet so
    // we pass the removed member. The assignor should be empty, because
    // member 1 is the only member carrying an assignor.
    // (Comment fixed: it previously claimed "range", contradicting the
    // Optional.empty() assertion below.)
    assertEquals(
        Optional.empty(),
        consumerGroup.computePreferredServerAssignor(member1, null)
    );

    // Member 2 has got an updated assignor but this is not reflected in the group yet so
    // we pass the updated member. The assignor should be range.
    assertEquals(
        Optional.of("range"),
        consumerGroup.computePreferredServerAssignor(null, member2)
    );

    // Update the group with member 2.
    consumerGroup.updateMember(member2);

    // Member 1 and 2 are in the group so the assignor should be range.
    assertEquals(
        Optional.of("range"),
        consumerGroup.preferredServerAssignor()
    );

    // Update the group with member 3.
    consumerGroup.updateMember(member3);

    // Member 1, 2 and 3 are in the group so the assignor should be range.
assertEquals(
    Optional.of("range"),
    consumerGroup.preferredServerAssignor()
);

// Members without assignors
ConsumerGroupMember updatedMember1 = new ConsumerGroupMember.Builder("member1")
    .setServerAssignorName(null)
    .build();
ConsumerGroupMember updatedMember2 = new ConsumerGroupMember.Builder("member2")
    .setServerAssignorName(null)
    .build();
ConsumerGroupMember updatedMember3 = new ConsumerGroupMember.Builder("member3")
    .setServerAssignorName(null)
    .build();

// Member 1 has removed its assignor but this is not reflected in the group yet so
// we pass the updated member. The remaining votes are one "range" (member 2)
// and one "uniform" (member 3), so either may win the tie.
Optional<String> assignor = consumerGroup.computePreferredServerAssignor(member1, updatedMember1);
assertTrue(assignor.equals(Optional.of("range")) || assignor.equals(Optional.of("uniform")));

// Update the group.
consumerGroup.updateMember(updatedMember1);

// Member 2 has removed its assignor but this is not reflected in the group yet so
// we pass the updated member. The assignor should be uniform, the only one left.
// (Comment fixed: it previously said "range or uniform", but only member 3's
// "uniform" remains once member 2's vote is dropped.)
assertEquals(
    Optional.of("uniform"),
    consumerGroup.computePreferredServerAssignor(member2, updatedMember2)
);

// Update the group.
consumerGroup.updateMember(updatedMember2);

// Only member 3 is left in the group so the assignor should be uniform.
assertEquals(
    Optional.of("uniform"),
    consumerGroup.preferredServerAssignor()
);

// Member 3 has removed its assignor but this is not reflected in the group yet so
// we pass the updated member. The assignor should be empty.
assertEquals(
    Optional.empty(),
    consumerGroup.computePreferredServerAssignor(member3, updatedMember3)
);

// Update the group.
consumerGroup.updateMember(updatedMember3);

// The group is empty so the assignor should be empty as well.
assertEquals(
    Optional.empty(),
    consumerGroup.preferredServerAssignor()
);
}

/**
 * The group's subscription type flips between HOMOGENEOUS and HETEROGENEOUS
 * as members with differing topic subscriptions join, change, and leave.
 */
@Test
public void testUpdateSubscribedTopicNamesAndSubscriptionType() {
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
        .setSubscribedTopicNames(List.of("foo"))
        .build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2")
        .setSubscribedTopicNames(Arrays.asList("bar", "foo"))
        .build();
    ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3")
        .setSubscribedTopicNames(Arrays.asList("bar", "foo"))
        .build();

    ConsumerGroup consumerGroup = createConsumerGroup("group-foo");

    // It should be empty by default.
    assertEquals(
        Map.of(),
        consumerGroup.subscribedTopicNames()
    );

    // It should be Homogeneous by default.
    assertEquals(
        HOMOGENEOUS,
        consumerGroup.subscriptionType()
    );

    consumerGroup.updateMember(member1);

    // It should be Homogeneous since there is just 1 member
    assertEquals(
        HOMOGENEOUS,
        consumerGroup.subscriptionType()
    );

    // member2 subscribes to {bar, foo} while member1 has {foo}.
    consumerGroup.updateMember(member2);

    assertEquals(
        HETEROGENEOUS,
        consumerGroup.subscriptionType()
    );

    consumerGroup.updateMember(member3);

    assertEquals(
        HETEROGENEOUS,
        consumerGroup.subscriptionType()
    );

    // With member1 gone, member2 and member3 share the same subscription.
    consumerGroup.removeMember(member1.memberId());

    assertEquals(
        HOMOGENEOUS,
        consumerGroup.subscriptionType()
    );

    // NOTE(review): the builder re-uses id "member2", so this updates the
    // existing member's subscription rather than adding a fourth member —
    // presumably intentional; confirm against the production test.
    ConsumerGroupMember member4 = new ConsumerGroupMember.Builder("member2")
        .setSubscribedTopicNames(Arrays.asList("bar", "foo", "zar"))
        .build();

    consumerGroup.updateMember(member4);

    assertEquals(
        HETEROGENEOUS,
        consumerGroup.subscriptionType()
    );
}

/**
 * invertedTargetAssignment tracks, per topic partition, which member it is
 * currently targeted at, across updates and removals.
 */
@Test
public void testUpdateInvertedAssignment() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup consumerGroup = new ConsumerGroup(snapshotRegistry, "test-group");
    Uuid topicId = Uuid.randomUuid();
    String memberId1 = "member1";
    String memberId2 = "member2";

    // Initial assignment for member1
    Assignment initialAssignment = new Assignment(Map.of(
        topicId,
        Set.of(0)
    ));
    consumerGroup.updateTargetAssignment(memberId1,
initialAssignment);

// Verify that partition 0 is assigned to member1.
assertEquals(
    mkMap(
        mkEntry(topicId, mkMap(mkEntry(0, memberId1)))
    ),
    consumerGroup.invertedTargetAssignment()
);

// New assignment for member1
Assignment newAssignment = new Assignment(Map.of(
    topicId,
    Set.of(1)
));
consumerGroup.updateTargetAssignment(memberId1, newAssignment);

// Verify that partition 0 is no longer assigned and partition 1 is assigned to member1
assertEquals(
    mkMap(
        mkEntry(topicId, mkMap(mkEntry(1, memberId1)))
    ),
    consumerGroup.invertedTargetAssignment()
);

// New assignment for member2 to add partition 1
Assignment newAssignment2 = new Assignment(Map.of(
    topicId,
    Set.of(1)
));
consumerGroup.updateTargetAssignment(memberId2, newAssignment2);

// Verify that partition 1 is assigned to member2
assertEquals(
    mkMap(
        mkEntry(topicId, mkMap(mkEntry(1, memberId2)))
    ),
    consumerGroup.invertedTargetAssignment()
);

// New assignment for member1 to revoke partition 1 and assign partition 0
Assignment newAssignment1 = new Assignment(Map.of(
    topicId,
    Set.of(0)
));
consumerGroup.updateTargetAssignment(memberId1, newAssignment1);

// Verify that partition 1 is still assigned to member2 and partition 0 is assigned to member1
assertEquals(
    mkMap(
        mkEntry(topicId, mkMap(
            mkEntry(0, memberId1),
            mkEntry(1, memberId2)
        ))
    ),
    consumerGroup.invertedTargetAssignment()
);

// Test remove target assignment for member1
consumerGroup.removeTargetAssignment(memberId1);

// Verify that partition 0 is no longer assigned and partition 1 is still assigned to member2
assertEquals(
    mkMap(
        mkEntry(topicId, mkMap(mkEntry(1, memberId2)))
    ),
    consumerGroup.invertedTargetAssignment()
);
}

/**
 * The metadata refresh deadline gates when the group's subscription metadata
 * must be recomputed; it is tied to both a wall-clock deadline and the group
 * epoch it was computed for.
 */
@Test
public void testMetadataRefreshDeadline() {
    MockTime time = new MockTime();
    ConsumerGroup group = createConsumerGroup("group-foo");

    // Group epoch starts at 0.
    assertEquals(0, group.groupEpoch());

    // The refresh time deadline should be empty when the group is created or loaded.
assertTrue(group.hasMetadataExpired(time.milliseconds()));
assertEquals(0L, group.metadataRefreshDeadline().deadlineMs);
assertEquals(0, group.metadataRefreshDeadline().epoch);

// Set the refresh deadline. The metadata remains valid because the deadline
// has not passed and the group epoch is correct.
group.setMetadataRefreshDeadline(time.milliseconds() + 1000, group.groupEpoch());
assertFalse(group.hasMetadataExpired(time.milliseconds()));
assertEquals(time.milliseconds() + 1000, group.metadataRefreshDeadline().deadlineMs);
assertEquals(group.groupEpoch(), group.metadataRefreshDeadline().epoch);

// Advance past the deadline. The metadata should have expired.
time.sleep(1001L);
assertTrue(group.hasMetadataExpired(time.milliseconds()));

// Set the refresh time deadline with a higher group epoch. The metadata is considered
// as expired because the group epoch attached to the deadline is higher than the
// current group epoch.
group.setMetadataRefreshDeadline(time.milliseconds() + 1000, group.groupEpoch() + 1);
assertTrue(group.hasMetadataExpired(time.milliseconds()));
assertEquals(time.milliseconds() + 1000, group.metadataRefreshDeadline().deadlineMs);
assertEquals(group.groupEpoch() + 1, group.metadataRefreshDeadline().epoch);

// Advance the group epoch.
group.setGroupEpoch(group.groupEpoch() + 1);

// Set the refresh deadline. The metadata remains valid because the deadline
// has not passed and the group epoch is correct.
group.setMetadataRefreshDeadline(time.milliseconds() + 1000, group.groupEpoch());
assertFalse(group.hasMetadataExpired(time.milliseconds()));
assertEquals(time.milliseconds() + 1000, group.metadataRefreshDeadline().deadlineMs);
assertEquals(group.groupEpoch(), group.metadataRefreshDeadline().epoch);

// Request metadata refresh. The metadata expires immediately.
group.requestMetadataRefresh();
assertTrue(group.hasMetadataExpired(time.milliseconds()));
assertEquals(0L, group.metadataRefreshDeadline().deadlineMs);
assertEquals(0, group.metadataRefreshDeadline().epoch);
}

/**
 * Transactional offset commits are validated against member id and epoch;
 * admin-client style calls (empty member id, epoch -1) are only allowed while
 * the group is empty.
 */
@ParameterizedTest
@ApiKeyVersionsSource(apiKey = ApiKeys.TXN_OFFSET_COMMIT)
public void testValidateTransactionalOffsetCommit(short version) {
    boolean isTransactional = true;
    ConsumerGroup group = createConsumerGroup("group-foo");

    // Simulate a call from the admin client without member id and member epoch.
    // This should pass only if the group is empty.
    group.validateOffsetCommit("", "", -1, isTransactional, version);

    // The member does not exist.
    assertThrows(UnknownMemberIdException.class, () ->
        group.validateOffsetCommit("member-id", null, 0, isTransactional, version));

    // Create a member.
    group.updateMember(new ConsumerGroupMember.Builder("member-id").build());

    // A call from the admin client should fail as the group is not empty.
    assertThrows(UnknownMemberIdException.class, () ->
        group.validateOffsetCommit("", "", -1, isTransactional, version));

    // The member epoch is stale.
    assertThrows(StaleMemberEpochException.class, () ->
        group.validateOffsetCommit("member-id", "", 10, isTransactional, version));

    // This should succeed.
    group.validateOffsetCommit("member-id", "", 0, isTransactional, version);

    // This should succeed.
    group.validateOffsetCommit("", null, -1, isTransactional, version);
}

/**
 * Non-transactional offset commits: validation depends on the request
 * version and on whether the member joined with the consumer or the classic
 * protocol.
 */
@ParameterizedTest
@ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT)
public void testValidateOffsetCommit(short version) {
    boolean isTransactional = false;
    ConsumerGroup group = createConsumerGroup("group-foo");

    // Simulate a call from the admin client without member id and member epoch.
    // This should pass only if the group is empty.
    group.validateOffsetCommit("", "", -1, isTransactional, version);

    // The member does not exist.
assertThrows(UnknownMemberIdException.class, () ->
    group.validateOffsetCommit("member-id", null, 0, isTransactional, version));

// Create members.
group.updateMember(
    new ConsumerGroupMember
        .Builder("new-protocol-member-id").build()
);
group.updateMember(
    new ConsumerGroupMember.Builder("old-protocol-member-id")
        .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata())
        .build()
);

// A call from the admin client should fail as the group is not empty.
assertThrows(UnknownMemberIdException.class, () ->
    group.validateOffsetCommit("", "", -1, isTransactional, version));
assertThrows(UnknownMemberIdException.class, () ->
    group.validateOffsetCommit("", null, -1, isTransactional, version));

// The member epoch is stale. Consumer-protocol members need OffsetCommit
// version 9+; older versions are rejected outright.
if (version >= 9) {
    assertThrows(StaleMemberEpochException.class, () ->
        group.validateOffsetCommit("new-protocol-member-id", "", 10, isTransactional, version));
} else {
    assertThrows(UnsupportedVersionException.class, () ->
        group.validateOffsetCommit("new-protocol-member-id", "", 10, isTransactional, version));
}
// Classic-protocol members report a generation mismatch instead.
assertThrows(IllegalGenerationException.class, () ->
    group.validateOffsetCommit("old-protocol-member-id", "", 10, isTransactional, version));

// This should succeed.
if (version >= 9) {
    group.validateOffsetCommit("new-protocol-member-id", "", 0, isTransactional, version);
} else {
    assertThrows(UnsupportedVersionException.class, () ->
        group.validateOffsetCommit("new-protocol-member-id", "", 0, isTransactional, version));
}
}

/**
 * stateAsString at a given committed offset reflects the state captured by
 * the snapshot taken at that offset.
 */
@Test
public void testAsListedGroup() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup group = new ConsumerGroup(snapshotRegistry, "group-foo");
    snapshotRegistry.idempotentCreateSnapshot(0);
    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY.toString(), group.stateAsString(0));
    group.updateMember(new ConsumerGroupMember.Builder("member1")
        .setSubscribedTopicNames(List.of("foo"))
        .build());
    snapshotRegistry.idempotentCreateSnapshot(1);
    // The old snapshot still reports EMPTY; the new one reports STABLE.
    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY.toString(), group.stateAsString(0));
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE.toString(), group.stateAsString(1));
}

/**
 * Offset fetches are validated against member id and epoch, optionally
 * evaluated at a last committed offset.
 */
@Test
public void testValidateOffsetFetch() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup group = new ConsumerGroup(
        snapshotRegistry,
        "group-foo"
    );

    // Simulate a call from the admin client without member id and member epoch.
    group.validateOffsetFetch(null, -1, Long.MAX_VALUE);

    // The member does not exist.
    assertThrows(UnknownMemberIdException.class, () ->
        group.validateOffsetFetch("member-id", 0, Long.MAX_VALUE));

    // Create a member.
    snapshotRegistry.idempotentCreateSnapshot(0);
    group.updateMember(new ConsumerGroupMember.Builder("member-id").build());

    // The member does not exist at last committed offset 0.
    assertThrows(UnknownMemberIdException.class, () ->
        group.validateOffsetFetch("member-id", 0, 0));

    // The member exists but the epoch is stale when the last committed offset is not considered.
    assertThrows(StaleMemberEpochException.class, () ->
        group.validateOffsetFetch("member-id", 10, Long.MAX_VALUE));

    // This should succeed.
group.validateOffsetFetch("member-id", 0, Long.MAX_VALUE);
}

/**
 * A consumer group may only be deleted while it is EMPTY.
 */
@Test
public void testValidateDeleteGroup() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");

    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY, consumerGroup.state());
    assertDoesNotThrow(consumerGroup::validateDeleteGroup);

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
        .setMemberEpoch(1)
        .setPreviousMemberEpoch(0)
        .build();
    consumerGroup.updateMember(member1);

    // Any non-empty state rejects deletion.
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);

    consumerGroup.setGroupEpoch(1);

    assertEquals(ConsumerGroup.ConsumerGroupState.ASSIGNING, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);

    consumerGroup.setTargetAssignmentEpoch(1);

    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);
}

/**
 * The group's offset expiration condition is based on each offset's commit
 * timestamp.
 */
@Test
public void testOffsetExpirationCondition() {
    long currentTimestamp = 30000L;
    long commitTimestamp = 20000L;
    long offsetsRetentionMs = 10000L;
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(15000L, OptionalInt.empty(), "", commitTimestamp, OptionalLong.empty(), Uuid.ZERO_UUID);
    ConsumerGroup group = new ConsumerGroup(new SnapshotRegistry(new LogContext()), "group-id");

    Optional<OffsetExpirationCondition> offsetExpirationCondition = group.offsetExpirationCondition();
    assertTrue(offsetExpirationCondition.isPresent());

    OffsetExpirationConditionImpl condition = (OffsetExpirationConditionImpl) offsetExpirationCondition.get();
    assertEquals(commitTimestamp, condition.baseTimestamp().apply(offsetAndMetadata));
    // commit time (20000) + retention (10000) <= now (30000) => expired.
    assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));
}

/**
 * isSubscribedToTopic reflects the union of all members' topic subscriptions.
 */
@Test
public void testIsSubscribedToTopic() {
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
.setSubscribedTopicNames(List.of("foo"))
.build();
ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2")
    .setSubscribedTopicNames(List.of("bar"))
    .build();

ConsumerGroup consumerGroup = createConsumerGroup("group-foo");
consumerGroup.updateMember(member1);
consumerGroup.updateMember(member2);

assertTrue(consumerGroup.isSubscribedToTopic("foo"));
assertTrue(consumerGroup.isSubscribedToTopic("bar"));

// Each topic stops being subscribed once its only subscriber leaves.
consumerGroup.removeMember("member1");
assertFalse(consumerGroup.isSubscribedToTopic("foo"));

consumerGroup.removeMember("member2");
assertFalse(consumerGroup.isSubscribedToTopic("bar"));
}

/**
 * asDescribedGroup returns the group's state, epochs, assignor, and member
 * details as of the requested committed offset.
 */
@Test
public void testAsDescribedGroup() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup group = new ConsumerGroup(snapshotRegistry, "group-id-1");
    snapshotRegistry.idempotentCreateSnapshot(0);
    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY.toString(), group.stateAsString(0));

    group.updateMember(new ConsumerGroupMember.Builder("member1")
        .setSubscribedTopicNames(List.of("foo"))
        .setServerAssignorName("assignorName")
        .build());
    group.updateMember(new ConsumerGroupMember.Builder("member2")
        .build());
    snapshotRegistry.idempotentCreateSnapshot(1);

    ConsumerGroupDescribeResponseData.DescribedGroup expected = new ConsumerGroupDescribeResponseData.DescribedGroup()
        .setGroupId("group-id-1")
        .setGroupState(ConsumerGroup.ConsumerGroupState.STABLE.toString())
        .setGroupEpoch(0)
        .setAssignmentEpoch(0)
        .setAssignorName("assignorName")
        .setMembers(Arrays.asList(
            new ConsumerGroupDescribeResponseData.Member()
                .setMemberId("member1")
                .setSubscribedTopicNames(List.of("foo"))
                .setSubscribedTopicRegex("")
                .setMemberType((byte) 1),
            new ConsumerGroupDescribeResponseData.Member().setMemberId("member2")
                .setSubscribedTopicRegex("")
                .setMemberType((byte) 1)
        ));
    ConsumerGroupDescribeResponseData.DescribedGroup actual = group.asDescribedGroup(1, "",
        new KRaftCoordinatorMetadataImage(new MetadataImageBuilder().build()));

    assertEquals(expected,
actual);
}

/**
 * isInStates matches the group's state name at a committed offset.
 * NOTE(review): despite the method name, the assertions below show the
 * comparison is case-sensitive ("empty" matches, "Empty" does not).
 */
@Test
public void testIsInStatesCaseInsensitive() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup group = new ConsumerGroup(snapshotRegistry, "group-foo");
    snapshotRegistry.idempotentCreateSnapshot(0);
    assertTrue(group.isInStates(Set.of("empty"), 0));
    assertFalse(group.isInStates(Set.of("Empty"), 0));

    group.updateMember(new ConsumerGroupMember.Builder("member1")
        .setSubscribedTopicNames(List.of("foo"))
        .build());
    snapshotRegistry.idempotentCreateSnapshot(1);

    assertTrue(group.isInStates(Set.of("empty"), 0));
    assertTrue(group.isInStates(Set.of("stable"), 1));
    assertFalse(group.isInStates(Set.of("empty"), 1));
}

/**
 * classicMembersSupportedProtocols counts, per protocol name, how many
 * classic members support it; supportsClassicProtocols requires every
 * classic member to share at least one of the candidate protocols.
 */
@Test
public void testClassicMembersSupportedProtocols() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> rangeProtocol = new ArrayList<>();
    rangeProtocol.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
        .setName("range")
        .setMetadata(new byte[0]));

    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> roundRobinAndRangeProtocols = new ArrayList<>();
    roundRobinAndRangeProtocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
        .setName("roundrobin")
        .setMetadata(new byte[0]));
    roundRobinAndRangeProtocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
        .setName("range")
        .setMetadata(new byte[0]));

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member-1")
        .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
            .setSupportedProtocols(rangeProtocol))
        .build();
    consumerGroup.updateMember(member1);

    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member-2")
        .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
            .setSupportedProtocols(roundRobinAndRangeProtocols))
        .build();
    consumerGroup.updateMember(member2);

    assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range"));
    assertEquals(1,
consumerGroup.classicMembersSupportedProtocols().get("roundrobin"));
assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("range", "sticky")));
assertFalse(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("sticky", "roundrobin")));

// member-2 drops roundrobin: only range remains supported by both members.
member2 = new ConsumerGroupMember.Builder(member2)
    .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
        .setSupportedProtocols(rangeProtocol))
    .build();
consumerGroup.updateMember(member2);

assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range"));
assertFalse(consumerGroup.classicMembersSupportedProtocols().containsKey("roundrobin"));

// Both members now support roundrobin and range.
member1 = new ConsumerGroupMember.Builder(member1)
    .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
        .setSupportedProtocols(roundRobinAndRangeProtocols))
    .build();
consumerGroup.updateMember(member1);
member2 = new ConsumerGroupMember.Builder(member2)
    .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
        .setSupportedProtocols(roundRobinAndRangeProtocols))
    .build();
consumerGroup.updateMember(member2);

assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range"));
assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("roundrobin"));
assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("sticky", "roundrobin")));
}

/**
 * numClassicProtocolMembers counts members that joined with the classic
 * protocol; allMembersUseClassicProtocolExcept checks whether every member
 * other than the given one(s) is classic.
 */
@Test
public void testNumClassicProtocolMembers() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = new ArrayList<>();
    protocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
        .setName("range")
        .setMetadata(new byte[0]));

    // The group has member 1 (using the classic protocol).
ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member-1")
    .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
        .setSupportedProtocols(protocols))
    .build();
consumerGroup.updateMember(member1);
assertEquals(1, consumerGroup.numClassicProtocolMembers());

// The group has member 1 (using the classic protocol) and member 2 (using the consumer protocol).
ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member-2")
    .build();
consumerGroup.updateMember(member2);
assertEquals(1, consumerGroup.numClassicProtocolMembers());
// Excluding member 1 leaves the consumer-protocol member 2 => false;
// excluding member 2 leaves only the classic member 1 => true.
assertFalse(consumerGroup.allMembersUseClassicProtocolExcept(member1));
assertTrue(consumerGroup.allMembersUseClassicProtocolExcept(member2));

// The group has member 2 (using the consumer protocol) and member 3 (using the consumer protocol).
consumerGroup.removeMember(member1.memberId());
ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member-3")
    .build();
consumerGroup.updateMember(member3);
assertEquals(0, consumerGroup.numClassicProtocolMembers());
assertFalse(consumerGroup.allMembersUseClassicProtocolExcept(member2));

// The group has member 2 (using the classic protocol).
consumerGroup.removeMember(member2.memberId());
member2 = new ConsumerGroupMember.Builder("member-2")
    .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
        .setSupportedProtocols(protocols))
    .build();
consumerGroup.updateMember(member2);
assertEquals(1, consumerGroup.numClassicProtocolMembers());
}

/**
 * Parameterised check of allMembersUseClassicProtocolExcept over mixes of
 * classic/consumer protocol members and removal sets.
 */
@ParameterizedTest
@CsvSource({
    "5, 5, 0, 0, false", // remove no consumer protocol members
    "5, 5, 0, 4, false", // remove 4 out of 5 consumer protocol members
    "5, 5, 1, 4, false", // remove 4 out of 5 consumer protocol members and 1 classic protocol member
    "5, 5, 0, 5, true", // remove 5 out of 5 consumer protocol members
    "5, 5, 1, 5, true", // remove 5 out of 5 consumer protocol members and 1 classic protocol member
    "5, 5, 5, 5, true", // an empty consumer group is considered to have only classic protocol members
    "5, 0, 0, 0, true", // a consumer group with only classic protocol members, which should not happen
    "5, 0, 1, 0, true", // a consumer group with only classic protocol members, which should not happen
})
public void testAllMembersUseClassicProtocolExcept(
    int numClassicProtocolMembers,
    int numConsumerProtocolMembers,
    int numRemovedClassicProtocolMembers,
    int numRemovedConsumerProtocolMembers,
    boolean expectedResult
) {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = new ArrayList<>();
    protocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
        .setName("range")
        .setMetadata(new byte[0]));

    List<ConsumerGroupMember> classicProtocolMembers = new ArrayList<>();
    List<ConsumerGroupMember> consumerProtocolMembers = new ArrayList<>();

    // Add classic and consumer protocol members to the group
    for (int i = 0; i < numClassicProtocolMembers; i++) {
        ConsumerGroupMember member = new ConsumerGroupMember.Builder("classic-member-" + i)
            .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
                .setSupportedProtocols(protocols))
.build();
classicProtocolMembers.add(member);
consumerGroup.updateMember(member);
}
for (int i = 0; i < numConsumerProtocolMembers; i++) {
    ConsumerGroupMember member = new ConsumerGroupMember.Builder("consumer-member-" + i)
        .build();
    consumerProtocolMembers.add(member);
    consumerGroup.updateMember(member);
}
assertEquals(numClassicProtocolMembers, consumerGroup.numClassicProtocolMembers());

// Test allMembersUseClassicProtocolExcept
Set<ConsumerGroupMember> removedMembers = new HashSet<>();
for (int i = 0; i < numRemovedClassicProtocolMembers; i++) {
    removedMembers.add(classicProtocolMembers.get(i));
}
for (int i = 0; i < numRemovedConsumerProtocolMembers; i++) {
    removedMembers.add(consumerProtocolMembers.get(i));
}
assertEquals(expectedResult, consumerGroup.allMembersUseClassicProtocolExcept(removedMembers));
}

/**
 * fromClassicGroup converts a stable classic group into an equivalent
 * consumer group, carrying over epochs, subscriptions, and assignments.
 */
@Test
public void testFromClassicGroup() {
    MockTime time = new MockTime();
    LogContext logContext = new LogContext();
    String groupId = "group-id";
    String memberId = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";

    CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
        .addTopic(fooTopicId, fooTopicName, 1)
        .addTopic(barTopicId, barTopicName, 1)
        .addRacks()
        .buildCoordinatorMetadataImage();

    ClassicGroup classicGroup = new ClassicGroup(
        logContext,
        groupId,
        STABLE,
        time,
        10,
        Optional.of(ConsumerProtocol.PROTOCOL_TYPE),
        Optional.of("range"),
        Optional.empty(),
        Optional.of(time.milliseconds())
    );
    ClassicGroupMember member = new ClassicGroupMember(
        memberId,
        Optional.empty(),
        "client-id",
        "client-host",
        5000,
        500,
        ConsumerProtocol.PROTOCOL_TYPE,
        new JoinGroupRequestData.JoinGroupRequestProtocolCollection(List.of(
            new JoinGroupRequestData.JoinGroupRequestProtocol()
                .setName("range")
                .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(
                    Arrays.asList(fooTopicName, barTopicName),
                    null,
Arrays.asList(
    new TopicPartition(fooTopicName, 0),
    new TopicPartition(barTopicName, 0))))))
).iterator()),
Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(
    new TopicPartition(fooTopicName, 0),
    new TopicPartition(barTopicName, 0)
))))
);
classicGroup.add(member);

ConsumerGroup consumerGroup = ConsumerGroup.fromClassicGroup(
    new SnapshotRegistry(logContext),
    classicGroup,
    new HashMap<>(),
    metadataImage
);

// Build the expected converted group by hand for comparison.
ConsumerGroup expectedConsumerGroup = new ConsumerGroup(
    new SnapshotRegistry(logContext),
    groupId
);
expectedConsumerGroup.setGroupEpoch(10);
expectedConsumerGroup.setTargetAssignmentEpoch(10);
expectedConsumerGroup.updateTargetAssignment(memberId, new Assignment(mkAssignment(
    mkTopicAssignment(fooTopicId, 0)
)));
expectedConsumerGroup.setMetadataHash(computeGroupHash(Map.of(
    fooTopicName, computeTopicHash(fooTopicName, metadataImage),
    barTopicName, computeTopicHash(barTopicName, metadataImage)
)));
expectedConsumerGroup.updateMember(new ConsumerGroupMember.Builder(memberId)
    .setMemberEpoch(classicGroup.generationId())
    .setState(MemberState.STABLE)
    .setPreviousMemberEpoch(classicGroup.generationId())
    .setInstanceId(null)
    .setRackId(null)
    .setRebalanceTimeoutMs(member.rebalanceTimeoutMs())
    .setClientId(member.clientId())
    .setClientHost(member.clientHost())
    .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName))
    .setAssignedPartitions(mkAssignment(
        mkTopicAssignment(fooTopicId, 0),
        mkTopicAssignment(barTopicId, 0)))
    .setClassicMemberMetadata(
        new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
            .setSessionTimeoutMs(member.sessionTimeoutMs())
            .setSupportedProtocols(List.of(
                new ConsumerGroupMemberMetadataValue.ClassicProtocol()
                    .setName("range")
                    .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(
                        Arrays.asList(fooTopicName, barTopicName),
                        null,
                        Arrays.asList(
                            new TopicPartition(fooTopicName, 0),
                            new TopicPartition(barTopicName,
0))))))))) .build()); assertEquals(expectedConsumerGroup.groupId(), consumerGroup.groupId()); assertEquals(expectedConsumerGroup.groupEpoch(), consumerGroup.groupEpoch()); assertEquals(expectedConsumerGroup.state(), consumerGroup.state()); assertEquals(expectedConsumerGroup.preferredServerAssignor(), consumerGroup.preferredServerAssignor()); assertEquals(expectedConsumerGroup.members(), consumerGroup.members()); } @Test public void testSubscribedRegularExpressionCount() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setSubscribedTopicRegex("regex1") .build(); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setSubscribedTopicRegex("regex2") .build(); ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setSubscribedTopicRegex("regex1") .build(); ConsumerGroupMember member4 = new ConsumerGroupMember.Builder("member4") .build(); // Assert the initial state. assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 1. consumerGroup.updateMember(member1); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 2. consumerGroup.updateMember(member2); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 3. 
consumerGroup.updateMember(member3); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(2, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 4. consumerGroup.updateMember(member4); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(2, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Update member 3. member3 = new ConsumerGroupMember.Builder(member3) .setSubscribedTopicRegex("regex2") .build(); consumerGroup.updateMember(member3); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(2, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 1. consumerGroup.removeMember(member1.memberId()); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(2, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 2. consumerGroup.removeMember(member2.memberId()); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 3. 
consumerGroup.removeMember(member3.memberId()); assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); } @Test public void testUpdateAndRemoveRegularExpression() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .build(); consumerGroup.updateMember(member2); // Verify initial state. assertEquals( Map.of( "foo", new SubscriptionCount(2, 0), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); // Add a regex. consumerGroup.updateResolvedRegularExpression( "foo|bar", new ResolvedRegularExpression( Set.of("foo", "bar"), 10L, 12345L ) ); assertEquals( Map.of( "foo", new SubscriptionCount(2, 1), "bar", new SubscriptionCount(2, 1), "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); // Add a regex. consumerGroup.updateResolvedRegularExpression( "foobar", new ResolvedRegularExpression( Set.of("foobar"), 10L, 12345L ) ); assertEquals( Map.of( "foo", new SubscriptionCount(2, 1), "bar", new SubscriptionCount(2, 1), "zar", new SubscriptionCount(1, 0), "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); // Update a regex. 
consumerGroup.updateResolvedRegularExpression( "foo|bar", new ResolvedRegularExpression( Set.of("foo"), 10L, 12345L ) ); assertEquals( Map.of( "foo", new SubscriptionCount(2, 1), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0), "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); // Remove a regex. consumerGroup.removeResolvedRegularExpression("foo|bar"); assertEquals( Map.of( "foo", new SubscriptionCount(2, 0), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0), "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); // Remove another regex. consumerGroup.removeResolvedRegularExpression("foobar"); assertEquals( Map.of( "foo", new SubscriptionCount(2, 0), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); } @Test public void testComputeSubscribedTopicNamesWithoutDeletedMembers() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .build(); consumerGroup.updateMember(member2); ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setSubscribedTopicRegex("foo*") .build(); consumerGroup.updateMember(member3); ConsumerGroupMember member4 = new ConsumerGroupMember.Builder("member4") .setSubscribedTopicRegex("foo*") .build(); consumerGroup.updateMember(member4); ConsumerGroupMember member5 = new ConsumerGroupMember.Builder("member5") .setSubscribedTopicRegex("bar*") .build(); consumerGroup.updateMember(member5); ConsumerGroupMember member6 = new ConsumerGroupMember.Builder("member6") .setSubscribedTopicRegex("bar*") .build(); consumerGroup.updateMember(member6); 
consumerGroup.updateResolvedRegularExpression( "foo*", new ResolvedRegularExpression( Set.of("foo", "fooo"), 10L, 12345L ) ); consumerGroup.updateResolvedRegularExpression( "bar*", new ResolvedRegularExpression( Set.of("bar", "barr"), 10L, 12345L ) ); // Verify initial state. assertEquals( Map.of( "foo", new SubscriptionCount(2, 1), "fooo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(2, 1), "barr", new SubscriptionCount(0, 1), "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); // Compute with removed members and regexes. assertEquals( Map.of( "foo", new SubscriptionCount(1, 0), "bar", new SubscriptionCount(1, 1), "barr", new SubscriptionCount(0, 1), "zar", new SubscriptionCount(1, 0) ), consumerGroup.computeSubscribedTopicNamesWithoutDeletedMembers( Set.of(member2, member3, member4, member5), Set.of("foo*") ) ); } @Test public void testComputeSubscribedTopicNames() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setSubscribedTopicNames(List.of("foo", "bar")) .build(); consumerGroup.updateMember(member2); ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setSubscribedTopicNames(List.of("foo")) .setSubscribedTopicRegex("foo*") .build(); consumerGroup.updateMember(member3); consumerGroup.updateResolvedRegularExpression( "foo*", new ResolvedRegularExpression( Set.of("foo", "fooo"), 10L, 12345L ) ); // Verify initial state. assertEquals( Map.of( "foo", new SubscriptionCount(3, 1), "fooo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); // Compute subscribed topic names without changing anything. 
assertEquals( Map.of( "foo", new SubscriptionCount(3, 1), "fooo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.computeSubscribedTopicNames(member3, member3) ); // Compute subscribed topic names with removing the regex. assertEquals( Map.of( "foo", new SubscriptionCount(3, 0), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.computeSubscribedTopicNames( member3, new ConsumerGroupMember.Builder(member3) .setSubscribedTopicRegex("") .build() ) ); // Compute subscribed topic names with removing the names. assertEquals( Map.of( "foo", new SubscriptionCount(2, 1), "fooo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.computeSubscribedTopicNames( member3, new ConsumerGroupMember.Builder(member3) .setSubscribedTopicNames(List.of()) .build() ) ); // Compute subscribed topic names with removing both. assertEquals( Map.of( "foo", new SubscriptionCount(2, 0), "bar", new SubscriptionCount(2, 0), "zar", new SubscriptionCount(1, 0) ), consumerGroup.computeSubscribedTopicNames( member3, new ConsumerGroupMember.Builder(member3) .setSubscribedTopicNames(List.of()) .setSubscribedTopicRegex("") .build() ) ); } @Test public void testCreateGroupTombstoneRecords() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); consumerGroup.setGroupEpoch(10); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setMemberEpoch(10) .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setMemberEpoch(10) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member2); 
ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setMemberEpoch(10) .setSubscribedTopicRegex("foo*") .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member3); consumerGroup.updateResolvedRegularExpression( "foo*", new ResolvedRegularExpression( Set.of("foo", "fooo"), 10L, 12345L ) ); consumerGroup.updateTargetAssignment("member1", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); consumerGroup.updateTargetAssignment("member2", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); consumerGroup.updateTargetAssignment("member3", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); List<CoordinatorRecord> records = new ArrayList<>(); consumerGroup.createGroupTombstoneRecords(records); assertUnorderedRecordsEquals( List.of( List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member3") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member3") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("foo") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member3") ), List.of( 
GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone("foo", "foo*") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord("foo") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("foo") ) ), records ); } @Test public void testCreateGroupTombstoneRecordsWithReplacedMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); consumerGroup.setGroupEpoch(10); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") .setMemberEpoch(10) .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") .setMemberEpoch(10) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member2); ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setMemberEpoch(10) .setSubscribedTopicRegex("foo*") .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) .build(); consumerGroup.updateMember(member3); consumerGroup.updateResolvedRegularExpression( "foo*", new ResolvedRegularExpression( Set.of("foo", "fooo"), 10L, 12345L ) ); consumerGroup.updateTargetAssignment("member1", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); consumerGroup.updateTargetAssignment("member2", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); consumerGroup.updateTargetAssignment("member3", new Assignment(mkAssignment( mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) )); List<CoordinatorRecord> records = new ArrayList<>(); consumerGroup.createGroupTombstoneRecordsWithReplacedMember(records, "member3", "member4"); assertUnorderedRecordsEquals( List.of( List.of( 
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member4") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member4") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("foo") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member1"), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member2"), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member4") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone("foo", "foo*") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord("foo") ), List.of( GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("foo") ) ), records ); } @Test public void testSubscriptionType() { assertEquals( HOMOGENEOUS, ConsumerGroup.subscriptionType( Map.of(), Map.of(), 0 ) ); assertEquals( HOMOGENEOUS, ConsumerGroup.subscriptionType( Map.of(), Map.of("foo", new SubscriptionCount(5, 0)), 5 ) ); assertEquals( HETEROGENEOUS, ConsumerGroup.subscriptionType( Map.of(), Map.of( "foo", new SubscriptionCount(4, 0), "bar", new SubscriptionCount(1, 0) ), 5 ) ); assertEquals( HOMOGENEOUS, ConsumerGroup.subscriptionType( Map.of("foo*", 5), Map.of("foo", new SubscriptionCount(0, 1)), 5 ) ); assertEquals( HOMOGENEOUS, ConsumerGroup.subscriptionType( Map.of("foo*", 5), Map.of( "foo", new SubscriptionCount(0, 1), "food", new 
SubscriptionCount(0, 1)), 5 ) ); assertEquals( HETEROGENEOUS, ConsumerGroup.subscriptionType( Map.of("foo*", 5), Map.of("foo", new SubscriptionCount(1, 1)), 5 ) ); assertEquals( HETEROGENEOUS, ConsumerGroup.subscriptionType( Map.of("foo*", 5), Map.of( "foo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(1, 0) ), 5 ) ); assertEquals( HETEROGENEOUS, ConsumerGroup.subscriptionType( Map.of("foo*", 4, "bar*", 1), Map.of( "foo", new SubscriptionCount(0, 1), "bar", new SubscriptionCount(0, 1)), 5 ) ); } @Test public void testComputeSubscribedRegularExpressions() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); consumerGroup.setGroupEpoch(10); consumerGroup.updateMember(new ConsumerGroupMember.Builder("m1") .setSubscribedTopicRegex("foo*") .build()); consumerGroup.updateMember(new ConsumerGroupMember.Builder("m2") .setSubscribedTopicRegex("foo*") .build()); assertEquals( Map.of("foo*", 3), consumerGroup.computeSubscribedRegularExpressions( null, new ConsumerGroupMember.Builder("m3") .setSubscribedTopicRegex("foo*") .build() ) ); assertEquals( Map.of("foo*", 1), consumerGroup.computeSubscribedRegularExpressions( new ConsumerGroupMember.Builder("m2") .setSubscribedTopicRegex("foo*") .build(), null ) ); assertEquals( Map.of("foo*", 2, "bar*", 1), consumerGroup.computeSubscribedRegularExpressions( null, new ConsumerGroupMember.Builder("m4") .setSubscribedTopicRegex("bar*") .build() ) ); assertEquals( Map.of("foo*", 1, "bar*", 1), consumerGroup.computeSubscribedRegularExpressions( new ConsumerGroupMember.Builder("m2") .setSubscribedTopicRegex("foo*") .build(), new ConsumerGroupMember.Builder("m2") .setSubscribedTopicRegex("bar*") .build() ) ); } @Test public void testComputeMetadataHash() { CoordinatorMetadataImage metadataImage = new MetadataImageBuilder() .addTopic(Uuid.randomUuid(), "foo", 1) .addTopic(Uuid.randomUuid(), "bar", 1) .addRacks() .buildCoordinatorMetadataImage(); Map<String, Long> cache = new HashMap<>(); assertEquals( 
computeGroupHash(Map.of( "foo", computeTopicHash("foo", metadataImage), "bar", computeTopicHash("bar", metadataImage) )), ModernGroup.computeMetadataHash( Map.of( "foo", new SubscriptionCount(1, 0), "bar", new SubscriptionCount(1, 0) ), cache, metadataImage ) ); assertEquals( Map.of( "foo", computeTopicHash("foo", metadataImage), "bar", computeTopicHash("bar", metadataImage) ), cache ); } @Test public void testComputeMetadataHashUseCacheData() { // Use hash map because topic hash cache cannot be immutable. Map<String, Long> cache = new HashMap<>(); cache.put("foo", 1234L); cache.put("bar", 4321L); assertEquals( computeGroupHash(cache), ModernGroup.computeMetadataHash( Map.of( "foo", new SubscriptionCount(1, 0), "bar", new SubscriptionCount(1, 0) ), cache, new KRaftCoordinatorMetadataImage(new MetadataImageBuilder() .addTopic(Uuid.randomUuid(), "foo", 1) .addTopic(Uuid.randomUuid(), "bar", 1) .addRacks() .build()) ) ); assertEquals( Map.of( "foo", 1234L, "bar", 4321L ), cache ); } @Test public void testComputeMetadataHashIgnoreTopicHashIfItIsNotInMetadataImage() { // Use hash map because topic hash cache cannot be immutable. // The zar is not in metadata image, so it should not be used. Map<String, Long> cache = new HashMap<>(); cache.put("foo", 1234L); cache.put("bar", 4321L); cache.put("zar", 0L); assertEquals( computeGroupHash(Map.of( "foo", 1234L, "bar", 4321L )), ModernGroup.computeMetadataHash( Map.of( "foo", new SubscriptionCount(1, 0), "bar", new SubscriptionCount(1, 0) ), cache, new KRaftCoordinatorMetadataImage(new MetadataImageBuilder() .addTopic(Uuid.randomUuid(), "foo", 1) .addTopic(Uuid.randomUuid(), "bar", 1) .addRacks() .build()) ) ); // Although the zar is not in metadata image, it should not be removed from computeMetadataHash function. assertEquals( Map.of( "foo", 1234L, "bar", 4321L, "zar", 0L ), cache ); } }
ConsumerGroupTest
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamGroupedReduceAsyncStateOperator.java
{ "start": 1470, "end": 3079 }
class ____<IN> extends AbstractAsyncStateUdfStreamOperator<IN, ReduceFunction<IN>> implements OneInputStreamOperator<IN, IN> { private static final long serialVersionUID = 1L; private static final String STATE_NAME = "_op_state"; private transient ValueState<IN> values; private final TypeSerializer<IN> serializer; public StreamGroupedReduceAsyncStateOperator( ReduceFunction<IN> reducer, TypeSerializer<IN> serializer) { super(reducer); this.serializer = serializer; } @Override public void open() throws Exception { super.open(); ValueStateDescriptor<IN> stateId = new ValueStateDescriptor<>(STATE_NAME, serializer); values = getRuntimeContext().getValueState(stateId); } @Override public void processElement(StreamRecord<IN> element) throws Exception { IN value = element.getValue(); values.asyncValue() .thenAccept( currentValue -> { if (currentValue != null) { IN reduced = userFunction.reduce(currentValue, value); values.asyncUpdate(reduced) .thenAccept(e -> output.collect(element.replace(reduced))); } else { values.asyncUpdate(value) .thenAccept(e -> output.collect(element.replace(value))); } }); } }
StreamGroupedReduceAsyncStateOperator
java
bumptech__glide
library/test/src/test/java/com/bumptech/glide/load/engine/EngineTest.java
{ "start": 2059, "end": 20277 }
class ____ { private EngineTestHarness harness; @Before public void setUp() { harness = new EngineTestHarness(); } @Test public void testNewRunnerIsCreatedAndPostedWithNoExistingLoad() { harness.doLoad(); verify(harness.job).start((DecodeJob) any()); } @Test public void testCallbackIsAddedToNewEngineJobWithNoExistingLoad() { harness.doLoad(); verify(harness.job).addCallback(eq(harness.cb), any(Executor.class)); } @Test public void testLoadStatusIsReturnedForNewLoad() { assertNotNull(harness.doLoad()); } @Test public void testEngineJobReceivesRemoveCallbackFromLoadStatus() { Engine.LoadStatus loadStatus = harness.doLoad(); loadStatus.cancel(); verify(harness.job).removeCallback(eq(harness.cb)); } @Test public void testNewRunnerIsAddedToRunnersMap() { harness.doLoad(); assertThat(harness.jobs.getAll()).containsKey(harness.cacheKey); } @Test public void testNewRunnerIsNotCreatedAndPostedWithExistingLoad() { harness.doLoad(); harness.doLoad(); verify(harness.job, times(1)).start((DecodeJob) any()); } @Test public void testCallbackIsAddedToExistingRunnerWithExistingLoad() { harness.doLoad(); ResourceCallback newCallback = mock(ResourceCallback.class); harness.cb = newCallback; harness.doLoad(); verify(harness.job).addCallback(eq(newCallback), any(Executor.class)); } @Test public void testLoadStatusIsReturnedForExistingJob() { harness.doLoad(); Engine.LoadStatus loadStatus = harness.doLoad(); assertNotNull(loadStatus); } @Test public void testResourceIsReturnedFromActiveResourcesIfPresent() { harness.activeResources.activate(harness.cacheKey, harness.resource); harness.doLoad(); verify(harness.cb) .onResourceReady(eq(harness.resource), eq(DataSource.MEMORY_CACHE), eq(false)); } @Test public void testResourceIsAcquiredIfReturnedFromActiveResources() { harness.activeResources.activate(harness.cacheKey, harness.resource); harness.doLoad(); verify(harness.resource).acquire(); } @Test public void testNewLoadIsNotStartedIfResourceIsActive() { 
harness.activeResources.activate(harness.cacheKey, harness.resource); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); } @Test public void testNullLoadStatusIsReturnedIfResourceIsActive() { harness.activeResources.activate(harness.cacheKey, harness.resource); assertNull(harness.doLoad()); } @Test public void load_withResourceInActiveResources_doesNotCheckMemoryCache() { harness.activeResources.activate(harness.cacheKey, harness.resource); harness.doLoad(); verify(harness.cb) .onResourceReady(eq(harness.resource), eq(DataSource.MEMORY_CACHE), eq(false)); verify(harness.cache, never()).remove(any(Key.class)); } @Test public void testActiveResourcesIsNotCheckedIfNotMemoryCacheable() { harness.activeResources.activate(harness.cacheKey, harness.resource); harness.isMemoryCacheable = false; harness.doLoad(); verify(harness.resource, never()).acquire(); verify(harness.job).start((DecodeJob) any()); } @Test public void testCacheIsCheckedIfMemoryCacheable() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.doLoad(); verify(harness.cb) .onResourceReady(eq(harness.resource), eq(DataSource.MEMORY_CACHE), eq(false)); } @Test public void testCacheIsNotCheckedIfNotMemoryCacheable() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.isMemoryCacheable = false; harness.doLoad(); verify(harness.job).start((DecodeJob) any()); } @Test public void testResourceIsReturnedFromCacheIfPresent() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.doLoad(); verify(harness.cb) .onResourceReady(eq(harness.resource), eq(DataSource.MEMORY_CACHE), eq(false)); } @Test public void testHandlesNonEngineResourcesFromCacheIfPresent() { final Object expected = new Object(); @SuppressWarnings("rawtypes") Resource fromCache = mockResource(); when(fromCache.get()).thenReturn(expected); when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(fromCache); doAnswer( new 
Answer<Void>() { @Override public Void answer(InvocationOnMock invocationOnMock) { Resource<?> resource = (Resource<?>) invocationOnMock.getArguments()[0]; assertEquals(expected, resource.get()); return null; } }) .when(harness.cb) .onResourceReady(anyResource(), isADataSource(), anyBoolean()); harness.doLoad(); verify(harness.cb).onResourceReady(anyResource(), isADataSource(), anyBoolean()); } @Test public void testResourceIsAddedToActiveResourceIfReturnedFromCache() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.doLoad(); EngineResource<?> activeResource = harness.activeResources.get(harness.cacheKey); assertThat(activeResource).isEqualTo(harness.resource); } @Test public void testResourceIsAcquiredIfReturnedFromCache() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.doLoad(); verify(harness.resource).acquire(); } @Test public void testNewLoadIsNotStartedIfResourceIsCached() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); } @Test public void testNullLoadStatusIsReturnedForCachedResource() { when(harness.cache.remove(eq(harness.cacheKey))).thenReturn(harness.resource); Engine.LoadStatus loadStatus = harness.doLoad(); assertNull(loadStatus); } @Test public void testRunnerIsRemovedFromRunnersOnEngineNotifiedJobComplete() { harness.doLoad(); harness.callOnEngineJobComplete(); assertThat(harness.jobs.getAll()).doesNotContainKey(harness.cacheKey); } @Test public void testEngineIsNotSetAsResourceListenerIfResourceIsNullOnJobComplete() { harness.doLoad(); harness.getEngine().onEngineJobComplete(harness.job, harness.cacheKey, /* resource= */ null); } @Test public void testResourceIsAddedToActiveResourcesOnEngineComplete() { when(harness.resource.isMemoryCacheable()).thenReturn(true); harness.callOnEngineJobComplete(); EngineResource<?> resource = 
harness.activeResources.get(harness.cacheKey); assertThat(harness.resource).isEqualTo(resource); } @Test public void testDoesNotPutNullResourceInActiveResourcesOnEngineComplete() { harness.getEngine().onEngineJobComplete(harness.job, harness.cacheKey, /* resource= */ null); assertThat(harness.activeResources.get(harness.cacheKey)).isNull(); } @Test public void testDoesNotPutResourceThatIsNotCacheableInActiveResourcesOnEngineComplete() { when(harness.resource.isMemoryCacheable()).thenReturn(false); harness.callOnEngineJobComplete(); assertThat(harness.activeResources.get(harness.cacheKey)).isNull(); } @Test public void testRunnerIsRemovedFromRunnersOnEngineNotifiedJobCancel() { harness.doLoad(); harness.getEngine().onEngineJobCancelled(harness.job, harness.cacheKey); assertThat(harness.jobs.getAll()).doesNotContainKey(harness.cacheKey); } @Test public void testJobIsNotRemovedFromJobsIfOldJobIsCancelled() { harness.doLoad(); harness.getEngine().onEngineJobCancelled(mock(EngineJob.class), harness.cacheKey); assertEquals(harness.job, harness.jobs.get(harness.cacheKey, harness.onlyRetrieveFromCache)); } @Test public void testResourceIsAddedToCacheOnReleased() { final Object expected = new Object(); when(harness.resource.isMemoryCacheable()).thenReturn(true); when(harness.resource.get()).thenReturn(expected); doAnswer( new Answer<Void>() { @Override public Void answer(InvocationOnMock invocationOnMock) { Resource<?> resource = (Resource<?>) invocationOnMock.getArguments()[1]; assertEquals(expected, resource.get()); return null; } }) .when(harness.cache) .put(eq(harness.cacheKey), anyResource()); harness.getEngine().onResourceReleased(harness.cacheKey, harness.resource); verify(harness.cache).put(eq(harness.cacheKey), anyResource()); } @Test public void testResourceIsNotAddedToCacheOnReleasedIfNotCacheable() { when(harness.resource.isMemoryCacheable()).thenReturn(false); harness.getEngine().onResourceReleased(harness.cacheKey, harness.resource); verify(harness.cache, 
never()).put(eq(harness.cacheKey), eq(harness.resource)); } @Test public void testResourceIsRecycledIfNotCacheableWhenReleased() { when(harness.resource.isMemoryCacheable()).thenReturn(false); harness.getEngine().onResourceReleased(harness.cacheKey, harness.resource); verify(harness.resourceRecycler).recycle(eq(harness.resource), eq(false)); } @Test public void testResourceIsRemovedFromActiveResourcesWhenReleased() { harness.activeResources.activate(harness.cacheKey, harness.resource); harness.getEngine().onResourceReleased(harness.cacheKey, harness.resource); assertThat(harness.activeResources.get(harness.cacheKey)).isNull(); } @Test public void testEngineAddedAsListenerToMemoryCache() { harness.getEngine(); verify(harness.cache).setResourceRemovedListener(eq(harness.getEngine())); } @Test public void testResourceIsRecycledWhenRemovedFromCache() { harness.getEngine().onResourceRemoved(harness.resource); verify(harness.resourceRecycler).recycle(eq(harness.resource), eq(true)); } @Test public void testJobIsPutInJobWithCacheKeyWithRelevantIds() { harness.doLoad(); assertThat(harness.jobs.getAll()).containsEntry(harness.cacheKey, harness.job); } @Test public void testKeyFactoryIsGivenNecessaryArguments() { harness.doLoad(); verify(harness.keyFactory) .buildKey( eq(harness.model), eq(harness.signature), eq(harness.width), eq(harness.height), eq(harness.transformations), eq(Object.class), eq(Object.class), eq(harness.options)); } @Test public void testFactoryIsGivenNecessaryArguments() { harness.doLoad(); verify(harness.engineJobFactory) .build( eq(harness.cacheKey), eq(true) /*isMemoryCacheable*/, eq(false) /*useUnlimitedSourceGeneratorPool*/, /* useAnimationPool= */ eq(false), /* onlyRetrieveFromCache= */ eq(false)); } @Test public void testFactoryIsGivenNecessaryArgumentsWithUnlimitedPool() { harness.useUnlimitedSourceGeneratorPool = true; harness.doLoad(); verify(harness.engineJobFactory) .build( eq(harness.cacheKey), eq(true) /*isMemoryCacheable*/, eq(true) 
/*useUnlimitedSourceGeneratorPool*/, /* useAnimationPool= */ eq(false), /* onlyRetrieveFromCache= */ eq(false)); } @Test public void testReleaseReleasesEngineResource() { EngineResource<Object> engineResource = mock(EngineResource.class); harness.getEngine().release(engineResource); verify(engineResource).release(); } @Test(expected = IllegalArgumentException.class) public void testThrowsIfAskedToReleaseNonEngineResource() { harness.getEngine().release(mockResource()); } @Test public void load_whenCalledOnBackgroundThread_doesNotThrow() throws InterruptedException { BackgroundUtil.testInBackground( new BackgroundUtil.BackgroundTester() { @Override public void runTest() { harness.doLoad(); } }); } @Test public void load_afterResourceIsLoadedInActiveResources_returnsFromMemoryCache() { when(harness.resource.isMemoryCacheable()).thenReturn(true); doAnswer( new Answer<Object>() { @Override public Object answer(InvocationOnMock invocationOnMock) { harness.callOnEngineJobComplete(); return null; } }) .when(harness.job) .start(anyDecodeJobOrNull()); harness.doLoad(); harness.doLoad(); verify(harness.cb).onResourceReady(any(Resource.class), eq(DataSource.MEMORY_CACHE), eq(false)); } @Test public void load_afterResourceIsLoadedAndReleased_returnsFromMemoryCache() { harness.cache = new LruResourceCache(100); when(harness.resource.isMemoryCacheable()).thenReturn(true); doAnswer( new Answer<Object>() { @Override public Object answer(InvocationOnMock invocationOnMock) { harness.callOnEngineJobComplete(); return null; } }) .when(harness.job) .start(anyDecodeJobOrNull()); harness.doLoad(); harness.getEngine().onResourceReleased(harness.cacheKey, harness.resource); harness.doLoad(); verify(harness.cb).onResourceReady(any(Resource.class), eq(DataSource.MEMORY_CACHE), eq(false)); } @Test public void load_withOnlyRetrieveFromCache_andPreviousNormalLoad_startsNewLoad() { EngineJob<?> first = harness.job; harness.doLoad(); EngineJob<?> second = mock(EngineJob.class); harness.job = 
second; harness.onlyRetrieveFromCache = true; harness.doLoad(); verify(first).start(anyDecodeJobOrNull()); verify(second).start(anyDecodeJobOrNull()); } @Test public void load_withNormalLoad_afterPreviousRetrieveFromCache_startsNewLoad() { EngineJob<?> first = harness.job; harness.onlyRetrieveFromCache = true; harness.doLoad(); EngineJob<?> second = mock(EngineJob.class); harness.job = second; harness.onlyRetrieveFromCache = false; harness.doLoad(); verify(first).start(anyDecodeJobOrNull()); verify(second).start(anyDecodeJobOrNull()); } @Test public void load_afterFinishedOnlyRetrieveFromCache_withPendingNormal_doesNotStartNewLoad() { EngineJob<?> firstNormal = harness.job; harness.doLoad(); harness.job = mock(EngineJob.class); harness.onlyRetrieveFromCache = true; harness.doLoad(); harness.callOnEngineJobComplete(); EngineJob<?> secondNormal = mock(EngineJob.class); harness.job = secondNormal; harness.onlyRetrieveFromCache = false; harness.doLoad(); verify(firstNormal).start(anyDecodeJobOrNull()); verify(secondNormal, never()).start(anyDecodeJobOrNull()); } @Test public void load_afterCancelledOnlyRetrieveFromCache_withPendingNormal_doesNotStartNewLoad() { EngineJob<?> firstNormal = harness.job; harness.doLoad(); harness.job = mock(EngineJob.class); harness.onlyRetrieveFromCache = true; harness.doLoad(); harness.getEngine().onEngineJobCancelled(harness.job, harness.cacheKey); EngineJob<?> secondNormal = mock(EngineJob.class); harness.job = secondNormal; harness.onlyRetrieveFromCache = false; harness.doLoad(); verify(firstNormal).start(anyDecodeJobOrNull()); verify(secondNormal, never()).start(anyDecodeJobOrNull()); } @Test public void load_withOnlyRetrieveFromCache_withOtherRetrieveFromCachePending_doesNotStartNew() { harness.onlyRetrieveFromCache = true; harness.doLoad(); EngineJob<?> second = mock(EngineJob.class); harness.job = second; harness.doLoad(); verify(second, never()).start(anyDecodeJobOrNull()); } @Test public void 
load_withOnlyRetrieveFromCache_afterPreviousFinishedOnlyFromCacheLoad_startsNew() { harness.onlyRetrieveFromCache = true; harness.doLoad(); harness.callOnEngineJobComplete(); EngineJob<?> second = mock(EngineJob.class); harness.job = second; harness.doLoad(); verify(second).start(anyDecodeJobOrNull()); } @Test public void load_withOnlyRetrieveFromCache_afterPreviousCancelledOnlyFromCacheLoad_startsNew() { harness.onlyRetrieveFromCache = true; harness.doLoad(); harness.getEngine().onEngineJobCancelled(harness.job, harness.cacheKey); EngineJob<?> second = mock(EngineJob.class); harness.job = second; harness.doLoad(); verify(second).start(anyDecodeJobOrNull()); } @Test public void onEngineJobComplete_withOldJobForKey_doesNotRemoveJob() { harness.doLoad(); harness .getEngine() .onEngineJobComplete(mock(EngineJob.class), harness.cacheKey, harness.resource); harness.job = mock(EngineJob.class); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); } @Test public void onEngineJobCancelled_withOldJobForKey_doesNotRemoveJob() { harness.doLoad(); harness.getEngine().onEngineJobCancelled(mock(EngineJob.class), harness.cacheKey); harness.job = mock(EngineJob.class); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); } @Test public void onEngineJobComplete_withOnlyRetrieveFromCacheAndOldJobForKey_doesNotRemoveJob() { harness.onlyRetrieveFromCache = true; harness.doLoad(); harness .getEngine() .onEngineJobComplete(mock(EngineJob.class), harness.cacheKey, harness.resource); harness.job = mock(EngineJob.class); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); } @Test public void onEngineJobCancelled_withOnlyRetrieveFromCacheAndOldJobForKey_doesNotRemoveJob() { harness.onlyRetrieveFromCache = true; harness.doLoad(); harness.getEngine().onEngineJobCancelled(mock(EngineJob.class), harness.cacheKey); harness.job = mock(EngineJob.class); harness.doLoad(); verify(harness.job, never()).start(anyDecodeJobOrNull()); 
} @SuppressWarnings({"unchecked", "rawtypes"}) private static DecodeJob anyDecodeJobOrNull() { return any(); } private static
EngineTest
java
apache__kafka
streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableTransformValuesTest.java
{ "start": 23562, "end": 23974 }
class ____ implements ValueTransformerWithKey<String, String, Integer> { private int counter; @Override public void init(final ProcessorContext context) {} @Override public Integer transform(final String readOnlyKey, final String value) { return ++counter; } @Override public void close() {} } private static
StatefulTransformer
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/querycache/StringCompositeKey.java
{ "start": 261, "end": 1253 }
class ____ implements Serializable { private static final long serialVersionUID = 1L; private String substation; private String deviceType; private String device; private String analog; // For some dialects, the sum of a primary key column lengths cannot // be larger than 255 (DB2). Restrict them to a sufficiently // small size. See HHH-8085. @Column( length = 50 ) public String getSubstation() { return substation; } public void setSubstation(String substation) { this.substation = substation; } @Column( length = 50 ) public String getDeviceType() { return deviceType; } public void setDeviceType(String deviceType) { this.deviceType = deviceType; } @Column( length = 50 ) public String getDevice() { return device; } public void setDevice(String device) { this.device = device; } @Column( length = 50 ) public String getAnalog() { return analog; } public void setAnalog(String analog) { this.analog = analog; } }
StringCompositeKey
java
apache__camel
core/camel-support/src/main/java/org/apache/camel/support/builder/TokenXMLExpressionIterator.java
{ "start": 2192, "end": 6769 }
class ____ extends ExpressionAdapter { private static final Pattern NAMESPACE_PATTERN = Pattern.compile("xmlns(:\\w+|)\\s*=\\s*('[^']+'|\"[^\"]+\")"); private static final String SCAN_TOKEN_NS_PREFIX_REGEX = "([^:<>]{1,15}?:|)"; private static final String SCAN_BLOCK_TOKEN_REGEX_TEMPLATE = "<{0}(\\s+[^>]*)?/>|<{0}(\\s+[^>]*)?>(?:(?!(</{0}\\s*>)).)*</{0}\\s*>"; private static final String SCAN_PARENT_TOKEN_REGEX_TEMPLATE = "<{0}(\\s+[^>]*\\s*)?>"; private static final String OPTION_WRAP_TOKEN = "<*>"; private static final String NAMESPACE_SEPERATOR = " "; protected final String tagToken; protected final String inheritNamespaceToken; protected final Expression source; public TokenXMLExpressionIterator(String tagToken, String inheritNamespaceToken) { this(null, tagToken, inheritNamespaceToken); } public TokenXMLExpressionIterator(Expression source, String tagToken, String inheritNamespaceToken) { StringHelper.notEmpty(tagToken, "tagToken"); this.tagToken = tagToken; // namespace token is optional this.inheritNamespaceToken = inheritNamespaceToken; this.source = source; } protected Iterator<?> createIterator(Exchange exchange, InputStream in, String charset) { String tag = tagToken; if (LanguageSupport.hasSimpleFunction(tag)) { tag = exchange.getContext().resolveLanguage("simple").createExpression(tag).evaluate(exchange, String.class); } String inherit = inheritNamespaceToken; if (LanguageSupport.hasSimpleFunction(inherit)) { inherit = exchange.getContext().resolveLanguage("simple").createExpression(inherit).evaluate(exchange, String.class); } // must be XML tokens if (!tag.startsWith("<")) { tag = "<" + tag; } if (!tag.endsWith(">")) { tag = tag + ">"; } if (inherit != null) { if (!inherit.startsWith("<")) { inherit = "<" + inherit; } if (!inherit.endsWith(">")) { inherit = inherit + ">"; } } // must be XML tokens if (!tag.startsWith("<") || !tag.endsWith(">")) { throw new IllegalArgumentException("XML Tag token must be a valid XML tag, was: " + tag); } if (inherit != 
null && (!inherit.startsWith("<") || !inherit.endsWith(">"))) { throw new IllegalArgumentException("Namespace token must be a valid XML token, was: " + inherit); } XMLTokenIterator iterator = new XMLTokenIterator(tag, inherit, in, charset); iterator.init(); return iterator; } @Override public boolean matches(Exchange exchange) { // as a predicate we must close the stream, as we do not return an iterator that can be used // afterwards to iterate the input stream Object value = doEvaluate(exchange, true); return ObjectHelper.evaluateValuePredicate(value); } @Override public Object evaluate(Exchange exchange) { // as we return an iterator to access the input stream, we should not close it return doEvaluate(exchange, false); } /** * Strategy to evaluate the exchange * * @param exchange the exchange * @param closeStream whether to close the stream before returning from this method. * @return the evaluated value */ protected Object doEvaluate(Exchange exchange, boolean closeStream) { InputStream in = null; try { if (source != null) { in = source.evaluate(exchange, InputStream.class); } else { in = exchange.getIn().getBody(InputStream.class); } if (in == null) { throw new InvalidPayloadException(exchange, InputStream.class); } // we may read from a file, and want to support custom charset defined on the exchange String charset = ExchangeHelper.getCharsetName(exchange); return createIterator(exchange, in, charset); } catch (InvalidPayloadException e) { exchange.setException(e); // must close input stream IOHelper.close(in); return null; } finally { if (closeStream) { IOHelper.close(in); } } } /** * Iterator to walk the input stream */ static
TokenXMLExpressionIterator
java
apache__dubbo
dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/exchange/PortUnificationExchanger.java
{ "start": 1645, "end": 3793 }
class ____ { private static final ErrorTypeAwareLogger log = LoggerFactory.getErrorTypeAwareLogger(PortUnificationExchanger.class); private static final ConcurrentMap<String, RemotingServer> servers = new ConcurrentHashMap<>(); public static RemotingServer bind(URL url, ChannelHandler handler) { ConcurrentHashMapUtils.computeIfAbsent(servers, url.getAddress(), addr -> { final AbstractPortUnificationServer server; try { server = getTransporter(url).bind(url, handler); } catch (RemotingException e) { throw new RuntimeException(e); } // server.bind(); return server; }); servers.computeIfPresent(url.getAddress(), (addr, server) -> { ((AbstractPortUnificationServer) server).addSupportedProtocol(url, handler); return server; }); return servers.get(url.getAddress()); } public static AbstractConnectionClient connect(URL url, ChannelHandler handler) { final AbstractConnectionClient connectionClient; try { connectionClient = getTransporter(url).connect(url, handler); } catch (RemotingException e) { throw new RuntimeException(e); } return connectionClient; } public static void close() { final ArrayList<RemotingServer> toClose = new ArrayList<>(servers.values()); servers.clear(); for (RemotingServer server : toClose) { try { server.close(); } catch (Throwable throwable) { log.error(PROTOCOL_ERROR_CLOSE_SERVER, "", "", "Close all port unification server failed", throwable); } } } // for test public static ConcurrentMap<String, RemotingServer> getServers() { return servers; } public static PortUnificationTransporter getTransporter(URL url) { return url.getOrDefaultFrameworkModel() .getExtensionLoader(PortUnificationTransporter.class) .getAdaptiveExtension(); } }
PortUnificationExchanger
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/web/servlet/MockMvc.java
{ "start": 2422, "end": 8770 }
class ____ { static final String MVC_RESULT_ATTRIBUTE = MockMvc.class.getName().concat(".MVC_RESULT_ATTRIBUTE"); private final TestDispatcherServlet servlet; private final Filter[] filters; private final ServletContext servletContext; private @Nullable RequestBuilder defaultRequestBuilder; private @Nullable Charset defaultResponseCharacterEncoding; private List<ResultMatcher> defaultResultMatchers = new ArrayList<>(); private List<ResultHandler> defaultResultHandlers = new ArrayList<>(); /** * Private constructor, not for direct instantiation. * @see org.springframework.test.web.servlet.setup.MockMvcBuilders */ MockMvc(TestDispatcherServlet servlet, Filter... filters) { Assert.notNull(servlet, "DispatcherServlet is required"); Assert.notNull(filters, "Filters cannot be null"); Assert.noNullElements(filters, "Filters cannot contain null values"); this.servlet = servlet; this.filters = filters; this.servletContext = servlet.getServletContext(); } /** * A default request builder merged into every performed request. * @see org.springframework.test.web.servlet.setup.DefaultMockMvcBuilder#defaultRequest(RequestBuilder) */ void setDefaultRequest(@Nullable RequestBuilder requestBuilder) { this.defaultRequestBuilder = requestBuilder; } /** * The default character encoding to be applied to every response. * @see org.springframework.test.web.servlet.setup.ConfigurableMockMvcBuilder#defaultResponseCharacterEncoding(Charset) */ void setDefaultResponseCharacterEncoding(@Nullable Charset defaultResponseCharacterEncoding) { this.defaultResponseCharacterEncoding = defaultResponseCharacterEncoding; } /** * Expectations to assert after every performed request. 
* @see org.springframework.test.web.servlet.setup.DefaultMockMvcBuilder#alwaysExpect(ResultMatcher) */ void setGlobalResultMatchers(List<ResultMatcher> resultMatchers) { Assert.notNull(resultMatchers, "ResultMatcher List is required"); this.defaultResultMatchers = resultMatchers; } /** * General actions to apply after every performed request. * @see org.springframework.test.web.servlet.setup.DefaultMockMvcBuilder#alwaysDo(ResultHandler) */ void setGlobalResultHandlers(List<ResultHandler> resultHandlers) { Assert.notNull(resultHandlers, "ResultHandler List is required"); this.defaultResultHandlers = resultHandlers; } /** * Return the underlying {@link DispatcherServlet} instance that this * {@code MockMvc} was initialized with. * <p>This is intended for use in custom request processing scenario where a * request handling component happens to delegate to the {@code DispatcherServlet} * at runtime and therefore needs to be injected with it. * <p>For most processing scenarios, simply use {@link MockMvc#perform}, * or if you need to configure the {@code DispatcherServlet}, provide a * {@link DispatcherServletCustomizer} to the {@code MockMvcBuilder}. * @since 5.1 */ public DispatcherServlet getDispatcherServlet() { return this.servlet; } /** * Perform a request and return a type that allows chaining further * actions, such as asserting expectations, on the result. 
* @param requestBuilder used to prepare the request to execute; * see static factory methods in * {@link org.springframework.test.web.servlet.request.MockMvcRequestBuilders} * @return an instance of {@link ResultActions} (never {@code null}) * @see org.springframework.test.web.servlet.request.MockMvcRequestBuilders * @see org.springframework.test.web.servlet.result.MockMvcResultMatchers */ public ResultActions perform(RequestBuilder requestBuilder) throws Exception { if (this.defaultRequestBuilder != null && requestBuilder instanceof Mergeable mergeable) { requestBuilder = (RequestBuilder) mergeable.merge(this.defaultRequestBuilder); } MockHttpServletRequest request = requestBuilder.buildRequest(this.servletContext); AsyncContext asyncContext = request.getAsyncContext(); MockHttpServletResponse mockResponse; HttpServletResponse servletResponse; if (asyncContext != null) { servletResponse = (HttpServletResponse) asyncContext.getResponse(); mockResponse = unwrapResponseIfNecessary(servletResponse); } else { mockResponse = new MockHttpServletResponse(); servletResponse = mockResponse; } if (this.defaultResponseCharacterEncoding != null) { mockResponse.setDefaultCharacterEncoding(this.defaultResponseCharacterEncoding.name()); } if (requestBuilder instanceof SmartRequestBuilder smartRequestBuilder) { request = smartRequestBuilder.postProcessRequest(request); } MvcResult mvcResult = new DefaultMvcResult(request, mockResponse); request.setAttribute(MVC_RESULT_ATTRIBUTE, mvcResult); RequestAttributes previousAttributes = RequestContextHolder.getRequestAttributes(); RequestContextHolder.setRequestAttributes(new ServletRequestAttributes(request, servletResponse)); MockFilterChain filterChain = new MockFilterChain(this.servlet, this.filters); filterChain.doFilter(request, servletResponse); if (DispatcherType.ASYNC.equals(request.getDispatcherType()) && asyncContext != null && !request.isAsyncStarted()) { asyncContext.complete(); } applyDefaultResultActions(mvcResult); 
RequestContextHolder.setRequestAttributes(previousAttributes); return new ResultActions() { @Override public ResultActions andExpect(ResultMatcher matcher) throws Exception { matcher.match(mvcResult); return this; } @Override public ResultActions andDo(ResultHandler handler) throws Exception { handler.handle(mvcResult); return this; } @Override public MvcResult andReturn() { return mvcResult; } }; } private MockHttpServletResponse unwrapResponseIfNecessary(ServletResponse servletResponse) { while (servletResponse instanceof HttpServletResponseWrapper wrapper) { servletResponse = wrapper.getResponse(); } Assert.isInstanceOf(MockHttpServletResponse.class, servletResponse); return (MockHttpServletResponse) servletResponse; } private void applyDefaultResultActions(MvcResult mvcResult) throws Exception { for (ResultHandler handler : this.defaultResultHandlers) { handler.handle(mvcResult); } for (ResultMatcher matcher : this.defaultResultMatchers) { matcher.match(mvcResult); } } }
MockMvc
java
quarkusio__quarkus
extensions/hibernate-search-standalone-elasticsearch/runtime/src/main/java/io/quarkus/hibernate/search/standalone/elasticsearch/runtime/management/HibernateSearchStandaloneManagementHandler.java
{ "start": 373, "end": 1851 }
class ____ implements Handler<RoutingContext> { @Override public void handle(RoutingContext routingContext) { ManagedContext requestContext = Arc.container().requestContext(); if (requestContext.isActive()) { doHandle(routingContext); } else { requestContext.activate(); try { doHandle(routingContext); } finally { requestContext.terminate(); } } } private void doHandle(RoutingContext ctx) { HttpServerRequest request = ctx.request(); if (!HttpMethod.POST.equals(request.method())) { errorResponse(ctx, 406, "Http method [" + request.method().name() + "] is not supported. Use [POST] instead."); return; } String contentType = request.getHeader(HttpHeaders.CONTENT_TYPE); if (contentType != null && !contentType.toLowerCase(Locale.ROOT).startsWith("application/json")) { errorResponse(ctx, 406, "Content type [" + contentType + " is not supported. Use [application/json] instead."); return; } new HibernateSearchStandaloneManagementPostRequestProcessor().process(ctx); } private void errorResponse(RoutingContext ctx, int code, String message) { ctx.response() .setStatusCode(code) .setStatusMessage(message) .end(); } }
HibernateSearchStandaloneManagementHandler
java
apache__camel
components/camel-netty/src/test/java/org/apache/camel/component/netty/NettyDisconnectTest.java
{ "start": 1066, "end": 1865 }
class ____ extends BaseNettyTest { @Test public void testCloseSessionWhenComplete() { Object out = template.requestBody("netty:tcp://localhost:{{port}}?sync=true&disconnect=true", "Claus"); assertEquals("Bye Claus", out); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("netty:tcp://localhost:{{port}}?sync=true&disconnect=true").process(new Processor() { public void process(Exchange exchange) { String body = exchange.getIn().getBody(String.class); exchange.getMessage().setBody("Bye " + body); } }); } }; } }
NettyDisconnectTest
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/CompatibleTest.java
{ "start": 187, "end": 552 }
class ____ extends TestCase { public void test_for_issue_3986() throws Exception { String sql = "select 1 from dual;"; List<SQLStatement> stmts = SQLUtils.parseStatements(sql, "mysql"); assertEquals(1, stmts.size()); assertEquals("select 1\n" + "from dual;", stmts.get(0).toLowerCaseString()); } }
CompatibleTest
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/context/TestExecutionListener.java
{ "start": 8720, "end": 8860 }
class ____ the class * for the {@linkplain TestContext#getTestInstance() test instance}. Thus, if * you need consistent access to the
within
java
micronaut-projects__micronaut-core
core-processor/src/main/java/io/micronaut/inject/visitor/VisitorContext.java
{ "start": 11755, "end": 12086 }
enum ____ { JAVA("Java"), GROOVY("Groovy"), KOTLIN("Kotlin"); private final String displayName; Language(String displayName) { this.displayName = displayName; } @Override public String toString() { return displayName; } } }
Language
java
quarkusio__quarkus
extensions/panache/hibernate-orm-rest-data-panache/deployment/src/test/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/security/RolesAllowedPanacheResourceTest.java
{ "start": 738, "end": 4867 }
interface ____ extends PanacheEntityResource<Piece, Long> { boolean delete(Long id); } @Test void testClassLevelSecurity() { // == Method generated for PanacheEntityResource // list method is protected so we should get an HTTP 401 if user is not authenticated given().accept("application/json") .when() .get("/pieces") .then() .statusCode(401); // list method is protected so we should get an HTTP 403 if user doesn't have role 'user' given().accept("application/json") .when() .auth().preemptive().basic("bar", "bar") .get("/pieces") .then() .statusCode(403); // list method is protected so we should get an HTTP 403 if user doesn't have role 'user' given().accept("application/json") .when() .auth().preemptive().basic("foo", "foo") .get("/pieces") .then() .statusCode(200) .body("$", hasSize(2)); // == Explicitly declared resource method // delete method is protected so we should get an HTTP 401 when no user is specified given().accept("application/json") .when() .delete("/pieces/1") .then() .statusCode(401); // delete method is protected so we should get an HTTP 401 when a wrong username and password is specified given().auth().preemptive() .basic("foo", "foo2") .accept("application/json") .when() .delete("/pieces/1") .then() .statusCode(401); // delete method is protected so we should get an HTTP 403 when the 'user' role is missing given().auth().preemptive() .basic("bar", "bar") .accept("application/json") .when() .delete("/pieces/1") .then() .statusCode(403); // delete method is protected so we should get an HTTP 204 when the proper username and password are specified given().auth().preemptive() .basic("foo", "foo") .accept("application/json") .when() .delete("/pieces/1") .then() .statusCode(204); } @Test void testMethodLevelSecurity() { // list method is not protected so we should get an HTTP 200 even if no user is specified given().accept("application/json") .when() .get("/items") .then() .statusCode(200) .body("$", hasSize(2)); // delete method is protected so we 
should get an HTTP 401 when no user is specified given().accept("application/json") .when() .delete("/items/1") .then() .statusCode(401); // delete method is protected so we should get an HTTP 401 when a wrong username and password is specified given().auth().preemptive() .basic("foo", "foo2") .accept("application/json") .when() .delete("/items/1") .then() .statusCode(401); // delete method is protected so we should get an HTTP 403 when the 'user' role is missing given().auth().preemptive() .basic("bar", "bar") .accept("application/json") .when() .delete("/items/1") .then() .statusCode(403); // delete method is protected so we should get an HTTP 204 when the proper username and password are specified given().auth().preemptive() .basic("foo", "foo") .accept("application/json") .when() .delete("/items/1") .then() .statusCode(204); } }
PiecesResource
java
google__dagger
dagger-compiler/main/java/dagger/internal/codegen/binding/OptionalBindingDeclaration.java
{ "start": 1575, "end": 2114 }
class ____ { private final KeyFactory keyFactory; @Inject Factory(KeyFactory keyFactory) { this.keyFactory = keyFactory; } OptionalBindingDeclaration forMethod(XMethodElement method, XTypeElement contributingModule) { checkArgument(method.hasAnnotation(XTypeNames.BINDS_OPTIONAL_OF)); return new AutoValue_OptionalBindingDeclaration( Optional.of(method), Optional.of(contributingModule), keyFactory.forBindsOptionalOfMethod(method, contributingModule)); } } }
Factory
java
reactor__reactor-core
reactor-core/src/test/java/reactor/core/publisher/FluxDematerializeTest.java
{ "start": 1132, "end": 9320 }
class ____ extends FluxOperatorTest<Signal<String>, String> { @Override @SuppressWarnings("unchecked") protected List<Scenario<Signal<String>, String>> scenarios_operatorSuccess() { return Arrays.asList( scenario(Flux::<String>dematerialize) .producer(1, i -> Signal.complete()) .receiverEmpty(), scenario(Flux::<String>dematerialize) .producer(1, i -> Signal.subscribe(Operators.emptySubscription())) .receiverEmpty() ); } @Override protected List<Scenario<Signal<String>, String>> scenarios_operatorError() { return Arrays.asList( scenario(Flux::<String>dematerialize) .producer(1, i -> Signal.error(exception()))); } @Override protected List<Scenario<Signal<String>, String>> scenarios_errorFromUpstreamFailure() { return Arrays.asList( scenario(Flux::<String>dematerialize) ); } @Override protected Scenario<Signal<String>, String> defaultScenarioOptions(Scenario<Signal<String>, String> defaultOptions) { return defaultOptions.producer(3, i -> i == 0 ? Signal.next("test") : Signal.next("test"+i) ).droppedItem(Signal.next("dropped")); } Signal<Integer> error = Signal.error(new RuntimeException("Forced failure")); @Test public void singleCompletion() { AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux<Integer> dematerialize = Flux.just(Signal.<Integer>complete()).dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertNoError() .assertComplete(); } @Test public void singleError() { AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux<Integer> dematerialize = Flux.just(error) .dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertError(RuntimeException.class) .assertNotComplete(); } @Test public void immediateCompletionNeedsRequestOne() { AssertSubscriber<Integer> ts = AssertSubscriber.create(0); Flux<Integer> dematerialize = Flux.just(Signal.<Integer>complete()).dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertNoError() .assertNotComplete(); ts.request(1); ts.assertComplete(); } @Test public 
void immediateErrorNeedsRequestOne() { AssertSubscriber<Integer> ts = AssertSubscriber.create(0); Flux<Integer> dematerialize = Flux.just(error).dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertNotComplete() .assertNoError(); ts.request(1); ts.assertError(RuntimeException.class); } @Test public void doesntCompleteWithoutRequest() { AssertSubscriber<Integer> ts = AssertSubscriber.create(0); Flux<Integer> dematerialize = Flux.just(Signal.next(1), Signal.<Integer>complete()).dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertNoError() .assertNotComplete(); ts.request(1); ts.assertValues(1) .assertNoError() .assertNotComplete(); ts.request(1); ts.assertComplete(); } @Test public void doesntErrorWithoutRequest() { AssertSubscriber<Integer> ts = AssertSubscriber.create(0); Flux<Integer> dematerialize = Flux.just(Signal.next(1), error).dematerialize(); dematerialize.subscribe(ts); ts.assertNoValues() .assertNoError() .assertNotComplete(); ts.request(1); ts.assertValues(1) .assertNoError() .assertNotComplete(); ts.request(1); ts.assertError(RuntimeException.class); } @Test public void twoSignalsAndComplete() { Flux<Integer> dematerialize = Flux.just(Signal.next(1), Signal.next(2), Signal.<Integer>complete()) .dematerialize(); StepVerifier.create(dematerialize, 0) .expectSubscription() .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .expectNext(1) .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .expectNext(2) .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .verifyComplete(); } @Test public void twoSignalsAndError() { Flux<Integer> dematerialize = Flux.just(Signal.next(1), Signal.next(2), error) .dematerialize(); StepVerifier.create(dematerialize, 0) .expectSubscription() .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .expectNext(1) .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .expectNext(2) .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .verifyError(error.getThrowable().getClass()); } 
@Test public void neverEndingSignalSourceWithCompleteSignal() { AssertSubscriber<Integer> ts = AssertSubscriber.create(); Flux<Integer> dematerialize = Flux.just(Signal.next(1), Signal.next(2), Signal.next(3), Signal.<Integer>complete()) .concatWith(Flux.never()) .dematerialize(); dematerialize.subscribe(ts); ts.assertValues(1, 2, 3) .assertNoError() .assertComplete(); } @Test public void dematerializeUnbounded() { StepVerifier.create(Flux.just(Signal.next("Three"), Signal.next("Two"), Signal.next("One"), Signal.complete()) .dematerialize()) .expectNext("Three") .expectNext("Two") .expectNext("One") .verifyComplete(); } @Test public void materializeDematerializeUnbounded() { StepVerifier.create(Flux.just(1, 2, 3).materialize().dematerialize()) .expectNext(1, 2, 3) .verifyComplete(); } @Test public void materializeDematerializeRequestOneByOne() { StepVerifier.create(Flux.just(1, 2, 3).materialize().dematerialize(), 0) .thenRequest(1) .expectNext(1) .thenRequest(1) .expectNext(2) .thenRequest(1) .expectNext(3) .expectNoEvent(Duration.ofMillis(50)) .thenRequest(1) .verifyComplete(); } @Test public void emissionTimingsAreGrouped() { StepVerifier.withVirtualTime(() -> Flux.interval(Duration.ofSeconds(1)) .map(i -> "tick" + i) .take(5, false) .timestamp() .materialize() .<Tuple2<Long, String>>dematerialize() .timestamp() ) .thenAwait(Duration.ofSeconds(5)) .thenConsumeWhile(tupleDematerialize -> { long dematerializeTimestamp = tupleDematerialize.getT1(); long originalTimestamp = tupleDematerialize.getT2().getT1(); return dematerializeTimestamp == originalTimestamp; }) .verifyComplete(); } @Test public void scanOperator(){ Flux<Signal<Integer>> parent = Flux.just(Signal.next(1)); FluxDematerialize<Integer> test = new FluxDematerialize<>(parent); assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent); assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC); } @Test public void scanSubscriber() { CoreSubscriber<String> actual = new 
LambdaSubscriber<>(null, e -> {}, null, sub -> sub.request(100)); FluxDematerialize.DematerializeSubscriber<String> test = new FluxDematerialize.DematerializeSubscriber<>(actual, false); Subscription parent = Operators.emptySubscription(); test.onSubscribe(parent); assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent); assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual); assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC); assertThat(test.scan(Scannable.Attr.ERROR)).isNull(); assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse(); test.onError(new IllegalStateException("boom")); assertThat(test.scan(Scannable.Attr.ERROR)).as("error is not retained").isNull(); assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue(); assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse(); test.cancel(); assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue(); } }
FluxDematerializeTest
java
spring-projects__spring-framework
spring-core/src/test/java/org/springframework/util/concurrent/FutureUtilsTests.java
{ "start": 1125, "end": 3543 }
class ____ { @Test void callAsyncNormal() throws ExecutionException, InterruptedException { String foo = "Foo"; CompletableFuture<String> future = FutureUtils.callAsync(() -> foo); assertThat(future.get()).isEqualTo(foo); assertThat(future.isCancelled()).isFalse(); assertThat(future.isDone()).isTrue(); CountDownLatch latch = new CountDownLatch(1); future.whenComplete((s, throwable) -> { assertThat(s).isEqualTo(foo); assertThat(throwable).isNull(); latch.countDown(); }); latch.await(); } @Test void callAsyncException() throws InterruptedException { RuntimeException ex = new RuntimeException("Foo"); CompletableFuture<String> future = FutureUtils.callAsync(() -> { throw ex; }); assertThatExceptionOfType(ExecutionException.class) .isThrownBy(future::get) .withCause(ex); assertThat(future.isCancelled()).isFalse(); assertThat(future.isDone()).isTrue(); CountDownLatch latch = new CountDownLatch(1); future.whenComplete((s, throwable) -> { assertThat(s).isNull(); assertThat(throwable).isInstanceOf(CompletionException.class) .hasCause(ex); latch.countDown(); }); latch.await(); } @Test void callAsyncNormalExecutor() throws ExecutionException, InterruptedException { String foo = "Foo"; CompletableFuture<String> future = FutureUtils.callAsync(() -> foo, new SimpleAsyncTaskExecutor()); assertThat(future.get()).isEqualTo(foo); assertThat(future.isCancelled()).isFalse(); assertThat(future.isDone()).isTrue(); CountDownLatch latch = new CountDownLatch(1); future.whenComplete((s, throwable) -> { assertThat(s).isEqualTo(foo); assertThat(throwable).isNull(); latch.countDown(); }); latch.await(); } @Test void callAsyncExceptionExecutor() throws InterruptedException { RuntimeException ex = new RuntimeException("Foo"); CompletableFuture<String> future = FutureUtils.callAsync(() -> { throw ex; }, new SimpleAsyncTaskExecutor()); assertThatExceptionOfType(ExecutionException.class) .isThrownBy(future::get) .withCause(ex); assertThat(future.isCancelled()).isFalse(); 
assertThat(future.isDone()).isTrue(); CountDownLatch latch = new CountDownLatch(1); future.whenComplete((s, throwable) -> { assertThat(s).isNull(); assertThat(throwable).isInstanceOf(CompletionException.class) .hasCause(ex); latch.countDown(); }); latch.await(); } }
FutureUtilsTests
java
eclipse-vertx__vert.x
vertx-core/src/main/java/io/vertx/core/streams/StreamBase.java
{ "start": 743, "end": 994 }
interface ____ { /** * Set an exception handler. * * @param handler the handler * @return a reference to this, so the API can be used fluently */ @Fluent StreamBase exceptionHandler(@Nullable Handler<Throwable> handler); }
StreamBase
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/MockInputSplitProvider.java
{ "start": 1318, "end": 2980 }
class ____ implements InputSplitProvider { /** The input splits to be served during the test. */ private volatile InputSplit[] inputSplits; /** Index pointing to the next input split to be served. */ private int nextSplit = 0; /** * Generates a set of input splits from an input path * * @param path the path of the local file to generate the input splits from * @param noSplits the number of input splits to be generated from the given input file */ public void addInputSplits(final String path, final int noSplits) { final InputSplit[] tmp = new InputSplit[noSplits]; final String[] hosts = {"localhost"}; final String localPath; try { localPath = new URI(path).getPath(); } catch (URISyntaxException e) { throw new IllegalArgumentException("Path URI can not be transformed to local path."); } final File inFile = new File(localPath); final long splitLength = inFile.length() / noSplits; long pos = 0; for (int i = 0; i < noSplits - 1; i++) { tmp[i] = new FileInputSplit(i, new Path(path), pos, splitLength, hosts); pos += splitLength; } tmp[noSplits - 1] = new FileInputSplit(noSplits - 1, new Path(path), pos, inFile.length() - pos, hosts); this.inputSplits = tmp; } @Override public InputSplit getNextInputSplit(ClassLoader userCodeClassLoader) { if (this.nextSplit < this.inputSplits.length) { return this.inputSplits[this.nextSplit++]; } return null; } }
MockInputSplitProvider
java
hibernate__hibernate-orm
tooling/metamodel-generator/src/quarkusOrmPanache/java/org/hibernate/processor/test/ormPanache/PanacheBookRepository.java
{ "start": 407, "end": 628 }
class ____ implements PanacheRepository<PanacheBook> { @Find public native List<PanacheBook> findBook(String isbn); @HQL("WHERE isbn = :isbn") public native List<PanacheBook> hqlBook(String isbn); }
PanacheBookRepository
java
apache__hadoop
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java
{ "start": 13036, "end": 14639 }
enum ____ { TRUE, FALSE } @Test public void testEnumPrecedence() throws IllegalAccessException, IOException, InvalidConfigurationValueException { final String accountName = "account"; final String globalKey = "fs.azure.enum"; final String accountKey = globalKey + "." + accountName; final Configuration conf = new Configuration(); final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); conf.setEnum(globalKey, GetEnumType.FALSE); assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE, "Default value returned even though account-agnostic config was set"); conf.unset(globalKey); assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE, "Default value not returned even though config was unset"); conf.setEnum(accountKey, GetEnumType.FALSE); assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE, "Default value returned even though account-specific config was set"); conf.unset(accountKey); assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE, "Default value not returned even though config was unset"); conf.setEnum(accountKey, GetEnumType.TRUE); conf.setEnum(globalKey, GetEnumType.FALSE); assertEquals(abfsConf.getEnum(globalKey, GetEnumType.FALSE), GetEnumType.TRUE, "Account-agnostic or default value returned even though account-specific config was set"); } /** * Dummy type used for testing handling of classes in configuration. */
GetEnumType
java
quarkusio__quarkus
extensions/resteasy-reactive/rest/runtime/src/main/java/io/quarkus/resteasy/reactive/server/runtime/QuarkusContextProducers.java
{ "start": 738, "end": 1289 }
class ____ { @RequestScoped @Produces HttpServerResponse httpServerResponse() { return CurrentRequestManager.get().serverRequest().unwrap(HttpServerResponse.class); } @ApplicationScoped @Produces Providers providers() { return new ProvidersImpl(ResteasyReactiveRecorder.getCurrentDeployment()); } @RequestScoped @Produces CloserImpl closer() { return new CloserImpl(); } void closeCloser(@Disposes CloserImpl closer) { closer.close(); } }
QuarkusContextProducers
java
google__error-prone
check_api/src/main/java/com/google/errorprone/VisitorState.java
{ "start": 9950, "end": 12165 }
interface ____ require updating the // existing implementations, though. Wait for default methods? SeverityLevel override = sharedState.severityMap.get(description.checkName); if (override != null) { description = description.applySeverityOverride(override); } sharedState.statisticsCollector.incrementCounter(statsKey(description.checkName + "-findings")); // TODO(glorioso): I believe it is correct to still emit regular findings since the // Scanner configured the visitor state to explicitly scan suppressed nodes, but perhaps // we can add a 'suppressed' field to Description to allow the description listener to bucket // them out. sharedState.descriptionListener.onDescribed(description); } private String statsKey(String key) { return suppressedState == SuppressedState.SUPPRESSED ? key + "-suppressed" : key; } /** * Increment the counter for a combination of {@code bugChecker}'s canonical name and {@code key} * by 1. * * <p>e.g.: a key of {@code foo} becomes {@code FooChecker-foo}. */ public void incrementCounter(BugChecker bugChecker, String key) { incrementCounter(bugChecker, key, 1); } /** * Increment the counter for a combination of {@code bugChecker}'s canonical name and {@code key} * by {@code count}. * * <p>e.g.: a key of {@code foo} becomes {@code FooChecker-foo}. */ public void incrementCounter(BugChecker bugChecker, String key, int count) { sharedState.statisticsCollector.incrementCounter( statsKey(bugChecker.canonicalName() + "-" + key), count); } /** * Returns a copy of all of the counters previously added to this VisitorState with {@link * #incrementCounter}. */ public ImmutableMultiset<String> counters() { return sharedState.statisticsCollector.counters(); } public Name getName(String nameStr) { return getNames().fromString(nameStr); } /** * Given the binary name of a class, returns the {@link Type}. * * <p>Prefer not to use this method for constant strings, or strings otherwise known at compile * time. 
Instead, save the result of {@link * com.google.errorprone.suppliers.Suppliers#typeFromString} as a
would
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/ChannelStatePersister.java
{ "start": 1909, "end": 2093 }
class ____ { private static final Logger LOG = LoggerFactory.getLogger(ChannelStatePersister.class); private final InputChannelInfo channelInfo; private
ChannelStatePersister
java
spring-projects__spring-boot
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/SignalUtils.java
{ "start": 807, "end": 1176 }
class ____ { private static final Signal SIG_INT = new Signal("INT"); private SignalUtils() { } /** * Handle {@literal INT} signals by calling the specified {@link Runnable}. * @param runnable the runnable to call on SIGINT. */ public static void attachSignalHandler(Runnable runnable) { Signal.handle(SIG_INT, (signal) -> runnable.run()); } }
SignalUtils
java
apache__rocketmq
broker/src/main/java/org/apache/rocketmq/broker/processor/PopBufferMergeService.java
{ "start": 37555, "end": 41344 }
class ____ { private final int reviveQueueId; // -1: not stored, >=0: stored, Long.MAX: storing. private volatile long reviveQueueOffset; private final PopCheckPoint ck; // bit for concurrent private final AtomicInteger bits; // bit for stored buffer ak private final AtomicInteger toStoreBits; private final long nextBeginOffset; private final String lockKey; private final String mergeKey; private final boolean justOffset; private volatile boolean ckStored = false; public PopCheckPointWrapper(int reviveQueueId, long reviveQueueOffset, PopCheckPoint point, long nextBeginOffset) { this.reviveQueueId = reviveQueueId; this.reviveQueueOffset = reviveQueueOffset; this.ck = point; this.bits = new AtomicInteger(0); this.toStoreBits = new AtomicInteger(0); this.nextBeginOffset = nextBeginOffset; this.lockKey = ck.getTopic() + PopAckConstants.SPLIT + ck.getCId() + PopAckConstants.SPLIT + ck.getQueueId(); this.mergeKey = point.getTopic() + point.getCId() + point.getQueueId() + point.getStartOffset() + point.getPopTime() + point.getBrokerName(); this.justOffset = false; } public PopCheckPointWrapper(int reviveQueueId, long reviveQueueOffset, PopCheckPoint point, long nextBeginOffset, boolean justOffset) { this.reviveQueueId = reviveQueueId; this.reviveQueueOffset = reviveQueueOffset; this.ck = point; this.bits = new AtomicInteger(0); this.toStoreBits = new AtomicInteger(0); this.nextBeginOffset = nextBeginOffset; this.lockKey = ck.getTopic() + PopAckConstants.SPLIT + ck.getCId() + PopAckConstants.SPLIT + ck.getQueueId(); this.mergeKey = point.getTopic() + point.getCId() + point.getQueueId() + point.getStartOffset() + point.getPopTime() + point.getBrokerName(); this.justOffset = justOffset; } public int getReviveQueueId() { return reviveQueueId; } public long getReviveQueueOffset() { return reviveQueueOffset; } public boolean isCkStored() { return ckStored; } public void setReviveQueueOffset(long reviveQueueOffset) { this.reviveQueueOffset = reviveQueueOffset; } public 
PopCheckPoint getCk() { return ck; } public AtomicInteger getBits() { return bits; } public AtomicInteger getToStoreBits() { return toStoreBits; } public long getNextBeginOffset() { return nextBeginOffset; } public String getLockKey() { return lockKey; } public String getMergeKey() { return mergeKey; } public boolean isJustOffset() { return justOffset; } public void setCkStored(boolean ckStored) { this.ckStored = ckStored; } @Override public String toString() { final StringBuilder sb = new StringBuilder("CkWrap{"); sb.append("rq=").append(reviveQueueId); sb.append(", rqo=").append(reviveQueueOffset); sb.append(", ck=").append(ck); sb.append(", bits=").append(bits); sb.append(", sBits=").append(toStoreBits); sb.append(", nbo=").append(nextBeginOffset); sb.append(", cks=").append(ckStored); sb.append(", jo=").append(justOffset); sb.append('}'); return sb.toString(); } } }
PopCheckPointWrapper
java
apache__camel
components/camel-aws/camel-aws2-ddb/src/main/java/org/apache/camel/component/aws2/ddbstream/BigIntComparisons.java
{ "start": 1071, "end": 1623 }
enum ____ implements BigIntComparisons { LT() { @Override public boolean matches(BigInteger first, BigInteger second) { return first.compareTo(second) < 0; } }, LTEQ() { @Override public boolean matches(BigInteger first, BigInteger second) { return first.compareTo(second) <= 0; } } // TODO Add EQ/GTEQ/GT as needed, but note that GTEQ == !LT and GT == // !LTEQ and EQ == (!LT && !GT) } }
Conditions
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java
{ "start": 520, "end": 649 }
class ____ extends RuntimeException { public PausedSnapshotException() { super("paused"); } }
PausedSnapshotException
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
{ "start": 1873, "end": 4689 }
class ____ extends FileOutputFormat<WritableComparable<?>, Writable> { public RecordWriter<WritableComparable<?>, Writable> getRecordWriter( TaskAttemptContext context) throws IOException { Configuration conf = context.getConfiguration(); CompressionCodec codec = null; CompressionType compressionType = CompressionType.NONE; if (getCompressOutput(context)) { // find the kind of compression to do compressionType = SequenceFileOutputFormat.getOutputCompressionType(context); // find the right codec Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class); codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf); } Path file = getDefaultWorkFile(context, ""); FileSystem fs = file.getFileSystem(conf); // ignore the progress parameter, since MapFile is local final MapFile.Writer out = new MapFile.Writer(conf, fs, file.toString(), context.getOutputKeyClass().asSubclass(WritableComparable.class), context.getOutputValueClass().asSubclass(Writable.class), compressionType, codec, context); return new RecordWriter<WritableComparable<?>, Writable>() { public void write(WritableComparable<?> key, Writable value) throws IOException { out.append(key, value); } public void close(TaskAttemptContext context) throws IOException { out.close(); } }; } /** Open the output generated by this format. 
*/ public static MapFile.Reader[] getReaders(Path dir, Configuration conf) throws IOException { FileSystem fs = dir.getFileSystem(conf); PathFilter filter = new PathFilter() { @Override public boolean accept(Path path) { String name = path.getName(); if (name.startsWith("_") || name.startsWith(".")) return false; return true; } }; Path[] names = FileUtil.stat2Paths(fs.listStatus(dir, filter)); // sort names, so that hash partitioning works Arrays.sort(names); MapFile.Reader[] parts = new MapFile.Reader[names.length]; for (int i = 0; i < names.length; i++) { parts[i] = new MapFile.Reader(fs, names[i].toString(), conf); } return parts; } /** Get an entry from output generated by this class. */ public static <K extends WritableComparable<?>, V extends Writable> Writable getEntry(MapFile.Reader[] readers, Partitioner<K, V> partitioner, K key, V value) throws IOException { int readerLength = readers.length; int part; if (readerLength <= 1) { part = 0; } else { part = partitioner.getPartition(key, value, readers.length); } return readers[part].get(key, value); } }
MapFileOutputFormat
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/TahuEdgeEndpointBuilderFactory.java
{ "start": 1461, "end": 1603 }
interface ____ { /** * Builder for endpoint for the Tahu Edge Node / Device component. */ public
TahuEdgeEndpointBuilderFactory
java
spring-projects__spring-framework
spring-jdbc/src/main/java/org/springframework/jdbc/support/CustomSQLExceptionTranslatorRegistrar.java
{ "start": 926, "end": 1945 }
class ____ implements InitializingBean { /** * Map registry to hold custom translators specific databases. * Key is the database product name as defined in the * {@link org.springframework.jdbc.support.SQLErrorCodesFactory}. */ private final Map<String, SQLExceptionTranslator> translators = new HashMap<>(); /** * Setter for a Map of {@link SQLExceptionTranslator} references where the key must * be the database name as defined in the {@code sql-error-codes.xml} file. * <p>Note that any existing translators will remain unless there is a match in the * database name, at which point the new translator will replace the existing one. */ public void setTranslators(Map<String, SQLExceptionTranslator> translators) { this.translators.putAll(translators); } @Override public void afterPropertiesSet() { this.translators.forEach((dbName, translator) -> CustomSQLExceptionTranslatorRegistry.getInstance().registerTranslator(dbName, translator)); } }
CustomSQLExceptionTranslatorRegistrar
java
hibernate__hibernate-orm
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/embeddedid/withoutinheritance/PersonId.java
{ "start": 259, "end": 527 }
class ____ { private String name; private String snn; public String getName() { return name; } public void setName(String name) { this.name = name; } public String getSnn() { return snn; } public void setSnn(String snn) { this.snn = snn; } }
PersonId
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/StreamMultipleInputProcessor.java
{ "start": 1385, "end": 6359 }
class ____ implements StreamInputProcessor { private final MultipleInputSelectionHandler inputSelectionHandler; private final StreamOneInputProcessor<?>[] inputProcessors; private final MultipleFuturesAvailabilityHelper availabilityHelper; /** Always try to read from the first input. */ private int lastReadInputIndex = 1; private boolean isPrepared; public StreamMultipleInputProcessor( MultipleInputSelectionHandler inputSelectionHandler, StreamOneInputProcessor<?>[] inputProcessors) { this.inputSelectionHandler = inputSelectionHandler; this.inputProcessors = inputProcessors; this.availabilityHelper = new MultipleFuturesAvailabilityHelper(inputProcessors.length); } @Override public CompletableFuture<?> getAvailableFuture() { if (inputSelectionHandler.isAnyInputAvailable() || inputSelectionHandler.areAllInputsFinished()) { return AVAILABLE; } availabilityHelper.resetToUnAvailable(); for (int i = 0; i < inputProcessors.length; i++) { if (!inputSelectionHandler.isInputFinished(i) && inputSelectionHandler.isInputSelected(i)) { availabilityHelper.anyOf(i, inputProcessors[i].getAvailableFuture()); } } return availabilityHelper.getAvailableFuture(); } @Override public DataInputStatus processInput() throws Exception { int readingInputIndex; if (isPrepared) { readingInputIndex = selectNextReadingInputIndex(); } else { // the preparations here are not placed in the constructor because all work in it // must be executed after all operators are opened. 
readingInputIndex = selectFirstReadingInputIndex(); } if (readingInputIndex == InputSelection.NONE_AVAILABLE) { return DataInputStatus.NOTHING_AVAILABLE; } lastReadInputIndex = readingInputIndex; DataInputStatus inputStatus = inputProcessors[readingInputIndex].processInput(); return inputSelectionHandler.updateStatusAndSelection(inputStatus, readingInputIndex); } private int selectFirstReadingInputIndex() { // Note: the first call to nextSelection () on the operator must be made after this operator // is opened to ensure that any changes about the input selection in its open() // method take effect. inputSelectionHandler.nextSelection(); isPrepared = true; return selectNextReadingInputIndex(); } @Override public void close() throws IOException { IOException ex = null; for (StreamOneInputProcessor<?> input : inputProcessors) { try { input.close(); } catch (IOException e) { ex = ExceptionUtils.firstOrSuppressed(e, ex); } } if (ex != null) { throw ex; } } @Override public CompletableFuture<Void> prepareSnapshot( ChannelStateWriter channelStateWriter, long checkpointId) throws CheckpointException { CompletableFuture<?>[] inputFutures = new CompletableFuture[inputProcessors.length]; for (int index = 0; index < inputFutures.length; index++) { inputFutures[index] = inputProcessors[index].prepareSnapshot(channelStateWriter, checkpointId); } return CompletableFuture.allOf(inputFutures); } private int selectNextReadingInputIndex() { if (!inputSelectionHandler.isAnyInputAvailable()) { fullCheckAndSetAvailable(); } int readingInputIndex = inputSelectionHandler.selectNextInputIndex(lastReadInputIndex); if (readingInputIndex == InputSelection.NONE_AVAILABLE) { return InputSelection.NONE_AVAILABLE; } // to avoid starvation, if the input selection is ALL and availableInputsMask is not ALL, // always try to check and set the availability of another input if (inputSelectionHandler.shouldSetAvailableForAnotherInput()) { fullCheckAndSetAvailable(); } return readingInputIndex; } 
private void fullCheckAndSetAvailable() { for (int i = 0; i < inputProcessors.length; i++) { StreamOneInputProcessor<?> inputProcessor = inputProcessors[i]; // TODO: isAvailable() can be a costly operation (checking volatile). If one of // the input is constantly available and another is not, we will be checking this // volatile // once per every record. This might be optimized to only check once per processed // NetworkBuffer if (inputProcessor.isApproximatelyAvailable() || inputProcessor.isAvailable()) { inputSelectionHandler.setAvailableInput(i); } } } }
StreamMultipleInputProcessor
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
{ "start": 4672, "end": 6310 }
class ____ extends SimpleTcpClient { public WriteClient(String host, int port, XDR request, Boolean oneShot) { super(host, port, request, oneShot); } @Override protected ChannelInitializer<SocketChannel> setChannelHandler() { return new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); p.addLast( RpcUtil.constructRpcFrameDecoder(), new WriteHandler(request) ); } }; } } public static void main(String[] args) throws InterruptedException { Arrays.fill(data1, (byte) 7); Arrays.fill(data2, (byte) 8); Arrays.fill(data3, (byte) 9); // NFS3 Create request NfsConfiguration conf = new NfsConfiguration(); WriteClient client = new WriteClient("localhost", conf.getInt( NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), create(), false); client.run(); while (handle == null) { Thread.sleep(1000); System.out.println("handle is still null..."); } LOG.info("Send write1 request"); XDR writeReq; writeReq = write(handle, 0x8000005c, 2000, 1000, data3); Nfs3Utils.writeChannel(channel, writeReq, 1); writeReq = write(handle, 0x8000005d, 1000, 1000, data2); Nfs3Utils.writeChannel(channel, writeReq, 2); writeReq = write(handle, 0x8000005e, 0, 1000, data1); Nfs3Utils.writeChannel(channel, writeReq, 3); // TODO: convert to Junit test, and validate result automatically } }
WriteClient
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/version/ZonedDateTimeVersionTest.java
{ "start": 1372, "end": 1761 }
class ____ { private Integer id; private ZonedDateTime ts; public TheEntity() { } public TheEntity(Integer id) { this.id = id; } @Id public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } @Version public ZonedDateTime getTs() { return ts; } public void setTs(ZonedDateTime ts) { this.ts = ts; } } }
TheEntity
java
apache__kafka
connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java
{ "start": 1290, "end": 3167 }
class ____ { private final HoistField<SinkRecord> xform = new HoistField.Key<>(); @AfterEach public void teardown() { xform.close(); } @Test public void schemaless() { xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); assertNull(transformedRecord.keySchema()); assertEquals(Map.of("magic", 42), transformedRecord.key()); } @Test public void withSchema() { xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, Schema.INT32_SCHEMA, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); assertEquals(Schema.Type.STRUCT, transformedRecord.keySchema().type()); assertEquals(record.keySchema(), transformedRecord.keySchema().field("magic").schema()); assertEquals(42, ((Struct) transformedRecord.key()).get("magic")); } @Test public void testSchemalessMapIsMutable() { xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 420, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); assertNull(transformedRecord.keySchema()); @SuppressWarnings("unchecked") Map<String, Object> actualKey = (Map<String, Object>) transformedRecord.key(); actualKey.put("k", "v"); Map<String, Object> expectedKey = new HashMap<>(); expectedKey.put("k", "v"); expectedKey.put("magic", 420); assertEquals(expectedKey, actualKey); } @Test public void testHoistFieldVersionRetrievedFromAppInfoParser() { assertEquals(AppInfoParser.getVersion(), xform.version()); } }
HoistFieldTest
java
apache__camel
components/camel-mllp/src/test/java/org/apache/camel/component/mllp/internal/MllpSocketBufferWriteTest.java
{ "start": 1359, "end": 12568 }
class ____ extends SocketBufferTestSupport { static final int MIN_BUFFER_SIZE = 2048; static final int MAX_BUFFER_SIZE = 0x40000000; // Approximately 1-GB /** * Description of test. * */ @Test public void testWriteIntWithStartOfBlock() { instance.write(MllpProtocolConstants.START_OF_BLOCK); assertEquals(1, instance.size()); assertEquals(0, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteIntWithEndOfBlock() { instance.write(MllpProtocolConstants.END_OF_BLOCK); assertEquals(1, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteIntWithEndOfData() { instance.write(MllpProtocolConstants.END_OF_DATA); assertEquals(1, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteBytesWithNullArray() { instance.write((byte[]) null); assertEquals(0, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteBytesWithEmptyArray() { instance.write(new byte[0]); assertEquals(0, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteBytesWithFullEnvelope() throws Exception { instance.write(buildTestBytes("BLAH", true, true, true)); assertEquals(7, instance.size()); assertEquals(0, instance.startOfBlockIndex); assertEquals(5, instance.endOfBlockIndex); } /** * Description of test. 
* */ @Test public void testWriteBytesWithoutEnvelope() { instance.write("BLAH".getBytes()); assertEquals(4, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteBytesWithWithoutStartOfBlock() throws Exception { instance.write(buildTestBytes("BLAH", false, true, true)); assertEquals(6, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteBytesWithWithoutEndOfBlock() throws Exception { instance.write(buildTestBytes("BLAH", true, false, true)); assertEquals(6, instance.size()); assertEquals(0, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteBytesWithWithoutEndOfData() throws Exception { instance.write(buildTestBytes("BLAH", true, true, false)); assertEquals(6, instance.size()); assertEquals(0, instance.startOfBlockIndex); assertEquals(5, instance.endOfBlockIndex); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteBytesWithWithoutEndOfBlockOrEndOfData() throws Exception { instance.write(buildTestBytes("BLAH", true, false, false)); assertEquals(5, instance.size()); assertEquals(0, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteByteArraySliceWithNullArray() { instance.write(null, 0, 5); assertEquals(0, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. 
* */ @Test public void testWriteByteArraySliceWithEmptyArray() { instance.write(new byte[0], 0, 5); assertEquals(0, instance.size()); assertEquals(-1, instance.startOfBlockIndex); assertEquals(-1, instance.endOfBlockIndex); } /** * Description of test. * */ @Test public void testWriteByteArraySliceWithNegativeOffset() { byte[] payload = "BLAH".getBytes(); try { instance.write(payload, -5, payload.length); fail("Exception should have been thrown"); } catch (IndexOutOfBoundsException expectedEx) { assertEquals("write(byte[4], offset[-5], writeCount[4]) - offset is less than zero", expectedEx.getMessage()); } } /** * Description of test. * */ @Test public void testWriteByteArraySliceWithOffsetGreaterThanLength() { byte[] payload = "BLAH".getBytes(); try { instance.write(payload, payload.length + 1, payload.length); fail("Exception should have been thrown"); } catch (IndexOutOfBoundsException expectedEx) { assertEquals("write(byte[4], offset[5], writeCount[4]) - offset is greater than write count", expectedEx.getMessage()); } } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testWriteByteArraySliceWithNegativeLength() { final byte[] bytes = "BLAH".getBytes(); IndexOutOfBoundsException exception = assertThrows(IndexOutOfBoundsException.class, () -> instance.write(bytes, 0, -5), "Exception should have been thrown"); assertEquals("write(byte[4], offset[0], writeCount[-5]) - write count is less than zero", exception.getMessage()); } /** * Description of test. * * @throws Exception in the event of a test error. 
*/ @Test public void testWriteByteArraySliceWithLengthGreaterThanAvailable() { final byte[] payload = "BLAH".getBytes(); IndexOutOfBoundsException exception0 = assertThrows(IndexOutOfBoundsException.class, () -> instance.write(payload, 0, payload.length + 1), "Exception should have been thrown"); assertEquals("write(byte[4], offset[0], writeCount[5]) - write count is greater than length of the source byte[]", exception0.getMessage()); IndexOutOfBoundsException exception1 = assertThrows(IndexOutOfBoundsException.class, () -> instance.write(payload, 1, payload.length), "Exception should have been thrown"); assertEquals( "write(byte[4], offset[1], writeCount[4]) - offset plus write count <5> is greater than length of the source byte[]", exception1.getMessage()); IndexOutOfBoundsException exception2 = assertThrows(IndexOutOfBoundsException.class, () -> instance.write(payload, 2, payload.length - 1), "Exception should have been thrown"); assertEquals( "write(byte[4], offset[2], writeCount[3]) - offset plus write count <5> is greater than length of the source byte[]", exception2.getMessage()); } /** * Description of test. * */ @Test public void testEnsureCapacityWithNegativeRequiredAvailability() { assertEquals(MIN_BUFFER_SIZE, instance.capacity()); instance.ensureCapacity(-1); assertEquals(MIN_BUFFER_SIZE, instance.capacity()); } /** * Description of test. 
* */ @Test public void testEnsureCapacityWithOutOfRangeRequiredAvailability() { assertEquals(MIN_BUFFER_SIZE, instance.capacity()); try { instance.ensureCapacity(Integer.MAX_VALUE); fail("Should have thrown an exception"); } catch (IllegalStateException expectedEx) { String expectedMessage = "Cannot increase the buffer size <2048> in order to increase the available capacity from <2048> to <2147483647>" + " because the required buffer size <2147483647> exceeds the maximum buffer size <1073741824>"; assertEquals(expectedMessage, expectedEx.getMessage()); } try { instance.ensureCapacity(MAX_BUFFER_SIZE + 1); fail("Should have thrown an exception"); } catch (IllegalStateException expectedEx) { String expectedMessage = "Cannot increase the buffer size <2048> in order to increase the available capacity from <2048> to <1073741825>" + " because the required buffer size <1073741825> exceeds the maximum buffer size <1073741824>"; assertEquals(expectedMessage, expectedEx.getMessage()); } instance.write("BLAH".getBytes()); IllegalStateException expectedEx = assertThrows(IllegalStateException.class, () -> instance.ensureCapacity(MAX_BUFFER_SIZE)); String expectedMessage = "Cannot increase the buffer size <2048> in order to increase the available capacity from <2044> to <1073741824>" + " because the required buffer size <1073741828> exceeds the maximum buffer size <1073741824>"; assertEquals(expectedMessage, expectedEx.getMessage()); } /** * Description of test. 
* */ @Test public void testEnsureCapacityWithAlreadyAllocateMaxBufferSize() { assertEquals(MIN_BUFFER_SIZE, instance.capacity()); instance.ensureCapacity(MAX_BUFFER_SIZE); IllegalStateException expectedEx = assertThrows(IllegalStateException.class, () -> instance.ensureCapacity(MAX_BUFFER_SIZE + 1)); String expectedMessage = "Cannot increase the buffer size from <1073741824> to <1073741825> in order to increase the available capacity" + " from <1073741824> to <1073741825> because the buffer is already the maximum size <1073741824>"; assertEquals(expectedMessage, expectedEx.getMessage()); } /** * Description of test. * * @throws Exception in the event of a test error. */ @Test public void testReadFrom() throws Exception { SocketStub socketStub = new SocketStub(); socketStub.inputStreamStub .addPacket("FOO".getBytes()) .addPacket("BAR".getBytes()); endpoint.setReceiveTimeout(500); endpoint.setReadTimeout(100); assertThrows(SocketTimeoutException.class, () -> instance.readFrom(socketStub)); } }
MllpSocketBufferWriteTest
java
spring-projects__spring-framework
spring-core/src/main/java/org/springframework/core/retry/RetryPolicy.java
{ "start": 1553, "end": 4810 }
interface ____ { /** * Specify if the {@link Retryable} operation should be retried based on the * given throwable. * @param throwable the exception that caused the operation to fail * @return {@code true} if the operation should be retried, {@code false} otherwise */ boolean shouldRetry(Throwable throwable); /** * Get the {@link BackOff} strategy to use for this retry policy. * <p>Defaults to a fixed backoff of {@value Builder#DEFAULT_DELAY} milliseconds * and maximum {@value Builder#DEFAULT_MAX_RETRIES} retries. * <p>Note that {@code total attempts = 1 initial attempt + maxRetries attempts}. * Thus, when {@code maxRetries} is set to 3, a retryable operation will be * invoked at least once and at most 4 times. * @return the {@code BackOff} strategy to use * @see FixedBackOff */ default BackOff getBackOff() { return new FixedBackOff(Builder.DEFAULT_DELAY, Builder.DEFAULT_MAX_RETRIES); } /** * Create a {@link RetryPolicy} with default configuration. * <p>The returned policy applies to all exception types, uses a fixed backoff * of {@value Builder#DEFAULT_DELAY} milliseconds, and supports maximum * {@value Builder#DEFAULT_MAX_RETRIES} retries. * <p>Note that {@code total attempts = 1 initial attempt + maxRetries attempts}. * Thus, when {@code maxRetries} is set to 3, a retryable operation will be * invoked at least once and at most 4 times. * @see FixedBackOff */ static RetryPolicy withDefaults() { return throwable -> true; } /** * Create a {@link RetryPolicy} configured with a maximum number of retry attempts. * <p>Note that {@code total attempts = 1 initial attempt + maxRetries attempts}. * Thus, if {@code maxRetries} is set to 4, a retryable operation will be invoked * at least once and at most 5 times. * <p>The returned policy applies to all exception types and uses a fixed backoff * of {@value Builder#DEFAULT_DELAY} milliseconds. 
* @param maxRetries the maximum number of retry attempts; * must be positive (or zero for no retry) * @see Builder#maxRetries(long) * @see FixedBackOff */ static RetryPolicy withMaxRetries(long maxRetries) { assertMaxRetriesIsNotNegative(maxRetries); return builder().backOff(new FixedBackOff(Builder.DEFAULT_DELAY, maxRetries)).build(); } /** * Create a {@link Builder} to configure a {@link RetryPolicy} with common * configuration options. */ static Builder builder() { return new Builder(); } private static void assertMaxRetriesIsNotNegative(long maxRetries) { Assert.isTrue(maxRetries >= 0, () -> "Invalid maxRetries (%d): must be positive or zero for no retry.".formatted(maxRetries)); } private static void assertIsNotNegative(String name, Duration duration) { Assert.isTrue(!duration.isNegative(), () -> "Invalid %s (%dms): must be greater than or equal to zero.".formatted(name, duration.toMillis())); } private static void assertIsPositive(String name, Duration duration) { Assert.isTrue((!duration.isNegative() && !duration.isZero()), () -> "Invalid %s (%dms): must be greater than zero.".formatted(name, duration.toMillis())); } /** * Fluent API for configuring a {@link RetryPolicy} with common configuration * options. */ final
RetryPolicy
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappersTests.java
{ "start": 10512, "end": 11217 }
class ____ implements TokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { try { tokenStream.incrementToken(); } catch (IOException e) {} return tokenStream; } @Override public TokenStream normalize(TokenStream tokenStream) { try { tokenStream.incrementToken(); } catch (IOException e) {} return tokenStream; } @Override public AnalysisMode getAnalysisMode() { return AnalysisMode.INDEX_TIME; } } @NamedComponent("TestCharFilterFactory") public static
TestTokenFilterFactory
java
elastic__elasticsearch
x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
{ "start": 33307, "end": 36735 }
class ____ { String pattern; int prefixLength; Fuzziness fuzziness; String expectedPrefixQuery; int expectedMinShouldMatch; String ngrams; FuzzyTest( String pattern, int prefixLength, Fuzziness fuzziness, String expectedPrefixQuery, int expectedMinShouldMatch, String ngrams ) { super(); this.pattern = pattern; this.prefixLength = prefixLength; this.fuzziness = fuzziness; this.expectedPrefixQuery = expectedPrefixQuery; this.expectedMinShouldMatch = expectedMinShouldMatch; this.ngrams = ngrams; } Query getFuzzyQuery() { return wildcardFieldType.fieldType().fuzzyQuery(pattern, fuzziness, prefixLength, 50, true, MOCK_CONTEXT); } Query getExpectedApproxQuery() throws ParseException { BooleanQuery.Builder bq = new BooleanQuery.Builder(); if (expectedPrefixQuery != null) { String[] tokens = expectedPrefixQuery.split(" "); Query prefixQuery = null; if (tokens.length == 1) { prefixQuery = new TermQuery( new Term(WILDCARD_FIELD_NAME, tokens[0].replaceAll("_", WildcardFieldMapper.TOKEN_START_STRING)) ); } else { BooleanQuery.Builder pqb = new BooleanQuery.Builder(); for (String token : tokens) { Query ngramQuery = new TermQuery( new Term(WILDCARD_FIELD_NAME, token.replaceAll("_", WildcardFieldMapper.TOKEN_START_STRING)) ); pqb.add(ngramQuery, Occur.MUST); } prefixQuery = pqb.build(); } if (ngrams == null) { return prefixQuery; } bq.add(prefixQuery, Occur.MUST); } if (ngrams != null) { BooleanQuery.Builder nq = new BooleanQuery.Builder(); String[] tokens = ngrams.split(" "); for (String token : tokens) { Query ngramQuery = new TermQuery( new Term(WILDCARD_FIELD_NAME, token.replaceAll("_", WildcardFieldMapper.TOKEN_START_STRING)) ); nq.add(ngramQuery, Occur.SHOULD); } nq.setMinimumNumberShouldMatch(expectedMinShouldMatch); bq.add(nq.build(), Occur.MUST); } return bq.build(); } } public void testFuzzyAcceleration() throws IOException, ParseException { FuzzyTest[] tests = { new FuzzyTest("123456", 0, Fuzziness.ONE, null, 1, "113 355"), new FuzzyTest("1234567890", 2, 
Fuzziness.ONE, "_11", 1, "335 577"), new FuzzyTest("12345678901", 2, Fuzziness.ONE, "_11", 2, "335 577 901"), new FuzzyTest("12345678", 4, Fuzziness.ONE, "_11 113 133", 0, null) }; for (FuzzyTest test : tests) { Query wildcardFieldQuery = test.getFuzzyQuery(); testExpectedAccelerationQuery(test.pattern, wildcardFieldQuery, getSimplifiedApproximationQuery(test.getExpectedApproxQuery())); } } static
FuzzyTest
java
ReactiveX__RxJava
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableOnBackpressureReduceTest.java
{ "start": 1250, "end": 8175 }
class ____ extends RxJavaTest { static final BiFunction<Integer, Integer, Integer> TEST_INT_REDUCER = (previous, current) -> previous + current + 50; static final BiFunction<Object, Object, Object> TEST_OBJECT_REDUCER = (previous, current) -> current; @Test public void simple() { TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(); Flowable.range(1, 5).onBackpressureReduce(TEST_INT_REDUCER).subscribe(ts); ts.assertNoErrors(); ts.assertTerminated(); ts.assertValues(1, 2, 3, 4, 5); } @Test public void simpleError() { TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(); Flowable.range(1, 5).concatWith(Flowable.error(new TestException())) .onBackpressureReduce(TEST_INT_REDUCER).subscribe(ts); ts.assertTerminated(); ts.assertError(TestException.class); ts.assertValues(1, 2, 3, 4, 5); } @Test public void simpleBackpressure() { TestSubscriber<Integer> ts = new TestSubscriber<>(2L); Flowable.range(1, 5).onBackpressureReduce(TEST_INT_REDUCER).subscribe(ts); ts.assertNoErrors(); ts.assertValues(1, 2); ts.assertNotComplete(); } @Test public void synchronousDrop() { PublishProcessor<Integer> source = PublishProcessor.create(); TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(0L); source.onBackpressureReduce(TEST_INT_REDUCER).subscribe(ts); ts.assertNoValues(); source.onNext(1); ts.request(2); ts.assertValue(1); source.onNext(2); ts.assertValues(1, 2); source.onNext(3); source.onNext(4); //3 + 4 + 50 == 57 source.onNext(5); //57 + 5 + 50 == 112 source.onNext(6); //112 + 6 + 50 == 168 ts.request(2); ts.assertValues(1, 2, 168); source.onNext(7); ts.assertValues(1, 2, 168, 7); source.onNext(8); source.onNext(9); //8 + 9 + 50 == 67 source.onComplete(); ts.request(1); ts.assertValues(1, 2, 168, 7, 67); ts.assertNoErrors(); ts.assertTerminated(); } @Test public void reduceBackpressuredSync() { PublishProcessor<Integer> source = PublishProcessor.create(); TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(0L); source.onBackpressureReduce(Integer::sum).subscribe(ts); 
source.onNext(1); source.onNext(2); source.onNext(3); ts.request(1); ts.assertValuesOnly(6); source.onNext(4); source.onComplete(); ts.assertValuesOnly(6); ts.request(1); ts.assertResult(6, 4); } private <T> TestSubscriberEx<T> createDelayedSubscriber() { return new TestSubscriberEx<T>(1L) { final Random rnd = new Random(); @Override public void onNext(T t) { super.onNext(t); if (rnd.nextDouble() < 0.001) { try { Thread.sleep(1); } catch (InterruptedException ex) { ex.printStackTrace(); } } request(1); } }; } private <T> void assertValuesDropped(TestSubscriberEx<T> ts, int totalValues) { int n = ts.values().size(); System.out.println("testAsynchronousDrop -> " + n); Assert.assertTrue("All events received?", n < totalValues); } private void assertIncreasingSequence(TestSubscriberEx<Integer> ts) { int previous = 0; for (Integer current : ts.values()) { Assert.assertTrue("The sequence must be increasing [current value=" + previous + ", previous value=" + current + "]", previous <= current); previous = current; } } @Test public void asynchronousDrop() { TestSubscriberEx<Integer> ts = createDelayedSubscriber(); int m = 100000; Flowable.range(1, m) .subscribeOn(Schedulers.computation()) .onBackpressureReduce((previous, current) -> { //in that case it works like onBackpressureLatest //the output sequence of number must be increasing return current; }) .observeOn(Schedulers.io()) .subscribe(ts); ts.awaitDone(2, TimeUnit.SECONDS); ts.assertTerminated(); assertValuesDropped(ts, m); assertIncreasingSequence(ts); } @Test public void asynchronousDrop2() { TestSubscriberEx<Long> ts = createDelayedSubscriber(); int m = 100000; Flowable.rangeLong(1, m) .subscribeOn(Schedulers.computation()) .onBackpressureReduce(Long::sum) .observeOn(Schedulers.io()) .subscribe(ts); ts.awaitDone(2, TimeUnit.SECONDS); ts.assertTerminated(); assertValuesDropped(ts, m); long sum = 0; for (Long i : ts.values()) { sum += i; } //sum = (A1 + An) * n / 2 = 100_001 * 50_000 = 50_000_00000 + 50_000 = 
50_000_50_000 Assert.assertEquals("Wrong sum: " + sum, 5000050000L, sum); } @Test public void nullPointerFromReducer() { PublishProcessor<Integer> source = PublishProcessor.create(); TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(0); source.onBackpressureReduce((l, r) -> null).subscribe(ts); source.onNext(1); source.onNext(2); TestHelper.assertError(ts.errors(), 0, NullPointerException.class, "The reducer returned a null value"); } @Test public void exceptionFromReducer() { PublishProcessor<Integer> source = PublishProcessor.create(); TestSubscriberEx<Integer> ts = new TestSubscriberEx<>(0); source.onBackpressureReduce((l, r) -> { throw new TestException("Test exception"); }).subscribe(ts); source.onNext(1); source.onNext(2); TestHelper.assertError(ts.errors(), 0, TestException.class, "Test exception"); } @Test public void doubleOnSubscribe() { TestHelper.checkDoubleOnSubscribeFlowable(f -> f.onBackpressureReduce(TEST_OBJECT_REDUCER)); } @Test public void take() { Flowable.just(1, 2) .onBackpressureReduce(TEST_INT_REDUCER) .take(1) .test() .assertResult(1); } @Test public void dispose() { TestHelper.checkDisposed(Flowable.never().onBackpressureReduce(TEST_OBJECT_REDUCER)); } @Test public void badRequest() { TestHelper.assertBadRequestReported(Flowable.never().onBackpressureReduce(TEST_OBJECT_REDUCER)); } }
FlowableOnBackpressureReduceTest
java
bumptech__glide
library/test/src/test/java/com/bumptech/glide/load/resource/bitmap/DefaultImageHeaderParserTest.java
{ "start": 32459, "end": 32834 }
class ____ extends FilterInputStream { PartialSkipInputStream(InputStream in) { super(in); } @Override public long skip(long byteCount) throws IOException { long toActuallySkip = byteCount / 2; if (byteCount == 1) { toActuallySkip = 1; } return super.skip(toActuallySkip); } } private static
PartialSkipInputStream
java
elastic__elasticsearch
modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextApiSpecGenerator.java
{ "start": 1163, "end": 4406 }
class ____ { public static void main(String[] args) throws IOException { List<PainlessContextInfo> contexts = ContextGeneratorCommon.getContextInfos(); Path rootDir = resetRootDir(); ContextGeneratorCommon.PainlessInfos infos; JavaClassFilesystemResolver jdksrc = getJdkSrc(); if (jdksrc != null) { infos = new ContextGeneratorCommon.PainlessInfos(contexts, new JavadocExtractor(jdksrc)); } else { infos = new ContextGeneratorCommon.PainlessInfos(contexts); } Path json = rootDir.resolve("painless-common.json"); try ( PrintStream jsonStream = new PrintStream( Files.newOutputStream(json, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8 ) ) { XContentBuilder builder = XContentFactory.jsonBuilder(jsonStream); builder.startObject(); builder.field(PainlessContextInfo.CLASSES.getPreferredName(), infos.common); builder.endObject(); builder.flush(); } for (PainlessInfoJson.Context context : infos.contexts) { json = rootDir.resolve("painless-" + context.getName() + ".json"); try ( PrintStream jsonStream = new PrintStream( Files.newOutputStream(json, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8 ) ) { XContentBuilder builder = XContentFactory.jsonBuilder(jsonStream); context.toXContent(builder, null); builder.flush(); } } } @SuppressForbidden(reason = "resolve context api directory with environment") private static Path resetRootDir() throws IOException { Path rootDir = PathUtils.get("./src/main/generated/whitelist-json"); IOUtils.rm(rootDir); Files.createDirectories(rootDir); return rootDir; } @SuppressForbidden(reason = "resolve jdk src directory with environment") private static JavaClassFilesystemResolver getJdkSrc() { String jdksrc = System.getProperty("jdksrc"); if (jdksrc == null || "".equals(jdksrc)) { return null; } String packageSourcesString = System.getProperty("packageSources"); if (packageSourcesString == null || "".equals(packageSourcesString)) { return new 
JavaClassFilesystemResolver(PathUtils.get(jdksrc)); } HashMap<String, Path> packageSources = new HashMap<>(); for (String packageSourceString : packageSourcesString.split(";")) { String[] packageSource = packageSourceString.split(":", 2); if (packageSource.length != 2) { throw new IllegalArgumentException("Bad format for packageSources. Format <package0>:<path0>;<package1>:<path1> ..."); } packageSources.put(packageSource[0], PathUtils.get(packageSource[1])); } return new JavaClassFilesystemResolver(PathUtils.get(jdksrc), packageSources); } public static
ContextApiSpecGenerator
java
quarkusio__quarkus
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/PathMatchingHttpSecurityPolicyTest.java
{ "start": 1399, "end": 8233 }
class ____ { private static final Duration REQUEST_TIMEOUT = Duration.ofSeconds(20); private static WebClient client; protected static QuarkusUnitTest createQuarkusUnitTest(String applicationProperties, Class<?>... additionalTestClasses) { return new QuarkusUnitTest().setArchiveProducer(() -> { var javaArchive = ShrinkWrap.create(JavaArchive.class) .addClasses(TestIdentityController.class, TestIdentityProvider.class, PathHandler.class, RouteHandler.class, CustomNamedPolicy.class) .addAsResource("static-file.html", "META-INF/resources/static-file.html") .addAsResource(new StringAsset(applicationProperties), "application.properties"); if (additionalTestClasses.length > 0) { javaArchive.addClasses(additionalTestClasses); } return javaArchive; }); } @BeforeAll public static void setup() { TestIdentityController.resetRoles() .add("test", "test", "test") .add("admin", "admin", "admin") .add("user", "user", "user") .add("admin1", "admin1", "admin1") .add("root1", "root1", "root1") .add("root", "root", "root") .add("public1", "public1", "public1"); } @AfterAll public static void cleanup() { if (client != null) { client.close(); } } @Inject Vertx vertx; @TestHTTPResource URL url; private WebClient getClient() { if (client == null) { client = WebClient.create(vertx); } return client; } @Test public void testInnerWildcardPath() { assurePath("/api/any-value/bar", 401); assurePath("/api/any-value/bar", 401); assurePath("/api/next/any-value/prev", 401); assurePath("/api/one/two/three/four", 401); assurePath("/api////any-value//////bar", 401); assurePath("/api/next///////any-value////prev", 401); assurePath("////api//one/two//three////four?door=wood", 401); assurePath("/api/one/three/four/five", 401); assurePath("/api/one/3/4/five", 401); assurePath("////api/one///3/4/five", 401); assurePath("/api/now/sadly/i/dont-know", 401); assurePath("/api/now/sadly///i/dont-know", 401); assurePath("/api/one/three/jamaica/five", 200); assurePath("/api/one/three/jamaica/football", 200); 
assurePath("/api/now/sally/i/dont-know", 200); } @ParameterizedTest @ValueSource(strings = { // path policy without wildcard "/api/foo//bar", "/api/foo///bar", "/api/foo////bar", "/api/foo/////bar", "//api/foo/bar", "///api/foo/bar", "////api/foo/bar", "//api//foo//bar", "//api/foo//bar", // path policy with wildcard "/api/fubar/baz", "/api/fubar/baz/", "/api/fubar/baz//", "/api/fubar/baz/.", "/api/fubar/baz////.", "/api/fubar/baz/bar", // routes defined for exact paths "/api/baz", "//api/baz", "///api////baz", "/api//baz", // zero length path "", "/?one=two", // empty segments only are match with path policy for '/' "/", "///", "////", "/////" }) public void testEmptyPathSegments(String path) { assurePath(path, 401); assurePathAuthenticated(path); } @ParameterizedTest @ValueSource(strings = { "/api/foo/./bar", "/../api/foo///bar", "/api/./foo/.///bar", "/api/foo/./////bar", "/api/fubar/baz/.", "/..///api/foo/bar", "////../../api/foo/bar", "/./api//foo//bar", "//api/foo/./bar", "/.", "/..", "/./", "/..//", "/.///", "/..////", "/./////" }) public void testDotPathSegments(String path) { assurePath(path, 401); assurePathAuthenticated(path); } @ParameterizedTest @ValueSource(strings = { "/static-file.html", "//static-file.html", "///static-file.html" }) public void testStaticResource(String path) { assurePath(path, 401); assurePathAuthenticated(path); } @Test public void testMiscellaneousPaths() { // /api/baz with segment indicating version shouldn't match /api/baz path policy assurePath("/api/baz;v=1.1", 200); // /api/baz/ is different resource than secured /api/baz, but we secure both when there is not more specific exact path pattern assurePath("/api/baz/", 401); } @Test public void testCustomSharedPermission() { assurePath("/", 401, null, null, null); assurePath("/", 200, null, "test", null); assurePath("/", 403, null, "test", "deny-header"); assurePath("/api/one/anything/jamaica/anything", 200, null, null, null); assurePath("/api/one/anything/jamaica/anything", 
401, null, null, "deny-header"); } @Test public void testRoleMappingSharedPermission() { assurePath("/secured", 401, null, null, null); assurePath("/secured", 200, null, "test", null); assurePath("/secured/all", 401, null, null, null); assurePath("/secured/all", 200, null, "test", null); assurePath("/secured/all", 200, null, "root", null); assurePath("/secured/all", 200, null, "root1", null); assurePath("/secured/all", 200, null, "admin", null); assurePath("/secured/user", 403, null, "test", null); assurePath("/secured/user", 403, null, "admin", null); assurePath("/secured/user", 403, null, "admin1", null); assurePath("/secured/user", 200, null, "root", null); assurePath("/secured/user", 200, null, "root1", null); assurePath("/secured/user", 200, null, "user", null); assurePath("/secured/admin", 403, null, "user", null); assurePath("/secured/admin", 403, null, "test", null); assurePath("/secured/admin", 200, null, "admin", null); assurePath("/secured/admin", 200, null, "admin1", null); assurePath("/secured/admin", 200, null, "root", null); assurePath("/secured/admin", 200, null, "root1", null); } @Test public void testMultipleSharedPermissions() { assurePath("/secured/user", 200, null, "root", null); assurePath("/secured/user", 403, null, "root", "deny-header"); } @Test public void testRolesMappingOnPublicPath() { // here no HTTP Security policy that requires authentication is applied, and we want to check that identity // is still augmented assurePath("/api/public", 200, null, "public1", null); assurePath("/api/public", 403, null, "root1", null); } @ApplicationScoped public static
PathMatchingHttpSecurityPolicyTest
java
google__jimfs
jimfs/src/test/java/com/google/common/jimfs/WindowsPathTypeTest.java
{ "start": 1310, "end": 7417 }
class ____ { @Test public void testWindows() { PathType windows = PathType.windows(); assertThat(windows.getSeparator()).isEqualTo("\\"); assertThat(windows.getOtherSeparators()).isEqualTo("/"); // "C:\\foo\bar" results from "C:\", "foo", "bar" passed to getPath PathType.ParseResult path = windows.parsePath("C:\\\\foo\\bar"); assertParseResult(path, "C:\\", "foo", "bar"); assertThat(windows.toString(path.root(), path.names())).isEqualTo("C:\\foo\\bar"); PathType.ParseResult path2 = windows.parsePath("foo/bar/"); assertParseResult(path2, null, "foo", "bar"); assertThat(windows.toString(path2.root(), path2.names())).isEqualTo("foo\\bar"); PathType.ParseResult path3 = windows.parsePath("hello world/foo/bar"); assertParseResult(path3, null, "hello world", "foo", "bar"); assertThat(windows.toString(null, path3.names())).isEqualTo("hello world\\foo\\bar"); } @Test public void testWindows_relativePathsWithDriveRoot_unsupported() { assertThrows(InvalidPathException.class, () -> windows().parsePath("C:")); assertThrows(InvalidPathException.class, () -> windows().parsePath("C:foo\\bar")); } @Test public void testWindows_absolutePathOnCurrentDrive_unsupported() { assertThrows(InvalidPathException.class, () -> windows().parsePath("\\foo\\bar")); assertThrows(InvalidPathException.class, () -> windows().parsePath("\\")); } @Test public void testWindows_uncPaths() { PathType windows = PathType.windows(); PathType.ParseResult path = windows.parsePath("\\\\host\\share"); assertParseResult(path, "\\\\host\\share\\"); path = windows.parsePath("\\\\HOST\\share\\foo\\bar"); assertParseResult(path, "\\\\HOST\\share\\", "foo", "bar"); InvalidPathException expected = assertThrows(InvalidPathException.class, () -> windows.parsePath("\\\\")); assertThat(expected.getInput()).isEqualTo("\\\\"); assertThat(expected.getReason()).isEqualTo("UNC path is missing hostname"); expected = assertThrows(InvalidPathException.class, () -> windows.parsePath("\\\\host")); 
assertThat(expected.getInput()).isEqualTo("\\\\host"); assertThat(expected.getReason()).isEqualTo("UNC path is missing sharename"); expected = assertThrows(InvalidPathException.class, () -> windows.parsePath("\\\\host\\")); assertThat(expected.getInput()).isEqualTo("\\\\host\\"); assertThat(expected.getReason()).isEqualTo("UNC path is missing sharename"); expected = assertThrows(InvalidPathException.class, () -> windows.parsePath("//host")); assertThat(expected.getInput()).isEqualTo("//host"); assertThat(expected.getReason()).isEqualTo("UNC path is missing sharename"); } @Test public void testWindows_illegalNames() { assertThrows(InvalidPathException.class, () -> windows().parsePath("foo<bar")); assertThrows(InvalidPathException.class, () -> windows().parsePath("foo?")); assertThrows(InvalidPathException.class, () -> windows().parsePath("foo ")); assertThrows(InvalidPathException.class, () -> windows().parsePath("foo \\bar")); } @Test public void testWindows_toUri_normal() { URI fileUri = PathType.windows().toUri(fileSystemUri, "C:\\", ImmutableList.of("foo", "bar"), false); assertThat(fileUri.toString()).isEqualTo("jimfs://foo/C:/foo/bar"); assertThat(fileUri.getPath()).isEqualTo("/C:/foo/bar"); URI directoryUri = PathType.windows().toUri(fileSystemUri, "C:\\", ImmutableList.of("foo", "bar"), true); assertThat(directoryUri.toString()).isEqualTo("jimfs://foo/C:/foo/bar/"); assertThat(directoryUri.getPath()).isEqualTo("/C:/foo/bar/"); URI rootUri = PathType.windows().toUri(fileSystemUri, "C:\\", ImmutableList.<String>of(), true); assertThat(rootUri.toString()).isEqualTo("jimfs://foo/C:/"); assertThat(rootUri.getPath()).isEqualTo("/C:/"); } @Test public void testWindows_toUri_unc() { URI fileUri = PathType.windows() .toUri(fileSystemUri, "\\\\host\\share\\", ImmutableList.of("foo", "bar"), false); assertThat(fileUri.toString()).isEqualTo("jimfs://foo//host/share/foo/bar"); assertThat(fileUri.getPath()).isEqualTo("//host/share/foo/bar"); URI rootUri = 
PathType.windows() .toUri(fileSystemUri, "\\\\host\\share\\", ImmutableList.<String>of(), true); assertThat(rootUri.toString()).isEqualTo("jimfs://foo//host/share/"); assertThat(rootUri.getPath()).isEqualTo("//host/share/"); } @Test public void testWindows_toUri_escaping() { URI uri = PathType.windows() .toUri(fileSystemUri, "C:\\", ImmutableList.of("Users", "foo", "My Documents"), true); assertThat(uri.toString()).isEqualTo("jimfs://foo/C:/Users/foo/My%20Documents/"); assertThat(uri.getRawPath()).isEqualTo("/C:/Users/foo/My%20Documents/"); assertThat(uri.getPath()).isEqualTo("/C:/Users/foo/My Documents/"); } @Test public void testWindows_uriRoundTrips_normal() { assertUriRoundTripsCorrectly(PathType.windows(), "C:\\"); assertUriRoundTripsCorrectly(PathType.windows(), "C:\\foo"); assertUriRoundTripsCorrectly(PathType.windows(), "C:\\foo\\bar\\baz"); assertUriRoundTripsCorrectly(PathType.windows(), "C:\\Users\\foo\\My Documents\\"); assertUriRoundTripsCorrectly(PathType.windows(), "C:\\foo bar"); assertUriRoundTripsCorrectly(PathType.windows(), "C:\\foo bar\\baz"); } @Test public void testWindows_uriRoundTrips_unc() { assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\foo"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\foo\\bar\\baz"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\Users\\foo\\My Documents\\"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\foo bar"); assertUriRoundTripsCorrectly(PathType.windows(), "\\\\host\\share\\foo bar\\baz"); } }
WindowsPathTypeTest
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskAsyncCallTest.java
{ "start": 12254, "end": 12433 }
class ____ extends ClassLoader { TestUserCodeClassLoader() { super(ClassLoader.getSystemClassLoader()); } } private static
TestUserCodeClassLoader
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java
{ "start": 3337, "end": 27280 }
class ____ extends ESTestCase { public void testSnapshotDownloadPermitsAreNotGrantedWhenSnapshotsUseFlagIsFalse() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RecoverySettings recoverySettings = new RecoverySettings( Settings.builder() .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) .put(INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey(), false) .build(), clusterSettings ); assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); } public void testGrantsSnapshotDownloadPermitsUpToMaxPermits() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RecoverySettings recoverySettings = new RecoverySettings( Settings.builder().put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5).build(), clusterSettings ); Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); assertThat(permit, is(notNullValue())); assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); permit.close(); assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(notNullValue())); } public void testSnapshotDownloadPermitCanBeDynamicallyUpdated() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RecoverySettings recoverySettings = new RecoverySettings( Settings.builder().put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5).build(), clusterSettings ); Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); assertThat(permit, is(notNullValue())); assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); clusterSettings.applySettings( Settings.builder().put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 10).build() ); 
assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(notNullValue())); assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); permit.close(); } public void testInsufficientNumberOfPermitsMessage() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RecoverySettings recoverySettings = new RecoverySettings( Settings.builder() .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 2) .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 3) .build(), clusterSettings ); final String expectedMessage = String.format( Locale.ROOT, """ Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ the source node. Ensure snapshot files can be used during recovery by setting [%s] to be no greater than [2]. \ Current values of [%s] = [5], [%s] = [2] """, INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey() ); final LoggingExpectation expectation = new SeenEventExpectation( "WARN-Capture", RecoverySettings.class.getCanonicalName(), Level.WARN, expectedMessage ); assertThatLogger(() -> { // Allow the first recovery to obtain a permit Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); assertThat(permit, is(notNullValue())); // Deny the second recovery to get the permit assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); }, RecoverySettings.class, expectation); } public void testToManyRecoveriesSettingsMessage() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RecoverySettings recoverySettings = new 
RecoverySettings( Settings.builder() .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 20) .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 3) .build(), clusterSettings ); final String expectedMessage = String.format( Locale.ROOT, """ Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ the source node. Ensure snapshot files can be used during recovery by reducing [%s] from its current value of \ [20] to be no greater than [5], or disable snapshot-based recovery by setting [%s] to [false] """, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey() ); final LoggingExpectation expectation = new SeenEventExpectation( "WARN-Capture", RecoverySettings.class.getCanonicalName(), Level.WARN, expectedMessage ); assertThatLogger(() -> { // Allow the first recovery to obtain a permit Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); assertThat(permit, is(notNullValue())); // Deny the second recovery to get the permit assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); }, RecoverySettings.class, expectation); } public void testMaxConcurrentSnapshotFileDownloadsPerNodeIsValidated() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); Settings settings = Settings.builder() .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 10) .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) .build(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new RecoverySettings(settings, clusterSettings) ); assertThat( exception.getMessage(), containsString( 
"[indices.recovery.max_concurrent_snapshot_file_downloads_per_node]=5 " + "is less than [indices.recovery.max_concurrent_snapshot_file_downloads]=10" ) ); } public void testAvailableBandwidthsSettingsAreAllConfigured() { final NodeRecoverySettings recoverySettings = nodeRecoverySettings(); recoverySettings.withRandomIndicesRecoveryMaxBytesPerSec(); recoverySettings.withRoles(randomDataNodeRoles()); recoverySettings.withRandomMemory(); final List<Setting<?>> randomSettings = randomSubsetOf( randomIntBetween(1, NODE_BANDWIDTH_RECOVERY_SETTINGS.size() - 1), NODE_BANDWIDTH_RECOVERY_SETTINGS ); for (Setting<?> setting : randomSettings) { if (setting.getKey().equals(NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING.getKey())) { recoverySettings.withNetworkBandwidth(randomNonZeroByteSizeValue()); } else if (setting.getKey().equals(NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING.getKey())) { recoverySettings.withDiskReadBandwidth(randomNonZeroByteSizeValue()); } else if (setting.getKey().equals(NODE_BANDWIDTH_RECOVERY_DISK_WRITE_SETTING.getKey())) { recoverySettings.withDiskWriteBandwidth(randomNonZeroByteSizeValue()); } else { throw new AssertionError(); } } final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, recoverySettings::build); assertThat( exception.getMessage(), containsString( "Settings " + NODE_BANDWIDTH_RECOVERY_SETTINGS.stream().map(Setting::getKey).toList() + " must all be defined or all be undefined; but only settings " + NODE_BANDWIDTH_RECOVERY_SETTINGS.stream().filter(randomSettings::contains).map(Setting::getKey).toList() + " are configured." 
) ); } public void testNodeBandwidthSettingsExist() { final NodeRecoverySettings recoverySettings = nodeRecoverySettings(); recoverySettings.withRandomIndicesRecoveryMaxBytesPerSec(); recoverySettings.withRoles(randomDataNodeRoles()); recoverySettings.withRandomMemory(); if (randomBoolean()) { recoverySettings.withNetworkBandwidth(randomNonZeroByteSizeValue()); recoverySettings.withDiskReadBandwidth(randomNonZeroByteSizeValue()); recoverySettings.withDiskWriteBandwidth(randomNonZeroByteSizeValue()); assertTrue(recoverySettings.build().nodeBandwidthSettingsExist()); } else { assertFalse(recoverySettings.build().nodeBandwidthSettingsExist()); } } public void testDefaultMaxBytesPerSecOnNonDataNode() { RecoverySettings recoverySettings = nodeRecoverySettings().withRole(randomFrom("master", "ingest", "ml")) .withRandomBandwidths() .withRandomMemory() .build(); assertThat( "Non-data nodes have a default 40mb rate limit", recoverySettings.getMaxBytesPerSec(), equalTo(DEFAULT_MAX_BYTES_PER_SEC) ); } public void testMaxBytesPerSecOnNonDataNodeWithIndicesRecoveryMaxBytesPerSec() { final ByteSizeValue random = randomByteSizeValue(); assertThat( "Non-data nodes should use the defined rate limit when set", nodeRecoverySettings().withRole(randomFrom("master", "ingest", "ml")) .withIndicesRecoveryMaxBytesPerSec(random) .withRandomMemory() .build() .getMaxBytesPerSec(), equalTo(random) ); } public void testDefaultMaxBytesPerSecOnDataNode() { assertThat( "Data nodes that are not dedicated to cold/frozen have a default 40mb rate limit", nodeRecoverySettings().withRole(randomFrom("data", "data_hot", "data_warm", "data_content")) .withRandomMemory() .build() .getMaxBytesPerSec(), equalTo(DEFAULT_MAX_BYTES_PER_SEC) ); } public void testMaxBytesPerSecOnDataNodeWithIndicesRecoveryMaxBytesPerSec() { final ByteSizeValue random = randomByteSizeValue(); assertThat( "Data nodes that are not dedicated to cold/frozen should use the defined rate limit when set", 
nodeRecoverySettings().withIndicesRecoveryMaxBytesPerSec(random) .withRoles(randomDataNodeRoles()) .withRandomMemory() .build() .getMaxBytesPerSec(), equalTo(random) ); } public void testMaxBytesPerSecOnDataNodeWithIndicesRecoveryMaxBytesPerSecAndOvercommit() { final Double maxOvercommitFactor = randomBoolean() ? randomDoubleBetween(1.0d, 100.0d, true) : null; final ByteSizeValue indicesRecoveryMaxBytesPerSec = switch (randomInt(2)) { case 0 -> ByteSizeValue.MINUS_ONE; case 1 -> ByteSizeValue.ZERO; case 2 -> ByteSizeValue.ofGb(between(100, 1000)); default -> throw new AssertionError(); }; RecoverySettings recoverySettings = nodeRecoverySettings().withIndicesRecoveryMaxBytesPerSec(indicesRecoveryMaxBytesPerSec) .withNetworkBandwidth(ByteSizeValue.ofGb(1)) .withDiskReadBandwidth(ByteSizeValue.ofMb(500)) .withDiskWriteBandwidth(ByteSizeValue.ofMb(250)) .withMaxOvercommitFactor(maxOvercommitFactor) .withRoles(randomDataNodeRoles()) .withRandomMemory() .build(); assertThat("Node bandwidth settings should all exist", recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data nodes should not exceed the max. 
allowed overcommit when 'indices.recovery.max_bytes_per_sec' is too large", recoverySettings.getMaxBytesPerSec(), equalTo( ByteSizeValue.ofBytes( Math.round(Objects.requireNonNullElse(maxOvercommitFactor, 100.d) * ByteSizeValue.ofMb(250).getBytes()) ) ) ); } public void testMaxBytesPerSecOnDataNodeWithAvailableBandwidths() { RecoverySettings recoverySettings = nodeRecoverySettings().withRoles(randomDataNodeRoles()) .withRandomMemory() .withNetworkBandwidth(ByteSizeValue.ofGb(between(1, 10))) .withDiskReadBandwidth(ByteSizeValue.ofMb(between(10, 50))) .withDiskWriteBandwidth(ByteSizeValue.ofMb(between(10, 50))) .build(); assertThat("Node bandwidth settings should all exist", recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data node should use pre 8.1.0 default because available bandwidths are lower", recoverySettings.getMaxBytesPerSec(), equalTo(DEFAULT_MAX_BYTES_PER_SEC) ); final ByteSizeValue indicesRecoveryMaxBytesPerSec = ByteSizeValue.ofMb(randomFrom(100, 250)); recoverySettings = nodeRecoverySettings().withRoles(randomDataNodeRoles()) .withRandomMemory() .withNetworkBandwidth(ByteSizeValue.ofGb(between(1, 10))) .withDiskReadBandwidth(ByteSizeValue.ofMb(between(10, 50))) .withDiskWriteBandwidth(ByteSizeValue.ofMb(between(10, 50))) .withIndicesRecoveryMaxBytesPerSec(indicesRecoveryMaxBytesPerSec) .build(); assertThat("Node bandwidth settings should all exist", recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data node should use 'indices.recovery.max_bytes_per_sec' setting because available bandwidths are lower", recoverySettings.getMaxBytesPerSec(), equalTo(indicesRecoveryMaxBytesPerSec) ); final Double factor = randomBoolean() ? 
randomDoubleBetween(0.5d, 1.0d, true) : null; final ByteSizeValue networkBandwidth = ByteSizeValue.ofMb(randomFrom(100, 250)); recoverySettings = nodeRecoverySettings().withRoles(randomDataNodeRoles()) .withRandomMemory() .withNetworkBandwidth(networkBandwidth) .withDiskReadBandwidth(ByteSizeValue.ofMb(between(250, 500))) .withDiskWriteBandwidth(ByteSizeValue.ofMb(between(250, 500))) .withOperatorDefaultFactor(factor) .build(); assertThat("Node bandwidth settings should all exist", recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data node should use available disk read bandwidth", recoverySettings.getMaxBytesPerSec(), equalTo( ByteSizeValue.ofBytes(Math.round(Objects.requireNonNullElse(factor, DEFAULT_FACTOR_VALUE) * networkBandwidth.getBytes())) ) ); final ByteSizeValue diskReadBandwidth = ByteSizeValue.ofMb(randomFrom(100, 250)); recoverySettings = nodeRecoverySettings().withRoles(randomDataNodeRoles()) .withRandomMemory() .withNetworkBandwidth(ByteSizeValue.ofGb(between(1, 10))) .withDiskReadBandwidth(diskReadBandwidth) .withDiskWriteBandwidth(ByteSizeValue.ofMb(between(250, 500))) .withOperatorDefaultFactor(factor) .build(); assertThat("Node bandwidth settings should all exist", recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data node should use available disk read bandwidth", recoverySettings.getMaxBytesPerSec(), equalTo( ByteSizeValue.ofBytes(Math.round(Objects.requireNonNullElse(factor, DEFAULT_FACTOR_VALUE) * diskReadBandwidth.getBytes())) ) ); final ByteSizeValue diskWriteBandwidth = ByteSizeValue.ofMb(randomFrom(100, 250)); recoverySettings = nodeRecoverySettings().withRoles(randomDataNodeRoles()) .withRandomMemory() .withNetworkBandwidth(ByteSizeValue.ofGb(between(1, 10))) .withDiskReadBandwidth(ByteSizeValue.ofMb(between(250, 500))) .withDiskWriteBandwidth(diskWriteBandwidth) .withOperatorDefaultFactor(factor) .build(); assertThat("Node bandwidth settings should all exist", 
recoverySettings.nodeBandwidthSettingsExist(), equalTo(true)); assertThat( "Data node should use available disk write bandwidth", recoverySettings.getMaxBytesPerSec(), equalTo( ByteSizeValue.ofBytes(Math.round(Objects.requireNonNullElse(factor, DEFAULT_FACTOR_VALUE) * diskWriteBandwidth.getBytes())) ) ); } public void testDefaultMaxBytesPerSecOnColdOrFrozenNode() { final Set<String> dataRoles = randomFrom(Set.of("data_cold"), Set.of("data_frozen"), Set.of("data_cold", "data_frozen")); { assertThat( "Dedicated cold/frozen data nodes with <= 4GB of RAM have a default 40mb rate limit", nodeRecoverySettings().withRoles(dataRoles) .withMemory(ByteSizeValue.ofBytes(randomLongBetween(1L, ByteSizeUnit.GB.toBytes(4L)))) .build() .getMaxBytesPerSec(), equalTo(ByteSizeValue.of(40, ByteSizeUnit.MB)) ); } { assertThat( "Dedicated cold/frozen data nodes with 4GB < RAM <= 8GB have a default 60mb rate limit", nodeRecoverySettings().withRoles(dataRoles) .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(4L) + 1L, ByteSizeUnit.GB.toBytes(8L)))) .build() .getMaxBytesPerSec(), equalTo(ByteSizeValue.of(60, ByteSizeUnit.MB)) ); } { assertThat( "Dedicated cold/frozen data nodes with 8GB < RAM <= 16GB have a default 90mb rate limit", nodeRecoverySettings().withRoles(dataRoles) .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(8L) + 1L, ByteSizeUnit.GB.toBytes(16L)))) .build() .getMaxBytesPerSec(), equalTo(ByteSizeValue.of(90, ByteSizeUnit.MB)) ); } { assertThat( "Dedicated cold/frozen data nodes with 16GB < RAM <= 32GB have a default 90mb rate limit", nodeRecoverySettings().withRoles(dataRoles) .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(16L) + 1L, ByteSizeUnit.GB.toBytes(32L)))) .build() .getMaxBytesPerSec(), equalTo(ByteSizeValue.of(125, ByteSizeUnit.MB)) ); } { assertThat( "Dedicated cold/frozen data nodes with RAM > 32GB have a default 250mb rate limit", nodeRecoverySettings().withRoles(dataRoles) 
.withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(32L) + 1L, ByteSizeUnit.TB.toBytes(4L)))) .build() .getMaxBytesPerSec(), equalTo(ByteSizeValue.of(250, ByteSizeUnit.MB)) ); } } public void testMaxBytesPerSecOnColdOrFrozenNodeWithIndicesRecoveryMaxBytesPerSec() { final ByteSizeValue random = randomByteSizeValue(); assertThat( "Dedicated cold/frozen data nodes should use the defined rate limit when set", nodeRecoverySettings().withRoles(randomFrom(Set.of("data_cold"), Set.of("data_frozen"), Set.of("data_cold", "data_frozen"))) .withMemory(ByteSizeValue.ofBytes(randomLongBetween(1L, ByteSizeUnit.TB.toBytes(4L)))) .withIndicesRecoveryMaxBytesPerSec(random) .build() .getMaxBytesPerSec(), equalTo(random) ); } public void testRecoverFromSnapshotPermitsAreNotLeakedWhenRecoverFromSnapshotIsDisabled() throws Exception { final Settings settings = Settings.builder() .put(INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey(), false) .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 1) .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 1) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); try (var mockLog = MockLog.capture(RecoverySettings.class)) { mockLog.addExpectation( new MockLog.UnseenEventExpectation("no warnings", RecoverySettings.class.getCanonicalName(), Level.WARN, "*") ); assertThat(recoverySettings.getUseSnapshotsDuringRecovery(), is(false)); for (int i = 0; i < 4; i++) { assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); } clusterSettings.applySettings(Settings.builder().put(INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey(), true).build()); final var releasable = recoverySettings.tryAcquireSnapshotDownloadPermits(); assertThat(releasable, is(notNullValue())); releasable.close(); 
mockLog.assertAllExpectationsMatched(); } } private static ByteSizeValue randomNonZeroByteSizeValue() { return ByteSizeValue.ofBytes(randomLongBetween(1L, Long.MAX_VALUE >> 16)); } private static Set<String> randomDataNodeRoles() { final Set<String> roles = new HashSet<>(randomSubsetOf(randomIntBetween(1, 4), "data", "data_hot", "data_warm", "data_content")); roles.addAll(randomSubsetOf(Set.of("data_cold", "data_frozen"))); if (randomBoolean()) { roles.addAll( randomSubsetOf( DiscoveryNodeRole.roles() .stream() .filter(role -> role != DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE) .filter(role -> role.canContainData() == false) .map(DiscoveryNodeRole::roleName) .collect(Collectors.toSet()) ) ); } return roles; } private static NodeRecoverySettings nodeRecoverySettings() { return new NodeRecoverySettings(); } private static
RecoverySettingsTests
java
junit-team__junit5
jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedClassIntegrationTests.java
{ "start": 22188, "end": 25231 }
class ____ { @Test void supportsMultipleAggregatorFields() { var results = executeTestsForClass(MultiAggregatorFieldInjectionTestCase.class); results.allEvents().assertStatistics(stats -> stats.started(6).succeeded(6)); } @Test void supportsInjectionOfInheritedFields() { var results = executeTestsForClass(InheritedHiddenParameterFieldTestCase.class); results.allEvents().assertStatistics(stats -> stats.started(6).succeeded(6)); assertThat(allReportEntries(results)) // .extracting(it -> tuple(it.get("super.value"), it.get("this.value"))) // .containsExactly(tuple("foo", "1"), tuple("bar", "2")); } @Test void doesNotSupportInjectionForFinalFields() { var classTemplateClass = InvalidFinalFieldTestCase.class; var results = executeTestsForClass(classTemplateClass); results.allEvents().assertThatEvents() // .haveExactly(1, finishedWithFailure(message( "Configuration error: @Parameter field [final int %s.i] must not be declared as final.".formatted( classTemplateClass.getName())))); } @Test void aggregatorFieldsMustNotDeclareIndex() { var classTemplateClass = InvalidAggregatorFieldWithIndexTestCase.class; var results = executeTestsForClass(classTemplateClass); results.allEvents().assertThatEvents() // .haveExactly(1, finishedWithFailure(message( "Configuration error: no index may be declared in @Parameter(0) annotation on aggregator field [%s %s.accessor].".formatted( ArgumentsAccessor.class.getName(), classTemplateClass.getName())))); } @Test void declaredIndexMustNotBeNegative() { var classTemplateClass = InvalidParameterIndexTestCase.class; var results = executeTestsForClass(classTemplateClass); results.allEvents().assertThatEvents() // .haveExactly(1, finishedWithFailure(message( "Configuration error: index must be greater than or equal to zero in @Parameter(-42) annotation on field [int %s.i].".formatted( classTemplateClass.getName())))); } @Test void declaredIndexMustBeUnique() { var classTemplateClass = InvalidDuplicateParameterDeclarationTestCase.class; var results 
= executeTestsForClass(classTemplateClass); results.allEvents().assertThatEvents() // .haveExactly(1, finishedWithFailure(message( "Configuration error: duplicate index declared in @Parameter(0) annotation on fields [int %s.i, long %s.l].".formatted( classTemplateClass.getName(), classTemplateClass.getName())))); } @Test void failsWithMeaningfulErrorWhenTooFewArgumentsProvidedForFieldInjection() { var results = executeTestsForClass(NotEnoughArgumentsForFieldsTestCase.class); results.containerEvents().assertThatEvents() // .haveExactly(1, finishedWithFailure(message(withPlatformSpecificLineSeparator( """ Configuration error: @ParameterizedClass has 2 required parameters (due to field injection) but there was 1 argument provided. Note: the provided arguments were [1]""")))); } } @Nested
FieldInjection
java
spring-projects__spring-security
oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/web/InvalidClientRegistrationIdException.java
{ "start": 761, "end": 971 }
class ____ extends IllegalArgumentException { /** * @param message the exception message */ InvalidClientRegistrationIdException(String message) { super(message); } }
InvalidClientRegistrationIdException
java
dropwizard__dropwizard
dropwizard-views-mustache/src/test/java/io/dropwizard/views/mustache/ErrorView.java
{ "start": 87, "end": 196 }
class ____ extends View { protected ErrorView() { super("/example-error.mustache"); } }
ErrorView
java
apache__flink
flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/CEPITCase.java
{ "start": 3042, "end": 37108 }
class ____ extends AbstractTestBaseJUnit4 { @Parameterized.Parameter public Configuration envConfiguration; @Parameterized.Parameters public static Collection<Configuration> prepareSharedBufferCacheConfig() { Configuration miniCacheConfig = new Configuration(); miniCacheConfig.set(CEPCacheOptions.CEP_CACHE_STATISTICS_INTERVAL, Duration.ofSeconds(1)); miniCacheConfig.set(CEPCacheOptions.CEP_SHARED_BUFFER_ENTRY_CACHE_SLOTS, 1); miniCacheConfig.set(CEPCacheOptions.CEP_SHARED_BUFFER_EVENT_CACHE_SLOTS, 1); Configuration bigCacheConfig = new Configuration(); miniCacheConfig.set(CEPCacheOptions.CEP_CACHE_STATISTICS_INTERVAL, Duration.ofSeconds(1)); return Arrays.asList(miniCacheConfig, bigCacheConfig); } /** * Checks that a certain event sequence is recognized. * * @throws Exception */ @Test public void testSimplePatternCEP() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); DataStream<Event> input = env.fromData( new Event(1, "barfoo", 1.0), new Event(2, "start", 2.0), new Event(3, "foobar", 3.0), new SubEvent(4, "foo", 4.0, 1.0), new Event(5, "middle", 5.0), new SubEvent(6, "middle", 6.0, 2.0), new SubEvent(7, "bar", 3.0, 3.0), new Event(42, "42", 42.0), new Event(8, "end", 1.0)); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .subtype(SubEvent.class) .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern) .inProcessingTime() .flatSelect( (p, o) -> { StringBuilder builder = new StringBuilder(); builder.append(p.get("start").get(0).getId()) .append(",") .append(p.get("middle").get(0).getId()) .append(",") .append(p.get("end").get(0).getId()); o.collect(builder.toString()); }, Types.STRING); List<String> resultList = new ArrayList<>(); 
result.executeAndCollect().forEachRemaining(resultList::add); assertEquals(Arrays.asList("2,6,8"), resultList); } @Test public void testSimpleKeyedPatternCEP() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); env.setParallelism(2); DataStream<Event> input = env.fromData( new Event(1, "barfoo", 1.0), new Event(2, "start", 2.0), new Event(3, "start", 2.1), new Event(3, "foobar", 3.0), new SubEvent(4, "foo", 4.0, 1.0), new SubEvent(3, "middle", 3.2, 1.0), new Event(42, "start", 3.1), new SubEvent(42, "middle", 3.3, 1.2), new Event(5, "middle", 5.0), new SubEvent(2, "middle", 6.0, 2.0), new SubEvent(7, "bar", 3.0, 3.0), new Event(42, "42", 42.0), new Event(3, "end", 2.0), new Event(2, "end", 1.0), new Event(42, "end", 42.0)) .keyBy( new KeySelector<Event, Integer>() { @Override public Integer getKey(Event value) throws Exception { return value.getId(); } }); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .subtype(SubEvent.class) .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern) .inProcessingTime() .select( p -> { StringBuilder builder = new StringBuilder(); builder.append(p.get("start").get(0).getId()) .append(",") .append(p.get("middle").get(0).getId()) .append(",") .append(p.get("end").get(0).getId()); return builder.toString(); }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); resultList.sort(String::compareTo); assertEquals(Arrays.asList("2,2,2", "3,3,3", "42,42,42"), resultList); } @Test public void testSimplePatternEventTime() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); // (Event, 
timestamp) DataStream<Event> input = env.fromData( Tuple2.of(new Event(1, "start", 1.0), 5L), Tuple2.of(new Event(2, "middle", 2.0), 1L), Tuple2.of(new Event(3, "end", 3.0), 3L), Tuple2.of(new Event(4, "end", 4.0), 10L), Tuple2.of(new Event(5, "middle", 5.0), 7L), // last element for high final watermark Tuple2.of(new Event(5, "middle", 5.0), 100L)) .assignTimestampsAndWatermarks( new WatermarkStrategyWithPunctuatedWatermarks< Tuple2<Event, Long>>() { @Override public long extractTimestamp( Tuple2<Event, Long> element, long previousTimestamp) { return element.f1; } @Override public Watermark checkAndGetNextWatermark( Tuple2<Event, Long> lastElement, long extractedTimestamp) { return new Watermark(lastElement.f1 - 5); } }) .map( new MapFunction<Tuple2<Event, Long>, Event>() { @Override public Event map(Tuple2<Event, Long> value) throws Exception { return value.f0; } }); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern) .select( new PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getId()) .append(",") .append(pattern.get("middle").get(0).getId()) .append(",") .append(pattern.get("end").get(0).getId()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); resultList.sort(String::compareTo); assertEquals(Arrays.asList("1,5,4"), resultList); } @Test public void testSimpleKeyedPatternEventTime() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); 
env.setParallelism(2); // (Event, timestamp) DataStream<Event> input = env.fromData( Tuple2.of(new Event(1, "start", 1.0), 5L), Tuple2.of(new Event(1, "middle", 2.0), 1L), Tuple2.of(new Event(2, "middle", 2.0), 4L), Tuple2.of(new Event(2, "start", 2.0), 3L), Tuple2.of(new Event(1, "end", 3.0), 3L), Tuple2.of(new Event(3, "start", 4.1), 5L), Tuple2.of(new Event(1, "end", 4.0), 10L), Tuple2.of(new Event(2, "end", 2.0), 8L), Tuple2.of(new Event(1, "middle", 5.0), 7L), Tuple2.of(new Event(3, "middle", 6.0), 9L), Tuple2.of(new Event(3, "end", 7.0), 7L)) .assignTimestampsAndWatermarks( new WatermarkStrategyWithPunctuatedWatermarks< Tuple2<Event, Long>>() { @Override public long extractTimestamp( Tuple2<Event, Long> element, long currentTimestamp) { return element.f1; } @Override public Watermark checkAndGetNextWatermark( Tuple2<Event, Long> lastElement, long extractedTimestamp) { return new Watermark(lastElement.f1 - 5); } }) .map( new MapFunction<Tuple2<Event, Long>, Event>() { @Override public Event map(Tuple2<Event, Long> value) throws Exception { return value.f0; } }) .keyBy( new KeySelector<Event, Integer>() { @Override public Integer getKey(Event value) throws Exception { return value.getId(); } }); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern) .select( new PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getId()) .append(",") .append(pattern.get("middle").get(0).getId()) .append(",") .append(pattern.get("end").get(0).getId()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); 
result.executeAndCollect().forEachRemaining(resultList::add); resultList.sort(String::compareTo); assertEquals(Arrays.asList("1,1,1", "2,2,2"), resultList); } @Test public void testSimplePatternWithSingleState() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); DataStream<Tuple2<Integer, Integer>> input = env.fromData(new Tuple2<>(0, 1), new Tuple2<>(0, 2)); Pattern<Tuple2<Integer, Integer>, ?> pattern = Pattern.<Tuple2<Integer, Integer>>begin("start") .where(SimpleCondition.of(rec -> rec.f1 == 1)); PatternStream<Tuple2<Integer, Integer>> pStream = CEP.pattern(input, pattern).inProcessingTime(); DataStream<Tuple2<Integer, Integer>> result = pStream.select( new PatternSelectFunction< Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>() { @Override public Tuple2<Integer, Integer> select( Map<String, List<Tuple2<Integer, Integer>>> pattern) throws Exception { return pattern.get("start").get(0); } }); List<Tuple2<Integer, Integer>> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); assertEquals(Arrays.asList(new Tuple2<>(0, 1)), resultList); } @Test public void testProcessingTimeWithinBetweenFirstAndLast() throws Exception { testProcessingTimeWithWindow(WithinType.FIRST_AND_LAST); } @Test public void testProcessingTimeWithinPreviousAndCurrent() throws Exception { testProcessingTimeWithWindow(WithinType.PREVIOUS_AND_CURRENT); } private void testProcessingTimeWithWindow(WithinType withinType) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); env.setParallelism(1); DataStream<Integer> input = env.fromData(1, 2); Pattern<Integer, ?> pattern = Pattern.<Integer>begin("start") .followedByAny("end") .within(Duration.ofDays(1), withinType); DataStream<Integer> result = CEP.pattern(input, pattern) .inProcessingTime() .select( new PatternSelectFunction<Integer, Integer>() { @Override public 
Integer select(Map<String, List<Integer>> pattern) throws Exception { return pattern.get("start").get(0) + pattern.get("end").get(0); } }); List<Integer> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); assertEquals(Arrays.asList(3), resultList); } @Test public void testTimeoutHandlingWithinFirstAndLast() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); env.setParallelism(1); // (Event, timestamp) DataStream<Event> input = env.fromData( Tuple2.of(new Event(1, "start", 1.0), 1L), Tuple2.of(new Event(1, "middle", 2.0), 5L), Tuple2.of(new Event(1, "start", 2.0), 4L), Tuple2.of(new Event(1, "end", 2.0), 6L)) .assignTimestampsAndWatermarks( new WatermarkStrategyWithPunctuatedWatermarks< Tuple2<Event, Long>>() { @Override public long extractTimestamp( Tuple2<Event, Long> element, long currentTimestamp) { return element.f1; } @Override public Watermark checkAndGetNextWatermark( Tuple2<Event, Long> lastElement, long extractedTimestamp) { return new Watermark(lastElement.f1 - 5); } }) .map( new MapFunction<Tuple2<Event, Long>, Event>() { @Override public Event map(Tuple2<Event, Long> value) throws Exception { return value.f0; } }); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))) .within(Duration.ofMillis(3)); OutputTag<String> outputTag = new OutputTag<String>("side-output") {}; SingleOutputStreamOperator<String> result = CEP.pattern(input, pattern) .select( outputTag, new PatternTimeoutFunction<Event, String>() { @Override public String timeout( Map<String, List<Event>> pattern, long timeoutTimestamp) throws Exception { return pattern.get("start").get(0).getPrice() + ""; } }, new 
PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getPrice()) .append(",") .append(pattern.get("middle").get(0).getPrice()) .append(",") .append(pattern.get("end").get(0).getPrice()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); resultList.sort(Comparator.comparing(Object::toString)); List<String> timeoutList = new ArrayList<>(); result.getSideOutput(outputTag).executeAndCollect().forEachRemaining(timeoutList::add); timeoutList.sort(Comparator.comparing(Object::toString)); List<String> timeoutExpected = Arrays.asList("1.0", "2.0", "2.0"); List<String> resultExpected = Arrays.asList("2.0,2.0,2.0"); assertEquals(timeoutExpected, timeoutList); assertEquals(resultExpected, resultList); } @Test public void testTimeoutHandlingWithinPreviousAndCurrent() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); env.setParallelism(1); // (Event, timestamp) DataStream<Event> input = env.fromData( Tuple2.of(new Event(1, "start", 1.0), 1L), Tuple2.of(new Event(1, "middle", 2.0), 5L), Tuple2.of(new Event(1, "start", 2.0), 4L), Tuple2.of(new Event(1, "end", 2.0), 6L)) .assignTimestampsAndWatermarks( new WatermarkStrategyWithPunctuatedWatermarks< Tuple2<Event, Long>>() { @Override public long extractTimestamp( Tuple2<Event, Long> element, long currentTimestamp) { return element.f1; } @Override public Watermark checkAndGetNextWatermark( Tuple2<Event, Long> lastElement, long extractedTimestamp) { return new Watermark(lastElement.f1 - 5); } }) .map( new MapFunction<Tuple2<Event, Long>, Event>() { @Override public Event map(Tuple2<Event, Long> value) throws Exception { return value.f0; } }); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> 
value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))) .within(Duration.ofMillis(3), WithinType.PREVIOUS_AND_CURRENT); OutputTag<String> outputTag = new OutputTag<String>("side-output") {}; SingleOutputStreamOperator<String> result = CEP.pattern(input, pattern) .select( outputTag, new PatternTimeoutFunction<Event, String>() { @Override public String timeout( Map<String, List<Event>> pattern, long timeoutTimestamp) throws Exception { return pattern.get("start").get(0).getPrice() + ""; } }, new PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getPrice()) .append(",") .append(pattern.get("middle").get(0).getPrice()) .append(",") .append(pattern.get("end").get(0).getPrice()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); resultList.sort(Comparator.comparing(Object::toString)); List<String> timeoutList = new ArrayList<>(); result.getSideOutput(outputTag).executeAndCollect().forEachRemaining(timeoutList::add); timeoutList.sort(Comparator.comparing(Object::toString)); List<String> timeoutExpected = Arrays.asList("1.0", "2.0"); List<String> resultExpected = Arrays.asList("1.0,2.0,2.0", "2.0,2.0,2.0"); assertEquals(timeoutExpected, timeoutList); assertEquals(resultExpected, resultList); } /** * Checks that a certain event sequence is recognized with an OR filter. 
* * @throws Exception */ @Test public void testSimpleOrFilterPatternCEP() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); DataStream<Event> input = env.fromData( new Event(1, "start", 1.0), new Event(2, "middle", 2.0), new Event(3, "end", 3.0), new Event(4, "start", 4.0), new Event(5, "middle", 5.0), new Event(6, "end", 6.0)); Pattern<Event, ?> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getPrice() == 2.0)) .or(SimpleCondition.of(value -> value.getPrice() == 5.0)) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern) .inProcessingTime() .select( new PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getId()) .append(",") .append(pattern.get("middle").get(0).getId()) .append(",") .append(pattern.get("end").get(0).getId()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); List<String> expected = Arrays.asList("1,5,6", "1,2,3", "4,5,6", "1,2,6"); expected.sort(String::compareTo); resultList.sort(String::compareTo); assertEquals(expected, resultList); } /** * Checks that a certain event sequence is recognized. 
* * @throws Exception */ @Test public void testSimplePatternEventTimeWithComparator() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConfiguration); // (Event, timestamp) DataStream<Event> input = env.fromData( Tuple2.of(new Event(1, "start", 1.0), 5L), Tuple2.of(new Event(2, "middle", 2.0), 1L), Tuple2.of(new Event(3, "end", 3.0), 3L), Tuple2.of(new Event(4, "end", 4.0), 10L), Tuple2.of(new Event(5, "middle", 6.0), 7L), Tuple2.of(new Event(6, "middle", 5.0), 7L), // last element for high final watermark Tuple2.of(new Event(7, "middle", 5.0), 100L)) .assignTimestampsAndWatermarks( new WatermarkStrategyWithPunctuatedWatermarks< Tuple2<Event, Long>>() { @Override public long extractTimestamp( Tuple2<Event, Long> element, long previousTimestamp) { return element.f1; } @Override public Watermark checkAndGetNextWatermark( Tuple2<Event, Long> lastElement, long extractedTimestamp) { return new Watermark(lastElement.f1 - 5); } }) .map( new MapFunction<Tuple2<Event, Long>, Event>() { @Override public Event map(Tuple2<Event, Long> value) throws Exception { return value.f0; } }); EventComparator<Event> comparator = new CustomEventComparator(); Pattern<Event, ? 
extends Event> pattern = Pattern.<Event>begin("start") .where(SimpleCondition.of(value -> value.getName().equals("start"))) .followedByAny("middle") .where(SimpleCondition.of(value -> value.getName().equals("middle"))) .followedByAny("end") .where(SimpleCondition.of(value -> value.getName().equals("end"))); DataStream<String> result = CEP.pattern(input, pattern, comparator) .select( new PatternSelectFunction<Event, String>() { @Override public String select(Map<String, List<Event>> pattern) { StringBuilder builder = new StringBuilder(); builder.append(pattern.get("start").get(0).getId()) .append(",") .append(pattern.get("middle").get(0).getId()) .append(",") .append(pattern.get("end").get(0).getId()); return builder.toString(); } }); List<String> resultList = new ArrayList<>(); result.executeAndCollect().forEachRemaining(resultList::add); List<String> expected = Arrays.asList("1,6,4", "1,5,4"); expected.sort(String::compareTo); resultList.sort(String::compareTo); assertEquals(expected, resultList); } private static
CEPITCase