language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/ParallelMergeOrdered.java | {
"start": 1132,
"end": 2343
} | class ____<T> extends Flux<T> implements Scannable {
final ParallelFlux<? extends T> source;
final int prefetch;
final Comparator<? super T> valueComparator;
ParallelMergeOrdered(ParallelFlux<? extends T> source, int prefetch,
Comparator<? super T> valueComparator) {
if (prefetch <= 0) {
throw new IllegalArgumentException("prefetch > 0 required but it was " + prefetch);
}
this.source = ParallelFlux.from(source);
this.prefetch = prefetch;
this.valueComparator = valueComparator;
}
@Override
public int getPrefetch() {
return prefetch;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return source;
if (key == Attr.PREFETCH) return prefetch;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
if (key == InternalProducerAttr.INSTANCE) return true;
return null;
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
FluxMergeComparing.MergeOrderedMainProducer<T>
main = new FluxMergeComparing.MergeOrderedMainProducer<>(actual, valueComparator, prefetch, source.parallelism(), true, true);
actual.onSubscribe(main);
source.subscribe(main.subscribers);
}
}
| ParallelMergeOrdered |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ImmutableMemberCollectionTest.java | {
"start": 9207,
"end": 9796
} | class ____ {
private final Set<String> mySet = new HashSet<>();
public void addString(String x) {
this.mySet.add(x);
}
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void setInNestedClassMutationInParent_doesNothing() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.common.collect.ImmutableSet;
import java.util.Set;
import java.util.HashSet;
| Builder |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/CommandFactoryResolver.java | {
"start": 139,
"end": 579
} | interface ____ {
/**
* Resolve a {@link CommandFactory} given a{@link DeclaredCommandMethod} and {@link RedisCommandsMetadata}.
*
* @param method must not be {@code null}.
* @param redisCommandsMetadata must not be {@code null}.
* @return the {@link CommandFactory}.
*/
CommandFactory resolveRedisCommandFactory(CommandMethod method, RedisCommandsMetadata redisCommandsMetadata);
}
| CommandFactoryResolver |
java | apache__camel | components/camel-resilience4j/src/test/java/org/apache/camel/component/resilience4j/ResilienceRecordIgnoreExceptionTest.java | {
"start": 1169,
"end": 4637
} | class ____ extends CamelTestSupport {
@Test
public void testHello() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SUCCESSFUL_EXECUTION, true);
template.sendBody("direct:start", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testFile() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("file");
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SUCCESSFUL_EXECUTION, false);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SHORT_CIRCUITED, false);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_FROM_FALLBACK, false);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_IGNORED, true);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_FROM_FALLBACK, false);
template.sendBody("direct:start", "file");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testKaboom() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("kaboom");
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SUCCESSFUL_EXECUTION, true);
template.sendBody("direct:start", "kaboom");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testIo() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Fallback message");
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SUCCESSFUL_EXECUTION, false);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_SHORT_CIRCUITED, true);
getMockEndpoint("mock:result").expectedPropertyReceived(CircuitBreakerConstants.RESPONSE_FROM_FALLBACK, true);
template.sendBody("direct:start", "io");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.to("log:start")
.circuitBreaker().resilience4jConfiguration().recordException(IOException.class).ignoreException(FileNotFoundException.class).end()
.process(e -> {
String b = e.getMessage().getBody(String.class);
if ("kaboom".equals(b)) {
throw new NullPointerException();
} else if ("file".equals(b)) {
throw new FileNotFoundException("unknown.txt");
} else if ("io".equals(b)) {
throw new IOException("Host not found");
}
})
.onFallback()
.transform().constant("Fallback message")
.end()
.to("log:result")
.to("mock:result");
}
};
}
}
| ResilienceRecordIgnoreExceptionTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/coordination/PublishWithJoinResponse.java | {
"start": 872,
"end": 2523
} | class ____ extends TransportResponse {
private final PublishResponse publishResponse;
private final Optional<Join> optionalJoin;
public PublishWithJoinResponse(PublishResponse publishResponse, Optional<Join> optionalJoin) {
this.publishResponse = publishResponse;
this.optionalJoin = optionalJoin;
}
public PublishWithJoinResponse(StreamInput in) throws IOException {
this.publishResponse = new PublishResponse(in);
this.optionalJoin = Optional.ofNullable(in.readOptionalWriteable(Join::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
publishResponse.writeTo(out);
out.writeOptionalWriteable(optionalJoin.orElse(null));
}
public PublishResponse getPublishResponse() {
return publishResponse;
}
public Optional<Join> getJoin() {
return optionalJoin;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if ((o instanceof PublishWithJoinResponse) == false) return false;
PublishWithJoinResponse that = (PublishWithJoinResponse) o;
if (publishResponse.equals(that.publishResponse) == false) return false;
return optionalJoin.equals(that.optionalJoin);
}
@Override
public int hashCode() {
int result = publishResponse.hashCode();
result = 31 * result + optionalJoin.hashCode();
return result;
}
@Override
public String toString() {
return "PublishWithJoinResponse{" + "publishResponse=" + publishResponse + ", optionalJoin=" + optionalJoin + '}';
}
}
| PublishWithJoinResponse |
java | google__auto | value/src/main/java/com/google/auto/value/processor/BuilderMethodClassifier.java | {
"start": 5891,
"end": 8165
} | class ____ {@code foo()} or {@code getFoo()} then
* the name of the property is {@code foo}, If the builder also has a method of the same name
* ({@code foo()} or {@code getFoo()}) then the set returned here will contain {@code foo}.
*/
ImmutableMap<String, BuilderSpec.PropertyGetter> builderGetters() {
return ImmutableMap.copyOf(builderGetters);
}
/**
* Returns the methods that were identified as {@code build()} methods. These are methods that
* have no parameters and return the {@code @AutoValue} type, conventionally called {@code
* build()}.
*/
Set<ExecutableElement> buildMethods() {
return ImmutableSet.copyOf(buildMethods);
}
/** Classifies the given methods and sets the state of this object based on what is found. */
boolean classifyMethods(Iterable<ExecutableElement> methods, boolean autoValueHasToBuilder) {
int startErrorCount = errorReporter.errorCount();
for (ExecutableElement method : methods) {
classifyMethod(method);
}
if (errorReporter.errorCount() > startErrorCount) {
return false;
}
Multimap<String, PropertySetter> propertyNameToSetter;
if (propertyNameToPrefixedSetters.isEmpty()) {
propertyNameToSetter = propertyNameToUnprefixedSetters;
this.settersPrefixed = false;
} else if (propertyNameToUnprefixedSetters.isEmpty()) {
propertyNameToSetter = propertyNameToPrefixedSetters;
this.settersPrefixed = true;
} else {
errorReporter.reportError(
propertyNameToUnprefixedSetters.values().iterator().next().getSetter(),
"[%sSetNotSet] If any setter methods use the setFoo convention then all must",
autoWhat());
return false;
}
for (String property : rewrittenPropertyTypes.keySet()) {
TypeMirror propertyType = rewrittenPropertyTypes.get(property).getType();
boolean hasSetter = propertyNameToSetter.containsKey(property);
PropertyBuilder propertyBuilder = propertyNameToPropertyBuilder.get(property);
boolean hasBuilder = propertyBuilder != null;
if (hasBuilder) {
// If property bar of type Bar has a barBuilder() that returns BarBuilder, then it must
// be possible to make a BarBuilder from a Bar if either (1) the @AutoValue | called |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/ToolchainsBuilderException.java | {
"start": 924,
"end": 1014
} | class ____ by the {@link ToolchainsBuilder}.
*
* @since 4.0.0
*/
@Experimental
public | throw |
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java | {
"start": 5387,
"end": 8085
} | class ____, instead usually calling the utility methods on {@link TypeResolutions}.
* </p>
* <p>
* Implementations should fail if {@link #childrenResolved()} returns {@code false}.
* </p>
*/
protected TypeResolution resolveType() {
return TypeResolution.TYPE_RESOLVED;
}
public final Expression canonical() {
if (lazyCanonical == null) {
lazyCanonical = canonicalize();
}
return lazyCanonical;
}
protected Expression canonicalize() {
if (children().isEmpty()) {
return this;
}
List<Expression> canonicalChildren = Expressions.canonicalize(children());
// check if replacement is really needed
if (children().equals(canonicalChildren)) {
return this;
}
return replaceChildrenSameSize(canonicalChildren);
}
/**
* Whether this expression means the same as {@code other}, even if they are not exactly equal.
* For example, {@code a + b} and {@code b + a} are not equal, but they are semantically equal.
* <p>
* If two expressions are equal, they are also semantically equal, but the reverse is generally not true.
* <p>
* Caution! {@link Attribute#semanticEquals(Expression)} is especially lenient, as it considers two attributes
* with the same {@link NameId} to be semantically equal, even if they have different data types or are represented using different
* classes.
* <p>
* But this doesn't extend to expressions containing attributes as children, which is pretty inconsistent.
* We have to revisit this before using {@link #semanticEquals} in more places.
*/
public boolean semanticEquals(Expression other) {
return canonical().equals(other.canonical());
}
/**
* A hash code that is consistent with {@link #semanticEquals}.
*/
public int semanticHash() {
return canonical().hashCode();
}
@Override
public boolean resolved() {
return childrenResolved() && typeResolved().resolved();
}
/**
* The {@link DataType} returned by executing the tree rooted at this
* expression. If {@link #typeResolved()} returns an error then the behavior
* of this method is undefined. It <strong>may</strong> return a valid
* type. Or it may throw an exception. Or it may return a totally nonsensical
* type.
*/
public abstract DataType dataType();
@Override
public String toString() {
return sourceText();
}
@Override
public String propertiesToString(boolean skipIfChild) {
return super.propertiesToString(false);
}
}
| directly |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/threadlocal/NamedInternalThreadFactoryTest.java | {
"start": 925,
"end": 1290
} | class ____ {
@Test
void newThread() throws Exception {
NamedInternalThreadFactory namedInternalThreadFactory = new NamedInternalThreadFactory();
Thread t = namedInternalThreadFactory.newThread(() -> {});
Assertions.assertEquals(t.getClass(), InternalThread.class, "thread is not InternalThread");
}
}
| NamedInternalThreadFactoryTest |
java | apache__avro | doc/examples/mr-example/src/main/java/example/MapredColorCount.java | {
"start": 1197,
"end": 1723
} | class ____ extends AvroMapper<User, Pair<CharSequence, Integer>> {
@Override
public void map(User user, AvroCollector<Pair<CharSequence, Integer>> collector, Reporter reporter)
throws IOException {
CharSequence color = user.getFavoriteColor();
// We need this check because the User.favorite_color field has type ["string", "null"]
if (color == null) {
color = "none";
}
collector.collect(new Pair<CharSequence, Integer>(color, 1));
}
}
public static | ColorCountMapper |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/disposables/SequentialDisposable.java | {
"start": 917,
"end": 1018
} | class ____ AtomicReference directly so watch out for the API leak!
* @since 2.0
*/
public final | extends |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/resulttype/ResultTypeWithConstructorConstructingFruitInterfaceMapper.java | {
"start": 352,
"end": 671
} | interface ____ {
ResultTypeWithConstructorConstructingFruitInterfaceMapper INSTANCE = Mappers.getMapper(
ResultTypeWithConstructorConstructingFruitInterfaceMapper.class );
@BeanMapping(resultType = Citrus.class)
IsFruit map(FruitDto source);
}
| ResultTypeWithConstructorConstructingFruitInterfaceMapper |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationsScannerTests.java | {
"start": 24739,
"end": 24828
} | interface ____ {
@TestAnnotation6
void method();
}
static | HierarchyInterfaceInterface |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIntEvaluator.java | {
"start": 3921,
"end": 4539
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory integer;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory integer) {
this.source = source;
this.integer = integer;
}
@Override
public ToStringFromIntEvaluator get(DriverContext context) {
return new ToStringFromIntEvaluator(source, integer.get(context), context);
}
@Override
public String toString() {
return "ToStringFromIntEvaluator[" + "integer=" + integer + "]";
}
}
}
| Factory |
java | netty__netty | common/src/test/java/io/netty/util/internal/TypeParameterMatcherTest.java | {
"start": 3520,
"end": 3811
} | class ____ extends C { }
@Test
public void testInaccessibleClass() throws Exception {
TypeParameterMatcher m = TypeParameterMatcher.find(new U<T>() { }, U.class, "E");
assertFalse(m.match(new Object()));
assertTrue(m.match(new T()));
}
private static | CC |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/provisioning/JdbcUserDetailsManager.java | {
"start": 3022,
"end": 3344
} | class ____
* differentiate between authorities which were loaded for an individual or for a group of
* which the individual is a member, it's important that you take this into account when
* using this implementation for managing your users.
*
* @author Luke Taylor
* @author Junhyeok Lee
* @since 2.0
*/
public | cannot |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/SafeClusterStateSupplier.java | {
"start": 1052,
"end": 1905
} | class ____ implements ClusterStateSupplier, ClusterStateListener {
private volatile ClusterState currentClusterState;
@Override
public void clusterChanged(ClusterChangedEvent event) {
// In this default implementation, "ready" is really "is cluster state available", which after the initial recovery it should be.
// If you need a different condition, feel free to add a different implementation of ClusterStateSupplier
if (isInitialized() || event.state().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
currentClusterState = event.state();
}
}
private boolean isInitialized() {
return currentClusterState != null;
}
@Override
public Optional<ClusterState> get() {
return Optional.ofNullable(currentClusterState);
}
}
| SafeClusterStateSupplier |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/LeftOuterJoinTaskTest.java | {
"start": 1635,
"end": 10088
} | class ____ extends AbstractOuterJoinTaskTest {
private static final long HASH_MEM = 6 * 1024 * 1024;
private final double hash_frac;
LeftOuterJoinTaskTest(ExecutionConfig config) {
super(config);
hash_frac = (double) HASH_MEM / this.getMemoryManager().getMemorySize();
}
@Override
protected int calculateExpectedCount(int keyCnt1, int valCnt1, int keyCnt2, int valCnt2) {
return valCnt1 * valCnt2 * Math.min(keyCnt1, keyCnt2)
+ (keyCnt1 > keyCnt2 ? (keyCnt1 - keyCnt2) * valCnt1 : 0);
}
@Override
protected DriverStrategy getSortDriverStrategy() {
return DriverStrategy.LEFT_OUTER_MERGE;
}
@Override
protected AbstractOuterJoinDriver<
Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>
getOuterJoinDriver() {
return new LeftOuterJoinDriver<>();
}
@TestTemplate
void testHash1LeftOuterJoinTask() throws Exception {
final int keyCnt1 = 20;
final int valCnt1 = 1;
final int keyCnt2 = 10;
final int valCnt2 = 2;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testHash2LeftOuterJoinTask() throws Exception {
final int keyCnt1 = 20;
final int valCnt1 = 1;
final int keyCnt2 = 20;
final int valCnt2 = 1;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testHash3LeftOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 1;
int keyCnt2 = 20;
int valCnt2 = 20;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testHash4LeftOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 1;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testHash5LeftOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testHash6LeftOuterJoinTask() throws Exception {
int keyCnt1 = 10;
int valCnt1 = 1;
int keyCnt2 = 20;
int valCnt2 = 2;
testHashLeftOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
private void testHashLeftOuterJoinTask(int keyCnt1, int valCnt1, int keyCnt2, int valCnt2)
throws Exception {
setOutput(this.outList, this.serializer);
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(DriverStrategy.LEFT_HYBRIDHASH_BUILD_SECOND);
getTaskConfig().setRelativeMemoryDriver(hash_frac);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(keyCnt1, valCnt1, false), this.serializer);
addInput(new UniformIntTupleGenerator(keyCnt2, valCnt2, false), this.serializer);
testDriver(testTask, MockJoinStub.class);
final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
assertThat(this.outList)
.withFailMessage("Result set size was %d. Expected was %d", outList.size(), expCnt)
.hasSize(expCnt);
this.outList.clear();
}
@TestTemplate
void testFailingHashLeftOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(DriverStrategy.LEFT_HYBRIDHASH_BUILD_SECOND);
getTaskConfig().setRelativeMemoryDriver(this.hash_frac);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(keyCnt1, valCnt1, true), this.serializer);
addInput(new UniformIntTupleGenerator(keyCnt2, valCnt2, true), this.serializer);
assertThatThrownBy(() -> testDriver(testTask, MockFailingJoinStub.class))
.isInstanceOf(ExpectedTestException.class);
}
@TestTemplate
void testCancelLeftOuterJoinTaskWhileBuilding() throws Exception {
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(DriverStrategy.LEFT_HYBRIDHASH_BUILD_SECOND);
getTaskConfig().setRelativeMemoryDriver(this.hash_frac);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(100, 100, true), this.serializer);
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 100), this.serializer);
final AtomicReference<Throwable> error = new AtomicReference<>();
final Thread taskRunner =
new Thread("Task runner for testCancelOuterJoinTaskWhileSort1()") {
@Override
public void run() {
try {
testDriver(testTask, MockJoinStub.class);
} catch (Throwable t) {
error.set(t);
}
}
};
taskRunner.start();
Thread.sleep(1000);
cancel();
taskRunner.join(60000);
assertThat(taskRunner.isAlive())
.withFailMessage("Task thread did not finish within 60 seconds")
.isFalse();
assertThat(error.get()).isNull();
}
@TestTemplate
void testCancelLeftOuterJoinTaskWhileProbing() throws Exception {
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(DriverStrategy.LEFT_HYBRIDHASH_BUILD_SECOND);
getTaskConfig().setRelativeMemoryDriver(this.hash_frac);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 100), this.serializer);
addInput(new UniformIntTupleGenerator(1, 1, true), this.serializer);
final AtomicReference<Throwable> error = new AtomicReference<>();
final Thread taskRunner =
new Thread("Task runner for testCancelOuterJoinTaskWhileSort1()") {
@Override
public void run() {
try {
testDriver(testTask, MockJoinStub.class);
} catch (Throwable t) {
error.set(t);
}
}
};
taskRunner.start();
Thread.sleep(1000);
cancel();
taskRunner.join(60000);
assertThat(taskRunner.isAlive())
.withFailMessage("Task thread did not finish within 60 seconds")
.isFalse();
assertThat(error.get()).isNull();
}
}
| LeftOuterJoinTaskTest |
java | google__dagger | javatests/dagger/hilt/android/ActivityInjectedViewModelTest.java | {
"start": 2988,
"end": 3194
} | class ____ {
@Provides
static MyViewModel provideModel(FragmentActivity activity) {
return new ViewModelProvider(activity).get(MyViewModel.class);
}
}
public static final | MyViewModelModel |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/onetomany/ids/SetRefEdMulIdEntity.java | {
"start": 532,
"end": 2463
} | class ____ {
@Id
private Integer id1;
@Id
private Integer id2;
@Audited
private String data;
@Audited
@OneToMany(mappedBy = "reference")
private Set<SetRefIngMulIdEntity> reffering;
public SetRefEdMulIdEntity() {
}
public SetRefEdMulIdEntity(MulId id, String data) {
this.id1 = id.getId1();
this.id2 = id.getId2();
this.data = data;
}
public SetRefEdMulIdEntity(Integer id1, Integer id2, String data) {
this.id1 = id1;
this.id2 = id2;
this.data = data;
}
public SetRefEdMulIdEntity(String data) {
this.data = data;
}
public Integer getId1() {
return id1;
}
public void setId1(Integer id1) {
this.id1 = id1;
}
public Integer getId2() {
return id2;
}
public void setId2(Integer id2) {
this.id2 = id2;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public Set<SetRefIngMulIdEntity> getReffering() {
return reffering;
}
public void setReffering(Set<SetRefIngMulIdEntity> reffering) {
this.reffering = reffering;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof SetRefEdMulIdEntity) ) {
return false;
}
SetRefEdMulIdEntity that = (SetRefEdMulIdEntity) o;
if ( data != null ? !data.equals( that.getData() ) : that.getData() != null ) {
return false;
}
if ( id1 != null ? !id1.equals( that.getId1() ) : that.getId1() != null ) {
return false;
}
if ( id2 != null ? !id2.equals( that.getId2() ) : that.getId2() != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id1 != null ? id1.hashCode() : 0);
result = 31 * result + (id2 != null ? id2.hashCode() : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
public String toString() {
return "SetRefEdMulIdEntity(id1 = " + id1 + ", id2 = " + id2 + ", data = " + data + ")";
}
}
| SetRefEdMulIdEntity |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | {
"start": 2829,
"end": 33420
} | class ____ implements FileMergingSnapshotManager {
private static final Logger LOG = LoggerFactory.getLogger(FileMergingSnapshotManager.class);
/** The number of recent checkpoints whose IDs are remembered. */
private static final int NUM_GHOST_CHECKPOINT_IDS = 16;
/** The identifier of this manager. */
private final String id;
/** The executor for I/O operations in this manager. */
protected final Executor ioExecutor;
/** Guard for {@link #initFileSystem}, {@link #restoreStateHandles} and uploadedStates. */
protected final Object lock = new Object();
@GuardedBy("lock")
protected TreeMap<Long, Set<LogicalFile>> uploadedStates = new TreeMap<>();
/** The map that holds all the known live logical files. */
private final Map<LogicalFileId, LogicalFile> knownLogicalFiles = new ConcurrentHashMap<>();
/** The {@link FileSystem} that this manager works on. */
protected FileSystem fs;
// checkpoint directories
protected Path checkpointDir;
protected Path sharedStateDir;
protected Path taskOwnedStateDir;
/** The buffer size for writing files to the file system. */
protected int writeBufferSize;
/**
* The file system should only be initialized once.
*
* @see FileMergingSnapshotManager#initFileSystem for the reason why a throttle is needed.
*/
private boolean fileSystemInitiated = false;
/**
* File-system dependent value. Mark whether the file system this manager running on need sync
* for visibility. If true, DO a file sync after writing each segment .
*/
protected boolean shouldSyncAfterClosingLogicalFile;
/** Max size for a physical file. */
protected long maxPhysicalFileSize;
/** Type of physical file pool. */
protected PhysicalFilePool.Type filePoolType;
protected final float maxSpaceAmplification;
protected PhysicalFileDeleter physicalFileDeleter = this::deletePhysicalFile;
private final Object notifyLock = new Object();
@GuardedBy("notifyLock")
private final TreeMap<Long, Set<SubtaskKey>> notifiedSubtaskCheckpoint = new TreeMap<>();
@GuardedBy("notifyLock")
private final TreeSet<Long> notifiedCheckpoint = new TreeSet<>();
/**
* Currently the shared state files are merged within each subtask, files are split by different
* directories.
*/
private final Map<SubtaskKey, Path> managedSharedStateDir = new ConcurrentHashMap<>();
/**
* The {@link DirectoryStreamStateHandle} with it ongoing checkpoint reference count for shared
* state directories, one for each subtask and job.
*/
private final Map<SubtaskKey, DirectoryHandleWithReferenceTrack> managedSharedStateDirHandles =
new ConcurrentHashMap<>();
/**
* The private state files are merged across subtasks, there is only one directory for
* merged-files within one TM per job.
*/
protected Path managedExclusiveStateDir;
/**
* The {@link DirectoryStreamStateHandle} with it ongoing checkpoint reference count for private
* state directory, one for each taskmanager and job.
*/
protected DirectoryHandleWithReferenceTrack managedExclusiveStateDirHandle;
/** The current space statistic, updated on file creation/deletion. */
protected SpaceStat spaceStat;
/** The metric group for file merging snapshot manager. */
protected FileMergingMetricGroup metricGroup;
public FileMergingSnapshotManagerBase(
String id,
long maxFileSize,
PhysicalFilePool.Type filePoolType,
float maxSpaceAmplification,
Executor ioExecutor,
MetricGroup parentMetricGroup) {
this.id = id;
this.maxPhysicalFileSize = maxFileSize;
this.filePoolType = filePoolType;
this.maxSpaceAmplification =
maxSpaceAmplification < 1f ? Float.MAX_VALUE : maxSpaceAmplification;
this.ioExecutor = ioExecutor;
this.spaceStat = new SpaceStat();
this.metricGroup = new FileMergingMetricGroup(parentMetricGroup, spaceStat);
}
@Override
public void initFileSystem(
FileSystem fileSystem,
Path checkpointBaseDir,
Path sharedStateDir,
Path taskOwnedStateDir,
int writeBufferSize)
throws IllegalArgumentException {
synchronized (lock) {
if (fileSystemInitiated) {
Preconditions.checkArgument(
checkpointBaseDir.equals(this.checkpointDir),
"The checkpoint base dir is not deterministic across subtasks.");
Preconditions.checkArgument(
sharedStateDir.equals(this.sharedStateDir),
"The shared checkpoint dir is not deterministic across subtasks.");
Preconditions.checkArgument(
taskOwnedStateDir.equals(this.taskOwnedStateDir),
"The task-owned checkpoint dir is not deterministic across subtasks.");
return;
}
this.fs = fileSystem;
this.checkpointDir = Preconditions.checkNotNull(checkpointBaseDir);
this.sharedStateDir = Preconditions.checkNotNull(sharedStateDir);
this.taskOwnedStateDir = Preconditions.checkNotNull(taskOwnedStateDir);
this.shouldSyncAfterClosingLogicalFile = shouldSyncAfterClosingLogicalFile(fileSystem);
// Initialize the managed exclusive path using id as the child path name.
// Currently, we use the task-owned directory to place the merged private state.
// According
// to the FLIP-306, we later consider move these files to the new introduced
// task-manager-owned directory.
Path managedExclusivePath = new Path(taskOwnedStateDir, uriEscape(id));
boolean newCreated = createManagedDirectory(managedExclusivePath);
this.managedExclusiveStateDir = managedExclusivePath;
this.managedExclusiveStateDirHandle =
wrap(DirectoryStreamStateHandle.of(managedExclusivePath), newCreated);
this.writeBufferSize = writeBufferSize;
this.fileSystemInitiated = true;
}
}
@Override
public void registerSubtaskForSharedStates(SubtaskKey subtaskKey) {
String managedDirName = subtaskKey.getManagedDirName();
Path managedPath = new Path(sharedStateDir, uriEscape(managedDirName));
if (!managedSharedStateDir.containsKey(subtaskKey)) {
boolean newCreated = createManagedDirectory(managedPath);
managedSharedStateDir.put(subtaskKey, managedPath);
managedSharedStateDirHandles.put(
subtaskKey, wrap(DirectoryStreamStateHandle.of(managedPath), newCreated));
}
}
@Override
public void unregisterSubtask(SubtaskKey subtaskKey) {
if (managedSharedStateDir.containsKey(subtaskKey)) {
managedSharedStateDir.remove(subtaskKey);
// try clean up before remove
managedSharedStateDirHandles.get(subtaskKey).tryCleanupQuietly();
managedSharedStateDirHandles.remove(subtaskKey);
}
}
// ------------------------------------------------------------------------
// logical & physical file
// ------------------------------------------------------------------------
/**
 * Create a logical file on a physical file.
 *
 * <p>The new logical file is registered in {@code knownLogicalFiles} so that later
 * checkpoints can re-reference it. Space statistics are only updated for physical files
 * owned by this manager (files whose ownership was handed over elsewhere are not counted).
 *
 * @param physicalFile the underlying physical file.
 * @param startOffset the offset in the physical file that the logical file starts from.
 * @param length the length of the logical file.
 * @param subtaskKey the id of the subtask that the logical file belongs to.
 * @return the created logical file.
 */
protected LogicalFile createLogicalFile(
@Nonnull PhysicalFile physicalFile,
long startOffset,
long length,
@Nonnull SubtaskKey subtaskKey) {
// Random id keeps logical file ids unique across subtasks and checkpoints.
LogicalFileId fileID = LogicalFileId.generateRandomId();
LogicalFile file = new LogicalFile(fileID, physicalFile, startOffset, length, subtaskKey);
knownLogicalFiles.put(fileID, file);
if (physicalFile.isOwned()) {
// Account the new logical segment and the growth of its backing physical file.
spaceStat.onLogicalFileCreate(length);
spaceStat.onPhysicalFileUpdate(length);
}
return file;
}
/**
 * Create a physical file in right location (managed directory), which is specified by scope of
 * this checkpoint and current subtask.
 *
 * <p>File creation is retried a fixed number of times; only the latest failure is kept and
 * attached as the cause if all attempts fail.
 *
 * @param subtaskKey the {@link SubtaskKey} of current subtask.
 * @param scope the scope of the checkpoint.
 * @return the created physical file.
 * @throws IOException if anything goes wrong with file system.
 */
@Nonnull
protected PhysicalFile createPhysicalFile(SubtaskKey subtaskKey, CheckpointedStateScope scope)
throws IOException {
PhysicalFile result;
Exception latestException = null;
// Managed directory for this (subtask, scope); null means the directory was never created.
Path dirPath = getManagedDir(subtaskKey, scope);
if (dirPath == null) {
throw new IOException(
"Could not get "
+ scope
+ " path for subtask "
+ subtaskKey
+ ", the directory may have not been created.");
}
// Retry up to 10 times; transient file system errors are common during checkpointing.
for (int attempt = 0; attempt < 10; attempt++) {
try {
// Entropy injection may rewrite the path prefix for better object-store
// partitioning; NO_OVERWRITE guards against colliding with an existing file.
OutputStreamAndPath streamAndPath =
EntropyInjector.createEntropyAware(
fs,
generatePhysicalFilePath(dirPath),
FileSystem.WriteMode.NO_OVERWRITE);
FSDataOutputStream outputStream = streamAndPath.stream();
Path filePath = streamAndPath.path();
result = new PhysicalFile(outputStream, filePath, this.physicalFileDeleter, scope);
updateFileCreationMetrics(filePath);
return result;
} catch (Exception e) {
// Remember only the most recent failure; earlier ones are superseded.
latestException = e;
}
}
throw new IOException(
"Could not open output stream for state file merging.", latestException);
}
// Creates an output stream whose segments are written into shared/reused physical files.
// The returned stream drives the proxy below: acquire a physical file, then on success
// register a logical segment and return the file to the pool; on failure discard/close.
@Override
public FileMergingCheckpointStateOutputStream createCheckpointStateOutputStream(
SubtaskKey subtaskKey, long checkpointId, CheckpointedStateScope scope) {
return new FileMergingCheckpointStateOutputStream(
writeBufferSize,
new FileMergingCheckpointStateOutputStream.FileMergingSnapshotManagerProxy() {
// State of the in-flight write: the backing physical file and, once the
// stream is closed successfully, the logical segment created on it.
PhysicalFile physicalFile;
LogicalFile logicalFile;
@Override
public Tuple2<FSDataOutputStream, Path> providePhysicalFile()
throws IOException {
// Lazily acquire (or create) the physical file for this checkpoint.
physicalFile =
getOrCreatePhysicalFileForCheckpoint(
subtaskKey, checkpointId, scope);
return new Tuple2<>(
physicalFile.getOutputStream(), physicalFile.getFilePath());
}
@Override
public SegmentFileStateHandle closeStreamAndCreateStateHandle(
Path filePath, long startPos, long stateSize) throws IOException {
if (physicalFile == null) {
// Nothing was ever written; no handle to produce.
return null;
} else {
// deal with logical file
logicalFile =
createLogicalFile(
physicalFile, startPos, stateSize, subtaskKey);
logicalFile.advanceLastCheckpointId(checkpointId);
// track the logical file
synchronized (lock) {
uploadedStates
.computeIfAbsent(checkpointId, key -> new HashSet<>())
.add(logicalFile);
}
// deal with physicalFile file
returnPhysicalFileForNextReuse(subtaskKey, checkpointId, physicalFile);
return new SegmentFileStateHandle(
physicalFile.getFilePath(),
startPos,
stateSize,
scope,
logicalFile.getFileId());
}
}
@Override
public void closeStreamExceptionally() throws IOException {
if (physicalFile != null) {
if (logicalFile != null) {
// A segment was already registered: discard it properly.
discardSingleLogicalFile(logicalFile, checkpointId);
} else {
// The physical file should be closed anyway. This is because the
// last segmented write on this file is likely to have failed, and
// we want to prevent further reusing of this file.
physicalFile.close();
physicalFile.deleteIfNecessary();
}
}
}
});
}
// Record that a new physical file was created and emit a debug trace.
private void updateFileCreationMetrics(Path path) {
    // TODO: FLINK-32091 add io metrics
    spaceStat.onPhysicalFileCreate();
    LOG.debug("Create a new physical file {} for checkpoint file merging.", path);
}
/**
 * Generate a file path for a physical file.
 *
 * @param dirPath the parent directory path for the physical file.
 * @return the generated file path for a physical file.
 */
protected Path generatePhysicalFilePath(Path dirPath) {
    // this must be called after initFileSystem() is called
    // so the checkpoint directories must be not null if we reach here
    // A random UUID keeps names collision-free within the managed directory.
    return new Path(dirPath, UUID.randomUUID().toString());
}
/**
 * Whether this snapshot manager is responsible for the given file, i.e. the file lives
 * directly inside the exclusive managed directory or one of the per-subtask shared
 * managed directories.
 */
@VisibleForTesting
boolean isResponsibleForFile(Path filePath) {
    Path parent = filePath.getParent();
    // getParent() may return null (e.g. for a root path); such a path cannot be managed
    // here, and calling parent.equals(...) would otherwise throw an NPE.
    return parent != null
            && (parent.equals(managedExclusiveStateDir)
                    || managedSharedStateDir.containsValue(parent));
}
/**
 * Delete a physical file by given file path. Use the io executor to do the deletion
 * asynchronously; failures are logged but never propagated to the caller.
 *
 * @param filePath the given file path to delete.
 * @param size the size of the file, used to update space statistics on success.
 */
protected final void deletePhysicalFile(Path filePath, long size) {
    ioExecutor.execute(
            () -> {
                try {
                    fs.delete(filePath, false);
                    spaceStat.onPhysicalFileDelete(size);
                    LOG.debug("Physical file deleted: {}.", filePath);
                } catch (IOException e) {
                    // Pass the exception to the logger; dropping it loses the stack
                    // trace and makes deletion failures impossible to diagnose.
                    LOG.warn("Fail to delete file: {}", filePath, e);
                }
            });
}
/**
 * Create physical pool by filePoolType.
 *
 * <p>NON_BLOCKING pools hand out a fresh file when none is available; BLOCKING pools wait
 * for a reusable one — TODO confirm against the pool implementations.
 *
 * @return physical file pool.
 */
protected final PhysicalFilePool createPhysicalPool() {
switch (filePoolType) {
case NON_BLOCKING:
return new NonBlockingPhysicalFilePool(
maxPhysicalFileSize, this::createPhysicalFile);
case BLOCKING:
return new BlockingPhysicalFilePool(maxPhysicalFileSize, this::createPhysicalFile);
default:
// Defensive: fail fast on any pool type added without a branch here.
throw new UnsupportedOperationException(
"Unsupported type of physical file pool: " + filePoolType);
}
}
// ------------------------------------------------------------------------
// abstract methods
// ------------------------------------------------------------------------
/**
 * Get a reused physical file or create one. This will be called in checkpoint output stream
 * creation logic.
 *
 * <p>Basic logic of file reusing: whenever a physical file is needed, this method is called
 * with necessary information provided for acquiring a file. The file will not be reused until
 * it is written and returned to the reused pool by calling {@link
 * #returnPhysicalFileForNextReuse}.
 *
 * @param subtaskKey the subtask key for the caller
 * @param checkpointId the checkpoint id
 * @param scope checkpoint scope
 * @return the requested physical file.
 * @throws IOException thrown if anything goes wrong with file system.
 */
@Nonnull
protected abstract PhysicalFile getOrCreatePhysicalFileForCheckpoint(
SubtaskKey subtaskKey, long checkpointId, CheckpointedStateScope scope)
throws IOException;
/**
 * Try to return an existing physical file to the manager for next reuse. If this physical file
 * is no longer needed (for reusing), it will be closed.
 *
 * <p>Basic logic of file reusing, see {@link #getOrCreatePhysicalFileForCheckpoint}.
 *
 * @param subtaskKey the subtask key for the caller
 * @param checkpointId in which checkpoint this physical file is requested.
 * @param physicalFile the returning checkpoint
 * @throws IOException thrown if anything goes wrong with file system.
 * @see #getOrCreatePhysicalFileForCheckpoint(SubtaskKey, long, CheckpointedStateScope)
 */
protected abstract void returnPhysicalFileForNextReuse(
SubtaskKey subtaskKey, long checkpointId, PhysicalFile physicalFile) throws IOException;
/**
 * The callback which will be triggered when all subtasks discarded (aborted or subsumed).
 *
 * <p>The base implementation only re-evaluates space amplification; subclasses may extend it.
 *
 * @param checkpointId the discarded checkpoint id.
 * @throws IOException if anything goes wrong with file system.
 */
protected void discardCheckpoint(long checkpointId) throws IOException {
controlSpace();
}
// ------------------------------------------------------------------------
// Checkpoint Listener
// ------------------------------------------------------------------------
/**
 * {@link org.apache.flink.streaming.runtime.tasks.SubtaskCheckpointCoordinatorImpl} use this
 * method let the file merging manager know an ongoing checkpoint may reference the managed
 * dirs.
 */
@Override
public void notifyCheckpointStart(SubtaskKey subtaskKey, long checkpointId) {
// Directory handles only exist once the file system was initialized.
if (fileSystemInitiated) {
managedSharedStateDirHandles.computeIfPresent(
subtaskKey,
(k, v) -> {
v.addReferenceWhenCheckpointStart(checkpointId);
return v;
});
managedExclusiveStateDirHandle.addReferenceWhenCheckpointStart(checkpointId);
}
}
// On completion, directory ownership is handed over (to the checkpoint/JM side —
// TODO confirm the new owner against DirectoryHandleWithReferenceTrack).
@Override
public void notifyCheckpointComplete(SubtaskKey subtaskKey, long checkpointId)
throws Exception {
if (fileSystemInitiated) {
managedSharedStateDirHandles.computeIfPresent(
subtaskKey,
(k, v) -> {
v.handoverOwnershipWhenCheckpointComplete(checkpointId);
return v;
});
managedExclusiveStateDirHandle.handoverOwnershipWhenCheckpointComplete(checkpointId);
}
}
// On abort, the directory references taken at checkpoint start are dropped and all
// logical files uploaded for this checkpoint (by this subtask) are discarded.
@Override
public void notifyCheckpointAborted(SubtaskKey subtaskKey, long checkpointId) throws Exception {
if (fileSystemInitiated) {
managedSharedStateDirHandles.computeIfPresent(
subtaskKey,
(k, v) -> {
v.removeReferenceWhenCheckpointAbort(checkpointId);
return v;
});
managedExclusiveStateDirHandle.removeReferenceWhenCheckpointAbort(checkpointId);
}
// 'lock' guards uploadedStates, which is also mutated by the output stream proxy.
synchronized (lock) {
Set<LogicalFile> logicalFilesForCurrentCp = uploadedStates.get(checkpointId);
if (logicalFilesForCurrentCp == null) {
return;
}
if (discardLogicalFiles(subtaskKey, checkpointId, logicalFilesForCurrentCp)) {
uploadedStates.remove(checkpointId);
}
}
notifyReleaseCheckpoint(subtaskKey, checkpointId);
}
// On subsume, everything up to and including the subsumed checkpoint id is eligible
// for discarding (headMap with inclusive bound).
@Override
public void notifyCheckpointSubsumed(SubtaskKey subtaskKey, long checkpointId)
throws Exception {
if (fileSystemInitiated) {
managedSharedStateDirHandles.computeIfPresent(
subtaskKey,
(k, v) -> {
v.handoverOwnershipWhenCheckpointSubsumed(checkpointId);
return v;
});
managedExclusiveStateDirHandle.handoverOwnershipWhenCheckpointSubsumed(checkpointId);
}
synchronized (lock) {
Iterator<Map.Entry<Long, Set<LogicalFile>>> uploadedStatesIterator =
uploadedStates.headMap(checkpointId, true).entrySet().iterator();
while (uploadedStatesIterator.hasNext()) {
Map.Entry<Long, Set<LogicalFile>> entry = uploadedStatesIterator.next();
if (discardLogicalFiles(subtaskKey, checkpointId, entry.getValue())) {
uploadedStatesIterator.remove();
}
}
}
notifyReleaseCheckpoint(subtaskKey, checkpointId);
}
// Records that this subtask has been notified for the checkpoint; once every registered
// subtask has reported, the checkpoint is discarded. Guarded by 'notifyLock' (separate
// from 'lock' to avoid holding the upload-state lock during discard).
private void notifyReleaseCheckpoint(SubtaskKey subtaskKey, long checkpointId)
throws IOException {
synchronized (notifyLock) {
if (notifiedCheckpoint.contains(checkpointId)) {
// already release, skip
return;
}
Set<SubtaskKey> knownSubtask =
notifiedSubtaskCheckpoint.computeIfAbsent(checkpointId, (e) -> new HashSet<>());
knownSubtask.add(subtaskKey);
if (knownSubtask.containsAll(managedSharedStateDir.keySet())) {
// all known subtask has been notified.
tryDiscardCheckpoint(checkpointId);
}
// control the size of notifiedSubtaskCheckpoint
if (notifiedSubtaskCheckpoint.size() > NUM_GHOST_CHECKPOINT_IDS) {
notifiedSubtaskCheckpoint.pollFirstEntry();
}
}
}
// Discards a checkpoint at most once; 'notifiedCheckpoint' doubles as the idempotence
// guard and is bounded to NUM_GHOST_CHECKPOINT_IDS entries.
private void tryDiscardCheckpoint(long checkpointId) throws IOException {
synchronized (notifyLock) {
if (!notifiedCheckpoint.contains(checkpointId)) {
notifiedCheckpoint.add(checkpointId);
notifiedSubtaskCheckpoint.remove(checkpointId);
discardCheckpoint(checkpointId);
if (notifiedCheckpoint.size() > NUM_GHOST_CHECKPOINT_IDS) {
notifiedCheckpoint.pollFirst();
}
}
}
}
// Marks the logical files behind the given handles as still used by 'checkpointId',
// preventing them from being discarded while this checkpoint references them.
@Override
public void reusePreviousStateHandle(
long checkpointId, Collection<? extends StreamStateHandle> stateHandles) {
for (StreamStateHandle stateHandle : stateHandles) {
if (stateHandle instanceof SegmentFileStateHandle) {
LogicalFile file =
knownLogicalFiles.get(
((SegmentFileStateHandle) stateHandle).getLogicalFileId());
if (file != null) {
file.advanceLastCheckpointId(checkpointId);
}
} else if (stateHandle instanceof PlaceholderStreamStateHandle
&& ((PlaceholderStreamStateHandle) stateHandle).isFileMerged()) {
// Since the rocksdb state backend will leverage the PlaceholderStreamStateHandle,
// the manager should recognize this.
LogicalFile file =
knownLogicalFiles.get(
new LogicalFileId(
stateHandle.getStreamStateHandleID().getKeyString()));
if (file != null) {
file.advanceLastCheckpointId(checkpointId);
}
}
}
}
// ------------------------------------------------------------------------
// Space Control
// ------------------------------------------------------------------------
/**
 * The core method that control space if needed. This method will compare the desired space
 * amplification with current one, and if it exceeds the configured amplification, this method
 * will mark minimal set of {@link PhysicalFile}s not to be reused anymore.
 */
private void controlSpace() {
// Only act when amplification is bounded and the physical footprint exceeds
// logicalSize * maxSpaceAmplification.
if (maxSpaceAmplification != Float.MAX_VALUE
&& spaceStat.logicalFileSize.get() * maxSpaceAmplification
< spaceStat.physicalFileSize.get()) {
// may need control space
long goalPhysicalSize =
Math.round(spaceStat.logicalFileSize.get() * maxSpaceAmplification);
final AtomicLong aliveSize = new AtomicLong(0L);
// retrieve all the physical files and calculate current alive size
Set<PhysicalFile> knownPhysicalFiles = new HashSet<>();
knownLogicalFiles.values().stream()
.map(LogicalFile::getPhysicalFile)
.forEach(
file -> {
if (file.isCouldReuse()) {
// Count each reusable physical file exactly once.
if (knownPhysicalFiles.add(file)) {
aliveSize.addAndGet(file.getSize());
}
}
});
// the alive size still greater than the goal
if (aliveSize.get() > goalPhysicalSize) {
// sort in DESC order on wasted size
SortedSet<PhysicalFile> sortedPhysicalFile =
new TreeSet<>((a, b) -> Long.compare(b.wastedSize(), a.wastedSize()));
knownPhysicalFiles.stream()
.filter(PhysicalFile::closed)
.forEach(sortedPhysicalFile::add);
// mark the physical file un-alive, until it reaches our goal.
// Files with the most wasted space are retired first, so the fewest files
// need to be taken out of reuse.
for (PhysicalFile file : sortedPhysicalFile) {
if (!file.checkReuseOnSpaceAmplification(maxSpaceAmplification)) {
if (aliveSize.addAndGet(-file.wastedSize()) <= goalPhysicalSize) {
break;
}
}
}
}
}
}
// Returns true only if the handle maps to a known logical file whose backing physical
// file is still eligible for reuse; unknown handles must be re-uploaded.
@Override
public boolean couldReusePreviousStateHandle(StreamStateHandle stateHandle) {
if (stateHandle instanceof SegmentFileStateHandle) {
LogicalFile file =
knownLogicalFiles.get(
((SegmentFileStateHandle) stateHandle).getLogicalFileId());
if (file != null) {
return file.getPhysicalFile().isCouldReuse();
}
} else if (stateHandle instanceof PlaceholderStreamStateHandle
&& ((PlaceholderStreamStateHandle) stateHandle).isFileMerged()) {
// Since the rocksdb state backend will leverage the PlaceholderStreamStateHandle,
// the manager should recognize this.
LogicalFile file =
knownLogicalFiles.get(
new LogicalFileId(stateHandle.getStreamStateHandleID().getKeyString()));
if (file != null) {
return file.getPhysicalFile().isCouldReuse();
}
}
// If a stateHandle is not of the type SegmentFileStateHandle or if its corresponding file
// is not recognized by the fileMergingManager, it needs to be re-uploaded.
return false;
}
/**
 * Discard one logical file for the given checkpoint and, if the backing physical file is
 * owned by this manager, account the freed logical space.
 */
public void discardSingleLogicalFile(LogicalFile logicalFile, long checkpointId)
        throws IOException {
    logicalFile.discardWithCheckpointId(checkpointId);
    PhysicalFile backingFile = logicalFile.getPhysicalFile();
    if (backingFile.isOwned()) {
        spaceStat.onLogicalFileDelete(logicalFile.getLength());
    }
}
// Discards every logical file in the set that belongs to 'subtaskKey' and is not used by
// any checkpoint newer than 'checkpointId'. Returns true when the set ends up empty, in
// which case the checkpoint itself is also discarded. Caller holds 'lock'.
private boolean discardLogicalFiles(
SubtaskKey subtaskKey, long checkpointId, Set<LogicalFile> logicalFiles)
throws Exception {
Iterator<LogicalFile> logicalFileIterator = logicalFiles.iterator();
while (logicalFileIterator.hasNext()) {
LogicalFile logicalFile = logicalFileIterator.next();
if (logicalFile.getSubtaskKey().equals(subtaskKey)
&& logicalFile.getLastUsedCheckpointID() <= checkpointId) {
discardSingleLogicalFile(logicalFile, checkpointId);
// Remove via the iterator to avoid ConcurrentModificationException, then
// forget the file globally.
logicalFileIterator.remove();
knownLogicalFiles.remove(logicalFile.getFileId());
}
}
if (logicalFiles.isEmpty()) {
tryDiscardCheckpoint(checkpointId);
return true;
}
return false;
}
// ------------------------------------------------------------------------
// file system
// ------------------------------------------------------------------------
// SHARED state lives in the per-subtask managed directory; every other scope maps to the
// exclusive managed directory of this manager.
@Override
public Path getManagedDir(SubtaskKey subtaskKey, CheckpointedStateScope scope) {
    return scope.equals(CheckpointedStateScope.SHARED)
            ? managedSharedStateDir.get(subtaskKey)
            : managedExclusiveStateDir;
}
@Override
public DirectoryStreamStateHandle getManagedDirStateHandle(
        SubtaskKey subtaskKey, CheckpointedStateScope scope) {
    if (!scope.equals(CheckpointedStateScope.SHARED)) {
        return managedExclusiveStateDirHandle.getHandle();
    }
    // Shared scope: the subtask may not be registered, in which case there is no handle.
    DirectoryHandleWithReferenceTrack tracked = managedSharedStateDirHandles.get(subtaskKey);
    return tracked == null ? null : tracked.getHandle();
}
static boolean shouldSyncAfterClosingLogicalFile(FileSystem fileSystem) {
// Currently, we do file sync regardless of the file system.
// TODO: Determine whether do file sync more wisely. Add an | FileMergingSnapshotManagerBase |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/geo/GeoRadiusArgs.java | {
"start": 165,
"end": 3729
} | class ____ implements RedisCommandExtraArguments {
private boolean withDistance;
private boolean withCoordinates;
private boolean withHash;
private long count = -1;
private boolean any;
/**
* The direction (ASC or DESC)
*/
private String direction;
/**
* Use {@code ASC} order (from small to large).
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs ascending() {
this.direction = "ASC";
return this;
}
/**
* Use {@code DESC} order (from large to small).
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs descending() {
this.direction = "DESC";
return this;
}
/**
* Also return the distance of the returned items from the specified center. The distance is returned in the same
* unit as the unit specified as the radius argument of the command.
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs withDistance() {
this.withDistance = true;
return this;
}
/**
* Also return the longitude,latitude coordinates of the matching items.
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs withCoordinates() {
this.withCoordinates = true;
return this;
}
/**
* Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer.
* This is only useful for low level hacks or debugging and is otherwise of little interest for the general user.
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs withHash() {
this.withHash = true;
return this;
}
/**
* By default all the matching items are returned. It is possible to limit the results to the first N matching items
* by using the {@code COUNT <count>} option.
*
* @param count the count value
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs count(long count) {
this.count = count;
return this;
}
/**
* When ANY is provided the command will return as soon as enough matches are found, so the results may not be the
* ones closest to the specified point, but on the other hand, the effort invested by the server is significantly
* lower.
* <p>
* Using {@code ANY} requires {@code count} to be set.
*
* @return the current {@code GeoRadiusArgs}
**/
public GeoRadiusArgs any() {
this.any = true;
return this;
}
@Override
public List<Object> toArgs() {
// Validation
if (any && count == -1) {
throw new IllegalArgumentException("ANY can only be used if COUNT is also set");
}
List<Object> list = new ArrayList<>();
if (withDistance) {
list.add("WITHDIST");
}
if (withCoordinates) {
list.add("WITHCOORD");
}
if (withHash) {
list.add("WITHHASH");
}
if (count > 0) {
list.add("COUNT");
list.add(Long.toString(count));
}
if (any) {
list.add("ANY");
}
list.add(direction);
return list;
}
public boolean hasDistance() {
return withDistance;
}
public boolean hasHash() {
return withHash;
}
public boolean hasCoordinates() {
return withCoordinates;
}
}
| GeoRadiusArgs |
java | apache__camel | components/camel-kamelet/src/main/java/org/apache/camel/component/kamelet/KameletProcessor.java | {
"start": 1636,
"end": 4551
} | class ____ extends BaseProcessorSupport
implements CamelContextAware, Navigate<Processor>, org.apache.camel.Traceable, IdAware, RouteIdAware {
private final String name;
private final AsyncProcessor processor;
private KameletProducer producer;
private KameletComponent component;
private CamelContext camelContext;
private String id;
private String routeId;
public KameletProcessor(CamelContext camelContext, String name, Processor processor) throws Exception {
this.camelContext = camelContext;
this.name = name;
this.processor = AsyncProcessorConverterHelper.convert(processor);
}
@ManagedAttribute(description = "Kamelet name (templateId/routeId?options)")
public String getName() {
return name;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public String getId() {
return id;
}
@Override
public void setId(String id) {
this.id = id;
}
@Override
public String getRouteId() {
return routeId;
}
@Override
public void setRouteId(String routeId) {
this.routeId = routeId;
}
@Override
public boolean process(Exchange exchange, final AsyncCallback callback) {
return producer.process(exchange, callback);
}
@Override
public List<Processor> next() {
if (!hasNext()) {
return null;
}
List<Processor> answer = new ArrayList<>();
answer.add(processor);
return answer;
}
@Override
public boolean hasNext() {
return true;
}
@Override
public String getTraceLabel() {
return "kamelet";
}
@Override
protected void doInit() throws Exception {
this.component = camelContext.getComponent("kamelet", KameletComponent.class);
this.producer = (KameletProducer) camelContext.getEndpoint("kamelet://" + name).createAsyncProducer();
ServiceHelper.initService(processor, producer);
// we use the kamelet component (producer) to call the kamelet
// and to receive the reply we register ourselves to the kamelet component
// with our child processor it should call
component.addKameletEip(producer.getKey(), processor);
}
@Override
protected void doStart() throws Exception {
ServiceHelper.startService(processor, producer);
}
@Override
protected void doStop() throws Exception {
ServiceHelper.stopService(processor, producer);
}
@Override
protected void doShutdown() throws Exception {
ServiceHelper.stopAndShutdownServices(processor, producer);
component.removeKameletEip(producer.getKey());
}
}
| KameletProcessor |
java | quarkusio__quarkus | integration-tests/oidc-client-registration/src/test/java/io/quarkus/it/keycloak/OidcClientRegistrationTest.java | {
"start": 854,
"end": 9015
} | class ____ {
@Test
public void testDefaultRegisteredClientOnStartup() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client:Default Client Updated:alice", textPage.getContent());
}
checkLog();
}
@Test
public void testTenantRegisteredClientOnStartup() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/tenant");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-tenant:Tenant Client:alice", textPage.getContent());
}
}
@Test
public void testRegisteredClientDynamically() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/dynamic");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-dynamically:Dynamic Client:alice", textPage.getContent());
}
}
@Test
public void testRegisteredClientDynamicTenant() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/dynamic-tenant");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-dynamic-tenant:Registered Dynamically Tenant Client:alice", textPage.getContent());
}
}
@Test
public void testRegisteredClientMulti1() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/multi1");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-multi1:Multi1 Client:alice", textPage.getContent());
}
}
@Test
public void testRegisteredClientMulti2() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/multi2");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-multi2:Multi2 Client:alice", textPage.getContent());
}
}
@Test
public void testRegisteredClientJwtBearerTokenFromFile() throws IOException {
try (final WebClient webClient = createWebClient()) {
HtmlPage page = webClient.getPage("http://localhost:8081/protected/jwt-bearer-token-file");
assertEquals("Sign in to quarkus", page.getTitleText());
HtmlForm loginForm = page.getForms().get(0);
loginForm.getInputByName("username").setValueAttribute("alice");
loginForm.getInputByName("password").setValueAttribute("alice");
TextPage textPage = loginForm.getButtonByName("login").click();
assertEquals("registered-client-jwt-bearer-token-file:signed-jwt-test:alice", textPage.getContent());
}
}
private WebClient createWebClient() {
WebClient webClient = new WebClient();
webClient.setCssErrorHandler(new SilentCssErrorHandler());
return webClient;
}
private void checkLog() {
final Path logDirectory = Paths.get(".", "target");
given().await().pollInterval(100, TimeUnit.MILLISECONDS)
.atMost(10, TimeUnit.SECONDS)
.untilAsserted(new ThrowingRunnable() {
@Override
public void run() throws Throwable {
Path accessLogFilePath = logDirectory.resolve("quarkus.log");
boolean fileExists = Files.exists(accessLogFilePath);
if (!fileExists) {
accessLogFilePath = logDirectory.resolve("target/quarkus.log");
fileExists = Files.exists(accessLogFilePath);
}
Assertions.assertTrue(Files.exists(accessLogFilePath),
"quarkus log file " + accessLogFilePath + " is missing");
boolean clientRegistrationRequest = false;
boolean clientRegistered = false;
boolean registeredClientUpdated = false;
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(new ByteArrayInputStream(Files.readAllBytes(accessLogFilePath)),
StandardCharsets.UTF_8))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.contains("'Default Client' registration request")) {
clientRegistrationRequest = true;
} else if (line.contains("'Default Client' has been registered")) {
clientRegistered = true;
} else if (line.contains(
"Registered 'Default Client' has had its name updated to 'Default Client Updated'")) {
registeredClientUpdated = true;
}
if (clientRegistrationRequest && clientRegistered && registeredClientUpdated) {
break;
}
}
}
assertTrue(clientRegistrationRequest,
"Log file must contain a default client registration request confirmation");
assertTrue(clientRegistered,
"Log file must contain a default client registration confirmation");
assertTrue(registeredClientUpdated,
"Log file must contain a a default client's name update confirmation");
}
});
}
}
| OidcClientRegistrationTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/db/jdbc/FactoryMethodConnectionSource.java | {
"start": 1721,
"end": 2614
} | class ____ extends AbstractConnectionSource {
private static final Logger LOGGER = StatusLogger.getLogger();
private final DataSource dataSource;
private final String description;
private FactoryMethodConnectionSource(
final DataSource dataSource, final String className, final String methodName, final String returnType) {
this.dataSource = dataSource;
this.description = "factory{ public static " + returnType + ' ' + className + '.' + methodName + "() }";
}
@Override
public Connection getConnection() throws SQLException {
return this.dataSource.getConnection();
}
@Override
public String toString() {
return this.description;
}
/**
* Factory method for creating a connection source within the plugin manager.
*
* @param className The name of a public | FactoryMethodConnectionSource |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java | {
"start": 619,
"end": 2083
} | class ____ extends ElasticsearchInternalModel {
public ElasticRerankerModel(
String inferenceEntityId,
TaskType taskType,
String service,
ElasticRerankerServiceSettings serviceSettings,
RerankTaskSettings taskSettings
) {
super(inferenceEntityId, taskType, service, serviceSettings, taskSettings);
}
@Override
public ElasticRerankerServiceSettings getServiceSettings() {
return (ElasticRerankerServiceSettings) super.getServiceSettings();
}
@Override
public ActionListener<CreateTrainedModelAssignmentAction.Response> getCreateTrainedModelAssignmentActionListener(
ElasticsearchInternalModel esModel,
ActionListener<Boolean> listener
) {
return new ActionListener<>() {
@Override
public void onResponse(CreateTrainedModelAssignmentAction.Response response) {
listener.onResponse(Boolean.TRUE);
}
@Override
public void onFailure(Exception e) {
if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
listener.onFailure(
new ResourceNotFoundException("Could not start the Elastic Reranker Endpoint due to [{}]", e, e.getMessage())
);
return;
}
listener.onFailure(e);
}
};
}
}
| ElasticRerankerModel |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/KubernetesWithEnvFromSecretWithPrefixBehaviorsTest.java | {
"start": 512,
"end": 2978
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName("env-from-secret-with-prefix-behaviors")
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("kubernetes-with-env-from-secret-with-prefix-behaviors.properties");
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"));
List<HasMetadata> kubernetesList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
assertThat(kubernetesList.get(0)).isInstanceOfSatisfying(Deployment.class, d -> {
assertThat(d.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("env-from-secret-with-prefix-behaviors");
});
assertThat(d.getSpec()).satisfies(deploymentSpec -> {
assertThat(deploymentSpec.getTemplate()).satisfies(t -> {
assertThat(t.getSpec()).satisfies(podSpec -> {
assertThat(podSpec.getContainers()).singleElement().satisfies(container -> {
assertThat(container.getEnvFrom())
.anyMatch(item -> item.getPrefix() != null && !item.getPrefix().isBlank()
&& item.getSecretRef().getName().equals("another"));
assertThat(container.getEnvFrom())
.anyMatch(item -> item.getPrefix() != null && !item.getPrefix().isBlank()
&& item.getSecretRef().getName().equals("secrets"));
assertThat(container.getEnvFrom()).anyMatch(
item -> item.getPrefix() == null && item.getSecretRef().getName().equals("without"));
});
});
});
});
});
}
}
| KubernetesWithEnvFromSecretWithPrefixBehaviorsTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java | {
"start": 1212,
"end": 1487
} | class ____ {
private final CSQueue queue;
/**
* Updating the queue may involve entitlement updates
* and/or QueueState changes
*
* QueueAction can potentially be enhanced
* for adding, removing queues for queue management
*/
public | QueueManagementChange |
java | apache__hadoop | hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatFsCommand.java | {
"start": 4492,
"end": 4938
} | class ____ implements HdfsCompatSuite {
@Override
public String getSuiteName() {
return "All (Test)";
}
@Override
public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
return new Class[]{
HdfsCompatMkdirTestCases.class,
HdfsCompatAclTestCases.class,
};
}
@Override
public String[] getShellCases() {
return new String[0];
}
}
private static | AllTestSuite |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/config/PojoAndStringConfig.java | {
"start": 947,
"end": 1033
} | class ____ various integration tests.
*
* <p>The beans defined in this configuration | for |
java | processing__processing4 | core/test/processing/core/PMatrix2DTest.java | {
"start": 117,
"end": 4753
} | class ____ {
private PMatrix2D m;
@Before
public void setUp() {
m = new PMatrix2D();
}
@Test
public void testIdentity() {
assertTrue("New matrix should be identity", m.isIdentity());
float[] arr = m.get(null);
assertEquals(1, arr[0], 0.0001f); // m00
assertEquals(0, arr[1], 0.0001f); // m01
assertEquals(0, arr[2], 0.0001f); // m02
assertEquals(0, arr[3], 0.0001f); // m10
assertEquals(1, arr[4], 0.0001f); // m11
assertEquals(0, arr[5], 0.0001f); // m12
}
@Test
public void testTranslate() {
m.translate(10, 20);
assertEquals(10, m.m02, 0.0001f);
assertEquals(20, m.m12, 0.0001f);
}
@Test
public void testRotate() {
m.rotate(PConstants.HALF_PI);
assertEquals(0, m.m00, 0.0001f);
assertEquals(-1, m.m01, 0.0001f);
assertEquals(1, m.m10, 0.0001f);
assertEquals(0, m.m11, 0.0001f);
}
@Test
public void testScale() {
m.scale(2, 3);
assertEquals(2, m.m00, 0.0001f);
assertEquals(3, m.m11, 0.0001f);
assertEquals(0, m.m02, 0.0001f);
assertEquals(0, m.m12, 0.0001f);
}
@Test
public void testShear() {
float shearAngle = 0.2f;
m.shearX(shearAngle);
assertEquals(0, m.m01, 0.0001f);
assertEquals((float)Math.tan(shearAngle), m.m10, 0.0001f);
assertEquals(1, m.m02, 0.0001f);
m.reset();
m.shearY(shearAngle);
assertEquals(0, m.m01, 0.0001f);
assertEquals(0, m.m10, 0.0001f);
assertEquals((float)Math.tan(shearAngle), m.m11, 0.0001f);
assertEquals(1, m.m02, 0.0001f);
}
@Test
public void testApply() {
PMatrix2D m2 = new PMatrix2D(1, 2, 3, 4, 5, 6);
m.apply(m2);
assertEquals(m2.m00, m.m00, 0.0001f);
assertEquals(m2.m01, m.m01, 0.0001f);
assertEquals(m2.m02, m.m02, 0.0001f);
assertEquals(m2.m10, m.m10, 0.0001f);
assertEquals(m2.m11, m.m11, 0.0001f);
assertEquals(m2.m12, m.m12, 0.0001f);
}
@Test
public void testPreApply() {
PMatrix2D m1 = new PMatrix2D(1, 2, 3, 4, 5, 6);
m.reset(); // identity matrix
m.preApply(m1);
assertEquals(m1.m00, m.m00, 0.0001f);
assertEquals(m1.m01, m.m01, 0.0001f);
assertEquals(m1.m02, m.m02, 0.0001f);
assertEquals(m1.m10, m.m10, 0.0001f);
assertEquals(m1.m11, m.m11, 0.0001f);
assertEquals(m1.m12, m.m12, 0.0001f);
}
@Test
public void testMultPVector() {
PVector src = new PVector(1, 2, 0);
PVector result = m.mult(src, null);
assertEquals(src.x, result.x, 0.0001f);
assertEquals(src.y, result.y, 0.0001f);
}
@Test
public void testMultArray() {
float[] vec = { 1, 2 };
float[] out = m.mult(vec, null);
assertEquals(1, out[0], 0.0001f);
assertEquals(2, out[1], 0.0001f);
}
@Test
public void testMultXandY() {
float x = 10, y = 20;
float xOut = m.multX(x, y);
float yOut = m.multY(x, y);
assertEquals(x, xOut, 0.0001f);
assertEquals(y, yOut, 0.0001f);
}
@Test
public void testInvertAndDeterminant() {
m.set(2, 0, 5, 1, 3, 7);
float det = m.determinant();
assertEquals(6, det, 0.0001f);
boolean invertible = m.invert();
assertTrue("Matrix should be invertible", invertible);
PMatrix2D identity = new PMatrix2D(2, 0, 5, 1, 3, 7);
identity.apply(m);
assertEquals(1, identity.m00, 0.001f);
assertEquals(0, identity.m01, 0.001f);
assertEquals(0, identity.m10, 0.001f);
assertEquals(1, identity.m11, 0.001f);
}
@Test
public void testIdentityWarped() {
assertTrue(m.isIdentity());
assertFalse(m.isWarped());
m.translate(10, 20);
assertFalse(m.isIdentity());
}
@Test(expected = IllegalArgumentException.class)
public void testTranslate3DThrows() {
m.translate(1, 2, 3);
}
@Test(expected = IllegalArgumentException.class)
public void testRotateXThrows() {
m.rotateX(1);
}
@Test(expected = IllegalArgumentException.class)
public void testRotateYThrows() {
m.rotateY(1);
}
@Test(expected = IllegalArgumentException.class)
public void testScale3DThrows() {
m.scale(1, 2, 3);
}
@Test(expected = IllegalArgumentException.class)
public void testApplyPMatrix3DThrows() {
PMatrix3D m3d = new PMatrix3D(1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
m.apply(m3d);
}
@Test
public void testGetArray() {
m.set(new float[]{1, 2, 0, 0, 1, 0});
float[] arr = m.get(null);
assertEquals(1, arr[0], 0.0001f);
assertEquals(2, arr[1], 0.0001f);
assertEquals(0, arr[2], 0.0001f);
}
}
| PMatrix2DTest |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 17052,
"end": 17599
} | class ____ {
@GET("/")
Call<ResponseBody> method(
@HeaderMap okhttp3.Headers headers, @HeaderMap List<String> headerMap) {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"@HeaderMap parameter type must be Map or Headers. (parameter 'headerMap')\n for method Example.method");
}
}
@Test
public void headerMapSupportsSubclasses() {
| Example |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/Gh32489Tests.java | {
"start": 5490,
"end": 5623
} | class ____ {
@Autowired
SimpleRepositoryFactoryBean<EmployeeRepository, Long> repositoryFactory;
}
static | RepositoryFactoryHolder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java | {
"start": 2412,
"end": 2689
} | class ____ {@link DeleteResponse}. This builder is usually used during xcontent parsing to
* temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
* instantiate the {@link DeleteResponse}.
*/
public static | for |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest39.java | {
"start": 942,
"end": 1301
} | class ____ extends TestCase {
public void test_true() throws Exception {
WallProvider provider = new MySqlWallProvider();
assertTrue(provider.checkValid(//
"COMMIT"));
assertEquals(0, provider.getTableStats().size());
System.out.println(JSONUtils.toJSONString(provider.getStatsMap()));
}
}
| MySqlWallTest39 |
java | quarkusio__quarkus | extensions/info/deployment/src/test/java/io/quarkus/info/deployment/EnabledInfoOnManagementInterfaceTest.java | {
"start": 317,
"end": 1220
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withEmptyApplication()
.overrideConfigKey("quarkus.management.enabled", "true");
@Test
public void test() {
when().get("/q/info")
.then()
.statusCode(404);
when().get("http://localhost:9001/q/info")
.then()
.statusCode(200)
.body("os", is(notNullValue()))
.body("os.name", is(notNullValue()))
.body("java", is(notNullValue()))
.body("java.version", is(notNullValue()))
.body("build", is(notNullValue()))
.body("build.time", is(notNullValue()))
.body("git", is(notNullValue()))
.body("git.branch", is(notNullValue()));
}
}
| EnabledInfoOnManagementInterfaceTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesCustomResourcesEndpointBuilderFactory.java | {
"start": 47900,
"end": 49457
} | interface ____
extends
AdvancedKubernetesCustomResourcesEndpointConsumerBuilder,
AdvancedKubernetesCustomResourcesEndpointProducerBuilder {
default KubernetesCustomResourcesEndpointBuilder basic() {
return (KubernetesCustomResourcesEndpointBuilder) this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedKubernetesCustomResourcesEndpointBuilder connectionTimeout(Integer connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedKubernetesCustomResourcesEndpointBuilder connectionTimeout(String connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
}
public | AdvancedKubernetesCustomResourcesEndpointBuilder |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/tvf/unslicing/UnsliceAssigners.java | {
"start": 3115,
"end": 7523
} | class ____ implements UnsliceAssigner<TimeWindow> {
private static final long serialVersionUID = 1L;
private final int rowtimeIndex;
private final long sessionGap;
private final boolean isEventTime;
private final ZoneId shiftTimeZone;
private final SessionWindowAssigner innerSessionWindowAssigner;
public SessionUnsliceAssigner(int rowtimeIndex, ZoneId shiftTimeZone, long sessionGap) {
this.rowtimeIndex = rowtimeIndex;
this.shiftTimeZone = shiftTimeZone;
this.sessionGap = sessionGap;
this.isEventTime = rowtimeIndex >= 0;
this.innerSessionWindowAssigner =
SessionWindowAssigner.withGap(Duration.ofMillis(sessionGap));
if (isEventTime()) {
this.innerSessionWindowAssigner.withEventTime();
} else {
this.innerSessionWindowAssigner.withProcessingTime();
}
}
@Override
public MergingWindowAssigner<TimeWindow> getMergingWindowAssigner() {
return innerSessionWindowAssigner;
}
@Override
public Optional<TimeWindow> assignActualWindow(
RowData element,
ClockService clock,
MergingWindowProcessFunction<?, TimeWindow> windowFunction)
throws Exception {
Collection<TimeWindow> windows =
windowFunction.assignActualWindows(element, getUtcTimestamp(element, clock));
checkState(windows.size() <= 1);
if (windows.size() == 1) {
return Optional.of(windows.iterator().next());
} else {
return Optional.empty();
}
}
@Override
public Optional<TimeWindow> assignStateNamespace(
RowData element,
ClockService clock,
MergingWindowProcessFunction<?, TimeWindow> windowFunction)
throws Exception {
Collection<TimeWindow> windows =
windowFunction.assignStateNamespace(element, getUtcTimestamp(element, clock));
checkState(windows.size() <= 1);
if (windows.size() == 1) {
return Optional.of(windows.iterator().next());
} else {
return Optional.empty();
}
}
protected long getUtcTimestamp(RowData element, ClockService clock) {
final long timestamp;
if (rowtimeIndex >= 0) {
if (element.isNullAt(rowtimeIndex)) {
throw new RuntimeException(
"rowtimeIndex should not be null,"
+ " please convert it to a non-null long value.");
}
// Precision for row timestamp is always 3
TimestampData rowTime = element.getTimestamp(rowtimeIndex, 3);
timestamp = toUtcTimestampMills(rowTime.getMillisecond(), shiftTimeZone);
} else {
// in processing time mode
timestamp = toUtcTimestampMills(clock.currentProcessingTime(), shiftTimeZone);
}
return timestamp;
}
@Override
public boolean isEventTime() {
return isEventTime;
}
@Override
public String getDescription() {
return String.format("SessionWindow(gap=%dms)", sessionGap);
}
}
/**
* Creates a {@link UnsliceAssigner} that assigns elements which has been attached window start
* and window end timestamp to windows. The assigned windows doesn't need to be merged again.
*
* @param windowStartIndex the index of window start field in the input row, mustn't be a
* negative value.
* @param windowEndIndex the index of window end field in the input row, mustn't be a negative
* value.
*/
public static WindowedUnsliceAssigner windowed(
int windowStartIndex, int windowEndIndex, UnsliceAssigner<TimeWindow> innerAssigner) {
return new WindowedUnsliceAssigner(windowStartIndex, windowEndIndex, innerAssigner);
}
/**
* The {@link UnsliceAssigner} for elements have been merged into unslicing windows and attached
* window start and end timestamps.
*/
public static | SessionUnsliceAssigner |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/Mockito.java | {
"start": 66229,
"end": 72961
} | class ____ represent
* a mock. This way, it becomes possible to mock final types and methods.
*
* <p>
* In versions preceding 5.0.0, this mock maker is <strong>turned off by default</strong> because it is based on
* completely different mocking mechanism that required more feedback from the community. It can be activated
* explicitly by the mockito extension mechanism, just create in the classpath a file
* <code>/mockito-extensions/org.mockito.plugins.MockMaker</code> containing the value <code>mock-maker-inline</code>.
*
* <p>
* As a convenience, the Mockito team provides an artifact where this mock maker is preconfigured. Instead of using the
* <i>mockito-core</i> artifact, include the <i>mockito-inline</i> artifact in your project. Note that this artifact is
* likely to be discontinued once mocking of final classes and methods gets integrated into the default mock maker.
*
* <p>
* Some noteworthy notes about this mock maker:
* <ul>
* <li>Mocking final types and enums is incompatible with mock settings like :
* <ul>
* <li>explicitly serialization support <code>withSettings().serializable()</code></li>
* <li>extra-interfaces <code>withSettings().extraInterfaces()</code></li>
* </ul>
* </li>
* <li>Some methods cannot be mocked
* <ul>
* <li>Package-visible methods of <code>java.*</code></li>
* <li><code>native</code> methods</li>
* </ul>
* </li>
* <li>This mock maker has been designed around Java Agent runtime attachment ; this require a compatible JVM,
* that is part of the JDK (or Java 9 VM). When running on a non-JDK VM prior to Java 9, it is however possible to
* manually add the <a href="https://bytebuddy.net">Byte Buddy Java agent jar</a> using the <code>-javaagent</code>
* parameter upon starting the JVM.
* </li>
* </ul>
*
* <p>
* If you are interested in more details of this feature please read the javadoc of
* <code>org.mockito.internal.creation.bytebuddy.InlineByteBuddyMockMaker</code>
*
* <h3 id="40">40. <a class="meaningful_link" href="#strict_mockito" name="strict_mockito">
* Improved productivity and cleaner tests with "stricter" Mockito</a> (Since 2.+)</h3>
*
* To quickly find out how "stricter" Mockito can make you more productive and get your tests cleaner, see:
* <ul>
* <li>Strict stubbing with JUnit4 Rules - {@link MockitoRule#strictness(Strictness)} with {@link Strictness#STRICT_STUBS}</li>
* <li>Strict stubbing with JUnit4 Runner - {@link Strict MockitoJUnitRunner.Strict}</li>
* <li>Strict stubbing with JUnit5 Extension - <code>org.mockito.junit.jupiter.MockitoExtension</code></li>
* <li>Strict stubbing with TestNG Listener <a href="https://github.com/mockito/mockito-testng">MockitoTestNGListener</a></li>
* <li>Strict stubbing if you cannot use runner/rule - {@link MockitoSession}</li>
* <li>Unnecessary stubbing detection with {@link MockitoJUnitRunner}</li>
* <li>Stubbing argument mismatch warnings, documented in {@link MockitoHint}</li>
* </ul>
*
* Mockito is a "loose" mocking framework by default.
* Mocks can be interacted with without setting any expectations beforehand.
* This is intentional and it improves the quality of tests by forcing users to be explicit about what they want to stub / verify.
* It is also very intuitive, easy to use and blends nicely with "given", "when", "then" template of clean test code.
* This is also different from the classic mocking frameworks of the past, they were "strict" by default.
* <p>
* Being "loose" by default makes Mockito tests harder to debug at times.
* There are scenarios where misconfigured stubbing (like using a wrong argument) forces the user to run the test with a debugger.
* Ideally, tests failures are immediately obvious and don't require debugger to identify the root cause.
* Starting with version 2.1 Mockito has been getting new features that nudge the framework towards "strictness".
* We want Mockito to offer fantastic debuggability while not losing its core mocking style, optimized for
* intuitiveness, explicitness and clean test code.
* <p>
* Help Mockito! Try the new features, give us feedback, join the discussion about Mockito strictness at GitHub
* <a href="https://github.com/mockito/mockito/issues/769">issue 769</a>.
*
* <h3 id="41">41. <a class="meaningful_link" href="#framework_integrations_api" name="framework_integrations_api">
* Advanced public API for framework integrations (Since 2.10.+)</a></h3>
*
* In Summer 2017 we decided that Mockito
* <a href="https://www.linkedin.com/pulse/mockito-vs-powermock-opinionated-dogmatic-static-mocking-faber">
* should offer better API
* </a>
* for advanced framework integrations.
* The new API is not intended for users who want to write unit tests.
* It is intended for other test tools and mocking frameworks that need to extend or wrap Mockito with some custom logic.
* During the design and implementation process (<a href="https://github.com/mockito/mockito/issues/1110">issue 1110</a>)
* we have developed and changed following public API elements:
* <ul>
* <li>New {@link MockitoPlugins} -
* Enables framework integrators to get access to default Mockito plugins.
* Useful when one needs to implement custom plugin such as {@link MockMaker}
* and delegate some behavior to the default Mockito implementation.
* </li>
* <li>New {@link MockSettings#build(Class)} -
* Creates immutable view of mock settings used later by Mockito.
* Useful for creating invocations with {@link InvocationFactory} or when implementing custom {@link MockHandler}.
* </li>
* <li>New {@link MockingDetails#getMockHandler()} -
* Other frameworks may use the mock handler to programmatically simulate invocations on mock objects.
* </li>
* <li>New {@link MockHandler#getMockSettings()} -
* Useful to get hold of the setting the mock object was created with.
* </li>
* <li>New {@link InvocationFactory} -
* Provides means to create instances of {@link Invocation} objects.
* Useful for framework integrations that need to programmatically simulate method calls on mock objects.
* </li>
* <li>New {@link MockHandler#getInvocationContainer()} -
* Provides access to invocation container object which has no methods (marker interface).
* Container is needed to hide the internal implementation and avoid leaking it to the public API.
* </li>
* <li>Changed {@link org.mockito.stubbing.Stubbing} -
* it now extends {@link Answer} interface.
* It is backwards compatible because Stubbing | to |
java | apache__camel | components/camel-saxon/src/test/java/org/apache/camel/language/XQueryLanguageProducerTemplateTest.java | {
"start": 1053,
"end": 1609
} | class ____ {
@Test
public void testXQueryLanguage() {
CamelContext context = new DefaultCamelContext();
context.start();
ProducerTemplate producer = context.createProducerTemplate();
String result = producer.requestBody(
"language:xquery:upper-case(/message/text())?resultType=String",
"<message>Hello from XQuery</message>",
String.class);
Assertions.assertEquals("HELLO FROM XQUERY", result);
context.stop();
}
}
| XQueryLanguageProducerTemplateTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java | {
"start": 2431,
"end": 2745
} | class ____ extends ActionType<GetDataStreamAction.Response> {
public static final GetDataStreamAction INSTANCE = new GetDataStreamAction();
public static final String NAME = "indices:admin/data_stream/get";
private GetDataStreamAction() {
super(NAME);
}
public static | GetDataStreamAction |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/condition/Not_toString_Test.java | {
"start": 967,
"end": 1216
} | class ____ {
@Test
void should_implement_toString_showing_descriptions_of_inner_Conditions() {
// GIVEN
Condition<Object> not = not(new TestCondition<>("Jedi"));
// THEN
then(not).hasToString("not :<Jedi>");
}
}
| Not_toString_Test |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/dev/testing/TestTracingProcessor.java | {
"start": 9083,
"end": 9467
} | class ____ implements Command {
@Override
public CommandResult execute(CommandInvocation commandInvocation) throws CommandException, InterruptedException {
return CommandResult.SUCCESS;
}
}
@CommandDefinition(name = "include", description = "Sets the current included tags, this supports JUnit tag expressions.")
public static | TagsCommand |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 43053,
"end": 43132
} | interface ____ {}
@ThreadSafe
abstract | ThreadSafeInterface |
java | apache__camel | components/camel-opentelemetry-metrics/src/test/java/org/apache/camel/opentelemetry/metrics/integration/eventnotifier/ExchangeEventNotifierAutoConfigIT.java | {
"start": 2713,
"end": 6866
} | class ____ extends CamelTestSupport {
private static final Long DELAY = 250L;
@BeforeAll
public static void init() {
GlobalOpenTelemetry.resetForTest();
// open telemetry auto configuration using console exporter that writes to logging
System.setProperty("otel.java.global-autoconfigure.enabled", "true");
System.setProperty("otel.metrics.exporter", "console");
System.setProperty("otel.traces.exporter", "none");
System.setProperty("otel.logs.exporter", "none");
System.setProperty("otel.propagators", "tracecontext");
System.setProperty("otel.metric.export.interval", "50");
}
@AfterEach
void cleanup() {
GlobalOpenTelemetry.resetForTest();
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
// not setting any meter explicitly, relying on opentelemetry autoconfigure
OpenTelemetryExchangeEventNotifier eventNotifier = new OpenTelemetryExchangeEventNotifier();
context.getManagementStrategy().addEventNotifier(eventNotifier);
eventNotifier.init();
return context;
}
@Test
public void testElapsedTimerEvents() throws Exception {
Logger logger = Logger.getLogger(LoggingMetricExporter.class.getName());
MemoryLogHandler handler = new MemoryLogHandler();
logger.addHandler(handler);
int count = 6;
MockEndpoint mock = getMockEndpoint("mock://result");
mock.expectedMessageCount(count);
for (int i = 0; i < count; i++) {
if (i % 2 == 0) {
template.sendBody("direct:foo", "Hello " + i);
} else {
template.sendBody("direct:bar", "Hello " + i);
}
}
mock.assertIsSatisfied();
await().atMost(Duration.ofMillis(1000L)).until(() -> !handler.getLogs().isEmpty());
List<LogRecord> logs = new ArrayList<>(handler.getLogs());
Map<String, Integer> counts = new HashMap<>();
for (LogRecord log : logs) {
if (log.getParameters() != null && log.getParameters().length > 0) {
MetricData metricData = (MetricData) log.getParameters()[0];
counts.compute(metricData.getName(), (k, v) -> v == null ? 1 : v + 1);
switch (metricData.getName()) {
case DEFAULT_CAMEL_EXCHANGE_ELAPSED_TIMER,
DEFAULT_CAMEL_EXCHANGE_SENT_TIMER -> {
// histogram
assertInstanceOf(HistogramData.class, metricData.getData());
}
case DEFAULT_CAMEL_EXCHANGE_LAST_PROCESSED_TIME_INSTRUMENT,
DEFAULT_CAMEL_ROUTES_EXCHANGES_INFLIGHT -> {
// gauge
assertInstanceOf(GaugeData.class, metricData.getData());
}
default -> fail();
}
}
}
assertEquals(4, counts.size());
assertTrue(counts.get(DEFAULT_CAMEL_EXCHANGE_ELAPSED_TIMER) > 0,
"Should have metric log for " + DEFAULT_CAMEL_EXCHANGE_ELAPSED_TIMER);
assertTrue(counts.get(DEFAULT_CAMEL_EXCHANGE_SENT_TIMER) > 0,
"Should have metric log for " + DEFAULT_CAMEL_EXCHANGE_SENT_TIMER);
assertTrue(counts.get(DEFAULT_CAMEL_EXCHANGE_LAST_PROCESSED_TIME_INSTRUMENT) > 0,
"Should have metric log for " + DEFAULT_CAMEL_EXCHANGE_LAST_PROCESSED_TIME_INSTRUMENT);
assertTrue(counts.get(DEFAULT_CAMEL_ROUTES_EXCHANGES_INFLIGHT) > 0,
"Should have metric log for " + DEFAULT_CAMEL_ROUTES_EXCHANGES_INFLIGHT);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct://foo").routeId("foo").to("mock://result");
from("direct://bar").routeId("bar").delay(DELAY).to("mock://result");
}
};
}
}
| ExchangeEventNotifierAutoConfigIT |
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java | {
"start": 880,
"end": 1156
} | class ____ implements Writeable {
private static final TransportVersion ESQL_SERIALIZE_TIMESERIES_FIELD_TYPE = TransportVersion.fromName(
"esql_serialize_timeseries_field_type"
);
/**
* Fields in a TSDB can be either dimensions or metrics. This | EsField |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/autoconfigure/RemoteDevToolsAutoConfiguration.java | {
"start": 4924,
"end": 6029
} | class ____ {
@Bean
@ConditionalOnMissingBean
SourceDirectoryUrlFilter remoteRestartSourceDirectoryUrlFilter() {
return new DefaultSourceDirectoryUrlFilter();
}
@Bean
@ConditionalOnMissingBean
HttpRestartServer remoteRestartHttpRestartServer(SourceDirectoryUrlFilter sourceDirectoryUrlFilter) {
return new HttpRestartServer(sourceDirectoryUrlFilter);
}
@Bean
@ConditionalOnMissingBean(name = "remoteRestartHandlerMapper")
UrlHandlerMapper remoteRestartHandlerMapper(HttpRestartServer server, ServerProperties serverProperties,
DevToolsProperties properties) {
Servlet servlet = serverProperties.getServlet();
RemoteDevToolsProperties remote = properties.getRemote();
String servletContextPath = (servlet.getContextPath() != null) ? servlet.getContextPath() : "";
String url = servletContextPath + remote.getContextPath() + "/restart";
logger.warn(LogMessage.format("Listening for remote restart updates on %s", url));
Handler handler = new HttpRestartServerHandler(server);
return new UrlHandlerMapper(url, handler);
}
}
}
| RemoteRestartConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/ReturnableType.java | {
"start": 328,
"end": 388
} | interface ____<T> extends SimpleDomainType<T> {
}
| ReturnableType |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/collectionincompatibletype/TruthIncompatibleTypeTest.java | {
"start": 13624,
"end": 14145
} | class ____ {
public void f(Map<String, Long> xs, Map<String, String> ys) {
// BUG: Diagnostic contains:
assertThat(xs).containsExactlyEntriesIn(ys);
}
}
""")
.doTest();
}
@Test
public void mapContainsExactly() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import static com.google.common.truth.Truth.assertThat;
import java.util.Map;
public | Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java | {
"start": 18795,
"end": 20382
} | class ____ implements TransportRequestHandler<CancelChildRequest> {
@Override
public void messageReceived(final CancelChildRequest request, final TransportChannel channel, Task task) throws Exception {
taskManager.cancelChildLocal(request.parentTaskId, request.childRequestId, request.reason);
channel.sendResponse(ActionResponse.Empty.INSTANCE);
}
}
private static final TransportResponseHandler.Empty NOOP_HANDLER = TransportResponseHandler.empty(
TransportResponseHandler.TRANSPORT_WORKER,
ActionListener.noop()
);
/**
* Sends an action to cancel a child task, associated with the given request ID and parent task.
*/
public void cancelChildRemote(TaskId parentTask, long childRequestId, Transport.Connection childConnection, String reason) {
if (childConnection.getTransportVersion().onOrAfter(VERSION_SUPPORTING_CANCEL_CHILD_ACTION)) {
DiscoveryNode childNode = childConnection.getNode();
logger.debug(
"sending cancellation of child of parent task [{}] with request ID [{}] to node [{}] because of [{}]",
parentTask,
childRequestId,
childNode,
reason
);
final CancelChildRequest request = CancelChildRequest.createCancelChildRequest(parentTask, childRequestId, reason);
transportService.sendRequest(childConnection, CANCEL_CHILD_ACTION_NAME, request, TransportRequestOptions.EMPTY, NOOP_HANDLER);
}
}
}
| CancelChildRequestHandler |
java | google__guava | android/guava-tests/test/com/google/common/collect/AbstractImmutableSetTest.java | {
"start": 1864,
"end": 2034
} | class ____ {@link ImmutableSet} and {@link ImmutableSortedSet} tests.
*
* @author Kevin Bourrillion
* @author Jared Levy
*/
@GwtCompatible
@NullMarked
public abstract | for |
java | google__truth | core/src/test/java/com/google/common/truth/StackTraceCleanerTest.java | {
"start": 16377,
"end": 16631
} | class ____ extends Exception {
SelfReferencingThrowable(String... classNames) {
setStackTrace(createStackTrace(classNames));
}
@Override
public synchronized Throwable getCause() {
return this;
}
}
}
| SelfReferencingThrowable |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/common/CatalogLoader.java | {
"start": 14420,
"end": 15646
} | class ____ implements VersionManager {
private ClassLoader classLoader;
private final String version;
public DownloadCatalogVersionManager(String version, ClassLoader classLoader) {
this.version = version;
this.classLoader = classLoader;
}
@Override
public void setClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
}
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
@Override
public String getLoadedVersion() {
return version;
}
@Override
public boolean loadVersion(String version) {
return this.version.equals(version);
}
@Override
public String getRuntimeProviderLoadedVersion() {
return version;
}
@Override
public boolean loadRuntimeProviderVersion(String groupId, String artifactId, String version) {
return true;
}
@Override
public InputStream getResourceAsStream(String name) {
return classLoader.getResourceAsStream(name);
}
}
}
| DownloadCatalogVersionManager |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQueryTestCase.java | {
"start": 935,
"end": 1879
} | class ____<T extends AbstractBooleanScriptFieldQuery> extends
AbstractScriptFieldQueryTestCase<T> {
protected final BooleanFieldScript.LeafFactory leafFactory = mock(BooleanFieldScript.LeafFactory.class);
@Override
public final void testVisit() {
T query = createTestInstance();
List<Query> leavesVisited = new ArrayList<>();
query.visit(new QueryVisitor() {
@Override
public void consumeTerms(Query query, Term... terms) {
fail();
}
@Override
public void consumeTermsMatching(Query query, String field, Supplier<ByteRunAutomaton> automaton) {
fail();
}
@Override
public void visitLeaf(Query query) {
leavesVisited.add(query);
}
});
assertThat(leavesVisited, equalTo(List.of(query)));
}
}
| AbstractBooleanScriptFieldQueryTestCase |
java | apache__camel | components/camel-micrometer-observability/src/test/java/org/apache/camel/micrometer/observability/MicrometerObservabilityTracerTest.java | {
"start": 1444,
"end": 4620
} | class ____ extends MicrometerObservabilityTracerPropagationTestSupport {
@Test
void testRouteSingleRequest() throws IOException {
template.request("direct:start", null);
Map<String, OtelTrace> traces = otelExtension.getTraces();
assertEquals(1, traces.size());
checkTrace(traces.values().iterator().next(), null);
}
@Test
void testRouteMultipleRequests() throws IOException {
for (int i = 1; i <= 10; i++) {
context.createProducerTemplate().sendBody("direct:start", "Hello!");
}
Map<String, OtelTrace> traces = otelExtension.getTraces();
// Each trace should have a unique trace id. It is enough to assert that
// the number of elements in the map is the same of the requests to prove
// all traces have been generated uniquely.
assertEquals(10, traces.size());
// Each trace should have the same structure
for (OtelTrace trace : traces.values()) {
checkTrace(trace, "Hello!");
}
}
private void checkTrace(OtelTrace trace, String expectedBody) {
List<SpanData> spans = trace.getSpans();
assertEquals(3, spans.size());
SpanData testProducer = spans.get(0);
SpanData direct = spans.get(1);
SpanData log = spans.get(2);
// Validate span completion
assertTrue(testProducer.hasEnded());
assertTrue(direct.hasEnded());
assertTrue(log.hasEnded());
// Validate same trace
assertEquals(testProducer.getTraceId(), direct.getTraceId());
assertEquals(direct.getTraceId(), log.getTraceId());
// Validate hierarchy
assertEquals(SpanId.getInvalid(), testProducer.getParentSpanContext().getSpanId());
assertEquals(testProducer.getSpanContext().getSpanId(), direct.getParentSpanContext().getSpanId());
assertEquals(direct.getSpanContext().getSpanId(), log.getParentSpanContext().getSpanId());
// Validate operations
assertEquals(Op.EVENT_SENT.toString(), testProducer.getAttributes().get(AttributeKey.stringKey("op")));
assertEquals(Op.EVENT_RECEIVED.toString(), direct.getAttributes().get(AttributeKey.stringKey("op")));
// Validate message logging
assertEquals("message=A message", direct.getEvents().get(0).getName());
if (expectedBody == null) {
assertEquals(
"message=Exchange[ExchangePattern: InOut, BodyType: null, Body: [Body is null]]",
log.getEvents().get(0).getName());
} else {
assertEquals(
"message=Exchange[ExchangePattern: InOnly, BodyType: String, Body: " + expectedBody + "]",
log.getEvents().get(0).getName());
}
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.routeId("start")
.log("A message")
.to("log:info");
}
};
}
}
| MicrometerObservabilityTracerTest |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/interceptor/other/Converter.java | {
"start": 66,
"end": 119
} | interface ____<T> {
T convert(T value);
}
| Converter |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/functions/UserDefinedFunctionHelperTest.java | {
"start": 18618,
"end": 18897
} | class ____
extends TableAggregateFunction<String, String> {
public void accumulate(String acc, String in) {
// nothing to do
}
}
/** Hierarchy that is implementing different methods. */
public static | AbstractTableAggregateFunction |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/alter/OracleAlterTableTest.java | {
"start": 977,
"end": 2442
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"alter table WRH$_SERVICE_WAIT_CLASS drop partition WRH$_SERVIC_1870432296_13478";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("WRH$_SERVICE_WAIT_CLASS")));
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
| OracleAlterTableTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/nested/fieldaccess/Customer.java | {
"start": 653,
"end": 1175
} | class ____ {
@Id
@GeneratedValue( generator="increment" )
@GenericGenerator( name = "increment", strategy = "increment" )
private Long id;
@ElementCollection(fetch = FetchType.EAGER)
private List<Investment> investments = new ArrayList<Investment>();
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public List<Investment> getInvestments() {
return investments;
}
public void setInvestments(List<Investment> investments) {
this.investments = investments;
}
}
| Customer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/EmptyBlockTagTest.java | {
"start": 5961,
"end": 6324
} | interface ____ {
/**
* @deprecated Very old
*/
@Deprecated
void foo();
}
""")
.doTest();
}
@Test
public void keeps_allTheThingsWithDescriptions() {
compilationTestHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | resilience4j__resilience4j | resilience4j-spring6/src/test/java/io/github/resilience4j/spring6/micrometer/configure/utils/TestConfiguration.java | {
"start": 314,
"end": 444
} | class ____ {
@Bean
public MeterRegistry meterRegistry() {
return new SimpleMeterRegistry();
}
}
| TestConfiguration |
java | google__dagger | javatests/dagger/functional/membersinject/MembersInjectTest.java | {
"start": 3773,
"end": 3884
} | interface ____ {
MembersInjector<A> getAMembersInjector();
@Component.Factory
| NonLocalMembersComponent |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/IdentityMapper.java | {
"start": 1317,
"end": 1652
} | class ____<K, V>
extends MapReduceBase implements Mapper<K, V, K, V> {
/** The identity function. Input key/value pair is written directly to
* output.*/
public void map(K key, V val,
OutputCollector<K, V> output, Reporter reporter)
throws IOException {
output.collect(key, val);
}
}
| IdentityMapper |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/stubbing/VoidAnswer1.java | {
"start": 166,
"end": 929
} | interface ____ be used for configuring mock's answer for a single argument invocation that returns nothing.
*
* Answer specifies an action that is executed when you interact with the mock.
* <p>
* Example of stubbing a mock with this custom answer:
*
* <pre class="code"><code class="java">
* import static org.mockito.AdditionalAnswers.answerVoid;
*
* doAnswer(answerVoid(
* new VoidAnswer1<String>() {
* public void answer(String msg) throws Exception {
* throw new Exception(msg);
* }
* })).when(mock).someMethod(anyString());
*
* //Following will raise an exception with the message "boom"
* mock.someMethod("boom");
* </code></pre>
*
* @param <A0> type of the single argument
* @see Answer
*/
public | to |
java | alibaba__nacos | test/naming-test/src/test/java/com/alibaba/nacos/test/naming/SelectOneHealthyInstanceNamingITCase.java | {
"start": 1866,
"end": 6504
} | class ____ {
private static NamingService naming;
private static NamingService naming1;
private static NamingService naming2;
private static NamingService naming3;
private static NamingService naming4;
@LocalServerPort
private int port;
@AfterAll
static void tearDown() throws NacosException {
if (null != naming) {
naming.shutDown();
}
if (null != naming1) {
naming1.shutDown();
}
if (null != naming2) {
naming2.shutDown();
}
if (null != naming3) {
naming3.shutDown();
}
if (null != naming4) {
naming4.shutDown();
}
}
@BeforeEach
void init() throws Exception {
if (naming == null) {
//TimeUnit.SECONDS.sleep(10);
naming = NamingFactory.createNamingService("127.0.0.1" + ":" + port);
naming1 = NamingFactory.createNamingService("127.0.0.1" + ":" + port);
naming2 = NamingFactory.createNamingService("127.0.0.1" + ":" + port);
naming3 = NamingFactory.createNamingService("127.0.0.1" + ":" + port);
naming4 = NamingFactory.createNamingService("127.0.0.1" + ":" + port);
}
}
/**
* 获取一个健康的Instance
*
* @throws Exception
*/
@Test
void selectOneHealthyInstances() throws Exception {
String serviceName = randomDomainName();
naming.registerInstance(serviceName, "127.0.0.1", TEST_PORT);
naming1.registerInstance(serviceName, "127.0.0.1", 60000);
TimeUnit.SECONDS.sleep(2);
Instance instance = naming.selectOneHealthyInstance(serviceName);
List<Instance> instancesGet = naming.getAllInstances(serviceName);
for (Instance instance1 : instancesGet) {
if (instance1.getIp().equals(instance.getIp()) && instance1.getPort() == instance.getPort()) {
assertTrue(instance.isHealthy());
assertTrue(verifyInstance(instance1, instance));
return;
}
}
fail();
}
/**
* 获取指定单个cluster中一个健康的Instance
*
* @throws Exception
*/
@Test
void selectOneHealthyInstancesCluster() throws Exception {
String serviceName = randomDomainName();
naming.registerInstance(serviceName, "127.0.0.1", TEST_PORT, "c1");
naming1.registerInstance(serviceName, "127.0.0.1", 60000, "c1");
naming2.registerInstance(serviceName, "1.1.1.1", TEST_PORT, "c1");
naming3.registerInstance(serviceName, "127.0.0.1", 60001, "c1");
naming4.registerInstance(serviceName, "127.0.0.1", 60002, "c2");
TimeUnit.SECONDS.sleep(2);
Instance instance = naming.selectOneHealthyInstance(serviceName, Arrays.asList("c1"));
assertNotSame("1.1.1.1", instance.getIp());
assertTrue(instance.getPort() != 60002);
List<Instance> instancesGet = naming.getAllInstances(serviceName);
for (Instance instance1 : instancesGet) {
if (instance1.getIp().equals(instance.getIp()) && instance1.getPort() == instance.getPort()) {
assertTrue(instance.isHealthy());
assertTrue(verifyInstance(instance1, instance));
return;
}
}
fail();
}
/**
* 获取指定多个cluster中一个健康的Instance
*
* @throws Exception
*/
@Test
void selectOneHealthyInstancesClusters() throws Exception {
String serviceName = randomDomainName();
naming.registerInstance(serviceName, "1.1.1.1", TEST_PORT, "c1");
naming1.registerInstance(serviceName, "127.0.0.1", TEST_PORT, "c1");
naming2.registerInstance(serviceName, "127.0.0.1", 60000, "c1");
naming3.registerInstance(serviceName, "127.0.0.1", 60001, "c2");
TimeUnit.SECONDS.sleep(2);
Instance instance = naming.selectOneHealthyInstance(serviceName, Arrays.asList("c1", "c2"));
assertNotSame("1.1.1.1", instance.getIp());
List<Instance> instancesGet = naming.getAllInstances(serviceName);
for (Instance instance1 : instancesGet) {
if (instance1.getIp().equals(instance.getIp()) && instance1.getPort() == instance.getPort()) {
assertTrue(instance.isHealthy());
assertTrue(verifyInstance(instance1, instance));
return;
}
}
fail();
}
}
| SelectOneHealthyInstanceNamingITCase |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/internal/ExceptionFactory.java | {
"start": 1389,
"end": 5894
} | class ____ {
private static final DateTimeFormatter MINUTES = new DateTimeFormatterBuilder().appendText(ChronoField.MINUTE_OF_DAY)
.appendLiteral(" minute(s)").toFormatter();
private static final DateTimeFormatter SECONDS = new DateTimeFormatterBuilder().appendText(ChronoField.SECOND_OF_DAY)
.appendLiteral(" second(s)").toFormatter();
private static final DateTimeFormatter MILLISECONDS = new DateTimeFormatterBuilder().appendText(ChronoField.MILLI_OF_DAY)
.appendLiteral(" millisecond(s)").toFormatter();
private ExceptionFactory() {
}
/**
* Create a {@link RedisCommandTimeoutException} with a detail message given the timeout.
*
* @param timeout the timeout value.
* @return the {@link RedisCommandTimeoutException}.
*/
public static RedisCommandTimeoutException createTimeoutException(Duration timeout) {
return new RedisCommandTimeoutException(String.format("Command timed out after %s", formatTimeout(timeout)));
}
/**
* Create a {@link RedisCommandTimeoutException} with a detail message given the message and timeout.
*
* @param message the detail message.
* @param timeout the timeout value.
* @return the {@link RedisCommandTimeoutException}.
*/
public static RedisCommandTimeoutException createTimeoutException(String message, Duration timeout) {
return new RedisCommandTimeoutException(
String.format("%s. Command timed out after %s", message, formatTimeout(timeout)));
}
public static String formatTimeout(Duration duration) {
if (duration.isZero()) {
return "no timeout";
}
LocalTime time = LocalTime.MIDNIGHT.plus(duration);
if (isExactMinutes(duration)) {
return MINUTES.format(time);
}
if (isExactSeconds(duration)) {
return SECONDS.format(time);
}
if (isExactMillis(duration)) {
return MILLISECONDS.format(time);
}
return String.format("%d ns", duration.toNanos());
}
private static boolean isExactMinutes(Duration duration) {
return duration.toMillis() % (1000 * 60) == 0 && duration.getNano() == 0;
}
private static boolean isExactSeconds(Duration duration) {
return duration.toMillis() % (1000) == 0 && duration.getNano() == 0;
}
private static boolean isExactMillis(Duration duration) {
return duration.toNanos() % (1000 * 1000) == 0;
}
/**
* Create a {@link RedisCommandExecutionException} with a detail message. Specific Redis error messages may create subtypes
* of {@link RedisCommandExecutionException}.
*
* @param message the detail message.
* @return the {@link RedisCommandExecutionException}.
*/
public static RedisCommandExecutionException createExecutionException(String message) {
return createExecutionException(message, null);
}
/**
* Create a {@link RedisCommandExecutionException} with a detail message and optionally a {@link Throwable cause}. Specific
* Redis error messages may create subtypes of {@link RedisCommandExecutionException}.
*
* @param message the detail message.
* @param cause the nested exception, may be {@code null}.
* @return the {@link RedisCommandExecutionException}.
*/
public static RedisCommandExecutionException createExecutionException(String message, Throwable cause) {
if (message != null) {
if (message.startsWith("BUSY")) {
return cause != null ? new RedisBusyException(message, cause) : new RedisBusyException(message);
}
if (message.startsWith("NOSCRIPT")) {
return cause != null ? new RedisNoScriptException(message, cause) : new RedisNoScriptException(message);
}
if (message.startsWith("LOADING")) {
return cause != null ? new RedisLoadingException(message, cause) : new RedisLoadingException(message);
}
if (message.startsWith("READONLY")) {
return cause != null ? new RedisReadOnlyException(message, cause) : new RedisReadOnlyException(message);
}
return cause != null ? new RedisCommandExecutionException(message, cause)
: new RedisCommandExecutionException(message);
}
return new RedisCommandExecutionException(cause);
}
}
| ExceptionFactory |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java | {
"start": 5757,
"end": 69788
} | class ____ {
private String clusterId = DEFAULT_CLUSTER_ID;
private List<Node> brokers = new ArrayList<>();
private Node controller = null;
private List<List<String>> brokerLogDirs = new ArrayList<>();
private Short defaultPartitions;
private boolean usingRaftController = false;
private Integer defaultReplicationFactor;
private Map<String, Short> featureLevels = Collections.emptyMap();
private Map<String, Short> minSupportedFeatureLevels = Collections.emptyMap();
private Map<String, Short> maxSupportedFeatureLevels = Collections.emptyMap();
private Map<String, String> defaultGroupConfigs = Collections.emptyMap();
public Builder() {
numBrokers(1);
}
public Builder clusterId(String clusterId) {
this.clusterId = clusterId;
return this;
}
public Builder brokers(List<Node> brokers) {
numBrokers(brokers.size());
this.brokers = brokers;
return this;
}
public final Builder numBrokers(int numBrokers) {
if (brokers.size() >= numBrokers) {
brokers = brokers.subList(0, numBrokers);
brokerLogDirs = brokerLogDirs.subList(0, numBrokers);
} else {
for (int id = brokers.size(); id < numBrokers; id++) {
brokers.add(new Node(id, "localhost", 1000 + id));
brokerLogDirs.add(DEFAULT_LOG_DIRS);
}
}
return this;
}
public Builder controller(int index) {
this.controller = brokers.get(index);
return this;
}
public Builder brokerLogDirs(List<List<String>> brokerLogDirs) {
this.brokerLogDirs = brokerLogDirs;
return this;
}
public Builder defaultReplicationFactor(int defaultReplicationFactor) {
this.defaultReplicationFactor = defaultReplicationFactor;
return this;
}
public Builder usingRaftController(boolean usingRaftController) {
this.usingRaftController = usingRaftController;
return this;
}
public Builder defaultPartitions(short numPartitions) {
this.defaultPartitions = numPartitions;
return this;
}
public Builder featureLevels(Map<String, Short> featureLevels) {
this.featureLevels = featureLevels;
return this;
}
public Builder minSupportedFeatureLevels(Map<String, Short> minSupportedFeatureLevels) {
this.minSupportedFeatureLevels = minSupportedFeatureLevels;
return this;
}
public Builder maxSupportedFeatureLevels(Map<String, Short> maxSupportedFeatureLevels) {
this.maxSupportedFeatureLevels = maxSupportedFeatureLevels;
return this;
}
public Builder defaultGroupConfigs(Map<String, String> defaultGroupConfigs) {
this.defaultGroupConfigs = defaultGroupConfigs;
return this;
}
public MockAdminClient build() {
return new MockAdminClient(brokers,
controller == null ? brokers.get(0) : controller,
clusterId,
defaultPartitions != null ? defaultPartitions : 1,
defaultReplicationFactor != null ? defaultReplicationFactor.shortValue() : Math.min(brokers.size(), 3),
brokerLogDirs,
usingRaftController,
featureLevels,
minSupportedFeatureLevels,
maxSupportedFeatureLevels,
defaultGroupConfigs);
}
}
public MockAdminClient() {
this(Collections.singletonList(Node.noNode()), Node.noNode());
}
public MockAdminClient(List<Node> brokers, Node controller) {
this(brokers,
controller,
DEFAULT_CLUSTER_ID,
1,
brokers.size(),
Collections.nCopies(brokers.size(), DEFAULT_LOG_DIRS),
false,
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap());
}
private MockAdminClient(
List<Node> brokers,
Node controller,
String clusterId,
int defaultPartitions,
int defaultReplicationFactor,
List<List<String>> brokerLogDirs,
boolean usingRaftController,
Map<String, Short> featureLevels,
Map<String, Short> minSupportedFeatureLevels,
Map<String, Short> maxSupportedFeatureLevels,
Map<String, String> defaultGroupConfigs
) {
this.brokers = brokers;
controller(controller);
this.clusterId = clusterId;
this.defaultPartitions = defaultPartitions;
this.defaultReplicationFactor = defaultReplicationFactor;
this.brokerLogDirs = brokerLogDirs;
this.brokerConfigs = new ArrayList<>();
this.clientMetricsConfigs = new HashMap<>();
this.groupConfigs = new HashMap<>();
this.defaultGroupConfigs = new HashMap<>(defaultGroupConfigs);
for (int i = 0; i < brokers.size(); i++) {
final Map<String, String> config = new HashMap<>();
config.put("default.replication.factor", String.valueOf(defaultReplicationFactor));
this.brokerConfigs.add(config);
}
this.beginningOffsets = new HashMap<>();
this.endOffsets = new HashMap<>();
this.committedOffsets = new HashMap<>();
this.usingRaftController = usingRaftController;
this.featureLevels = new HashMap<>(featureLevels);
this.minSupportedFeatureLevels = new HashMap<>(minSupportedFeatureLevels);
this.maxSupportedFeatureLevels = new HashMap<>(maxSupportedFeatureLevels);
}
public final synchronized void controller(Node controller) {
if (!brokers.contains(controller))
throw new IllegalArgumentException("The controller node must be in the list of brokers");
this.controller = controller;
}
public void addTopic(boolean internal,
String name,
List<TopicPartitionInfo> partitions,
Map<String, String> configs) {
addTopic(internal, name, partitions, configs, true);
}
public synchronized void addTopic(boolean internal,
String name,
List<TopicPartitionInfo> partitions,
Map<String, String> configs,
boolean usesTopicId) {
if (allTopics.containsKey(name)) {
throw new IllegalArgumentException(String.format("Topic %s was already added.", name));
}
for (TopicPartitionInfo partition : partitions) {
if (!brokers.contains(partition.leader())) {
throw new IllegalArgumentException("Leader broker unknown");
}
if (!brokers.containsAll(partition.replicas())) {
throw new IllegalArgumentException("Unknown brokers in replica list");
}
if (!brokers.containsAll(partition.isr())) {
throw new IllegalArgumentException("Unknown brokers in isr list");
}
}
ArrayList<String> logDirs = new ArrayList<>();
for (TopicPartitionInfo partition : partitions) {
if (partition.leader() != null) {
logDirs.add(brokerLogDirs.get(partition.leader().id()).get(0));
}
}
Uuid topicId;
if (usesTopicId) {
topicId = Uuid.randomUuid();
topicIds.put(name, topicId);
topicNames.put(topicId, name);
} else {
topicId = Uuid.ZERO_UUID;
}
allTopics.put(name, new TopicMetadata(topicId, internal, partitions, logDirs, configs));
}
public synchronized void markTopicForDeletion(final String name) {
if (!allTopics.containsKey(name)) {
throw new IllegalArgumentException(String.format("Topic %s did not exist.", name));
}
allTopics.get(name).markedForDeletion = true;
}
public synchronized void timeoutNextRequest(int numberOfRequest) {
timeoutNextRequests = numberOfRequest;
}
@Override
public synchronized DescribeClusterResult describeCluster(DescribeClusterOptions options) {
KafkaFutureImpl<Collection<Node>> nodesFuture = new KafkaFutureImpl<>();
KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
KafkaFutureImpl<String> brokerIdFuture = new KafkaFutureImpl<>();
KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>();
if (timeoutNextRequests > 0) {
nodesFuture.completeExceptionally(new TimeoutException());
controllerFuture.completeExceptionally(new TimeoutException());
brokerIdFuture.completeExceptionally(new TimeoutException());
authorizedOperationsFuture.completeExceptionally(new TimeoutException());
--timeoutNextRequests;
} else {
nodesFuture.complete(brokers);
controllerFuture.complete(controller);
brokerIdFuture.complete(clusterId);
authorizedOperationsFuture.complete(Collections.emptySet());
}
return new DescribeClusterResult(nodesFuture, controllerFuture, brokerIdFuture, authorizedOperationsFuture);
}
@Override
public synchronized CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
Map<String, KafkaFuture<CreateTopicsResult.TopicMetadataAndConfig>> createTopicResult = new HashMap<>();
if (timeoutNextRequests > 0) {
for (final NewTopic newTopic : newTopics) {
String topicName = newTopic.name();
KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new TimeoutException());
createTopicResult.put(topicName, future);
}
--timeoutNextRequests;
return new CreateTopicsResult(createTopicResult);
}
for (final NewTopic newTopic : newTopics) {
KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
String topicName = newTopic.name();
if (allTopics.containsKey(topicName)) {
future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
createTopicResult.put(topicName, future);
continue;
}
int replicationFactor = newTopic.replicationFactor();
if (replicationFactor == -1) {
replicationFactor = defaultReplicationFactor;
}
if (replicationFactor > brokers.size()) {
future.completeExceptionally(new InvalidReplicationFactorException(
String.format("Replication factor: %d is larger than brokers: %d", newTopic.replicationFactor(), brokers.size())));
createTopicResult.put(topicName, future);
continue;
}
List<Node> replicas = new ArrayList<>(replicationFactor);
for (int i = 0; i < replicationFactor; ++i) {
replicas.add(brokers.get(i));
}
int numberOfPartitions = newTopic.numPartitions();
if (numberOfPartitions == -1) {
numberOfPartitions = defaultPartitions;
}
List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
// Partitions start off on the first log directory of each broker, for now.
List<String> logDirs = new ArrayList<>(numberOfPartitions);
for (int i = 0; i < numberOfPartitions; i++) {
partitions.add(new TopicPartitionInfo(i, brokers.get(0), replicas, Collections.emptyList(), Collections.emptyList(), Collections.emptyList()));
logDirs.add(brokerLogDirs.get(partitions.get(i).leader().id()).get(0));
}
Uuid topicId = Uuid.randomUuid();
topicIds.put(topicName, topicId);
topicNames.put(topicId, topicName);
allTopics.put(topicName, new TopicMetadata(topicId, false, partitions, logDirs, newTopic.configs()));
future.complete(new CreateTopicsResult.TopicMetadataAndConfig(topicId, numberOfPartitions, replicationFactor, config(newTopic)));
createTopicResult.put(topicName, future);
}
return new CreateTopicsResult(createTopicResult);
}
private static Config config(NewTopic newTopic) {
Collection<ConfigEntry> configEntries = new ArrayList<>();
if (newTopic.configs() != null) {
for (Map.Entry<String, String> entry : newTopic.configs().entrySet()) {
configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
}
}
return new Config(configEntries);
}
@Override
public synchronized ListTopicsResult listTopics(ListTopicsOptions options) {
Map<String, TopicListing> topicListings = new HashMap<>();
if (timeoutNextRequests > 0) {
KafkaFutureImpl<Map<String, TopicListing>> future = new KafkaFutureImpl<>();
future.completeExceptionally(new TimeoutException());
--timeoutNextRequests;
return new ListTopicsResult(future);
}
for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
String topicName = topicDescription.getKey();
if (topicDescription.getValue().fetchesRemainingUntilVisible > 0) {
topicDescription.getValue().fetchesRemainingUntilVisible--;
} else {
topicListings.put(topicName, new TopicListing(topicName, topicDescription.getValue().topicId, topicDescription.getValue().isInternalTopic));
}
}
KafkaFutureImpl<Map<String, TopicListing>> future = new KafkaFutureImpl<>();
future.complete(topicListings);
return new ListTopicsResult(future);
}
@Override
public synchronized DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds())));
else if (topics instanceof TopicNameCollection)
return DescribeTopicsResult.ofTopicNames(new HashMap<>(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames())));
else
throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for describeTopics.");
}
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNames(Collection<String> topicNames) {
Map<String, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();
if (timeoutNextRequests > 0) {
for (String requestedTopic : topicNames) {
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.completeExceptionally(new TimeoutException());
topicDescriptions.put(requestedTopic, future);
}
--timeoutNextRequests;
return topicDescriptions;
}
for (String requestedTopic : topicNames) {
for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
String topicName = topicDescription.getKey();
Uuid topicId = topicIds.getOrDefault(topicName, Uuid.ZERO_UUID);
if (topicName.equals(requestedTopic) && !topicDescription.getValue().markedForDeletion) {
if (topicDescription.getValue().fetchesRemainingUntilVisible > 0) {
topicDescription.getValue().fetchesRemainingUntilVisible--;
} else {
TopicMetadata topicMetadata = topicDescription.getValue();
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.complete(new TopicDescription(topicName, topicMetadata.isInternalTopic, topicMetadata.partitions, Collections.emptySet(), topicId));
topicDescriptions.put(topicName, future);
break;
}
}
}
if (!topicDescriptions.containsKey(requestedTopic)) {
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + requestedTopic + " not found."));
topicDescriptions.put(requestedTopic, future);
}
}
return topicDescriptions;
}
public synchronized Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsUsingIds(Collection<Uuid> topicIds) {
Map<Uuid, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();
if (timeoutNextRequests > 0) {
for (Uuid requestedTopicId : topicIds) {
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.completeExceptionally(new TimeoutException());
topicDescriptions.put(requestedTopicId, future);
}
--timeoutNextRequests;
return topicDescriptions;
}
for (Uuid requestedTopicId : topicIds) {
for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
String topicName = topicDescription.getKey();
Uuid topicId = this.topicIds.get(topicName);
if (topicId != null && topicId.equals(requestedTopicId) && !topicDescription.getValue().markedForDeletion) {
if (topicDescription.getValue().fetchesRemainingUntilVisible > 0) {
topicDescription.getValue().fetchesRemainingUntilVisible--;
} else {
TopicMetadata topicMetadata = topicDescription.getValue();
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.complete(new TopicDescription(topicName, topicMetadata.isInternalTopic, topicMetadata.partitions, Collections.emptySet(), topicId));
topicDescriptions.put(requestedTopicId, future);
break;
}
}
}
if (!topicDescriptions.containsKey(requestedTopicId)) {
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.completeExceptionally(new UnknownTopicIdException("Topic id" + requestedTopicId + " not found."));
topicDescriptions.put(requestedTopicId, future);
}
}
return topicDescriptions;
}
@Override
public synchronized DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) {
DeleteTopicsResult result;
if (topics instanceof TopicIdCollection)
result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds())));
else if (topics instanceof TopicNameCollection)
result = DeleteTopicsResult.ofTopicNames(new HashMap<>(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames())));
else
throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for deleteTopics.");
return result;
}
/**
 * Handles topic deletion by name. Existing topics are removed from {@code allTopics}
 * and their id/name bookkeeping is cleared; unknown topics fail their future with
 * {@link UnknownTopicOrPartitionException}.
 */
private Map<String, KafkaFuture<Void>> handleDeleteTopicsUsingNames(Collection<String> topicNameCollection) {
    Map<String, KafkaFuture<Void>> deleteTopicsResult = new HashMap<>();
    Collection<String> topicNames = new ArrayList<>(topicNameCollection);
    // Simulate a broker-side timeout for the next N requests, if configured.
    if (timeoutNextRequests > 0) {
        for (final String topicName : topicNames) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            deleteTopicsResult.put(topicName, future);
        }
        --timeoutNextRequests;
        return deleteTopicsResult;
    }
    for (final String topicName : topicNames) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        if (allTopics.remove(topicName) == null) {
            future.completeExceptionally(new UnknownTopicOrPartitionException(
                String.format("Topic %s does not exist.", topicName)));
        } else {
            // Keep the id<->name bookkeeping consistent with allTopics. The explicit
            // 'this.' is required: the local 'topicNames' collection shadows the field,
            // and Collection<String>.remove(Uuid) on the local was a silent no-op,
            // leaving a stale Uuid->name mapping behind after deletion.
            this.topicNames.remove(topicIds.remove(topicName));
            future.complete(null);
        }
        deleteTopicsResult.put(topicName, future);
    }
    return deleteTopicsResult;
}
/**
 * Handles topic deletion by id. Existing topics are removed from {@code allTopics}
 * and their id/name bookkeeping is cleared; unknown ids fail their future with
 * {@link UnknownTopicOrPartitionException}.
 */
private Map<Uuid, KafkaFuture<Void>> handleDeleteTopicsUsingIds(Collection<Uuid> topicIdCollection) {
    Map<Uuid, KafkaFuture<Void>> deleteTopicsResult = new HashMap<>();
    Collection<Uuid> topicIds = new ArrayList<>(topicIdCollection);
    // Simulate a broker-side timeout for the next N requests, if configured.
    if (timeoutNextRequests > 0) {
        for (final Uuid topicId : topicIds) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            deleteTopicsResult.put(topicId, future);
        }
        --timeoutNextRequests;
        return deleteTopicsResult;
    }
    for (final Uuid topicId : topicIds) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        String name = topicNames.remove(topicId);
        if (name == null || allTopics.remove(name) == null) {
            future.completeExceptionally(new UnknownTopicOrPartitionException(String.format("Topic %s does not exist.", topicId)));
        } else {
            // Remove the name->id mapping from the FIELD map. The explicit 'this.' is
            // required: the local 'topicIds' collection shadows the field, and
            // Collection<Uuid>.remove(String) on the local was a silent no-op,
            // leaving a stale name->id mapping behind after deletion.
            this.topicIds.remove(name);
            future.complete(null);
        }
        deleteTopicsResult.put(topicId, future);
    }
    return deleteTopicsResult;
}
// Partition expansion is not supported by this mock.
@Override
public synchronized CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions, CreatePartitionsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Only the trivial (empty) delete-records request is supported by the mock;
 * anything else fails fast.
 */
@Override
public synchronized DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete, DeleteRecordsOptions options) {
    if (!recordsToDelete.isEmpty()) {
        throw new UnsupportedOperationException("Not implemented yet");
    }
    return new DeleteRecordsResult(new HashMap<>());
}
/**
 * Creates a delegation token and records it in {@code allTokens}. All renewers
 * must be user-type principals.
 */
@Override
public synchronized CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options) {
    KafkaFutureImpl<DelegationToken> future = new KafkaFutureImpl<>();
    // Reject any non-user renewer up front.
    for (KafkaPrincipal renewer : options.renewers()) {
        if (!renewer.getPrincipalType().equals(KafkaPrincipal.USER_TYPE)) {
            future.completeExceptionally(new InvalidPrincipalTypeException(""));
            return new CreateDelegationTokenResult(future);
        }
    }
    String tokenId = Uuid.randomUuid().toString();
    // NOTE(review): renewers().get(0) is used as the token owner and throws
    // IndexOutOfBoundsException when the renewer list is empty — confirm callers
    // always pass at least one renewer.
    TokenInformation tokenInfo = new TokenInformation(tokenId, options.renewers().get(0), options.renewers(), System.currentTimeMillis(), options.maxLifetimeMs(), -1);
    DelegationToken token = new DelegationToken(tokenInfo, tokenId.getBytes());
    allTokens.add(token);
    future.complete(token);
    return new CreateDelegationTokenResult(future);
}
/**
 * Renews every stored token whose HMAC matches, pushing its expiry to the
 * requested renew period; fails if no token matches.
 */
@Override
public synchronized RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options) {
    long newExpiry = options.renewTimePeriodMs();
    boolean matched = false;
    for (DelegationToken candidate : allTokens) {
        if (Arrays.equals(candidate.hmac(), hmac)) {
            candidate.tokenInfo().setExpiryTimestamp(newExpiry);
            matched = true;
        }
    }
    KafkaFutureImpl<Long> result = new KafkaFutureImpl<>();
    if (matched) {
        result.complete(newExpiry);
    } else {
        result.completeExceptionally(new DelegationTokenNotFoundException(""));
    }
    return new RenewDelegationTokenResult(result);
}
/**
 * Expires tokens whose HMAC matches. A period of -1 (or one already in the past)
 * removes matching tokens outright; otherwise they are left in place.
 */
@Override
public synchronized ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options) {
    long expiry = options.expiryTimePeriodMs();
    boolean matched = false;
    List<DelegationToken> expired = new ArrayList<>();
    for (DelegationToken candidate : allTokens) {
        if (!Arrays.equals(candidate.hmac(), hmac)) {
            continue;
        }
        matched = true;
        if (expiry == -1 || expiry < System.currentTimeMillis()) {
            expired.add(candidate);
        }
    }
    KafkaFutureImpl<Long> result = new KafkaFutureImpl<>();
    if (matched) {
        allTokens.removeAll(expired);
        result.complete(expiry);
    } else {
        result.completeExceptionally(new DelegationTokenNotFoundException(""));
    }
    return new ExpireDelegationTokenResult(result);
}
/**
 * Reports stored delegation tokens, optionally filtered to the requested owners.
 * An empty owner list means "all tokens".
 */
@Override
public synchronized DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options) {
    KafkaFutureImpl<List<DelegationToken>> future = new KafkaFutureImpl<>();
    if (options.owners().isEmpty()) {
        future.complete(allTokens);
    } else {
        List<DelegationToken> matching = allTokens.stream()
            .filter(token -> options.owners().contains(token.tokenInfo().owner()))
            .collect(Collectors.toList());
        future.complete(matching);
    }
    return new DescribeDelegationTokenResult(future);
}
/**
 * Lists every group known to the mock, reporting each as a stable consumer group.
 */
@Override
public synchronized ListGroupsResult listGroups(ListGroupsOptions options) {
    List<Object> listings = new ArrayList<>();
    for (String groupId : groupConfigs.keySet()) {
        listings.add(new GroupListing(groupId, Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)));
    }
    KafkaFutureImpl<Collection<Object>> future = new KafkaFutureImpl<>();
    future.complete(listings);
    return new ListGroupsResult(future);
}
// Consumer-group description is not supported by this mock.
@Override
public synchronized DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, DescribeConsumerGroupsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
// Deprecated Admin API retained for compatibility: reports every known group as a
// non-simple consumer group.
@Override
@SuppressWarnings("removal")
public synchronized ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
    KafkaFutureImpl<Collection<Object>> future = new KafkaFutureImpl<>();
    future.complete(groupConfigs.keySet().stream().map(g -> new ConsumerGroupListing(g, false)).collect(Collectors.toList()));
    return new ListConsumerGroupsResult(future);
}
/**
 * Returns the committed offsets recorded in the mock. Only a single group per
 * call is supported; the group id itself is otherwise ignored.
 */
@Override
public synchronized ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options) {
    if (groupSpecs.size() != 1)
        throw new UnsupportedOperationException("Not implemented yet");
    String group = groupSpecs.keySet().iterator().next();
    Collection<TopicPartition> requested = groupSpecs.get(group).topicPartitions();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> committed : committedOffsets.entrySet()) {
        // An empty partition list means "all partitions".
        if (requested.isEmpty() || requested.contains(committed.getKey())) {
            offsets.put(committed.getKey(), new OffsetAndMetadata(committed.getValue()));
        }
    }
    final KafkaFutureImpl<Map<TopicPartition, OffsetAndMetadata>> future = new KafkaFutureImpl<>();
    future.complete(offsets);
    return new ListConsumerGroupOffsetsResult(Collections.singletonMap(CoordinatorKey.byGroupId(group), future));
}
/**
 * Delegates to the consumer-group implementation after translating each streams
 * spec into the equivalent consumer spec.
 */
@Override
public synchronized ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec> groupSpecs, ListStreamsGroupOffsetsOptions options) {
    Map<String, ListConsumerGroupOffsetsSpec> translated = new HashMap<>();
    for (Map.Entry<String, ListStreamsGroupOffsetsSpec> entry : groupSpecs.entrySet()) {
        translated.put(entry.getKey(), new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()));
    }
    return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(translated, new ListConsumerGroupOffsetsOptions()));
}
// --- Unsupported operations -------------------------------------------------
// The group-deletion, offset-deletion, leader-election, member-removal and ACL
// APIs below are not implemented by this mock; each call fails fast.
@Override
public synchronized DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteStreamsGroupsResult deleteStreamsGroups(Collection<String> groupIds, DeleteStreamsGroupsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteStreamsGroupOffsetsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized ElectLeadersResult electLeaders(
    ElectionType electionType,
    Set<TopicPartition> partitions,
    ElectLeadersOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Describes the configs of the requested resources. Per-resource lookup errors
 * are captured in the corresponding future rather than thrown.
 */
@Override
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Config>> futures = new HashMap<>();
    // Simulated broker-side timeout for the next N requests, if configured.
    if (timeoutNextRequests > 0) {
        --timeoutNextRequests;
        for (ConfigResource resource : resources) {
            KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            futures.put(resource, future);
        }
        return new DescribeConfigsResult(futures);
    }
    for (ConfigResource resource : resources) {
        KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
        futures.put(resource, future);
        try {
            future.complete(getResourceDescription(resource));
        } catch (Throwable e) {
            future.completeExceptionally(e);
        }
    }
    return new DescribeConfigsResult(futures);
}
/**
 * Resolves the current config map for {@code resource} as a {@link Config}.
 * Throws if the resource is unknown or (for topics) not yet visible.
 */
private synchronized Config getResourceDescription(ConfigResource resource) {
    switch (resource.type()) {
        case BROKER: {
            int brokerId = Integer.parseInt(resource.name());
            if (brokerId >= brokerConfigs.size()) {
                throw new InvalidRequestException("Broker " + resource.name() +
                    " not found.");
            }
            return toConfigObject(brokerConfigs.get(brokerId));
        }
        case TOPIC: {
            TopicMetadata topicMetadata = allTopics.get(resource.name());
            if (topicMetadata != null && !topicMetadata.markedForDeletion) {
                // Topics can be configured to stay invisible for a number of lookups;
                // count this lookup against that budget before exposing the configs.
                if (topicMetadata.fetchesRemainingUntilVisible > 0)
                    topicMetadata.fetchesRemainingUntilVisible = Math.max(0, topicMetadata.fetchesRemainingUntilVisible - 1);
                else return toConfigObject(topicMetadata.configs);
            }
            // Unknown, marked-for-deletion, and not-yet-visible topics all surface
            // the same error.
            throw new UnknownTopicOrPartitionException("Resource " + resource + " not found.");
        }
        case CLIENT_METRICS: {
            String resourceName = resource.name();
            if (resourceName.isEmpty()) {
                throw new InvalidRequestException("Empty resource name");
            }
            // NOTE(review): an unknown client-metrics name passes null into
            // toConfigObject and would NPE — confirm callers only describe
            // resources that exist.
            return toConfigObject(clientMetricsConfigs.get(resourceName));
        }
        case GROUP: {
            String resourceName = resource.name();
            if (resourceName.isEmpty()) {
                throw new InvalidRequestException("Empty resource name");
            }
            // Overlays the defaults onto the group's config. Note: when the group
            // exists, putIfAbsent mutates the STORED map in place, not a copy.
            Map<String, String> groupConfig = groupConfigs.getOrDefault(resourceName, new HashMap<>());
            defaultGroupConfigs.forEach(groupConfig::putIfAbsent);
            return toConfigObject(groupConfig);
        }
        default:
            throw new UnsupportedOperationException("Not implemented yet");
    }
}
/** Converts a name-to-value map into a {@link Config} of {@link ConfigEntry} objects. */
private static Config toConfigObject(Map<String, String> map) {
    List<ConfigEntry> entries = new ArrayList<>(map.size());
    map.forEach((name, value) -> entries.add(new ConfigEntry(name, value)));
    return new Config(entries);
}
/**
 * Applies incremental config alterations per resource; each resource's future
 * reflects the success or failure of its own ops.
 */
@Override
public synchronized AlterConfigsResult incrementalAlterConfigs(
        Map<ConfigResource, Collection<AlterConfigOp>> configs,
        AlterConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Void>> futures = new HashMap<>();
    for (Map.Entry<ConfigResource, Collection<AlterConfigOp>> entry : configs.entrySet()) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(entry.getKey(), future);
        // A null result from the handler signals success.
        Throwable failure = handleIncrementalResourceAlteration(entry.getKey(), entry.getValue());
        if (failure == null) {
            future.complete(null);
        } else {
            future.completeExceptionally(failure);
        }
    }
    return new AlterConfigsResult(futures);
}
/**
 * Applies incremental SET/DELETE config ops to a broker, topic, client-metrics,
 * or group resource. Returns null on success, or the error to report. The target
 * config map is only committed when every op succeeds.
 */
private synchronized Throwable handleIncrementalResourceAlteration(
        ConfigResource resource, Collection<AlterConfigOp> ops) {
    switch (resource.type()) {
        case BROKER: {
            int brokerId;
            try {
                brokerId = Integer.parseInt(resource.name());
            } catch (NumberFormatException e) {
                return e;
            }
            if (brokerId >= brokerConfigs.size()) {
                return new InvalidRequestException("no such broker as " + brokerId);
            }
            HashMap<String, String> newMap = new HashMap<>(brokerConfigs.get(brokerId));
            Throwable error = applyConfigOps(newMap, ops);
            if (error != null) {
                return error;
            }
            brokerConfigs.set(brokerId, newMap);
            return null;
        }
        case TOPIC: {
            TopicMetadata topicMetadata = allTopics.get(resource.name());
            if (topicMetadata == null) {
                return new UnknownTopicOrPartitionException("No such topic as " +
                    resource.name());
            }
            HashMap<String, String> newMap = new HashMap<>(topicMetadata.configs);
            Throwable error = applyConfigOps(newMap, ops);
            if (error != null) {
                return error;
            }
            topicMetadata.configs = newMap;
            return null;
        }
        case CLIENT_METRICS: {
            String resourceName = resource.name();
            if (resourceName.isEmpty()) {
                return new InvalidRequestException("Empty resource name");
            }
            // An unknown resource is registered (empty) even if an op later fails,
            // preserving the original behavior.
            if (!clientMetricsConfigs.containsKey(resourceName)) {
                clientMetricsConfigs.put(resourceName, new HashMap<>());
            }
            HashMap<String, String> newMap = new HashMap<>(clientMetricsConfigs.get(resourceName));
            Throwable error = applyConfigOps(newMap, ops);
            if (error != null) {
                return error;
            }
            clientMetricsConfigs.put(resourceName, newMap);
            return null;
        }
        case GROUP: {
            String resourceName = resource.name();
            if (resourceName.isEmpty()) {
                return new InvalidRequestException("Empty resource name");
            }
            // Same registration-before-validation behavior as CLIENT_METRICS.
            if (!groupConfigs.containsKey(resourceName)) {
                groupConfigs.put(resourceName, new HashMap<>());
            }
            HashMap<String, String> newMap = new HashMap<>(groupConfigs.get(resourceName));
            Throwable error = applyConfigOps(newMap, ops);
            if (error != null) {
                return error;
            }
            groupConfigs.put(resourceName, newMap);
            return null;
        }
        default:
            return new UnsupportedOperationException();
    }
}

/**
 * Applies each op to {@code target} in place. Returns null on success, or an
 * {@link InvalidRequestException} for an unsupported op type. Ops preceding a
 * failing one have already been applied to {@code target}; callers discard the
 * map on error, so partially-applied state is never committed.
 */
private static Throwable applyConfigOps(Map<String, String> target, Collection<AlterConfigOp> ops) {
    for (AlterConfigOp op : ops) {
        switch (op.opType()) {
            case SET:
                target.put(op.configEntry().name(), op.configEntry().value());
                break;
            case DELETE:
                target.remove(op.configEntry().name());
                break;
            default:
                return new InvalidRequestException(
                    "Unsupported op type " + op.opType());
        }
    }
    return null;
}
/**
 * Records a pending replica log-dir move for each assignment; per-replica
 * validation failures are reported on the corresponding future.
 */
@Override
public synchronized AlterReplicaLogDirsResult alterReplicaLogDirs(
        Map<TopicPartitionReplica, String> replicaAssignment,
        AlterReplicaLogDirsOptions options) {
    Map<TopicPartitionReplica, KafkaFuture<Void>> results = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> assignment : replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = assignment.getKey();
        String targetDir = assignment.getValue();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        results.put(replica, future);
        List<String> brokerDirs = brokerLogDirs.get(replica.brokerId());
        if (brokerDirs == null) {
            // Unknown broker.
            future.completeExceptionally(
                new ReplicaNotAvailableException("Can't find " + replica));
            continue;
        }
        if (!brokerDirs.contains(targetDir)) {
            future.completeExceptionally(
                new KafkaStorageException("Log directory " + targetDir + " is offline"));
            continue;
        }
        TopicMetadata metadata = allTopics.get(replica.topic());
        if (metadata == null || metadata.partitions.size() <= replica.partition()) {
            future.completeExceptionally(
                new ReplicaNotAvailableException("Can't find " + replica));
            continue;
        }
        // Record the pending move so describeReplicaLogDirs can report it.
        String currentDir = metadata.partitionLogDirs.get(replica.partition());
        replicaMoves.put(replica, new ReplicaLogDirInfo(currentDir, 0, targetDir, 0));
        future.complete(null);
    }
    return new AlterReplicaLogDirsResult(results);
}
/**
 * Describes log directories for the requested brokers by walking every known
 * topic and attributing each partition replica to its (single) log dir.
 */
@Override
public synchronized DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers,
                                                          DescribeLogDirsOptions options) {
    Map<Integer, Map<String, LogDirDescription>> unwrappedResults = new HashMap<>();
    // Ensure every requested broker appears in the result, even with no replicas.
    for (Integer broker : brokers) {
        unwrappedResults.putIfAbsent(broker, new HashMap<>());
    }
    for (Map.Entry<String, TopicMetadata> entry : allTopics.entrySet()) {
        String topicName = entry.getKey();
        TopicMetadata topicMetadata = entry.getValue();
        // For tests, we make the assumption that there will always be only 1 entry.
        List<String> partitionLogDirs = topicMetadata.partitionLogDirs;
        List<TopicPartitionInfo> topicPartitionInfos = topicMetadata.partitions;
        for (TopicPartitionInfo topicPartitionInfo : topicPartitionInfos) {
            List<Node> nodes = topicPartitionInfo.replicas();
            for (Node node : nodes) {
                // NOTE(review): assumes every replica's broker id was included in
                // 'brokers'; otherwise this get() returns null and the next line
                // throws NPE — confirm intended.
                Map<String, LogDirDescription> logDirDescriptionMap = unwrappedResults.get(node.id());
                // Merge this partition into the (possibly pre-existing) description
                // of the log dir, preserving any previously recorded error/bytes.
                LogDirDescription logDirDescription = logDirDescriptionMap.getOrDefault(partitionLogDirs.get(0), new LogDirDescription(null, new HashMap<>()));
                Map<TopicPartition, ReplicaInfo> topicPartitionReplicaInfoMap = new HashMap<>(logDirDescription.replicaInfos());
                topicPartitionReplicaInfoMap.put(new TopicPartition(topicName, topicPartitionInfo.partition()), new ReplicaInfo(0, 0, false));
                logDirDescriptionMap.put(partitionLogDirs.get(0), new LogDirDescription(
                    logDirDescription.error(),
                    topicPartitionReplicaInfoMap,
                    logDirDescription.totalBytes().orElse(DescribeLogDirsResponse.UNKNOWN_VOLUME_BYTES),
                    logDirDescription.usableBytes().orElse(DescribeLogDirsResponse.UNKNOWN_VOLUME_BYTES)));
            }
        }
    }
    // Wrap each per-broker map in an already-completed future.
    Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> results = new HashMap<>();
    for (Map.Entry<Integer, Map<String, LogDirDescription>> entry : unwrappedResults.entrySet()) {
        KafkaFutureImpl<Map<String, LogDirDescription>> kafkaFuture = new KafkaFutureImpl<>();
        kafkaFuture.complete(entry.getValue());
        results.put(entry.getKey(), kafkaFuture);
    }
    return new DescribeLogDirsResult(results);
}
/**
 * Reports the current (and pending target, if any) log dir for each replica.
 * Replicas of unknown topics are silently omitted from the result.
 */
@Override
public synchronized DescribeReplicaLogDirsResult describeReplicaLogDirs(
    Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> results = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        if (allTopics.get(replica.topic()) == null) {
            continue;
        }
        KafkaFutureImpl<ReplicaLogDirInfo> future = new KafkaFutureImpl<>();
        results.put(replica, future);
        String logDir = currentLogDir(replica);
        if (logDir == null) {
            // Known topic but out-of-range partition: report unknown dirs/lags.
            future.complete(new ReplicaLogDirInfo(null,
                DescribeLogDirsResponse.INVALID_OFFSET_LAG,
                null,
                DescribeLogDirsResponse.INVALID_OFFSET_LAG));
        } else {
            ReplicaLogDirInfo pendingMove = replicaMoves.get(replica);
            future.complete(pendingMove != null
                ? pendingMove
                : new ReplicaLogDirInfo(logDir, 0, null, 0));
        }
    }
    return new DescribeReplicaLogDirsResult(results);
}
/** Returns the current log dir for {@code replica}, or null if the topic or partition is unknown. */
private synchronized String currentLogDir(TopicPartitionReplica replica) {
    TopicMetadata metadata = allTopics.get(replica.topic());
    if (metadata != null && replica.partition() < metadata.partitionLogDirs.size()) {
        return metadata.partitionLogDirs.get(replica.partition());
    }
    return null;
}
/**
 * Records or cancels pending partition reassignments. An empty Optional cancels
 * any pending reassignment for that partition.
 */
@Override
public synchronized AlterPartitionReassignmentsResult alterPartitionReassignments(
        Map<TopicPartition, Optional<NewPartitionReassignment>> newReassignments,
        AlterPartitionReassignmentsOptions options) {
    Map<TopicPartition, KafkaFuture<Void>> futures = new HashMap<>();
    for (Map.Entry<TopicPartition, Optional<NewPartitionReassignment>> entry : newReassignments.entrySet()) {
        TopicPartition partition = entry.getKey();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(partition, future);
        TopicMetadata metadata = allTopics.get(partition.topic());
        boolean unknown = partition.partition() < 0
            || metadata == null
            || metadata.partitions.size() <= partition.partition();
        if (unknown) {
            future.completeExceptionally(new UnknownTopicOrPartitionException());
            continue;
        }
        Optional<NewPartitionReassignment> reassignment = entry.getValue();
        if (reassignment.isPresent()) {
            reassignments.put(partition, reassignment.get());
        } else {
            reassignments.remove(partition);
        }
        future.complete(null);
    }
    return new AlterPartitionReassignmentsResult(futures);
}
/**
 * Lists pending reassignments. When no explicit partition set is given, every
 * partition with a pending reassignment is reported.
 */
@Override
public synchronized ListPartitionReassignmentsResult listPartitionReassignments(
        Optional<Set<TopicPartition>> partitions,
        ListPartitionReassignmentsOptions options) {
    Map<TopicPartition, PartitionReassignment> result = new HashMap<>();
    for (TopicPartition partition : partitions.orElseGet(reassignments::keySet)) {
        PartitionReassignment pending = findPartitionReassignment(partition);
        if (pending != null) {
            result.put(partition, pending);
        }
    }
    return new ListPartitionReassignmentsResult(KafkaFutureImpl.completedFuture(result));
}
/**
 * Builds the PartitionReassignment view for {@code partition}, or null if no
 * reassignment is pending. Target replicas not yet present are "adding"; current
 * replicas absent from the target set are "removing".
 */
private synchronized PartitionReassignment findPartitionReassignment(TopicPartition partition) {
    NewPartitionReassignment pending = reassignments.get(partition);
    if (pending == null) {
        return null;
    }
    TopicMetadata metadata = allTopics.get(partition.topic());
    if (metadata == null) {
        throw new RuntimeException("Internal MockAdminClient logic error: found " +
            "reassignment for " + partition + ", but no TopicMetadata");
    }
    TopicPartitionInfo info = metadata.partitions.get(partition.partition());
    if (info == null) {
        throw new RuntimeException("Internal MockAdminClient logic error: found " +
            "reassignment for " + partition + ", but no TopicPartitionInfo");
    }
    List<Integer> currentReplicas = new ArrayList<>();
    List<Integer> removing = new ArrayList<>();
    List<Integer> adding = new ArrayList<>(pending.targetReplicas());
    for (Node replica : info.replicas()) {
        int id = replica.id();
        currentReplicas.add(id);
        if (!pending.targetReplicas().contains(id)) {
            removing.add(id);
        }
        adding.remove(Integer.valueOf(id));
    }
    return new PartitionReassignment(currentReplicas, adding, removing);
}
// Offset alteration is not supported by this mock. (Message normalized: the
// original said "Not implement yet", inconsistent with every other stub.)
@Override
public synchronized AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}

@Override
public synchronized AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterStreamsGroupOffsetsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Lists offsets from the mock's recorded beginning/end offsets. Timestamp-based
 * lookup is not supported; any spec other than Earliest resolves to the end
 * offset. (The exception message is normalized from "Not implement yet".)
 */
@Override
public synchronized ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) {
    Map<TopicPartition, KafkaFuture<ListOffsetsResult.ListOffsetsResultInfo>> futures = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {
        TopicPartition tp = entry.getKey();
        OffsetSpec spec = entry.getValue();
        KafkaFutureImpl<ListOffsetsResult.ListOffsetsResultInfo> future = new KafkaFutureImpl<>();
        if (spec instanceof OffsetSpec.TimestampSpec)
            throw new UnsupportedOperationException("Not implemented yet");
        else if (spec instanceof OffsetSpec.EarliestSpec)
            // NOTE(review): assumes an offset was recorded for tp; otherwise get()
            // returns null and the constructor's unboxing throws NPE — confirm.
            future.complete(new ListOffsetsResult.ListOffsetsResultInfo(beginningOffsets.get(tp), -1, Optional.empty()));
        else
            future.complete(new ListOffsetsResult.ListOffsetsResultInfo(endOffsets.get(tp), -1, Optional.empty()));
        futures.put(tp, future);
    }
    return new ListOffsetsResult(futures);
}
// Client-quota APIs are not supported by this mock. (Message normalized: the
// original said "Not implement yet", inconsistent with every other stub.)
@Override
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}

@Override
public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
// SCRAM-credential management and metadata-quorum description are not supported
// by this mock.
@Override
public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Reports every known feature as finalized at exactly its current level and
 * supported over its configured min..max range.
 */
@Override
public DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options) {
    Map<String, FinalizedVersionRange> finalized = new HashMap<>();
    Map<String, SupportedVersionRange> supported = new HashMap<>();
    featureLevels.forEach((feature, level) -> {
        finalized.put(feature, new FinalizedVersionRange(level, level));
        supported.put(feature, new SupportedVersionRange(
            minSupportedFeatureLevels.get(feature),
            maxSupportedFeatureLevels.get(feature)));
    });
    return new DescribeFeaturesResult(KafkaFuture.completedFuture(
        new FeatureMetadata(finalized, Optional.of(123L), supported)));
}
/**
 * Validates and (unless validateOnly) applies finalized feature-level updates.
 * Validation is all-or-nothing: the first failing update fails every future.
 */
@Override
public UpdateFeaturesResult updateFeatures(
    Map<String, FeatureUpdate> featureUpdates,
    UpdateFeaturesOptions options
) {
    Throwable error = null;
    for (Map.Entry<String, FeatureUpdate> entry : featureUpdates.entrySet()) {
        String feature = entry.getKey();
        short cur = featureLevels.getOrDefault(feature, (short) 0);
        short next = entry.getValue().maxVersionLevel();
        short min = minSupportedFeatureLevels.getOrDefault(feature, (short) 0);
        short max = maxSupportedFeatureLevels.getOrDefault(feature, (short) 0);
        try {
            switch (entry.getValue().upgradeType()) {
                case UNKNOWN:
                    throw new InvalidRequestException("Invalid upgrade type.");
                case UPGRADE:
                    // Upgrades may only move the level up (or keep it).
                    if (cur > next) {
                        throw new InvalidUpdateVersionException("Can't upgrade to lower version.");
                    }
                    break;
                case SAFE_DOWNGRADE:
                    // Downgrades may only move the level down (or keep it).
                    if (cur < next) {
                        throw new InvalidUpdateVersionException("Can't downgrade to newer version.");
                    }
                    break;
                case UNSAFE_DOWNGRADE:
                    if (cur < next) {
                        throw new InvalidUpdateVersionException("Can't downgrade to newer version.");
                    }
                    while (next != cur) {
                        // Simulate a scenario where all the even feature levels unsafe to downgrade from.
                        // NOTE(review): this is inside the UNSAFE_DOWNGRADE case, so the
                        // SAFE_DOWNGRADE comparison below can never be true — it looks
                        // like this even-level check was intended for the SAFE_DOWNGRADE
                        // case instead; confirm the intended semantics.
                        if (cur % 2 == 0) {
                            if (entry.getValue().upgradeType() == FeatureUpdate.UpgradeType.SAFE_DOWNGRADE) {
                                throw new InvalidUpdateVersionException("Unable to perform a safe downgrade.");
                            }
                        }
                        cur--;
                    }
                    break;
            }
            // The target level must fall inside the supported range.
            if (next < min) {
                throw new InvalidUpdateVersionException("Can't downgrade below " + min);
            }
            if (next > max) {
                throw new InvalidUpdateVersionException("Can't upgrade above " + max);
            }
        } catch (Exception e) {
            error = invalidUpdateVersion(feature, next, e.getMessage());
            break;
        }
    }
    Map<String, KafkaFuture<Void>> results = new HashMap<>();
    for (Map.Entry<String, FeatureUpdate> entry : featureUpdates.entrySet()) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        if (error == null) {
            future.complete(null);
            // Only mutate state when the caller did not request a dry run.
            if (!options.validateOnly()) {
                featureLevels.put(entry.getKey(), entry.getValue().maxVersionLevel());
            }
        } else {
            future.completeExceptionally(error);
        }
        results.put(entry.getKey(), future);
    }
    return new UpdateFeaturesResult(results);
}
/** Builds the {@link InvalidRequestException} reported for a rejected feature update. */
private InvalidRequestException invalidUpdateVersion(String feature, short version, String message) {
    String text = String.format("Invalid update version %d for feature %s. %s", version, feature, message);
    return new InvalidRequestException(text);
}
/**
 * Broker unregistration succeeds only when the mock is configured with a KRaft
 * controller; otherwise the future fails with UnsupportedVersionException.
 */
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    if (!usingRaftController) {
        KafkaFutureImpl<Void> failed = new KafkaFutureImpl<>();
        failed.completeExceptionally(new UnsupportedVersionException(""));
        return new UnregisterBrokerResult(failed);
    }
    return new UnregisterBrokerResult(KafkaFuture.completedFuture(null));
}
// Transaction and producer introspection APIs are not supported by this mock.
@Override
public DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, DescribeProducersOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public TerminateTransactionResult forceTerminateTransaction(String transactionalId, TerminateTransactionOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public ListTransactionsResult listTransactions(ListTransactionsOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public FenceProducersResult fenceProducers(Collection<String> transactionalIds, FenceProducersOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
/**
 * Lists config resources of the requested types. An empty type filter means
 * "list every supported resource type".
 */
@Override
public ListConfigResourcesResult listConfigResources(Set<ConfigResource.Type> configResourceTypes, ListConfigResourcesOptions options) {
    Set<ConfigResource> resources = new HashSet<>();
    boolean listAll = configResourceTypes.isEmpty();
    if (listAll || configResourceTypes.contains(ConfigResource.Type.TOPIC)) {
        for (String topic : allTopics.keySet()) {
            resources.add(new ConfigResource(ConfigResource.Type.TOPIC, topic));
        }
    }
    if (listAll || configResourceTypes.contains(ConfigResource.Type.BROKER)) {
        for (int id = 0; id < brokers.size(); id++) {
            resources.add(new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(id)));
        }
    }
    if (listAll || configResourceTypes.contains(ConfigResource.Type.BROKER_LOGGER)) {
        for (int id = 0; id < brokers.size(); id++) {
            resources.add(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, String.valueOf(id)));
        }
    }
    if (listAll || configResourceTypes.contains(ConfigResource.Type.CLIENT_METRICS)) {
        for (String name : clientMetricsConfigs.keySet()) {
            resources.add(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name));
        }
    }
    if (listAll || configResourceTypes.contains(ConfigResource.Type.GROUP)) {
        for (String name : groupConfigs.keySet()) {
            resources.add(new ConfigResource(ConfigResource.Type.GROUP, name));
        }
    }
    KafkaFutureImpl<Collection<ConfigResource>> future = new KafkaFutureImpl<>();
    future.complete(resources);
    return new ListConfigResourcesResult(future);
}
// Deprecated Admin API; lists one entry per configured client-metrics resource name.
@Override
@SuppressWarnings({"deprecation", "removal"})
public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) {
    KafkaFutureImpl<Collection<ClientMetricsResourceListing>> future = new KafkaFutureImpl<>();
    future.complete(clientMetricsConfigs.keySet().stream().map(ClientMetricsResourceListing::new).collect(Collectors.toList()));
    return new ListClientMetricsResourcesResult(future);
}
// KRaft voter-set changes are not supported by this mock.
@Override
public AddRaftVoterResult addRaftVoter(int voterId, Uuid voterDirectoryId, Set<RaftVoterEndpoint> endpoints, AddRaftVoterOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public RemoveRaftVoterResult removeRaftVoter(int voterId, Uuid voterDirectoryId, RemoveRaftVoterOptions options) {
    throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds, DescribeShareGroupsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map<TopicPartition, Long> offsets, AlterShareGroupOffsetsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized ListShareGroupOffsetsResult listShareGroupOffsets(Map<String, ListShareGroupOffsetsSpec> groupSpecs, ListShareGroupOffsetsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set<String> topics, DeleteShareGroupOffsetsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds, DeleteShareGroupsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds, DescribeStreamsGroupsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds, DescribeClassicGroupsOptions options) {
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public synchronized void close(Duration timeout) {}
public synchronized void updateBeginningOffsets(Map<TopicPartition, Long> newOffsets) {
beginningOffsets.putAll(newOffsets);
}
public synchronized void updateEndOffsets(final Map<TopicPartition, Long> newOffsets) {
endOffsets.putAll(newOffsets);
}
public synchronized void updateConsumerGroupOffsets(final Map<TopicPartition, Long> newOffsets) {
committedOffsets.putAll(newOffsets);
}
private static final | Builder |
java | google__guice | core/test/com/google/inject/ProvisionListenerTest.java | {
"start": 21837,
"end": 22027
} | class ____ implements ProvisionListener {
@Override
public <T> void onProvision(ProvisionInvocation<T> provision) {
provision.provision();
}
}
private static | JustProvision |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/project/ProjectBuilder.java | {
"start": 1157,
"end": 5625
} | interface ____ {
/**
* Builds a project descriptor from the specified POM file.
*
* @param projectFile The POM file to build the project from, must not be {@code null}.
* @param request The project building request that holds further parameters, must not be {@code null}.
* @return The result of the project building, never {@code null}.
* @throws ProjectBuildingException If the project descriptor could not be successfully built.
*/
ProjectBuildingResult build(File projectFile, ProjectBuildingRequest request) throws ProjectBuildingException;
/**
* Builds a project descriptor for the specified artifact.
*
* @param projectArtifact The POM artifact to build the project from, must not be {@code null}.
* @param request The project building request that holds further parameters, must not be {@code null}.
* @return The result of the project building, never {@code null}.
* @throws ProjectBuildingException If the project descriptor could not be successfully built.
*/
ProjectBuildingResult build(Artifact projectArtifact, ProjectBuildingRequest request)
throws ProjectBuildingException;
/**
* Builds a project descriptor for the specified artifact.
*
* @param projectArtifact The POM artifact to build the project from, must not be {@code null}.
* @param allowStubModel A flag controlling the case of a missing POM artifact. If {@code true} and the specified
* POM artifact does not exist, a simple stub model will be returned. If {@code false}, an exception will
* be thrown.
* @param request The project building request that holds further parameters, must not be {@code null}.
* @return The result of the project building, never {@code null}.
* @throws ProjectBuildingException If the project descriptor could not be successfully built.
*/
ProjectBuildingResult build(Artifact projectArtifact, boolean allowStubModel, ProjectBuildingRequest request)
throws ProjectBuildingException;
/**
* Builds a project descriptor for the specified model source.
*
* @param modelSource The source of the model to build the project descriptor from, must not be {@code null}.
* @param request The project building request that holds further parameters, must not be {@code null}.
* @return The result of the project building, never {@code null}.
* @throws ProjectBuildingException If the project descriptor could not be successfully built.
*
* @see org.apache.maven.model.building.ModelSource2
*/
ProjectBuildingResult build(ModelSource modelSource, ProjectBuildingRequest request)
throws ProjectBuildingException;
/**
* Builds a project descriptor for the specified model source.
*
* @param modelSource The source of the model to build the project descriptor from, must not be {@code null}.
* @param request The project building request that holds further parameters, must not be {@code null}.
* @return The result of the project building, never {@code null}.
* @throws ProjectBuildingException If the project descriptor could not be successfully built.
*
* @see org.apache.maven.model.building.ModelSource2
*/
ProjectBuildingResult build(org.apache.maven.api.services.ModelSource modelSource, ProjectBuildingRequest request)
throws ProjectBuildingException;
/**
* Builds the projects for the specified POM files and optionally their children.
*
* @param pomFiles The POM files to build, must not be {@code null}.
* @param recursive {@code true} to recursively build submodules referenced by the POM files, {@code false} to
* build only the specified POM files.
* @param request The project builder configuration that provides further parameters, must not be {@code null}.
* @return The results of the project builder where each result corresponds to one project that was built, never
* {@code null}.
* @throws ProjectBuildingException If an error was encountered during building of any project.
* {@link ProjectBuildingException#getResults()} provides access to the details of the problems.
*/
List<ProjectBuildingResult> build(List<File> pomFiles, boolean recursive, ProjectBuildingRequest request)
throws ProjectBuildingException;
}
| ProjectBuilder |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/dynamic/output/CodecAwareOutputResolverUnitTests.java | {
"start": 600,
"end": 2661
} | class ____ {
private CodecAwareOutputFactoryResolver resolver = new CodecAwareOutputFactoryResolver(
new OutputRegistryCommandOutputFactoryResolver(new OutputRegistry()), new ByteBufferAndStringCodec());
@Test
void shouldResolveValueOutput() {
CommandOutput<?, ?, ?> commandOutput = getCommandOutput("string");
assertThat(commandOutput).isInstanceOf(ValueOutput.class);
}
@Test
void shouldResolveValueListOutput() {
assertThat(getCommandOutput("stringList")).isOfAnyClassIn(ValueListOutput.class, StringListOutput.class);
assertThat(getCommandOutput("charSequenceList")).isOfAnyClassIn(ValueListOutput.class, StringListOutput.class);
}
@Test
void shouldResolveKeyOutput() {
CommandOutput<?, ?, ?> commandOutput = getCommandOutput("byteBuffer");
assertThat(commandOutput).isInstanceOf(KeyOutput.class);
}
@Test
void shouldResolveKeyListOutput() {
CommandOutput<?, ?, ?> commandOutput = getCommandOutput("byteBufferList");
assertThat(commandOutput).isInstanceOf(KeyListOutput.class);
}
@Test
void shouldResolveListOfMapsOutput() {
CommandOutput<?, ?, ?> commandOutput = getCommandOutput("listOfMapsOutput");
assertThat(commandOutput).isInstanceOf(ListOfMapsOutput.class);
}
@Test
void shouldResolveMapsOutput() {
CommandOutput<?, ?, ?> commandOutput = getCommandOutput("mapOutput");
assertThat(commandOutput).isInstanceOf(MapOutput.class);
}
CommandOutput<?, ?, ?> getCommandOutput(String methodName) {
Method method = ReflectionUtils.findMethod(CommandMethods.class, methodName);
CommandMethod commandMethod = DeclaredCommandMethod.create(method);
CommandOutputFactory factory = resolver
.resolveCommandOutput(new OutputSelector(commandMethod.getReturnType(), new ByteBufferAndStringCodec()));
return factory.create(new ByteBufferAndStringCodec());
}
private static | CodecAwareOutputResolverUnitTests |
java | apache__camel | components/camel-infinispan/camel-infinispan-embedded/src/main/java/org/apache/camel/component/infinispan/embedded/InfinispanEmbeddedProducer.java | {
"start": 1086,
"end": 2415
} | class ____ extends InfinispanProducer<InfinispanEmbeddedManager, InfinispanEmbeddedConfiguration> {
public InfinispanEmbeddedProducer(InfinispanEmbeddedEndpoint endpoint,
String cacheName,
InfinispanEmbeddedManager manager,
InfinispanEmbeddedConfiguration configuration) {
super(endpoint, cacheName, manager, configuration);
}
// ************************************
// Operations
// ************************************
@SuppressWarnings("unchecked")
@InvokeOnHeader("STATS")
public void onStats(Message message) {
final Cache<Object, Object> cache = getManager().getCache(message, getCacheName(), Cache.class);
final Object result = cache.getAdvancedCache().getStats();
setResult(message, result);
}
@SuppressWarnings("unchecked")
@InvokeOnHeader("QUERY")
public void onQuery(Message message) {
final Cache<Object, Object> cache = getManager().getCache(message, getCacheName(), Cache.class);
final Query<?> query = InfinispanEmbeddedUtil.buildQuery(getConfiguration(), cache, message);
if (query != null) {
setResult(message, query.execute().list());
}
}
}
| InfinispanEmbeddedProducer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetomany/inheritance/joined/Product.java | {
"start": 514,
"end": 848
} | class ____ {
@Id
@GeneratedValue
private int entid;
@Column(name="INVCODE")
private String inventoryCode;
public Product() {
}
public Product(String inventoryCode) {
this.inventoryCode = inventoryCode;
}
public int getEntid() {
return entid;
}
public String getInventoryCode() {
return inventoryCode;
}
}
| Product |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/mock/web/HeaderValueHolder.java | {
"start": 1022,
"end": 1951
} | class ____ {
private final List<Object> values = new LinkedList<>();
void setValue(@Nullable Object value) {
this.values.clear();
if (value != null) {
this.values.add(value);
}
}
void addValue(Object value) {
this.values.add(value);
}
void addValues(Collection<?> values) {
this.values.addAll(values);
}
void addValueArray(Object values) {
CollectionUtils.mergeArrayIntoCollection(values, this.values);
}
List<Object> getValues() {
return Collections.unmodifiableList(this.values);
}
List<String> getStringValues() {
return this.values.stream().map(Object::toString).toList();
}
@Nullable Object getValue() {
return (!this.values.isEmpty() ? this.values.get(0) : null);
}
@Nullable String getStringValue() {
return (!this.values.isEmpty() ? String.valueOf(this.values.get(0)) : null);
}
@Override
public String toString() {
return this.values.toString();
}
}
| HeaderValueHolder |
java | apache__dubbo | dubbo-demo/dubbo-demo-api/dubbo-demo-api-provider/src/main/java/org/apache/dubbo/demo/provider/Application.java | {
"start": 1306,
"end": 2345
} | class ____ {
private static final String ZOOKEEPER_URL = "zookeeper://127.0.0.1:2181";
public static void main(String[] args) {
startWithBootstrap();
}
private static void startWithBootstrap() {
ServiceConfig<DemoServiceImpl> service = new ServiceConfig<>();
service.setInterface(DemoService.class);
service.setRef(new DemoServiceImpl());
ConfigCenterConfig configCenterConfig = new ConfigCenterConfig();
configCenterConfig.setAddress(ZOOKEEPER_URL);
DubboBootstrap bootstrap = DubboBootstrap.getInstance();
bootstrap
.application(new ApplicationConfig("dubbo-demo-api-provider"))
.configCenter(configCenterConfig)
.registry(new RegistryConfig(ZOOKEEPER_URL))
.metadataReport(new MetadataReportConfig(ZOOKEEPER_URL))
.protocol(new ProtocolConfig(CommonConstants.DUBBO, -1))
.service(service)
.start()
.await();
}
}
| Application |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderElectionTest.java | {
"start": 8914,
"end": 9325
} | class ____ implements ServiceClass {
@Override
public void setup(FatalErrorHandler fatalErrorHandler) {
// noop
}
@Override
public void teardown() {
// noop
}
@Override
public LeaderElection createLeaderElection() {
return new StandaloneLeaderElection(UUID.randomUUID());
}
}
}
| StandaloneServiceClass |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableLeftJoinTest.java | {
"start": 2243,
"end": 10993
} | class ____ {
private static final KeyValueTimestamp[] EMPTY = new KeyValueTimestamp[0];
private final String streamTopic = "streamTopic";
private final String tableTopic = "tableTopic";
private TestInputTopic<Integer, String> inputStreamTopic;
private TestInputTopic<Integer, String> inputTableTopic;
private final int[] expectedKeys = {0, 1, 2, 3};
private TopologyTestDriver driver;
private MockApiProcessor<Integer, String, Void, Void> processor;
private StreamsBuilder builder;
@BeforeEach
public void setUp() {
builder = new StreamsBuilder();
final KStream<Integer, String> stream;
final KTable<Integer, String> table;
final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
final Consumed<Integer, String> consumed = Consumed.with(Serdes.Integer(), Serdes.String());
stream = builder.stream(streamTopic, consumed);
table = builder.table(tableTopic, consumed);
stream.leftJoin(table, MockValueJoiner.TOSTRING_JOINER).process(supplier);
final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.Integer(), Serdes.String());
driver = new TopologyTestDriver(builder.build(), props);
inputStreamTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
inputTableTopic = driver.createInputTopic(tableTopic, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
processor = supplier.theCapturedProcessor();
}
@AfterEach
public void cleanup() {
driver.close();
}
private void pushToStream(final int messageCount, final String valuePrefix) {
for (int i = 0; i < messageCount; i++) {
inputStreamTopic.pipeInput(expectedKeys[i], valuePrefix + expectedKeys[i], i);
}
}
private void pushToTable(final int messageCount, final String valuePrefix) {
final Random r = new Random(System.currentTimeMillis());
for (int i = 0; i < messageCount; i++) {
inputTableTopic.pipeInput(
expectedKeys[i],
valuePrefix + expectedKeys[i],
r.nextInt(Integer.MAX_VALUE));
}
}
private void pushNullValueToTable(final int messageCount) {
for (int i = 0; i < messageCount; i++) {
inputTableTopic.pipeInput(expectedKeys[i], null);
}
}
@Test
public void shouldRequireCopartitionedStreams() {
final Collection<Set<String>> copartitionGroups =
TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups();
assertEquals(1, copartitionGroups.size());
assertEquals(Set.of(streamTopic, tableTopic), copartitionGroups.iterator().next());
}
@Test
public void shouldJoinWithEmptyTableOnStreamUpdates() {
// push two items to the primary stream. the table is empty
pushToStream(2, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+null", 0),
new KeyValueTimestamp<>(1, "X1+null", 1));
}
@Test
public void shouldNotJoinOnTableUpdates() {
// push two items to the primary stream. the table is empty
pushToStream(2, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+null", 0),
new KeyValueTimestamp<>(1, "X1+null", 1));
// push two items to the table. this should not produce any item.
pushToTable(2, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+Y0", 0),
new KeyValueTimestamp<>(1, "X1+Y1", 1),
new KeyValueTimestamp<>(2, "X2+null", 2),
new KeyValueTimestamp<>(3, "X3+null", 3));
// push all items to the table. this should not produce any item
pushToTable(4, "YY");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+YY0", 0),
new KeyValueTimestamp<>(1, "X1+YY1", 1),
new KeyValueTimestamp<>(2, "X2+YY2", 2),
new KeyValueTimestamp<>(3, "X3+YY3", 3));
// push all items to the table. this should not produce any item
pushToTable(4, "YYY");
processor.checkAndClearProcessResult(EMPTY);
}
@Test
public void shouldJoinRegardlessIfMatchFoundOnStreamUpdates() {
// push two items to the table. this should not produce any item.
pushToTable(2, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+Y0", 0),
new KeyValueTimestamp<>(1, "X1+Y1", 1),
new KeyValueTimestamp<>(2, "X2+null", 2),
new KeyValueTimestamp<>(3, "X3+null", 3));
}
@Test
public void shouldClearTableEntryOnNullValueUpdates() {
// push all four items to the table. this should not produce any item.
pushToTable(4, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "X0+Y0", 0),
new KeyValueTimestamp<>(1, "X1+Y1", 1),
new KeyValueTimestamp<>(2, "X2+Y2", 2),
new KeyValueTimestamp<>(3, "X3+Y3", 3));
// push two items with null to the table as deletes. this should not produce any item.
pushNullValueToTable(2);
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "XX");
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "XX0+null", 0),
new KeyValueTimestamp<>(1, "XX1+null", 1),
new KeyValueTimestamp<>(2, "XX2+Y2", 2),
new KeyValueTimestamp<>(3, "XX3+Y3", 3));
}
@Test
public void shouldNotDropLeftNullKey() {
// push all four items to the table. this should not produce any item.
pushToTable(1, "Y");
processor.checkAndClearProcessResult(EMPTY);
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamKTableJoin.class)) {
final TestInputTopic<Integer, String> inputTopic =
driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer());
inputTopic.pipeInput(null, "A", 0);
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(null, "A+null", 0));
assertTrue(appender.getMessages().isEmpty());
}
assertEquals(
0.0,
driver.metrics().get(
new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", Thread.currentThread().getName()),
mkEntry("task-id", "0_0")
)
))
.metricValue()
);
}
@Test
public void shouldLogAndMeterWhenSkippingNullLeftValue() {
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamKTableJoin.class)) {
final TestInputTopic<Integer, String> inputTopic =
driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer());
inputTopic.pipeInput(1, null);
assertTrue(appender.getMessages().contains("Skipping record due to null join key or value. topic=[streamTopic] partition=[0] offset=[0]"));
}
assertEquals(
1.0,
driver.metrics().get(
new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", Thread.currentThread().getName()),
mkEntry("task-id", "0_0")
)
))
.metricValue()
);
}
}
| KStreamKTableLeftJoinTest |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/main/java/org/hibernate/processor/annotation/IdClassMetaAttribute.java | {
"start": 829,
"end": 1589
} | class ____ {@link ")
.append( parent.getQualifiedName() )
.append( "}\n **/\n" )
.append( "public record Id" );
String delimiter = "(";
for ( MetaAttribute component : components ) {
decl.append( delimiter ).append( parent.importType( component.getTypeDeclaration() ) )
.append( ' ' ).append( component.getPropertyName() );
delimiter = ", ";
}
return decl.append( ") {}" ).toString();
}
@Override
public String getAttributeNameDeclarationString() {
return "";
}
@Override
public String getMetaType() {
return "";
}
@Override
public String getPropertyName() {
return "";
}
@Override
public String getTypeDeclaration() {
return "";
}
@Override
public Metamodel getHostingEntity() {
return parent;
}
}
| for |
java | apache__flink | flink-state-backends/flink-statebackend-rocksdb/src/test/java/org/apache/flink/state/rocksdb/RocksDBStateDownloaderTest.java | {
"start": 13530,
"end": 16861
} | class ____ implements ExecutorService {
private final CompletableFuture<Void> unblockFuture = new CompletableFuture<>();
private final ExecutorService delegate;
private BlockingExecutorService() {
delegate = Executors.newSingleThreadExecutor();
}
@Override
public void shutdown() {
delegate.shutdown();
}
@Nonnull
@Override
public List<Runnable> shutdownNow() {
return delegate.shutdownNow();
}
@Override
public boolean isShutdown() {
return delegate.isShutdown();
}
@Override
public boolean isTerminated() {
return delegate.isTerminated();
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
return delegate.awaitTermination(timeout, unit);
}
@Nonnull
@Override
public <T> Future<T> submit(Callable<T> task) {
return delegate.submit(wrap(task));
}
@Nonnull
@Override
public <T> Future<T> submit(Runnable task, T result) {
return delegate.submit(wrap(task), result);
}
@Nonnull
@Override
public Future<?> submit(Runnable task) {
return delegate.submit(wrap(task));
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
throws InterruptedException {
return delegate.invokeAll(wrap(tasks));
}
@Nonnull
@Override
public <T> List<Future<T>> invokeAll(
Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return delegate.invokeAll(wrap(tasks), timeout, unit);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
throws InterruptedException, ExecutionException {
return delegate.invokeAny(wrap(tasks));
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return delegate.invokeAny(wrap(tasks), timeout, unit);
}
@Override
public void execute(Runnable command) {
delegate.execute(wrap(command));
}
private <T> Callable<T> wrap(Callable<T> task) {
return () -> {
T result = task.call();
unblockFuture.join();
return result;
};
}
private Runnable wrap(Runnable task) {
return () -> {
try {
unblockFuture.join();
} catch (Exception e) {
throw new RuntimeException(e);
}
task.run();
};
}
private <T> List<Callable<T>> wrap(Collection<? extends Callable<T>> tasks) {
return tasks.stream().map(this::wrap).collect(Collectors.toList());
}
public void unblock() {
this.unblockFuture.complete(null);
}
}
}
| BlockingExecutorService |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/socket/SocketAccess.java | {
"start": 868,
"end": 999
} | class ____ the operations requiring access in
* {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
*/
public final | wraps |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java | {
"start": 1254,
"end": 3171
} | class ____ extends ValuesSourceAggregatorFactory {
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
builder.register(IpRangeAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.IP, BinaryRangeAggregator::new, true);
}
private final IpRangeAggregatorSupplier aggregatorSupplier;
private final List<BinaryRangeAggregator.Range> ranges;
private final boolean keyed;
public BinaryRangeAggregatorFactory(
String name,
ValuesSourceConfig config,
List<BinaryRangeAggregator.Range> ranges,
boolean keyed,
AggregationContext context,
AggregatorFactory parent,
Builder subFactoriesBuilder,
Map<String, Object> metadata,
IpRangeAggregatorSupplier aggregatorSupplier
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metadata);
this.aggregatorSupplier = aggregatorSupplier;
this.ranges = ranges;
this.keyed = keyed;
}
@Override
protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
return new BinaryRangeAggregator(
name,
factories,
null,
config.format(),
ranges,
keyed,
context,
parent,
CardinalityUpperBound.NONE,
metadata
);
}
@Override
protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
throws IOException {
return aggregatorSupplier.build(
name,
factories,
config.getValuesSource(),
config.format(),
ranges,
keyed,
context,
parent,
cardinality,
metadata
);
}
}
| BinaryRangeAggregatorFactory |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/TimeDeserializerTest2.java | {
"start": 212,
"end": 1688
} | class ____ extends TestCase {
public void test_0() throws Exception {
long millis = System.currentTimeMillis();
JSON.parse("{\"@type\":\"java.sql.Time\",\"value\":" + millis + "}");
}
public void test_error() throws Exception {
long millis = System.currentTimeMillis();
Exception error = null;
try {
JSON.parse("{\"@type\":\"java.sql.Time\",33:" + millis + "}");
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_1() throws Exception {
Exception error = null;
try {
JSON.parse("{\"@type\":\"java.sql.Time\",\"value\":true}");
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_2() throws Exception {
long millis = System.currentTimeMillis();
Exception error = null;
try {
JSON.parse("{\"@type\":\"java.sql.Time\",\"value\":" + millis + ",}");
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_3() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"time\":{}}", VO.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public static | TimeDeserializerTest2 |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeNumeric.java | {
"start": 828,
"end": 895
} | class ____ extends BasicErrorMessageFactory {
public | ShouldBeNumeric |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/standalone/ResponseBodyTests.java | {
"start": 1670,
"end": 2625
} | class ____ {
@Test
void json() throws Exception {
standaloneSetup(new PersonController()).defaultResponseCharacterEncoding(UTF_8).build()
// We use a name containing an umlaut to test UTF-8 encoding for the request and the response.
.perform(get("/person/Jürgen").characterEncoding(UTF_8).accept(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andExpect(content().contentType("application/json"))
.andExpect(content().encoding(UTF_8))
.andExpect(content().string(containsString("Jürgen")))
.andExpect(jsonPath("$.name").value("Jürgen"))
.andExpect(jsonPath("$.age").value(42))
.andExpect(jsonPath("$.age").value(42.0f))
.andExpect(jsonPath("$.age").value(equalTo(42)))
.andExpect(jsonPath("$.age").value(equalTo(42.0f), Float.class))
.andExpect(jsonPath("$.age", equalTo(42)))
.andExpect(jsonPath("$.age", equalTo(42.0f), Float.class));
}
@RestController
private static | ResponseBodyTests |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/cfg/MutableCoercionConfig.java | {
"start": 745,
"end": 1522
} | class ____ builder-style mapper construction with
* <ul>
* <li>{@link MapperBuilder#withCoercionConfig(Class, Consumer)},</li>
* <li>{@link MapperBuilder#withCoercionConfig(tools.jackson.databind.type.LogicalType, Consumer)} and</li>
* <li>{@link MapperBuilder#withCoercionConfigDefaults(Consumer)}</li>
* </ul>
* ... these builder methods. Refrain from using this method outside of builder phase.
*/
public MutableCoercionConfig setCoercion(CoercionInputShape shape,
CoercionAction action) {
_coercionsByShape[shape.ordinal()] = action;
return this;
}
public MutableCoercionConfig setAcceptBlankAsEmpty(Boolean state) {
_acceptBlankAsEmpty = state;
return this;
}
}
| during |
java | google__guice | core/src/com/google/inject/internal/DefaultConstructionProxyFactory.java | {
"start": 1377,
"end": 1450
} | class ____.
*
* @author crazybob@google.com (Bob Lee)
*/
final | constructor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/store/Store.java | {
"start": 54514,
"end": 56261
} | class ____ {
/**
* Files that exist in both snapshots and they can be considered the same ie. they don't need to be recovered
*/
public final List<StoreFileMetadata> identical;
/**
* Files that exist in both snapshots but their they are not identical
*/
public final List<StoreFileMetadata> different;
/**
* Files that exist in the source but not in the target
*/
public final List<StoreFileMetadata> missing;
RecoveryDiff(List<StoreFileMetadata> identical, List<StoreFileMetadata> different, List<StoreFileMetadata> missing) {
this.identical = identical;
this.different = different;
this.missing = missing;
}
/**
* Returns the sum of the files in this diff.
*/
public int size() {
return identical.size() + different.size() + missing.size();
}
@Override
public String toString() {
return "RecoveryDiff{" + "identical=" + identical + ", different=" + different + ", missing=" + missing + '}';
}
}
/**
* Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup.
* This includes write lock files
*/
public static boolean isAutogenerated(String name) {
return IndexWriter.WRITE_LOCK_NAME.equals(name);
}
/**
* Produces a string representation of the given digest value.
*/
public static String digestToString(long digest) {
return Long.toString(digest, Character.MAX_RADIX);
}
/**
* Index input that calculates checksum as data is read from the input.
* <p>
* This | RecoveryDiff |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/source/presencecheck/spi/NonDirectMapper.java | {
"start": 1413,
"end": 1640
} | class ____ {
private final String name;
public GoalKeeperSource(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
}
| GoalKeeperSource |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/reflect/TestReflect.java | {
"start": 17031,
"end": 17451
} | class ____ {
@Nullable
@AvroDefault("1")
int foo;
}
@Test
public void testAvroNullableDefault() {
check(NullableDefaultTest.class,
"{\"type\":\"record\",\"name\":\"NullableDefaultTest\","
+ "\"namespace\":\"org.apache.avro.reflect.TestReflect\",\"fields\":["
+ "{\"name\":\"foo\",\"type\":[\"null\",\"int\"],\"default\":1}]}");
}
private static | NullableDefaultTest |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/AbstractBuildLog.java | {
"start": 1323,
"end": 1509
} | class ____ {@link BuildLog} implementations.
*
* @author Phillip Webb
* @author Scott Frederick
* @author Andrey Shlykov
* @author Rafael Ceccone
* @since 2.3.0
*/
public abstract | for |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/access/jpa/Course4.java | {
"start": 534,
"end": 1035
} | class ____ {
@Id
@GeneratedValue
private long id;
private String title;
@OneToMany(cascade = CascadeType.ALL)
private List<Student> students;
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public List<Student> getStudents() {
return students;
}
public void setStudents(List<Student> students) {
this.students = students;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
}
| Course4 |
java | apache__camel | components/camel-hazelcast/src/test/java/org/apache/camel/component/hazelcast/HazelcastSedaInOutTest.java | {
"start": 1486,
"end": 2973
} | class ____ extends CamelTestSupport {
@EndpointInject("mock:result")
private MockEndpoint mock;
private HazelcastInstance hazelcastInstance;
@BeforeAll
public void beforeEach() {
hazelcastInstance = Hazelcast.newHazelcastInstance();
}
@AfterAll
public void afterEach() {
if (hazelcastInstance != null) {
hazelcastInstance.shutdown();
}
}
@Test
public void sendInOut() throws Exception {
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("test");
template.send("direct:foo", ExchangePattern.InOut, new Processor() {
public void process(Exchange exchange) throws Exception {
exchange.getIn().setBody("test");
}
});
MockEndpoint.assertIsSatisfied(context);
mock.reset();
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
HazelcastCamelTestHelper.registerHazelcastComponents(context, hazelcastInstance);
return context;
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:foo").to("hazelcast-seda:foo");
from("hazelcast-seda:foo").to("mock:result");
}
};
}
}
| HazelcastSedaInOutTest |
java | apache__dubbo | dubbo-metadata/dubbo-metadata-processor/src/main/java/org/apache/dubbo/metadata/annotation/processing/util/AnnotationUtils.java | {
"start": 2374,
"end": 11170
} | interface ____ {
static AnnotationMirror getAnnotation(
AnnotatedConstruct annotatedConstruct, Class<? extends Annotation> annotationClass) {
return annotationClass == null ? null : getAnnotation(annotatedConstruct, annotationClass.getTypeName());
}
static AnnotationMirror getAnnotation(AnnotatedConstruct annotatedConstruct, CharSequence annotationClassName) {
List<AnnotationMirror> annotations = getAnnotations(annotatedConstruct, annotationClassName);
return annotations.isEmpty() ? null : annotations.get(0);
}
static List<AnnotationMirror> getAnnotations(
AnnotatedConstruct annotatedConstruct, Class<? extends Annotation> annotationClass) {
return annotationClass == null
? emptyList()
: getAnnotations(annotatedConstruct, annotationClass.getTypeName());
}
static List<AnnotationMirror> getAnnotations(
AnnotatedConstruct annotatedConstruct, CharSequence annotationClassName) {
return getAnnotations(
annotatedConstruct, annotation -> isSameType(annotation.getAnnotationType(), annotationClassName));
}
static List<AnnotationMirror> getAnnotations(AnnotatedConstruct annotatedConstruct) {
return getAnnotations(annotatedConstruct, EMPTY_ARRAY);
}
static List<AnnotationMirror> getAnnotations(
AnnotatedConstruct annotatedConstruct, Predicate<AnnotationMirror>... annotationFilters) {
AnnotatedConstruct actualAnnotatedConstruct = annotatedConstruct;
if (annotatedConstruct instanceof TypeMirror) {
actualAnnotatedConstruct = ofTypeElement((TypeMirror) actualAnnotatedConstruct);
}
return actualAnnotatedConstruct == null
? emptyList()
: filterAll(
(List<AnnotationMirror>) actualAnnotatedConstruct.getAnnotationMirrors(), annotationFilters);
}
static List<AnnotationMirror> getAllAnnotations(TypeMirror type) {
return getAllAnnotations(ofTypeElement(type));
}
static List<AnnotationMirror> getAllAnnotations(Element element) {
return getAllAnnotations(element, EMPTY_ARRAY);
}
static List<AnnotationMirror> getAllAnnotations(TypeMirror type, Class<? extends Annotation> annotationClass) {
return getAllAnnotations(ofTypeElement(type), annotationClass);
}
static List<AnnotationMirror> getAllAnnotations(Element element, Class<? extends Annotation> annotationClass) {
return element == null || annotationClass == null
? emptyList()
: getAllAnnotations(element, annotationClass.getTypeName());
}
static List<AnnotationMirror> getAllAnnotations(TypeMirror type, CharSequence annotationClassName) {
return getAllAnnotations(ofTypeElement(type), annotationClassName);
}
static List<AnnotationMirror> getAllAnnotations(Element element, CharSequence annotationClassName) {
return getAllAnnotations(
element, annotation -> isSameType(annotation.getAnnotationType(), annotationClassName));
}
static List<AnnotationMirror> getAllAnnotations(TypeMirror type, Predicate<AnnotationMirror>... annotationFilters) {
return getAllAnnotations(ofTypeElement(type), annotationFilters);
}
static List<AnnotationMirror> getAllAnnotations(Element element, Predicate<AnnotationMirror>... annotationFilters) {
List<AnnotationMirror> allAnnotations = isTypeElement(element)
? getHierarchicalTypes(ofTypeElement(element)).stream()
.map(AnnotationUtils::getAnnotations)
.flatMap(Collection::stream)
.collect(Collectors.toList())
: element == null ? emptyList() : (List<AnnotationMirror>) element.getAnnotationMirrors();
return filterAll(allAnnotations, annotationFilters);
}
static List<AnnotationMirror> getAllAnnotations(ProcessingEnvironment processingEnv, Type annotatedType) {
return getAllAnnotations(processingEnv, annotatedType, EMPTY_ARRAY);
}
static List<AnnotationMirror> getAllAnnotations(
ProcessingEnvironment processingEnv, Type annotatedType, Predicate<AnnotationMirror>... annotationFilters) {
return annotatedType == null
? emptyList()
: getAllAnnotations(processingEnv, annotatedType.getTypeName(), annotationFilters);
}
static List<AnnotationMirror> getAllAnnotations(
ProcessingEnvironment processingEnv,
CharSequence annotatedTypeName,
Predicate<AnnotationMirror>... annotationFilters) {
return getAllAnnotations(getType(processingEnv, annotatedTypeName), annotationFilters);
}
static AnnotationMirror findAnnotation(TypeMirror type, Class<? extends Annotation> annotationClass) {
return annotationClass == null ? null : findAnnotation(type, annotationClass.getTypeName());
}
static AnnotationMirror findAnnotation(TypeMirror type, CharSequence annotationClassName) {
return findAnnotation(ofTypeElement(type), annotationClassName);
}
static AnnotationMirror findAnnotation(Element element, Class<? extends Annotation> annotationClass) {
return annotationClass == null ? null : findAnnotation(element, annotationClass.getTypeName());
}
static AnnotationMirror findAnnotation(Element element, CharSequence annotationClassName) {
return filterFirst(getAllAnnotations(
element, annotation -> isSameType(annotation.getAnnotationType(), annotationClassName)));
}
static AnnotationMirror findMetaAnnotation(Element annotatedConstruct, CharSequence metaAnnotationClassName) {
return annotatedConstruct == null
? null
: getAnnotations(annotatedConstruct).stream()
.map(annotation -> findAnnotation(annotation.getAnnotationType(), metaAnnotationClassName))
.filter(Objects::nonNull)
.findFirst()
.orElse(null);
}
static boolean isAnnotationPresent(Element element, CharSequence annotationClassName) {
return findAnnotation(element, annotationClassName) != null
|| findMetaAnnotation(element, annotationClassName) != null;
}
static <T> T getAttribute(AnnotationMirror annotation, String attributeName) {
return annotation == null ? null : getAttribute(annotation.getElementValues(), attributeName);
}
static <T> T getAttribute(
Map<? extends ExecutableElement, ? extends AnnotationValue> attributesMap, String attributeName) {
T annotationValue = null;
for (Map.Entry<? extends ExecutableElement, ? extends AnnotationValue> entry : attributesMap.entrySet()) {
ExecutableElement attributeMethod = entry.getKey();
if (Objects.equals(attributeName, attributeMethod.getSimpleName().toString())) {
TypeMirror attributeType = attributeMethod.getReturnType();
AnnotationValue value = entry.getValue();
if (attributeType instanceof ArrayType) { // array-typed attribute values
ArrayType arrayType = (ArrayType) attributeType;
String componentType = arrayType.getComponentType().toString();
ClassLoader classLoader = AnnotationUtils.class.getClassLoader();
List<AnnotationValue> values = (List<AnnotationValue>) value.getValue();
int size = values.size();
try {
Class componentClass = classLoader.loadClass(componentType);
boolean isEnum = componentClass.isEnum();
Object array = Array.newInstance(componentClass, values.size());
for (int i = 0; i < size; i++) {
Object element = values.get(i).getValue();
if (isEnum) {
element = valueOf(componentClass, element.toString());
}
Array.set(array, i, element);
}
annotationValue = (T) array;
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
} else {
annotationValue = (T) value.getValue();
}
break;
}
}
return annotationValue;
}
static <T> T getValue(AnnotationMirror annotation) {
return (T) getAttribute(annotation, "value");
}
}
| AnnotationUtils |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/aot/generate/GeneratedClasses.java | {
"start": 1096,
"end": 1282
} | class ____ stateful, so the same instance should be used for all class
* generation.
*
* @author Phillip Webb
* @author Stephane Nicoll
* @since 6.0
* @see GeneratedClass
*/
public | is |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/utils/MethodUtilsTest.java | {
"start": 6299,
"end": 6451
} | class ____ extends MethodTestClazz {
@Override
public MethodTestClazz get() {
return this;
}
}
}
| MethodOverrideClazz |
java | apache__camel | components/camel-debezium/camel-debezium-db2/src/generated/java/org/apache/camel/component/debezium/db2/configuration/Db2ConnectorEmbeddedDebeziumConfiguration.java | {
"start": 25042,
"end": 34932
} | class ____ should be used to
* determine the topic name for data change, schema change, transaction,
* heartbeat event etc.
*/
public void setTopicNamingStrategy(String topicNamingStrategy) {
this.topicNamingStrategy = topicNamingStrategy;
}
public String getTopicNamingStrategy() {
return topicNamingStrategy;
}
/**
* The criteria for running a snapshot upon startup of the connector.
* Options include: 'initial' (the default) to specify the connector should
* run a snapshot only when no offsets are available for the logical server
* name; 'schema_only' to specify the connector should run a snapshot of the
* schema when no offsets are available for the logical server name.
*/
public void setSnapshotMode(String snapshotMode) {
this.snapshotMode = snapshotMode;
}
public String getSnapshotMode() {
return snapshotMode;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting permits
* to specify whenever the data should be snapshotted or not.
*/
public void setSnapshotModeConfigurationBasedSnapshotData(
boolean snapshotModeConfigurationBasedSnapshotData) {
this.snapshotModeConfigurationBasedSnapshotData = snapshotModeConfigurationBasedSnapshotData;
}
public boolean isSnapshotModeConfigurationBasedSnapshotData() {
return snapshotModeConfigurationBasedSnapshotData;
}
/**
* Enable/Disable Debezium context headers that provides essential metadata
* for tracking and identifying the source of CDC events in downstream
* processing systems.
*/
public void setExtendedHeadersEnabled(boolean extendedHeadersEnabled) {
this.extendedHeadersEnabled = extendedHeadersEnabled;
}
public boolean isExtendedHeadersEnabled() {
return extendedHeadersEnabled;
}
/**
* Maximum size of the queue for change events read from the database log
* but not yet recorded or forwarded. Defaults to 8192, and should always be
* larger than the maximum batch size.
*/
public void setMaxQueueSize(int maxQueueSize) {
this.maxQueueSize = maxQueueSize;
}
public int getMaxQueueSize() {
return maxQueueSize;
}
/**
* Specify the action to take when a guardrail collections limit is
* exceeded: 'warn' (the default) logs a warning message and continues
* processing; 'fail' stops the connector with an error.
*/
public void setGuardrailCollectionsLimitAction(
String guardrailCollectionsLimitAction) {
this.guardrailCollectionsLimitAction = guardrailCollectionsLimitAction;
}
public String getGuardrailCollectionsLimitAction() {
return guardrailCollectionsLimitAction;
}
/**
* The maximum size of chunk (number of documents/rows) for incremental
* snapshotting
*/
public void setIncrementalSnapshotChunkSize(int incrementalSnapshotChunkSize) {
this.incrementalSnapshotChunkSize = incrementalSnapshotChunkSize;
}
public int getIncrementalSnapshotChunkSize() {
return incrementalSnapshotChunkSize;
}
/**
* The job's owners emitted by Debezium. A comma-separated list of key-value
* pairs.For example: k1=v1,k2=v2
*/
public void setOpenlineageIntegrationJobOwners(
String openlineageIntegrationJobOwners) {
this.openlineageIntegrationJobOwners = openlineageIntegrationJobOwners;
}
public String getOpenlineageIntegrationJobOwners() {
return openlineageIntegrationJobOwners;
}
/**
* Path to OpenLineage file configuration. See
* https://openlineage.io/docs/client/java/configuration
*/
public void setOpenlineageIntegrationConfigFilePath(
String openlineageIntegrationConfigFilePath) {
this.openlineageIntegrationConfigFilePath = openlineageIntegrationConfigFilePath;
}
public String getOpenlineageIntegrationConfigFilePath() {
return openlineageIntegrationConfigFilePath;
}
/**
* Time to wait before restarting connector after retriable exception
* occurs. Defaults to 10000ms.
*/
public void setRetriableRestartConnectorWaitMs(
long retriableRestartConnectorWaitMs) {
this.retriableRestartConnectorWaitMs = retriableRestartConnectorWaitMs;
}
public long getRetriableRestartConnectorWaitMs() {
return retriableRestartConnectorWaitMs;
}
/**
* A delay period before a snapshot will begin, given in milliseconds.
* Defaults to 0 ms.
*/
public void setSnapshotDelayMs(long snapshotDelayMs) {
this.snapshotDelayMs = snapshotDelayMs;
}
public long getSnapshotDelayMs() {
return snapshotDelayMs;
}
/**
* The maximum time in milliseconds to wait for task executor to shut down.
*/
public void setExecutorShutdownTimeoutMs(long executorShutdownTimeoutMs) {
this.executorShutdownTimeoutMs = executorShutdownTimeoutMs;
}
public long getExecutorShutdownTimeoutMs() {
return executorShutdownTimeoutMs;
}
/**
* Enables transaction metadata extraction together with event counting
*/
public void setProvideTransactionMetadata(boolean provideTransactionMetadata) {
this.provideTransactionMetadata = provideTransactionMetadata;
}
public boolean isProvideTransactionMetadata() {
return provideTransactionMetadata;
}
/**
* Controls what DDL will Debezium store in database schema history. By
* default (false) Debezium will store all incoming DDL statements. If set
* to true, then only DDL that manipulates a captured table will be stored.
*/
public void setSchemaHistoryInternalStoreOnlyCapturedTablesDdl(
boolean schemaHistoryInternalStoreOnlyCapturedTablesDdl) {
this.schemaHistoryInternalStoreOnlyCapturedTablesDdl = schemaHistoryInternalStoreOnlyCapturedTablesDdl;
}
public boolean isSchemaHistoryInternalStoreOnlyCapturedTablesDdl() {
return schemaHistoryInternalStoreOnlyCapturedTablesDdl;
}
/**
* Controls what DDL will Debezium store in database schema history. By
* default (false) Debezium will store all incoming DDL statements. If set
* to true, then only DDL that manipulates a table from captured
* schema/database will be stored.
*/
public void setSchemaHistoryInternalStoreOnlyCapturedDatabasesDdl(
boolean schemaHistoryInternalStoreOnlyCapturedDatabasesDdl) {
this.schemaHistoryInternalStoreOnlyCapturedDatabasesDdl = schemaHistoryInternalStoreOnlyCapturedDatabasesDdl;
}
public boolean isSchemaHistoryInternalStoreOnlyCapturedDatabasesDdl() {
return schemaHistoryInternalStoreOnlyCapturedDatabasesDdl;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting permits
* to specify whenever the data should be snapshotted or not in case of
* error.
*/
public void setSnapshotModeConfigurationBasedSnapshotOnDataError(
boolean snapshotModeConfigurationBasedSnapshotOnDataError) {
this.snapshotModeConfigurationBasedSnapshotOnDataError = snapshotModeConfigurationBasedSnapshotOnDataError;
}
public boolean isSnapshotModeConfigurationBasedSnapshotOnDataError() {
return snapshotModeConfigurationBasedSnapshotOnDataError;
}
/**
* The path to the file that will be used to record the database schema
* history
*/
public void setSchemaHistoryInternalFileFilename(
String schemaHistoryInternalFileFilename) {
this.schemaHistoryInternalFileFilename = schemaHistoryInternalFileFilename;
}
public String getSchemaHistoryInternalFileFilename() {
return schemaHistoryInternalFileFilename;
}
/**
* Whether delete operations should be represented by a delete event and a
* subsequent tombstone event (true) or only by a delete event (false).
* Emitting the tombstone event (the default behavior) allows Kafka to
* completely delete all events pertaining to the given key once the source
* record got deleted.
*/
public void setTombstonesOnDelete(boolean tombstonesOnDelete) {
this.tombstonesOnDelete = tombstonesOnDelete;
}
public boolean isTombstonesOnDelete() {
return tombstonesOnDelete;
}
/**
* Topic prefix that identifies and provides a namespace for the particular
* database server/cluster is capturing changes. The topic prefix should be
* unique across all other connectors, since it is used as a prefix for all
* Kafka topic names that receive events emitted by this connector. Only
* alphanumeric characters, hyphens, dots and underscores must be accepted.
*/
public void setTopicPrefix(String topicPrefix) {
this.topicPrefix = topicPrefix;
}
public String getTopicPrefix() {
return topicPrefix;
}
/**
* Specify how DECIMAL and NUMERIC columns should be represented in change
* events, including: 'precise' (the default) uses java.math.BigDecimal to
* represent values, which are encoded in the change events using a binary
* representation and Kafka Connect's
* 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to
* represent values; 'double' represents values using Java's 'double', which
* may not offer the precision but will be far easier to use in consumers.
*/
public void setDecimalHandlingMode(String decimalHandlingMode) {
this.decimalHandlingMode = decimalHandlingMode;
}
public String getDecimalHandlingMode() {
return decimalHandlingMode;
}
/**
* The name of the SourceInfoStructMaker | that |
java | spring-projects__spring-framework | spring-expression/src/main/java/org/springframework/expression/spel/ast/OpPlus.java | {
"start": 1875,
"end": 9952
} | class ____ extends Operator {
/**
* Maximum number of characters permitted in a concatenated string.
* @since 5.2.24
*/
private static final int MAX_CONCATENATED_STRING_LENGTH = 100_000;
public OpPlus(int startPos, int endPos, SpelNodeImpl... operands) {
super("+", startPos, endPos, operands);
Assert.notEmpty(operands, "Operands must not be empty");
}
@Override
public TypedValue getValueInternal(ExpressionState state) throws EvaluationException {
SpelNodeImpl leftOp = getLeftOperand();
if (this.children.length < 2) { // if only one operand, then this is unary plus
Object operandOne = leftOp.getValueInternal(state).getValue();
if (operandOne instanceof Number) {
if (operandOne instanceof Double) {
this.exitTypeDescriptor = "D";
}
else if (operandOne instanceof Float) {
this.exitTypeDescriptor = "F";
}
else if (operandOne instanceof Long) {
this.exitTypeDescriptor = "J";
}
else if (operandOne instanceof Integer) {
this.exitTypeDescriptor = "I";
}
return new TypedValue(operandOne);
}
return state.operate(Operation.ADD, operandOne, null);
}
TypedValue operandOneValue = leftOp.getValueInternal(state);
Object leftOperand = operandOneValue.getValue();
TypedValue operandTwoValue = getRightOperand().getValueInternal(state);
Object rightOperand = operandTwoValue.getValue();
if (leftOperand instanceof Number leftNumber && rightOperand instanceof Number rightNumber) {
if (leftNumber instanceof BigDecimal || rightNumber instanceof BigDecimal) {
BigDecimal leftBigDecimal = NumberUtils.convertNumberToTargetClass(leftNumber, BigDecimal.class);
BigDecimal rightBigDecimal = NumberUtils.convertNumberToTargetClass(rightNumber, BigDecimal.class);
return new TypedValue(leftBigDecimal.add(rightBigDecimal));
}
else if (leftNumber instanceof Double || rightNumber instanceof Double) {
this.exitTypeDescriptor = "D";
return new TypedValue(leftNumber.doubleValue() + rightNumber.doubleValue());
}
else if (leftNumber instanceof Float || rightNumber instanceof Float) {
this.exitTypeDescriptor = "F";
return new TypedValue(leftNumber.floatValue() + rightNumber.floatValue());
}
else if (leftNumber instanceof BigInteger || rightNumber instanceof BigInteger) {
BigInteger leftBigInteger = NumberUtils.convertNumberToTargetClass(leftNumber, BigInteger.class);
BigInteger rightBigInteger = NumberUtils.convertNumberToTargetClass(rightNumber, BigInteger.class);
return new TypedValue(leftBigInteger.add(rightBigInteger));
}
else if (leftNumber instanceof Long || rightNumber instanceof Long) {
this.exitTypeDescriptor = "J";
return new TypedValue(leftNumber.longValue() + rightNumber.longValue());
}
else if (CodeFlow.isIntegerForNumericOp(leftNumber) || CodeFlow.isIntegerForNumericOp(rightNumber)) {
this.exitTypeDescriptor = "I";
return new TypedValue(leftNumber.intValue() + rightNumber.intValue());
}
else {
// Unknown Number subtypes -> best guess is double addition
return new TypedValue(leftNumber.doubleValue() + rightNumber.doubleValue());
}
}
if (leftOperand instanceof String leftString && rightOperand instanceof String rightString) {
this.exitTypeDescriptor = "Ljava/lang/String";
checkStringLength(leftString);
checkStringLength(rightString);
return concatenate(leftString, rightString);
}
if (leftOperand instanceof String leftString) {
checkStringLength(leftString);
String rightString = (rightOperand == null ? "null" : convertTypedValueToString(operandTwoValue, state));
checkStringLength(rightString);
return concatenate(leftString, rightString);
}
if (rightOperand instanceof String rightString) {
checkStringLength(rightString);
String leftString = (leftOperand == null ? "null" : convertTypedValueToString(operandOneValue, state));
checkStringLength(leftString);
return concatenate(leftString, rightString);
}
return state.operate(Operation.ADD, leftOperand, rightOperand);
}
private void checkStringLength(String string) {
if (string.length() > MAX_CONCATENATED_STRING_LENGTH) {
throw new SpelEvaluationException(getStartPosition(),
SpelMessage.MAX_CONCATENATED_STRING_LENGTH_EXCEEDED, MAX_CONCATENATED_STRING_LENGTH);
}
}
private TypedValue concatenate(String leftString, String rightString) {
String result = leftString + rightString;
checkStringLength(result);
return new TypedValue(result);
}
@Override
public String toStringAST() {
if (this.children.length < 2) { // unary plus
return "+" + getLeftOperand().toStringAST();
}
return super.toStringAST();
}
@Override
public SpelNodeImpl getRightOperand() {
if (this.children.length < 2) {
throw new IllegalStateException("No right operand");
}
return this.children[1];
}
/**
* Convert operand value to string using registered converter or using
* {@code toString} method.
* @param value typed value to be converted
* @param state expression state
* @return {@code TypedValue} instance converted to {@code String}
*/
private static String convertTypedValueToString(TypedValue value, ExpressionState state) {
TypeConverter typeConverter = state.getEvaluationContext().getTypeConverter();
TypeDescriptor typeDescriptor = TypeDescriptor.valueOf(String.class);
if (typeConverter.canConvert(value.getTypeDescriptor(), typeDescriptor)) {
return String.valueOf(typeConverter.convertValue(value.getValue(),
value.getTypeDescriptor(), typeDescriptor));
}
return String.valueOf(value.getValue());
}
@Override
public boolean isCompilable() {
if (!getLeftOperand().isCompilable()) {
return false;
}
if (this.children.length > 1) {
if (!getRightOperand().isCompilable()) {
return false;
}
}
return (this.exitTypeDescriptor != null);
}
/**
* Walk through a possible tree of nodes that combine strings and append
* them all to the same (on stack) StringBuilder.
*/
private void walk(MethodVisitor mv, CodeFlow cf, @Nullable SpelNodeImpl operand) {
if (operand instanceof OpPlus plus) {
walk(mv, cf, plus.getLeftOperand());
walk(mv, cf, plus.getRightOperand());
}
else if (operand != null) {
cf.enterCompilationScope();
operand.generateCode(mv,cf);
if (!"Ljava/lang/String".equals(cf.lastDescriptor())) {
mv.visitTypeInsn(CHECKCAST, "java/lang/String");
}
cf.exitCompilationScope();
mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/StringBuilder", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;", false);
}
}
@Override
public void generateCode(MethodVisitor mv, CodeFlow cf) {
if ("Ljava/lang/String".equals(this.exitTypeDescriptor)) {
mv.visitTypeInsn(NEW, "java/lang/StringBuilder");
mv.visitInsn(DUP);
mv.visitMethodInsn(INVOKESPECIAL, "java/lang/StringBuilder", "<init>", "()V", false);
walk(mv, cf, getLeftOperand());
walk(mv, cf, getRightOperand());
mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/StringBuilder", "toString", "()Ljava/lang/String;", false);
}
else {
this.children[0].generateCode(mv, cf);
String leftDesc = this.children[0].exitTypeDescriptor;
String exitDesc = this.exitTypeDescriptor;
Assert.state(exitDesc != null, "No exit type descriptor");
char targetDesc = exitDesc.charAt(0);
CodeFlow.insertNumericUnboxOrPrimitiveTypeCoercion(mv, leftDesc, targetDesc);
if (this.children.length > 1) {
cf.enterCompilationScope();
this.children[1].generateCode(mv, cf);
String rightDesc = this.children[1].exitTypeDescriptor;
cf.exitCompilationScope();
CodeFlow.insertNumericUnboxOrPrimitiveTypeCoercion(mv, rightDesc, targetDesc);
switch (targetDesc) {
case 'I' -> mv.visitInsn(IADD);
case 'J' -> mv.visitInsn(LADD);
case 'F' -> mv.visitInsn(FADD);
case 'D' -> mv.visitInsn(DADD);
default -> throw new IllegalStateException(
"Unrecognized exit type descriptor: '" + this.exitTypeDescriptor + "'");
}
}
}
cf.pushDescriptor(this.exitTypeDescriptor);
}
}
| OpPlus |
java | apache__camel | components/camel-ai/camel-weaviate/src/main/java/org/apache/camel/component/weaviate/WeaviateVectorDbHeaders.java | {
"start": 896,
"end": 3693
} | class ____ {
@Metadata(description = "The action to be performed.", javaType = "String",
enums = "CREATE_COLLECTION,CREATE_INDEX,UPSERT,INSERT,SEARCH,DELETE,UPDATE,QUERY,QUERY_BY_ID")
public static final String ACTION = "CamelWeaviateAction";
@Metadata(description = "Text Field Name for Insert/Upsert operation", javaType = "String")
public static final String TEXT_FIELD_NAME = "CamelWeaviateTextFieldName";
@Metadata(description = "Vector Field Name for Insert/Upsert operation", javaType = "String")
public static final String VECTOR_FIELD_NAME = "CamelweaviateVectorFieldName";
@Metadata(description = "Collection Name for Insert/Upsert operation", javaType = "String")
public static final String COLLECTION_NAME = "CamelWeaviateCollectionName";
@Metadata(description = "Collection Similarity Metric", javaType = "String", enums = "cosine,euclidean,dotproduct")
public static final String COLLECTION_SIMILARITY_METRIC = "CamelWeaviateCollectionSimilarityMetric";
@Metadata(description = "Collection Dimension", javaType = "int")
public static final String COLLECTION_DIMENSION = "CamelWeaviateCollectionDimension";
@Metadata(description = "Collection Cloud Vendor", javaType = "String", enums = "aws,gcp,azure")
public static final String COLLECTION_CLOUD = "CamelWeaviateCollectionCloud";
@Metadata(description = "Collection Cloud Vendor Region", javaType = "String", enums = "aws,gcp,azure")
public static final String COLLECTION_CLOUD_REGION = "CamelWeaviateCollectionCloudRegion";
@Metadata(description = "Index Name", javaType = "String")
public static final String INDEX_NAME = "CamelWeaviateIndexName";
@Metadata(description = "Weaviate Object fields", javaType = "HashMap")
public static final String FIELDS = "CamelWeaviateFields";
@Metadata(description = "Weaviate Object properties", javaType = "HashMap")
public static final String PROPERTIES = "CamelWeaviateProperties";
@Metadata(description = "Index Id", javaType = "String")
public static final String INDEX_ID = "CamelWeaviateIndexId";
@Metadata(description = "Query Top K", javaType = "Integer")
public static final String QUERY_TOP_K = "CamelWeaviateQueryTopK";
@Metadata(description = "Merges properties into the object", javaType = "Boolean", defaultValue = "true")
public static final String UPDATE_WITH_MERGE = "CamelWeaviateUpdateWithMerge";
@Metadata(description = "Key Name for Insert/Upsert operation", javaType = "String")
public static final String KEY_NAME = "CamelWeaviateKeyName";
@Metadata(description = "Key Value for Insert/Upsert operation", javaType = "String")
public static final String KEY_VALUE = "CamelWeaviateKeyValue";
}
| WeaviateVectorDbHeaders |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/event/test/TestEvent.java | {
"start": 772,
"end": 1242
} | class ____ extends IdentifiableApplicationEvent {
public final Object msg;
public TestEvent(Object source, String id, String msg) {
super(source, id);
this.msg = msg;
}
public TestEvent(Object source, String msg) {
super(source);
this.msg = msg;
}
public TestEvent(Object source, Integer msg) {
super(source);
this.msg = msg;
}
public TestEvent(Object source) {
this(source, "test");
}
public TestEvent() {
this(new Object());
}
}
| TestEvent |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/deftyping/TestDefaultForMaps.java | {
"start": 737,
"end": 903
} | class ____ {
public String key;
public MapKey(String k) { key = k; }
@Override public String toString() { return key; }
}
static | MapKey |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingSuperCallTest.java | {
"start": 2327,
"end": 2504
} | interface ____ {}
""")
.addSourceLines(
"Super.java",
"""
import androidx.annotation.CallSuper;
public | CallSuper |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.