language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/health/ReadinessAndLivenessTest.java | {
"start": 1540,
"end": 4324
} | class ____ {
@Test
public void testLiveAndReady() {
CamelContext context = new DefaultCamelContext();
HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
registry.setCamelContext(context);
context.getRegistry().bind("check1", new MyReadyCheck("G1", "1"));
context.getRegistry().bind("check2", new MyLiveCheck("G1", "2"));
context.start();
registry.start();
List<HealthCheck> checks = registry.stream().toList();
assertEquals(2, checks.size());
Collection<HealthCheck.Result> results = HealthCheckHelper.invokeReadiness(context);
assertEquals(1, results.size());
HealthCheck.Result result = results.iterator().next();
assertEquals(HealthCheck.State.UP, result.getState());
assertFalse(result.getCheck().isLiveness());
assertTrue(result.getCheck().isReadiness());
assertInstanceOf(MyReadyCheck.class, result.getCheck());
results = HealthCheckHelper.invokeLiveness(context);
assertEquals(1, results.size());
result = results.iterator().next();
assertEquals(HealthCheck.State.DOWN, result.getState());
assertTrue(result.getCheck().isLiveness());
assertFalse(result.getCheck().isReadiness());
assertInstanceOf(MyLiveCheck.class, result.getCheck());
}
@Test
public void testAll() {
CamelContext context = new DefaultCamelContext();
HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
registry.setCamelContext(context);
context.getRegistry().bind("check1", new MyAllCheck("G1", "1"));
context.start();
registry.start();
List<HealthCheck> checks = registry.stream().toList();
assertEquals(1, checks.size());
Collection<HealthCheck.Result> results = HealthCheckHelper.invokeReadiness(context);
assertEquals(1, results.size());
HealthCheck.Result result = results.iterator().next();
assertEquals(HealthCheck.State.DOWN, result.getState());
assertEquals("READINESS", result.getMessage().get());
assertTrue(result.getCheck().isLiveness());
assertTrue(result.getCheck().isReadiness());
assertInstanceOf(MyAllCheck.class, result.getCheck());
results = HealthCheckHelper.invokeLiveness(context);
assertEquals(1, results.size());
result = results.iterator().next();
assertEquals(HealthCheck.State.UP, result.getState());
assertTrue(result.getCheck().isLiveness());
assertTrue(result.getCheck().isReadiness());
assertEquals("LIVENESS", result.getMessage().get());
assertInstanceOf(MyAllCheck.class, result.getCheck());
}
private static | ReadinessAndLivenessTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_anySatisfy_with_ThrowingConsumer_Test.java | {
"start": 1279,
"end": 2533
} | class ____ extends ObjectArrayAssertBaseTest {
private ThrowingConsumer<Object> restrictions;
@BeforeEach
void beforeOnce() {
restrictions = o -> assertThat(o).isNotNull();
}
@Override
protected ObjectArrayAssert<Object> invoke_api_method() {
return assertions.anySatisfy(restrictions);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertAnySatisfy(getInfo(assertions), list(getActual(assertions)), restrictions);
}
@Test
void should_rethrow_throwables_as_runtime_exceptions() {
// GIVEN
Throwable exception = new Throwable("boom!");
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(array("foo")).anySatisfy(throwingConsumer(exception)));
// THEN
then(throwable).isInstanceOf(RuntimeException.class)
.cause().isSameAs(exception);
}
@Test
void should_propagate_RuntimeException_as_is() {
// GIVEN
RuntimeException runtimeException = new RuntimeException("boom!");
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(array("foo")).anySatisfy(throwingConsumer(runtimeException)));
// THEN
then(throwable).isSameAs(runtimeException);
}
}
| ObjectArrayAssert_anySatisfy_with_ThrowingConsumer_Test |
java | apache__camel | components/camel-spring-parent/camel-spring-redis/src/main/java/org/apache/camel/component/redis/Command.java | {
"start": 853,
"end": 2483
} | enum ____ {
PING,
SET,
GET,
QUIT,
EXISTS,
DEL,
TYPE,
FLUSHDB,
KEYS,
RANDOMKEY,
RENAME,
RENAMENX,
RENAMEX,
DBSIZE,
EXPIRE,
EXPIREAT,
TTL,
SELECT,
MOVE,
FLUSHALL,
GETSET,
MGET,
SETNX,
SETEX,
MSET,
MSETNX,
DECRBY,
DECR,
INCRBY,
INCR,
APPEND,
SUBSTR,
HSET,
HGET,
HSETNX,
HMSET,
HMGET,
HINCRBY,
HEXISTS,
HDEL,
HLEN,
HKEYS,
HVALS,
HGETALL,
RPUSH,
LPUSH,
LLEN,
LRANGE,
LTRIM,
LINDEX,
LSET,
LREM,
LPOP,
RPOP,
RPOPLPUSH,
SADD,
SMEMBERS,
SREM,
SPOP,
SMOVE,
SCARD,
SISMEMBER,
SINTER,
SINTERSTORE,
SUNION,
SUNIONSTORE,
SDIFF,
SDIFFSTORE,
SRANDMEMBER,
ZADD,
ZRANGE,
ZREM,
ZINCRBY,
ZRANK,
ZREVRANK,
ZREVRANGE,
ZCARD,
ZSCORE,
MULTI,
DISCARD,
EXEC,
WATCH,
UNWATCH,
SORT,
BLPOP,
BRPOP,
AUTH,
SUBSCRIBE,
PUBLISH,
UNSUBSCRIBE,
PSUBSCRIBE,
PUNSUBSCRIBE,
ZCOUNT,
ZRANGEBYSCORE,
ZREVRANGEBYSCORE,
ZREMRANGEBYRANK,
ZREMRANGEBYSCORE,
ZUNIONSTORE,
ZINTERSTORE,
SAVE,
BGSAVE,
BGREWRITEAOF,
LASTSAVE,
SHUTDOWN,
INFO,
MONITOR,
SLAVEOF,
CONFIG,
STRLEN,
SYNC,
LPUSHX,
PERSIST,
RPUSHX,
ECHO,
LINSERT,
DEBUG,
BRPOPLPUSH,
SETBIT,
GETBIT,
SETRANGE,
GETRANGE,
PEXPIRE,
PEXPIREAT,
GEOADD,
GEODIST,
GEOHASH,
GEOPOS,
GEORADIUS,
GEORADIUSBYMEMBER;
}
| Command |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/error/ErrorMappingTest.java | {
"start": 1825,
"end": 1921
} | interface ____ {
@GET
Dto get();
}
@Path("/error")
public static | Client |
java | quarkusio__quarkus | independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/UncaughtExceptions.java | {
"start": 71,
"end": 194
} | class ____ {
public static final Logger LOGGER = Logger.getLogger(UncaughtExceptions.class.getName());
}
| UncaughtExceptions |
java | google__auto | value/src/test/java/com/google/auto/value/extension/toprettystring/ToPrettyStringTest.java | {
"start": 2654,
"end": 3947
} | class ____ {
@Nullable
@SuppressWarnings("mutable")
abstract long[] longs();
@ToPrettyString
abstract String toPrettyString();
}
@Test
public void primitiveArray() {
PrimitiveArray valueType =
new AutoValue_ToPrettyStringTest_PrimitiveArray(new long[] {1L, 2L, 10L, 200L});
assertThat(valueType.toPrettyString())
.isEqualTo(
"PrimitiveArray {"
+ "\n longs = ["
+ "\n 1,"
+ "\n 2,"
+ "\n 10,"
+ "\n 200,"
+ "\n ],"
+ "\n}");
}
@Test
public void primitiveArray_empty() {
PrimitiveArray valueType = new AutoValue_ToPrettyStringTest_PrimitiveArray(new long[0]);
assertThat(valueType.toPrettyString())
.isEqualTo(
"PrimitiveArray {" // force newline
+ "\n longs = [],"
+ "\n}");
}
@Test
public void primitiveArray_null() {
PrimitiveArray valueType = new AutoValue_ToPrettyStringTest_PrimitiveArray(null);
assertThat(valueType.toPrettyString())
.isEqualTo(
"PrimitiveArray {" // force newline
+ "\n longs = null,"
+ "\n}");
}
@AutoValue
abstract static | PrimitiveArray |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BertJapaneseTokenizer.java | {
"start": 1616,
"end": 2475
} | class ____ extends BertTokenizer.Builder {
protected JapaneseBuilder(List<String> vocab, Tokenization tokenization) {
super(vocab, tokenization);
}
@Override
public BertTokenizer build() {
// if not set strip accents defaults to the value of doLowerCase
if (doStripAccents == null) {
doStripAccents = doLowerCase;
}
if (neverSplit == null) {
neverSplit = Collections.emptySet();
}
return new BertJapaneseTokenizer(
originalVocab,
vocab,
doLowerCase,
doTokenizeCjKChars,
doStripAccents,
withSpecialTokens,
maxSequenceLength,
neverSplit
);
}
}
}
| JapaneseBuilder |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/feature/FeaturesTest.java | {
"start": 904,
"end": 1198
} | class ____ {
private Object value;
@JSONField(serialzeFeatures = { SerializerFeature.WriteMapNullValue })
public Object getValue() {
return value;
}
public void setValue(Object value) {
this.value = value;
}
}
}
| Entity |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/lifecycle/internal/stub/MojoExecutorStub.java | {
"start": 1846,
"end": 3549
} | interface
____ final List<MojoExecution> executions = Collections.synchronizedList(new ArrayList<>());
public MojoExecutorStub() {
super(null, null, null, null, null, null);
}
public MojoExecutorStub(
BuildPluginManager pluginManager,
MavenPluginManager mavenPluginManager,
LifecycleDependencyResolver lifeCycleDependencyResolver,
ExecutionEventCatapult eventCatapult,
Provider<MojosExecutionStrategy> mojosExecutionStrategy,
MessageBuilderFactory messageBuilderFactory) {
super(
pluginManager,
mavenPluginManager,
lifeCycleDependencyResolver,
eventCatapult,
mojosExecutionStrategy,
messageBuilderFactory);
}
@Override
public void execute(MavenSession session, List<MojoExecution> mojoExecutions) throws LifecycleExecutionException {
executions.addAll(mojoExecutions);
}
@Override
public List<MavenProject> executeForkedExecutions(MojoExecution mojoExecution, MavenSession session)
throws LifecycleExecutionException {
return null;
}
public static MojoDescriptor createMojoDescriptor(Plugin plugin) {
final PluginDescriptor descriptor = new PluginDescriptor();
descriptor.setGroupId(plugin.getGroupId());
descriptor.setArtifactId(plugin.getArtifactId());
descriptor.setPlugin(plugin);
descriptor.setVersion(plugin.getVersion());
final MojoDescriptor mojoDescriptor = new MojoDescriptor();
mojoDescriptor.setPluginDescriptor(descriptor);
return mojoDescriptor;
}
}
| protected |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/TypeConverterRegistryStatisticsEnabledTest.java | {
"start": 1180,
"end": 2924
} | class ____ extends ContextTestSupport {
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = new DefaultCamelContext(false);
context.setTypeConverterStatisticsEnabled(true);
return context;
}
@Test
public void testTypeConverterRegistry() throws Exception {
getMockEndpoint("mock:a").expectedMessageCount(2);
template.sendBody("direct:start", "3");
template.sendBody("direct:start", "7");
assertMockEndpointsSatisfied();
TypeConverterRegistry reg = context.getTypeConverterRegistry();
long failed = reg.getStatistics().getFailedCounter();
assertEquals(0, (int) failed);
long miss = reg.getStatistics().getMissCounter();
assertEquals(0, (int) miss);
assertThrows(Exception.class, () -> template.sendBody("direct:start", "foo"),
"Should have thrown exception");
// should now have a failed
failed = reg.getStatistics().getFailedCounter();
assertEquals(1, (int) failed);
miss = reg.getStatistics().getMissCounter();
assertEquals(0, (int) miss);
// reset
reg.getStatistics().reset();
failed = reg.getStatistics().getFailedCounter();
assertEquals(0, (int) failed);
miss = reg.getStatistics().getMissCounter();
assertEquals(0, (int) miss);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("foo").convertBodyTo(int.class).to("mock:a");
}
};
}
}
| TypeConverterRegistryStatisticsEnabledTest |
java | google__guice | core/test/com/google/inject/CircularDependencyTest.java | {
"start": 13228,
"end": 13472
} | class ____ implements F {
private final G g;
@Inject
RealF(G g) {
this.g = g;
}
@Override
public G g() {
return g;
}
@Override
public String toString() {
return "F";
}
}
public | RealF |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxBufferBoundaryTest.java | {
"start": 1350,
"end": 15309
} | class ____
extends FluxOperatorTest<String, List<String>> {
@Override
protected Scenario<String, List<String>> defaultScenarioOptions(Scenario<String, List<String>> defaultOptions) {
return defaultOptions.prefetch(Integer.MAX_VALUE);
}
@Override
protected List<Scenario<String, List<String>>> scenarios_operatorError() {
return Arrays.asList(
scenario(f -> f.buffer(Flux.never(), () -> null)),
scenario(f -> f.buffer(Flux.never(), () -> {
throw exception();
})));
}
@Override
protected List<Scenario<String, List<String>>> scenarios_operatorSuccess() {
return Arrays.asList(scenario(f -> f.buffer(Mono.never()))
.receive(i -> assertThat(i).containsExactly(item(0), item(1), item(2)))
.shouldAssertPostTerminateState(false),
scenario(f -> f.buffer(Mono.just(1)))
.receiverEmpty()
.shouldAssertPostTerminateState(false)
);
}
@Override
protected List<Scenario<String, List<String>>> scenarios_errorFromUpstreamFailure() {
return Arrays.asList(scenario(f -> f.buffer(Flux.never())));
}
@Test
public void normal() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
sp1.asFlux()
.buffer(sp2.asFlux())
.subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp1.emitNext(1, FAIL_FAST);
sp1.emitNext(2, FAIL_FAST);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp2.emitNext(1, FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertNoError()
.assertNotComplete();
sp2.emitNext(2, FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertNoError()
.assertNotComplete();
sp1.emitNext(3, FAIL_FAST);
sp1.emitNext(4, FAIL_FAST);
sp2.emitComplete(FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(3, 4))
.assertNoError()
.assertComplete();
sp1.emitNext(5, FAIL_FAST);
sp1.emitNext(6, FAIL_FAST);
sp1.emitComplete(FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(3, 4))
.assertNoError()
.assertComplete();
}
@Test
public void mainError() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
sp1.asFlux()
.buffer(sp2.asFlux())
.subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp1.emitNext(1, FAIL_FAST);
sp1.emitNext(2, FAIL_FAST);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp2.emitNext(1, FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertNoError()
.assertNotComplete();
sp1.emitError(new RuntimeException("forced failure"), FAIL_FAST);
assertThat(sp2.currentSubscriberCount()).as("sp2 has subscriber").isZero();
sp2.emitNext(2, FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
sp2.emitComplete(FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
}
@Test
public void otherError() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
sp1.asFlux()
.buffer(sp2.asFlux())
.subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp1.emitNext(1, FAIL_FAST);
sp1.emitNext(2, FAIL_FAST);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
sp2.emitNext(1, FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertNoError()
.assertNotComplete();
sp1.emitNext(3, FAIL_FAST);
sp2.emitError(new RuntimeException("forced failure"), FAIL_FAST);
assertThat(sp1.currentSubscriberCount()).as("sp1 has subscriber").isZero();
ts.assertValues(Arrays.asList(1, 2))
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
sp2.emitComplete(FAIL_FAST);
ts.assertValues(Arrays.asList(1, 2))
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
}
@Test
public void bufferSupplierThrows() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
sp1.asFlux()
.buffer(sp2.asFlux(), (Supplier<List<Integer>>) () -> {
throw new RuntimeException("forced failure");
})
.subscribe(ts);
assertThat(sp1.currentSubscriberCount()).as("sp1 has subscriber").isZero();
assertThat(sp2.currentSubscriberCount()).as("sp2 has subscriber").isZero();
ts.assertNoValues()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
}
@Test
public void bufferSupplierThrowsLater() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
int count[] = {1};
sp1.asFlux()
.buffer(sp2.asFlux(), (Supplier<List<Integer>>) () -> {
if (count[0]-- > 0) {
return new ArrayList<>();
}
throw new RuntimeException("forced failure");
})
.subscribe(ts);
sp1.emitNext(1, FAIL_FAST);
sp1.emitNext(2, FAIL_FAST);
sp2.emitNext(1, FAIL_FAST);
assertThat(sp1.currentSubscriberCount()).as("sp1 has subscriber").isZero();
assertThat(sp2.currentSubscriberCount()).as("sp2 has subscriber").isZero();
ts.assertNoValues()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
}
@Test
public void bufferSupplierReturnsNUll() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Sinks.Many<Integer> sp1 = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> sp2 = Sinks.unsafe().many().multicast().directBestEffort();
sp1.asFlux()
.buffer(sp2.asFlux(), (Supplier<List<Integer>>) () -> null)
.subscribe(ts);
assertThat(sp1.currentSubscriberCount()).as("sp1 has subscriber").isZero();
assertThat(sp2.currentSubscriberCount()).as("sp2 has subscriber").isZero();
ts.assertNoValues()
.assertError(NullPointerException.class)
.assertNotComplete();
}
Flux<List<Integer>> scenario_bufferWillSubdivideAnInputFluxTime() {
return Flux.just(1, 2, 3, 4, 5, 6, 7, 8)
.delayElements(Duration.ofMillis(99))
.buffer(Duration.ofMillis(200));
}
@Test
public void bufferWillSubdivideAnInputFluxTime() {
StepVerifier.withVirtualTime(this::scenario_bufferWillSubdivideAnInputFluxTime)
.thenAwait(Duration.ofSeconds(10))
.assertNext(t -> assertThat(t).containsExactly(1, 2))
.assertNext(t -> assertThat(t).containsExactly(3, 4))
.assertNext(t -> assertThat(t).containsExactly(5, 6))
.assertNext(t -> assertThat(t).containsExactly(7, 8))
.verifyComplete();
}
Flux<List<Integer>> scenario_bufferWillSubdivideAnInputFluxTime2() {
return Flux.just(1, 2, 3, 4, 5, 6, 7, 8)
.delayElements(Duration.ofMillis(99))
.buffer(Duration.ofMillis(200));
}
@Test
public void bufferWillSubdivideAnInputFluxTime2() {
StepVerifier.withVirtualTime(this::scenario_bufferWillSubdivideAnInputFluxTime2)
.thenAwait(Duration.ofSeconds(10))
.assertNext(t -> assertThat(t).containsExactly(1, 2))
.assertNext(t -> assertThat(t).containsExactly(3, 4))
.assertNext(t -> assertThat(t).containsExactly(5, 6))
.assertNext(t -> assertThat(t).containsExactly(7, 8))
.verifyComplete();
}
@Test
public void bufferWillAccumulateMultipleListsOfValues() {
//given: "a source and a collected flux"
Sinks.Many<Integer> numbers = Sinks.many().multicast().onBackpressureBuffer();
//non overlapping buffers
Sinks.Many<Integer> boundaryFlux = Sinks.many().multicast().onBackpressureBuffer();
StepVerifier.create(numbers.asFlux()
.buffer(boundaryFlux.asFlux())
.collectList())
.then(() -> {
numbers.emitNext(1, FAIL_FAST);
numbers.emitNext(2, FAIL_FAST);
numbers.emitNext(3, FAIL_FAST);
boundaryFlux.emitNext(1, FAIL_FAST);
numbers.emitNext(5, FAIL_FAST);
numbers.emitNext(6, FAIL_FAST);
numbers.emitComplete(FAIL_FAST);
//"the collected lists are available"
})
.assertNext(res -> assertThat(res).containsExactly(Arrays.asList(1, 2, 3), Arrays.asList(5, 6)))
.verifyComplete();
}
@Test
public void fluxEmptyBufferJust() {
// "flux empty buffer just"() {
// when:
List<List<Object>> ranges = Flux.empty()
.buffer(Flux.just(1))
.collectList()
.block();
// then:
assertThat(ranges).isEmpty();
}
@Test
public void fluxEmptyBuffer() {
// "flux empty buffer"
// when:
List<List<Object>> ranges = Flux.empty()
.buffer(Flux.never())
.collectList()
.block(Duration.ofMillis(100));
// then:
assertThat(ranges).isEmpty();
}
@Test
public void scanOperator(){
Flux<Integer> parent = Flux.just(1);
FluxBufferBoundary<Integer, Object, ArrayList<Integer>> test = new FluxBufferBoundary<>(parent, Flux.empty(), ArrayList::new);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanMain() {
CoreSubscriber<? super List> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
List<String> initialBuffer = Arrays.asList("foo", "bar");
FluxBufferBoundary.BufferBoundaryMain<String, Integer, List<String>> test = new FluxBufferBoundary.BufferBoundaryMain<>(
actual, initialBuffer, ArrayList::new);
Subscription parent = Operators.cancelledSubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(2);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(Integer.MAX_VALUE);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(0L);
test.request(2);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(2L);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
@Test
public void scanOther() {
CoreSubscriber<Object> actual = new LambdaSubscriber<>(null, null, null, null);
FluxBufferBoundary.BufferBoundaryMain<String, Integer, List<String>> main = new FluxBufferBoundary.BufferBoundaryMain<>(
actual, null, ArrayList::new);
FluxBufferBoundary.BufferBoundaryOther<Integer> test = new FluxBufferBoundary.BufferBoundaryOther<>(main);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
//the request is not tracked when there is a parent
test.request(2);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(0L);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(main);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
@Test
public void scanOtherRequestWhenNoParent() {
CoreSubscriber<Object> actual = new LambdaSubscriber<>(null, null, null, null);
FluxBufferBoundary.BufferBoundaryMain<String, Integer, List<String>> main = new FluxBufferBoundary.BufferBoundaryMain<>(
actual, null, ArrayList::new);
FluxBufferBoundary.BufferBoundaryOther<Integer> test = new FluxBufferBoundary.BufferBoundaryOther<>(main);
//the request is tracked when there is no parent
test.request(2);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(2L);
}
@Test
public void discardOnCancel() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.never())
.buffer(Mono.never()))
.thenAwait(Duration.ofMillis(10))
.thenCancel()
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnError() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.error(new IllegalStateException("boom")))
.buffer(Mono.never()))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnEmitOverflow() {
final TestPublisher<Integer> publisher = TestPublisher.createNoncompliant(TestPublisher.Violation.REQUEST_OVERFLOW);
StepVerifier.create(publisher.flux()
.buffer(Mono.never()),
StepVerifierOptions.create().initialRequest(0))
.then(() -> publisher.emit(1, 2, 3))
.expectErrorMatches(Exceptions::isOverflow)
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
}
| FluxBufferBoundaryTest |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/MvcUriComponentsBuilder.java | {
"start": 28133,
"end": 32524
} | class ____
implements MethodInterceptor, InvocationHandler, MethodInvocationInfo {
private final Class<?> controllerType;
private @Nullable Method controllerMethod;
private Object @Nullable [] argumentValues;
ControllerMethodInvocationInterceptor(Class<?> controllerType) {
this.controllerType = controllerType;
}
@Override
public @Nullable Object intercept(@Nullable Object obj, Method method, Object[] args, @Nullable MethodProxy proxy) {
switch (method.getName()) {
case "getControllerType" -> {
return this.controllerType;
}
case "getControllerMethod" -> {
return this.controllerMethod;
}
case "getArgumentValues" -> {
return this.argumentValues;
}
default -> {
if (ReflectionUtils.isObjectMethod(method)) {
return ReflectionUtils.invokeMethod(method, obj, args);
}
else {
this.controllerMethod = method;
this.argumentValues = args;
Class<?> returnType = method.getReturnType();
try {
return (returnType == void.class ? null : returnType.cast(initProxy(returnType, this)));
}
catch (Throwable ex) {
throw new IllegalStateException(
"Failed to create proxy for controller method return type: " + method, ex);
}
}
}
}
}
@Override
public @Nullable Object invoke(Object proxy, Method method, Object @Nullable [] args) {
return intercept(proxy, method, (args != null ? args : new Object[0]), null);
}
@Override
public Class<?> getControllerType() {
return this.controllerType;
}
@Override
public Method getControllerMethod() {
Assert.state(this.controllerMethod != null, "Not initialized yet");
return this.controllerMethod;
}
@Override
public Object[] getArgumentValues() {
Assert.state(this.argumentValues != null, "Not initialized yet");
return this.argumentValues;
}
@SuppressWarnings("unchecked")
private static <T> T initProxy(
Class<?> controllerType, @Nullable ControllerMethodInvocationInterceptor interceptor) {
interceptor = (interceptor != null ?
interceptor : new ControllerMethodInvocationInterceptor(controllerType));
if (controllerType == Object.class) {
return (T) interceptor;
}
else if (controllerType.isInterface()) {
ClassLoader classLoader = controllerType.getClassLoader();
if (classLoader == null) {
// JDK bootstrap loader -> use MethodInvocationInfo ClassLoader instead.
classLoader = MethodInvocationInfo.class.getClassLoader();
}
else if (classLoader.getParent() == null) {
// Potentially the JDK platform loader on JDK 9+
ClassLoader miiClassLoader = MethodInvocationInfo.class.getClassLoader();
ClassLoader miiParent = miiClassLoader.getParent();
while (miiParent != null) {
if (classLoader == miiParent) {
// Suggested ClassLoader is ancestor of MethodInvocationInfo ClassLoader
// -> use MethodInvocationInfo ClassLoader itself instead.
classLoader = miiClassLoader;
break;
}
miiParent = miiParent.getParent();
}
}
Class<?>[] ifcs = new Class<?>[] {controllerType, MethodInvocationInfo.class};
return (T) Proxy.newProxyInstance(classLoader, ifcs, interceptor);
}
else {
Enhancer enhancer = new Enhancer();
enhancer.setSuperclass(controllerType);
enhancer.setInterfaces(new Class<?>[] {MethodInvocationInfo.class});
enhancer.setNamingPolicy(SpringNamingPolicy.INSTANCE);
enhancer.setAttemptLoad(AotDetector.useGeneratedArtifacts());
enhancer.setCallbackType(MethodInterceptor.class);
Class<?> proxyClass = enhancer.createClass();
Object proxy = null;
if (objenesis.isWorthTrying()) {
try {
proxy = objenesis.newInstance(proxyClass, enhancer.getUseCache());
}
catch (ObjenesisException ex) {
logger.debug("Failed to create controller proxy, falling back on default constructor", ex);
}
}
if (proxy == null) {
try {
proxy = ReflectionUtils.accessibleConstructor(proxyClass).newInstance();
}
catch (Throwable ex) {
throw new IllegalStateException(
"Failed to create controller proxy or use default constructor", ex);
}
}
((Factory) proxy).setCallbacks(new Callback[] {interceptor});
return (T) proxy;
}
}
}
/**
* Builder | ControllerMethodInvocationInterceptor |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/configuration/ClassLoadingConfig.java | {
"start": 2997,
"end": 3315
} | class ____ name. e.g. to
* remove <code>com.acme.Foo</code> you would specify <code>com/acme/Foo.class</code>.
* <p>
* Note that for technical reasons this is not supported when running with JBang.
*/
@ConfigDocMapKey("group-id:artifact-id")
Map<String, Set<String>> removedResources();
}
| file |
java | google__dagger | javatests/dagger/internal/codegen/MapBindingComponentProcessorTest.java | {
"start": 13200,
"end": 13751
} | class ____ {",
" @Provides @IntoMap",
" @WrappedClassKey(Integer.class) Handler provideAdminHandler() {",
" return new AdminHandler();",
" }",
"}");
Source mapModuleTwoFile =
CompilerTests.javaSource(
"test.MapModuleTwo",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.multibindings.IntoMap;",
"",
"@Module",
"final | MapModuleOne |
java | square__javapoet | src/test/java/com/squareup/javapoet/TypeSpecTest.java | {
"start": 24748,
"end": 25548
} | class ____ extends AbstractSet<Food> "
+ "implements Serializable, Comparable<Taco> {\n"
+ "}\n");
}
@Test public void classImplementsNestedClass() throws Exception {
ClassName outer = ClassName.get(tacosPackage, "Outer");
ClassName inner = outer.nestedClass("Inner");
ClassName callable = ClassName.get(Callable.class);
TypeSpec typeSpec = TypeSpec.classBuilder("Outer")
.superclass(ParameterizedTypeName.get(callable,
inner))
.addType(TypeSpec.classBuilder("Inner")
.addModifiers(Modifier.STATIC)
.build())
.build();
assertThat(toString(typeSpec)).isEqualTo(""
+ "package com.squareup.tacos;\n"
+ "\n"
+ "import java.util.concurrent.Callable;\n"
+ "\n"
+ " | Taco |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/value/spi/CustomThrowingCheeseType.java | {
"start": 231,
"end": 376
} | enum ____ implements CustomThrowingEnumMarker {
UNSPECIFIED,
CUSTOM_BRIE,
CUSTOM_ROQUEFORT,
UNRECOGNIZED,
}
| CustomThrowingCheeseType |
java | quarkusio__quarkus | test-framework/junit5-component/src/main/java/io/quarkus/test/component/ComponentContainer.java | {
"start": 31787,
"end": 39842
} | class
____.addAll(findFields(enclosing, injectAnnotations));
enclosing = enclosing.getEnclosingClass();
}
}
if (injectSpy != null) {
List<Field> injectSpies = found.stream().filter(f -> f.isAnnotationPresent(injectSpy)).toList();
if (!injectSpies.isEmpty()) {
throw new IllegalStateException("@InjectSpy is not supported by QuarkusComponentTest: " + injectSpies);
}
}
return found;
}
@SuppressWarnings("unchecked")
private static Class<? extends Annotation> loadInjectSpy() {
try {
return (Class<? extends Annotation>) Class.forName("io.quarkus.test.junit.mockito.InjectSpy");
} catch (Throwable e) {
return null;
}
}
static final Predicate<Parameter> BUILTIN_PARAMETER = new Predicate<Parameter>() {
@Override
public boolean test(Parameter parameter) {
if (parameter.isAnnotationPresent(TempDir.class)) {
return true;
}
java.lang.reflect.Type type = parameter.getParameterizedType();
return type.equals(TestInfo.class) || type.equals(RepetitionInfo.class) || type.equals(TestReporter.class);
}
};
private static List<Parameter> findInjectParams(Class<?> testClass) {
List<Method> testMethods = findMethods(testClass, QuarkusComponentTestExtension::isTestMethod);
List<Parameter> ret = new ArrayList<>();
for (Method method : testMethods) {
for (Parameter param : method.getParameters()) {
if (BUILTIN_PARAMETER.test(param)
|| param.isAnnotationPresent(SkipInject.class)) {
continue;
}
ret.add(param);
}
}
return ret;
}
private static List<Parameter> findInjectMockParams(Class<?> testClass) {
List<Method> testMethods = findMethods(testClass, QuarkusComponentTestExtension::isTestMethod);
List<Parameter> ret = new ArrayList<>();
for (Method method : testMethods) {
for (Parameter param : method.getParameters()) {
if (param.isAnnotationPresent(InjectMock.class)
&& !BUILTIN_PARAMETER.test(param)) {
ret.add(param);
}
}
}
return ret;
}
static boolean isTestMethod(Executable method) {
return method.isAnnotationPresent(Test.class)
|| method.isAnnotationPresent(ParameterizedTest.class)
|| method.isAnnotationPresent(RepeatedTest.class);
}
private static List<Field> findFields(Class<?> testClass, List<Class<? extends Annotation>> annotations) {
List<Field> fields = new ArrayList<>();
Class<?> current = testClass;
while (current.getSuperclass() != null) {
for (Field field : current.getDeclaredFields()) {
for (Class<? extends Annotation> annotation : annotations) {
if (field.isAnnotationPresent(annotation)) {
fields.add(field);
break;
}
}
}
current = current.getSuperclass();
}
return fields;
}
private static List<Method> findMethods(Class<?> testClass, Predicate<Method> methodPredicate) {
List<Method> methods = new ArrayList<>();
Class<?> current = testClass;
while (current.getSuperclass() != null) {
for (Method method : current.getDeclaredMethods()) {
if (methodPredicate.test(method)) {
methods.add(method);
}
}
current = current.getSuperclass();
}
return methods;
}
private static Set<AnnotationInstance> getQualifiers(AnnotatedElement element, Collection<DotName> qualifiers) {
Set<AnnotationInstance> ret = new HashSet<>();
Annotation[] annotations = element.getDeclaredAnnotations();
for (Annotation annotation : annotations) {
if (qualifiers.contains(DotName.createSimple(annotation.annotationType()))) {
ret.add(Annotations.jandexAnnotation(annotation));
}
}
return ret;
}
private static boolean isListRequiredType(java.lang.reflect.Type type) {
if (type instanceof ParameterizedType) {
final ParameterizedType parameterizedType = (ParameterizedType) type;
return List.class.equals(parameterizedType.getRawType());
}
return false;
}
static boolean isListAllInjectionPoint(java.lang.reflect.Type requiredType, Annotation[] qualifiers,
AnnotatedElement annotatedElement) {
if (qualifiers.length > 0 && Arrays.stream(qualifiers).anyMatch(All.Literal.INSTANCE::equals)) {
if (!isListRequiredType(requiredType)) {
throw new IllegalStateException("Invalid injection point type: " + annotatedElement);
}
return true;
}
return false;
}
static final DotName ALL_NAME = DotName.createSimple(All.class);
static void adaptListAllQualifiers(Set<AnnotationInstance> qualifiers) {
// Remove @All and add @Default if empty
qualifiers.removeIf(a -> a.name().equals(ALL_NAME));
if (qualifiers.isEmpty()) {
qualifiers.add(AnnotationInstance.builder(Default.class).build());
}
}
static java.lang.reflect.Type getFirstActualTypeArgument(java.lang.reflect.Type requiredType) {
if (requiredType instanceof ParameterizedType) {
final ParameterizedType parameterizedType = (ParameterizedType) requiredType;
// List<String> -> String
return parameterizedType.getActualTypeArguments()[0];
}
return null;
}
private static boolean injectionPointMatchesBean(java.lang.reflect.Type injectionPointType,
AnnotatedElement annotatedElement,
List<DotName> allQualifiers, BeanResolver beanResolver, BeanInfo bean) {
Type requiredType;
Set<AnnotationInstance> requiredQualifiers = getQualifiers(annotatedElement, allQualifiers);
if (isListAllInjectionPoint(injectionPointType,
Arrays.stream(annotatedElement.getAnnotations())
.filter(a -> allQualifiers.contains(DotName.createSimple(a.annotationType())))
.toArray(Annotation[]::new),
annotatedElement)) {
requiredType = Types.jandexType(getFirstActualTypeArgument(injectionPointType));
adaptListAllQualifiers(requiredQualifiers);
} else if (Instance.class.isAssignableFrom(QuarkusComponentTestConfiguration.getRawType(injectionPointType))) {
requiredType = Types.jandexType(getFirstActualTypeArgument(injectionPointType));
} else {
requiredType = Types.jandexType(injectionPointType);
}
return beanResolver.matches(bean, requiredType, requiredQualifiers);
}
private static final String QUARKUS_TEST_COMPONENT_OUTPUT_DIRECTORY = "quarkus.test.component.output-directory";
private static File getTestOutputDirectory(Class<?> testClass) {
String outputDirectory = System.getProperty(QUARKUS_TEST_COMPONENT_OUTPUT_DIRECTORY);
File testOutputDirectory;
if (outputDirectory != null) {
testOutputDirectory = new File(outputDirectory);
} else {
// All below string transformations work with _URL encoded_ paths, where e.g.
// a space is replaced with %20. At the end, we feed this back to URI.create
// to make sure the encoding is dealt with properly, so we don't have to do this
// ourselves. Directly passing a URL-encoded string to the File() constructor
// does not work properly.
// org.acme.Foo -> org/acme/Foo. | found |
java | apache__camel | components/camel-consul/src/main/java/org/apache/camel/component/consul/endpoint/ConsulCatalogActions.java | {
"start": 863,
"end": 1176
} | interface ____ {
String REGISTER = "REGISTER";
String DEREGISTER = "DEREGISTER";
String LIST_DATACENTERS = "LIST_DATACENTERS";
String LIST_NODES = "LIST_NODES";
String LIST_SERVICES = "LIST_SERVICES";
String GET_SERVICE = "GET_SERVICE";
String GET_NODE = "GET_NODE";
}
| ConsulCatalogActions |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/type/filter/AnnotationTypeFilter.java | {
"start": 1585,
"end": 1732
} | class ____ details.
*
* @author Mark Fisher
* @author Ramnivas Laddad
* @author Juergen Hoeller
* @author Sam Brannen
* @since 2.5
*/
public | for |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ext/jdk8/DoubleStreamSerializer.java | {
"start": 456,
"end": 1337
} | class ____ extends StdSerializer<DoubleStream>
{
/**
* Singleton instance
*/
public static final DoubleStreamSerializer INSTANCE = new DoubleStreamSerializer();
private DoubleStreamSerializer() {
super(DoubleStream.class);
}
@Override
public void serialize(DoubleStream stream, JsonGenerator g, SerializationContext ctxt)
throws JacksonException
{
try (final DoubleStream ds = stream) {
g.writeStartArray(ds);
ds.forEach(value -> {
g.writeNumber(value);
});
g.writeEndArray();
} catch (Exception e) {
// For most regular serializers we won't both handling but streams are typically
// root values so
wrapAndThrow(ctxt, e, stream, g.streamWriteContext().getCurrentIndex());
}
}
}
| DoubleStreamSerializer |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/TopologyTest.java | {
"start": 17128,
"end": 123169
} | class ____ implements ProcessorSupplier<Object, Object, Object, Object> {
static final String STORE_NAME = "store";
@Override
public Processor<Object, Object, Object, Object> get() {
return new Processor<>() {
@Override
public void init(final ProcessorContext<Object, Object> context) {
context.getStateStore(STORE_NAME);
}
@Override
public void process(final Record<Object, Object> record) { }
};
}
}
@Test
public void shouldNotAllowToAddGlobalStoreWithSourceNameEqualsProcessorName() {
assertThrows(TopologyException.class, () -> topology.addGlobalStore(
globalStoreBuilder,
"sameName",
null,
null,
"anyTopicName",
"sameName",
new MockProcessorSupplier<>()));
}
@Test
public void shouldDescribeEmptyTopology() {
assertThat(topology.describe(), equalTo(expectedDescription));
}
@Test
public void sinkShouldReturnNullTopicWithDynamicRouting() {
final TopologyDescription.Sink expectedSinkNode =
new InternalTopologyBuilder.Sink<>("sink", (key, value, record) -> record.topic() + "-" + key);
assertThat(expectedSinkNode.topic(), equalTo(null));
}
@Test
public void sinkShouldReturnTopicNameExtractorWithDynamicRouting() {
final TopicNameExtractor<?, ?> topicNameExtractor = (key, value, record) -> record.topic() + "-" + key;
final TopologyDescription.Sink expectedSinkNode =
new InternalTopologyBuilder.Sink<>("sink", topicNameExtractor);
assertThat(expectedSinkNode.topicNameExtractor(), equalTo(topicNameExtractor));
}
@Test
public void singleSourceShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic");
expectedDescription.addSubtopology(
new SubtopologyDescription(0,
Collections.singleton(expectedSourceNode)));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void singleSourceWithListOfTopicsShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic1", "topic2", "topic3");
expectedDescription.addSubtopology(
new SubtopologyDescription(0,
Collections.singleton(expectedSourceNode)));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void singleSourcePatternShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", Pattern.compile("topic[0-9]"));
expectedDescription.addSubtopology(
new SubtopologyDescription(0,
Collections.singleton(expectedSourceNode)));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void multipleSourcesShouldHaveDistinctSubtopologies() {
final TopologyDescription.Source expectedSourceNode1 = addSource("source1", "topic1");
expectedDescription.addSubtopology(
new SubtopologyDescription(0,
Collections.singleton(expectedSourceNode1)));
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", "topic2");
expectedDescription.addSubtopology(
new SubtopologyDescription(1,
Collections.singleton(expectedSourceNode2)));
final TopologyDescription.Source expectedSourceNode3 = addSource("source3", "topic3");
expectedDescription.addSubtopology(
new SubtopologyDescription(2,
Collections.singleton(expectedSourceNode3)));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void sourceAndProcessorShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic");
final TopologyDescription.Processor expectedProcessorNode = addProcessor("processor", expectedSourceNode);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode);
allNodes.add(expectedProcessorNode);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void sourceAndProcessorWithStateShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic");
final String[] store = new String[] {"store"};
final TopologyDescription.Processor expectedProcessorNode =
addProcessorWithNewStore("processor", store, expectedSourceNode);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode);
allNodes.add(expectedProcessorNode);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void sourceAndProcessorWithMultipleStatesShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic");
final String[] stores = new String[] {"store1", "store2"};
final TopologyDescription.Processor expectedProcessorNode =
addProcessorWithNewStore("processor", stores, expectedSourceNode);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode);
allNodes.add(expectedProcessorNode);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void sourceWithMultipleProcessorsShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode = addSource("source", "topic");
final TopologyDescription.Processor expectedProcessorNode1 = addProcessor("processor1", expectedSourceNode);
final TopologyDescription.Processor expectedProcessorNode2 = addProcessor("processor2", expectedSourceNode);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode);
allNodes.add(expectedProcessorNode1);
allNodes.add(expectedProcessorNode2);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void processorWithMultipleSourcesShouldHaveSingleSubtopology() {
final TopologyDescription.Source expectedSourceNode1 = addSource("source1", "topic0");
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", Pattern.compile("topic[1-9]"));
final TopologyDescription.Processor expectedProcessorNode = addProcessor("processor", expectedSourceNode1, expectedSourceNode2);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode1);
allNodes.add(expectedSourceNode2);
allNodes.add(expectedProcessorNode);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void multipleSourcesWithProcessorsShouldHaveDistinctSubtopologies() {
final TopologyDescription.Source expectedSourceNode1 = addSource("source1", "topic1");
final TopologyDescription.Processor expectedProcessorNode1 = addProcessor("processor1", expectedSourceNode1);
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", "topic2");
final TopologyDescription.Processor expectedProcessorNode2 = addProcessor("processor2", expectedSourceNode2);
final TopologyDescription.Source expectedSourceNode3 = addSource("source3", "topic3");
final TopologyDescription.Processor expectedProcessorNode3 = addProcessor("processor3", expectedSourceNode3);
final Set<TopologyDescription.Node> allNodes1 = new HashSet<>();
allNodes1.add(expectedSourceNode1);
allNodes1.add(expectedProcessorNode1);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes1));
final Set<TopologyDescription.Node> allNodes2 = new HashSet<>();
allNodes2.add(expectedSourceNode2);
allNodes2.add(expectedProcessorNode2);
expectedDescription.addSubtopology(new SubtopologyDescription(1, allNodes2));
final Set<TopologyDescription.Node> allNodes3 = new HashSet<>();
allNodes3.add(expectedSourceNode3);
allNodes3.add(expectedProcessorNode3);
expectedDescription.addSubtopology(new SubtopologyDescription(2, allNodes3));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void multipleSourcesWithSinksShouldHaveDistinctSubtopologies() {
final TopologyDescription.Source expectedSourceNode1 = addSource("source1", "topic1");
final TopologyDescription.Sink expectedSinkNode1 = addSink("sink1", "sinkTopic1", expectedSourceNode1);
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", "topic2");
final TopologyDescription.Sink expectedSinkNode2 = addSink("sink2", "sinkTopic2", expectedSourceNode2);
final TopologyDescription.Source expectedSourceNode3 = addSource("source3", "topic3");
final TopologyDescription.Sink expectedSinkNode3 = addSink("sink3", "sinkTopic3", expectedSourceNode3);
final Set<TopologyDescription.Node> allNodes1 = new HashSet<>();
allNodes1.add(expectedSourceNode1);
allNodes1.add(expectedSinkNode1);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes1));
final Set<TopologyDescription.Node> allNodes2 = new HashSet<>();
allNodes2.add(expectedSourceNode2);
allNodes2.add(expectedSinkNode2);
expectedDescription.addSubtopology(new SubtopologyDescription(1, allNodes2));
final Set<TopologyDescription.Node> allNodes3 = new HashSet<>();
allNodes3.add(expectedSourceNode3);
allNodes3.add(expectedSinkNode3);
expectedDescription.addSubtopology(new SubtopologyDescription(2, allNodes3));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void processorsWithSameSinkShouldHaveSameSubtopology() {
final TopologyDescription.Source expectedSourceNode1 = addSource("source", "topic");
final TopologyDescription.Processor expectedProcessorNode1 = addProcessor("processor1", expectedSourceNode1);
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", "topic2");
final TopologyDescription.Processor expectedProcessorNode2 = addProcessor("processor2", expectedSourceNode2);
final TopologyDescription.Source expectedSourceNode3 = addSource("source3", "topic3");
final TopologyDescription.Processor expectedProcessorNode3 = addProcessor("processor3", expectedSourceNode3);
final TopologyDescription.Sink expectedSinkNode = addSink(
"sink",
"sinkTopic",
expectedProcessorNode1,
expectedProcessorNode2,
expectedProcessorNode3);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode1);
allNodes.add(expectedProcessorNode1);
allNodes.add(expectedSourceNode2);
allNodes.add(expectedProcessorNode2);
allNodes.add(expectedSourceNode3);
allNodes.add(expectedProcessorNode3);
allNodes.add(expectedSinkNode);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void processorsWithSharedStateShouldHaveSameSubtopology() {
final String[] store1 = new String[] {"store1"};
final String[] store2 = new String[] {"store2"};
final String[] bothStores = new String[] {store1[0], store2[0]};
final TopologyDescription.Source expectedSourceNode1 = addSource("source", "topic");
final TopologyDescription.Processor expectedProcessorNode1 =
addProcessorWithNewStore("processor1", store1, expectedSourceNode1);
final TopologyDescription.Source expectedSourceNode2 = addSource("source2", "topic2");
final TopologyDescription.Processor expectedProcessorNode2 =
addProcessorWithNewStore("processor2", store2, expectedSourceNode2);
final TopologyDescription.Source expectedSourceNode3 = addSource("source3", "topic3");
final TopologyDescription.Processor expectedProcessorNode3 =
addProcessorWithExistingStore("processor3", bothStores, expectedSourceNode3);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSourceNode1);
allNodes.add(expectedProcessorNode1);
allNodes.add(expectedSourceNode2);
allNodes.add(expectedProcessorNode2);
allNodes.add(expectedSourceNode3);
allNodes.add(expectedProcessorNode3);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void shouldDescribeGlobalStoreTopology() {
addGlobalStoreToTopologyAndExpectedDescription("globalStore", "source", "globalTopic", "processor", 0);
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void shouldDescribeMultipleGlobalStoreTopology() {
addGlobalStoreToTopologyAndExpectedDescription("globalStore1", "source1", "globalTopic1", "processor1", 0);
addGlobalStoreToTopologyAndExpectedDescription("globalStore2", "source2", "globalTopic2", "processor2", 1);
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@SuppressWarnings("deprecation")
@Test
public void streamStreamJoinTopologyWithDefaultStoresNames() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
stream1.join(
stream2,
MockValueJoiner.TOSTRING_JOINER,
JoinWindows.of(ofMillis(100)),
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [KSTREAM-JOINTHIS-0000000004-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [KSTREAM-JOINOTHER-0000000005-store])\n" +
" --> KSTREAM-JOINOTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINOTHER-0000000005 (stores: [KSTREAM-JOINTHIS-0000000004-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [KSTREAM-JOINOTHER-0000000005-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-JOINOTHER-0000000005\n\n",
describe.toString());
}
@SuppressWarnings("deprecation")
@Test
public void streamStreamJoinTopologyWithCustomStoresNames() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
stream1.join(
stream2,
MockValueJoiner.TOSTRING_JOINER,
JoinWindows.of(ofMillis(100)),
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
.withStoreName("custom-name"));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [custom-name-this-join-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [custom-name-other-join-store])\n" +
" --> KSTREAM-JOINOTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINOTHER-0000000005 (stores: [custom-name-this-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [custom-name-other-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-JOINOTHER-0000000005\n\n",
describe.toString());
}
@SuppressWarnings("deprecation")
@Test
public void streamStreamJoinTopologyWithCustomStoresSuppliers() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
final JoinWindows joinWindows = JoinWindows.of(ofMillis(100));
final WindowBytesStoreSupplier thisStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
final WindowBytesStoreSupplier otherStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store-other",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
stream1.join(
stream2,
MockValueJoiner.TOSTRING_JOINER,
joinWindows,
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
.withThisStoreSupplier(thisStoreSupplier)
.withOtherStoreSupplier(otherStoreSupplier));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [in-memory-join-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [in-memory-join-store-other])\n" +
" --> KSTREAM-JOINOTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINOTHER-0000000005 (stores: [in-memory-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [in-memory-join-store-other])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-JOINOTHER-0000000005\n\n",
describe.toString());
}
@Test
public void streamStreamLeftJoinTopologyWithDefaultStoresNames() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
stream1.leftJoin(
stream2,
MockValueJoiner.TOSTRING_JOINER,
JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [KSTREAM-JOINTHIS-0000000004-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [KSTREAM-OUTEROTHER-0000000005-store])\n" +
" --> KSTREAM-OUTEROTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [KSTREAM-OUTEROTHER-0000000005-store, KSTREAM-OUTERSHARED-0000000004-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [KSTREAM-JOINTHIS-0000000004-store, KSTREAM-OUTERSHARED-0000000004-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n",
describe.toString());
}
@Test
public void streamStreamLeftJoinTopologyWithCustomStoresNames() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
stream1.leftJoin(
stream2,
MockValueJoiner.TOSTRING_JOINER,
JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
.withStoreName("custom-name"));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [custom-name-this-join-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [custom-name-outer-other-join-store])\n" +
" --> KSTREAM-OUTEROTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [custom-name-outer-other-join-store, custom-name-left-shared-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [custom-name-this-join-store, custom-name-left-shared-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n",
describe.toString());
}
@Test
public void streamStreamLeftJoinTopologyWithCustomStoresSuppliers() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
final JoinWindows joinWindows = JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100));
final WindowBytesStoreSupplier thisStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
final WindowBytesStoreSupplier otherStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store-other",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
stream1.leftJoin(
stream2,
MockValueJoiner.TOSTRING_JOINER,
joinWindows,
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
.withThisStoreSupplier(thisStoreSupplier)
.withOtherStoreSupplier(otherStoreSupplier));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [in-memory-join-store])\n" +
" --> KSTREAM-JOINTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [in-memory-join-store-other])\n" +
" --> KSTREAM-OUTEROTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-JOINTHIS-0000000004 (stores: [in-memory-join-store-other, in-memory-join-store-left-shared-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [in-memory-join-store, in-memory-join-store-left-shared-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n",
describe.toString());
}
@Test
public void streamStreamOuterJoinTopologyWithDefaultStoresNames() {
    // Without custom store names or suppliers, the outer-join state stores get
    // processor-derived default names (KSTREAM-OUTER*-<id>-store).
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<Integer, String> left = streamsBuilder.stream("input-topic1");
    final KStream<Integer, String> right = streamsBuilder.stream("input-topic2");
    left.outerJoin(
        right,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
        " --> KSTREAM-WINDOWED-0000000002\n" +
        " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
        " --> KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-WINDOWED-0000000002 (stores: [KSTREAM-OUTERTHIS-0000000004-store])\n" +
        " --> KSTREAM-OUTERTHIS-0000000004\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: KSTREAM-WINDOWED-0000000003 (stores: [KSTREAM-OUTEROTHER-0000000005-store])\n" +
        " --> KSTREAM-OUTEROTHER-0000000005\n" +
        " <-- KSTREAM-SOURCE-0000000001\n" +
        " Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [KSTREAM-OUTERTHIS-0000000004-store, KSTREAM-OUTERSHARED-0000000004-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-OUTERTHIS-0000000004 (stores: [KSTREAM-OUTEROTHER-0000000005-store, KSTREAM-OUTERSHARED-0000000004-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000002\n" +
        " Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
        " --> none\n" +
        " <-- KSTREAM-OUTERTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n";
    assertEquals(expected, streamsBuilder.build().describe().toString());
}
@Test
public void streamStreamOuterJoinTopologyWithCustomStoresNames() {
    // A custom StreamJoined store name becomes the prefix of all three
    // outer-join stores (this / other / shared).
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<Integer, String> left = streamsBuilder.stream("input-topic1");
    final KStream<Integer, String> right = streamsBuilder.stream("input-topic2");
    left.outerJoin(
        right,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
            .withStoreName("custom-name"));
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
        " --> KSTREAM-WINDOWED-0000000002\n" +
        " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
        " --> KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-WINDOWED-0000000002 (stores: [custom-name-outer-this-join-store])\n" +
        " --> KSTREAM-OUTERTHIS-0000000004\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: KSTREAM-WINDOWED-0000000003 (stores: [custom-name-outer-other-join-store])\n" +
        " --> KSTREAM-OUTEROTHER-0000000005\n" +
        " <-- KSTREAM-SOURCE-0000000001\n" +
        " Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [custom-name-outer-this-join-store, custom-name-outer-shared-join-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-OUTERTHIS-0000000004 (stores: [custom-name-outer-other-join-store, custom-name-outer-shared-join-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000002\n" +
        " Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
        " --> none\n" +
        " <-- KSTREAM-OUTERTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n";
    assertEquals(expected, streamsBuilder.build().describe().toString());
}
@Test
public void streamStreamOuterJoinTopologyWithCustomStoresSuppliers() {
    // Explicit window-store suppliers: the supplier names replace the default
    // join store names; the shared outer store derives from the this-store name.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<Integer, String> left = streamsBuilder.stream("input-topic1");
    final KStream<Integer, String> right = streamsBuilder.stream("input-topic2");
    final JoinWindows windows = JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100));
    // Retention must cover the window size plus any grace period.
    final Duration retention = Duration.ofMillis(windows.size() + windows.gracePeriodMs());
    final Duration windowSize = Duration.ofMillis(windows.size());
    final WindowBytesStoreSupplier leftStoreSupplier =
        Stores.inMemoryWindowStore("in-memory-join-store", retention, windowSize, true);
    final WindowBytesStoreSupplier rightStoreSupplier =
        Stores.inMemoryWindowStore("in-memory-join-store-other", retention, windowSize, true);
    left.outerJoin(
        right,
        MockValueJoiner.TOSTRING_JOINER,
        windows,
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
            .withThisStoreSupplier(leftStoreSupplier)
            .withOtherStoreSupplier(rightStoreSupplier));
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
        " --> KSTREAM-WINDOWED-0000000002\n" +
        " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
        " --> KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-WINDOWED-0000000002 (stores: [in-memory-join-store])\n" +
        " --> KSTREAM-OUTERTHIS-0000000004\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: KSTREAM-WINDOWED-0000000003 (stores: [in-memory-join-store-other])\n" +
        " --> KSTREAM-OUTEROTHER-0000000005\n" +
        " <-- KSTREAM-SOURCE-0000000001\n" +
        " Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [in-memory-join-store-outer-shared-join-store, in-memory-join-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000003\n" +
        " Processor: KSTREAM-OUTERTHIS-0000000004 (stores: [in-memory-join-store-other, in-memory-join-store-outer-shared-join-store])\n" +
        " --> KSTREAM-MERGE-0000000006\n" +
        " <-- KSTREAM-WINDOWED-0000000002\n" +
        " Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
        " --> none\n" +
        " <-- KSTREAM-OUTERTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n";
    assertEquals(expected, streamsBuilder.build().describe().toString());
}
@Test
public void topologyWithDynamicRoutingShouldDescribeExtractorClass() {
    // A sink built with a TopicNameExtractor is described via the extractor's toString().
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final TopicNameExtractor<Object, Object> extractor = new TopicNameExtractor<>() {
        @Override
        public String extract(final Object key, final Object value, final RecordContext recordContext) {
            return recordContext.topic() + "-" + key;
        }
        @Override
        public String toString() {
            return "anonymous topic name extractor. topic is [recordContext.topic()]-[key]";
        }
    };
    streamsBuilder.stream("input-topic").to(extractor);
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-SINK-0000000001\n" +
        " Sink: KSTREAM-SINK-0000000001 (extractor class: anonymous topic name extractor. topic is [recordContext.topic()]-[key])\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, streamsBuilder.build().describe().toString());
}
@Test
public void kGroupedStreamZeroArgCountShouldPreserveTopologyStructure() {
    // Zero-arg count(): the state store gets a generated name and, with the
    // default config, is backed by a persistent (RocksDB) store.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void kGroupedStreamNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // A named Materialized store keeps its user-supplied name in the description.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count-store")
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000001\n" +
        " Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void kGroupedStreamAnonymousMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // An anonymous Materialized (serdes only, no name) falls back to a
    // generated state-store name.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>with(null, Serdes.Long())
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000003\n" +
        " Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void kGroupedStreamAnonymousStoreTypedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Materialized.as(storeType) sets only the store type; the store name is
    // still generated.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .count(Materialized.as(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000003\n" +
        " Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void kGroupedStreamZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
    // A topology-level default-store override (in-memory) applies to the
    // zero-arg count() store, so no persistent local store is created.
    final StreamsBuilder streamsBuilder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topology: my-topology:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void timeWindowZeroArgCountShouldPreserveTopologyStructure() {
    // Time-windowed zero-arg count(): generated store name, persistent by default.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void timeWindowNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Named window-store materialization keeps the user-supplied store name.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>as("count-store").withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000001\n" +
        " Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void timeWindowAnonymousMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Anonymous window-store materialization (serdes only) falls back to a
    // generated store name.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>with(null, Serdes.Long())
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000003\n" +
        " Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void timeWindowAnonymousStoreTypeMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Materialized.as(storeType) for a windowed count: store name still generated.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .count(Materialized.as(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000003\n" +
        " Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void timeWindowZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
    // With the default store overridden to in-memory at the topology level,
    // the windowed count() no longer yields a persistent local store.
    final StreamsBuilder streamsBuilder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topology: my-topology:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void slidingWindowZeroArgCountShouldPreserveTopologyStructure() {
    // Sliding-window zero-arg count(): generated store name, persistent by default.
    // Bug fix: this test previously built the topology with TimeWindows,
    // duplicating timeWindowZeroArgCountShouldPreserveTopologyStructure and
    // leaving the SlidingWindows default path untested. Use SlidingWindows as
    // the test name implies (cf. the ...WithTopologyConfig variant below);
    // the generated node/store names, and thus the expected description,
    // are unchanged.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .count();
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    // The default sliding-window store is persistent (RocksDB-backed).
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void slidingWindowNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Sliding-window count with a named Materialized store.
    // Bug fix: this test previously used TimeWindows, duplicating the
    // time-window variant instead of exercising SlidingWindows as its name
    // states. SlidingWindows produces the same node names and also uses a
    // WindowStore, so the expected description is unchanged.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>as("count-store").withStoreType(storeType));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000001\n" +
        " Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    // Only ROCKS_DB materializes a persistent local store.
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void slidingWindowZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
    // Sliding-window count with the default store overridden to in-memory:
    // the topology shape is unchanged, but no persistent store is created.
    final StreamsBuilder streamsBuilder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topology: my-topology:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void timeWindowedCogroupedZeroArgCountShouldPreserveTopologyStructure() {
    // Cogrouped time-windowed aggregate adds a COGROUPKSTREAM-MERGE node
    // after the aggregate; the default store is persistent.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .aggregate(() -> "");
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000003\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void timeWindowedCogroupedNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Cogrouped time-windowed aggregate with a named Materialized store.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", Materialized.<Object, Object, WindowStore<Bytes, byte[]>>as("aggregate-store")
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000001\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000001 (stores: [aggregate-store])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000002\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000002 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000001\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void timeWindowedCogroupedZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
// Cogrouped time-windowed aggregate with the topology-level default store
// overridden to in-memory: the topology shape (and generated names) are
// identical to the default case, but no persistent local store is created.
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.stream("input-topic")
.groupByKey()
.cogroup((key, value, aggregate) -> value)
.windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
.aggregate(() -> "");
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
// Description is prefixed "Topology: my-topology:" because the
// TopologyConfig supplies a topology name.
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
" Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
" --> COGROUPKSTREAM-MERGE-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n",
describe.toString()
);
// In-memory override means no persistent (RocksDB) store.
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void slidingWindowedCogroupedZeroArgCountShouldPreserveTopologyStructure() {
    // Cogrouped sliding-windowed aggregate: same node layout as the
    // time-windowed cogroup case; default store is persistent.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .aggregate(() -> "");
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000003\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void slidingWindowedCogroupedNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Cogrouped sliding-windowed aggregate with a named Materialized store.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", Materialized.<Object, Object, WindowStore<Bytes, byte[]>>as("aggregate-store")
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000001\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000001 (stores: [aggregate-store])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000002\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000002 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000001\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void slidingWindowedCogroupedZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
// Cogrouped sliding-windowed aggregate with the topology-level default
// store overridden to in-memory: topology shape is unchanged, but no
// persistent local store is created.
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.stream("input-topic")
.groupByKey()
.cogroup((key, value, aggregate) -> value)
.windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
.aggregate(() -> "");
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
// Description is prefixed "Topology: my-topology:" because the
// TopologyConfig supplies a topology name.
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
" Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
" --> COGROUPKSTREAM-MERGE-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n",
describe.toString()
);
// In-memory override means no persistent (RocksDB) store.
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void sessionWindowedCogroupedZeroArgCountShouldPreserveTopologyStructure() {
    // Cogrouped session-windowed aggregate (with a session merger):
    // same node layout as the other cogroup cases; default store persistent.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", (aggKey, aggOne, aggTwo) -> "");
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000003\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void sessionWindowedCogroupedNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Cogrouped session-windowed aggregate with a named Materialized SessionStore.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", (aggKey, aggOne, aggTwo) -> "", Materialized.<Object, Object, SessionStore<Bytes, byte[]>>as("aggregate-store")
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> COGROUPKSTREAM-AGGREGATE-0000000001\n" +
        " Processor: COGROUPKSTREAM-AGGREGATE-0000000001 (stores: [aggregate-store])\n" +
        " --> COGROUPKSTREAM-MERGE-0000000002\n" +
        " <-- KSTREAM-SOURCE-0000000000\n" +
        " Processor: COGROUPKSTREAM-MERGE-0000000002 (stores: [])\n" +
        " --> none\n" +
        " <-- COGROUPKSTREAM-AGGREGATE-0000000001\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void sessionWindowedCogroupedZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
// Cogrouped session-windowed aggregate with the topology-level default
// store overridden to in-memory: topology shape is unchanged, but no
// persistent local store is created.
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.stream("input-topic")
.groupByKey()
.cogroup((key, value, aggregate) -> value)
.windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
.aggregate(() -> "", (aggKey, aggOne, aggTwo) -> "");
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
// Description is prefixed "Topology: my-topology:" because the
// TopologyConfig supplies a topology name.
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
" Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
" --> COGROUPKSTREAM-MERGE-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n",
describe.toString()
);
// In-memory override means no persistent (RocksDB) store.
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void sessionWindowZeroArgCountShouldPreserveTopologyStructure() {
    // Session-windowed zero-arg count(): generated store name, persistent by default.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
        .count();
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000002\n" +
        " Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void sessionWindowNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
    // Session-windowed count with a named Materialized SessionStore.
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.stream("input-topic")
        .groupByKey()
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
        .count(Materialized.<Object, Long, SessionStore<Bytes, byte[]>>as("count-store")
            .withStoreType(storeType));
    final Topology builtTopology = streamsBuilder.build();
    final String expected =
        "Topologies:\n" +
        " Sub-topology: 0\n" +
        " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        " --> KSTREAM-AGGREGATE-0000000001\n" +
        " Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
        " --> none\n" +
        " <-- KSTREAM-SOURCE-0000000000\n\n";
    assertEquals(expected, builtTopology.describe().toString());
    // Only ROCKS_DB materializes a persistent local store.
    builtTopology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(builtTopology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void sessionWindowAnonymousMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
.count(Materialized.<Object, Long, SessionStore<Bytes, byte[]>>with(null, Serdes.Long())
.withStoreType(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void sessionWindowAnonymousStoreTypedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
.count(Materialized.as(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@Test
public void sessionWindowZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.stream("input-topic")
.groupByKey()
.windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
.count();
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000002\n" +
" Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
@Test
public void tableZeroArgCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.table("input-topic")
.groupBy((key, value) -> null)
.count();
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" +
" --> KTABLE-SELECT-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-SELECT-0000000003 (stores: [])\n" +
" --> KSTREAM-SINK-0000000005\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
" Sink: KSTREAM-SINK-0000000005 (topic: KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition)\n" +
" <-- KTABLE-SELECT-0000000003\n" +
"\n" +
" Sub-topology: 1\n" +
" Source: KSTREAM-SOURCE-0000000006 (topics: [KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition])\n" +
" --> KTABLE-AGGREGATE-0000000007\n" +
" Processor: KTABLE-AGGREGATE-0000000007 (stores: [KTABLE-AGGREGATE-STATE-STORE-0000000004])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000006\n" +
"\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology();
// one for ktable, and one for count operation
assertThat(processorTopology.stateStores().size(), is(2));
// ktable store is rocksDB (default)
assertThat(processorTopology.stateStores().get(0).persistent(), is(true));
// count store is rocksDB (default)
assertThat(processorTopology.stateStores().get(1).persistent(), is(true));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void tableNamedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
final StreamsBuilder builder = new StreamsBuilder();
builder.table("input-topic")
.groupBy((key, value) -> null)
.count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count-store")
.withStoreType(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" +
" --> KTABLE-SELECT-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-SELECT-0000000003 (stores: [])\n" +
" --> KSTREAM-SINK-0000000004\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
" Sink: KSTREAM-SINK-0000000004 (topic: count-store-repartition)\n" +
" <-- KTABLE-SELECT-0000000003\n" +
"\n" +
" Sub-topology: 1\n" +
" Source: KSTREAM-SOURCE-0000000005 (topics: [count-store-repartition])\n" +
" --> KTABLE-AGGREGATE-0000000006\n" +
" Processor: KTABLE-AGGREGATE-0000000006 (stores: [count-store])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000005\n" +
"\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology();
// one for ktable, and one for count operation
assertThat(processorTopology.stateStores().size(), is(2));
// ktable store is rocksDB (default)
assertThat(processorTopology.stateStores().get(0).persistent(), is(true));
// count store is storeType
assertThat(processorTopology.stateStores().get(1).persistent(), is(storeType == StoreType.ROCKS_DB));
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@EnumSource(StoreType.class)
public void tableNamedMaterializedCountWithTopologyConfigShouldPreserveTopologyStructure(final StoreType storeType) {
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.table("input-topic")
.groupBy((key, value) -> null)
// can still override the default store dynamically
.count(Materialized.as(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" +
" --> KTABLE-SELECT-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-SELECT-0000000003 (stores: [])\n" +
" --> KSTREAM-SINK-0000000005\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
" Sink: KSTREAM-SINK-0000000005 (topic: KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition)\n" +
" <-- KTABLE-SELECT-0000000003\n" +
"\n" +
" Sub-topology: 1\n" +
" Source: KSTREAM-SOURCE-0000000006 (topics: [KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition])\n" +
" --> KTABLE-AGGREGATE-0000000007\n" +
" Processor: KTABLE-AGGREGATE-0000000007 (stores: [KTABLE-AGGREGATE-STATE-STORE-0000000004])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000006\n" +
"\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology();
// one for ktable, and one for count operation
assertThat(processorTopology.stateStores().size(), is(2));
// ktable store is in-memory (default is in-memory)
assertThat(processorTopology.stateStores().get(0).persistent(), is(false));
// count store is storeType
assertThat(processorTopology.stateStores().get(1).persistent(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void tableAnonymousMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
final StreamsBuilder builder = new StreamsBuilder();
builder.table("input-topic")
.groupBy((key, value) -> null)
.count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>with(null, Serdes.Long())
.withStoreType(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" +
" --> KTABLE-SELECT-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-SELECT-0000000003 (stores: [])\n" +
" --> KSTREAM-SINK-0000000005\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
" Sink: KSTREAM-SINK-0000000005 (topic: KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition)\n" +
" <-- KTABLE-SELECT-0000000003\n" +
"\n" +
" Sub-topology: 1\n" +
" Source: KSTREAM-SOURCE-0000000006 (topics: [KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition])\n" +
" --> KTABLE-AGGREGATE-0000000007\n" +
" Processor: KTABLE-AGGREGATE-0000000007 (stores: [KTABLE-AGGREGATE-STATE-STORE-0000000004])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000006\n" +
"\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology();
// one for ktable, and one for count operation
assertThat(processorTopology.stateStores().size(), is(2));
// ktable store is rocksDB (default)
assertThat(processorTopology.stateStores().get(0).persistent(), is(true));
// count store is storeType
assertThat(processorTopology.stateStores().get(1).persistent(), is(storeType == StoreType.ROCKS_DB));
}
@ParameterizedTest
@EnumSource(StoreType.class)
public void tableAnonymousStoreTypedMaterializedCountShouldPreserveTopologyStructure(final StoreType storeType) {
final StreamsBuilder builder = new StreamsBuilder();
builder.table("input-topic")
.groupBy((key, value) -> null)
.count(Materialized.as(storeType));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" +
" --> KTABLE-SELECT-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-SELECT-0000000003 (stores: [])\n" +
" --> KSTREAM-SINK-0000000005\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
" Sink: KSTREAM-SINK-0000000005 (topic: KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition)\n" +
" <-- KTABLE-SELECT-0000000003\n" +
"\n" +
" Sub-topology: 1\n" +
" Source: KSTREAM-SOURCE-0000000006 (topics: [KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition])\n" +
" --> KTABLE-AGGREGATE-0000000007\n" +
" Processor: KTABLE-AGGREGATE-0000000007 (stores: [KTABLE-AGGREGATE-STATE-STORE-0000000004])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000006\n" +
"\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology();
// one for ktable, and one for count operation
assertThat(processorTopology.stateStores().size(), is(2));
// ktable store is rocksDB (default)
assertThat(processorTopology.stateStores().get(0).persistent(), is(true));
// count store is storeType
assertThat(processorTopology.stateStores().get(1).persistent(), is(storeType == StoreType.ROCKS_DB));
}
@Test
public void kTableNonMaterializedMapValuesShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.mapValues((readOnlyKey, value) -> null);
final TopologyDescription describe = builder.build().describe();
assertEquals("Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-MAPVALUES-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-MAPVALUES-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n\n", describe.toString());
}
@Test
public void kTableAnonymousMaterializedMapValuesShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.mapValues(
(readOnlyKey, value) -> null,
Materialized.<Object, Object, KeyValueStore<Bytes, byte[]>>with(null, null)
.withStoreType(Materialized.StoreType.IN_MEMORY));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-MAPVALUES-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
// previously, this was
// Processor: KTABLE-MAPVALUES-0000000004 (stores: [KTABLE-MAPVALUES-STATE-STORE-0000000003]
// but we added a change not to materialize non-queryable stores. This change shouldn't break compatibility.
" Processor: KTABLE-MAPVALUES-0000000004 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
@Test
public void kTableNamedMaterializedMapValuesShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.mapValues(
(readOnlyKey, value) -> null,
Materialized.<Object, Object, KeyValueStore<Bytes, byte[]>>as("store-name").withKeySerde(null).withValueSerde(null));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-MAPVALUES-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-MAPVALUES-0000000003 (stores: [store-name])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
@Test
public void kTableNonMaterializedFilterShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.filter((key, value) -> false);
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-FILTER-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-FILTER-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n\n",
describe.toString());
}
@Test
public void kTableAnonymousMaterializedFilterShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.filter((key, value) -> false, Materialized.with(null, null));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-FILTER-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
// Previously, this was
// Processor: KTABLE-FILTER-0000000004 (stores: [KTABLE-FILTER-STATE-STORE-0000000003]
// but we added a change not to materialize non-queryable stores. This change shouldn't break compatibility.
" Processor: KTABLE-FILTER-0000000004 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
@Test
public void kTableNamedMaterializedFilterShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.filter((key, value) -> false, Materialized.as("store-name"));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-FILTER-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-FILTER-0000000003 (stores: [store-name])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
@Test
public void topologyWithStaticTopicNameExtractorShouldRespectEqualHashcodeContract() {
final Topology topologyA = topologyWithStaticTopicName();
final Topology topologyB = topologyWithStaticTopicName();
assertThat(topologyA.describe(), equalTo(topologyB.describe()));
assertThat(topologyA.describe().hashCode(), equalTo(topologyB.describe().hashCode()));
}
private Topology topologyWithStaticTopicName() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("from-topic-name").to("to-topic-name");
return builder.build();
}
private TopologyDescription.Source addSource(final String sourceName,
final String... sourceTopic) {
topology.addSource((AutoOffsetReset) null, sourceName, null, null, null, sourceTopic);
return new InternalTopologyBuilder.Source(sourceName, Set.of(sourceTopic), null);
}
@SuppressWarnings("deprecation")
private TopologyDescription.Source addSource(final String sourceName,
final Pattern sourcePattern) {
// we still test the old `Topology.AutoOffsetReset` here, to increase test coverage
// (cf `addSource` about which used the new one)
// When can rewrite this to the new one, when the old one is removed
topology.addSource((Topology.AutoOffsetReset) null, sourceName, null, null, null, sourcePattern);
return new InternalTopologyBuilder.Source(sourceName, null, sourcePattern);
}
private TopologyDescription.Processor addProcessor(final String processorName,
final TopologyDescription.Node... parents) {
return addProcessorWithNewStore(processorName, new String[0], parents);
}
private TopologyDescription.Processor addProcessorWithNewStore(final String processorName,
final String[] storeNames,
final TopologyDescription.Node... parents) {
return addProcessorWithStore(processorName, storeNames, true, parents);
}
private TopologyDescription.Processor addProcessorWithExistingStore(final String processorName,
final String[] storeNames,
final TopologyDescription.Node... parents) {
return addProcessorWithStore(processorName, storeNames, false, parents);
}
private TopologyDescription.Processor addProcessorWithStore(final String processorName,
final String[] storeNames,
final boolean newStores,
final TopologyDescription.Node... parents) {
final String[] parentNames = new String[parents.length];
for (int i = 0; i < parents.length; ++i) {
parentNames[i] = parents[i].name();
}
topology.addProcessor(processorName, new MockApiProcessorSupplier<>(), parentNames);
if (newStores) {
for (final String store : storeNames) {
final StoreBuilder<?> storeBuilder = mock(StoreBuilder.class);
when(storeBuilder.name()).thenReturn(store);
topology.addStateStore(storeBuilder, processorName);
}
} else {
topology.connectProcessorAndStateStores(processorName, storeNames);
}
final TopologyDescription.Processor expectedProcessorNode =
new InternalTopologyBuilder.Processor(processorName, Set.of(storeNames));
for (final TopologyDescription.Node parent : parents) {
((InternalTopologyBuilder.AbstractNode) parent).addSuccessor(expectedProcessorNode);
((InternalTopologyBuilder.AbstractNode) expectedProcessorNode).addPredecessor(parent);
}
return expectedProcessorNode;
}
private TopologyDescription.Sink addSink(final String sinkName,
final String sinkTopic,
final TopologyDescription.Node... parents) {
final String[] parentNames = new String[parents.length];
for (int i = 0; i < parents.length; ++i) {
parentNames[i] = parents[i].name();
}
topology.addSink(sinkName, sinkTopic, null, null, null, parentNames);
final TopologyDescription.Sink expectedSinkNode =
new InternalTopologyBuilder.Sink<>(sinkName, sinkTopic);
for (final TopologyDescription.Node parent : parents) {
((InternalTopologyBuilder.AbstractNode) parent).addSuccessor(expectedSinkNode);
((InternalTopologyBuilder.AbstractNode) expectedSinkNode).addPredecessor(parent);
}
return expectedSinkNode;
}
private void addGlobalStoreToTopologyAndExpectedDescription(final String globalStoreName,
final String sourceName,
final String globalTopicName,
final String processorName,
final int id) {
final KeyValueStoreBuilder<?, ?> globalStoreBuilder = mock(KeyValueStoreBuilder.class);
when(globalStoreBuilder.name()).thenReturn(globalStoreName);
topology.addGlobalStore(
globalStoreBuilder,
sourceName,
null,
null,
null,
globalTopicName,
processorName,
new MockProcessorSupplier<>());
final TopologyDescription.GlobalStore expectedGlobalStore = new InternalTopologyBuilder.GlobalStore(
sourceName,
processorName,
globalStoreName,
globalTopicName,
id);
expectedDescription.addGlobalStore(expectedGlobalStore);
}
@Test
public void readOnlyStateStoresShouldHaveTheirOwnSubTopology() {
final String sourceName = "source";
final String storeName = "store";
final String topicName = "topic";
final String processorName = "processor";
final KeyValueStoreBuilder<?, ?> storeBuilder = mock(KeyValueStoreBuilder.class);
when(storeBuilder.name()).thenReturn(storeName);
topology.addReadOnlyStateStore(
storeBuilder,
sourceName,
null,
null,
null,
topicName,
processorName,
new MockProcessorSupplier<>());
final TopologyDescription.Source expectedSource = new InternalTopologyBuilder.Source(sourceName, Sets.newSet(topicName), null);
final TopologyDescription.Processor expectedProcessor = new InternalTopologyBuilder.Processor(processorName, Sets.newSet(storeName));
((InternalTopologyBuilder.AbstractNode) expectedSource).addSuccessor(expectedProcessor);
((InternalTopologyBuilder.AbstractNode) expectedProcessor).addPredecessor(expectedSource);
final Set<TopologyDescription.Node> allNodes = new HashSet<>();
allNodes.add(expectedSource);
allNodes.add(expectedProcessor);
expectedDescription.addSubtopology(new SubtopologyDescription(0, allNodes));
assertThat(topology.describe(), equalTo(expectedDescription));
assertThat(topology.describe().hashCode(), equalTo(expectedDescription.hashCode()));
}
@Test
public void readOnlyStateStoresShouldNotLog() {
final String sourceName = "source";
final String storeName = "store";
final String topicName = "topic";
final String processorName = "processor";
final KeyValueStoreBuilder<?, ?> storeBuilder = mock(KeyValueStoreBuilder.class);
when(storeBuilder.name()).thenReturn(storeName);
topology.addReadOnlyStateStore(
storeBuilder,
sourceName,
null,
null,
null,
topicName,
processorName,
new MockProcessorSupplier<>());
final StoreFactory stateStoreFactory = topology.internalTopologyBuilder.stateStores().get(storeName);
assertThat(stateStoreFactory.loggingEnabled(), equalTo(false));
}
@Test
public void shouldWrapProcessors() {
final Map<Object, Object> props = dummyStreamsConfigMap();
props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class);
final WrapperRecorder counter = new WrapperRecorder();
props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter);
final Topology topology = new Topology(new TopologyConfig(new StreamsConfig(props)));
// Add a bit of randomness to the lambda-created processors to avoid them being
// optimized into a shared instance that will cause the ApiUtils#checkSupplier
// call to fail
final Random random = new Random();
topology.addSource("source", "topic");
topology.addProcessor(
"p1",
() -> record -> System.out.println("Processing: " + random.nextInt()),
"source"
);
topology.addProcessor(
"p2",
() -> record -> System.out.println("Processing: " + random.nextInt()),
"p1"
);
topology.addProcessor(
"p3",
() -> record -> System.out.println("Processing: " + random.nextInt()),
"p2"
);
assertThat(counter.numWrappedProcessors(), is(3));
assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("p1", "p2", "p3"));
}
@SuppressWarnings("deprecation")
private TopologyConfig overrideDefaultStore(final String defaultStore) {
final Properties topologyOverrides = new Properties();
// change default store as in-memory
topologyOverrides.put(StreamsConfig.DEFAULT_DSL_STORE_CONFIG, defaultStore);
final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig());
return new TopologyConfig(
"my-topology",
config,
topologyOverrides);
}
}
| LocalMockProcessorSupplier |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/foreach/ForEachTest.java | {
"start": 1476,
"end": 4343
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create a SqlSessionFactory
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/foreach/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/foreach/CreateDB.sql");
}
@Test
void shouldGetAUser() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User testProfile = new User();
testProfile.setId(2);
User friendProfile = new User();
friendProfile.setId(6);
List<User> friendList = new ArrayList<>();
friendList.add(friendProfile);
testProfile.setFriendList(friendList);
User user = mapper.getUser(testProfile);
Assertions.assertEquals("User6", user.getName());
}
}
@Test
void shouldHandleComplexNullItem() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user1 = new User();
user1.setId(2);
user1.setName("User2");
List<User> users = new ArrayList<>();
users.add(user1);
users.add(null);
int count = mapper.countByUserList(users);
Assertions.assertEquals(1, count);
}
}
@Test
void shouldHandleMoreComplexNullItem() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user1 = new User();
User bestFriend = new User();
bestFriend.setId(5);
user1.setBestFriend(bestFriend);
List<User> users = new ArrayList<>();
users.add(user1);
users.add(null);
int count = mapper.countByBestFriend(users);
Assertions.assertEquals(1, count);
}
}
@Test
void nullItemInContext() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user1 = new User();
user1.setId(3);
List<User> users = new ArrayList<>();
users.add(user1);
users.add(null);
String name = mapper.selectWithNullItemCheck(users);
Assertions.assertEquals("User3", name);
}
}
@Test
void shouldReportMissingPropertyName() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
when(() -> mapper.typoInItemProperty(List.of(new User())));
then(caughtException()).isInstanceOf(PersistenceException.class).hasMessageContaining(
"There is no getter for property named 'idd' in ' | ForEachTest |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java | {
"start": 24060,
"end": 24136
} | class ____ extends BaseAnalyzerRule {
private static | ResolveMissingRefs |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3400/Issue3465.java | {
"start": 215,
"end": 1307
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSONObject jsonObj1 = new JSONObject();
JSONObject sonJsonObj1 = new JSONObject();
sonJsonObj1.put("dca0898f74b4cc6d0174b4cc77fd0005", "2ca0898f74b4cc6d0174b4cc77fd0005");
jsonObj1.put("issue", sonJsonObj1);
String rst1 = JSON.toJSONString(jsonObj1, JSON.DEFAULT_GENERATE_FEATURE | SerializerFeature.WRITE_MAP_NULL_FEATURES);
System.out.println(rst1);
JSONObject parse1 = JSON.parseObject(rst1);
System.out.println(parse1.toJSONString());
JSONObject jsonObj = new JSONObject();
JSONObject sonJsonObj = new JSONObject();
sonJsonObj.put("2ca0898f74b4cc6d0174b4cc77fd0005", "2ca0898f74b4cc6d0174b4cc77fd0005");
jsonObj.put("issue", sonJsonObj);
String rst = JSON.toJSONString(jsonObj, JSON.DEFAULT_GENERATE_FEATURE | SerializerFeature.WRITE_MAP_NULL_FEATURES);
System.out.println(rst);
JSONObject parse = JSON.parseObject(rst);
System.out.println(parse.toJSONString());
}
}
| Issue3465 |
java | google__guava | android/guava-tests/test/com/google/common/collect/ImmutableListCopyOfConcurrentlyModifiedInputTest.java | {
"start": 3749,
"end": 4401
} | interface ____ {
void perform(List<Integer> list);
}
static ListFrobber add(int element) {
return new ListFrobber() {
@Override
public void perform(List<Integer> list) {
list.add(0, element);
}
};
}
static ListFrobber remove() {
return new ListFrobber() {
@Override
public void perform(List<Integer> list) {
list.remove(0);
}
};
}
static ListFrobber nop() {
return new ListFrobber() {
@Override
public void perform(List<Integer> list) {}
};
}
/** A list that mutates itself after every call to each of its {@link List} methods. */
| ListFrobber |
java | quarkusio__quarkus | integration-tests/hibernate-search-orm-elasticsearch-tenancy/src/main/java/io/quarkus/it/hibernate/search/orm/elasticsearch/multitenancy/fruit/FruitResource.java | {
"start": 892,
"end": 3511
} | class ____ {
private static final Logger LOG = Logger.getLogger(FruitResource.class.getName());
@Inject
EntityManager entityManager;
@Inject
SearchSession searchSession;
@GET
@Path("/")
@Transactional
public Fruit[] getAll() {
return entityManager.createNamedQuery("Fruits.findAll", Fruit.class)
.getResultList().toArray(new Fruit[0]);
}
@GET
@Path("/{id}")
@Transactional
public Fruit findById(int id) {
Fruit entity = entityManager.find(Fruit.class, id);
if (entity == null) {
throw new WebApplicationException("Fruit with id of " + id + " does not exist.", 404);
}
return entity;
}
@POST
@Path("/")
@Transactional
public Response create(@NotNull Fruit fruit) {
if (fruit.getId() != null) {
throw new WebApplicationException("Id was invalidly set on request.", 422);
}
LOG.debugv("Create {0}", fruit.getName());
entityManager.persist(fruit);
return Response.ok(fruit).status(Response.Status.CREATED).build();
}
@PUT
@Path("/{id}")
@Transactional
public Fruit update(@NotNull @PathParam("id") int id, @NotNull Fruit fruit) {
if (fruit.getName() == null) {
throw new WebApplicationException("Fruit Name was not set on request.", 422);
}
Fruit entity = entityManager.find(Fruit.class, id);
if (entity == null) {
throw new WebApplicationException("Fruit with id of " + id + " does not exist.", 404);
}
entity.setName(fruit.getName());
LOG.debugv("Update #{0} {1}", fruit.getId(), fruit.getName());
return entity;
}
@DELETE
@Path("/{id}")
@Transactional
public Response delete(@NotNull @PathParam("id") int id) {
Fruit fruit = entityManager.getReference(Fruit.class, id);
if (fruit == null) {
throw new WebApplicationException("Fruit with id of " + id + " does not exist.", 404);
}
LOG.debugv("Delete #{0} {1}", fruit.getId(), fruit.getName());
entityManager.remove(fruit);
return Response.status(Response.Status.NO_CONTENT).build();
}
@GET
@Path("/search")
@Transactional
public Response search(@NotNull @QueryParam("terms") String terms) {
List<Fruit> list = searchSession.search(Fruit.class)
.where(f -> f.simpleQueryString().field("name").matching(terms))
.fetchAllHits();
return Response.status(Response.Status.OK).entity(list).build();
}
}
| FruitResource |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/orphan/elementcollection/EnrollableClass.java | {
"start": 336,
"end": 657
} | class ____ {
@Id
@Column(name = "id")
private String id;
@Column(name = "name")
private String name;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| EnrollableClass |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/GenericTypeNotMatchTest.java | {
"start": 516,
"end": 561
} | class ____<T> {
public T id;
}
}
| Base |
java | elastic__elasticsearch | x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCustomAuthenticatorIntegTests.java | {
"start": 2276,
"end": 8168
} | class ____ extends SecurityIntegTestCase {
private static final Authentication.RealmRef TEST_REALM_REF = new Authentication.RealmRef("cloud-saml", "saml", "test-node");
private static final String TEST_USERNAME = "spiderman";
private static final String TEST_ROLE_NAME = "admin";
private static final TestCustomTokenAuthenticator authenticator = new TestCustomTokenAuthenticator();
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
var plugins = new ArrayList<>(super.nodePlugins());
plugins.remove(LocalStateSecurity.class);
plugins.add(TestCustomAuthenticatorSecurityPlugin.class);
return plugins;
}
@Override
protected boolean addMockHttpTransport() {
return false;
}
@Override
protected void doAssertXPackIsInstalled() {
// avoids tripping the assertion due to missing LocalStateSecurity
}
@Before
public void resetAuthenticator() {
authenticator.reset();
}
@Override
protected String configUsers() {
final Hasher passwdHasher = getFastStoredHashAlgoForTests();
final String usersPasswdHashed = new String(passwdHasher.hash(TEST_PASSWORD_SECURE_STRING));
return super.configUsers() + "file_user:" + usersPasswdHashed + "\n";
}
@Override
protected String configUsersRoles() {
return super.configUsersRoles() + """
editor:file_user""";
}
public void testProfileActivationSuccess() {
final SecureString accessToken = new SecureString("strawberries".toCharArray());
final Profile profile = doActivateProfileWithAccessToken(accessToken);
assertThat(authenticator.extractedGrantTokens(), contains(new TestCustomAccessToken(accessToken)));
assertThat(authenticator.authenticatedTokens(), contains(new TestCustomAccessToken(accessToken)));
assertThat(profile.user().realmName(), equalTo(TEST_REALM_REF.getName()));
assertThat(profile.user().username(), equalTo(TEST_USERNAME));
assertThat(profile.user().roles(), contains(TEST_ROLE_NAME));
assertThat(profile.user().domainName(), nullValue());
assertThat(profile.user().fullName(), nullValue());
assertThat(profile.user().email(), nullValue());
}
public void testProfileActivationFailure() {
authenticator.setAuthFailure(new Exception("simulate authentication failure"));
final SecureString accessToken = new SecureString("blueberries".toCharArray());
var e = expectThrows(ElasticsearchSecurityException.class, () -> doActivateProfileWithAccessToken(accessToken));
assertThat(e.getMessage(), equalTo("error attempting to authenticate request"));
assertThat(e.getCause(), notNullValue());
assertThat(e.getCause().getMessage(), equalTo("simulate authentication failure"));
assertThat(authenticator.extractedGrantTokens(), contains(new TestCustomAccessToken(accessToken)));
assertThat(authenticator.authenticatedTokens(), contains(new TestCustomAccessToken(accessToken)));
}
public void testProfileActivationNotHandled() {
// extract token returns null -> no applicable auth handler
authenticator.setShouldExtractAccessToken(false);
final SecureString accessToken = new SecureString("blackberries".toCharArray());
var e = expectThrows(ElasticsearchSecurityException.class, () -> doActivateProfileWithAccessToken(accessToken));
assertThat(
e.getMessage(),
containsString("unable to authenticate user [_bearer_token] for action [cluster:admin/xpack/security/profile/activate]")
);
assertThat(e.getCause(), nullValue());
assertThat(authenticator.extractedGrantTokens(), is(emptyIterable()));
assertThat(authenticator.authenticatedTokens(), is(emptyIterable()));
}
public void testProfileActivationWithPassword() {
Profile profile = doActivateProfileWithPassword("file_user", TEST_PASSWORD_SECURE_STRING.clone());
assertThat(profile.user().realmName(), equalTo("file"));
// the authenticator should not be called for password grant type
assertThat(authenticator.isCalledOnce(), is(false));
assertThat(authenticator.extractedGrantTokens(), is(emptyIterable()));
assertThat(authenticator.authenticatedTokens(), is(emptyIterable()));
}
private Profile doActivateProfileWithAccessToken(SecureString token) {
final ActivateProfileRequest activateProfileRequest = new ActivateProfileRequest();
activateProfileRequest.getGrant().setType("access_token");
activateProfileRequest.getGrant().setAccessToken(token);
final ActivateProfileResponse activateProfileResponse = client().execute(ActivateProfileAction.INSTANCE, activateProfileRequest)
.actionGet();
final Profile profile = activateProfileResponse.getProfile();
assertThat(profile, notNullValue());
assertThat(profile.applicationData(), anEmptyMap());
return profile;
}
private Profile doActivateProfileWithPassword(String username, SecureString password) {
final ActivateProfileRequest activateProfileRequest = new ActivateProfileRequest();
activateProfileRequest.getGrant().setType("password");
activateProfileRequest.getGrant().setPassword(password);
activateProfileRequest.getGrant().setUsername(username);
final ActivateProfileResponse activateProfileResponse = client().execute(ActivateProfileAction.INSTANCE, activateProfileRequest)
.actionGet();
final Profile profile = activateProfileResponse.getProfile();
assertThat(profile, notNullValue());
assertThat(profile.applicationData(), anEmptyMap());
return profile;
}
private static | ProfileCustomAuthenticatorIntegTests |
java | spring-projects__spring-boot | module/spring-boot-data-commons/src/test/java/org/springframework/boot/data/autoconfigure/metrics/DataRepositoryMetricsAutoConfigurationTests.java | {
"start": 8030,
"end": 8348
} | class ____ extends MetricsRepositoryMethodInvocationListener {
TestMetricsRepositoryMethodInvocationListener(Supplier<MeterRegistry> registrySupplier,
RepositoryTagsProvider tagsProvider) {
super(registrySupplier, tagsProvider, "test", AutoTimer.DISABLED);
}
}
| TestMetricsRepositoryMethodInvocationListener |
java | spring-projects__spring-boot | module/spring-boot-kafka/src/main/java/org/springframework/boot/kafka/autoconfigure/KafkaProperties.java | {
"start": 43141,
"end": 44760
} | class ____ {
/**
* Base delay after the initial invocation. Can be combined with a
* "multiplier" to use an exponential back off strategy.
*/
private Duration delay = Duration.ofSeconds(1);
/**
* Multiplier for a delay for the next retry attempt, applied to the
* previous delay, starting with the initial delay as well as to the
* applicable jitter for each attempt. Fixed delay by default.
*/
private double multiplier = 1.0;
/**
* Maximum delay for any retry attempt, limiting how far jitter and the
* multiplier can increase the delay.
*/
private Duration maxDelay = Duration.ofSeconds(30);
/**
* Jitter value for the base retry attempt, randomly subtracted or added
* to the calculated delay, resulting in a value between 'delay - jitter'
* and 'delay + jitter' but never below the base delay or above the max
* delay.
*/
private Duration jitter = Duration.ZERO;
public Duration getDelay() {
return this.delay;
}
public void setDelay(Duration delay) {
this.delay = delay;
}
public double getMultiplier() {
return this.multiplier;
}
public void setMultiplier(double multiplier) {
this.multiplier = multiplier;
}
public Duration getMaxDelay() {
return this.maxDelay;
}
public void setMaxDelay(Duration maxDelay) {
this.maxDelay = maxDelay;
}
public Duration getJitter() {
return this.jitter;
}
public void setJitter(Duration jitter) {
this.jitter = jitter;
}
}
}
}
public static | Backoff |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/integerarray/AtomicIntegerArrayAssert_hasSize_Test.java | {
"start": 828,
"end": 1151
} | class ____ extends AtomicIntegerArrayAssertBaseTest {
@Override
protected AtomicIntegerArrayAssert invoke_api_method() {
return assertions.hasSize(6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSize(info(), internalArray(), 6);
}
}
| AtomicIntegerArrayAssert_hasSize_Test |
java | google__guava | android/guava/src/com/google/common/collect/CompactHashMap.java | {
"start": 6272,
"end": 24162
} | class ____ {
// int hash;
// Entry next;
// K key;
// V value;
// }
//
// The imaginary `hash` and `next` values are combined into a single `int` value in the `entries`
// array. The top bits of this value are the remaining bits of the hash value that were not used
// in the short hash. We saw that a mask of 0x7f would keep the 7-bit value 0x6f from a full
// hashcode of 0x89abcdef. The imaginary `hash` value would then be the remaining top 25 bits,
// 0x89abcd80. To this is added (or'd) the `next` value, which is an index within `entries`
// (and therefore within `keys` and `values`) of another entry that has the same short hash
// value. In our example, it would be another entry for a key whose short hash is also 0x6f.
//
// Essentially, then, `table[h]` gives us the start of a linked list in `entries`, where every
// element of the list has the short hash value h.
//
// A wrinkle here is that the value 0 (called UNSET in the code) is used as the equivalent of a
// null pointer. If `table[h] == 0` that means there are no keys in the map whose short hash is h.
// If the `next` bits in `entries[i]` are 0 that means there are no further entries for the given
// short hash. But 0 is also a valid index in `entries`, so we add 1 to these indices before
// putting them in `table` or in `next` bits, and subtract 1 again when we need an index value.
//
// The elements of `keys`, `values`, and `entries` are added sequentially, so that elements 0 to
// `size() - 1` are used and remaining elements are not. This makes iteration straightforward.
// Removing an entry generally involves moving the last element of each array to where the removed
// entry was, and adjusting index links accordingly.
/**
* The hashtable object. This can be either:
*
* <ul>
* <li>a byte[], short[], or int[], with size a power of two, created by
* CompactHashing.createTable, whose values are either
* <ul>
* <li>UNSET, meaning "null pointer"
* <li>one plus an index into the keys, values, and entries arrays
* </ul>
* <li>another java.util.Map delegate implementation. In most modern JDKs, normal java.util hash
* collections intelligently fall back to a binary search tree if hash table collisions are
* detected. Rather than going to all the trouble of reimplementing this ourselves, we
* simply switch over to use the JDK implementation wholesale if probable hash flooding is
* detected, sacrificing the compactness guarantee in very rare cases in exchange for much
* more reliable worst-case behavior.
* <li>null, if no entries have yet been added to the map
* </ul>
*/
private transient @Nullable Object table;
/**
* Contains the logical entries, in the range of [0, size()). The high bits of each int are the
* part of the smeared hash of the key not covered by the hashtable mask, whereas the low bits are
* the "next" pointer (pointing to the next entry in the bucket chain), which will always be less
* than or equal to the hashtable mask.
*
* <pre>
* hash = aaaaaaaa
* mask = 00000fff
* next = 00000bbb
* entry = aaaaabbb
* </pre>
*
* <p>The pointers in [size(), entries.length) are all "null" (UNSET).
*/
@VisibleForTesting transient int @Nullable [] entries;
/**
* The keys of the entries in the map, in the range of [0, size()). The keys in [size(),
* keys.length) are all {@code null}.
*/
@VisibleForTesting transient @Nullable Object @Nullable [] keys;
/**
* The values of the entries in the map, in the range of [0, size()). The values in [size(),
* values.length) are all {@code null}.
*/
@VisibleForTesting transient @Nullable Object @Nullable [] values;
/**
* Keeps track of metadata like the number of hash table bits and modifications of this data
* structure (to make it possible to throw ConcurrentModificationException in the iterator). Note
* that we choose not to make this volatile, so we do less of a "best effort" to track such
* errors, for better performance.
*
* <p>For a new instance, where the arrays above have not yet been allocated, the value of {@code
* metadata} is the size that the arrays should be allocated with. Once the arrays have been
* allocated, the value of {@code metadata} combines the number of bits in the "short hash", in
* its bottom {@value CompactHashing#HASH_TABLE_BITS_MAX_BITS} bits, with a modification count in
* the remaining bits that is used to detect concurrent modification during iteration.
*/
private transient int metadata;
/** The number of elements contained in the set. */
private transient int size;
/** Constructs a new empty instance of {@code CompactHashMap}. */
CompactHashMap() {
init(CompactHashing.DEFAULT_SIZE);
}
/**
* Constructs a new instance of {@code CompactHashMap} with the specified capacity.
*
* @param expectedSize the initial capacity of this {@code CompactHashMap}.
*/
CompactHashMap(int expectedSize) {
init(expectedSize);
}
/** Pseudoconstructor for serialization support. */
void init(int expectedSize) {
Preconditions.checkArgument(expectedSize >= 0, "Expected size must be >= 0");
// Save expectedSize for use in allocArrays()
this.metadata = Ints.constrainToRange(expectedSize, 1, CompactHashing.MAX_SIZE);
}
/** Returns whether arrays need to be allocated. */
boolean needsAllocArrays() {
return table == null;
}
/** Handle lazy allocation of arrays. */
@CanIgnoreReturnValue
int allocArrays() {
Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");
int expectedSize = metadata;
int buckets = CompactHashing.tableSize(expectedSize);
this.table = CompactHashing.createTable(buckets);
setHashTableMask(buckets - 1);
this.entries = new int[expectedSize];
this.keys = new Object[expectedSize];
this.values = new Object[expectedSize];
return expectedSize;
}
@SuppressWarnings("unchecked")
@VisibleForTesting
@Nullable Map<K, V> delegateOrNull() {
if (table instanceof Map) {
return (Map<K, V>) table;
}
return null;
}
Map<K, V> createHashFloodingResistantDelegate(int tableSize) {
return new LinkedHashMap<>(tableSize, 1.0f);
}
@CanIgnoreReturnValue
Map<K, V> convertToHashFloodingResistantImplementation() {
Map<K, V> newDelegate = createHashFloodingResistantDelegate(hashTableMask() + 1);
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
newDelegate.put(key(i), value(i));
}
this.table = newDelegate;
this.entries = null;
this.keys = null;
this.values = null;
incrementModCount();
return newDelegate;
}
/** Stores the hash table mask as the number of bits needed to represent an index. */
private void setHashTableMask(int mask) {
int hashTableBits = Integer.SIZE - Integer.numberOfLeadingZeros(mask);
metadata =
CompactHashing.maskCombine(metadata, hashTableBits, CompactHashing.HASH_TABLE_BITS_MASK);
}
/** Gets the hash table mask using the stored number of hash table bits. */
private int hashTableMask() {
return (1 << (metadata & CompactHashing.HASH_TABLE_BITS_MASK)) - 1;
}
void incrementModCount() {
metadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
/**
* Mark an access of the specified entry. Used only in {@code CompactLinkedHashMap} for LRU
* ordering.
*/
void accessEntry(int index) {
// no-op by default
}
@CanIgnoreReturnValue
@Override
public @Nullable V put(@ParametricNullness K key, @ParametricNullness V value) {
if (needsAllocArrays()) {
allocArrays();
}
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.put(key, value);
}
int[] entries = requireEntries();
@Nullable Object[] keys = requireKeys();
@Nullable Object[] values = requireValues();
int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
int newSize = newEntryIndex + 1;
int hash = smearedHash(key);
int mask = hashTableMask();
int tableIndex = hash & mask;
int next = CompactHashing.tableGet(requireTable(), tableIndex);
if (next == UNSET) { // uninitialized bucket
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
CompactHashing.tableSet(requireTable(), tableIndex, newEntryIndex + 1);
}
} else {
int entryIndex;
int entry;
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
int bucketLength = 0;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equals(key, keys[entryIndex])) {
@SuppressWarnings("unchecked") // known to be a V
V oldValue = (V) values[entryIndex];
values[entryIndex] = value;
accessEntry(entryIndex);
return oldValue;
}
next = CompactHashing.getNext(entry, mask);
bucketLength++;
} while (next != UNSET);
if (bucketLength >= MAX_HASH_BUCKET_LENGTH) {
return convertToHashFloodingResistantImplementation().put(key, value);
}
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
entries[entryIndex] = CompactHashing.maskCombine(entry, newEntryIndex + 1, mask);
}
}
resizeMeMaybe(newSize);
insertEntry(newEntryIndex, key, value, hash, mask);
this.size = newSize;
incrementModCount();
return null;
}
/**
* Creates a fresh entry with the specified object at the specified position in the entry arrays.
*/
void insertEntry(
int entryIndex, @ParametricNullness K key, @ParametricNullness V value, int hash, int mask) {
this.setEntry(entryIndex, CompactHashing.maskCombine(hash, UNSET, mask));
this.setKey(entryIndex, key);
this.setValue(entryIndex, value);
}
/** Resizes the entries storage if necessary. */
private void resizeMeMaybe(int newSize) {
int entriesSize = requireEntries().length;
if (newSize > entriesSize) {
// 1.5x but round up to nearest odd (this is optimal for memory consumption on Android)
int newCapacity = min(CompactHashing.MAX_SIZE, (entriesSize + max(1, entriesSize >>> 1)) | 1);
if (newCapacity != entriesSize) {
resizeEntries(newCapacity);
}
}
}
/**
* Resizes the internal entries array to the specified capacity, which may be greater or less than
* the current capacity.
*/
void resizeEntries(int newCapacity) {
this.entries = Arrays.copyOf(requireEntries(), newCapacity);
this.keys = Arrays.copyOf(requireKeys(), newCapacity);
this.values = Arrays.copyOf(requireValues(), newCapacity);
}
@CanIgnoreReturnValue
private int resizeTable(int oldMask, int newCapacity, int targetHash, int targetEntryIndex) {
Object newTable = CompactHashing.createTable(newCapacity);
int newMask = newCapacity - 1;
if (targetEntryIndex != UNSET) {
// Add target first; it must be last in the chain because its entry hasn't yet been created
CompactHashing.tableSet(newTable, targetHash & newMask, targetEntryIndex + 1);
}
Object oldTable = requireTable();
int[] entries = requireEntries();
// Loop over `oldTable` to construct its replacement, ``newTable`. The entries do not move, so
// the `keys` and `values` arrays do not need to change. But because the "short hash" now has a
// different number of bits, we must rewrite each element of `entries` so that its contribution
// to the full hashcode reflects the change, and so that its `next` link corresponds to the new
// linked list of entries with the new short hash.
for (int oldTableIndex = 0; oldTableIndex <= oldMask; oldTableIndex++) {
int oldNext = CompactHashing.tableGet(oldTable, oldTableIndex);
// Each element of `oldTable` is the head of a (possibly empty) linked list of elements in
// `entries`. The `oldNext` loop is going to traverse that linked list.
// We need to rewrite the `next` link of each of the elements so that it is in the appropriate
// linked list starting from `newTable`. In general, each element from the old linked list
// belongs to a different linked list from `newTable`. We insert each element in turn at the
// head of its appropriate `newTable` linked list.
while (oldNext != UNSET) {
int entryIndex = oldNext - 1;
int oldEntry = entries[entryIndex];
// Rebuild the full 32-bit hash using entry hashPrefix and oldTableIndex ("hashSuffix").
int hash = CompactHashing.getHashPrefix(oldEntry, oldMask) | oldTableIndex;
int newTableIndex = hash & newMask;
int newNext = CompactHashing.tableGet(newTable, newTableIndex);
CompactHashing.tableSet(newTable, newTableIndex, oldNext);
entries[entryIndex] = CompactHashing.maskCombine(hash, newNext, newMask);
oldNext = CompactHashing.getNext(oldEntry, oldMask);
}
}
this.table = newTable;
setHashTableMask(newMask);
return newMask;
}
private int indexOf(@Nullable Object key) {
if (needsAllocArrays()) {
return -1;
}
int hash = smearedHash(key);
int mask = hashTableMask();
int next = CompactHashing.tableGet(requireTable(), hash & mask);
if (next == UNSET) {
return -1;
}
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
do {
int entryIndex = next - 1;
int entry = entry(entryIndex);
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equals(key, key(entryIndex))) {
return entryIndex;
}
next = CompactHashing.getNext(entry, mask);
} while (next != UNSET);
return -1;
}
@Override
public boolean containsKey(@Nullable Object key) {
Map<K, V> delegate = delegateOrNull();
return (delegate != null) ? delegate.containsKey(key) : indexOf(key) != -1;
}
@Override
public @Nullable V get(@Nullable Object key) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.get(key);
}
int index = indexOf(key);
if (index == -1) {
return null;
}
accessEntry(index);
return value(index);
}
@CanIgnoreReturnValue
@SuppressWarnings("unchecked") // known to be a V
@Override
public @Nullable V remove(@Nullable Object key) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.remove(key);
}
Object oldValue = removeHelper(key);
return (oldValue == NOT_FOUND) ? null : (V) oldValue;
}
private @Nullable Object removeHelper(@Nullable Object key) {
if (needsAllocArrays()) {
return NOT_FOUND;
}
int mask = hashTableMask();
int index =
CompactHashing.remove(
key,
/* value= */ null,
mask,
requireTable(),
requireEntries(),
requireKeys(),
/* values= */ null);
if (index == -1) {
return NOT_FOUND;
}
Object oldValue = value(index);
moveLastEntry(index, mask);
size--;
incrementModCount();
return oldValue;
}
/**
* Moves the last entry in the entry array into {@code dstIndex}, and nulls out its old position.
*/
void moveLastEntry(int dstIndex, int mask) {
Object table = requireTable();
int[] entries = requireEntries();
@Nullable Object[] keys = requireKeys();
@Nullable Object[] values = requireValues();
int srcIndex = size() - 1;
if (dstIndex < srcIndex) {
// move last entry to deleted spot
Object key = keys[srcIndex];
keys[dstIndex] = key;
values[dstIndex] = values[srcIndex];
keys[srcIndex] = null;
values[srcIndex] = null;
// move the last entry to the removed spot, just like we moved the element
entries[dstIndex] = entries[srcIndex];
entries[srcIndex] = 0;
// also need to update whoever's "next" pointer was pointing to the last entry place
int tableIndex = smearedHash(key) & mask;
int next = CompactHashing.tableGet(table, tableIndex);
int srcNext = srcIndex + 1;
if (next == srcNext) {
// we need to update the root pointer
CompactHashing.tableSet(table, tableIndex, dstIndex + 1);
} else {
// we need to update a pointer in an entry
int entryIndex;
int entry;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
next = CompactHashing.getNext(entry, mask);
} while (next != srcNext);
// here, entries[entryIndex] points to the old entry location; update it
entries[entryIndex] = CompactHashing.maskCombine(entry, dstIndex + 1, mask);
}
} else {
keys[dstIndex] = null;
values[dstIndex] = null;
entries[dstIndex] = 0;
}
}
int firstEntryIndex() {
return isEmpty() ? -1 : 0;
}
int getSuccessor(int entryIndex) {
return (entryIndex + 1 < size) ? entryIndex + 1 : -1;
}
/**
* Updates the index an iterator is pointing to after a call to remove: returns the index of the
* entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
* index that *was* the next entry that would be looked at.
*/
int adjustAfterRemove(int indexBeforeRemove, @SuppressWarnings("unused") int indexRemoved) {
return indexBeforeRemove - 1;
}
private abstract | Entry |
java | apache__maven | impl/maven-impl/src/test/java/org/apache/maven/impl/model/ParentCycleDetectionTest.java | {
"start": 1579,
"end": 12526
} | class ____ {
Session session;
ModelBuilder modelBuilder;
@BeforeEach
void setup() {
session = ApiRunner.createSession();
modelBuilder = session.getService(ModelBuilder.class);
assertNotNull(modelBuilder);
}
@Test
void testParentResolutionCycleDetectionWithRelativePath(@TempDir Path tempDir) throws IOException {
// Create .mvn directory to mark root
Files.createDirectories(tempDir.resolve(".mvn"));
// Create a parent resolution cycle using relativePath: child -> parent -> child
// This reproduces the same issue as the integration test MavenITmng11009StackOverflowParentResolutionTest
Path childPom = tempDir.resolve("pom.xml");
Files.writeString(childPom, """
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.maven.its.mng11009</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
<relativePath>parent</relativePath>
</parent>
<artifactId>child</artifactId>
<packaging>pom</packaging>
</project>
""");
Path parentPom = tempDir.resolve("parent").resolve("pom.xml");
Files.createDirectories(parentPom.getParent());
Files.writeString(parentPom, """
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.maven.its.mng11009</groupId>
<artifactId>external-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<!-- No relativePath specified, defaults to ../pom.xml which creates the circular reference -->
</parent>
<artifactId>parent</artifactId>
<packaging>pom</packaging>
</project>
""");
ModelBuilderRequest request = ModelBuilderRequest.builder()
.session(session)
.source(Sources.buildSource(childPom))
.requestType(ModelBuilderRequest.RequestType.BUILD_PROJECT)
.build();
// This should either:
// 1. Detect the cycle and throw a meaningful ModelBuilderException, OR
// 2. Not cause a StackOverflowError (the main goal is to prevent the StackOverflowError)
try {
ModelBuilderResult result = modelBuilder.newSession().build(request);
// If we get here without StackOverflowError, that's actually good progress
// The build may still fail with a different error (circular dependency), but that's expected
System.out.println("Build completed without StackOverflowError. Result: " + result);
} catch (StackOverflowError error) {
fail(
"Build failed with StackOverflowError, which should be prevented. This indicates the cycle detection is not working properly for relativePath-based cycles.");
} catch (ModelBuilderException exception) {
// This is acceptable - the build should fail with a meaningful error, not StackOverflowError
System.out.println("Build failed with ModelBuilderException (expected): " + exception.getMessage());
// Check if it's a cycle detection error
if (exception.getMessage().contains("cycle")
|| exception.getMessage().contains("circular")) {
System.out.println("✓ Cycle detected correctly!");
}
// We don't assert on the specific message because the main goal is to prevent StackOverflowError
}
}
@Test
void testDirectCycleDetection(@TempDir Path tempDir) throws IOException {
// Create .mvn directory to mark root
Files.createDirectories(tempDir.resolve(".mvn"));
// Create a direct cycle: A -> B -> A
Path pomA = tempDir.resolve("a").resolve("pom.xml");
Files.createDirectories(pomA.getParent());
Files.writeString(pomA, """
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>test</groupId>
<artifactId>a</artifactId>
<version>1.0</version>
<parent>
<groupId>test</groupId>
<artifactId>b</artifactId>
<version>1.0</version>
<relativePath>../b/pom.xml</relativePath>
</parent>
</project>
""");
Path pomB = tempDir.resolve("b").resolve("pom.xml");
Files.createDirectories(pomB.getParent());
Files.writeString(pomB, """
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>test</groupId>
<artifactId>b</artifactId>
<version>1.0</version>
<parent>
<groupId>test</groupId>
<artifactId>a</artifactId>
<version>1.0</version>
<relativePath>../a/pom.xml</relativePath>
</parent>
</project>
""");
ModelBuilderRequest request = ModelBuilderRequest.builder()
.session(session)
.source(Sources.buildSource(pomA))
.requestType(ModelBuilderRequest.RequestType.BUILD_PROJECT)
.build();
// This should detect the cycle and throw a meaningful ModelBuilderException
try {
ModelBuilderResult result = modelBuilder.newSession().build(request);
fail("Expected ModelBuilderException due to cycle detection, but build succeeded: " + result);
} catch (StackOverflowError error) {
fail("Build failed with StackOverflowError, which should be prevented by cycle detection.");
} catch (ModelBuilderException exception) {
// This is expected - the build should fail with a cycle detection error
System.out.println("Build failed with ModelBuilderException (expected): " + exception.getMessage());
// Check if it's a cycle detection error
if (exception.getMessage().contains("cycle")
|| exception.getMessage().contains("circular")) {
System.out.println("✓ Cycle detected correctly!");
} else {
System.out.println("⚠ Exception was not a cycle detection error: " + exception.getMessage());
}
}
}
@Test
void testMultipleModulesWithSameParentDoNotCauseCycle(@TempDir Path tempDir) throws IOException {
// Create .mvn directory to mark root
Files.createDirectories(tempDir.resolve(".mvn"));
// Create a scenario like the failing test: multiple modules with the same parent
Path parentPom = tempDir.resolve("parent").resolve("pom.xml");
Files.createDirectories(parentPom.getParent());
Files.writeString(parentPom, """
<project xmlns="http://maven.apache.org/POM/4.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.1.0 https://maven.apache.org/xsd/maven-4.1.0.xsd">
<modelVersion>4.1.0</modelVersion>
<groupId>test</groupId>
<artifactId>parent</artifactId>
<version>1.0</version>
<packaging>pom</packaging>
</project>
""");
Path moduleA = tempDir.resolve("module-a").resolve("pom.xml");
Files.createDirectories(moduleA.getParent());
Files.writeString(moduleA, """
<project xmlns="http://maven.apache.org/POM/4.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.1.0 https://maven.apache.org/xsd/maven-4.1.0.xsd">
<modelVersion>4.1.0</modelVersion>
<parent>
<groupId>test</groupId>
<artifactId>parent</artifactId>
<version>1.0</version>
<relativePath>../parent/pom.xml</relativePath>
</parent>
<artifactId>module-a</artifactId>
</project>
""");
Path moduleB = tempDir.resolve("module-b").resolve("pom.xml");
Files.createDirectories(moduleB.getParent());
Files.writeString(moduleB, """
<project xmlns="http://maven.apache.org/POM/4.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.1.0 https://maven.apache.org/xsd/maven-4.1.0.xsd">
<modelVersion>4.1.0</modelVersion>
<parent>
<groupId>test</groupId>
<artifactId>parent</artifactId>
<version>1.0</version>
<relativePath>../parent/pom.xml</relativePath>
</parent>
<artifactId>module-b</artifactId>
</project>
""");
// Both modules should be able to resolve their parent without cycle detection errors
ModelBuilderRequest requestA = ModelBuilderRequest.builder()
.session(session)
.source(Sources.buildSource(moduleA))
.requestType(ModelBuilderRequest.RequestType.BUILD_PROJECT)
.build();
ModelBuilderRequest requestB = ModelBuilderRequest.builder()
.session(session)
.source(Sources.buildSource(moduleB))
.requestType(ModelBuilderRequest.RequestType.BUILD_PROJECT)
.build();
// These should not throw exceptions
ModelBuilderResult resultA = modelBuilder.newSession().build(requestA);
ModelBuilderResult resultB = modelBuilder.newSession().build(requestB);
// Verify that both models were built successfully
assertNotNull(resultA);
assertNotNull(resultB);
}
}
| ParentCycleDetectionTest |
java | micronaut-projects__micronaut-core | websocket/src/main/java/io/micronaut/websocket/annotation/ClientWebSocket.java | {
"start": 1742,
"end": 2659
} | interface ____ {
/**
* @return The URI of the action
*/
@AliasFor(member = "uri")
@AliasFor(annotation = WebSocketComponent.class, member = "value")
@AliasFor(annotation = WebSocketComponent.class, member = "uri")
String value() default DEFAULT_URI;
/**
* @return The URI of the action
*/
@AliasFor(member = "value")
@AliasFor(annotation = WebSocketComponent.class, member = "value")
@AliasFor(annotation = WebSocketComponent.class, member = "uri")
String uri() default DEFAULT_URI;
/**
* @return The WebSocket version to use to connect
*/
@AliasFor(annotation = WebSocketComponent.class, member = "version")
WebSocketVersion version() default WebSocketVersion.V13;
/**
* @return The Sec-WebSocket-Protocol header field is used in the WebSocket opening handshake.
*/
String subprotocol() default "";
}
| ClientWebSocket |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/serde/DynamicTableSinkSpecSerdeTest.java | {
"start": 4243,
"end": 16325
} | class ____ {
static Stream<DynamicTableSinkSpec> testDynamicTableSinkSpecSerde() {
Map<String, String> options1 = new HashMap<>();
options1.put("connector", FileSystemTableFactory.IDENTIFIER);
options1.put("format", TestCsvFormatFactory.IDENTIFIER);
options1.put("path", "/tmp");
final ResolvedSchema resolvedSchema1 =
new ResolvedSchema(
Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
Collections.emptyList(),
null,
Collections.singletonList(
DefaultIndex.newIndex("idx", Collections.singletonList("a"))));
final CatalogTable catalogTable1 =
CatalogTable.newBuilder()
.schema(Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build())
.options(options1)
.build();
DynamicTableSinkSpec spec1 =
new DynamicTableSinkSpec(
ContextResolvedTable.temporary(
ObjectIdentifier.of(
CatalogManagerMocks.DEFAULT_CATALOG,
CatalogManagerMocks.DEFAULT_DATABASE,
"MyTable"),
new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
null,
null);
Map<String, String> options2 = new HashMap<>();
options2.put("connector", FileSystemTableFactory.IDENTIFIER);
options2.put("format", TestCsvFormatFactory.IDENTIFIER);
options2.put("path", "/tmp");
final ResolvedSchema resolvedSchema2 =
new ResolvedSchema(
Arrays.asList(
Column.physical("a", DataTypes.BIGINT()),
Column.physical("b", DataTypes.INT()),
Column.physical("p", DataTypes.STRING())),
Collections.emptyList(),
null,
Collections.singletonList(
DefaultIndex.newIndex("idx", Collections.singletonList("a"))));
final CatalogTable catalogTable2 =
CatalogTable.newBuilder()
.schema(Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build())
.options(options2)
.build();
DynamicTableSinkSpec spec2 =
new DynamicTableSinkSpec(
ContextResolvedTable.temporary(
ObjectIdentifier.of(
CatalogManagerMocks.DEFAULT_CATALOG,
CatalogManagerMocks.DEFAULT_DATABASE,
"MyTable"),
new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
Arrays.asList(
new OverwriteSpec(true),
new PartitioningSpec(
new HashMap<String, String>() {
{
put("p", "A");
}
})),
new int[][] {{0}, {1}});
Map<String, String> options3 = new HashMap<>();
options3.put("connector", TestValuesTableFactory.IDENTIFIER);
options3.put("writable-metadata", "m:STRING");
final ResolvedSchema resolvedSchema3 =
new ResolvedSchema(
Arrays.asList(
Column.physical("a", DataTypes.BIGINT()),
Column.physical("b", DataTypes.INT()),
Column.metadata("m", DataTypes.STRING(), null, false)),
Collections.emptyList(),
null,
Collections.singletonList(
DefaultIndex.newIndex("idx", Collections.singletonList("a"))));
final CatalogTable catalogTable3 =
CatalogTable.newBuilder()
.schema(Schema.newBuilder().fromResolvedSchema(resolvedSchema3).build())
.options(options3)
.build();
DynamicTableSinkSpec spec3 =
new DynamicTableSinkSpec(
ContextResolvedTable.temporary(
ObjectIdentifier.of(
CatalogManagerMocks.DEFAULT_CATALOG,
CatalogManagerMocks.DEFAULT_DATABASE,
"MyTable"),
new ResolvedCatalogTable(catalogTable3, resolvedSchema3)),
Collections.singletonList(
new WritingMetadataSpec(
Collections.singletonList("m"),
RowType.of(new BigIntType(), new IntType()))),
null);
Map<String, String> options4 = new HashMap<>();
options4.put("connector", TestValuesTableFactory.IDENTIFIER);
int[][] targetColumnIndices = new int[][] {{0}, {1}};
// Todo: add test cases for nested columns in schema after FLINK-31301 is fixed.
final ResolvedSchema resolvedSchema4 =
new ResolvedSchema(
Arrays.asList(
Column.physical("a", DataTypes.BIGINT()),
Column.physical("b", DataTypes.INT()),
Column.metadata("p", DataTypes.STRING(), null, false)),
Collections.emptyList(),
null,
Collections.singletonList(
DefaultIndex.newIndex("idx", Collections.singletonList("a"))));
final CatalogTable catalogTable4 =
CatalogTable.newBuilder()
.schema(Schema.newBuilder().fromResolvedSchema(resolvedSchema4).build())
.options(options4)
.build();
DynamicTableSinkSpec spec4 =
new DynamicTableSinkSpec(
ContextResolvedTable.temporary(
ObjectIdentifier.of(
CatalogManagerMocks.DEFAULT_CATALOG,
CatalogManagerMocks.DEFAULT_DATABASE,
"MyTable"),
new ResolvedCatalogTable(catalogTable4, resolvedSchema4)),
Collections.singletonList(new TargetColumnWritingSpec(targetColumnIndices)),
targetColumnIndices);
return Stream.of(spec1, spec2, spec3, spec4);
}
@ParameterizedTest
@MethodSource("testDynamicTableSinkSpecSerde")
void testDynamicTableSinkSpecSerde(DynamicTableSinkSpec spec) throws IOException {
PlannerMocks plannerMocks = PlannerMocks.create();
CatalogManager catalogManager = plannerMocks.getCatalogManager();
catalogManager.createTable(
spec.getContextResolvedTable().getResolvedTable(),
spec.getContextResolvedTable().getIdentifier(),
false);
SerdeContext serdeCtx =
configuredSerdeContext(catalogManager, plannerMocks.getTableConfig());
// Re-init the spec to be permanent with correct catalog
spec =
new DynamicTableSinkSpec(
ContextResolvedTable.permanent(
spec.getContextResolvedTable().getIdentifier(),
catalogManager.getCatalog(catalogManager.getCurrentCatalog()).get(),
spec.getContextResolvedTable().getResolvedTable()),
spec.getSinkAbilities(),
null);
String actualJson = toJson(serdeCtx, spec);
DynamicTableSinkSpec actual = toObject(serdeCtx, actualJson, DynamicTableSinkSpec.class);
assertThat(actual.getContextResolvedTable()).isEqualTo(spec.getContextResolvedTable());
assertThat(actual.getSinkAbilities()).isEqualTo(spec.getSinkAbilities());
assertThat(actual.getTableSink(plannerMocks.getPlannerContext().getFlinkContext()))
.isNotNull();
}
@Test
void testDynamicTableSinkSpecSerdeWithEnrichmentOptions() throws Exception {
// Test model
ObjectIdentifier identifier =
ObjectIdentifier.of(
CatalogManagerMocks.DEFAULT_CATALOG,
CatalogManagerMocks.DEFAULT_DATABASE,
"my_table");
String formatPrefix = FactoryUtil.getFormatPrefix(FORMAT, TestFormatFactory.IDENTIFIER);
Map<String, String> planOptions = new HashMap<>();
planOptions.put(CONNECTOR.key(), TestDynamicTableFactory.IDENTIFIER);
planOptions.put(TARGET.key(), "abc");
planOptions.put(BUFFER_SIZE.key(), "1000");
planOptions.put(FORMAT.key(), TestFormatFactory.IDENTIFIER);
planOptions.put(formatPrefix + DELIMITER.key(), "|");
Map<String, String> catalogOptions = new HashMap<>();
catalogOptions.put(CONNECTOR.key(), TestDynamicTableFactory.IDENTIFIER);
catalogOptions.put(TARGET.key(), "xyz");
catalogOptions.put(BUFFER_SIZE.key(), "2000");
catalogOptions.put(FORMAT.key(), TestFormatFactory.IDENTIFIER);
catalogOptions.put(formatPrefix + DELIMITER.key(), ",");
ResolvedCatalogTable planResolvedCatalogTable = tableWithOnlyPhysicalColumns(planOptions);
ResolvedCatalogTable catalogResolvedCatalogTable =
tableWithOnlyPhysicalColumns(catalogOptions);
// Create planner mocks
PlannerMocks plannerMocks =
PlannerMocks.create(
new Configuration()
.set(PLAN_RESTORE_CATALOG_OBJECTS, CatalogPlanRestore.ALL)
.set(PLAN_COMPILE_CATALOG_OBJECTS, CatalogPlanCompilation.ALL));
CatalogManager catalogManager = plannerMocks.getCatalogManager();
catalogManager.createTable(catalogResolvedCatalogTable, identifier, false);
// Mock the context
SerdeContext serdeCtx =
configuredSerdeContext(catalogManager, plannerMocks.getTableConfig());
DynamicTableSinkSpec planSpec =
new DynamicTableSinkSpec(
ContextResolvedTable.permanent(
identifier,
catalogManager.getCatalog(catalogManager.getCurrentCatalog()).get(),
planResolvedCatalogTable),
Collections.emptyList(),
null);
String actualJson = toJson(serdeCtx, planSpec);
DynamicTableSinkSpec actual = toObject(serdeCtx, actualJson, DynamicTableSinkSpec.class);
assertThat(actual.getContextResolvedTable()).isEqualTo(planSpec.getContextResolvedTable());
assertThat(actual.getSinkAbilities()).isNull();
TestDynamicTableFactory.DynamicTableSinkMock dynamicTableSink =
(TestDynamicTableFactory.DynamicTableSinkMock)
actual.getTableSink(plannerMocks.getPlannerContext().getFlinkContext());
assertThat(dynamicTableSink.target).isEqualTo("abc");
assertThat(dynamicTableSink.bufferSize).isEqualTo(2000);
assertThat(((TestFormatFactory.EncodingFormatMock) dynamicTableSink.valueFormat).delimiter)
.isEqualTo(",");
}
}
| DynamicTableSinkSpecSerdeTest |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/RoundTripWorkerBase.java | {
"start": 3294,
"end": 4963
} | class ____ implements TaskWorker {
private static final int THROTTLE_PERIOD_MS = 100;
private static final int LOG_INTERVAL_MS = 5000;
private static final int LOG_NUM_MESSAGES = 10;
private static final Logger log = LoggerFactory.getLogger(RoundTripWorkerBase.class);
private static final PayloadGenerator KEY_GENERATOR = new SequentialPayloadGenerator(4, 0);
private ToReceiveTracker toReceiveTracker;
protected String id;
protected RoundTripWorkloadSpec spec;
private final AtomicBoolean running = new AtomicBoolean(false);
private final Lock lock = new ReentrantLock();
private final Condition unackedSendsAreZero = lock.newCondition();
private ScheduledExecutorService executor;
private WorkerStatusTracker status;
private KafkaFutureImpl<String> doneFuture;
private KafkaProducer<byte[], byte[]> producer;
private Long unackedSends;
private ToSendTracker toSendTracker;
@Override
public void start(Platform platform, WorkerStatusTracker status,
KafkaFutureImpl<String> doneFuture) throws Exception {
if (!running.compareAndSet(false, true)) {
throw new IllegalStateException("RoundTripWorker is already running.");
}
log.info("{}: Activating RoundTripWorker.", id);
this.executor = Executors.newScheduledThreadPool(3,
ThreadUtils.createThreadFactory("RoundTripWorker%d", false));
this.status = status;
this.doneFuture = doneFuture;
this.producer = null;
this.unackedSends = spec.maxMessages();
executor.submit(new Prepare());
}
| RoundTripWorkerBase |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/datasource/DataSourceTest.java | {
"start": 1276,
"end": 1889
} | class ____ {
@Test
void test(EntityManagerFactoryScope scope) {
Listener listener = new Listener();
LogInspectionHelper.registerListener( listener, ConnectionInfoLogger.CONNECTION_INFO_LOGGER );
scope.getEntityManagerFactory();
LogInspectionHelper.clearAllListeners( ConnectionInfoLogger.CONNECTION_INFO_LOGGER );
Dialect dialect = scope.getDialect();
assertTrue( dialect instanceof OracleDialect
|| dialect instanceof DB2Dialect
|| dialect instanceof InformixDialect // Informix metadata does not include the URL
|| listener.seen );
}
@Entity(name="TestEntity")
static | DataSourceTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/proxy/ProxyConfiguration.java | {
"start": 1002,
"end": 1160
} | class ____. This allows using this interceptor
* and configuration with for example OSGi without any export of Byte Buddy when using Hibernate.
*/
public | loader |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/accessibility/referenced/AbstractSourceTargetMapperPrivate.java | {
"start": 363,
"end": 713
} | class ____ extends SourceTargetMapperPrivateBase {
public static final AbstractSourceTargetMapperPrivate INSTANCE =
Mappers.getMapper( AbstractSourceTargetMapperPrivate.class );
@Mapping(target = "referencedTarget", source = "referencedSource")
public abstract Target toTarget(Source source);
}
| AbstractSourceTargetMapperPrivate |
java | apache__camel | components/camel-spring-parent/camel-spring-main/src/test/java/org/apache/camel/spring/MyMainIoCRouteBuilder.java | {
"start": 954,
"end": 1231
} | class ____ extends RouteBuilder {
// use spring IoC annotations
@Autowired
private MyHelloBean bean;
@Override
public void configure() throws Exception {
from("direct:start").transform().constant(bean).to("mock:results");
}
}
| MyMainIoCRouteBuilder |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/internal/security/certprovider/FileWatcherCertificateProviderTest.java | {
"start": 20147,
"end": 20220
} | class ____<V> implements ScheduledFuture<V> {
static | TestScheduledFuture |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/util/ClassUtils.java | {
"start": 968,
"end": 1183
} | class ____ {
private ClassUtils() {
/* no-op */
}
/**
* Get the fully qualified name of the supplied class.
*
* <p>This is a null-safe variant of {@link Class#getName()}.
*
* @param clazz the | ClassUtils |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/loader/ast/internal/CollectionBatchLoaderInPredicate.java | {
"start": 1250,
"end": 5294
} | class ____
extends AbstractCollectionBatchLoader
implements SqlArrayMultiKeyLoader {
private final int keyColumnCount;
private final int sqlBatchSize;
private final JdbcParametersList jdbcParameters;
private final SelectStatement sqlAst;
private final JdbcOperationQuerySelect jdbcSelect;
public CollectionBatchLoaderInPredicate(
int domainBatchSize,
LoadQueryInfluencers influencers,
PluralAttributeMapping attributeMapping,
SessionFactoryImplementor sessionFactory) {
super( domainBatchSize, influencers, attributeMapping, sessionFactory );
keyColumnCount = attributeMapping.getKeyDescriptor().getJdbcTypeCount();
sqlBatchSize =
sessionFactory.getJdbcServices().getDialect()
.getBatchLoadSizingStrategy()
.determineOptimalBatchLoadSize( keyColumnCount, domainBatchSize, false );
if ( MULTI_KEY_LOAD_LOGGER.isTraceEnabled() ) {
MULTI_KEY_LOAD_LOGGER.enabledCollectionInPredicate(
attributeMapping.getNavigableRole().getFullPath(),
sqlBatchSize,
domainBatchSize
);
}
final var jdbcParametersBuilder = JdbcParametersList.newBuilder();
this.sqlAst = LoaderSelectBuilder.createSelect(
attributeMapping,
null,
attributeMapping.getKeyDescriptor(),
null,
sqlBatchSize,
influencers,
new LockOptions(),
jdbcParametersBuilder::add,
sessionFactory
);
final var querySpec = sqlAst.getQueryPart().getFirstQuerySpec();
final var tableGroup = querySpec.getFromClause().getRoots().get( 0 );
attributeMapping.applySoftDeleteRestrictions( tableGroup, querySpec::applyPredicate );
jdbcParameters = jdbcParametersBuilder.build();
assert jdbcParameters.size() == sqlBatchSize * keyColumnCount;
jdbcSelect =
sessionFactory.getJdbcServices().getJdbcEnvironment().getSqlAstTranslatorFactory()
.buildSelectTranslator( sessionFactory, sqlAst )
.translate( NO_BINDINGS, QueryOptions.NONE );
}
@Override
void initializeKeys(Object key, Object[] keysToInitialize, SharedSessionContractImplementor session) {
final boolean loggerDebugEnabled = MULTI_KEY_LOAD_LOGGER.isDebugEnabled();
if ( loggerDebugEnabled ) {
MULTI_KEY_LOAD_LOGGER.collectionKeysToInitialize(
collectionInfoString( getLoadable(), key ),
keysToInitialize
);
}
final var chunker = new MultiKeyLoadChunker<>(
sqlBatchSize,
keyColumnCount,
getLoadable().getKeyDescriptor(),
jdbcParameters,
sqlAst,
jdbcSelect
);
final var batchFetchQueue = session.getPersistenceContextInternal().getBatchFetchQueue();
chunker.processChunks(
keysToInitialize,
countIds( keysToInitialize ),
(jdbcParameterBindings, session1) ->
// Create a RegistrationHandler for handling any
// subselect fetches we encounter handling this chunk
new ExecutionContextWithSubselectFetchHandler(
session,
SubselectFetch.createRegistrationHandler(
batchFetchQueue,
sqlAst,
jdbcParameters,
jdbcParameterBindings
)
),
(key1, relativePosition, absolutePosition) -> {
},
(startIndex) -> {
if ( loggerDebugEnabled ) {
MULTI_KEY_LOAD_LOGGER.processingCollectionBatchFetchChunk(
collectionInfoString( getLoadable(), key ),
startIndex,
startIndex + (sqlBatchSize-1)
);
}
},
(startIndex, nonNullElementCount) -> {
if ( loggerDebugEnabled ) {
MULTI_KEY_LOAD_LOGGER.finishingCollectionBatchFetchChunk(
collectionInfoString( getLoadable(), key ),
startIndex,
startIndex + (sqlBatchSize-1),
nonNullElementCount
);
}
for ( int i = 0; i < nonNullElementCount; i++ ) {
final int keyPosition = i + startIndex;
if ( keyPosition < keysToInitialize.length ) {
finishInitializingKey( keysToInitialize[keyPosition], session );
}
}
},
session
);
}
@Override
void finishInitializingKeys(Object[] key, SharedSessionContractImplementor session) {
// do nothing
}
}
| CollectionBatchLoaderInPredicate |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1569/java8/Issue1569Mapper.java | {
"start": 314,
"end": 452
} | interface ____ {
Issue1569Mapper INSTANCE = Mappers.getMapper( Issue1569Mapper.class );
Target map(Source source);
}
| Issue1569Mapper |
java | apache__dubbo | dubbo-demo/dubbo-demo-mcp-server/src/main/java/org/apache/dubbo/mcp/server/demo/demo/HelloServiceImpl.java | {
"start": 947,
"end": 2135
} | class ____ implements HelloService {
@Override
public String sayHello(String name) {
System.out.println("HelloServiceImpl.sayHello called with: " + name);
if (name == null || name.trim().isEmpty()) {
return "Hello, guest!";
}
return "Hello, " + name + "!";
}
@Override
public ComplexResponse greetComplex(ComplexRequest request) {
System.out.println("HelloServiceImpl.greetComplex called with: " + request);
if (request == null) {
return new ComplexResponse("Error: Request was null", false, 400);
}
String message = "Received: " + request.getGreeting() + ". Count: "
+ request.getCount() + ". Active: "
+ request.isActive() + ". Detail: "
+ (request.getNestedDetail() != null ? request.getNestedDetail().getDetailInfo() : "N/A") + ". Tags: "
+ (request.getTags() != null ? String.join(", ", request.getTags()) : "None") + ". Attributes: "
+ (request.getAttributes() != null ? request.getAttributes().toString() : "None");
return new ComplexResponse(message, true, 200);
}
}
| HelloServiceImpl |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/sql/dialect/starrocks/parser/StarRocksStatementParserTest.java | {
"start": 217,
"end": 3529
} | class ____ extends TestCase {
public void testParseCreate() {
for (int i = 0; i < StarRocksCreateTableParserTest.caseList.length; i++) {
final String sql = StarRocksCreateTableParserTest.caseList[i];
final StarRocksStatementParser starRocksStatementParser = new StarRocksStatementParser(sql);
final SQLStatement parsed = starRocksStatementParser.parseCreate();
final String result = parsed.toString();
assertEquals("第 " + (i + 1) + "个用例验证失败", sql, result);
}
}
public void testParseBySQLUtil() {
for (int i = 0; i < StarRocksCreateTableParserTest.caseList.length; i++) {
final String sql = StarRocksCreateTableParserTest.caseList[i];
final SQLStatement parsed = SQLUtils.parseSingleStatement(sql, DbType.starrocks);
final String result = parsed.toString();
assertEquals("第 " + (i + 1) + "个用例验证失败", sql, result);
}
}
public void testParseCreateResource() {
String[] ddlList = new String[] {
"CREATE EXTERNAL RESOURCE \"spark0\"\n" +
"PROPERTIES (\n" +
"\t'spark.master' = 'yarn',\n" +
"\t'spark.executor.memory' = '1g',\n" +
"\t'working_dir' = 'hdfs://127.0.0.1:10000/tmp/doris',\n" +
"\t'spark.submit.deployMode' = 'cluster',\n" +
"\t'broker' = 'broker0',\n" +
"\t'type' = 'spark',\n" +
"\t'spark.yarn.queue' = 'queue0',\n" +
"\t'spark.hadoop.yarn.resourcemanager.address' = '127.0.0.1:9999',\n" +
"\t'broker.password' = 'password0',\n" +
"\t'broker.username' = 'user0',\n" +
"\t'spark.hadoop.fs.defaultFS' = 'hdfs://127.0.0.1:10000',\n" +
"\t'spark.jars' = 'xxx.jar,yyy.jar',\n" +
"\t'spark.files' = '/tmp/aaa,/tmp/bbb'\n" +
");",
"CREATE RESOURCE \"spark0\"\n" +
"PROPERTIES (\n" +
"\t'spark.master' = 'yarn',\n" +
"\t'spark.executor.memory' = '1g',\n" +
"\t'working_dir' = 'hdfs://127.0.0.1:10000/tmp/doris',\n" +
"\t'spark.submit.deployMode' = 'cluster',\n" +
"\t'broker' = 'broker0',\n" +
"\t'type' = 'spark',\n" +
"\t'spark.yarn.queue' = 'queue0',\n" +
"\t'spark.hadoop.yarn.resourcemanager.address' = '127.0.0.1:9999',\n" +
"\t'broker.password' = 'password0',\n" +
"\t'broker.username' = 'user0',\n" +
"\t'spark.hadoop.fs.defaultFS' = 'hdfs://127.0.0.1:10000',\n" +
"\t'spark.jars' = 'xxx.jar,yyy.jar',\n" +
"\t'spark.files' = '/tmp/aaa,/tmp/bbb'\n" +
");",
};
for (String ddl : ddlList) {
SQLStatement stmt = SQLUtils.parseSingleStatement(ddl, DbType.starrocks);
assertEquals(ddl, stmt.toString());
}
}
}
| StarRocksStatementParserTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQLDialect.java | {
"start": 53281,
"end": 54751
} | class ____ implements ParameterMarkerStrategy {
/**
* Singleton access
*/
public static final NativeParameterMarkers INSTANCE = new NativeParameterMarkers();
@Override
public String createMarker(int position, JdbcType jdbcType) {
return "$" + position;
}
}
@Override
public int getDefaultIntervalSecondScale() {
// The maximum scale for `interval second` is 6 unfortunately
return 6;
}
@Override
public DmlTargetColumnQualifierSupport getDmlTargetColumnQualifierSupport() {
return DmlTargetColumnQualifierSupport.TABLE_ALIAS;
}
@Override
public boolean supportsFromClauseInUpdate() {
return true;
}
@Override
public boolean supportsBindingNullSqlTypeForSetNull() {
return true;
}
@Override
public boolean supportsFilterClause() {
return true;
}
@Override
public boolean supportsRowConstructor() {
return true;
}
@Override
public boolean supportsArrayConstructor() {
return true;
}
@Override
public boolean supportsRecursiveCycleClause() {
return getVersion().isSameOrAfter( 14 );
}
@Override
public boolean supportsRecursiveCycleUsingClause() {
return getVersion().isSameOrAfter( 14 );
}
@Override
public boolean supportsRecursiveSearchClause() {
return getVersion().isSameOrAfter( 14 );
}
@Override
public InformationExtractor getInformationExtractor(ExtractionContext extractionContext) {
return new InformationExtractorPostgreSQLImpl( extractionContext );
}
}
| NativeParameterMarkers |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/hierarchies/standard/SingleTestClassWithTwoLevelContextHierarchyAndMixedConfigTypesTests.java | {
"start": 1824,
"end": 2500
} | class ____ {
@Bean
String foo() {
return "foo";
}
@Bean
String baz() {
return "baz-parent";
}
}
@Autowired
private String foo;
@Autowired
private String bar;
@Autowired
private String baz;
@Autowired
private ApplicationContext context;
@Test
void loadContextHierarchy() {
assertThat(context).as("child ApplicationContext").isNotNull();
assertThat(context.getParent()).as("parent ApplicationContext").isNotNull();
assertThat(context.getParent().getParent()).as("grandparent ApplicationContext").isNull();
assertThat(foo).isEqualTo("foo");
assertThat(bar).isEqualTo("bar");
assertThat(baz).isEqualTo("baz-child");
}
}
| ParentConfig |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java | {
"start": 989,
"end": 3937
} | class ____ extends ESTestCase {
public static AtomicArray<SearchPhaseResult> generateQueryResults() {
AtomicArray<SearchPhaseResult> array = new AtomicArray<>(3);
DiscoveryNode node1 = DiscoveryNodeUtils.create("node_1");
DiscoveryNode node2 = DiscoveryNodeUtils.create("node_2");
DiscoveryNode node3 = DiscoveryNodeUtils.create("node_3");
SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult(
new ShardSearchContextId("a", 1),
node1
);
testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), "cluster_x"));
SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult(
new ShardSearchContextId("b", 12),
node2
);
testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), "cluster_y"));
SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult(
new ShardSearchContextId("c", 42),
node3
);
testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null));
array.setOnce(0, testSearchPhaseResult1);
array.setOnce(1, testSearchPhaseResult2);
array.setOnce(2, testSearchPhaseResult3);
return array;
}
public void testParseScrollId() {
final AtomicArray<SearchPhaseResult> queryResults = generateQueryResults();
String scrollId = TransportSearchHelper.buildScrollId(queryResults);
ParsedScrollId parseScrollId = TransportSearchHelper.parseScrollId(scrollId);
assertEquals(3, parseScrollId.getContext().length);
assertEquals("node_1", parseScrollId.getContext()[0].getNode());
assertEquals("cluster_x", parseScrollId.getContext()[0].getClusterAlias());
assertEquals(1, parseScrollId.getContext()[0].getSearchContextId().getId());
assertThat(parseScrollId.getContext()[0].getSearchContextId().getSessionId(), equalTo("a"));
assertEquals("node_2", parseScrollId.getContext()[1].getNode());
assertEquals("cluster_y", parseScrollId.getContext()[1].getClusterAlias());
assertEquals(12, parseScrollId.getContext()[1].getSearchContextId().getId());
assertThat(parseScrollId.getContext()[1].getSearchContextId().getSessionId(), equalTo("b"));
assertEquals("node_3", parseScrollId.getContext()[2].getNode());
assertNull(parseScrollId.getContext()[2].getClusterAlias());
assertEquals(42, parseScrollId.getContext()[2].getSearchContextId().getId());
assertThat(parseScrollId.getContext()[2].getSearchContextId().getSessionId(), equalTo("c"));
}
}
| TransportSearchHelperTests |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/MonoContextWriteTest.java | {
"start": 811,
"end": 1074
} | class ____ {
@Test
public void scanOperator(){
MonoContextWrite<Integer> test = new MonoContextWrite<>(Mono.just(1), c -> c);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
} | MonoContextWriteTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestLocalModeWithNewApis.java | {
"start": 4886,
"end": 5318
} | class ____
extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
}
| IntSumReducer |
java | elastic__elasticsearch | x-pack/plugin/redact/src/main/java/org/elasticsearch/xpack/redact/RedactProcessor.java | {
"start": 9497,
"end": 9582
} | class ____ implements GrokCaptureExtracter {
static | RegionTrackingMatchExtractor |
java | google__guice | core/test/com/google/inject/DuplicateBindingsTest.java | {
"start": 20051,
"end": 20185
} | class ____ implements Provider<Foo> {
@Override
public Foo get() {
return new Bar();
}
}
private static | BarProvider |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/processor/idempotent/kafka/KafkaConsumerUtil.java | {
"start": 1077,
"end": 2431
} | class ____ {
/**
* Tests whether the Kafka consumer reached the target offsets for all specified topic partitions.
*
* @param consumer Kafka consumer. It is expected to have some assignment to topic partitions.
* @param targetOffsets Target offsets for topic partitions.
* @param <K> Key type.
* @param <V> Value type.
* @return {@code true} if the consumer has reached the target offsets for all specified topic
* partitions.
*/
public static <K, V> boolean isReachedOffsets(Consumer<K, V> consumer, Map<TopicPartition, Long> targetOffsets) {
if (ObjectHelper.isEmpty(targetOffsets)) {
throw new IllegalArgumentException("Target offsets must be non-empty");
}
Set<TopicPartition> partitions = consumer.assignment();
/* If some partition is missing in the targetOffsets map, then we do not check the offset for this partition. */
Map<TopicPartition, Long> extendedTargetOffsets = new HashMap<>(targetOffsets);
partitions.forEach(partition -> extendedTargetOffsets.putIfAbsent(partition, Long.MIN_VALUE));
return partitions.stream()
.allMatch(partition -> consumer.position(partition) >= extendedTargetOffsets.get(partition));
}
}
| KafkaConsumerUtil |
java | apache__spark | sql/api/src/main/java/org/apache/spark/sql/RowFactory.java | {
"start": 955,
"end": 1038
} | class ____ to construct {@link Row} objects.
*
* @since 1.3.0
*/
@Stable
public | used |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/fielddata/SingletonSortedBinaryDocValues.java | {
"start": 637,
"end": 1209
} | class ____ extends SortedBinaryDocValues {
private final BinaryDocValues in;
SingletonSortedBinaryDocValues(BinaryDocValues in) {
this.in = in;
}
@Override
public boolean advanceExact(int doc) throws IOException {
return in.advanceExact(doc);
}
@Override
public int docValueCount() {
return 1;
}
@Override
public BytesRef nextValue() throws IOException {
return in.binaryValue();
}
public BinaryDocValues getBinaryDocValues() {
return in;
}
}
| SingletonSortedBinaryDocValues |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-core-stubs/maven-ear-plugin/src/main/java/org/apache/maven/plugin/coreit/EarMojo.java | {
"start": 1430,
"end": 2924
} | class ____ extends AbstractMojo {
/**
* The current Maven project.
*/
@Parameter(defaultValue = "${project}", required = true, readonly = true)
private MavenProject project;
/**
* The path to the output file, relative to the project base directory.
*
*/
@Parameter
private String pathname = "target/ear-ear.txt";
/**
* Runs this mojo.
*
* @throws MojoExecutionException If the output file could not be created.
* @throws MojoFailureException If the output file has not been set.
*/
public void execute() throws MojoExecutionException, MojoFailureException {
getLog().info("[MAVEN-CORE-IT-LOG] Using output file path: " + pathname);
if (pathname == null || pathname.length() <= 0) {
throw new MojoFailureException("Path name for output file has not been specified");
}
File outputFile = new File(pathname);
if (!outputFile.isAbsolute()) {
outputFile = new File(project.getBasedir(), pathname).getAbsoluteFile();
}
getLog().info("[MAVEN-CORE-IT-LOG] Creating output file: " + outputFile);
try {
outputFile.getParentFile().mkdirs();
outputFile.createNewFile();
} catch (IOException e) {
throw new MojoExecutionException("Output file could not be created: " + pathname, e);
}
getLog().info("[MAVEN-CORE-IT-LOG] Created output file: " + outputFile);
}
}
| EarMojo |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SshComponentBuilderFactory.java | {
"start": 1786,
"end": 18101
} | interface ____ extends ComponentBuilder<SshComponent> {
/**
* Specifies whether a connection to an unknown host should fail or not.
* This value is only checked when the property knownHosts is set.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param failOnUnknownHost the value to set
* @return the dsl builder
*/
default SshComponentBuilder failOnUnknownHost(boolean failOnUnknownHost) {
doSetProperty("failOnUnknownHost", failOnUnknownHost);
return this;
}
/**
* Sets the resource path for a known_hosts file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param knownHostsResource the value to set
* @return the dsl builder
*/
default SshComponentBuilder knownHostsResource(java.lang.String knownHostsResource) {
doSetProperty("knownHostsResource", knownHostsResource);
return this;
}
/**
* Sets the timeout in milliseconds to wait in establishing the remote
* SSH server connection. Defaults to 30000 milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 30000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default SshComponentBuilder timeout(long timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default SshComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Sets the command string to send to the remote SSH server during every
* poll cycle. Only works with camel-ssh component being used as a
* consumer, i.e. from(ssh://...) You may need to end your command with
* a newline, and that must be URL encoded %0A.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param pollCommand the value to set
* @return the dsl builder
*/
default SshComponentBuilder pollCommand(java.lang.String pollCommand) {
doSetProperty("pollCommand", pollCommand);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default SshComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default SshComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* Sets the channel type to pass to the Channel as part of command
* execution. Defaults to exec.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: exec
* Group: advanced
*
* @param channelType the value to set
* @return the dsl builder
*/
default SshComponentBuilder channelType(java.lang.String channelType) {
doSetProperty("channelType", channelType);
return this;
}
/**
* Instance of ClientBuilder used by the producer or consumer to create
* a new SshClient.
*
* The option is a:
* <code>org.apache.sshd.client.ClientBuilder</code> type.
*
* Group: advanced
*
* @param clientBuilder the value to set
* @return the dsl builder
*/
default SshComponentBuilder clientBuilder(org.apache.sshd.client.ClientBuilder clientBuilder) {
doSetProperty("clientBuilder", clientBuilder);
return this;
}
/**
* Whether to use compression, and if so which.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param compressions the value to set
* @return the dsl builder
*/
default SshComponentBuilder compressions(java.lang.String compressions) {
doSetProperty("compressions", compressions);
return this;
}
/**
* Component configuration.
*
* The option is a:
* <code>org.apache.camel.component.ssh.SshConfiguration</code> type.
*
* Group: advanced
*
* @param configuration the value to set
* @return the dsl builder
*/
default SshComponentBuilder configuration(org.apache.camel.component.ssh.SshConfiguration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Sets the shellPrompt to be dropped when response is read after
* command execution.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param shellPrompt the value to set
* @return the dsl builder
*/
default SshComponentBuilder shellPrompt(java.lang.String shellPrompt) {
doSetProperty("shellPrompt", shellPrompt);
return this;
}
/**
* Sets the sleep period in milliseconds to wait reading response from
* shell prompt. Defaults to 100 milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 100
* Group: advanced
*
* @param sleepForShellPrompt the value to set
* @return the dsl builder
*/
default SshComponentBuilder sleepForShellPrompt(long sleepForShellPrompt) {
doSetProperty("sleepForShellPrompt", sleepForShellPrompt);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default SshComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default SshComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* Sets the resource path of the certificate to use for Authentication.
* Will use ResourceHelperKeyPairProvider to resolve file based
* certificate, and depends on keyType setting.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param certResource the value to set
* @return the dsl builder
*/
default SshComponentBuilder certResource(java.lang.String certResource) {
doSetProperty("certResource", certResource);
return this;
}
/**
* Sets the password to use in loading certResource, if certResource is
* an encrypted key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param certResourcePassword the value to set
* @return the dsl builder
*/
default SshComponentBuilder certResourcePassword(java.lang.String certResourcePassword) {
doSetProperty("certResourcePassword", certResourcePassword);
return this;
}
/**
* Comma-separated list of allowed/supported ciphers in their order of
* preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param ciphers the value to set
* @return the dsl builder
*/
default SshComponentBuilder ciphers(java.lang.String ciphers) {
doSetProperty("ciphers", ciphers);
return this;
}
/**
* Comma-separated list of allowed/supported key exchange algorithms in
* their order of preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param kex the value to set
* @return the dsl builder
*/
default SshComponentBuilder kex(java.lang.String kex) {
doSetProperty("kex", kex);
return this;
}
/**
* Sets the KeyPairProvider reference to use when connecting using
* Certificates to the remote SSH Server.
*
* The option is a:
* <code>org.apache.sshd.common.keyprovider.KeyPairProvider</code> type.
*
* Group: security
*
* @param keyPairProvider the value to set
* @return the dsl builder
*/
default SshComponentBuilder keyPairProvider(org.apache.sshd.common.keyprovider.KeyPairProvider keyPairProvider) {
doSetProperty("keyPairProvider", keyPairProvider);
return this;
}
/**
* Sets the key type to pass to the KeyPairProvider as part of
* authentication. KeyPairProvider.loadKey(...) will be passed this
* value. From Camel 3.0.0 / 2.25.0, by default Camel will select the
* first available KeyPair that is loaded. Prior to this, a KeyType of
* 'ssh-rsa' was enforced by default.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyType the value to set
* @return the dsl builder
*/
default SshComponentBuilder keyType(java.lang.String keyType) {
doSetProperty("keyType", keyType);
return this;
}
/**
* Comma-separated list of allowed/supported message authentication code
* algorithms in their order of preference. The MAC algorithm is used
* for data integrity protection.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param macs the value to set
* @return the dsl builder
*/
default SshComponentBuilder macs(java.lang.String macs) {
doSetProperty("macs", macs);
return this;
}
/**
* Sets the password to use in connecting to remote SSH server. Requires
* keyPairProvider to be set to null.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default SshComponentBuilder password(java.lang.String password) {
doSetProperty("password", password);
return this;
}
/**
* Comma-separated list of allowed/supported signature algorithms in
* their order of preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param signatures the value to set
* @return the dsl builder
*/
default SshComponentBuilder signatures(java.lang.String signatures) {
doSetProperty("signatures", signatures);
return this;
}
/**
* Sets the username to use in logging into the remote SSH server.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default SshComponentBuilder username(java.lang.String username) {
doSetProperty("username", username);
return this;
}
}
| SshComponentBuilder |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/accesslog/HttpAccessLogHandler.java | {
"start": 1945,
"end": 7904
} | class ____ extends ChannelDuplexHandler {
/**
* The default logger name.
*/
public static final String HTTP_ACCESS_LOGGER = "HTTP_ACCESS_LOGGER";
static final String H2_PROTOCOL_NAME = "HTTP/2.0";
private static final AttributeKey<AccessLogHolder> ACCESS_LOGGER = AttributeKey.valueOf("ACCESS_LOGGER");
private final Logger logger;
private final AccessLogFormatParser accessLogFormatParser;
private final Predicate<String> uriInclusion;
/**
* Creates a HttpAccessLogHandler.
*
* @param loggerName A logger name.
* @param spec The log format specification.
*/
public HttpAccessLogHandler(String loggerName, String spec) {
this(loggerName == null || loggerName.isEmpty() ? null : LoggerFactory.getLogger(loggerName), spec, null);
}
/**
* Creates a HttpAccessLogHandler.
*
* @param loggerName A logger name.
* @param spec The log format specification.
* @param uriInclusion A filtering Predicate that will be checked per URI.
*/
public HttpAccessLogHandler(String loggerName, String spec, Predicate<String> uriInclusion) {
this(loggerName == null || loggerName.isEmpty() ? null : LoggerFactory.getLogger(loggerName), spec, uriInclusion);
}
/**
* Creates a HttpAccessLogHandler.
*
* @param logger A logger. Will log at info level.
* @param spec The log format specification.
*/
public HttpAccessLogHandler(Logger logger, String spec) {
this(logger, spec, null);
}
/**
* Creates a HttpAccessLogHandler.
*
* @param logger A logger. Will log at info level.
* @param spec The log format specification.
* @param uriInclusion A filtering Predicate that will be checked per URI.
*/
public HttpAccessLogHandler(Logger logger, String spec, Predicate<String> uriInclusion) {
super();
this.logger = logger == null ? LoggerFactory.getLogger(HTTP_ACCESS_LOGGER) : logger;
this.accessLogFormatParser = new AccessLogFormatParser(spec);
this.uriInclusion = uriInclusion;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Http2Exception {
if (logger.isInfoEnabled() && msg instanceof HttpRequest request) {
AccessLogHolder accessLogHolder = getAccessLogHolder(ctx, true);
assert accessLogHolder != null; // can only return null when createIfMissing is false
if (uriInclusion == null || uriInclusion.test(request.uri())) {
final HttpHeaders headers = request.headers();
// Trying to detect http/2
String protocol;
if (headers.contains(ExtensionHeaderNames.STREAM_ID.text()) || headers.contains(ExtensionHeaderNames.SCHEME.text())) {
protocol = H2_PROTOCOL_NAME;
} else {
protocol = request.protocolVersion().text();
}
accessLogHolder.createLogForRequest().onRequestHeaders(ConnectionMetadata.ofNettyChannel(ctx.channel()), request.method().name(), request.headers(), request.uri(), protocol);
} else {
accessLogHolder.excludeRequest();
}
}
ctx.fireChannelRead(msg);
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
if (logger.isInfoEnabled()) {
processWriteEvent(ctx, msg, promise);
} else {
super.write(ctx, msg, promise);
}
}
private void log(ChannelHandlerContext ctx, Object msg, ChannelPromise promise, AccessLog accessLog, AccessLogHolder accessLogHolder) {
ctx.write(msg, promise.unvoid()).addListener(future -> {
if (future.isSuccess()) {
accessLog.log(logger);
accessLogHolder.logForReuse = accessLog;
}
});
}
private void processWriteEvent(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
AccessLogHolder accessLogHolder = getAccessLogHolder(ctx, false);
if (accessLogHolder != null) {
boolean isContinueResponse = msg instanceof HttpResponse hr && hr.status().equals(HttpResponseStatus.CONTINUE);
AccessLog accessLogger = accessLogHolder.getLogForResponse(
msg instanceof LastHttpContent && !isContinueResponse);
if (accessLogger != null && !isContinueResponse) {
if (msg instanceof HttpResponse response) {
accessLogger.onResponseHeaders(ctx, response.headers(), response.status().codeAsText().toString());
}
if (msg instanceof LastHttpContent content) {
accessLogger.onLastResponseWrite(content.content().readableBytes());
log(ctx, msg, promise, accessLogger, accessLogHolder);
return;
} else if (msg instanceof ByteBufHolder holder) {
accessLogger.onResponseWrite(holder.content().readableBytes());
} else if (msg instanceof ByteBuf buf) {
accessLogger.onResponseWrite(buf.readableBytes());
}
}
}
super.write(ctx, msg, promise);
}
@Nullable
private AccessLogHolder getAccessLogHolder(ChannelHandlerContext ctx, boolean createIfMissing) {
final Attribute<AccessLogHolder> attr = ctx.channel().attr(ACCESS_LOGGER);
AccessLogHolder holder = attr.get();
if (holder == null) {
if (!createIfMissing) {
return null;
}
holder = new AccessLogHolder();
attr.set(holder);
}
return holder;
}
/**
* Holder for {@link AccessLog} instances. {@link AccessLog} can only handle one concurrent request at a time, this
* | HttpAccessLogHandler |
java | apache__kafka | jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/TestLinearWriteSpeed.java | {
"start": 8829,
"end": 8950
} | interface ____ {
int write() throws IOException;
void close() throws IOException;
}
static | Writable |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/mappedsuperclass/typedmappedsuperclass/TypesMappedSuperclassTest.java | {
"start": 559,
"end": 882
} | class ____ {
@Test
@WithClasses({
AttachmentGroup.class,
AttachmentGroupInTopic.class,
AttachmentGroupPost.class,
AttachmentGroupPostInTopic.class,
Post.class,
UserRole.class
})
void testExtractClosestRealType() {
assertMetamodelClassGeneratedFor( AttachmentGroup.class );
}
}
| TypesMappedSuperclassTest |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/CachedMethodExecutorTests.java | {
"start": 1186,
"end": 2359
} | class ____ {
private final ExpressionParser parser = new SpelExpressionParser();
private final StandardEvaluationContext context = new StandardEvaluationContext(new RootObject());
@Test
void testCachedExecutionForParameters() {
Expression expression = this.parser.parseExpression("echo(#var)");
assertMethodExecution(expression, 42, "int: 42");
assertMethodExecution(expression, 42, "int: 42");
assertMethodExecution(expression, "Deep Thought", "String: Deep Thought");
assertMethodExecution(expression, 42, "int: 42");
}
@Test
void testCachedExecutionForTarget() {
Expression expression = this.parser.parseExpression("#var.echo(42)");
assertMethodExecution(expression, new RootObject(), "int: 42");
assertMethodExecution(expression, new RootObject(), "int: 42");
assertMethodExecution(expression, new BaseObject(), "String: 42");
assertMethodExecution(expression, new RootObject(), "int: 42");
}
private void assertMethodExecution(Expression expression, Object var, String expected) {
this.context.setVariable("var", var);
assertThat(expression.getValue(this.context)).isEqualTo(expected);
}
public static | CachedMethodExecutorTests |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/stubbing/Stubbing.java | {
"start": 617,
"end": 805
} | interface ____ not extensible (see {@link NotExtensible}).
* Extending Answer was needed to improve Mockito domain model and simplify the code.
*
* @since 2.2.3
*/
@NotExtensible
public | is |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/java8stream/base/SourceElement.java | {
"start": 238,
"end": 432
} | class ____ {
private String source;
public String getSource() {
return source;
}
public void setSource(String source) {
this.source = source;
}
}
| SourceElement |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java | {
"start": 1021,
"end": 1894
} | class ____ implements SASKeyGeneratorInterface {
/**
* Configuration key to be used to specify the expiry period for SAS keys
* This value currently is specified in days. {@value}
*/
public static final String KEY_SAS_KEY_EXPIRY_PERIOD =
"fs.azure.sas.expiry.period";
/**
* Default value for the SAS key expiry period in days. {@value}
*/
public static final long DEFAULT_CONTAINER_SAS_KEY_PERIOD = 90;
private long sasKeyExpiryPeriod;
private Configuration conf;
public SASKeyGeneratorImpl(Configuration conf) {
this.conf = conf;
this.sasKeyExpiryPeriod = conf.getTimeDuration(
KEY_SAS_KEY_EXPIRY_PERIOD, DEFAULT_CONTAINER_SAS_KEY_PERIOD,
TimeUnit.DAYS);
}
public long getSasKeyExpiryPeriod() {
return sasKeyExpiryPeriod;
}
public Configuration getConf() {
return conf;
}
} | SASKeyGeneratorImpl |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/datastream/UnionSerializerUpgradeTest.java | {
"start": 3813,
"end": 4890
} | class ____
implements TypeSerializerUpgradeTestBase.UpgradeVerifier<TaggedUnion<String, Long>> {
@Override
public TypeSerializer<TaggedUnion<String, Long>> createUpgradedSerializer() {
return new UnionSerializer<>(StringSerializer.INSTANCE, LongSerializer.INSTANCE);
}
@Override
public Condition<TaggedUnion<String, Long>> testDataCondition() {
return new Condition<>(value -> TaggedUnion.one("flink").equals(value), "");
}
@Override
public Condition<TypeSerializerSchemaCompatibility<TaggedUnion<String, Long>>>
schemaCompatibilityCondition(FlinkVersion version) {
return TypeSerializerConditions.isCompatibleAsIs();
}
}
// ----------------------------------------------------------------------------------------------
// Specification for "union-serializer-for-TaggedUnion.two"
// ----------------------------------------------------------------------------------------------
/**
* This | UnionSerializerOneVerifier |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/MapStateNullValueCheckpointingITCase.java | {
"start": 9485,
"end": 11377
} | class ____ extends RichMapFunction<Long, Long> {
static CompletableFuture<Void> firstRunFuture;
static CompletableFuture<Map<String, String>> secondRunFuture;
private final boolean isFirstRun;
private boolean hasPopulated;
private transient MapState<String, String> mapState;
StatefulMapper(boolean isFirstRun) {
this.isFirstRun = isFirstRun;
}
@Override
public void open(OpenContext context) {
MapStateDescriptor<String, String> mapStateDescriptor =
new MapStateDescriptor<>(
"map-state",
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
mapState = getRuntimeContext().getMapState(mapStateDescriptor);
ValueStateDescriptor<Boolean> hasPopulatedStateDescriptor =
new ValueStateDescriptor<>("has-populated", BasicTypeInfo.BOOLEAN_TYPE_INFO);
hasPopulated = false;
}
@Override
public Long map(Long value) throws Exception {
if (hasPopulated) {
return value;
}
if (isFirstRun) {
mapState.put("key", "value");
mapState.put("null-key", null);
firstRunFuture.complete(null);
} else {
// This is the first record for this key after restore.
// Verify that the state is correctly restored.
Map<String, String> restoredState = new HashMap<>();
restoredState.put("key", mapState.get("key"));
restoredState.put("null-key", mapState.get("null-key"));
secondRunFuture.complete(restoredState);
}
hasPopulated = true;
return value;
}
}
}
| StatefulMapper |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java | {
"start": 1168,
"end": 3293
} | class ____ {
{
DFSTestUtil.setNameNodeLogLevel(Level.TRACE);
}
@Test
public void testFileCreationDeleteParent() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
// create file1.
Path dir = new Path("/foo");
Path file1 = new Path(dir, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file1);
TestFileCreation.writeFile(stm1, 1000);
stm1.hflush();
// create file2.
Path file2 = new Path("/file2");
FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file2);
TestFileCreation.writeFile(stm2, 1000);
stm2.hflush();
// rm dir
fs.delete(dir, true);
// restart cluster.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).format(false).build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).format(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(file2));
} finally {
fs.close();
cluster.shutdown();
}
}
}
| TestFileCreationDelete |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java | {
"start": 1574,
"end": 2173
} | class ____ {
@Mock
private Service service;
@Mock
private RuntimeException e;
@Test
public void testStopQuietlyWhenServiceStopThrowsException() throws Exception {
Logger logger = LoggerFactory.getLogger(TestServiceOperations.class);
LogCapturer logCapturer = captureLogs(logger);
doThrow(e).when(service).stop();
ServiceOperations.stopQuietly(logger, service);
assertThat(logCapturer.getOutput())
.contains("When stopping the service " + service.getName());
verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
}
} | TestServiceOperations |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/InvalidBlockTagTest.java | {
"start": 4716,
"end": 4950
} | interface ____ {
/**
* @inheritDoc
*/
void frobnicate(String foo);
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/MappingTest_createTable.java | {
"start": 256,
"end": 2794
} | class ____ extends TestCase {
String sql = "create table user (\n" +
"source_key int,\n" +
"source_value varchar(32),\n" +
"primary key(source_key)\n" +
");";
Map<String, String> mapping = Collections.singletonMap("user", "user_01");
public void test_mapping_createTable() throws Exception {
String result = SQLUtils.refactor(sql, null, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
public void test_mapping_createTable_mysql() throws Exception {
String result = SQLUtils.refactor(sql, JdbcConstants.MYSQL, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
public void test_mapping_createTable_oracle() throws Exception {
String result = SQLUtils.refactor(sql, JdbcConstants.ORACLE, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
public void test_mapping_createTable_pg() throws Exception {
String result = SQLUtils.refactor(sql, JdbcConstants.POSTGRESQL, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
public void test_mapping_createTable_sqlserver() throws Exception {
String result = SQLUtils.refactor(sql, JdbcConstants.SQL_SERVER, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
public void test_mapping_createTable_db2() throws Exception {
String result = SQLUtils.refactor(sql, JdbcConstants.DB2, mapping);
assertEquals("CREATE TABLE user_01 (\n" +
"\tsource_key int,\n" +
"\tsource_value varchar(32),\n" +
"\tPRIMARY KEY (source_key)\n" +
");", result);
}
}
| MappingTest_createTable |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest83.java | {
"start": 943,
"end": 4780
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
"CREATE TABLE NIRVANA.CS_MATURE_ADVICE (\n" +
"\tID DECIMAL(38) NOT NULL,\n" +
"\tGMT_CREATED TIMESTAMP(0) NOT NULL,\n" +
"\tGMT_MODIFIED TIMESTAMP(0),\n" +
"\tCREATOR VARCHAR(32) NOT NULL,\n" +
"\tMODIFIER VARCHAR(32),\n" +
"\tTYPE VARCHAR(32) NOT NULL,\n" +
"\tEXPLAIN VARCHAR(256),\n" +
"\tMATURITY_REF DECIMAL(38) NOT NULL,\n" +
"\tSERVICE VARCHAR(512) NOT NULL,\n" +
"\tPRIORITY BIGINT,\n" +
"\tREPOSITORY_URL VARCHAR(512),\n" +
"\tSERVICE_URL VARCHAR(512),\n" +
"\tPRIMARY KEY (ID)\n" +
");";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
SQLCreateTableStatement stmt = (SQLCreateTableStatement) statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
//
assertEquals("CREATE TABLE NIRVANA.CS_MATURE_ADVICE (\n" +
"\tID DECIMAL(38) NOT NULL,\n" +
"\tGMT_CREATED TIMESTAMP(0) NOT NULL,\n" +
"\tGMT_MODIFIED TIMESTAMP(0),\n" +
"\tCREATOR VARCHAR(32) NOT NULL,\n" +
"\tMODIFIER VARCHAR(32),\n" +
"\tTYPE VARCHAR(32) NOT NULL,\n" +
"\tEXPLAIN VARCHAR(256),\n" +
"\tMATURITY_REF DECIMAL(38) NOT NULL,\n" +
"\tSERVICE VARCHAR(512) NOT NULL,\n" +
"\tPRIORITY BIGINT,\n" +
"\tREPOSITORY_URL VARCHAR(512),\n" +
"\tSERVICE_URL VARCHAR(512),\n" +
"\tPRIMARY KEY (ID)\n" +
");",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
stmt.renameColumn("ID", "FID");
assertEquals("CREATE TABLE NIRVANA.CS_MATURE_ADVICE (\n" +
"\tFID DECIMAL(38) NOT NULL,\n" +
"\tGMT_CREATED TIMESTAMP(0) NOT NULL,\n" +
"\tGMT_MODIFIED TIMESTAMP(0),\n" +
"\tCREATOR VARCHAR(32) NOT NULL,\n" +
"\tMODIFIER VARCHAR(32),\n" +
"\tTYPE VARCHAR(32) NOT NULL,\n" +
"\tEXPLAIN VARCHAR(256),\n" +
"\tMATURITY_REF DECIMAL(38) NOT NULL,\n" +
"\tSERVICE VARCHAR(512) NOT NULL,\n" +
"\tPRIORITY BIGINT,\n" +
"\tREPOSITORY_URL VARCHAR(512),\n" +
"\tSERVICE_URL VARCHAR(512),\n" +
"\tPRIMARY KEY (ID)\n" +
");",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
//
// SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
// stmt.accept(visitor);
//
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("relationships : " + visitor.getRelationships());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
//
// assertEquals(1, visitor.getTables().size());
//
// assertEquals(3, visitor.getColumns().size());
//
// assertTrue(visitor.getColumns().contains(new TableStat.Column("JWGZPT.A", "XM")));
}
}
| OracleCreateTableTest83 |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java | {
"start": 1491,
"end": 7372
} | class ____ extends AsyncWaitStep {
static final String NAME = "wait-for-snapshot";
private static final Logger logger = LogManager.getLogger(WaitForSnapshotStep.class);
private static final String MESSAGE_FIELD = "message";
private static final String POLICY_NOT_EXECUTED_MESSAGE = "waiting for policy '%s' to be executed since %s";
private static final String POLICY_NOT_FOUND_MESSAGE = "configured policy '%s' not found";
private static final String INDEX_NOT_INCLUDED_IN_SNAPSHOT_MESSAGE =
"the last successful snapshot of policy '%s' does not include index '%s'";
private static final String UNEXPECTED_SNAPSHOT_STATE_MESSAGE =
"unexpected number of snapshots retrieved for repository '%s' and snapshot '%s' (expected 1, found %d)";
private static final String NO_ACTION_TIME_MESSAGE = "no information about ILM action start in index metadata for index '%s'";
private final String policy;
WaitForSnapshotStep(StepKey key, StepKey nextStepKey, Client client, String policy) {
super(key, nextStepKey, client);
this.policy = policy;
}
@Override
public void evaluateCondition(ProjectState state, IndexMetadata indexMetadata, Listener listener, TimeValue masterTimeout) {
String indexName = indexMetadata.getIndex().getName();
Long actionTime = indexMetadata.getLifecycleExecutionState().actionTime();
if (actionTime == null) {
listener.onFailure(error(NO_ACTION_TIME_MESSAGE, indexName));
return;
}
SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE);
if (snapMeta == null || snapMeta.getSnapshotConfigurations().containsKey(policy) == false) {
listener.onFailure(error(POLICY_NOT_FOUND_MESSAGE, policy));
return;
}
SnapshotLifecyclePolicyMetadata snapPolicyMeta = snapMeta.getSnapshotConfigurations().get(policy);
if (snapPolicyMeta.getLastSuccess() == null
|| snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp() == null
|| snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp() < actionTime) {
if (snapPolicyMeta.getLastSuccess() == null) {
logger.debug("skipping ILM policy execution because there is no last snapshot success, action time: {}", actionTime);
} else if (snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp() == null) {
/*
* This is because we are running in mixed cluster mode, and the snapshot was taken on an older master, which then went
* down before this check could happen. We'll wait until a snapshot is taken on this newer master before passing this check.
*/
logger.debug("skipping ILM policy execution because no last snapshot start date, action time: {}", actionTime);
} else {
logger.debug(
"skipping ILM policy execution because snapshot start time {} is before action time {}, snapshot timestamp is {}",
snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp(),
actionTime,
snapPolicyMeta.getLastSuccess().getSnapshotFinishTimestamp()
);
}
listener.onResponse(false, notExecutedMessage(actionTime));
return;
}
logger.debug(
"executing policy because snapshot start time {} is after action time {}, snapshot timestamp is {}",
snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp(),
actionTime,
snapPolicyMeta.getLastSuccess().getSnapshotFinishTimestamp()
);
String snapshotName = snapPolicyMeta.getLastSuccess().getSnapshotName();
String repositoryName = snapPolicyMeta.getPolicy().getRepository();
GetSnapshotsRequest request = new GetSnapshotsRequest(TimeValue.MAX_VALUE).repositories(repositoryName)
.snapshots(new String[] { snapshotName })
.includeIndexNames(true)
.verbose(false);
getClient(state.projectId()).admin().cluster().getSnapshots(request, ActionListener.wrap(response -> {
if (response.getSnapshots().size() != 1) {
listener.onFailure(error(UNEXPECTED_SNAPSHOT_STATE_MESSAGE, repositoryName, snapshotName, response.getSnapshots().size()));
} else {
if (response.getSnapshots().get(0).indices().contains(indexName)) {
listener.onResponse(true, EmptyInfo.INSTANCE);
} else {
listener.onFailure(error(INDEX_NOT_INCLUDED_IN_SNAPSHOT_MESSAGE, policy, indexName));
}
}
}, listener::onFailure));
}
public String getPolicy() {
return policy;
}
@Override
public boolean isRetryable() {
return true;
}
private ToXContentObject notExecutedMessage(long time) {
return (builder, params) -> {
builder.startObject();
builder.field(MESSAGE_FIELD, Strings.format(POLICY_NOT_EXECUTED_MESSAGE, policy, new Date(time)));
builder.endObject();
return builder;
};
}
private static IllegalStateException error(String message, Object... args) {
return new IllegalStateException(Strings.format(message, args));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (super.equals(o) == false) return false;
WaitForSnapshotStep that = (WaitForSnapshotStep) o;
return policy.equals(that.policy);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), policy);
}
}
| WaitForSnapshotStep |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/dto/QueryRecordsPushTopic.java | {
"start": 1005,
"end": 1081
} | class ____ extends AbstractQueryRecordsBase<PushTopic> {
}
| QueryRecordsPushTopic |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClusterMetrics.java | {
"start": 1242,
"end": 2732
} | class ____ {
private ClusterMetrics metrics;
/**
* Test below metrics
* - aMLaunchDelay
* - aMRegisterDelay
* - aMContainerAllocationDelay
*/
@Test
public void testAmMetrics() throws Exception {
assert(metrics != null);
assertTrue(!metrics.aMLaunchDelay.changed());
assertTrue(!metrics.aMRegisterDelay.changed());
assertTrue(!metrics.getAMContainerAllocationDelay().changed());
metrics.addAMLaunchDelay(1);
metrics.addAMRegisterDelay(1);
metrics.addAMContainerAllocationDelay(1);
assertTrue(metrics.aMLaunchDelay.changed());
assertTrue(metrics.aMRegisterDelay.changed());
assertTrue(metrics.getAMContainerAllocationDelay().changed());
}
@BeforeEach
public void setup() {
DefaultMetricsSystem.initialize("ResourceManager");
metrics = ClusterMetrics.getMetrics();
}
@AfterEach
public void tearDown() {
ClusterMetrics.destroy();
MetricsSystem ms = DefaultMetricsSystem.instance();
if (ms.getSource("ClusterMetrics") != null) {
DefaultMetricsSystem.shutdown();
}
}
@Test
public void testClusterMetrics() throws Exception {
assertTrue(!metrics.containerAssignedPerSecond.changed());
metrics.incrNumContainerAssigned();
metrics.incrNumContainerAssigned();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return metrics.getContainerAssignedPerSecond() == 2;
}
}, 500, 5000);
}
}
| TestClusterMetrics |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/naming/EmbeddedColumnNamingImplicitTests.java | {
"start": 946,
"end": 1663
} | class ____ {
@Test
@DomainModel( annotatedClasses = {Person.class, Address.class} )
@SessionFactory(exportSchema = false)
void testNaming(SessionFactoryScope factoryScope) {
final SessionFactoryImplementor sessionFactory = factoryScope.getSessionFactory();
final MappingMetamodelImplementor mappingMetamodel = sessionFactory.getMappingMetamodel();
final EntityPersister persister = mappingMetamodel.getEntityDescriptor( Person.class );
verifyColumnNames( persister.findAttributeMapping( "homeAddress" ), "homeAddress_" );
verifyColumnNames( persister.findAttributeMapping( "workAddress" ), "workAddress_" );
}
@Entity(name="Person")
@Table(name="person")
public static | EmbeddedColumnNamingImplicitTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/XmlVerifierEndpointBuilderFactory.java | {
"start": 16628,
"end": 17788
} | class ____
* XmlSignatureInvalidException. If the signature value validation
* fails, a XmlSignatureInvalidValueException is thrown. If a reference
* validation fails, a XmlSignatureInvalidContentHashException is
* thrown. For more detailed information, see the JavaDoc.
*
* The option is a:
* <code>org.apache.camel.component.xmlsecurity.api.ValidationFailedHandler</code> type.
*
* Group: producer
*
* @param validationFailedHandler the value to set
* @return the dsl builder
*/
default XmlVerifierEndpointBuilder validationFailedHandler(org.apache.camel.component.xmlsecurity.api.ValidationFailedHandler validationFailedHandler) {
doSetProperty("validationFailedHandler", validationFailedHandler);
return this;
}
/**
* Handles the different validation failed situations. The default
* implementation throws specific exceptions for the different
* situations (All exceptions have the package name
* org.apache.camel.component.xmlsecurity.api and are a sub- | of |
java | apache__camel | components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/CounterProducer.java | {
"start": 1300,
"end": 2450
} | class ____ extends AbstractMicrometerProducer<Counter> {
public CounterProducer(MicrometerEndpoint endpoint) {
super(endpoint);
}
@Override
protected Function<MeterRegistry, Counter> registrar(String name, String description, Iterable<Tag> tags) {
return meterRegistry -> Counter.builder(name).description(description).tags(tags).register(meterRegistry);
}
@Override
protected void doProcess(Exchange exchange, MicrometerEndpoint endpoint, Counter counter) {
Message in = exchange.getIn();
Double increment = simple(exchange, endpoint.getIncrement(), Double.class);
Double decrement = simple(exchange, endpoint.getDecrement(), Double.class);
Double finalIncrement = getDoubleHeader(in, HEADER_COUNTER_INCREMENT, increment);
Double finalDecrement = getDoubleHeader(in, HEADER_COUNTER_DECREMENT, decrement);
if (finalIncrement != null) {
counter.increment(finalIncrement);
} else if (finalDecrement != null) {
counter.increment(-finalDecrement);
} else {
counter.increment();
}
}
}
| CounterProducer |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/cfg/CacheProviderTest.java | {
"start": 3983,
"end": 4468
} | class ____ extends SimpleLookupCache<TypeKey, ValueSerializer<Object>> {
private static final long serialVersionUID = 1L;
public boolean _isInvoked = false;
public CustomTestSerializerCache() {
super(8, 64);
}
@Override
public ValueSerializer<Object> put(TypeKey key, ValueSerializer<Object> value) {
_isInvoked = true;
return super.put(key, value);
}
}
static | CustomTestSerializerCache |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/LineRecordReaderHelper.java | {
"start": 1123,
"end": 1911
} | class ____ extends
BaseLineRecordReaderHelper {
public LineRecordReaderHelper(Path filePath, Configuration conf) {
super(filePath, conf);
}
@Override
public long countRecords(long start, long length) throws IOException {
try (LineRecordReader reader = newReader(start, length)) {
LongWritable key = new LongWritable();
Text value = new Text();
long numRecords = 0L;
while (reader.next(key, value)) {
numRecords++;
}
return numRecords;
}
}
private LineRecordReader newReader(long start, long length)
throws IOException {
FileSplit split = new FileSplit(getFilePath(), start, length, (String[]) null);
return new LineRecordReader(getConf(), split, getRecordDelimiterBytes());
}
}
| LineRecordReaderHelper |
java | apache__flink | flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/table/batch/compact/BatchPartitionCommitterSinkTest.java | {
"start": 2104,
"end": 5599
} | class ____ {
private final FileSystemFactory fileSystemFactory = FileSystem::get;
private TableMetaStoreFactory metaStoreFactory;
private ObjectIdentifier identifier;
@TempDir private java.nio.file.Path path;
@TempDir private java.nio.file.Path outputPath;
@BeforeEach
public void before() {
metaStoreFactory =
new FileSystemCommitterTest.TestMetaStoreFactory(new Path(outputPath.toString()));
identifier = ObjectIdentifier.of("hiveCatalog", "default", "test");
}
@Test
public void testPartitionCommit() throws Exception {
BatchPartitionCommitterSink committerSink =
new BatchPartitionCommitterSink(
fileSystemFactory,
metaStoreFactory,
false,
false,
new Path(path.toString()),
new String[] {"p1", "p2"},
new LinkedHashMap<>(),
identifier,
new PartitionCommitPolicyFactory(null, null, null, null));
committerSink.open(DefaultOpenContext.INSTANCE);
List<Path> pathList1 = createFiles(path, "task-1/p1=0/p2=0/", "f1", "f2");
List<Path> pathList2 = createFiles(path, "task-2/p1=0/p2=0/", "f3");
List<Path> pathList3 = createFiles(path, "task-2/p1=0/p2=1/", "f4");
Map<String, List<Path>> compactedFiles = new HashMap<>();
pathList1.addAll(pathList2);
compactedFiles.put("p1=0/p2=0/", pathList1);
compactedFiles.put("p1=0/p2=1/", pathList3);
committerSink.invoke(new CompactMessages.CompactOutput(compactedFiles), TEST_SINK_CONTEXT);
committerSink.setRuntimeContext(TEST_RUNTIME_CONTEXT);
committerSink.finish();
committerSink.close();
assertThat(new File(outputPath.toFile(), "p1=0/p2=0/f1")).exists();
assertThat(new File(outputPath.toFile(), "p1=0/p2=0/f2")).exists();
assertThat(new File(outputPath.toFile(), "p1=0/p2=0/f3")).exists();
assertThat(new File(outputPath.toFile(), "p1=0/p2=1/f4")).exists();
}
private List<Path> createFiles(java.nio.file.Path parent, String path, String... files)
throws IOException {
java.nio.file.Path dir = Files.createDirectories(Paths.get(parent.toString(), path));
List<Path> paths = new ArrayList<>();
for (String file : files) {
paths.add(new Path(Files.createFile(dir.resolve(file)).toFile().getPath()));
}
return paths;
}
private static final RuntimeContext TEST_RUNTIME_CONTEXT = getMockRuntimeContext();
private static final SinkFunction.Context TEST_SINK_CONTEXT =
new SinkFunction.Context() {
@Override
public long currentProcessingTime() {
return 0;
}
@Override
public long currentWatermark() {
return 0;
}
@Override
public Long timestamp() {
return null;
}
};
private static RuntimeContext getMockRuntimeContext() {
return new MockStreamingRuntimeContext(1, 0) {
@Override
public ClassLoader getUserCodeClassLoader() {
return Thread.currentThread().getContextClassLoader();
}
};
}
}
| BatchPartitionCommitterSinkTest |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/client/XdsClient.java | {
"start": 15581,
"end": 16165
} | interface ____ {
void reportServerConnectionGauge(boolean isConnected, String xdsServer);
}
/**
* Reports whether xDS client has a "working" ADS stream to xDS server. The definition of a
* working stream is defined in gRFC A78.
*
* @see <a
* href="https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md#xdsclient">
* A78-grpc-metrics-wrr-pf-xds.md</a>
*/
public Future<Void> reportServerConnections(ServerConnectionCallback callback) {
throw new UnsupportedOperationException();
}
static final | ServerConnectionCallback |
java | square__retrofit | retrofit-converters/protobuf/src/test/java/retrofit2/converter/protobuf/ProtoConverterFactoryTest.java | {
"start": 1966,
"end": 2189
} | interface ____ {
@GET("/")
Call<Phone> get();
@POST("/")
Call<Phone> post(@Body MessageLite impl);
@GET("/")
Call<String> wrongClass();
@GET("/")
Call<List<String>> wrongType();
}
| Service |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/visitor/VisitorContext.java | {
"start": 8552,
"end": 8908
} | class ____
* @since 4.0.0
*/
default ClassElement getRequiredClassElement(String name, ElementAnnotationMetadataFactory annotationMetadataFactory) {
return getClassElement(name, annotationMetadataFactory).orElseThrow(() -> new IllegalStateException("Unknown type: " + name));
}
/**
* This method will look up another | element |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/http/client/bind/annotation/AnnotationBinderSpec.java | {
"start": 294,
"end": 810
} | class ____ {
@Test
void testBindingToTheRequest() {
EmbeddedServer server = ApplicationContext.run(EmbeddedServer.class);
MetadataClient client = server.getApplicationContext().getBean(MetadataClient.class);
Map<String, Object> metadata = new LinkedHashMap<>();
metadata.put("version", 3.6);
metadata.put("deploymentId", 42L);
String resp = client.get(metadata);
Assertions.assertEquals("3.6", resp);
server.close();
}
}
| AnnotationBinderSpec |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/streams/Message.java | {
"start": 78,
"end": 234
} | class ____ {
public String name;
public Message(String name) {
this.name = name;
}
// for Jsonb
public Message() {
}
}
| Message |
java | spring-projects__spring-framework | spring-orm/src/test/java/org/springframework/orm/jpa/hibernate/beans/BeanSource.java | {
"start": 687,
"end": 726
} | enum ____ {
SPRING,
FALLBACK
}
| BeanSource |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/SQLKeep.java | {
"start": 1652,
"end": 1705
} | enum ____ {
FIRST,
LAST
}
}
| DenseRank |
java | quarkusio__quarkus | extensions/infinispan-client/runtime/src/main/java/io/quarkus/infinispan/client/runtime/InfinispanClientBuildTimeConfig.java | {
"start": 990,
"end": 1724
} | interface ____ {
// @formatter:off
/**
* Cache configuration file in XML, JSON or YAML is defined in build time to create the cache on first access.
* An example of the user defined property. cacheConfig.xml file is located in the 'resources' folder:
* quarkus.infinispan-client.cache.bookscache.configuration-resource=cacheConfig.xml
*/
// @formatter:on
Optional<String> configurationResource();
}
/**
* Dev Services.
* <p>
* Dev Services allows Quarkus to automatically start an Infinispan Server in dev and test
* mode.
*/
@WithParentName
DevServiceConfiguration devservices();
@ConfigGroup
public | RemoteCacheConfig |
java | apache__camel | components/camel-quartz/src/test/java/org/apache/camel/component/quartz/QuartzRouteFireNowTest.java | {
"start": 989,
"end": 1957
} | class ____ extends BaseQuartzTest {
protected MockEndpoint resultEndpoint;
@Test
public void testQuartzRoute() throws Exception {
resultEndpoint = getMockEndpoint("mock:result");
resultEndpoint.expectedMinimumMessageCount(2);
resultEndpoint.message(0).header("triggerName").isEqualTo("myTimerName");
resultEndpoint.message(0).header("triggerGroup").isEqualTo("myGroup");
// lets test the receive worked
resultEndpoint.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: example
from("quartz://myGroup/myTimerName?trigger.repeatInterval=100&trigger.repeatCount=2")
.to("log:quartz")
.to("mock:result");
// END SNIPPET: example
}
};
}
}
| QuartzRouteFireNowTest |
java | qos-ch__slf4j | slf4j-ext/src/main/java/org/slf4j/profiler/DurationUnit.java | {
"start": 1307,
"end": 1379
} | enum ____ {
NANOSECOND, MICROSECOND, MILLISSECOND, SECOND;
} | DurationUnit |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/aspectj/autoproxy/AspectJAutoProxyCreatorTests.java | {
"start": 3652,
"end": 15632
} | class ____ {
@Test
void aspectsAreApplied() {
ClassPathXmlApplicationContext bf = newContext("aspects.xml");
ITestBean tb = bf.getBean("adrian", ITestBean.class);
assertThat(tb.getAge()).isEqualTo(68);
MethodInvokingFactoryBean factoryBean = (MethodInvokingFactoryBean) bf.getBean("&factoryBean");
assertThat(AopUtils.isAopProxy(factoryBean.getTargetObject())).isTrue();
assertThat(((ITestBean) factoryBean.getTargetObject()).getAge()).isEqualTo(68);
}
@Test
void multipleAspectsWithParameterApplied() {
ClassPathXmlApplicationContext bf = newContext("aspects.xml");
ITestBean tb = bf.getBean("adrian", ITestBean.class);
tb.setAge(10);
assertThat(tb.getAge()).isEqualTo(20);
}
@Test
void aspectsAreAppliedInDefinedOrder() {
ClassPathXmlApplicationContext bf = newContext("aspectsWithOrdering.xml");
ITestBean tb = bf.getBean("adrian", ITestBean.class);
assertThat(tb.getAge()).isEqualTo(71);
}
@Test
void aspectsAndAdvisorAreApplied() {
ClassPathXmlApplicationContext ac = newContext("aspectsPlusAdvisor.xml");
ITestBean shouldBeWoven = ac.getBean("adrian", ITestBean.class);
assertAspectsAndAdvisorAreApplied(ac, shouldBeWoven);
}
@Test
void aspectsAndAdvisorAreAppliedEvenIfComingFromParentFactory() {
ClassPathXmlApplicationContext ac = newContext("aspectsPlusAdvisor.xml");
GenericApplicationContext childAc = new GenericApplicationContext(ac);
// Create a child factory with a bean that should be woven
RootBeanDefinition bd = new RootBeanDefinition(TestBean.class);
bd.getPropertyValues()
.addPropertyValue(new PropertyValue("name", "Adrian"))
.addPropertyValue(new PropertyValue("age", 34));
childAc.registerBeanDefinition("adrian2", bd);
// Register the advisor auto proxy creator with subclass
childAc.registerBeanDefinition(AnnotationAwareAspectJAutoProxyCreator.class.getName(),
new RootBeanDefinition(AnnotationAwareAspectJAutoProxyCreator.class));
childAc.refresh();
ITestBean beanFromParentContextThatShouldBeWoven = ac.getBean("adrian", ITestBean.class);
ITestBean beanFromChildContextThatShouldBeWoven = childAc.getBean("adrian2", ITestBean.class);
assertAspectsAndAdvisorAreApplied(childAc, beanFromParentContextThatShouldBeWoven);
assertAspectsAndAdvisorAreApplied(childAc, beanFromChildContextThatShouldBeWoven);
}
protected void assertAspectsAndAdvisorAreApplied(ApplicationContext ac, ITestBean shouldBeWoven) {
TestBeanAdvisor tba = (TestBeanAdvisor) ac.getBean("advisor");
MultiplyReturnValue mrv = (MultiplyReturnValue) ac.getBean("aspect");
assertThat(mrv.getMultiple()).isEqualTo(3);
tba.count = 0;
mrv.invocations = 0;
assertThat(AopUtils.isAopProxy(shouldBeWoven)).as("Autoproxying must apply from @AspectJ aspect").isTrue();
assertThat(shouldBeWoven.getName()).isEqualTo("Adrian");
assertThat(mrv.invocations).isEqualTo(0);
assertThat(shouldBeWoven.getAge()).isEqualTo((34 * mrv.getMultiple()));
assertThat(tba.count).as("Spring advisor must be invoked").isEqualTo(2);
assertThat(mrv.invocations).as("Must be able to hold state in aspect").isEqualTo(1);
}
@Test
void perThisAspect() {
ClassPathXmlApplicationContext bf = newContext("perthis.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
assertThat(AopUtils.isAopProxy(adrian1)).isTrue();
assertThat(adrian1.getAge()).isEqualTo(0);
assertThat(adrian1.getAge()).isEqualTo(1);
ITestBean adrian2 = bf.getBean("adrian", ITestBean.class);
assertThat(adrian2).isNotSameAs(adrian1);
assertThat(AopUtils.isAopProxy(adrian1)).isTrue();
assertThat(adrian2.getAge()).isEqualTo(0);
assertThat(adrian2.getAge()).isEqualTo(1);
assertThat(adrian2.getAge()).isEqualTo(2);
assertThat(adrian2.getAge()).isEqualTo(3);
assertThat(adrian1.getAge()).isEqualTo(2);
}
@Test
void perTargetAspect() throws SecurityException, NoSuchMethodException {
ClassPathXmlApplicationContext bf = newContext("pertarget.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
assertThat(AopUtils.isAopProxy(adrian1)).isTrue();
// Does not trigger advice or count
int explicitlySetAge = 25;
adrian1.setAge(explicitlySetAge);
assertThat(adrian1.getAge()).as("Setter does not initiate advice").isEqualTo(explicitlySetAge);
// Fire aspect
AspectMetadata am = new AspectMetadata(PerTargetAspect.class, "someBean");
assertThat(am.getPerClausePointcut().getMethodMatcher().matches(TestBean.class.getMethod("getSpouse"), null)).isTrue();
adrian1.getSpouse();
assertThat(adrian1.getAge()).as("Advice has now been instantiated").isEqualTo(0);
adrian1.setAge(11);
assertThat(adrian1.getAge()).as("Any int setter increments").isEqualTo(2);
adrian1.setName("Adrian");
//assertEquals("Any other setter does not increment", 2, adrian1.getAge());
ITestBean adrian2 = bf.getBean("adrian", ITestBean.class);
assertThat(adrian2).isNotSameAs(adrian1);
assertThat(AopUtils.isAopProxy(adrian1)).isTrue();
assertThat(adrian2.getAge()).isEqualTo(34);
adrian2.getSpouse();
assertThat(adrian2.getAge()).as("Aspect now fired").isEqualTo(0);
assertThat(adrian2.getAge()).isEqualTo(1);
assertThat(adrian2.getAge()).isEqualTo(2);
assertThat(adrian1.getAge()).isEqualTo(3);
}
@Test // gh-31238
void cglibProxyClassIsCachedAcrossApplicationContextsForPerTargetAspect() {
Class<?> configClass = PerTargetProxyTargetClassTrueConfig.class;
TestBean testBean1;
TestBean testBean2;
// Round #1
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(configClass)) {
testBean1 = context.getBean(TestBean.class);
assertThat(AopUtils.isCglibProxy(testBean1)).as("CGLIB proxy").isTrue();
assertThat(testBean1.getClass().getInterfaces()).containsExactlyInAnyOrder(
Factory.class, SpringProxy.class, Advised.class);
}
// Round #2
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(configClass)) {
testBean2 = context.getBean(TestBean.class);
assertThat(AopUtils.isCglibProxy(testBean2)).as("CGLIB proxy").isTrue();
assertThat(testBean2.getClass().getInterfaces()).containsExactlyInAnyOrder(
Factory.class, SpringProxy.class, Advised.class);
}
assertThat(testBean1.getClass()).isSameAs(testBean2.getClass());
}
@Test
void twoAdviceAspect() {
ClassPathXmlApplicationContext bf = newContext("twoAdviceAspect.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
testAgeAspect(adrian1, 0, 2);
}
@Test
void twoAdviceAspectSingleton() {
ClassPathXmlApplicationContext bf = newContext("twoAdviceAspectSingleton.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
testAgeAspect(adrian1, 0, 1);
ITestBean adrian2 = bf.getBean("adrian", ITestBean.class);
assertThat(adrian2).isNotSameAs(adrian1);
testAgeAspect(adrian2, 2, 1);
}
@Test
void twoAdviceAspectPrototype() {
ClassPathXmlApplicationContext bf = newContext("twoAdviceAspectPrototype.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
testAgeAspect(adrian1, 0, 1);
ITestBean adrian2 = bf.getBean("adrian", ITestBean.class);
assertThat(adrian2).isNotSameAs(adrian1);
testAgeAspect(adrian2, 0, 1);
}
private void testAgeAspect(ITestBean adrian, int start, int increment) {
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
adrian.setName("");
assertThat(adrian.age()).isEqualTo(start);
int newAge = 32;
adrian.setAge(newAge);
assertThat(adrian.age()).isEqualTo(start + increment);
adrian.setAge(0);
assertThat(adrian.age()).isEqualTo(start + increment * 2);
}
@Test
void adviceUsingJoinPoint() {
ClassPathXmlApplicationContext bf = newContext("usesJoinPointAspect.xml");
ITestBean adrian1 = bf.getBean("adrian", ITestBean.class);
adrian1.getAge();
AdviceUsingThisJoinPoint aspectInstance = (AdviceUsingThisJoinPoint) bf.getBean("aspect");
//(AdviceUsingThisJoinPoint) Aspects.aspectOf(AdviceUsingThisJoinPoint.class);
//assertEquals("method-execution(int TestBean.getAge())",aspectInstance.getLastMethodEntered());
assertThat(aspectInstance.getLastMethodEntered()).doesNotStartWith("TestBean.getAge())");
}
@Test
void includeMechanism() {
ClassPathXmlApplicationContext bf = newContext("usesInclude.xml");
ITestBean adrian = bf.getBean("adrian", ITestBean.class);
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
assertThat(adrian.getAge()).isEqualTo(68);
}
@Test
void forceProxyTargetClass() {
ClassPathXmlApplicationContext bf = newContext("aspectsWithCGLIB.xml");
ProxyConfig pc = (ProxyConfig) bf.getBean(AopConfigUtils.AUTO_PROXY_CREATOR_BEAN_NAME);
assertThat(pc.isProxyTargetClass()).as("should be proxying classes").isTrue();
assertThat(pc.isExposeProxy()).as("should expose proxy").isTrue();
}
@Test
void withAbstractFactoryBeanAreApplied() {
ClassPathXmlApplicationContext bf = newContext("aspectsWithAbstractBean.xml");
ITestBean adrian = bf.getBean("adrian", ITestBean.class);
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
assertThat(adrian.getAge()).isEqualTo(68);
}
@Test
void retryAspect() {
ClassPathXmlApplicationContext bf = newContext("retryAspect.xml");
UnreliableBean bean = (UnreliableBean) bf.getBean("unreliableBean");
RetryAspect aspect = (RetryAspect) bf.getBean("retryAspect");
assertThat(bean.unreliable()).isEqualTo(2);
assertThat(aspect.getBeginCalls()).isEqualTo(2);
assertThat(aspect.getRollbackCalls()).isEqualTo(1);
assertThat(aspect.getCommitCalls()).isEqualTo(1);
}
@Test
void withBeanNameAutoProxyCreator() {
ClassPathXmlApplicationContext bf = newContext("withBeanNameAutoProxyCreator.xml");
ITestBean tb = bf.getBean("adrian", ITestBean.class);
assertThat(tb.getAge()).isEqualTo(68);
}
@ParameterizedTest(name = "[{index}] {0}")
@ValueSource(classes = {ProxyTargetClassFalseConfig.class, ProxyTargetClassTrueConfig.class})
void lambdaIsAlwaysProxiedWithJdkProxy(Class<?> configClass) {
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(configClass)) {
@SuppressWarnings("unchecked")
Supplier<String> supplier = context.getBean(Supplier.class);
assertThat(AopUtils.isAopProxy(supplier)).as("AOP proxy").isTrue();
assertThat(AopUtils.isJdkDynamicProxy(supplier)).as("JDK Dynamic proxy").isTrue();
assertThat(supplier.getClass().getInterfaces()).containsExactlyInAnyOrder(
Supplier.class, SpringProxy.class, Advised.class, DecoratingProxy.class);
assertThat(supplier.get()).isEqualTo("advised: lambda");
}
}
@ParameterizedTest(name = "[{index}] {0}")
@ValueSource(classes = {MixinProxyTargetClassFalseConfig.class, MixinProxyTargetClassTrueConfig.class})
void lambdaIsAlwaysProxiedWithJdkProxyWithIntroductions(Class<?> configClass) {
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(configClass)) {
MessageGenerator messageGenerator = context.getBean(MessageGenerator.class);
assertThat(AopUtils.isAopProxy(messageGenerator)).as("AOP proxy").isTrue();
assertThat(AopUtils.isJdkDynamicProxy(messageGenerator)).as("JDK Dynamic proxy").isTrue();
assertThat(messageGenerator.getClass().getInterfaces()).containsExactlyInAnyOrder(
MessageGenerator.class, Mixin.class, SpringProxy.class, Advised.class, DecoratingProxy.class);
assertThat(messageGenerator.generateMessage()).isEqualTo("mixin: lambda");
}
}
@Test
void nullAdviceIsSkipped() {
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(ProxyWithNullAdviceConfig.class)) {
@SuppressWarnings("unchecked")
Supplier<String> supplier = context.getBean(Supplier.class);
assertThat(AopUtils.isAopProxy(supplier)).as("AOP proxy").isTrue();
assertThat(supplier.get()).isEqualTo("lambda");
}
}
private ClassPathXmlApplicationContext newContext(String fileSuffix) {
return new ClassPathXmlApplicationContext(getClass().getSimpleName() + "-" + fileSuffix, getClass());
}
}
@Aspect
| AspectJAutoProxyCreatorTests |
java | google__dagger | javatests/dagger/internal/codegen/DaggerSuperficialValidationTest.java | {
"start": 16539,
"end": 18182
} | class ____ {}",
"}"),
(processingEnv, superficialValidation) -> {
XTypeElement testClassElement = processingEnv.findTypeElement("test.Outer.TestClass");
ValidationException exception =
assertThrows(
ValidationException.KnownErrorType.class,
() -> superficialValidation.validateElement(testClassElement));
// TODO(b/248552462): Javac and KSP should match once this bug is fixed.
boolean isJavac = processingEnv.getBackend() == XProcessingEnv.Backend.JAVAC;
String expectedMessage =
String.format(
NEW_LINES.join(
"Validation trace:",
" => element (CLASS): test.Outer.TestClass",
" => annotation type: test.Outer.TestAnnotation",
" => annotation: @test.Outer.TestAnnotation(classes={%1$s})",
" => annotation value (TYPE_ARRAY): classes={%1$s}",
" => annotation value (TYPE): classes=%1$s"),
isJavac ? "<error>" : "MissingType");
if (!isJavac) {
expectedMessage =
NEW_LINES.join(
expectedMessage,
" => type (ERROR annotation value type): MissingType");
}
assertThat(exception).hasMessageThat().contains(expectedMessage);
});
}
@Test
public void invalidAnnotationValueOnParameter() {
runTest(
CompilerTests.javaSource(
"test.Outer",
"package test;",
"",
"final | TestClass |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/GenericManyToOneParameterTest.java | {
"start": 3551,
"end": 3621
} | class ____ extends SiteImpl implements UserSite {
}
public | UserSiteImpl |
java | apache__logging-log4j2 | log4j-jakarta-jms/src/main/java/org/apache/logging/log4j/core/appender/mom/jakarta/JmsManager.java | {
"start": 2085,
"end": 4571
} | class ____ {
private final Properties jndiProperties;
private final String connectionFactoryName;
private final String destinationName;
private final String userName;
private final char[] password;
private final boolean immediateFail;
private final boolean retry;
private final long reconnectIntervalMillis;
JmsManagerConfiguration(
final Properties jndiProperties,
final String connectionFactoryName,
final String destinationName,
final String userName,
final char[] password,
final boolean immediateFail,
final long reconnectIntervalMillis) {
this.jndiProperties = jndiProperties;
this.connectionFactoryName = connectionFactoryName;
this.destinationName = destinationName;
this.userName = userName;
this.password = password;
this.immediateFail = immediateFail;
this.reconnectIntervalMillis = reconnectIntervalMillis;
this.retry = reconnectIntervalMillis > 0;
}
public String getConnectionFactoryName() {
return connectionFactoryName;
}
public String getDestinationName() {
return destinationName;
}
public JndiManager getJndiManager() {
return JndiManager.getJndiManager(getJndiProperties());
}
public Properties getJndiProperties() {
return jndiProperties;
}
public char[] getPassword() {
return password;
}
public long getReconnectIntervalMillis() {
return reconnectIntervalMillis;
}
public String getUserName() {
return userName;
}
public boolean isImmediateFail() {
return immediateFail;
}
public boolean isRetry() {
return retry;
}
@Override
public String toString() {
return "JmsManagerConfiguration [jndiProperties=" + jndiProperties + ", connectionFactoryName="
+ connectionFactoryName + ", destinationName=" + destinationName + ", userName=" + userName
+ ", immediateFail=" + immediateFail + ", retry=" + retry + ", reconnectIntervalMillis="
+ reconnectIntervalMillis + "]";
}
}
private static final | JmsManagerConfiguration |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java | {
"start": 1185,
"end": 2905
} | class ____ extends ProtoBase<MasterKeyProto> implements
MasterKey {
MasterKeyProto proto = MasterKeyProto.getDefaultInstance();
MasterKeyProto.Builder builder = null;
boolean viaProto = false;
public MasterKeyPBImpl() {
builder = MasterKeyProto.newBuilder();
}
public MasterKeyPBImpl(MasterKeyProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized MasterKeyProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = MasterKeyProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized int getKeyId() {
MasterKeyProtoOrBuilder p = viaProto ? proto : builder;
return (p.getKeyId());
}
@Override
public synchronized void setKeyId(int id) {
maybeInitBuilder();
builder.setKeyId((id));
}
@Override
public synchronized ByteBuffer getBytes() {
MasterKeyProtoOrBuilder p = viaProto ? proto : builder;
return convertFromProtoFormat(p.getBytes());
}
@Override
public synchronized void setBytes(ByteBuffer bytes) {
maybeInitBuilder();
builder.setBytes(convertToProtoFormat(bytes));
}
@Override
public int hashCode() {
return getKeyId();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (!(obj instanceof MasterKey)) {
return false;
}
MasterKey other = (MasterKey) obj;
if (this.getKeyId() != other.getKeyId()) {
return false;
}
if (!this.getBytes().equals(other.getBytes())) {
return false;
}
return true;
}
}
| MasterKeyPBImpl |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/jackson/OAuth2AuthenticationTokenMixin.java | {
"start": 1232,
"end": 1714
} | class ____ used to serialize/deserialize {@link OAuth2AuthenticationToken}.
*
* @author Sebastien Deleuze
* @author Joe Grandja
* @since 7.0O
* @see OAuth2AuthenticationToken
* @see OAuth2ClientJacksonModule
*/
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, getterVisibility = JsonAutoDetect.Visibility.NONE,
isGetterVisibility = JsonAutoDetect.Visibility.NONE)
@JsonIgnoreProperties({ "authenticated" })
abstract | is |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/DockerApi.java | {
"start": 18394,
"end": 18950
} | class ____ {
VolumeApi() {
}
/**
* Delete a volume.
* @param name the name of the volume to delete
* @param force if the deletion should be forced
* @throws IOException on IO error
*/
public void delete(VolumeName name, boolean force) throws IOException {
Assert.notNull(name, "'name' must not be null");
Collection<String> params = force ? FORCE_PARAMS : Collections.emptySet();
URI uri = buildUrl("/volumes/" + name, params);
http().delete(uri).close();
}
}
/**
* Docker API for system operations.
*/
| VolumeApi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.