language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java | {
"start": 14656,
"end": 17972
} | class ____ implements Iterator<T> {
/** The current modification epoch. */
private int expectedModification = modification;
/** The current index of the entry array. */
private int index = -1;
/** The next element to return. */
private LinkedElement<T> next = nextNonemptyEntry();
private LinkedElement<T> current;
private LinkedElement<T> nextNonemptyEntry() {
for (index++; index < entries.length && entries[index] == null; index++);
return index < entries.length ? entries[index] : null;
}
@Override
public boolean hasNext() {
return next != null;
}
@Override
public T next() {
if (modification != expectedModification) {
throw new ConcurrentModificationException("modification="
+ modification + " != expectedModification = " + expectedModification);
}
if (next == null) {
throw new NoSuchElementException();
}
current = next;
final T e = next.element;
// find the next element
final LinkedElement<T> n = next.next;
next = n != null ? n : nextNonemptyEntry();
return e;
}
@Override
public void remove() {
if (current == null) {
throw new NoSuchElementException();
}
if (modification != expectedModification) {
throw new ConcurrentModificationException("modification="
+ modification + " != expectedModification = " + expectedModification);
}
LightWeightHashSet.this.removeElem(current.element);
current = null;
expectedModification = modification;
}
}
/**
* Clear the set. Resize it to the original capacity.
*/
@Override
@SuppressWarnings("unchecked")
public void clear() {
this.capacity = this.initialCapacity;
this.hash_mask = capacity - 1;
this.expandThreshold = (int) (capacity * maxLoadFactor);
this.shrinkThreshold = (int) (capacity * minLoadFactor);
entries = new LinkedElement[capacity];
size = 0;
modification++;
}
@Override
public Object[] toArray() {
Object[] result = new Object[size];
return toArray(result);
}
@Override
@SuppressWarnings("unchecked")
public <U> U[] toArray(U[] a) {
if (a == null) {
throw new NullPointerException("Input array can not be null");
}
if (a.length < size) {
a = (U[]) java.lang.reflect.Array.newInstance(a.getClass()
.getComponentType(), size);
}
int currentIndex = 0;
for (int i = 0; i < entries.length; i++) {
LinkedElement<T> current = entries[i];
while (current != null) {
a[currentIndex++] = (U) current.element;
current = current.next;
}
}
return a;
}
@Override
public boolean containsAll(Collection<?> c) {
Iterator<?> iter = c.iterator();
while (iter.hasNext()) {
if (!contains(iter.next())) {
return false;
}
}
return true;
}
@Override
public boolean removeAll(Collection<?> c) {
boolean changed = false;
Iterator<?> iter = c.iterator();
while (iter.hasNext()) {
changed |= remove(iter.next());
}
return changed;
}
@Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException("retainAll is not supported.");
}
}
| LinkedSetIterator |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TableFactoryHarness.java | {
"start": 7480,
"end": 8455
} | class ____ extends TableDescriptor {
private final @Nullable SourceBase source;
private final @Nullable SinkBase sink;
private HarnessTableDescriptor(
Schema schema, @Nullable SourceBase source, @Nullable SinkBase sink) {
super(
schema,
Collections.singletonMap(FactoryUtil.CONNECTOR.key(), IDENTIFIER),
null,
Collections.emptyList(),
null);
this.source = source;
this.sink = sink;
}
@Override
public CatalogTable toCatalogTable() {
return new HarnessCatalogTable(super.toCatalogTable(), source, sink);
}
@Override
public TableDescriptor.Builder toBuilder() {
return new Builder(this).source(source).sink(sink);
}
/** Builder for {@link HarnessTableDescriptor}. */
public static | HarnessTableDescriptor |
java | apache__camel | dsl/camel-jbang/camel-jbang-plugin-kubernetes/src/main/java/org/apache/camel/dsl/jbang/core/commands/kubernetes/traits/model/Openapi.java | {
"start": 1332,
"end": 2104
} | class ____ {
@JsonProperty("configmaps")
@JsonPropertyDescription("The configmaps holding the spec of the OpenAPI")
@JsonSetter(
nulls = Nulls.SKIP)
private List<String> configmaps;
@JsonProperty("enabled")
@JsonPropertyDescription("Can be used to enable or disable a trait.")
@JsonSetter(
nulls = Nulls.SKIP)
private Boolean enabled;
public Openapi() {
}
public List<String> getConfigmaps() {
return this.configmaps;
}
public void setConfigmaps(List<String> configmaps) {
this.configmaps = configmaps;
}
public Boolean getEnabled() {
return this.enabled;
}
public void setEnabled(Boolean enabled) {
this.enabled = enabled;
}
}
| Openapi |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/webapp/NavBlock.java | {
"start": 969,
"end": 1483
} | class ____ extends HtmlBlock {
@Override
public void render(Block html) {
html.
div("#nav").
h3("GPG").
ul().
li().a(url(""), "Overview").__().
li().a(url("policies"), "Policies").__().
__().
h3("Tools").
ul().
li().a("/conf", "Configuration").__().
li().a("/logs", "Local logs").__().
li().a("/stacks", "Server stacks").__().
li().a("/jmx?qry=Hadoop:*", "Server metrics").__().__().__();
}
}
| NavBlock |
java | apache__avro | lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestWordCount.java | {
"start": 6425,
"end": 7118
} | class ____ extends Reducer<Text, IntWritable, AvroKey<ReflectStats>, NullWritable> {
private AvroKey<ReflectStats> mStats;
@Override
protected void setup(Context context) {
mStats = new AvroKey<>(null);
}
@Override
protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
throws IOException, InterruptedException {
ReflectStats record = new ReflectStats();
record.count = 0;
for (IntWritable count : counts) {
record.count += count.get();
}
record.name = line.toString();
mStats.datum(record);
context.write(mStats, NullWritable.get());
}
}
private static | ReflectStatsReducer |
java | google__dagger | javatests/dagger/internal/codegen/bindinggraphvalidation/SetMultibindingValidationTest.java | {
"start": 5564,
"end": 6349
} | interface ____ {",
" Set<Foo> setOfFoo();",
"}");
CompilerTests.daggerCompiler(FOO, module, component)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("MissingFooImpl cannot be provided");
});
}
@Test public void testMultipleSetBindingsToSameFooThroughMultipleBinds() {
Source module =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Binds;",
"import dagger.multibindings.IntoSet;",
"import javax.inject.Inject;",
"",
"@dagger.Module",
" | TestComponent |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/accesslog/element/AccessLogFormatParser.java | {
"start": 11125,
"end": 13686
} | class ____ implements LogElement, Comparable<IndexedLogElement> {
final int index;
private final LogElement delegate;
/**
* Creates an IndexedLogElement.
* @param delegate A LogElement.
* @param index The index.
*/
IndexedLogElement(LogElement delegate, int index) {
this.delegate = delegate;
this.index = index;
}
@Override
public Set<Event> events() {
return delegate.events();
}
@Override
public void reset() {
delegate.reset();
}
@Override
public String onRequestHeaders(@NonNull ConnectionMetadata metadata, @NonNull String method, @NonNull HttpHeaders headers, @NonNull String uri, @NonNull String protocol) {
return delegate.onRequestHeaders(metadata, method, headers, uri, protocol);
}
@Override
public String onResponseHeaders(ChannelHandlerContext ctx, io.netty.handler.codec.http.HttpHeaders headers, String status) {
return delegate.onResponseHeaders(ctx, headers, status);
}
@Override
public void onResponseWrite(int contentSize) {
delegate.onResponseWrite(contentSize);
}
@Override
public String onLastResponseWrite(int contentSize) {
return delegate.onLastResponseWrite(contentSize);
}
@Override
public LogElement copy() {
return new IndexedLogElement(delegate.copy(), index);
}
/**
* Returns a copy of this element.
* @return A copy of this element.
*/
public IndexedLogElement copyIndexedLogElement() {
return new IndexedLogElement(delegate.copy(), index);
}
@Override
public int compareTo(IndexedLogElement o) {
return Long.compare(index, o.index);
}
@Override
public String toString() {
return delegate.toString();
}
@Override
public int hashCode() {
return index;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
IndexedLogElement other = (IndexedLogElement) obj;
return index == other.index;
}
}
}
| IndexedLogElement |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cache/PendingBulkOperationCleanupActionTest.java | {
"start": 7891,
"end": 9617
} | class ____ extends AbstractEntityDataAccess {
private boolean isUnlockRegionCalled = false;
private boolean lockRegionCalled = false;
public TestEntityDataAccess(
DomainDataRegion region,
CacheKeysFactory cacheKeysFactory,
DomainDataStorageAccess storageAccess) {
super( region, cacheKeysFactory, storageAccess );
}
@Override
public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
return false;
}
@Override
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
return false;
}
@Override
public boolean update(
SharedSessionContractImplementor session,
Object key,
Object value,
Object currentVersion,
Object previousVersion) {
return false;
}
@Override
public boolean afterUpdate(
SharedSessionContractImplementor session,
Object key,
Object value,
Object currentVersion,
Object previousVersion,
SoftLock lock) {
return false;
}
@Override
public AccessType getAccessType() {
return null;
}
@Override
public SoftLock lockRegion() {
lockRegionCalled = true;
return super.lockRegion();
}
@Override
public void unlockRegion(SoftLock lock) {
super.unlockRegion( lock );
isUnlockRegionCalled = true;
}
@Override
public void destroy() {
super.destroy();
isUnlockRegionCalled = false;
}
public boolean isUnlockRegionCalled() {
return isUnlockRegionCalled;
}
public boolean isLockRegionCalled() {
return lockRegionCalled;
}
public void reset() {
this.isUnlockRegionCalled = false;
this.lockRegionCalled = false;
}
}
}
| TestEntityDataAccess |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/autoconfigure/DataSourceProperties.java | {
"start": 1409,
"end": 1682
} | class ____ configuration of a data source.
*
* @author Dave Syer
* @author Maciej Walkowiak
* @author Stephane Nicoll
* @author Benedikt Ritter
* @author Eddú Meléndez
* @author Scott Frederick
* @since 4.0.0
*/
@ConfigurationProperties("spring.datasource")
public | for |
java | apache__flink | flink-metrics/flink-metrics-otel/src/test/java/org/apache/flink/metrics/otel/OpenTelemetryTestBase.java | {
"start": 6358,
"end": 6850
} | class ____ extends Exception {
private final List<String> dataLog;
public ConsumeDataLogException(Throwable cause, List<String> dataLog) {
super(cause);
this.dataLog = dataLog;
}
public List<String> getDataLog() {
return dataLog;
}
}
/**
* Similar to {@link Slf4jLogConsumer} but parses output lines and tries to log them with
* appropriate levels.
*/
private static | ConsumeDataLogException |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/FullyQualifiedAnnotationBeanNameGenerator.java | {
"start": 1291,
"end": 1559
} | class ____ (i.e., classes with identical names but residing in
* different packages). If you need such conflict avoidance for {@link Bean @Bean}
* methods as well, consider {@link FullyQualifiedConfigurationBeanNameGenerator}.
*
* <p>Note that an instance of this | name |
java | spring-projects__spring-boot | module/spring-boot-servlet/src/main/java/org/springframework/boot/servlet/autoconfigure/actuate/web/ManagementServletContext.java | {
"start": 888,
"end": 1048
} | interface ____ {
/**
* Return the servlet path of the management server.
* @return the servlet path
*/
String getServletPath();
}
| ManagementServletContext |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/beans/AbstractBeanConstructor.java | {
"start": 1085,
"end": 2267
} | class ____<T> implements BeanConstructor<T> {
private final Class<T> beanType;
private final AnnotationMetadata annotationMetadata;
private final Argument<?>[] arguments;
/**
* Default constructor.
* @param beanType The bean type
* @param annotationMetadata The annotation metadata
* @param arguments The arguments
*/
protected AbstractBeanConstructor(
Class<T> beanType,
AnnotationMetadata annotationMetadata,
Argument<?>... arguments) {
this.beanType = Objects.requireNonNull(beanType, "Bean type should not be null");
this.annotationMetadata = annotationMetadata == null ? AnnotationMetadata.EMPTY_METADATA : annotationMetadata;
this.arguments = ArrayUtils.isEmpty(arguments) ? Argument.ZERO_ARGUMENTS : arguments;
}
@Override
@NonNull
public AnnotationMetadata getAnnotationMetadata() {
return annotationMetadata;
}
@Override
@NonNull
public Class<T> getDeclaringBeanType() {
return beanType;
}
@Override
@NonNull
public Argument<?>[] getArguments() {
return arguments;
}
}
| AbstractBeanConstructor |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/BCryptTests.java | {
"start": 503,
"end": 2784
} | class ____ extends ESTestCase {
private static final SecureString PASSWORD = new SecureString("U21VniQwdEWATfqO".toCharArray());
private static final String[] VALID_HASHES = {
"$2a$04$OLNTeJiq3vjYqTZwgDi62OU5MvzkV3Jqz.KiR3pwgQv70pD6bUsGa",
"$2a$05$XNLcDk8PSYbU70A4bWjY1ugWlNSVM.VPMp6lb9qLotOB9oPV5TyM6",
"$2a$06$KMO7CTXk.rzWPve.dRYXgu8x028/6QlBmRTCijvbwFH5Xx4Xhn4tW",
"$2a$07$tr.C.OmBfdIBg7gcMruQX.UHZtmoZfi6xNpK6A0/oa.ulR4rXj6Ny",
"$2a$08$Er.JIbUaPM7JmIN0iFEhW.H2hgtRT9weKtLdqEgSMAzmEe2xZ0B7a",
"$2a$09$OmkfXJKIWhUmnrIlOy9Cd.SOu337FXAKcbB10nMUwpKSez5G4jz8e",
"$2a$10$qyfYQcOK13wQmGO3Y.nVj.he5w1.Z0WV81HqBW6NlV.nkmg90utxO",
"$2a$11$oNdrIn9.RBEg.XXnZkqwk..2wBrU6SjEJkQTLyxEXVQQcw4BokSaa",
"$2a$12$WMLT/yjmMvBTgBnnZw1EhO6r4g7cWoxEOhS9ln4dNVg8gK3es/BZm",
"$2a$13$WHkGwOCLz8SnX13tYH0Ez.qwKK0YFD8DA4Anz0a0Laozw75vqmBee",
"$2a$14$8Urbk50As1LIgDBWPmXcFOpMWJfy3ddFLgvDlH3G1y4TFo4sLXU9y" };
public void testVerifyHash() {
for (String hash : VALID_HASHES) {
runWithValidRevisions(hash, h -> assertTrue("Hash " + h, BCrypt.checkpw(PASSWORD, h)));
runWithInvalidRevisions(hash, h -> expectThrows(IllegalArgumentException.class, () -> BCrypt.checkpw(PASSWORD, h)));
// Replace a random character in the hash
int index = randomIntBetween(10, hash.length() - 1);
String replace = randomValueOtherThan(hash.substring(index, index + 1), () -> randomAlphaOfLength(1));
String invalid = hash.substring(0, index) + replace + hash.substring(index + 1);
assertThat(invalid.length(), equalTo(hash.length()));
runWithValidRevisions(invalid, h -> assertFalse("Hash " + h, BCrypt.checkpw(PASSWORD, h)));
}
}
static void runWithValidRevisions(String baseHash, Consumer<String> action) {
for (String revision : new String[] { "$2a$", "$2b$", "$2y$" }) {
action.accept(revision + baseHash.substring(4));
}
}
static void runWithInvalidRevisions(String baseHash, Consumer<String> action) {
for (String revision : new String[] { "$2c$", "$2x$", "$2z$" }) {
action.accept(revision + baseHash.substring(4));
}
}
}
| BCryptTests |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientSavepointTriggerTest.java | {
"start": 4003,
"end": 12532
} | class ____ {
private static final DispatcherGateway mockRestfulGateway =
TestingDispatcherGateway.newBuilder().build();
private static final GatewayRetriever<DispatcherGateway> mockGatewayRetriever =
() -> CompletableFuture.completedFuture(mockRestfulGateway);
private static ExecutorService executor;
private static final Configuration REST_CONFIG;
static {
final Configuration config = new Configuration();
config.set(JobManagerOptions.ADDRESS, "localhost");
config.set(RestOptions.RETRY_MAX_ATTEMPTS, 10);
config.set(RestOptions.RETRY_DELAY, Duration.ofMillis(0L));
config.set(RestOptions.PORT, 0);
REST_CONFIG = new UnmodifiableConfiguration(config);
}
@BeforeAll
static void setUp() {
executor =
Executors.newSingleThreadExecutor(
new ExecutorThreadFactory(
RestClusterClientSavepointTriggerTest.class.getSimpleName()));
}
@AfterAll
static void tearDown() {
if (executor != null) {
executor.shutdown();
}
}
@Test
void testTriggerSavepointDefaultDirectory() throws Exception {
final TriggerId triggerId = new TriggerId();
final String expectedReturnedSavepointDir = "hello";
try (final RestServerEndpoint restServerEndpoint =
createRestServerEndpoint(
request -> {
assertThat(request.getTargetDirectory()).isEmpty();
assertThat(request.isCancelJob()).isFalse();
return triggerId;
},
trigger -> {
assertThat(triggerId).isEqualTo(trigger);
return new SavepointInfo(expectedReturnedSavepointDir, null);
})) {
final RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
final String savepointPath =
restClusterClient
.triggerSavepoint(new JobID(), null, SavepointFormatType.CANONICAL)
.get();
assertThat(savepointPath).isEqualTo(expectedReturnedSavepointDir);
}
}
@Test
void testTriggerSavepointTargetDirectory() throws Exception {
final TriggerId triggerId = new TriggerId();
final String expectedSubmittedSavepointDir = "world";
final String expectedReturnedSavepointDir = "hello";
try (final RestServerEndpoint restServerEndpoint =
createRestServerEndpoint(
triggerRequestBody -> {
assertThat(triggerRequestBody.getTargetDirectory())
.get()
.isEqualTo(expectedSubmittedSavepointDir);
assertThat(triggerRequestBody.isCancelJob()).isFalse();
return triggerId;
},
statusRequestTriggerId -> {
assertThat(statusRequestTriggerId).isEqualTo(triggerId);
return new SavepointInfo(expectedReturnedSavepointDir, null);
})) {
final RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
final String savepointPath =
restClusterClient
.triggerSavepoint(
new JobID(),
expectedSubmittedSavepointDir,
SavepointFormatType.CANONICAL)
.get();
assertThat(savepointPath).isEqualTo(expectedReturnedSavepointDir);
}
}
@Test
void testTriggerSavepointCancelJob() throws Exception {
final TriggerId triggerId = new TriggerId();
final String expectedSavepointDir = "hello";
try (final RestServerEndpoint restServerEndpoint =
createRestServerEndpoint(
request -> {
assertThat(request.isCancelJob()).isTrue();
return triggerId;
},
trigger -> {
assertThat(trigger).isEqualTo(triggerId);
return new SavepointInfo(expectedSavepointDir, null);
})) {
final RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
final String savepointPath =
restClusterClient
.cancelWithSavepoint(new JobID(), null, SavepointFormatType.CANONICAL)
.get();
assertThat(savepointPath).isEqualTo(expectedSavepointDir);
}
}
@Test
void testTriggerSavepointFailure() throws Exception {
final TriggerId triggerId = new TriggerId();
try (final RestServerEndpoint restServerEndpoint =
createRestServerEndpoint(
request -> triggerId,
trigger ->
new SavepointInfo(
null,
new SerializedThrowable(
new RuntimeException("expected"))))) {
final RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
try {
restClusterClient
.triggerSavepoint(new JobID(), null, SavepointFormatType.CANONICAL)
.get();
} catch (ExecutionException e) {
final Throwable cause = e.getCause();
assertThat(cause).isInstanceOf(SerializedThrowable.class);
assertThat(
((SerializedThrowable) cause)
.deserializeError(ClassLoader.getSystemClassLoader())
.getMessage())
.isEqualTo("expected");
}
}
}
@Test
void testTriggerSavepointRetry() throws Exception {
final TriggerId triggerId = new TriggerId();
final String expectedSavepointDir = "hello";
final AtomicBoolean failRequest = new AtomicBoolean(true);
try (final RestServerEndpoint restServerEndpoint =
createRestServerEndpoint(
request -> triggerId,
trigger -> {
if (failRequest.compareAndSet(true, false)) {
throw new RestHandlerException(
"expected", HttpResponseStatus.SERVICE_UNAVAILABLE);
} else {
return new SavepointInfo(expectedSavepointDir, null);
}
})) {
final RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
final String savepointPath =
restClusterClient
.triggerSavepoint(new JobID(), null, SavepointFormatType.CANONICAL)
.get();
assertThat(savepointPath).isEqualTo(expectedSavepointDir);
}
}
private static RestServerEndpoint createRestServerEndpoint(
final FunctionWithException<
SavepointTriggerRequestBody, TriggerId, RestHandlerException>
triggerHandlerLogic,
final FunctionWithException<TriggerId, SavepointInfo, RestHandlerException>
savepointHandlerLogic)
throws Exception {
return TestRestServerEndpoint.builder(REST_CONFIG)
.withHandler(new TestSavepointTriggerHandler(triggerHandlerLogic))
.withHandler(new TestSavepointHandler(savepointHandlerLogic))
.buildAndStart();
}
private static final | RestClusterClientSavepointTriggerTest |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/CompositeLock.java | {
"start": 639,
"end": 2643
} | class ____ implements ResourceLock {
private final List<ExclusiveResource> resources;
private final List<Lock> locks;
private final boolean exclusive;
CompositeLock(List<ExclusiveResource> resources, List<Lock> locks) {
Preconditions.condition(resources.size() == locks.size(), "Resources and locks must have the same size");
this.resources = List.copyOf(resources);
this.locks = Preconditions.notEmpty(locks, "Locks must not be empty");
this.exclusive = resources.stream().anyMatch(
resource -> resource.getLockMode() == ExclusiveResource.LockMode.READ_WRITE);
}
@Override
public List<ExclusiveResource> getResources() {
return resources;
}
// for tests only
List<Lock> getLocks() {
return this.locks;
}
@Override
public boolean tryAcquire() {
List<Lock> acquiredLocks = new ArrayList<>(this.locks.size());
for (Lock lock : this.locks) {
if (lock.tryLock()) {
acquiredLocks.add(lock);
}
else {
break;
}
}
if (acquiredLocks.size() == this.locks.size()) {
return true;
}
else {
release(acquiredLocks);
return false;
}
}
@Override
public ResourceLock acquire() throws InterruptedException {
ForkJoinPool.managedBlock(new CompositeLockManagedBlocker());
return this;
}
private void acquireAllLocks() throws InterruptedException {
List<Lock> acquiredLocks = new ArrayList<>(this.locks.size());
try {
for (Lock lock : this.locks) {
lock.lockInterruptibly();
acquiredLocks.add(lock);
}
}
catch (InterruptedException e) {
release(acquiredLocks);
throw e;
}
}
@Override
public void release() {
release(this.locks);
}
private void release(List<Lock> acquiredLocks) {
for (int i = acquiredLocks.size() - 1; i >= 0; i--) {
acquiredLocks.get(i).unlock();
}
}
@Override
public boolean isExclusive() {
return exclusive;
}
@Override
public String toString() {
return new ToStringBuilder(this) //
.append("resources", resources) //
.toString();
}
private | CompositeLock |
java | junit-team__junit5 | junit-vintage-engine/src/main/java/org/junit/vintage/engine/discovery/IsPotentialJUnit4TestMethod.java | {
"start": 478,
"end": 744
} | class ____ implements Predicate<Method> {
@Override
public boolean test(Method method) {
// Don't use AnnotationUtils.isAnnotated since JUnit 4 does not support
// meta-annotations
return method.isAnnotationPresent(Test.class);
}
}
| IsPotentialJUnit4TestMethod |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/filter/FilterArgumentBinderPredicate.java | {
"start": 1137,
"end": 1945
} | interface ____ {
/**
* Check whether the filter method should run in the given context.
*
* @param argument The argument that this binder binds
* @param mutablePropagatedContext The propagated context
* @param request The request
* @param response For response filters, the response (if there is no failure)
* @param failure For response filters, the failure
* @return {@code true} if this filter method should run
*/
boolean test(Argument<?> argument,
MutablePropagatedContext mutablePropagatedContext,
HttpRequest<?> request,
@Nullable HttpResponse<?> response,
@Nullable Throwable failure);
}
| FilterArgumentBinderPredicate |
java | apache__logging-log4j2 | log4j-core-its/src/test/java/org/apache/logging/log4j/core/async/perftest/PerfTestDriver.java | {
"start": 7757,
"end": 16292
} | enum ____ {
Log4j12(RunLog4j1.class), //
Log4j2(RunLog4j2.class), //
Logback(RunLogback.class);
private final Class<? extends IPerfTestRunner> implementationClass;
Runner(final Class<? extends IPerfTestRunner> cls) {
this.implementationClass = cls;
}
}
public static void main(final String[] args) throws Exception {
final long start = System.nanoTime();
final List<Setup> tests = selectTests();
runPerfTests(args, tests);
System.out.printf(
"Done. Total duration: %.1f minutes%n",
(System.nanoTime() - start) / (60.0 * 1000.0 * 1000.0 * 1000.0));
printRanking(tests.toArray(new Setup[tests.size()]));
}
private static List<Setup> selectTests() throws IOException {
final List<Setup> tests = new ArrayList<>();
// final String CACHEDCLOCK = "-Dlog4j.Clock=CachedClock";
final String SYSCLOCK = "-Dlog4j.Clock=SystemClock";
final String ALL_ASYNC = "-DLog4jContextSelector=" + AsyncLoggerContextSelector.class.getName();
final String THREADNAME = "-DAsyncLogger.ThreadNameStrategy=" //
+ System.getProperty("AsyncLogger.ThreadNameStrategy", "CACHED");
// includeLocation=false
add(tests, 1, "perf3PlainNoLoc.xml", Runner.Log4j2, "Loggers all async", ALL_ASYNC, SYSCLOCK, THREADNAME);
add(tests, 1, "perf7MixedNoLoc.xml", Runner.Log4j2, "Loggers mixed sync/async");
add(tests, 1, "perf-logback.xml", Runner.Logback, "Sync");
add(tests, 1, "perf-log4j12.xml", Runner.Log4j12, "Sync");
add(tests, 1, "perf3PlainNoLoc.xml", Runner.Log4j2, "Sync");
add(tests, 1, "perf-logback-async.xml", Runner.Logback, "Async Appender");
add(tests, 1, "perf-log4j12-async.xml", Runner.Log4j12, "Async Appender");
add(tests, 1, "perf5AsyncApndNoLoc.xml", Runner.Log4j2, "Async Appender");
// includeLocation=true
// add(tests, 1, "perf6AsyncApndLoc.xml", Runner.Log4j2, "Async Appender includeLocation");
// add(tests, 1, "perf8MixedLoc.xml", Runner.Log4j2, "Mixed sync/async includeLocation");
// add(tests, 1, "perf4PlainLocation.xml", Runner.Log4j2, "Loggers all async includeLocation", ALL_ASYNC);
// add(tests, 1, "perf4PlainLocation.xml", Runner.Log4j2, "Loggers all async includeLocation CachedClock",
// ALL_ASYNC, CACHEDCLOCK);
// add(tests, 1, "perf4PlainLocation.xml", Runner.Log4j2, "Sync includeLocation");
// appenders
// add(tests, 1, "perf1syncFile.xml", Runner.Log4j2, "FileAppender");
// add(tests, 1, "perf1syncRandomAccessFile.xml", Runner.Log4j2, "RandomAccessFileAppender");
// add(tests, 1, "perf2syncRollFile.xml", Runner.Log4j2, "RollFileAppender");
// add(tests, 1, "perf2syncRollRandomAccessFile.xml", Runner.Log4j2, "RollRandomAccessFileAppender");
final int MAX_THREADS = 4; // 64 takes a LONG time
for (int i = 2; i <= MAX_THREADS; i *= 2) {
// includeLocation = false
add(tests, i, "perf-logback.xml", Runner.Logback, "Sync");
add(tests, i, "perf-log4j12.xml", Runner.Log4j12, "Sync");
add(tests, i, "perf3PlainNoLoc.xml", Runner.Log4j2, "Sync");
add(tests, i, "perf-logback-async.xml", Runner.Logback, "Async Appender");
add(tests, i, "perf-log4j12-async.xml", Runner.Log4j12, "Async Appender");
add(tests, i, "perf5AsyncApndNoLoc.xml", Runner.Log4j2, "Async Appender");
add(tests, i, "perf3PlainNoLoc.xml", Runner.Log4j2, "Loggers all async", ALL_ASYNC, SYSCLOCK, THREADNAME);
add(tests, i, "perf7MixedNoLoc.xml", Runner.Log4j2, "Loggers mixed sync/async");
// includeLocation=true
// add(tests, i, "perf6AsyncApndLoc.xml", Runner.Log4j2, "Async Appender includeLocation");
// add(tests, i, "perf8MixedLoc.xml", Runner.Log4j2, "Mixed sync/async includeLocation");
// add(tests, i, "perf4PlainLocation.xml", Runner.Log4j2, "Loggers all async includeLocation", ALL_ASYNC));
// add(tests, i, "perf4PlainLocation.xml", Runner.Log4j2, "Loggers all async includeLocation CachedClock",
// ALL_ASYNC, CACHEDCLOCK));
// add(tests, i, "perf4PlainLocation.xml", Runner.Log4j2, "Sync includeLocation");
// appenders
// add(tests, i, "perf1syncFile.xml", Runner.Log4j2, "FileAppender");
// add(tests, i, "perf1syncRandomAccessFile.xml", Runner.Log4j2, "RandomAccessFileAppender");
// add(tests, i, "perf2syncRollFile.xml", Runner.Log4j2, "RollFileAppender");
// add(tests, i, "perf2syncRollRandomAccessFile.xml", Runner.Log4j2, "RollRandomAccessFileAppender");
}
return tests;
}
private static void add(
final List<Setup> tests,
final int threadCount,
final String config,
final Runner runner,
final String name,
final String... systemProperties)
throws IOException {
final WaitStrategy wait = WaitStrategy.get();
final Class<?> perfTest = threadCount == 1 ? PerfTest.class : MultiThreadPerfTest.class;
final Setup setup = new Setup(perfTest, runner, name, config, threadCount, wait, systemProperties);
tests.add(setup);
}
private static void runPerfTests(final String[] args, final List<Setup> tests)
throws IOException, InterruptedException {
final String java = args.length > 0 ? args[0] : "java";
final int repeat = args.length > 1 ? Integers.parseInt(args[1]) : 5;
int x = 0;
for (final Setup setup : tests) {
System.out.print(setup.description());
final ProcessBuilder pb = setup.throughputTest(java);
pb.redirectErrorStream(true); // merge System.out and System.err
final long t1 = System.nanoTime();
final int count = setup.threadCount >= 4 ? 3 : repeat;
runPerfTest(count, x++, setup, pb);
System.out.printf(" took %.1f seconds%n", (System.nanoTime() - t1) / (1000.0 * 1000.0 * 1000.0));
final FileReader reader = new FileReader(setup.temp);
final CharBuffer buffer = CharBuffer.allocate(256 * 1024);
reader.read(buffer);
reader.close();
setup.temp.delete();
buffer.flip();
final String raw = buffer.toString();
System.out.print(raw);
final Stats stats = new Stats(raw);
System.out.println(stats);
System.out.println("-----");
setup.stats = stats;
}
new File("perftest.log").delete();
}
private static void printRanking(final Setup[] tests) {
System.out.println();
System.out.println("Ranking:");
Arrays.sort(tests);
for (int i = 0; i < tests.length; i++) {
final Setup setup = tests[i];
System.out.println((i + 1) + ". " + setup.description() + ": " + setup.stats);
}
}
private static void runPerfTest(final int repeat, final int setupIndex, final Setup config, final ProcessBuilder pb)
throws IOException, InterruptedException {
for (int i = 0; i < repeat; i++) {
System.out.print(" (" + (i + 1) + '/' + repeat + ")...");
final Process process = pb.start();
final boolean[] stop = {false};
printProcessOutput(process, stop);
process.waitFor();
stop[0] = true;
final File gc = new File("gc" + setupIndex + '_' + i + config.log4jConfig + ".log");
if (gc.exists()) {
gc.delete();
}
new File("gc.log").renameTo(gc);
}
}
private static Thread printProcessOutput(final Process process, final boolean[] stop) {
final Thread t = new Thread("OutputWriter") {
@Override
public void run() {
final BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
try {
String line = null;
while (!stop[0] && (line = in.readLine()) != null) {
System.out.println(line);
}
} catch (final Exception ignored) {
}
}
};
t.start();
return t;
}
}
| Runner |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 100486,
"end": 100668
} | class ____ implements Procedure {
public Integer[] call(Object procedureContext, int i, Double d) {
return null;
}
}
private static | MixedArgProcedure |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MicrometerEndpointBuilderFactory.java | {
"start": 13629,
"end": 13966
} | class ____ extends AbstractEndpointBuilder implements MicrometerEndpointBuilder, AdvancedMicrometerEndpointBuilder {
public MicrometerEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new MicrometerEndpointBuilderImpl(path);
}
} | MicrometerEndpointBuilderImpl |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/internal/TypeComparators.java | {
"start": 2648,
"end": 2702
} | interface ____ {@code clazz}
*
* @param clazz the | of |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java | {
"start": 1122,
"end": 1491
} | class ____ {
private final long dataLength;
// can be null if not available
private final byte[] checksum;
public ChunkChecksum(long dataLength, byte[] checksum) {
this.dataLength = dataLength;
this.checksum = checksum;
}
public long getDataLength() {
return dataLength;
}
public byte[] getChecksum() {
return checksum;
}
} | ChunkChecksum |
java | playframework__playframework | web/play-java-forms/src/main/java/play/data/validation/Constraints.java | {
"start": 19991,
"end": 21783
} | class ____ extends Validator<Object>
implements ConstraintValidator<ValidateWith, Object> {
public static final String defaultMessage = "error.invalid";
Class<?> clazz = null;
Validator validator = null;
public ValidateWithValidator() {}
public ValidateWithValidator(Class clazz) {
this.clazz = clazz;
}
public void initialize(ValidateWith constraintAnnotation) {
this.clazz = constraintAnnotation.value();
try {
Constructor<?> constructor = clazz.getDeclaredConstructor();
constructor.setAccessible(true);
validator = (Validator) constructor.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@SuppressWarnings("unchecked")
public boolean isValid(Object object) {
try {
return validator.isValid(object);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@SuppressWarnings("unchecked")
public Tuple<String, Object[]> getErrorMessageKey() {
Tuple<String, Object[]> errorMessageKey = null;
try {
errorMessageKey = validator.getErrorMessageKey();
} catch (Exception e) {
throw new RuntimeException(e);
}
return (errorMessageKey != null) ? errorMessageKey : Tuple(defaultMessage, new Object[] {});
}
}
// --- validate fields with custom validator that gets payload
/** Defines a custom validator. */
@Target({METHOD, FIELD, ANNOTATION_TYPE, CONSTRUCTOR, PARAMETER, TYPE_USE})
@Retention(RUNTIME)
@Constraint(validatedBy = ValidatePayloadWithValidator.class)
@Repeatable(play.data.validation.Constraints.ValidatePayloadWith.List.class)
@Display(
name = "constraint.validatewith",
attributes = {})
public @ | ValidateWithValidator |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CopyFromLocalOperation.java | {
"start": 14941,
"end": 15305
} | class ____ implements Comparator<UploadEntry>,
Serializable {
@Override
public int compare(UploadEntry entry1, UploadEntry entry2) {
return Long.compare(entry1.size, entry2.size);
}
}
}
/**
* Define the contract for {@link CopyFromLocalOperation} to interact
* with any external resources.
*/
public | SizeComparator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java | {
"start": 750,
"end": 1854
} | class ____ extends BucketMetricsPipelineAggregator {
private double sum = 0;
private long count = 0;
private double min = Double.POSITIVE_INFINITY;
private double max = Double.NEGATIVE_INFINITY;
StatsBucketPipelineAggregator(
String name,
String[] bucketsPaths,
GapPolicy gapPolicy,
DocValueFormat formatter,
Map<String, Object> metadata
) {
super(name, bucketsPaths, gapPolicy, formatter, metadata);
}
@Override
protected void preCollection() {
sum = 0;
count = 0;
min = Double.POSITIVE_INFINITY;
max = Double.NEGATIVE_INFINITY;
}
@Override
protected void collectBucketValue(String bucketKey, Double bucketValue) {
sum += bucketValue;
min = Math.min(min, bucketValue);
max = Math.max(max, bucketValue);
count += 1;
}
@Override
protected InternalAggregation buildAggregation(Map<String, Object> metadata) {
return new InternalStatsBucket(name(), count, sum, min, max, format, metadata);
}
}
| StatsBucketPipelineAggregator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/naturalid/SimpleNaturalIdTests.java | {
"start": 1478,
"end": 4580
} | class ____ {
private static final UUID uuid = SafeRandomUUIDGenerator.safeRandomUUID();
@BeforeEach
public void prepareTestData(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final Vendor vendor = new Vendor( 1, "Acme Brick", "Acme Global" );
session.persist( vendor );
final Product product = new Product(
1,
uuid,
vendor,
Monetary.getDefaultAmountFactory().setNumber( 1L ).setCurrency( Monetary.getCurrency( Locale.US ) ).create()
);
session.persist( product );
}
);
}
@AfterEach
public void releaseTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testProcessing(DomainModelScope domainModelScope, SessionFactoryScope factoryScope) {
final PersistentClass productBootMapping = domainModelScope.getDomainModel().getEntityBinding( Product.class.getName() );
assertThat( productBootMapping.hasNaturalId(), is( true ) );
final Property sku = productBootMapping.getProperty( "sku" );
assertThat( sku.isNaturalIdentifier(), is( true ) );
final MappingMetamodel mappingMetamodel = factoryScope.getSessionFactory().getRuntimeMetamodels().getMappingMetamodel();
final EntityPersister productMapping = mappingMetamodel.findEntityDescriptor( Product.class );
assertThat( productMapping.hasNaturalIdentifier(), is( true ) );
final NaturalIdMapping naturalIdMapping = productMapping.getNaturalIdMapping();
assertThat( naturalIdMapping, notNullValue() );
}
@Test
public void testGetReference(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final SimpleNaturalIdLoadAccess<Product> loadAccess = session.bySimpleNaturalId( Product.class );
verifyEntity( loadAccess.getReference( uuid ) );
}
);
}
public void verifyEntity(Product productRef) {
assertThat( productRef, notNullValue() );
assertThat( productRef.getId(), is( 1 ) );
assertThat( productRef.getSku(), is( uuid ) );
}
@Test
public void testLoad(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final SimpleNaturalIdLoadAccess<Product> loadAccess = session.bySimpleNaturalId( Product.class );
verifyEntity( loadAccess.load( uuid ) );
}
);
}
@Test
public void testOptionalLoad(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final SimpleNaturalIdLoadAccess<Product> loadAccess = session.bySimpleNaturalId( Product.class );
final Optional<Product> optionalProduct = loadAccess.loadOptional( uuid );
assertThat( optionalProduct.isPresent(), is( true ) );
verifyEntity( optionalProduct.get() );
}
);
}
@Test
public void testMultiLoad(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final NaturalIdMultiLoadAccess<Product> loadAccess = session.byMultipleNaturalId( Product.class );
loadAccess.enableOrderedReturn( false );
final List<Product> products = loadAccess.multiLoad( uuid );
assertThat( products.size(), is( 1 ) );
verifyEntity( products.get( 0 ) );
}
);
}
}
| SimpleNaturalIdTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/FieldMissingNullableTest.java | {
"start": 13091,
"end": 13620
} | class ____ {
private String message = "hello";
public void setMessage(int x) {
message = String.valueOf(x);
}
}
""")
.doTest();
}
@Test
public void negativeCases_nonNullField() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/NonNullFieldTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
public | NonNullMethodTest |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/DropwizardClientExtensionTest.java | {
"start": 412,
"end": 1280
} | class ____ {
private static final DropwizardClientExtension EXTENSION_WITH_INSTANCE = new DropwizardClientExtension(new TestResource("foo"));
private static final DropwizardClientExtension EXTENSION_WITH_CLASS = new DropwizardClientExtension(TestResource.class);
@Test
void shouldGetStringBodyFromDropWizard() throws IOException {
try (InputStream inputStream = new URL(EXTENSION_WITH_INSTANCE.baseUri() + "/test").openStream()) {
assertThat(inputStream).asString(UTF_8).isEqualTo("foo");
}
}
@Test
void shouldGetDefaultStringBodyFromDropWizard() throws IOException {
try (InputStream inputStream = new URL(EXTENSION_WITH_CLASS.baseUri() + "/test").openStream()) {
assertThat(inputStream).asString(UTF_8).isEqualTo(TestResource.DEFAULT_MESSAGE);
}
}
}
| DropwizardClientExtensionTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineWriteResponse.java | {
"start": 1612,
"end": 2865
} | class ____ {
private List<TimelineWriteError> errors = new ArrayList<TimelineWriteError>();
public TimelineWriteResponse() {
}
/**
* Get a list of {@link TimelineWriteError} instances.
*
* @return a list of {@link TimelineWriteError} instances
*/
@XmlElement(name = "errors")
public List<TimelineWriteError> getErrors() {
return errors;
}
/**
* Add a single {@link TimelineWriteError} instance into the existing list.
*
* @param error
* a single {@link TimelineWriteError} instance
*/
public void addError(TimelineWriteError error) {
errors.add(error);
}
/**
* Add a list of {@link TimelineWriteError} instances into the existing list.
*
* @param writeErrors
* a list of {@link TimelineWriteError} instances
*/
public void addErrors(List<TimelineWriteError> writeErrors) {
this.errors.addAll(writeErrors);
}
/**
* Set the list to the given list of {@link TimelineWriteError} instances.
*
* @param writeErrors
* a list of {@link TimelineWriteError} instances
*/
public void setErrors(List<TimelineWriteError> writeErrors) {
this.errors.clear();
this.errors.addAll(writeErrors);
}
/**
* A | TimelineWriteResponse |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2052InterpolateWithSettingsProfilePropertiesTest.java | {
"start": 1014,
"end": 1796
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that properties defined in an active profile in the user's
* settings are available for interpolation of systemPath in a dependency.
* [MNG-2052]
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG2052() throws Exception {
File testDir = extractResources("/mng-2052");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
}
}
| MavenITmng2052InterpolateWithSettingsProfilePropertiesTest |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RetrofitTest.java | {
"start": 11617,
"end": 12751
} | class ____ extends CallAdapter.Factory {
@Override
public @Nullable CallAdapter<?, ?> get(
final Type returnType, Annotation[] annotations, Retrofit retrofit) {
factoryCalled.set(true);
if (getRawType(returnType) != Call.class) {
return null;
}
return new CallAdapter<Object, Call<?>>() {
@Override
public Type responseType() {
return getParameterUpperBound(0, (ParameterizedType) returnType);
}
@Override
public Call<Object> adapt(Call<Object> call) {
adapterCalled.set(true);
return call;
}
};
}
}
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addCallAdapterFactory(new MyCallAdapterFactory())
.build();
CallMethod example = retrofit.create(CallMethod.class);
assertThat(example.getResponseBody()).isNotNull();
assertThat(factoryCalled.get()).isTrue();
assertThat(adapterCalled.get()).isTrue();
}
@Test
public void customCallAdapter() {
| MyCallAdapterFactory |
java | elastic__elasticsearch | x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/analysis/MutableAnalyzerContext.java | {
"start": 2567,
"end": 2960
} | class ____ implements AutoCloseable {
private final TransportVersion originalVersion;
private RestoreTransportVersion(TransportVersion originalVersion) {
this.originalVersion = originalVersion;
}
@Override
public void close() {
MutableAnalyzerContext.this.currentVersion = originalVersion;
}
}
}
| RestoreTransportVersion |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/DefaultFileFilter.java | {
"start": 1171,
"end": 1517
} | class ____ implements Predicate<Path> {
@Override
public boolean test(Path path) {
final String fileName = path.getName();
if (fileName == null || fileName.length() == 0) {
return true;
}
final char first = fileName.charAt(0);
return first != '.' && first != '_';
}
}
| DefaultFileFilter |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperResult.java | {
"start": 706,
"end": 2933
} | class ____ implements ToXContentObject {
private final String id;
@Nullable
private final Condition.Result condition;
@Nullable
private final Transform.Result transform;
private final Action.Result action;
public ActionWrapperResult(String id, Action.Result action) {
this(id, null, null, action);
}
public ActionWrapperResult(
String id,
@Nullable Condition.Result condition,
@Nullable Transform.Result transform,
Action.Result action
) {
this.id = id;
this.condition = condition;
this.transform = transform;
this.action = action;
}
public String id() {
return id;
}
public Condition.Result condition() {
return condition;
}
public Transform.Result transform() {
return transform;
}
public Action.Result action() {
return action;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ActionWrapperResult result = (ActionWrapperResult) o;
return Objects.equals(id, result.id)
&& Objects.equals(condition, result.condition)
&& Objects.equals(transform, result.transform)
&& Objects.equals(action, result.action);
}
@Override
public int hashCode() {
return Objects.hash(id, condition, transform, action);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ActionWrapperField.ID.getPreferredName(), id);
builder.field(ActionWrapperField.TYPE.getPreferredName(), action.type());
builder.field(ActionWrapperField.STATUS.getPreferredName(), action.status().value());
if (condition != null) {
builder.field(WatchField.CONDITION.getPreferredName(), condition, params);
}
if (transform != null) {
builder.field(Transform.TRANSFORM.getPreferredName(), transform, params);
}
action.toXContent(builder, params);
return builder.endObject();
}
}
| ActionWrapperResult |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/Rounding.java | {
"start": 39118,
"end": 39790
} | class ____ extends TimeUnitPreparedRounding {
protected final long unitMillis;
AbstractNotToMidnightRounding(long unitMillis) {
this.unitMillis = unitMillis;
}
@Override
public final long nextRoundingValue(long utcMillis) {
final long roundedAfterOneIncrement = round(utcMillis + unitMillis);
if (utcMillis < roundedAfterOneIncrement) {
return roundedAfterOneIncrement;
} else {
return round(utcMillis + 2 * unitMillis);
}
}
}
}
static | AbstractNotToMidnightRounding |
java | apache__camel | components/camel-cloudevents/src/main/java/org/apache/camel/component/cloudevents/transformer/CloudEventJsonDataTypeTransformer.java | {
"start": 1828,
"end": 5608
} | class ____ extends Transformer {
public static final String APPLICATION_CLOUDEVENTS_JSON = "application/cloudevents+json";
public static final String APPLICATION_JSON = "application/json";
@Override
public void transform(Message message, DataType fromType, DataType toType) {
final Map<String, Object> headers = message.getHeaders();
String dataContentType = headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, APPLICATION_JSON).toString();
if (!APPLICATION_CLOUDEVENTS_JSON.equals(dataContentType)) {
Map<String, Object> cloudEventAttributes = new HashMap<>();
CloudEvent cloudEvent = CloudEvents.v1_0;
for (CloudEvent.Attribute attribute : cloudEvent.attributes()) {
if (headers.containsKey(attribute.id())) {
cloudEventAttributes.put(attribute.json(), headers.get(attribute.id()));
}
}
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_VERSION).json(),
cloudEvent.version());
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).json(),
message.getExchange().getExchangeId());
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).json(),
CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_TYPE);
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).json(),
CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_SOURCE);
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TIME).json(),
cloudEvent.getEventTime(message.getExchange()));
String body = MessageHelper.extractBodyAsString(message);
cloudEventAttributes.putIfAbsent("data", body);
cloudEventAttributes.putIfAbsent(
cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).json(), dataContentType);
headers.put(Exchange.CONTENT_TYPE, APPLICATION_CLOUDEVENTS_JSON);
message.setBody(createCouldEventJsonObject(cloudEventAttributes));
cloudEvent.attributes().stream().map(CloudEvent.Attribute::id).forEach(headers::remove);
}
}
private String createCouldEventJsonObject(Map<String, Object> cloudEventAttributes) {
StringBuilder builder = new StringBuilder("{");
cloudEventAttributes.forEach((key, value) -> {
if ("data".equals(key) && value instanceof String data) {
if (isJson(data)) {
// set Json data as nested object in the data field
builder.append(" ").append("\"").append(key).append("\"").append(":").append(data)
.append(",");
} else {
builder.append(" ").append("\"").append(key).append("\"").append(":").append("\"").append(data).append("\"")
.append(",");
}
} else {
builder.append(" ").append("\"").append(key).append("\"").append(":").append("\"").append(value).append("\"")
.append(",");
}
});
if (!cloudEventAttributes.isEmpty()) {
builder.deleteCharAt(builder.lastIndexOf(","));
}
return builder.append("}").toString();
}
private boolean isJson(String data) {
if (data == null || data.isEmpty()) {
return false;
}
return data.trim().startsWith("{") || data.trim().startsWith("[");
}
}
| CloudEventJsonDataTypeTransformer |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java | {
"start": 3618,
"end": 5188
} | class ____ {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestMapReduce-mapreduce");
private static FileSystem fs;
static {
try {
fs = FileSystem.getLocal(new Configuration());
} catch (IOException ioe) {
fs = null;
}
}
/**
* Modified to make it a junit test.
* The RandomGen Job does the actual work of creating
* a huge file of assorted numbers. It receives instructions
* as to how many times each number should be counted. Then
* it emits those numbers in a crazy order.
*
* The map() function takes a key/val pair that describes
* a value-to-be-emitted (the key) and how many times it
* should be emitted (the value), aka "numtimes". map() then
* emits a series of intermediate key/val pairs. It emits
* 'numtimes' of these. The key is a random number and the
* value is the 'value-to-be-emitted'.
*
* The system collates and merges these pairs according to
* the random number. reduce() function takes in a key/value
* pair that consists of a crazy random number and a series
* of values that should be emitted. The random number key
* is now dropped, and reduce() emits a pair for every intermediate value.
* The emitted key is an intermediate value. The emitted value
* is just a blank string. Thus, we've created a huge file
* of numbers in random order, but where each number appears
* as many times as we were instructed.
*/
static | TestMapReduce |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_hasAtLeastOneElementOfType_Test.java | {
"start": 987,
"end": 1397
} | class ____ {
@Test
void should_pass_if_actual_has_one_element_of_the_expected_type() {
List<Object> list = newArrayList();
list.add("string");
list.add(1);
assertThat(list).hasAtLeastOneElementOfType(Integer.class)
.hasAtLeastOneElementOfType(String.class)
.hasAtLeastOneElementOfType(Object.class);
}
}
| IterableAssert_hasAtLeastOneElementOfType_Test |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java | {
"start": 10363,
"end": 10831
} | class ____ extends Action {
ReservedStateAction(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(actionName, transportService, clusterService, threadPool, EsExecutors.DIRECT_EXECUTOR_SERVICE);
}
@Override
public Optional<String> reservedStateHandlerName() {
return Optional.of("test_reserved_state_action");
}
}
| ReservedStateAction |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/scheduling/concurrent/ThreadPoolTaskExecutor.java | {
"start": 1903,
"end": 2918
} | class ____ also well suited for management and monitoring (for example, through JMX),
* providing several useful attributes: "corePoolSize", "maxPoolSize", "keepAliveSeconds"
* (all supporting updates at runtime); "poolSize", "activeCount" (for introspection only).
*
* <p>The default configuration is a core pool size of 1, with unlimited max pool size
* and unlimited queue capacity. This is roughly equivalent to
* {@link java.util.concurrent.Executors#newSingleThreadExecutor()}, sharing a single
* thread for all tasks. Setting {@link #setQueueCapacity "queueCapacity"} to 0 mimics
* {@link java.util.concurrent.Executors#newCachedThreadPool()}, with immediate scaling
* of threads in the pool to a potentially very high number. Consider also setting a
* {@link #setMaxPoolSize "maxPoolSize"} at that point, as well as possibly a higher
* {@link #setCorePoolSize "corePoolSize"} (see also the
* {@link #setAllowCoreThreadTimeOut "allowCoreThreadTimeOut"} mode of scaling).
*
* <p><b>NOTE:</b> This | is |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/StubMessageChannel.java | {
"start": 820,
"end": 1639
} | class ____ implements SubscribableChannel {
private final List<Message<byte[]>> messages = new ArrayList<>();
private final List<MessageHandler> handlers = new ArrayList<>();
public List<Message<byte[]>> getMessages() {
return this.messages;
}
@Override
@SuppressWarnings("unchecked")
public boolean send(Message<?> message) {
this.messages.add((Message<byte[]>) message);
return true;
}
@Override
@SuppressWarnings("unchecked")
public boolean send(Message<?> message, long timeout) {
this.messages.add((Message<byte[]>) message);
return true;
}
@Override
public boolean subscribe(MessageHandler handler) {
this.handlers.add(handler);
return true;
}
@Override
public boolean unsubscribe(MessageHandler handler) {
this.handlers.remove(handler);
return true;
}
}
| StubMessageChannel |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2200/issue2224/PersonCollection.java | {
"start": 59,
"end": 217
} | class ____ extends KeyedCollection<String, Person> {
protected String getKeyForItem(Person person) {
return person.getIdNo();
}
}
| PersonCollection |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/ResolvableType.java | {
"start": 37187,
"end": 37679
} | class ____ introspect ({@code null} is semantically
* equivalent to {@code Object.class} for typical use cases here)
* @return a {@code ResolvableType} for the specified class
* @see #forClass(Class, Class)
* @see #forClassWithGenerics(Class, Class...)
*/
public static ResolvableType forClass(@Nullable Class<?> clazz) {
return new ResolvableType(clazz);
}
/**
* Return a {@code ResolvableType} for the specified {@link Class},
* doing assignability checks against the raw | to |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/utils/MockTime.java | {
"start": 1576,
"end": 3943
} | class ____ detect bugs where this is incorrectly assumed to be true
private final AtomicLong timeMs;
private final AtomicLong highResTimeNs;
public MockTime() {
this(0);
}
public MockTime(long autoTickMs) {
this(autoTickMs, System.currentTimeMillis(), System.nanoTime());
}
public MockTime(long autoTickMs, long currentTimeMs, long currentHighResTimeNs) {
this.timeMs = new AtomicLong(currentTimeMs);
this.highResTimeNs = new AtomicLong(currentHighResTimeNs);
this.autoTickMs = autoTickMs;
}
public void addListener(Listener listener) {
listeners.add(listener);
}
@Override
public long milliseconds() {
maybeSleep(autoTickMs);
return timeMs.get();
}
@Override
public long nanoseconds() {
maybeSleep(autoTickMs);
return highResTimeNs.get();
}
private void maybeSleep(long ms) {
if (ms != 0)
sleep(ms);
}
@Override
public void sleep(long ms) {
timeMs.addAndGet(ms);
highResTimeNs.addAndGet(TimeUnit.MILLISECONDS.toNanos(ms));
tick();
}
@Override
public void waitObject(Object obj, Supplier<Boolean> condition, long deadlineMs) throws InterruptedException {
Listener listener = () -> {
synchronized (obj) {
obj.notify();
}
};
listeners.add(listener);
try {
synchronized (obj) {
while (milliseconds() < deadlineMs && !condition.get()) {
obj.wait();
}
if (!condition.get())
throw new TimeoutException("Condition not satisfied before deadline");
}
} finally {
listeners.remove(listener);
}
}
public void setCurrentTimeMs(long newMs) {
long oldMs = timeMs.getAndSet(newMs);
// does not allow to set to an older timestamp
if (oldMs > newMs)
throw new IllegalArgumentException("Setting the time to " + newMs + " while current time " + oldMs + " is newer; this is not allowed");
highResTimeNs.set(TimeUnit.MILLISECONDS.toNanos(newMs));
tick();
}
private void tick() {
for (Listener listener : listeners) {
listener.onTimeUpdated();
}
}
}
| to |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/transport/AbstractClient.java | {
"start": 2688,
"end": 15944
} | class ____ extends AbstractEndpoint implements Client {
private Lock connectLock;
private final boolean needReconnect;
private final FrameworkModel frameworkModel;
protected volatile ExecutorService executor;
protected volatile ScheduledExecutorService connectivityExecutor;
protected long reconnectDuration;
public AbstractClient(URL url, ChannelHandler handler) throws RemotingException {
super(url, handler);
// initialize connectLock before calling connect()
connectLock = new ReentrantLock();
// set default needReconnect true when channel is not connected
needReconnect = url.getParameter(Constants.SEND_RECONNECT_KEY, true);
frameworkModel = url.getOrDefaultFrameworkModel();
initExecutor(url);
reconnectDuration = getReconnectDuration(url);
try {
doOpen();
} catch (Throwable t) {
close();
throw new RemotingException(
url.toInetSocketAddress(),
null,
"Failed to start " + getClass().getSimpleName() + " " + NetUtils.getLocalAddress()
+ " connect to the server " + getRemoteAddress() + ", cause: " + t.getMessage(),
t);
}
try {
// connect.
connect();
if (logger.isInfoEnabled()) {
logger.info("Start " + getClass().getSimpleName() + " " + NetUtils.getLocalAddress()
+ " connect to the server " + getRemoteAddress());
}
} catch (RemotingException t) {
// If lazy connect client fails to establish a connection, the client instance will still be created,
// and the reconnection will be initiated by ReconnectTask, so there is no need to throw an exception
if (url.getParameter(LAZY_CONNECT_KEY, false)) {
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"",
"",
"Failed to start " + getClass().getSimpleName() + " " + NetUtils.getLocalAddress()
+ " connect to the server "
+ getRemoteAddress()
+ " (the connection request is initiated by lazy connect client, ignore and retry later!), cause: "
+ t.getMessage(),
t);
return;
}
if (url.getParameter(Constants.CHECK_KEY, true)) {
close();
throw t;
} else {
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"",
"",
"Failed to start " + getClass().getSimpleName() + " " + NetUtils.getLocalAddress()
+ " connect to the server " + getRemoteAddress()
+ " (check == false, ignore and retry later!), cause: " + t.getMessage(),
t);
}
} catch (Throwable t) {
close();
throw new RemotingException(
url.toInetSocketAddress(),
null,
"Failed to start " + getClass().getSimpleName() + " " + NetUtils.getLocalAddress()
+ " connect to the server " + getRemoteAddress() + ", cause: " + t.getMessage(),
t);
}
}
protected AbstractClient() {
needReconnect = false;
frameworkModel = null;
}
private void initExecutor(URL url) {
ExecutorRepository executorRepository = ExecutorRepository.getInstance(url.getOrDefaultApplicationModel());
/*
* Consumer's executor is shared globally, provider ip doesn't need to be part of the thread name.
*
* Instance of url is InstanceAddressURL, so addParameter actually adds parameters into ServiceInstance,
* which means params are shared among different services. Since client is shared among services this is currently not a problem.
*/
url = url.addParameter(THREAD_NAME_KEY, CLIENT_THREAD_POOL_NAME)
.addParameterIfAbsent(THREADPOOL_KEY, DEFAULT_CLIENT_THREADPOOL);
executor = executorRepository.createExecutorIfAbsent(url);
connectivityExecutor = frameworkModel
.getBeanFactory()
.getBean(FrameworkExecutorRepository.class)
.getConnectivityScheduledExecutor();
}
protected static ChannelHandler wrapChannelHandler(URL url, ChannelHandler handler) {
return ChannelHandlers.wrap(handler, url);
}
public InetSocketAddress getConnectAddress() {
return new InetSocketAddress(NetUtils.filterLocalHost(getUrl().getHost()), getUrl().getPort());
}
@Override
public InetSocketAddress getRemoteAddress() {
Channel channel = getChannel();
if (channel == null) {
return getUrl().toInetSocketAddress();
}
return channel.getRemoteAddress();
}
@Override
public InetSocketAddress getLocalAddress() {
Channel channel = getChannel();
if (channel == null) {
return InetSocketAddress.createUnresolved(NetUtils.getLocalHost(), 0);
}
return channel.getLocalAddress();
}
@Override
public boolean isConnected() {
Channel channel = getChannel();
if (channel == null) {
return false;
}
return channel.isConnected();
}
@Override
public Object getAttribute(String key) {
Channel channel = getChannel();
if (channel == null) {
return null;
}
return channel.getAttribute(key);
}
@Override
public void setAttribute(String key, Object value) {
Channel channel = getChannel();
if (channel == null) {
return;
}
channel.setAttribute(key, value);
}
@Override
public void removeAttribute(String key) {
Channel channel = getChannel();
if (channel == null) {
return;
}
channel.removeAttribute(key);
}
@Override
public boolean hasAttribute(String key) {
Channel channel = getChannel();
if (channel == null) {
return false;
}
return channel.hasAttribute(key);
}
@Override
public void send(Object message, boolean sent) throws RemotingException {
if (needReconnect && !isConnected()) {
connect();
}
Channel channel = getChannel();
// TODO Can the value returned by getChannel() be null? need improvement.
if (channel == null || !channel.isConnected()) {
throw new RemotingException(this, "message can not send, because channel is closed . url:" + getUrl());
}
channel.send(message, sent);
}
protected void connect() throws RemotingException {
connectLock.lock();
try {
if (isConnected()) {
return;
}
if (isClosed() || isClosing()) {
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"",
"",
"No need to connect to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " " + NetUtils.getLocalHost() + " using dubbo version "
+ Version.getVersion() + ", cause: client status is closed or closing.");
return;
}
doConnect();
if (!isConnected()) {
throw new RemotingException(
this,
"Failed to connect to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " "
+ NetUtils.getLocalHost() + " using dubbo version " + Version.getVersion()
+ ", cause: Connect wait timeout: " + getConnectTimeout() + "ms.");
} else {
if (logger.isInfoEnabled()) {
logger.info("Successfully connect to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " "
+ NetUtils.getLocalHost() + " using dubbo version " + Version.getVersion()
+ ", channel is " + this.getChannel());
}
}
} catch (RemotingException e) {
throw e;
} catch (Throwable e) {
throw new RemotingException(
this,
"Failed to connect to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " "
+ NetUtils.getLocalHost() + " using dubbo version " + Version.getVersion()
+ ", cause: " + e.getMessage(),
e);
} finally {
connectLock.unlock();
}
}
public void disconnect() {
connectLock.lock();
try {
try {
Channel channel = getChannel();
if (channel != null) {
channel.close();
}
} catch (Throwable e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
try {
doDisConnect();
} catch (Throwable e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
} finally {
connectLock.unlock();
}
}
private long getReconnectDuration(URL url) {
int idleTimeout = getIdleTimeout(url);
long heartbeatTimeoutTick = calculateLeastDuration(idleTimeout);
return calculateReconnectDuration(url, heartbeatTimeoutTick);
}
private long calculateLeastDuration(int time) {
if (time / HEARTBEAT_CHECK_TICK <= 0) {
return LEAST_HEARTBEAT_DURATION;
} else {
return time / HEARTBEAT_CHECK_TICK;
}
}
private long calculateReconnectDuration(URL url, long tick) {
long leastReconnectDuration = url.getParameter(LEAST_RECONNECT_DURATION_KEY, LEAST_RECONNECT_DURATION);
return Math.max(leastReconnectDuration, tick);
}
@Override
public void reconnect() throws RemotingException {
connectLock.lock();
try {
disconnect();
connect();
} finally {
connectLock.unlock();
}
}
@Override
public void close() {
if (isClosed()) {
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"",
"",
"No need to close connection to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " " + NetUtils.getLocalHost() + " using dubbo version "
+ Version.getVersion() + ", cause: the client status is closed.");
return;
}
connectLock.lock();
try {
if (isClosed()) {
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"",
"",
"No need to close connection to server " + getRemoteAddress() + " from "
+ getClass().getSimpleName() + " " + NetUtils.getLocalHost() + " using dubbo version "
+ Version.getVersion() + ", cause: the client status is closed.");
return;
}
try {
super.close();
} catch (Throwable e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
try {
disconnect();
} catch (Throwable e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
try {
doClose();
} catch (Throwable e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
} finally {
connectLock.unlock();
}
}
@Override
public void close(int timeout) {
close();
}
@Override
public String toString() {
return getClass().getName() + " [" + getLocalAddress() + " -> " + getRemoteAddress() + "]";
}
/**
* Open client.
*/
protected abstract void doOpen() throws Throwable;
/**
* Close client.
*/
protected abstract void doClose() throws Throwable;
/**
* Connect to server.
*/
protected abstract void doConnect() throws Throwable;
/**
* disConnect to server.
*/
protected abstract void doDisConnect() throws Throwable;
/**
* Get the connected channel.
*/
protected abstract Channel getChannel();
}
| AbstractClient |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/paths/Paths_assertIsDirectoryRecursivelyContaining_Predicate_Test.java | {
"start": 1819,
"end": 4816
} | class ____ {
@BeforeEach
void createFixturePaths() {
// @format:off
// The layout:
// root
// |—— foo
// | |—— foobar
// | |—— foobar1.data
// | |—— foobar2.json
// |—— foo2.data
// @format:on
Path fooDir = createDirectory(tempDir, "foo2.data", "foo");
createDirectory(fooDir, "foobar", "foobar1.data", "foobar2.json");
}
@ParameterizedTest
@MethodSource("foundMatchProvider")
void should_pass_if_actual_contains_any_paths_matching_the_given_predicate(Predicate<Path> predicate) {
paths.assertIsDirectoryRecursivelyContaining(INFO, tempDir, predicate);
}
private Stream<Predicate<Path>> foundMatchProvider() {
return Stream.of(path -> path.toString().contains("bar2"), // one match
path -> path.toString().endsWith("foobar2.json"), // one match
path -> path.toString().contains("foobar"), // 3 matches
path -> path.getParent().toString().endsWith("foobar"), // one match
path -> path.toString().contains("foo")); // all matches
}
}
@Test
void should_fail_if_actual_does_not_exist() {
// GIVEN
Path notExistingPath = tempDir.resolve("doesnt-exist-file");
Predicate<Path> anyPredicate = f -> true;
// WHEN
expectAssertionError(() -> paths.assertIsDirectoryRecursivelyContaining(INFO, notExistingPath, anyPredicate));
// THEN
verify(failures).failure(INFO, shouldExist(notExistingPath));
}
@Test
void should_fail_if_actual_exists_but_is_not_a_directory() {
// GIVEN
Path rootDir = createDirectoryFromRoot("foo2.data");
Path existingPath = rootDir.resolve("foo2.data");
Predicate<Path> alwaysTrue = f -> true;
// WHEN
expectAssertionError(() -> paths.assertIsDirectoryRecursivelyContaining(INFO, existingPath, alwaysTrue));
// THEN
verify(failures).failure(INFO, shouldBeDirectory(existingPath));
}
@Test
void should_fail_if_actual_is_empty() {
// GIVEN
Predicate<Path> alwaysTrue = f -> true;
// WHEN
expectAssertionError(() -> paths.assertIsDirectoryRecursivelyContaining(INFO, tempDir, alwaysTrue));
// THEN
verify(failures).failure(INFO, directoryShouldContainRecursively(tempDir, emptyList(), THE_GIVEN_FILTER_DESCRIPTION));
}
@Test
void should_fail_if_actual_does_not_contain_any_paths_matching_the_given_predicate() {
// GIVEN
Path fooDir = createDirectory(tempDir, "foo", "foo2.data");
createDirectory(fooDir, "foo3");
Predicate<Path> alwaysFalse = f -> false;
// WHEN
expectAssertionError(() -> paths.assertIsDirectoryRecursivelyContaining(INFO, tempDir, alwaysFalse));
// THEN
List<Path> fooDirContent = list(fooDir, fooDir.resolve("foo2.data"), fooDir.resolve("foo3"));
verify(failures).failure(INFO, directoryShouldContainRecursively(tempDir, fooDirContent, THE_GIVEN_FILTER_DESCRIPTION));
}
}
| Actual_matches |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlStatsAction.java | {
"start": 352,
"end": 630
} | class ____ extends ActionType<EsqlStatsResponse> {
public static final EsqlStatsAction INSTANCE = new EsqlStatsAction();
public static final String NAME = "cluster:monitor/xpack/esql/stats/dist";
private EsqlStatsAction() {
super(NAME);
}
}
| EsqlStatsAction |
java | apache__logging-log4j2 | log4j-perf-test/src/main/java/org/apache/logging/log4j/perf/jmh/StringBuilderEscapeBenchmark.java | {
"start": 1567,
"end": 2625
} | class ____ {
StringBuilder buffer = new StringBuilder(1024 * 4);
}
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public int escapeJsonLargeString(final ThreadState state) {
state.buffer.setLength(0);
state.buffer.append(EVERY_CHARACTER_MUST_BE_ESCAPED_JSON);
StringBuilders.escapeJson(state.buffer, 0);
return state.buffer.length();
}
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public int escapeXmlLargeString(final ThreadState state) {
state.buffer.setLength(0);
state.buffer.append(EVERY_CHARACTER_MUST_BE_ESCAPED_XML);
StringBuilders.escapeXml(state.buffer, 0);
return state.buffer.length();
}
private static String repeat(final String str, final int times) {
final StringBuilder sb = new StringBuilder(str.length() * times);
for (int i = 0; i < times; i++) {
sb.append(str);
}
return sb.toString();
}
}
| ThreadState |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/AnnotationAwareAspectJAutoProxyCreator.java | {
"start": 5249,
"end": 5676
} | class ____ extends BeanFactoryAspectJAdvisorsBuilder {
public BeanFactoryAspectJAdvisorsBuilderAdapter(
ListableBeanFactory beanFactory, AspectJAdvisorFactory advisorFactory) {
super(beanFactory, advisorFactory);
}
@Override
protected boolean isEligibleBean(String beanName) {
return AnnotationAwareAspectJAutoProxyCreator.this.isEligibleAspectBean(beanName);
}
}
}
| BeanFactoryAspectJAdvisorsBuilderAdapter |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/http/AggregatingDispatcher.java | {
"start": 712,
"end": 1334
} | class ____ implements HttpServerTransport.Dispatcher {
public void dispatchAggregatedRequest(RestRequest restRequest, RestChannel restChannel, ThreadContext threadContext) {
assert restRequest.isStreamedContent();
}
@Override
public final void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
RestContentAggregator.aggregate(request, (r) -> dispatchAggregatedRequest(r, channel, threadContext));
}
@Override
public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) {
}
}
| AggregatingDispatcher |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/MultipleCacheAnnotationsTest.java | {
"start": 6311,
"end": 7937
} | class ____ {
static final String TEST_CACHE_1 = "cache1";
static final String TEST_CACHE_2 = "cache2";
@CacheResult(cacheName = TEST_CACHE_1)
public String cachedMethod1(Object key) {
return new String();
}
@CacheResult(cacheName = TEST_CACHE_2)
public String cachedMethod2(Object key) {
return new String();
}
@CacheResult(cacheName = TEST_CACHE_1)
public String cachedMethodWithNoArg1() {
return new String();
}
@CacheResult(cacheName = TEST_CACHE_2)
public String cachedMethodWithNoArg2() {
return new String();
}
@CacheResult(cacheName = TEST_CACHE_1)
@CacheInvalidate(cacheName = TEST_CACHE_1)
public String cachedMethodWithInvalidate(Object key) {
return new String();
}
@CacheResult(cacheName = TEST_CACHE_1)
@CacheInvalidateAll(cacheName = TEST_CACHE_2)
public String cachedMethodWithInvalidateAll(Object key) {
return new String();
}
@CacheInvalidate(cacheName = TEST_CACHE_1)
@CacheInvalidate(cacheName = TEST_CACHE_2)
public void multipleCacheInvalidate(Object key) {
}
@CacheInvalidate(cacheName = TEST_CACHE_1)
@CacheInvalidate(cacheName = TEST_CACHE_2)
public void multipleCacheInvalidateWithNoArg() {
}
@CacheInvalidateAll(cacheName = TEST_CACHE_1)
@CacheInvalidateAll(cacheName = TEST_CACHE_2)
public void multipleCacheInvalidateAll() {
}
}
}
| CachedService |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java | {
"start": 62907,
"end": 62974
} | class ____ implements io.grpc.xds.Filter {
static final | TestFilter |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentLongitudeWrappingAggregator.java | {
"start": 767,
"end": 1897
} | class ____ {
public static void combineIntermediate(
SpatialExtentStateWrappedLongitudeState current,
int top,
int bottom,
int negLeft,
int negRight,
int posLeft,
int posRight
) {
current.add(top, bottom, negLeft, negRight, posLeft, posRight);
}
public static void combineIntermediate(
SpatialExtentGroupingStateWrappedLongitudeState current,
int groupId,
int top,
int bottom,
int negLeft,
int negRight,
int posLeft,
int posRight
) {
current.add(groupId, top, bottom, negLeft, negRight, posLeft, posRight);
}
public static Block evaluateFinal(SpatialExtentStateWrappedLongitudeState state, DriverContext driverContext) {
return state.toBlock(driverContext);
}
public static Block evaluateFinal(
SpatialExtentGroupingStateWrappedLongitudeState state,
IntVector selected,
GroupingAggregatorEvaluationContext ctx
) {
return state.toBlock(selected, ctx.driverContext());
}
}
| SpatialExtentLongitudeWrappingAggregator |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/StateCheckpointedITCase.java | {
"start": 7872,
"end": 8948
} | class ____ extends RichFilterFunction<String>
implements ListCheckpointed<Long> {
static long[] counts = new long[PARALLELISM];
private long count;
@Override
public boolean filter(String value) throws Exception {
count++;
return value.length() < 100; // should be always true
}
@Override
public void close() {
counts[getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()] = count;
}
@Override
public List<Long> snapshotState(long checkpointId, long timestamp) throws Exception {
return Collections.singletonList(this.count);
}
@Override
public void restoreState(List<Long> state) throws Exception {
if (state.isEmpty() || state.size() > 1) {
throw new RuntimeException(
"Test failed due to unexpected recovered state size " + state.size());
}
this.count = state.get(0);
}
}
private static | StringRichFilterFunction |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanRegistrationAotProcessorTests.java | {
"start": 8138,
"end": 8432
} | class ____ {
@SuppressWarnings("unused")
private final String name;
ValueObjectSampleBean(String name) {
this.name = name;
}
}
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ValueObjectWithSpecificConstructorSampleBean.class)
static | ValueObjectSampleBean |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/schema/GenericServiceTest.java | {
"start": 2155,
"end": 3270
} | class ____ {
@BeforeAll
public static void beforeAll() {
DubboBootstrap.reset();
}
@AfterAll
public static void afterAll() {
DubboBootstrap.reset();
}
@Autowired
@Qualifier("demoServiceRef")
private GenericService demoServiceRef;
@Autowired
@Qualifier("demoService")
private ServiceBean serviceBean;
@Test
void testGeneric() {
assertNotNull(demoServiceRef);
assertNotNull(serviceBean);
ModuleConfigManager configManager = DubboBootstrap.getInstance()
.getApplicationModel()
.getDefaultModule()
.getConfigManager();
ServiceConfigBase<Object> serviceConfig = configManager.getService("demoService");
Assertions.assertEquals(DemoService.class.getName(), serviceConfig.getInterface());
Assertions.assertEquals(true, serviceConfig.isExported());
Object result = demoServiceRef.$invoke("sayHello", new String[] {"java.lang.String"}, new Object[] {"dubbo"});
Assertions.assertEquals("Welcome dubbo", result);
}
}
| GenericServiceTest |
java | apache__camel | components/camel-file/src/main/java/org/apache/camel/component/file/strategy/GenericFileRenameExclusiveReadLockStrategy.java | {
"start": 1702,
"end": 6243
} | class ____<T> implements GenericFileExclusiveReadLockStrategy<T> {
private static final Logger LOG = LoggerFactory.getLogger(GenericFileRenameExclusiveReadLockStrategy.class);
private long timeout;
private long checkInterval;
private LoggingLevel readLockLoggingLevel = LoggingLevel.DEBUG;
@Override
public void prepareOnStartup(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint) throws Exception {
// noop
}
@Override
public boolean acquireExclusiveReadLock(GenericFileOperations<T> operations, GenericFile<T> file, Exchange exchange)
throws Exception {
LOG.trace("Waiting for exclusive read lock to file: {}", file);
// the trick is to try to rename the file, if we can rename then we have
// exclusive read
// since its a Generic file we cannot use java.nio to get a RW lock
String newName = file.getFileName() + ".camelExclusiveReadLock";
// make a copy as result and change its file name
GenericFile<T> newFile = operations.newGenericFile();
file.copyFrom(file, newFile);
newFile.changeFileName(newName);
StopWatch watch = new StopWatch();
boolean exclusive = false;
while (!exclusive) {
// timeout check
if (timeout > 0) {
long delta = watch.taken();
if (delta > timeout) {
CamelLogger.log(LOG, readLockLoggingLevel,
"Cannot acquire read lock within " + timeout + " millis. Will skip the file: " + file);
// we could not get the lock within the timeout period, so
// return false
return false;
}
}
try {
exclusive = operations.renameFile(file.getAbsoluteFilePath(), newFile.getAbsoluteFilePath());
} catch (GenericFileOperationFailedException ex) {
if (ex.getCause() instanceof IOException) {
exclusive = false;
} else {
throw ex;
}
}
if (exclusive) {
LOG.trace("Acquired exclusive read lock to file: {}", file);
// rename it back so we can read it
operations.renameFile(newFile.getAbsoluteFilePath(), file.getAbsoluteFilePath());
} else {
boolean interrupted = sleep();
if (interrupted) {
// we were interrupted while sleeping, we are likely being
// shutdown so return false
return false;
}
}
}
return true;
}
@Override
public void releaseExclusiveReadLockOnAbort(GenericFileOperations<T> operations, GenericFile<T> file, Exchange exchange)
throws Exception {
// noop
}
@Override
public void releaseExclusiveReadLockOnRollback(GenericFileOperations<T> operations, GenericFile<T> file, Exchange exchange)
throws Exception {
// noop
}
@Override
public void releaseExclusiveReadLockOnCommit(GenericFileOperations<T> operations, GenericFile<T> file, Exchange exchange)
throws Exception {
// noop
}
private boolean sleep() {
LOG.trace("Exclusive read lock not granted. Sleeping for {} millis.", checkInterval);
try {
Thread.sleep(checkInterval);
return false;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.debug("Sleep interrupted while waiting for exclusive read lock, so breaking out");
return true;
}
}
public long getTimeout() {
return timeout;
}
@Override
public void setTimeout(long timeout) {
this.timeout = timeout;
}
@Override
public void setCheckInterval(long checkInterval) {
this.checkInterval = checkInterval;
}
@Override
public void setReadLockLoggingLevel(LoggingLevel readLockLoggingLevel) {
this.readLockLoggingLevel = readLockLoggingLevel;
}
@Override
public void setMarkerFiler(boolean markerFile) {
// noop - we do not use marker file with the rename strategy
}
@Override
public void setDeleteOrphanLockFiles(boolean deleteOrphanLockFiles) {
// noop - we do not use marker file with the rename strategy
}
}
| GenericFileRenameExclusiveReadLockStrategy |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java | {
"start": 810,
"end": 2418
} | class ____ extends AbstractPageMappingOperator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(EvalOperator.class);
public record EvalOperatorFactory(ExpressionEvaluator.Factory evaluator) implements OperatorFactory {
@Override
public Operator get(DriverContext driverContext) {
return new EvalOperator(driverContext, evaluator.get(driverContext));
}
@Override
public String describe() {
return "EvalOperator[evaluator=" + evaluator + "]";
}
}
private final DriverContext ctx;
private final ExpressionEvaluator evaluator;
public EvalOperator(DriverContext ctx, ExpressionEvaluator evaluator) {
this.ctx = ctx;
this.evaluator = evaluator;
ctx.breaker().addEstimateBytesAndMaybeBreak(BASE_RAM_BYTES_USED + evaluator.baseRamBytesUsed(), "ESQL");
}
@Override
protected Page process(Page page) {
Block block = evaluator.eval(page);
return page.appendBlock(block);
}
@Override
public String toString() {
return getClass().getSimpleName() + "[evaluator=" + evaluator + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(
evaluator,
() -> ctx.breaker().addWithoutBreaking(-BASE_RAM_BYTES_USED - evaluator.baseRamBytesUsed()),
super::close
);
}
/**
* Evaluates an expression {@code a + b} or {@code log(c)} one {@link Page} at a time.
* <h2>Eval</h2>
* <p>
* The primary | EvalOperator |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestIntegrationTests.java | {
"start": 59812,
"end": 64180
} | class ____ {
@ParameterizedTest(quoteTextArguments = false)
@ArgumentsSource(TwoSingleStringArgumentsProvider.class)
void testWithTwoSingleStringArgumentsProvider(String argument) {
fail(argument);
}
@ParameterizedTest(quoteTextArguments = false)
@CsvSource({ "foo", "bar" })
void testWithCsvSource(String argument) {
fail(argument);
}
@ParameterizedTest(name = "{0}")
@CsvSource({ "'üñåé'", "'\n'", "'\r'", "'\u0007'", "😱", "'Zero\u200BWidth\u200BSpaces'" })
void testWithCsvSourceAndSpecialCharacters(String argument) {
}
@ParameterizedTest(quoteTextArguments = false, name = "{0} and {1}")
@CsvSource({ "foo, 23", "bar, 42" })
void testWithCustomName(String argument, int i) {
fail(argument + ", " + i);
}
@ParameterizedTest(quoteTextArguments = false)
@ValueSource(shorts = { 1, 2 })
void testWithPrimitiveWideningConversion(double num) {
fail("num: " + num);
}
@ParameterizedTest(quoteTextArguments = false)
@ValueSource(strings = { "book 1", "book 2" })
void testWithImplicitGenericConverter(Book book) {
fail(book.title);
}
@ParameterizedTest(name = "{0}")
@ValueSource(strings = { "record 1", "record 2" })
void testWithImplicitGenericConverterWithCharSequenceConstructor(Record record) {
fail(record.title.toString());
}
@ParameterizedTest(quoteTextArguments = false)
@ValueSource(strings = { "O", "XXX" })
void testWithExplicitConverter(@ConvertWith(StringLengthConverter.class) int length) {
fail("length: " + length);
}
@ParameterizedTest(name = " \t ")
@ValueSource(strings = "not important")
void testWithEmptyName(String argument) {
fail(argument);
}
@ParameterizedTest(quoteTextArguments = false)
@ValueSource(ints = 42)
void testWithErroneousConverter(@ConvertWith(ErroneousConverter.class) Object ignored) {
fail("this should never be called");
}
@ParameterizedTest(quoteTextArguments = false, name = "{0,number,#.####}")
@ValueSource(doubles = Math.PI)
void testWithMessageFormat(double argument) {
fail(String.valueOf(argument));
}
@ParameterizedTest(quoteTextArguments = false)
@CsvSource({ "ab, cd", "ef, gh" })
void testWithAggregator(@AggregateWith(StringAggregator.class) String concatenation) {
fail("concatenation: " + concatenation);
}
@ParameterizedTest(quoteTextArguments = false)
@CsvSource(value = { " ab , cd", "ef ,gh" }, ignoreLeadingAndTrailingWhitespace = false)
void testWithIgnoreLeadingAndTrailingWhitespaceSetToFalseForCsvSource(String argument1, String argument2) {
fail("arguments: '" + argument1 + "', '" + argument2 + "'");
}
@ParameterizedTest(quoteTextArguments = false)
@CsvSource(value = { " ab , cd", "ef ,gh" }, ignoreLeadingAndTrailingWhitespace = true)
void testWithIgnoreLeadingAndTrailingWhitespaceSetToTrueForCsvSource(String argument1, String argument2) {
fail("arguments: '" + argument1 + "', '" + argument2 + "'");
}
@ParameterizedTest(quoteTextArguments = false)
@CsvFileSource(resources = "provider/leading-trailing-spaces.csv", ignoreLeadingAndTrailingWhitespace = false)
void testWithIgnoreLeadingAndTrailingWhitespaceSetToFalseForCsvFileSource(String argument1, String argument2) {
fail("arguments: '" + argument1 + "', '" + argument2 + "'");
}
@ParameterizedTest(quoteTextArguments = false)
@CsvFileSource(resources = "provider/leading-trailing-spaces.csv", ignoreLeadingAndTrailingWhitespace = true)
void testWithIgnoreLeadingAndTrailingWhitespaceSetToTrueForCsvFileSource(String argument1, String argument2) {
fail("arguments: '" + argument1 + "', '" + argument2 + "'");
}
@ParameterizedTest(quoteTextArguments = false)
@ArgumentsSource(AutoCloseableArgumentProvider.class)
void testWithAutoCloseableArgument(AutoCloseableArgument autoCloseable) {
assertEquals(0, AutoCloseableArgument.closeCounter);
}
@ParameterizedTest(quoteTextArguments = false, autoCloseArguments = false)
@ArgumentsSource(AutoCloseableArgumentProvider.class)
void testWithAutoCloseableArgumentButDisabledCleanup(AutoCloseableArgument autoCloseable) {
assertEquals(0, AutoCloseableArgument.closeCounter);
}
@ParameterizedTest
@ValueSource(ints = { 2, 3, 5 })
void testWithThreeIterations(int argument) {
fail("argument: " + argument);
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static | TestCase |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest93.java | {
"start": 893,
"end": 1185
} | class ____ extends TestCase {
public void test_false() throws Exception {
WallProvider provider = new MySqlWallProvider();
assertTrue(provider.checkValid(//
"desc tablename"));
assertEquals(0, provider.getTableStats().size());
}
}
| MySqlWallTest93 |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/sockjs/transport/session/AbstractHttpSockJsSession.java | {
"start": 2096,
"end": 11337
} | class ____ extends AbstractSockJsSession {
private final Queue<String> messageCache;
private volatile @Nullable URI uri;
private volatile @Nullable HttpHeaders handshakeHeaders;
private volatile @Nullable Principal principal;
private volatile @Nullable InetSocketAddress localAddress;
private volatile @Nullable InetSocketAddress remoteAddress;
private volatile @Nullable String acceptedProtocol;
private volatile @Nullable ServerHttpResponse response;
private volatile @Nullable SockJsFrameFormat frameFormat;
private volatile @Nullable ServerHttpAsyncRequestControl asyncRequestControl;
private boolean readyToSend;
public AbstractHttpSockJsSession(String id, SockJsServiceConfig config,
WebSocketHandler wsHandler, Map<String, Object> attributes) {
super(id, config, wsHandler, attributes);
this.messageCache = new LinkedBlockingQueue<>(config.getHttpMessageCacheSize());
}
@Override
public URI getUri() {
URI uri = this.uri;
Assert.state(uri != null, "No initial request yet");
return uri;
}
@Override
public HttpHeaders getHandshakeHeaders() {
HttpHeaders headers = this.handshakeHeaders;
Assert.state(headers != null, "No initial request yet");
return headers;
}
@Override
public @Nullable Principal getPrincipal() {
return this.principal;
}
@Override
public @Nullable InetSocketAddress getLocalAddress() {
return this.localAddress;
}
@Override
public @Nullable InetSocketAddress getRemoteAddress() {
return this.remoteAddress;
}
/**
* Unlike WebSocket where sub-protocol negotiation is part of the initial
* handshake, in HTTP transports the same negotiation must be emulated and
* the selected protocol set through this setter.
* @param protocol the sub-protocol to set
*/
public void setAcceptedProtocol(@Nullable String protocol) {
this.acceptedProtocol = protocol;
}
/**
* Return the selected sub-protocol to use.
*/
@Override
public @Nullable String getAcceptedProtocol() {
return this.acceptedProtocol;
}
/**
* Return the SockJS buffer for messages stored transparently between polling
* requests. If the polling request takes longer than 5 seconds, the session
* is closed.
* @see org.springframework.web.socket.sockjs.transport.TransportHandlingSockJsService
*/
protected Queue<String> getMessageCache() {
return this.messageCache;
}
@Override
public boolean isActive() {
ServerHttpAsyncRequestControl control = this.asyncRequestControl;
return (control != null && !control.isCompleted());
}
@Override
public void setTextMessageSizeLimit(int messageSizeLimit) {
// ignore
}
@Override
public int getTextMessageSizeLimit() {
return -1;
}
@Override
public void setBinaryMessageSizeLimit(int messageSizeLimit) {
// ignore
}
@Override
public int getBinaryMessageSizeLimit() {
return -1;
}
@Override
public List<WebSocketExtension> getExtensions() {
return Collections.emptyList();
}
/**
* Handle the first request for receiving messages on a SockJS HTTP transport
* based session.
* <p>Long polling-based transports (for example, "xhr", "jsonp") complete the request
* after writing the open frame. Streaming-based transports ("xhr_streaming",
* "eventsource", and "htmlfile") leave the response open longer for further
* streaming of message frames but will also close it eventually after some
* amount of data has been sent.
* @param request the current request
* @param response the current response
* @param frameFormat the transport-specific SocksJS frame format to use
*/
public void handleInitialRequest(ServerHttpRequest request, ServerHttpResponse response,
SockJsFrameFormat frameFormat) throws SockJsException {
this.uri = request.getURI();
this.handshakeHeaders = request.getHeaders();
this.principal = request.getPrincipal();
try {
this.localAddress = request.getLocalAddress();
}
catch (Exception ex) {
// Ignore
}
try {
this.remoteAddress = request.getRemoteAddress();
}
catch (Exception ex) {
// Ignore
}
synchronized (this.responseLock) {
try {
this.response = response;
this.frameFormat = frameFormat;
ServerHttpAsyncRequestControl control = request.getAsyncRequestControl(response);
this.asyncRequestControl = control;
control.start(-1);
disableShallowEtagHeaderFilter(request);
// Let "our" handler know before sending the open frame to the remote handler
delegateConnectionEstablished();
handleRequestInternal(request, response, true);
// Request might have been reset (for example, polling sessions do after writing)
this.readyToSend = isActive();
}
catch (Throwable ex) {
tryCloseWithSockJsTransportError(ex, CloseStatus.SERVER_ERROR);
throw new SockJsTransportFailureException("Failed to open session", getId(), ex);
}
}
}
/**
* Handle all requests, except the first one, to receive messages on a SockJS
* HTTP transport based session.
* <p>Long polling-based transports (for example, "xhr", "jsonp") complete the request
* after writing any buffered message frames (or the next one). Streaming-based
* transports ("xhr_streaming", "eventsource", and "htmlfile") leave the
* response open longer for further streaming of message frames but will also
* close it eventually after some amount of data has been sent.
* @param request the current request
* @param response the current response
* @param frameFormat the transport-specific SocksJS frame format to use
*/
public void handleSuccessiveRequest(ServerHttpRequest request, ServerHttpResponse response,
SockJsFrameFormat frameFormat) throws SockJsException {
synchronized (this.responseLock) {
try {
if (isClosed()) {
String formattedFrame = frameFormat.format(SockJsFrame.closeFrameGoAway());
response.getBody().write(formattedFrame.getBytes(SockJsFrame.CHARSET));
return;
}
this.response = response;
this.frameFormat = frameFormat;
ServerHttpAsyncRequestControl control = request.getAsyncRequestControl(response);
this.asyncRequestControl = control;
control.start(-1);
disableShallowEtagHeaderFilter(request);
handleRequestInternal(request, response, false);
this.readyToSend = isActive();
}
catch (Throwable ex) {
tryCloseWithSockJsTransportError(ex, CloseStatus.SERVER_ERROR);
throw new SockJsTransportFailureException("Failed to handle SockJS receive request", getId(), ex);
}
}
}
private void disableShallowEtagHeaderFilter(ServerHttpRequest request) {
if (request instanceof ServletServerHttpRequest servletServerHttpRequest) {
ServletRequest servletRequest = servletServerHttpRequest.getServletRequest();
ShallowEtagHeaderFilter.disableContentCaching(servletRequest);
}
}
/**
* Invoked when a SockJS transport request is received.
* @param request the current request
* @param response the current response
* @param initialRequest whether it is the first request for the session
*/
protected abstract void handleRequestInternal(ServerHttpRequest request, ServerHttpResponse response,
boolean initialRequest) throws IOException;
@Override
protected final void sendMessageInternal(String message) throws SockJsTransportFailureException {
synchronized (this.responseLock) {
this.messageCache.add(message);
if (logger.isTraceEnabled()) {
logger.trace(this.messageCache.size() + " message(s) to flush in session " + getId());
}
if (isActive() && this.readyToSend) {
if (logger.isTraceEnabled()) {
logger.trace("Session is active, ready to flush.");
}
cancelHeartbeat();
flushCache();
}
else {
if (logger.isTraceEnabled()) {
logger.trace("Session is not active, not ready to flush.");
}
}
}
}
/**
* Called when the connection is active and ready to write to the response.
* Subclasses should only call this method from a method where the
* "responseLock" is acquired.
*/
protected abstract void flushCache() throws SockJsTransportFailureException;
@Override
protected void disconnect(CloseStatus status) {
resetRequest();
}
protected void resetRequest() {
synchronized (this.responseLock) {
ServerHttpAsyncRequestControl control = this.asyncRequestControl;
this.asyncRequestControl = null;
this.readyToSend = false;
this.response = null;
updateLastActiveTime();
if (control != null && !control.isCompleted() && control.isStarted()) {
try {
control.complete();
}
catch (Throwable ex) {
// Could be part of normal workflow (for example, browser tab closed)
logger.debug("Failed to complete request: " + ex.getMessage());
}
}
}
}
@Override
protected void writeFrameInternal(SockJsFrame frame) throws IOException {
if (isActive()) {
SockJsFrameFormat frameFormat = this.frameFormat;
ServerHttpResponse response = this.response;
if (frameFormat != null && response != null) {
String formattedFrame = frameFormat.format(frame);
if (logger.isTraceEnabled()) {
logger.trace("Writing to HTTP response: " + formattedFrame);
}
response.getBody().write(formattedFrame.getBytes(SockJsFrame.CHARSET));
response.flush();
}
}
}
}
| AbstractHttpSockJsSession |
java | spring-projects__spring-security | crypto/src/main/java/org/springframework/security/crypto/encrypt/Encryptors.java | {
"start": 1030,
"end": 4358
} | class ____ {
private Encryptors() {
}
/**
* Creates a standard password-based bytes encryptor using 256 bit AES encryption with
* Galois Counter Mode (GCM). Derives the secret key using PKCS #5's PBKDF2
* (Password-Based Key Derivation Function #2). Salts the password to prevent
* dictionary attacks against the key. The provided salt is expected to be
* hex-encoded; it should be random and at least 8 bytes in length. Also applies a
* random 16-byte initialization vector to ensure each encrypted message will be
* unique. Requires Java 6.
* @param password the password used to generate the encryptor's secret key; should
* not be shared
* @param salt a hex-encoded, random, site-global salt value to use to generate the
* key
*/
public static BytesEncryptor stronger(CharSequence password, CharSequence salt) {
return new AesBytesEncryptor(password.toString(), salt, KeyGenerators.secureRandom(16), CipherAlgorithm.GCM);
}
/**
* Creates a standard password-based bytes encryptor using 256 bit AES encryption.
* Derives the secret key using PKCS #5's PBKDF2 (Password-Based Key Derivation
* Function #2). Salts the password to prevent dictionary attacks against the key. The
* provided salt is expected to be hex-encoded; it should be random and at least 8
* bytes in length. Also applies a random 16-byte initialization vector to ensure each
* encrypted message will be unique. Requires Java 6. NOTE: This mode is not
* <a href="https://en.wikipedia.org/wiki/Authenticated_encryption">authenticated</a>
* and does not provide any guarantees about the authenticity of the data. For a more
* secure alternative, users should prefer
* {@link #stronger(CharSequence, CharSequence)}.
* @param password the password used to generate the encryptor's secret key; should
* not be shared
* @param salt a hex-encoded, random, site-global salt value to use to generate the
* key
*
* @see Encryptors#stronger(CharSequence, CharSequence)
*/
public static BytesEncryptor standard(CharSequence password, CharSequence salt) {
return new AesBytesEncryptor(password.toString(), salt, KeyGenerators.secureRandom(16));
}
/**
* Creates a text encryptor that uses "stronger" password-based encryption. Encrypted
* text is hex-encoded.
* @param password the password used to generate the encryptor's secret key; should
* not be shared
* @see Encryptors#stronger(CharSequence, CharSequence)
*/
public static TextEncryptor delux(CharSequence password, CharSequence salt) {
return new HexEncodingTextEncryptor(stronger(password, salt));
}
/**
* Creates a text encryptor that uses "standard" password-based encryption. Encrypted
* text is hex-encoded.
* @param password the password used to generate the encryptor's secret key; should
* not be shared
* @see Encryptors#standard(CharSequence, CharSequence)
*/
public static TextEncryptor text(CharSequence password, CharSequence salt) {
return new HexEncodingTextEncryptor(standard(password, salt));
}
/**
* Creates a text encryptor that performs no encryption. Useful for developer testing
* environments where working with plain text strings is desired for simplicity.
*/
public static TextEncryptor noOpText() {
return NoOpTextEncryptor.INSTANCE;
}
private static final | Encryptors |
java | spring-projects__spring-boot | module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/security/GraphQlWebFluxSecurityAutoConfiguration.java | {
"start": 2101,
"end": 2346
} | class ____ {
@Bean
@ConditionalOnMissingBean
ReactiveSecurityDataFetcherExceptionResolver reactiveSecurityDataFetcherExceptionResolver() {
return new ReactiveSecurityDataFetcherExceptionResolver();
}
}
| GraphQlWebFluxSecurityAutoConfiguration |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/misc/RaceCondition738Test.java | {
"start": 458,
"end": 1003
} | class ____ extends AbstractHasSubTypes {
private final String id;
public TypeOne(String id) {
this.id = id;
}
@JsonProperty
public String getId() {
return id;
}
@Override
public String getType() {
return TypeOne.class.getSimpleName();
}
}
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.WRAPPER_OBJECT)
@JsonSubTypes({
@JsonSubTypes.Type(value = TypeOne.class, name = "one")
})
public | TypeOne |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/aot/BeanInstanceSupplierTests.java | {
"start": 34378,
"end": 35099
} | class ____ {
final Class<?> beanClass;
public BeanRegistrar(Class<?> beanClass) {
this.beanClass = beanClass;
}
RegisteredBean registerBean(DefaultListableBeanFactory beanFactory) {
return registerBean(beanFactory, bd -> {});
}
RegisteredBean registerBean(DefaultListableBeanFactory beanFactory, Consumer<RootBeanDefinition> bdCustomizer) {
String beanName = "testBean";
RootBeanDefinition bd = new RootBeanDefinition(this.beanClass);
bd.setInstanceSupplier(() -> {
throw new BeanCurrentlyInCreationException(beanName);
});
bdCustomizer.accept(bd);
beanFactory.registerBeanDefinition(beanName, bd);
return RegisteredBean.of(beanFactory, beanName);
}
}
static | BeanRegistrar |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmCorrelation.java | {
"start": 592,
"end": 779
} | interface ____<L,R> extends SqmFrom<L,R>, SqmPathWrapper<R,R> {
SqmRoot<L> getCorrelatedRoot();
@Override
default SqmRoot<?> findRoot() {
return getCorrelatedRoot();
}
}
| SqmCorrelation |
java | apache__kafka | streams/upgrade-system-tests-11/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java | {
"start": 1482,
"end": 6150
} | class ____ {
@SuppressWarnings("unchecked")
public static void main(final String[] args) throws Exception {
if (args.length < 1) {
System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires one argument (properties-file) but none provided");
}
System.out.println("Args are " + Arrays.toString(args));
final String propFileName = args[0];
final Properties streamsProperties = Utils.loadProps(propFileName);
final Properties config = new Properties();
System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v1.1)");
System.out.println("props=" + streamsProperties);
config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade");
config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L);
config.putAll(streamsProperties);
final String sourceTopic = streamsProperties.getProperty("source.topic", "source");
final String sinkTopic = streamsProperties.getProperty("sink.topic", "sink");
final String taskDelimiter = streamsProperties.getProperty("task.delimiter", "#");
final int reportInterval = Integer.parseInt(streamsProperties.getProperty("report.interval", "100"));
final String upgradePhase = streamsProperties.getProperty("upgrade.phase", "");
final StreamsBuilder builder = new StreamsBuilder();
builder.<String, String>stream(sourceTopic)
.peek(new ForeachAction<String, String>() {
int recordCounter = 0;
@Override
public void apply(final String key, final String value) {
if (recordCounter++ % reportInterval == 0) {
System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter);
System.out.flush();
}
}
}
).to(sinkTopic);
final KafkaStreams streams = new KafkaStreams(builder.build(), config);
streams.setStateListener((newState, oldState) -> {
if (newState == State.RUNNING && oldState == State.REBALANCING) {
System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase);
final Set<ThreadMetadata> allThreadMetadata = streams.localThreadsMetadata();
final StringBuilder taskReportBuilder = new StringBuilder();
final List<String> activeTasks = new ArrayList<>();
final List<String> standbyTasks = new ArrayList<>();
for (final ThreadMetadata threadMetadata : allThreadMetadata) {
getTasks(threadMetadata.activeTasks(), activeTasks);
if (!threadMetadata.standbyTasks().isEmpty()) {
getTasks(threadMetadata.standbyTasks(), standbyTasks);
}
}
addTasksToBuilder(activeTasks, taskReportBuilder);
taskReportBuilder.append(taskDelimiter);
if (!standbyTasks.isEmpty()) {
addTasksToBuilder(standbyTasks, taskReportBuilder);
}
System.out.println("TASK-ASSIGNMENTS:" + taskReportBuilder);
}
if (newState == State.REBALANCING) {
System.out.printf("%sStarting a REBALANCE%n", upgradePhase);
}
});
streams.start();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
streams.close();
System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase);
System.out.flush();
}));
}
private static void addTasksToBuilder(final List<String> tasks, final StringBuilder builder) {
if (!tasks.isEmpty()) {
for (final String task : tasks) {
builder.append(task).append(",");
}
builder.setLength(builder.length() - 1);
}
}
private static void getTasks(final Set<TaskMetadata> taskMetadata,
final List<String> taskList) {
for (final TaskMetadata task : taskMetadata) {
final Set<TopicPartition> topicPartitions = task.topicPartitions();
for (final TopicPartition topicPartition : topicPartitions) {
taskList.add(topicPartition.toString());
}
}
}
}
| StreamsUpgradeToCooperativeRebalanceTest |
java | netty__netty | resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverBuilderTest.java | {
"start": 9290,
"end": 9743
} | class ____ implements DnsServerAddressStream {
@Override
public InetSocketAddress next() {
throw new UnsupportedOperationException();
}
@Override
public int size() {
throw new UnsupportedOperationException();
}
@Override
public DnsServerAddressStream duplicate() {
throw new UnsupportedOperationException();
}
}
}
| TestQueryServerAddressStream |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/tm/CMTTest.java | {
"start": 23363,
"end": 23558
} | class ____ implements SettingProvider.Provider<String> {
@Override
public String getSetting() {
return JtaAwareConnectionProviderImpl.class.getName();
}
}
public static | ConnectionProvider |
java | apache__camel | components/camel-google/camel-google-mail/src/main/java/org/apache/camel/component/google/mail/GoogleMailEndpoint.java | {
"start": 2109,
"end": 5865
} | class ____ extends AbstractApiEndpoint<GoogleMailApiName, GoogleMailConfiguration>
implements EndpointServiceLocation {
private Object apiProxy;
@UriParam
private GoogleMailConfiguration configuration;
public GoogleMailEndpoint(String uri, GoogleMailComponent component, GoogleMailApiName apiName, String methodName,
GoogleMailConfiguration endpointConfiguration) {
super(uri, component, apiName, methodName, GoogleMailApiCollection.getCollection().getHelper(apiName),
endpointConfiguration);
this.configuration = endpointConfiguration;
}
@Override
public Producer createProducer() throws Exception {
return new GoogleMailProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
// make sure inBody is not set for consumers
if (inBody != null) {
throw new IllegalArgumentException("Option inBody is not supported for consumer endpoint");
}
final GoogleMailConsumer consumer = new GoogleMailConsumer(this, processor);
// also set consumer.* properties
configureConsumer(consumer);
return consumer;
}
@Override
protected ApiMethodPropertiesHelper<GoogleMailConfiguration> getPropertiesHelper() {
return GoogleMailPropertiesHelper.getHelper(getCamelContext());
}
@Override
protected String getThreadProfileName() {
return GoogleMailConstants.THREAD_PROFILE_NAME;
}
@Override
protected void afterConfigureProperties() {
switch (apiName) {
case ATTACHMENTS:
apiProxy = getClient().users().messages().attachments();
break;
case DRAFTS:
apiProxy = getClient().users().drafts();
break;
case HISTORY:
apiProxy = getClient().users().history();
break;
case LABELS:
apiProxy = getClient().users().labels();
break;
case MESSAGES:
apiProxy = getClient().users().messages();
break;
case THREADS:
apiProxy = getClient().users().threads();
break;
case USERS:
apiProxy = getClient().users();
break;
default:
throw new IllegalArgumentException("Invalid API name " + apiName);
}
}
public Gmail getClient() {
return ((GoogleMailComponent) getComponent()).getClient(configuration);
}
@Override
public Object getApiProxy(ApiMethod method, Map<String, Object> args) {
return apiProxy;
}
public GoogleMailClientFactory getClientFactory() {
return ((GoogleMailComponent) getComponent()).getClientFactory();
}
public void setClientFactory(GoogleMailClientFactory clientFactory) {
((GoogleMailComponent) getComponent()).setClientFactory(clientFactory);
}
@Override
public String getServiceUrl() {
if (ObjectHelper.isNotEmpty(ObjectHelper.isNotEmpty(configuration.getApiName())
&& ObjectHelper.isNotEmpty(configuration.getMethodName()))) {
return getServiceProtocol() + ":" + configuration.getApiName() + ":" + configuration.getMethodName();
}
return null;
}
@Override
public String getServiceProtocol() {
return "mail";
}
@Override
public Map<String, String> getServiceMetadata() {
if (configuration.getApplicationName() != null) {
return Map.of("applicationName", configuration.getApplicationName());
}
return null;
}
}
| GoogleMailEndpoint |
java | apache__kafka | coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java | {
"start": 5082,
"end": 269253
} | class ____ {
private static final TopicPartition TP = new TopicPartition("__consumer_offsets", 0);
private static final Duration DEFAULT_WRITE_TIMEOUT = Duration.ofMillis(5);
private static final short TXN_OFFSET_COMMIT_LATEST_VERSION = ApiKeys.TXN_OFFSET_COMMIT.latestVersion();
@Test
public void testScheduleLoading() {
MockTimer timer = new MockTimer();
MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withLoader(loader)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
CompletableFuture<CoordinatorLoader.LoadSummary> future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
// Getting the coordinator context fails because the coordinator
// does not exist until scheduleLoadOperation is called.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the coordinator context succeeds now.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(coordinator, ctx.coordinator.coordinator());
// The coordinator is loading.
assertEquals(LOADING, ctx.state);
assertEquals(0, ctx.epoch);
assertEquals(coordinator, ctx.coordinator.coordinator());
// When the loading completes, the coordinator transitions to active.
future.complete(null);
assertEquals(ACTIVE, ctx.state);
// Verify that onLoaded is called.
verify(coordinator, times(1)).onLoaded(CoordinatorMetadataImage.EMPTY);
// Verify that the listener is registered.
verify(writer, times(1)).registerListener(
eq(TP),
any(PartitionWriter.Listener.class)
);
// Verify that the builder got all the expected objects.
verify(builder, times(1)).withSnapshotRegistry(eq(ctx.coordinator.snapshotRegistry()));
verify(builder, times(1)).withLogContext(eq(ctx.logContext));
verify(builder, times(1)).withTime(eq(timer.time()));
verify(builder, times(1)).withTimer(eq(ctx.timer));
}
@Test
public void testScheduleLoadingWithFailure() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withLoader(loader)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
CompletableFuture<CoordinatorLoader.LoadSummary> future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the context succeeds and the coordinator should be in loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(LOADING, ctx.state);
assertEquals(0, ctx.epoch);
assertEquals(coordinator, ctx.coordinator.coordinator());
// When the loading fails, the coordinator transitions to failed.
future.completeExceptionally(new Exception("failure"));
assertEquals(FAILED, ctx.state);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
}
@Test
public void testScheduleLoadingWithStalePartitionEpoch() {
MockTimer timer = new MockTimer();
MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withLoader(loader)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
CompletableFuture<CoordinatorLoader.LoadSummary> future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Getting the context succeeds and the coordinator should be in loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(LOADING, ctx.state);
assertEquals(10, ctx.epoch);
assertEquals(coordinator, ctx.coordinator.coordinator());
// When the loading completes, the coordinator transitions to active.
future.complete(null);
assertEquals(ACTIVE, ctx.state);
assertEquals(10, ctx.epoch);
// Loading with a previous epoch is a no-op. The coordinator stays
// in active state with the correct epoch.
runtime.scheduleLoadOperation(TP, 0);
assertEquals(ACTIVE, ctx.state);
assertEquals(10, ctx.epoch);
}
@Test
public void testScheduleLoadingAfterLoadingFailure() {
MockTimer timer = new MockTimer();
MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withLoader(loader)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
CompletableFuture<CoordinatorLoader.LoadSummary> future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Getting the context succeeds and the coordinator should be in loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(LOADING, ctx.state);
assertEquals(10, ctx.epoch);
assertEquals(coordinator, ctx.coordinator.coordinator());
// When the loading fails, the coordinator transitions to failed.
future.completeExceptionally(new Exception("failure"));
assertEquals(FAILED, ctx.state);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
// Create a new coordinator.
coordinator = mock(MockCoordinatorShard.class);
when(builder.build()).thenReturn(coordinator);
// Schedule the reloading.
future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
runtime.scheduleLoadOperation(TP, 11);
// Getting the context succeeds and the coordinator should be in loading.
ctx = runtime.contextOrThrow(TP);
assertEquals(LOADING, ctx.state);
assertEquals(11, ctx.epoch);
assertEquals(coordinator, ctx.coordinator.coordinator());
// Complete the loading.
future.complete(null);
// Verify the state.
assertEquals(ACTIVE, ctx.state);
}
@Test
public void testScheduleUnloading() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Loads the coordinator. It directly transitions to active.
runtime.scheduleLoadOperation(TP, 10);
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(ACTIVE, ctx.state);
assertEquals(10, ctx.epoch);
// Schedule the unloading.
runtime.scheduleUnloadOperation(TP, OptionalInt.of(ctx.epoch + 1));
assertEquals(CLOSED, ctx.state);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
// Verify that the listener is deregistered.
verify(writer, times(1)).deregisterListener(
eq(TP),
any(PartitionWriter.Listener.class)
);
// Getting the coordinator context fails because it no longer exists.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
}
@Test
public void testScheduleUnloadingWithEmptyEpoch() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Loads the coordinator. It directly transitions to active.
runtime.scheduleLoadOperation(TP, 10);
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(ACTIVE, ctx.state);
assertEquals(10, ctx.epoch);
// Schedule the unloading.
runtime.scheduleUnloadOperation(TP, OptionalInt.empty());
assertEquals(CLOSED, ctx.state);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
// Verify that the listener is deregistered.
verify(writer, times(1)).deregisterListener(
eq(TP),
any(PartitionWriter.Listener.class)
);
// Getting the coordinator context fails because it no longer exists.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
}
// Verifies that unloading a partition for which no context exists (e.g. a
// follower that was never the coordinator) is a safe no-op: the unload event
// is skipped and the shard's onUnloaded callback is never invoked.
@Test
public void testScheduleUnloadingWhenContextDoesntExist() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Stub the shard builder so it would produce the mocked shard if a load
// ever happened; in this test no load is scheduled, so these stubs are
// never exercised.
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// No loading is scheduled. This is to check the case when a follower that was never a coordinator
// is asked to unload its state. The unload event is skipped in this case.
// Schedule the unloading.
runtime.scheduleUnloadOperation(TP, OptionalInt.of(11));
// Verify that onUnloaded is not called.
verify(coordinator, times(0)).onUnloaded();
// Getting the coordinator context fails because it doesn't exist.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
}
// Verifies that an unload request carrying a stale partition epoch (older
// than the epoch the coordinator was loaded with) is ignored: the context
// stays ACTIVE and keeps its current epoch.
@Test
public void testScheduleUnloadingWithStalePartitionEpoch() {
    MockTimer timer = new MockTimer();
    MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
    MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
    MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(supplier)
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();
    // Stub the builder chain once per method; the previous version stubbed
    // withTime/withTimer/withExecutor twice, which was redundant copy-paste.
    when(builder.withSnapshotRegistry(any())).thenReturn(builder);
    when(builder.withLogContext(any())).thenReturn(builder);
    when(builder.withTime(any())).thenReturn(builder);
    when(builder.withTimer(any())).thenReturn(builder);
    when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
    when(builder.withTopicPartition(any())).thenReturn(builder);
    when(builder.withExecutor(any())).thenReturn(builder);
    when(builder.build()).thenReturn(coordinator);
    when(supplier.get()).thenReturn(builder);
    // Loads the coordinator. It directly transitions to active.
    runtime.scheduleLoadOperation(TP, 10);
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(ACTIVE, ctx.state);
    assertEquals(10, ctx.epoch);
    // Unloading with a previous epoch is a no-op. The coordinator stays
    // in active with the correct epoch.
    runtime.scheduleUnloadOperation(TP, OptionalInt.of(0));
    assertEquals(ACTIVE, ctx.state);
    assertEquals(10, ctx.epoch);
}
// Verifies that an exception thrown by the shard's onUnloaded callback does
// not prevent the unload from completing: the context still transitions to
// CLOSED and is removed from the runtime.
@Test
public void testScheduleUnloadingWithException() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics metrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(metrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Make the unload callback itself fail.
doThrow(new KafkaException("error")).when(coordinator).onUnloaded();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Loads the coordinator. It directly transitions to active.
runtime.scheduleLoadOperation(TP, 10);
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(ACTIVE, ctx.state);
assertEquals(10, ctx.epoch);
// Schedule the unloading.
runtime.scheduleUnloadOperation(TP, OptionalInt.of(ctx.epoch + 1));
assertEquals(CLOSED, ctx.state);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
// Getting the coordinator context fails because it no longer exists.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
}
// Verifies that an unload still completes when completing the deferred
// (in-flight) events throws: the purgatory-time metric is stubbed to throw
// on every completion, yet all pending futures are failed with
// NotCoordinatorException and the context is removed.
@Test
public void testScheduleUnloadingWithDeferredEventExceptions() throws ExecutionException, InterruptedException, TimeoutException {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics metrics = mock(CoordinatorRuntimeMetrics.class);
// All operations will throw an exception when completed.
doThrow(new KafkaException("error")).when(metrics).recordEventPurgatoryTime(anyLong());
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(Duration.ofMillis(20))
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(metrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(10))
.withExecutorService(mock(ExecutorService.class))
.build();
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Load the coordinator.
runtime.scheduleLoadOperation(TP, 10);
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
// Complete transaction #1, to force the flush of write #1.
// Use TV_1 since this test doesn't check epoch validation - it only tests write flushing behavior.
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
(short) 50,
10,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
DEFAULT_WRITE_TIMEOUT
);
// Write #2 but without any records.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of(), "response2")
);
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1"),
endTransactionMarker(100L, (short) 50, timer.time().milliseconds(), 10, ControlRecordType.COMMIT)
), writer.entries(TP));
// Verify that no writes are committed yet.
assertFalse(write1.isDone());
assertFalse(complete1.isDone());
assertFalse(write2.isDone());
// Schedule the unloading.
runtime.scheduleUnloadOperation(TP, OptionalInt.of(ctx.epoch + 1));
assertEquals(CLOSED, ctx.state);
// All write completions throw exceptions after completing their futures.
// Despite the exceptions, the unload should still complete.
assertTrue(write1.isDone());
assertTrue(complete1.isDone());
assertTrue(write2.isDone());
assertFutureThrows(NotCoordinatorException.class, write1);
assertFutureThrows(NotCoordinatorException.class, complete1);
assertFutureThrows(NotCoordinatorException.class, write2);
// Verify that onUnloaded is called.
verify(coordinator, times(1)).onUnloaded();
// Getting the coordinator context fails because it no longer exists.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
}
// End-to-end happy path for scheduleWriteOperation: records are replayed to
// the shard and appended to the log immediately, but the response futures
// only complete once the writer commits (advances the HWM) past the write's
// offset. Also checks snapshot-registry epoch bookkeeping along the way.
@Test
public void testScheduleWriteOp() throws ExecutionException, InterruptedException, TimeoutException {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")
);
// Verify that the write is not committed yet.
assertFalse(write1.isDone());
// The last written offset is updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
// The last committed offset does not change.
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
// A new snapshot is created.
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
// Records have been replayed to the coordinator.
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records());
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1", "record2")
), writer.entries(TP));
// Write #2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record3"), "response2"));
// Verify that the write is not committed yet.
assertFalse(write2.isDone());
// The last written offset is updated.
assertEquals(3L, ctx.coordinator.lastWrittenOffset());
// The last committed offset does not change.
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
// A new snapshot is created.
assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
// Records have been replayed to the coordinator.
assertEquals(Set.of("record1", "record2", "record3"), ctx.coordinator.coordinator().records());
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1", "record2"),
records(timer.time().milliseconds(), "record3")
), writer.entries(TP));
// Write #3 but without any records.
CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of(), "response3"));
// Verify that the write is not committed yet. An empty write still waits
// for the current last-written offset to be committed.
assertFalse(write3.isDone());
// The state does not change.
assertEquals(3L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2", "record3"), ctx.coordinator.coordinator().records());
assertEquals(List.of(
records(timer.time().milliseconds(), "record1", "record2"),
records(timer.time().milliseconds(), "record3")
), writer.entries(TP));
// Commit write #1.
writer.commit(TP, 2);
// The write is completed.
assertTrue(write1.isDone());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
// The last committed offset is updated.
assertEquals(2L, ctx.coordinator.lastCommittedOffset());
// The snapshot is cleaned up.
assertEquals(List.of(2L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
// Commit write #2.
writer.commit(TP, 3);
// The writes are completed.
assertTrue(write2.isDone());
assertTrue(write3.isDone());
assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
// The last committed offset is updated.
assertEquals(3L, ctx.coordinator.lastCommittedOffset());
// The snapshot is cleaned up.
assertEquals(List.of(3L), ctx.coordinator.snapshotRegistry().epochsList());
// Write #4 but without records.
CompletableFuture<String> write4 = runtime.scheduleWriteOperation("write#4", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of(), "response4"));
// It is completed immediately because the state is fully committed.
assertTrue(write4.isDone());
assertEquals("response4", write4.get(5, TimeUnit.SECONDS));
assertEquals(List.of(3L), ctx.coordinator.snapshotRegistry().epochsList());
}
// Verifies that scheduling a write against a partition that has never been
// loaded fails the returned future with NotCoordinatorException.
@Test
public void testScheduleWriteOpWhenInactive() {
    // Build a runtime but never load the coordinator for TP, so no
    // context exists when the write is scheduled.
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // The write must be rejected because there is no coordinator context
    // for the partition.
    CompletableFuture<String> pendingWrite = runtime.scheduleWriteOperation(
        "write",
        TP,
        DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(List.of(), "response1")
    );
    assertFutureThrows(NotCoordinatorException.class, pendingWrite);
}
// Verifies that an exception thrown by the write operation itself is caught
// by the runtime and used to fail the returned future rather than escaping.
@Test
public void testScheduleWriteOpWhenOpFails() {
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Make the coordinator active so the write reaches the operation itself.
    runtime.scheduleLoadOperation(TP, 10);

    // The operation throws; the exception completes the future.
    CompletableFuture<String> failedWrite = runtime.scheduleWriteOperation(
        "write",
        TP,
        DEFAULT_WRITE_TIMEOUT,
        state -> {
            throw new KafkaException("error");
        }
    );
    assertFutureThrows(KafkaException.class, failedWrite);
}
// Verifies that a failure while replaying generated records to the shard
// aborts the write: the future is failed with the replay exception and the
// coordinator state (offsets, snapshots) is rolled back to its prior value.
@Test
public void testScheduleWriteOpWhenReplayFails() {
MockTimer timer = new MockTimer();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Override the coordinator with a coordinator that throws
// an exception when replay is called.
SnapshotRegistry snapshotRegistry = ctx.coordinator.snapshotRegistry();
ctx.coordinator = new SnapshottableCoordinator<>(
new LogContext(),
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, ctx.timer) {
@Override
public void replay(
long offset,
long producerId,
short producerEpoch,
String record
) throws RuntimeException {
throw new IllegalArgumentException("error");
}
},
TP
);
// Write. It should fail.
CompletableFuture<String> write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
assertFutureThrows(IllegalArgumentException.class, write);
// Verify that the state has not changed.
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
}
// Verifies that a failure in the partition writer's append aborts the write:
// the future is failed and the coordinator state is rolled back to the last
// successful write (record3/4/5 are never replayed).
@Test
public void testScheduleWriteOpWhenWriteFails() {
MockTimer timer = new MockTimer();
// The partition writer only accepts one write; the second append throws.
MockPartitionWriter writer = new MockPartitionWriter(1);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Write #1. It should succeed and be applied to the coordinator.
runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
// Verify that the state has been updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records());
// Write #2. It should fail because the writer is configured to only
// accept 1 write.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record3", "record4", "record5"), "response2"));
assertFutureThrows(KafkaException.class, write2);
// Verify that the state has not changed.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records());
}
// Verifies that a write whose high watermark never advances is failed with a
// TimeoutException once its write timeout elapses on the timer.
@Test
public void testScheduleWriteOpWhenWriteTimesOut() throws InterruptedException {
MockTimer timer = new MockTimer();
// The writer accepts the append but nothing ever commits, so the HWM
// never advances and the write can only complete via timeout.
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Write #1. We should get a TimeoutException because the HWM will not advance.
CompletableFuture<String> timedOutWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(3),
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
// Advance the mock clock past the 3ms write timeout to fire the timer task.
timer.advanceClock(4);
assertFutureThrows(org.apache.kafka.common.errors.TimeoutException.class, timedOutWrite);
}
// Verifies scheduleWriteAllOperation: the write is applied to every loaded
// coordinator partition, each append lands on its own partition, and each
// per-partition future completes after that partition's commit.
@Test
public void testScheduleWriteAllOperation() throws ExecutionException, InterruptedException, TimeoutException {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
TopicPartition coordinator0 = new TopicPartition("__consumer_offsets", 0);
TopicPartition coordinator1 = new TopicPartition("__consumer_offsets", 1);
TopicPartition coordinator2 = new TopicPartition("__consumer_offsets", 2);
// Load coordinators.
runtime.scheduleLoadOperation(coordinator0, 10);
runtime.scheduleLoadOperation(coordinator1, 10);
runtime.scheduleLoadOperation(coordinator2, 10);
// Writes. The counter distinguishes which partition got which record;
// the DirectEventProcessor runs the operations in scheduling order.
AtomicInteger cnt = new AtomicInteger(0);
List<CompletableFuture<List<String>>> writes = runtime.scheduleWriteAllOperation("write", DEFAULT_WRITE_TIMEOUT, state -> {
int counter = cnt.getAndIncrement();
return new CoordinatorResult<>(
List.of("record#" + counter),
List.of("response#" + counter)
);
});
assertEquals(1L, runtime.contextOrThrow(coordinator0).coordinator.lastWrittenOffset());
assertEquals(1L, runtime.contextOrThrow(coordinator1).coordinator.lastWrittenOffset());
assertEquals(1L, runtime.contextOrThrow(coordinator2).coordinator.lastWrittenOffset());
assertEquals(List.of(records(timer.time().milliseconds(), "record#0")), writer.entries(coordinator0));
assertEquals(List.of(records(timer.time().milliseconds(), "record#1")), writer.entries(coordinator1));
assertEquals(List.of(records(timer.time().milliseconds(), "record#2")), writer.entries(coordinator2));
// Commit.
writer.commit(coordinator0);
writer.commit(coordinator1);
writer.commit(coordinator2);
// Verify.
assertEquals(
List.of("response#0", "response#1", "response#2"),
FutureUtils.combineFutures(writes, ArrayList::new, List::addAll).get(5, TimeUnit.SECONDS)
);
}
// Verifies the transactional write path: transaction verification is started
// first, the records are appended with the producer id/epoch and the
// verification guard, and replay receives the same producer id/epoch per
// record offset.
@Test
public void testScheduleTransactionalWriteOp() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
MockCoordinatorShardBuilder shardBuilder = new MockCoordinatorShardBuilder() {
@Override
public MockCoordinatorShard build() {
return coordinator;
}
};
MockCoordinatorShardBuilderSupplier shardBuilderSupplier = new MockCoordinatorShardBuilderSupplier() {
@Override
public CoordinatorShardBuilder<MockCoordinatorShard, String> get() {
return shardBuilder;
}
};
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(shardBuilderSupplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify that the listener was registered.
verify(writer, times(1)).registerListener(eq(TP), any());
// Prepare the log config.
when(writer.config(TP)).thenReturn(new LogConfig(Map.of()));
// Prepare the transaction verification.
VerificationGuard guard = new VerificationGuard();
when(writer.maybeStartTransactionVerification(
TP,
"transactional-id",
100L,
(short) 50,
TXN_OFFSET_COMMIT_LATEST_VERSION
)).thenReturn(CompletableFuture.completedFuture(guard));
// Schedule a transactional write.
runtime.scheduleTransactionalWriteOperation(
"tnx-write",
TP,
"transactional-id",
100L,
(short) 50,
Duration.ofMillis(5000),
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the writer got the records with the correct
// producer id and producer epoch.
// Regular transactional writes (not transaction markers) use TV_UNKNOWN
verify(writer, times(1)).append(
eq(TP),
eq(guard),
eq(transactionalRecords(
100L,
(short) 50,
timer.time().milliseconds(),
"record1",
"record2"
)),
eq(TransactionVersion.TV_UNKNOWN)
);
// Verify that the coordinator got the records with the correct
// producer id and producer epoch.
verify(coordinator, times(1)).replay(
eq(0L),
eq(100L),
eq((short) 50),
eq("record1")
);
verify(coordinator, times(1)).replay(
eq(1L),
eq(100L),
eq((short) 50),
eq("record2")
);
}
// Verifies that when transaction verification fails, the transactional write
// future is failed with the verification error and the partition writer is
// never asked to append.
@Test
public void testScheduleTransactionalWriteOpWhenVerificationFails() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
MockCoordinatorShardBuilder shardBuilder = new MockCoordinatorShardBuilder() {
@Override
public MockCoordinatorShard build() {
return coordinator;
}
};
MockCoordinatorShardBuilderSupplier shardBuilderSupplier = new MockCoordinatorShardBuilderSupplier() {
@Override
public CoordinatorShardBuilder<MockCoordinatorShard, String> get() {
return shardBuilder;
}
};
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(shardBuilderSupplier)
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify that the listener was registered.
verify(writer, times(1)).registerListener(eq(TP), any());
// Fail the transaction verification.
when(writer.maybeStartTransactionVerification(
TP,
"transactional-id",
100L,
(short) 50,
TXN_OFFSET_COMMIT_LATEST_VERSION
)).thenReturn(FutureUtils.failedFuture(Errors.NOT_ENOUGH_REPLICAS.exception()));
// Schedule a transactional write.
CompletableFuture<String> future = runtime.scheduleTransactionalWriteOperation(
"tnx-write",
TP,
"transactional-id",
100L,
(short) 50,
Duration.ofMillis(5000),
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the future is failed with the expected exception.
assertFutureThrows(NotEnoughReplicasException.class, future);
// Verify that the writer is not called.
verify(writer, times(0)).append(
any(),
any(),
any(),
anyShort()
);
}
@ParameterizedTest
@MethodSource("transactionCompletionTestParameters")
public void testScheduleTransactionCompletion(TransactionResult result, short transactionVersion) throws ExecutionException, InterruptedException, TimeoutException {
// Test transaction completion with different transaction results (COMMIT/ABORT) and transaction versions (TV1/TV2).
// TV1: marker epoch can be the same as transactional records epoch (markerEpoch >= currentEpoch)
// TV2: marker epoch bumped by coordinator (markerEpoch > currentEpoch, strict validation)
// The flow exercised here: write transactional records, write the end marker,
// then advance the writer's committed offset and check that the two futures
// complete in the order their records are committed.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Transactional write #1 with epoch 5.
short transactionalEpoch = 5;
CompletableFuture<String> write1 = runtime.scheduleTransactionalWriteOperation(
"write#1",
TP,
"transactional-id",
100L,
transactionalEpoch,
DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the write is not committed yet.
assertFalse(write1.isDone());
// The last written offset is updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
// The last committed offset does not change.
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
// A new snapshot is created.
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
// Records have been replayed to the coordinator. They are stored in
// the pending set for now.
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(
100L
));
// Records have been written to the log.
assertEquals(List.of(
transactionalRecords(100L, transactionalEpoch, timer.time().milliseconds(), "record1", "record2")
), writer.entries(TP));
// Complete transaction #1.
// For TV2, the coordinator bumps the epoch before writing the marker (epoch + 1).
// For TV1, the marker uses the same epoch as transactional records.
short markerEpoch = (transactionVersion >= 2) ? (short) (transactionalEpoch + 1) : transactionalEpoch;
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
markerEpoch,
10,
result,
transactionVersion,
DEFAULT_WRITE_TIMEOUT
);
// Verify that the completion is not committed yet.
assertFalse(complete1.isDone());
// The last written offset is updated.
assertEquals(3L, ctx.coordinator.lastWrittenOffset());
// The last committed offset does not change.
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
// A new snapshot is created.
assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
// Records have been replayed to the coordinator.
ControlRecordType expectedType;
if (result == TransactionResult.COMMIT) {
// They are now in the records set if committed.
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records());
expectedType = ControlRecordType.COMMIT;
} else {
// Or they are gone if aborted.
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
expectedType = ControlRecordType.ABORT;
}
// Records have been written to the log.
assertEquals(List.of(
transactionalRecords(100L, transactionalEpoch, timer.time().milliseconds(), "record1", "record2"),
endTransactionMarker(100L, markerEpoch, timer.time().milliseconds(), 10, expectedType)
), writer.entries(TP));
// Commit write #1.
// Advancing the committed offset to 2 covers only the transactional
// records, so only the write future completes here.
writer.commit(TP, 2);
// The write is completed.
assertTrue(write1.isDone());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
// Commit completion #1.
writer.commit(TP, 3);
// The transaction is completed.
assertTrue(complete1.isDone());
assertNull(complete1.get(5, TimeUnit.SECONDS));
}
/**
 * Provides every combination of transaction result (COMMIT/ABORT) and
 * transaction version (TV1/TV2) for {@code testScheduleTransactionCompletion}.
 */
private static Stream<Arguments> transactionCompletionTestParameters() {
    Stream.Builder<Arguments> cases = Stream.builder();
    for (TransactionResult result : new TransactionResult[]{TransactionResult.COMMIT, TransactionResult.ABORT}) {
        for (short version = 1; version <= 2; version++) {
            cases.add(Arguments.of(result, version));
        }
    }
    return cases.build();
}
@Test
public void testScheduleTransactionCompletionWhenWriteTimesOut() throws InterruptedException {
// Verifies that a transaction-completion future fails with TimeoutException
// when the marker is written to the log but never committed (the HWM does
// not advance), and that the coordinator state is NOT reverted on timeout.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Complete #1. We should get a TimeoutException because the HWM will not advance.
// Use TV_1 since this test doesn't check epoch validation - it only tests timeout behavior.
// A 3ms timeout is deliberately shorter than the clock advance below.
CompletableFuture<Void> timedOutCompletion = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
(short) 5,
10,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
Duration.ofMillis(3)
);
// Verify that the state has been updated.
assertEquals(1L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList());
// Advance clock to timeout Complete #1.
timer.advanceClock(4);
// Fully-qualified name to avoid clashing with java.util.concurrent.TimeoutException.
assertFutureThrows(org.apache.kafka.common.errors.TimeoutException.class, timedOutCompletion);
// Verify that the state is still the same. We don't revert when the
// operation timeouts because the record has been written to the log.
assertEquals(1L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList());
}
@Test
public void testScheduleTransactionCompletionWhenWriteFails() {
// Verifies that when appending the end-transaction marker fails in the
// partition writer, the completion future fails with KafkaException and the
// coordinator state (including pending transactional records) is untouched.
MockTimer timer = new MockTimer();
// The partition writer accepts records but fails on markers.
// This failure happens at MockPartitionWriter.append() BEFORE epoch validation,
// so transactionVersion doesn't matter - we use TV_1 as a default.
MockPartitionWriter writer = new MockPartitionWriter(true);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Write #1. It should succeed and be applied to the coordinator.
runtime.scheduleTransactionalWriteOperation(
"write#1",
TP,
"transactional-id",
100L,
(short) 5,
DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the state has been updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
// Complete transaction #1. It should fail at partitionWriter.append() before epoch validation.
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
(short) 5,
10,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
DEFAULT_WRITE_TIMEOUT
);
assertFutureThrows(KafkaException.class, complete1);
// Verify that the state has not changed.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
}
@Test
public void testScheduleTransactionCompletionWhenReplayFails() {
// Verifies that when replaying the end-transaction marker throws, the
// completion future fails with that exception and the in-memory state is
// reverted to the snapshot taken before the completion attempt.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Override the coordinator with a coordinator that throws
// an exception when replayEndTransactionMarker is called.
// The same snapshot registry is reused so earlier snapshots remain valid.
SnapshotRegistry snapshotRegistry = ctx.coordinator.snapshotRegistry();
ctx.coordinator = new SnapshottableCoordinator<>(
new LogContext(),
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, ctx.timer) {
@Override
public void replayEndTransactionMarker(
long producerId,
short producerEpoch,
TransactionResult result
) throws RuntimeException {
throw new IllegalArgumentException("error");
}
},
TP
);
// Write #1. It should succeed and be applied to the coordinator.
runtime.scheduleTransactionalWriteOperation(
"write#1",
TP,
"transactional-id",
100L,
(short) 5,
DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the state has been updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
assertEquals(List.of(
transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
), writer.entries(TP));
// Complete transaction #1. It should fail.
// Use TV_1 since this test doesn't check epoch validation
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
(short) 5,
10,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
DEFAULT_WRITE_TIMEOUT
);
assertFutureThrows(IllegalArgumentException.class, complete1);
// Verify that the state has not changed.
// Note: the writer entries also stay unchanged, i.e. no marker was appended.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
assertEquals(List.of(
transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
), writer.entries(TP));
}
@ParameterizedTest
@MethodSource("epochValidationFailureTestParameters")
public void testScheduleTransactionCompletionWhenEpochValidationFails(
short transactionVersion,
short transactionalEpoch,
short markerEpoch
) {
// Test that InvalidProducerEpochException is thrown when epoch validation fails during transaction completion.
// The EpochValidatingPartitionWriter rejects the marker, so the coordinator
// state must be rolled back and no marker must reach the log.
MockTimer timer = new MockTimer();
MockPartitionWriter.EpochValidatingPartitionWriter writer = new MockPartitionWriter.EpochValidatingPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
// Write transactional records with the given epoch.
runtime.scheduleTransactionalWriteOperation(
"write#1",
TP,
"transactional-id",
100L,
transactionalEpoch,
DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"),
TXN_OFFSET_COMMIT_LATEST_VERSION
);
// Verify that the state has been updated.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
// Complete transaction with an invalid marker epoch.
// This will trigger epoch validation in the partition writer, which should fail.
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
markerEpoch,
10,
TransactionResult.COMMIT,
transactionVersion,
DEFAULT_WRITE_TIMEOUT
);
// Verify that InvalidProducerEpochException is thrown.
assertFutureThrows(InvalidProducerEpochException.class, complete1);
// Verify that the state has been reverted (no marker written).
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
assertEquals(Set.of(), ctx.coordinator.coordinator().records());
// Only transactional records should be in the log, no marker.
assertEquals(1, writer.entries(TP).size());
}
/**
 * Provides (transactionVersion, transactionalEpoch, markerEpoch) triples that
 * must fail epoch validation during transaction completion.
 */
private static Stream<Arguments> epochValidationFailureTestParameters() {
    List<Arguments> cases = new ArrayList<>();
    // TV1: markerEpoch < currentEpoch should fail
    cases.add(Arguments.of((short) 1, (short) 5, (short) 4));
    // TV2: markerEpoch < currentEpoch should fail
    cases.add(Arguments.of((short) 2, (short) 5, (short) 4));
    // TV2: markerEpoch == currentEpoch should fail (strict validation)
    cases.add(Arguments.of((short) 2, (short) 5, (short) 5));
    return cases.stream();
}
@Test
public void testScheduleReadOp() throws ExecutionException, InterruptedException, TimeoutException {
// Verifies that a read operation completes immediately, is given the last
// committed offset, and only observes state up to that offset.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
// Write #2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2"));
// Commit write #1.
// Only offset 2 is committed, so write #2 (offsets 3-4) stays pending.
writer.commit(TP, 2);
// Write #1 is completed.
assertTrue(write1.isDone());
// Write #2 is not.
assertFalse(write2.isDone());
// The last written and committed offsets are updated.
assertEquals(4, ctx.coordinator.lastWrittenOffset());
assertEquals(2, ctx.coordinator.lastCommittedOffset());
// Read.
CompletableFuture<String> read = runtime.scheduleReadOperation("read", TP, (state, offset) -> {
// The read operation should be given the last committed offset.
assertEquals(ctx.coordinator.lastCommittedOffset(), offset);
return "read-response";
});
// The read is completed immediately.
assertTrue(read.isDone());
assertEquals("read-response", read.get(5, TimeUnit.SECONDS));
}
@Test
public void testScheduleReadOpWhenPartitionInactive() {
    // A read scheduled against a partition that was never loaded must fail
    // with NotCoordinatorException.
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> coordinatorRuntime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // No load operation was scheduled for TP, so the coordinator does not exist.
    CompletableFuture<String> readFuture = coordinatorRuntime.scheduleReadOperation(
        "read",
        TP,
        (state, offset) -> "read-response"
    );
    assertFutureThrows(NotCoordinatorException.class, readFuture);
}
@Test
public void testScheduleReadOpWhenOpsFails() {
// Verifies that an exception thrown by the read operation itself is used to
// complete the read future exceptionally.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
// Write #1.
runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
// Write #2.
runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2"));
// Commit write #1.
writer.commit(TP, 2);
// Read. It fails with an exception that is used to complete the future.
CompletableFuture<String> read = runtime.scheduleReadOperation("read", TP, (state, offset) -> {
assertEquals(ctx.coordinator.lastCommittedOffset(), offset);
throw new IllegalArgumentException("error");
});
assertFutureThrows(IllegalArgumentException.class, read);
}
@Test
public void testScheduleReadAllOp() throws ExecutionException, InterruptedException, TimeoutException {
// Verifies that scheduleReadAllOperation runs the read against every loaded
// coordinator partition and returns one future per partition.
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
TopicPartition coordinator0 = new TopicPartition("__consumer_offsets", 0);
TopicPartition coordinator1 = new TopicPartition("__consumer_offsets", 1);
TopicPartition coordinator2 = new TopicPartition("__consumer_offsets", 2);
// Loads the coordinators.
runtime.scheduleLoadOperation(coordinator0, 10);
runtime.scheduleLoadOperation(coordinator1, 10);
runtime.scheduleLoadOperation(coordinator2, 10);
// Writes
runtime.scheduleWriteOperation("write#0", coordinator0, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record0"), "response0"));
runtime.scheduleWriteOperation("write#1", coordinator1, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1"), "response1"));
runtime.scheduleWriteOperation("write#2", coordinator2, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record2"), "response2"));
// Commit writes.
writer.commit(coordinator0);
writer.commit(coordinator1);
writer.commit(coordinator2);
// Read.
// Each future carries the records visible on one partition; combining them
// yields the records of all three coordinators.
List<CompletableFuture<List<String>>> responses = runtime.scheduleReadAllOperation(
"read",
(state, offset) -> new ArrayList<>(state.records())
);
assertEquals(
List.of("record0", "record1", "record2"),
FutureUtils.combineFutures(responses, ArrayList::new, List::addAll).get(5, TimeUnit.SECONDS)
);
}
@Test
public void testClose() throws Exception {
// Verifies that closing the runtime fails pending operations with
// NotCoordinatorException, cancels pending timers, closes the loader and
// shuts down the executor service.
MockCoordinatorLoader loader = spy(new MockCoordinatorLoader());
MockTimer timer = new MockTimer();
ExecutorService executorService = mock(ExecutorService.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withLoader(loader)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(executorService)
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Check initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"));
// Write #2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2"));
// Writes are inflight.
assertFalse(write1.isDone());
assertFalse(write2.isDone());
// The coordinator timer should be empty.
assertEquals(0, ctx.timer.size());
// Timer #1. This is never executed.
ctx.timer.schedule("timer-1", 10, TimeUnit.SECONDS, true,
() -> new CoordinatorResult<>(List.of("record5", "record6"), null));
// The coordinator timer should have one pending task.
assertEquals(1, ctx.timer.size());
// Close the runtime.
runtime.close();
// All the pending operations are completed with NotCoordinatorException.
assertFutureThrows(NotCoordinatorException.class, write1);
assertFutureThrows(NotCoordinatorException.class, write2);
// Verify that the loader was closed.
verify(loader).close();
// The coordinator timer should be empty.
assertEquals(0, ctx.timer.size());
// Verify that the executor service was shutdown.
verify(executorService).shutdown();
}
@Test
public void testOnNewMetadataImage() {
    // Verifies that a coordinator receives, on load completion, the metadata
    // image that is current at that moment: coordinator 0 finishes loading
    // before the new image is published (gets EMPTY, then the delta), while
    // coordinator 1 finishes after (gets the new image directly).
    TopicPartition tp0 = new TopicPartition("__consumer_offsets", 0);
    TopicPartition tp1 = new TopicPartition("__consumer_offsets", 1);
    MockTimer timer = new MockTimer();
    MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
    MockPartitionWriter writer = mock(MockPartitionWriter.class);
    MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
    MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withLoader(loader)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(supplier)
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();
    MockCoordinatorShard coordinator0 = mock(MockCoordinatorShard.class);
    MockCoordinatorShard coordinator1 = mock(MockCoordinatorShard.class);
    // The builder is a fluent mock; every with* call returns the builder itself.
    when(supplier.get()).thenReturn(builder);
    when(builder.withSnapshotRegistry(any())).thenReturn(builder);
    when(builder.withLogContext(any())).thenReturn(builder);
    when(builder.withTime(any())).thenReturn(builder);
    when(builder.withTimer(any())).thenReturn(builder);
    when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
    when(builder.withTopicPartition(any())).thenReturn(builder);
    when(builder.withExecutor(any())).thenReturn(builder);
    // Consecutive build() calls produce the two shards in load order.
    when(builder.build())
        .thenReturn(coordinator0)
        .thenReturn(coordinator1);
    // Keep the load futures in hand so that their completion can be
    // interleaved with the image publication below.
    CompletableFuture<CoordinatorLoader.LoadSummary> future0 = new CompletableFuture<>();
    when(loader.load(eq(tp0), argThat(coordinatorMatcher(runtime, tp0)))).thenReturn(future0);
    CompletableFuture<CoordinatorLoader.LoadSummary> future1 = new CompletableFuture<>();
    when(loader.load(eq(tp1), argThat(coordinatorMatcher(runtime, tp1)))).thenReturn(future1);
    runtime.scheduleLoadOperation(tp0, 0);
    runtime.scheduleLoadOperation(tp1, 0);
    assertEquals(coordinator0, runtime.contextOrThrow(tp0).coordinator.coordinator());
    assertEquals(coordinator1, runtime.contextOrThrow(tp1).coordinator.coordinator());
    // Coordinator 0 is loaded. It should get the current image
    // that is the empty one.
    future0.complete(null);
    verify(coordinator0).onLoaded(CoordinatorMetadataImage.EMPTY);
    // Publish a new image.
    CoordinatorMetadataDelta delta = new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY));
    CoordinatorMetadataImage newImage = CoordinatorMetadataImage.EMPTY;
    runtime.onNewMetadataImage(newImage, delta);
    // Coordinator 0 should be notified about it.
    verify(coordinator0).onNewMetadataImage(newImage, delta);
    // Coordinator 1 is loaded. It should get the current image
    // that is the new image.
    future1.complete(null);
    verify(coordinator1).onLoaded(newImage);
}
@Test
public void testScheduleTimer() throws InterruptedException {
// Verifies that coordinator timers fire when the mock clock passes their
// deadlines and that the records they produce are applied to the shard.
MockTimer timer = new MockTimer();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(Duration.ofMillis(30))
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Check initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.coordinator.lastWrittenOffset());
assertEquals(0, ctx.coordinator.lastCommittedOffset());
// The coordinator timer should be empty.
assertEquals(0, ctx.timer.size());
// Timer #1.
ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
() -> new CoordinatorResult<>(List.of("record1", "record2"), null));
// Timer #2.
ctx.timer.schedule("timer-2", 20, TimeUnit.MILLISECONDS, true,
() -> new CoordinatorResult<>(List.of("record3", "record4"), null));
// The coordinator timer should have two pending tasks.
assertEquals(2, ctx.timer.size());
// Advance time to fire timer #1,
timer.advanceClock(10 + 1);
// Verify that the operation was executed.
assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records());
assertEquals(1, ctx.timer.size());
// Advance time to fire timer #2,
// (a further 11ms, i.e. 22ms total, past timer #2's 20ms deadline).
timer.advanceClock(10 + 1);
// Verify that the operation was executed.
assertEquals(Set.of("record1", "record2", "record3", "record4"), ctx.coordinator.coordinator().records());
assertEquals(0, ctx.timer.size());
}
@Test
public void testRescheduleTimer() throws InterruptedException {
// Verifies that scheduling a timer with an existing key replaces the pending
// task: only the most recently scheduled operation for a key takes effect,
// even if an earlier firing is already queued in the event processor.
MockTimer timer = new MockTimer();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(processor)
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Poll twice to process the pending events related to the loading.
processor.poll();
processor.poll();
// Check initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.timer.size());
// The processor should be empty.
assertEquals(0, processor.size());
// Timer #1.
ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
() -> new CoordinatorResult<>(List.of("record1"), null));
// The coordinator timer should have one pending task.
assertEquals(1, ctx.timer.size());
// Advance time to fire the pending timer.
timer.advanceClock(10 + 1);
// An event should be waiting in the processor.
assertEquals(1, processor.size());
// Schedule a second timer with the same key.
ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
() -> new CoordinatorResult<>(List.of("record2"), null));
// The coordinator timer should still have one pending task.
assertEquals(1, ctx.timer.size());
// Schedule a third timer with the same key.
// This supersedes the second one before it ever fires.
ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
() -> new CoordinatorResult<>(List.of("record3"), null));
// The coordinator timer should still have one pending task.
assertEquals(1, ctx.timer.size());
// Advance time to fire the pending timer.
timer.advanceClock(10 + 1);
// Another event should be waiting in the processor.
assertEquals(2, processor.size());
// Poll twice to execute the two pending events.
assertTrue(processor.poll());
assertTrue(processor.poll());
// Verify that the correct operation was executed. Only the third
// instance should have been executed here.
assertEquals(Set.of("record3"), ctx.coordinator.coordinator().records());
assertEquals(0, ctx.timer.size());
}
/**
 * Verifies that cancelling a timer removes its pending task, and that an
 * event already queued for a superseded schedule becomes a no-op once the
 * key has been cancelled: no records are ever applied.
 */
@Test
public void testCancelTimer() throws InterruptedException {
    MockTimer mockTimer = new MockTimer();
    ManualEventProcessor eventProcessor = new ManualEventProcessor();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(eventProcessor)
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator; two polls drain the load-related events.
    runtime.scheduleLoadOperation(TP, 10);
    eventProcessor.poll();
    eventProcessor.poll();

    // Initial state: no pending timers, no queued events.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0, context.timer.size());
    assertEquals(0, eventProcessor.size());

    // Schedule "timer-1" and let it fire; the resulting event sits in the processor.
    context.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
        () -> new CoordinatorResult<>(List.of("record1"), null));
    assertEquals(1, context.timer.size());
    mockTimer.advanceClock(10 + 1);
    assertEquals(1, eventProcessor.size());

    // Re-schedule under the same key, then cancel the key entirely.
    context.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true,
        () -> new CoordinatorResult<>(List.of("record2"), null));
    assertEquals(1, context.timer.size());
    context.timer.cancel("timer-1");
    assertEquals(0, context.timer.size());

    // Advancing past the cancelled deadline enqueues nothing new.
    mockTimer.advanceClock(10 + 1);
    assertEquals(1, eventProcessor.size());

    // Executing the stale event must not apply any operation.
    assertTrue(eventProcessor.poll());
    assertEquals(Set.of(), context.coordinator.coordinator().records());
    assertEquals(0, context.timer.size());
}
/**
 * Verifies that a retryable timer operation which throws is automatically
 * rescheduled with the default retry backoff (500ms) until it is cancelled.
 */
@Test
public void testRetryableTimer() throws InterruptedException {
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator and check that no timers are pending.
    runtime.scheduleLoadOperation(TP, 10);
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0, context.timer.size());

    // Schedule a retryable operation that always fails.
    AtomicInteger attempts = new AtomicInteger(0);
    context.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, () -> {
        attempts.incrementAndGet();
        throw new KafkaException("error");
    });
    assertEquals(1, context.timer.size());

    // First expiration runs the operation; the failure re-enqueues it.
    mockTimer.advanceClock(10 + 1);
    assertEquals(1, attempts.get());
    assertEquals(1, context.timer.size());

    // Each subsequent retry happens after the default 500ms backoff.
    for (int expectedAttempts = 2; expectedAttempts <= 3; expectedAttempts++) {
        mockTimer.advanceClock(500 + 1);
        assertEquals(expectedAttempts, attempts.get());
        assertEquals(1, context.timer.size());
    }

    // Cancelling the key drops the pending retry.
    context.timer.cancel("timer-1");
    assertEquals(0, context.timer.size());
}
/**
 * Verifies that a retryable timer scheduled with an explicit retry backoff
 * (1000ms here) ignores the default 500ms backoff and only retries after the
 * custom interval has elapsed.
 */
@Test
public void testRetryableTimerWithCustomBackoff() throws InterruptedException {
MockTimer timer = new MockTimer();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(new MockPartitionWriter())
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Check initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(0, ctx.timer.size());
// Timer #1 with a custom retry backoff of 1000ms; the operation always fails,
// so every execution schedules a retry.
AtomicInteger cnt = new AtomicInteger(0);
ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, 1000, () -> {
cnt.incrementAndGet();
throw new KafkaException("error");
});
// The coordinator timer should have one pending task.
assertEquals(1, ctx.timer.size());
// Advance time to fire the pending timer.
timer.advanceClock(10 + 1);
// The timer should have been called and the timer should have one pending task.
assertEquals(1, cnt.get());
assertEquals(1, ctx.timer.size());
// Advance past the default retry backoff (500ms).
timer.advanceClock(500 + 1);
// The timer should not have been called yet: the custom 1000ms backoff applies.
assertEquals(1, cnt.get());
assertEquals(1, ctx.timer.size());
// Advance past the custom retry.
timer.advanceClock(500 + 1);
// The timer should have been called and the timer should have one pending task.
assertEquals(2, cnt.get());
assertEquals(1, ctx.timer.size());
// Advance past the default retry backoff.
timer.advanceClock(500 + 1);
// The timer should not have been called yet.
assertEquals(2, cnt.get());
assertEquals(1, ctx.timer.size());
// Advance past the custom retry.
timer.advanceClock(500 + 1);
// The timer should have been called and the timer should have one pending task.
assertEquals(3, cnt.get());
assertEquals(1, ctx.timer.size());
// Cancel Timer #1.
ctx.timer.cancel("timer-1");
assertEquals(0, ctx.timer.size());
}
/**
 * Verifies that a non-retryable timer operation which throws is executed
 * exactly once and is never rescheduled.
 */
@Test
public void testNonRetryableTimer() throws InterruptedException {
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator and check that no timers are pending.
    runtime.scheduleLoadOperation(TP, 10);
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0, context.timer.size());

    // Schedule a non-retryable operation that always fails.
    AtomicInteger attempts = new AtomicInteger(0);
    context.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, false, () -> {
        attempts.incrementAndGet();
        throw new KafkaException("error");
    });
    assertEquals(1, context.timer.size());

    // After expiration the operation ran once; the failure is NOT retried,
    // so no pending task remains.
    mockTimer.advanceClock(10 + 1);
    assertEquals(1, attempts.get());
    assertEquals(0, context.timer.size());
}
/**
 * Verifies that scheduleIfAbsent does not refresh the deadline of an
 * already-scheduled key: the original timeout still applies and the
 * operation runs exactly once.
 */
@Test
public void testTimerScheduleIfAbsent() throws InterruptedException {
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator and check that no timers are pending.
    runtime.scheduleLoadOperation(TP, 10);
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0, context.timer.size());

    // Schedule "timer-1" for the first time; the operation always fails
    // (non-retryable) so the count tells us how many times it ran.
    AtomicInteger attempts = new AtomicInteger(0);
    context.timer.scheduleIfAbsent("timer-1", 10, TimeUnit.MILLISECONDS, false, () -> {
        attempts.incrementAndGet();
        throw new KafkaException("error");
    });
    assertEquals(1, context.timer.size());

    // Advance half the timeout, then attempt to re-schedule the same key.
    // Since the key already exists, the deadline must NOT be refreshed.
    mockTimer.advanceClock(10 / 2);
    context.timer.scheduleIfAbsent("timer-1", 10, TimeUnit.MILLISECONDS, false, () -> {
        attempts.incrementAndGet();
        throw new KafkaException("error");
    });

    // The original deadline is still in force: the remaining half fires it once.
    mockTimer.advanceClock(10 / 2 + 1);
    assertEquals(1, attempts.get());
    assertEquals(0, context.timer.size());
}
/**
 * Verifies that isScheduled accurately tracks keys across schedule,
 * cancellation and expiration.
 */
@Test
public void testTimerIsScheduled() throws InterruptedException {
    MockTimer mockTimer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    runtime.scheduleLoadOperation(TP, 10);
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);

    // Nothing is scheduled initially.
    assertEquals(0, context.timer.size());
    assertFalse(context.timer.isScheduled("timer-1"));

    // Scheduling a key makes it visible; unknown keys stay unscheduled.
    context.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, false,
        () -> new CoordinatorResult<>(List.of("record1"), null));
    assertTrue(context.timer.isScheduled("timer-1"));
    assertFalse(context.timer.isScheduled("timer-2"));
    assertEquals(1, context.timer.size());

    // A second, independent key is tracked alongside the first.
    context.timer.schedule("timer-2", 20, TimeUnit.MILLISECONDS, false,
        () -> new CoordinatorResult<>(List.of("record2"), null));
    assertTrue(context.timer.isScheduled("timer-1"));
    assertTrue(context.timer.isScheduled("timer-2"));
    assertEquals(2, context.timer.size());

    // Cancellation removes only the cancelled key.
    context.timer.cancel("timer-1");
    assertFalse(context.timer.isScheduled("timer-1"));
    assertTrue(context.timer.isScheduled("timer-2"));
    assertEquals(1, context.timer.size());

    // Expiration removes the remaining key.
    mockTimer.advanceClock(21);
    assertFalse(context.timer.isScheduled("timer-2"));
    assertEquals(0, context.timer.size());
}
/**
 * Verifies coordinator partition state transitions and that each transition is
 * recorded by the runtime metrics: INITIAL -> LOADING -> FAILED on a load
 * failure, INITIAL -> LOADING -> ACTIVE on success, and both FAILED and ACTIVE
 * -> CLOSED when the runtime is closed.
 */
@Test
public void testStateChanges() throws Exception {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorLoader loader = mock(MockCoordinatorLoader.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(loader)
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(runtimeMetrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Stub the shard builder fluent chain so build() yields the mocked shard.
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// The load future is completed manually below to drive the transitions.
CompletableFuture<CoordinatorLoader.LoadSummary> future = new CompletableFuture<>();
when(loader.load(eq(TP), argThat(coordinatorMatcher(runtime, TP)))).thenReturn(future);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the context succeeds and the coordinator should be in loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(LOADING, ctx.state);
verify(runtimeMetrics, times(1)).recordPartitionStateChange(INITIAL, LOADING);
// When the loading fails, the coordinator transitions to failed.
future.completeExceptionally(new Exception("failure"));
assertEquals(FAILED, ctx.state);
verify(runtimeMetrics, times(1)).recordPartitionStateChange(LOADING, FAILED);
// Start loading a new topic partition.
TopicPartition tp = new TopicPartition("__consumer_offsets", 1);
future = new CompletableFuture<>();
when(loader.load(eq(tp), argThat(coordinatorMatcher(runtime, tp)))).thenReturn(future);
// Schedule the loading.
runtime.scheduleLoadOperation(tp, 0);
// Getting the context succeeds and the coordinator should be in loading.
ctx = runtime.contextOrThrow(tp);
assertEquals(coordinator, ctx.coordinator.coordinator());
assertEquals(LOADING, ctx.state);
verify(runtimeMetrics, times(2)).recordPartitionStateChange(INITIAL, LOADING);
// When the loading completes, the coordinator transitions to active.
future.complete(null);
assertEquals(ACTIVE, ctx.state);
verify(runtimeMetrics, times(1)).recordPartitionStateChange(LOADING, ACTIVE);
// Closing the runtime transitions both partitions (one FAILED, one ACTIVE) to CLOSED.
runtime.close();
verify(runtimeMetrics, times(1)).recordPartitionStateChange(FAILED, CLOSED);
verify(runtimeMetrics, times(1)).recordPartitionStateChange(ACTIVE, CLOSED);
}
/**
 * Verifies that a successful partition load records the load start/end time
 * in the partition-load sensor of the runtime metrics.
 */
@Test
public void testPartitionLoadSensor() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
long startTimeMs = timer.time().milliseconds();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader(
new CoordinatorLoader.LoadSummary(
startTimeMs,
startTimeMs + 1000,
startTimeMs + 500,
30,
3000),
List.of(),
List.of()))
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(runtimeMetrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Stub the shard builder fluent chain so build() yields the mocked shard.
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Getting the coordinator context fails because the coordinator
// does not exist until scheduleLoadOperation is called.
assertThrows(NotCoordinatorException.class, () -> runtime.contextOrThrow(TP));
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the coordinator context succeeds now.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
// When the loading completes, the coordinator transitions to active.
assertEquals(ACTIVE, ctx.state);
// The load summary's start/end times are forwarded to the sensor.
verify(runtimeMetrics, times(1)).recordPartitionLoadSensor(startTimeMs, startTimeMs + 1000);
}
/**
 * Verifies that after loading records (last written offset 27, committed
 * offset 15), snapshots strictly below the committed offset are pruned while
 * snapshots at the committed and last-written offsets are retained.
 */
@Test
public void testPartitionLoadGeneratesSnapshotAtHighWatermark() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(Time.SYSTEM)
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader(
new CoordinatorLoader.LoadSummary(
1000,
2000,
1500,
30,
3000),
List.of(5L, 15L, 27L),
List.of(5L, 15L)))
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(runtimeMetrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Stub the shard builder fluent chain so build() yields the mocked shard.
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the coordinator context succeeds now.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
// When the loading completes, the coordinator transitions to active.
assertEquals(ACTIVE, ctx.state);
assertEquals(27L, ctx.coordinator.lastWrittenOffset());
assertEquals(15L, ctx.coordinator.lastCommittedOffset());
// Snapshots below the committed offset (0 and 5) were discarded; 15 and 27 remain.
assertFalse(ctx.coordinator.snapshotRegistry().hasSnapshot(0L));
assertFalse(ctx.coordinator.snapshotRegistry().hasSnapshot(5L));
assertTrue(ctx.coordinator.snapshotRegistry().hasSnapshot(15L));
assertTrue(ctx.coordinator.snapshotRegistry().hasSnapshot(27L));
}
/**
 * Verifies that when a load completes without any records, both the last
 * written and last committed offsets stay at 0 and the snapshot at offset 0
 * is retained.
 */
@Test
public void testPartitionLoadGeneratesSnapshotAtHighWatermarkNoRecordsLoaded() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = mock(MockPartitionWriter.class);
MockCoordinatorShardBuilderSupplier supplier = mock(MockCoordinatorShardBuilderSupplier.class);
MockCoordinatorShardBuilder builder = mock(MockCoordinatorShardBuilder.class);
MockCoordinatorShard coordinator = mock(MockCoordinatorShard.class);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(Time.SYSTEM)
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader(
new CoordinatorLoader.LoadSummary(
1000,
2000,
1500,
30,
3000),
List.of(),
List.of()))
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(supplier)
.withCoordinatorRuntimeMetrics(runtimeMetrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Stub the shard builder fluent chain so build() yields the mocked shard.
when(builder.withSnapshotRegistry(any())).thenReturn(builder);
when(builder.withLogContext(any())).thenReturn(builder);
when(builder.withTime(any())).thenReturn(builder);
when(builder.withTimer(any())).thenReturn(builder);
when(builder.withCoordinatorMetrics(any())).thenReturn(builder);
when(builder.withTopicPartition(any())).thenReturn(builder);
when(builder.withExecutor(any())).thenReturn(builder);
when(builder.build()).thenReturn(coordinator);
when(supplier.get()).thenReturn(builder);
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 0);
// Getting the coordinator context succeeds now.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
// When the loading completes, the coordinator transitions to active.
assertEquals(ACTIVE, ctx.state);
// No records were replayed, so offsets stay at 0 and the initial snapshot survives.
assertEquals(0L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertTrue(ctx.coordinator.snapshotRegistry().hasSnapshot(0L));
}
/**
 * Verifies that successive commits coalesce into a single pending
 * high-watermark event, and that processing that event advances the last
 * committed offset and completes the outstanding write futures.
 */
@Test
public void testHighWatermarkUpdate() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(processor)
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(0))
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator. Poll once to execute the load operation and once
// to complete the load.
runtime.scheduleLoadOperation(TP, 10);
processor.poll();
processor.poll();
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
processor.poll();
// Write #2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record2"), "response2")
);
processor.poll();
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1"),
records(timer.time().milliseconds(), "record2")
), writer.entries(TP));
// There is no pending high watermark (-1 is the sentinel for "none").
assertEquals(-1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Commit the first record.
writer.commit(TP, 1);
// We should have one pending event and the pending high watermark should be set.
assertEquals(1, processor.size());
assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Commit the second record. The second commit coalesces into the same
// pending event; only the watermark value is updated.
writer.commit(TP, 2);
// We should still have one pending event and the pending high watermark should be updated.
assertEquals(1, processor.size());
assertEquals(2, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Poll once to process the high watermark update and complete the writes.
processor.poll();
assertEquals(-1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(2, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(write1.isDone());
assertTrue(write2.isDone());
}
/**
 * Verifies that exceptions thrown while recording event purgatory time during
 * deferred-event completion do not prevent the remaining deferred write
 * futures from completing successfully.
 */
@Test
public void testHighWatermarkUpdateWithDeferredEventExceptions() throws ExecutionException, InterruptedException, TimeoutException {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntimeMetrics metrics = mock(CoordinatorRuntimeMetrics.class);
// All operations will throw an exception when completed.
doThrow(new KafkaException("error")).when(metrics).recordEventPurgatoryTime(anyLong());
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(Duration.ofMillis(20))
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(metrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(10))
.withExecutorService(mock(ExecutorService.class))
.build();
// Load the coordinator.
runtime.scheduleLoadOperation(TP, 10);
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
// Complete transaction #1, to force the flush of write #2.
// Use TV_1 since this test doesn't check epoch validation
CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
"complete#1",
TP,
100L,
(short) 50,
10,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
DEFAULT_WRITE_TIMEOUT
);
// Write #2 but without any records.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of(), "response2")
);
// Write #3, also without any records. Should complete together with write #2.
CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of(), "response3")
);
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1"),
endTransactionMarker(100L, (short) 50, timer.time().milliseconds(), 10, ControlRecordType.COMMIT)
), writer.entries(TP));
// Verify that no writes are committed yet.
assertFalse(write1.isDone());
assertFalse(complete1.isDone());
assertFalse(write2.isDone());
assertFalse(write3.isDone());
// Commit the records and transaction marker.
writer.commit(TP, 2);
// All write completions throw exceptions after completing their futures.
// Despite the exceptions, all writes should still complete.
assertTrue(write1.isDone());
assertTrue(complete1.isDone());
assertTrue(write2.isDone());
assertTrue(write3.isDone());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies that the per-write timeout timer tasks are cancelled (and removed
 * from the mock timer) once the high watermark advances past the writes and
 * the write futures complete.
 */
@Test
public void testWriteEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(processor)
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(0))
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator. Poll once to execute the load operation and once
// to complete the load.
runtime.scheduleLoadOperation(TP, 10);
processor.poll();
processor.poll();
// Write#1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("Write#1", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
processor.poll();
// Write#2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("Write#2", TP, DEFAULT_WRITE_TIMEOUT,
state -> new CoordinatorResult<>(List.of("record2"), "response2")
);
processor.poll();
// Records have been written to the log.
assertEquals(List.of(
records(timer.time().milliseconds(), "record1"),
records(timer.time().milliseconds(), "record2")
), writer.entries(TP));
// The write timeout tasks exist, one per scheduled write.
assertEquals(2, timer.size());
// Commit the first record.
writer.commit(TP, 1);
// Commit the second record.
writer.commit(TP, 2);
// We should still have one pending event and the pending high watermark should be updated.
assertEquals(1, processor.size());
assertEquals(2, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// The write timeout tasks should have not yet been cancelled: the high
// watermark event has not been processed yet.
assertEquals(2, timer.size());
timer.taskQueue().forEach(taskEntry -> assertFalse(taskEntry.cancelled()));
// Poll once to process the high watermark update and complete the writes.
processor.poll();
assertEquals(-1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(2, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(write1.isDone());
assertTrue(write2.isDone());
// All timer tasks have been cancelled. Hence, they have been removed in MockTimer.
assertEquals(0, timer.size());
}
/**
 * Verifies that the write-timeout timer task created for a transaction
 * completion is cancelled (and removed from the mock timer) once the high
 * watermark advances past the transaction marker.
 */
@Test
public void testCoordinatorCompleteTransactionEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated() {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(processor)
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withExecutorService(mock(ExecutorService.class))
.build();
// Loads the coordinator. Poll once to execute the load operation and once
// to complete the load.
runtime.scheduleLoadOperation(TP, 10);
processor.poll();
processor.poll();
// transaction completion.
// Use TV_1 since this test doesn't check epoch validation - it only tests transaction completion behavior.
CompletableFuture<Void> write1 = runtime.scheduleTransactionCompletion(
"transactional-write",
TP,
100L,
(short) 50,
1,
TransactionResult.COMMIT,
TransactionVersion.TV_1.featureLevel(),
DEFAULT_WRITE_TIMEOUT
);
processor.poll();
// Records have been written to the log.
assertEquals(List.of(
endTransactionMarker(100, (short) 50, timer.time().milliseconds(), 1, ControlRecordType.COMMIT)
), writer.entries(TP));
// The write timeout task exists.
assertEquals(1, timer.size());
// Commit the first record.
writer.commit(TP, 1);
// We should still have one pending event and the pending high watermark should be updated.
assertEquals(1, processor.size());
assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// The write timeout task should have not yet been cancelled: the high
// watermark event has not been processed yet.
assertEquals(1, timer.size());
timer.taskQueue().forEach(taskEntry -> assertFalse(taskEntry.cancelled()));
// Poll once to process the high watermark update and complete the writes.
processor.poll();
assertEquals(-1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(write1.isDone());
// All timer tasks have been cancelled. Hence, they have been removed in MockTimer.
assertEquals(0, timer.size());
}
/**
 * Verifies that a batch whose serialized size falls strictly between the
 * initial buffer size and the partition's max message size is appended
 * successfully in a single write.
 */
@Test
public void testAppendRecordBatchSize() {
    MockTimer mockTimer = new MockTimer();
    MockPartitionWriter partitionWriter = new MockPartitionWriter();
    StringSerializer stringSerializer = new StringSerializer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(partitionWriter)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(stringSerializer)
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator shard for the partition.
    runtime.scheduleLoadOperation(TP, 10);

    // A freshly loaded shard has no written or committed offsets yet.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());

    int maxMessageSize = partitionWriter.config(TP).maxMessageSize();
    assertTrue(maxMessageSize > INITIAL_BUFFER_SIZE);

    // Build enough records so that the resulting batch ends up larger than
    // INITIAL_BUFFER_SIZE while still below maxMessageSize.
    List<String> payload = new ArrayList<>();
    for (int i = 0; i < 50000; i++) {
        payload.add("record-" + i);
    }

    // Schedule the write.
    CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(payload, "response1")
    );

    // An exceptional completion (e.g. RecordTooLargeException) would mean the
    // batch could not be appended.
    assertFalse(write.isCompletedExceptionally());

    int appendedBatchSize = partitionWriter.entries(TP).get(0).sizeInBytes();
    assertTrue(appendedBatchSize > INITIAL_BUFFER_SIZE && appendedBatchSize < maxMessageSize);
}
/**
 * Verifies that a write buffer grown beyond the partition's max message size
 * is not put back into the buffer supplier: the next buffer handed out is the
 * small initial one.
 */
@Test
public void testCoordinatorDoNotRetainBufferLargeThanMaxMessageSize() {
    MockTimer mockTimer = new MockTimer();
    // Writer with a fixed 1MB max message size.
    InMemoryPartitionWriter writer = new InMemoryPartitionWriter(false) {
        @Override
        public LogConfig config(TopicPartition tp) {
            return new LogConfig(Map.of(
                TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB
            ));
        }
    };
    StringSerializer stringSerializer = new StringSerializer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(stringSerializer)
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator shard for the partition.
    runtime.scheduleLoadOperation(TP, 10);

    // A freshly loaded shard has no written or committed offsets yet.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());

    // A single record far larger than the 1MB max message size.
    List<String> oversizedRecords = List.of("A".repeat(100 * 1024 * 1024));

    // Schedule the write. The result flags allow the oversized append.
    CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(oversizedRecords, "response1", null, true, false)
    );

    // An exceptional completion (e.g. RecordTooLargeException) would mean the
    // write was rejected.
    assertFalse(write.isCompletedExceptionally());

    // The buffer supplier must hand out the initial small buffer, proving the
    // oversized buffer was discarded rather than cached.
    assertEquals(INITIAL_BUFFER_SIZE, context.bufferSupplier.get(1).capacity());
}
/**
 * Verifies that a write buffer expanded beyond the initial size — but still
 * within the partition's max message size — is retained by the buffer
 * supplier for reuse.
 */
@Test
public void testCoordinatorRetainExpandedBufferLessOrEqualToMaxMessageSize() {
    MockTimer mockTimer = new MockTimer();
    // Writer with a very large (1GB) max message size so the expanded buffer
    // stays under the limit.
    InMemoryPartitionWriter writer = new InMemoryPartitionWriter(false) {
        @Override
        public LogConfig config(TopicPartition tp) {
            return new LogConfig(Map.of(
                TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024 * 1024) // 1GB
            ));
        }
    };
    StringSerializer stringSerializer = new StringSerializer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(stringSerializer)
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator shard for the partition.
    runtime.scheduleLoadOperation(TP, 10);

    // A freshly loaded shard has no written or committed offsets yet.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());

    // Build enough records so that the resulting batch ends up larger than
    // INITIAL_BUFFER_SIZE but still below the max message size.
    List<String> payload = new ArrayList<>();
    for (int i = 0; i < 1000000; i++) {
        payload.add("record-" + i);
    }

    // Schedule the write.
    CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(payload, "response1")
    );

    // An exceptional completion (e.g. RecordTooLargeException) would mean the
    // write was rejected.
    assertFalse(write.isCompletedExceptionally());

    int appendedBatchSize = writer.entries(TP).get(0).sizeInBytes();
    int maxMessageSize = writer.config(TP).maxMessageSize();
    assertTrue(INITIAL_BUFFER_SIZE < appendedBatchSize && appendedBatchSize <= maxMessageSize);

    // The buffer supplier must now hand out the expanded buffer, proving it
    // was retained for reuse.
    assertTrue(context.bufferSupplier.get(1).capacity() > INITIAL_BUFFER_SIZE);
}
/**
 * Verifies that when the partition's max message size is reduced below the
 * initial buffer size, a cached buffer larger than the new limit is dropped,
 * and subsequently cached buffers shrink to the new max message size.
 */
@Test
public void testBufferShrinkWhenMaxMessageSizeReducedBelowInitialBufferSize() {
    MockTimer timer = new MockTimer();
    // Writer whose log config can be swapped at runtime to simulate a dynamic
    // max.message.bytes change.
    var mockWriter = new InMemoryPartitionWriter(false) {
        private LogConfig config = new LogConfig(Map.of(
            TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB
        ));

        @Override
        public LogConfig config(TopicPartition tp) {
            return config;
        }

        public void updateConfig(LogConfig newConfig) {
            this.config = newConfig;
        }
    };
    StringSerializer serializer = new StringSerializer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(mockWriter)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(serializer)
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());

    List<String> records = new ArrayList<>();
    for (int i = 0; i < 1000; i++) {
        records.add("record-" + i);
    }

    // Write #1. Small enough to fit in the initial buffer.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(records, "response1")
    );

    // Verify that the write has not completed exceptionally.
    // This will catch any exceptions thrown including RecordTooLargeException.
    assertFalse(write1.isCompletedExceptionally());

    int batchSize = mockWriter.entries(TP).get(0).sizeInBytes();
    int maxBatchSize = mockWriter.config(TP).maxMessageSize();
    assertTrue(batchSize <= INITIAL_BUFFER_SIZE && INITIAL_BUFFER_SIZE <= maxBatchSize);

    ByteBuffer cachedBuffer = ctx.bufferSupplier.get(1);
    assertEquals(INITIAL_BUFFER_SIZE, cachedBuffer.capacity());
    // bufferSupplier.get(1) removes the cached buffer from the supplier; release
    // it to put it back so the next write can observe it.
    ctx.bufferSupplier.release(cachedBuffer);

    // Reduce the max message size below the initial buffer size. The single
    // local avoids repeating the magic offset in both the config and the assert.
    int reducedMaxMessageSize = INITIAL_BUFFER_SIZE - 66;
    mockWriter.updateConfig(new LogConfig(
        Map.of(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(reducedMaxMessageSize))));
    assertEquals(reducedMaxMessageSize, mockWriter.config(TP).maxMessageSize());

    // Write #2.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(records, "response2")
    );
    assertFalse(write2.isCompletedExceptionally());

    // The previously cached buffer exceeds the new max message size, so it was
    // dropped: the supplier falls back to allocating the requested capacity (1).
    assertEquals(1, ctx.bufferSupplier.get(1).capacity());

    // Write #3.
    CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(records, "response3")
    );
    assertFalse(write3.isCompletedExceptionally());

    // The cached buffer now equals the new max message size, which is smaller
    // than INITIAL_BUFFER_SIZE.
    assertEquals(mockWriter.config(TP).maxMessageSize(), ctx.bufferSupplier.get(1).capacity());
}
/**
 * Verifies end-to-end batching of write operations: records accumulate in the
 * current batch until it cannot fit the next write or the linger time expires,
 * and all pending writes complete once the flushed batches are committed.
 */
@Test
public void testScheduleWriteOperationWithBatching() throws ExecutionException, InterruptedException, TimeoutException {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    // Batching is enabled by the 10ms append linger time.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state: nothing written or committed, no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each. Keep in mind that
    // each batch has a header so it is not possible to have those four records
    // in one single batch.
    List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Write #1 with two records.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(0, 2), "response1")
    );

    // Verify that the write is not committed yet.
    assertFalse(write1.isDone());

    // A batch has been created.
    assertNotNull(ctx.currentBatch);

    // Verify the state. Records are replayed (visible in the shard) but no
    // batch has been written to the log yet.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Write #2 with one record. It still fits into the open batch.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(2, 3), "response2")
    );

    // Verify that the write is not committed yet.
    assertFalse(write2.isDone());

    // Verify the state. Records are replayed but no batch written.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Write #3 with one record. This one cannot go into the existing batch
    // so the existing batch should be flushed and a new one should be created.
    CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(3, 4), "response3")
    );

    // Verify that the write is not committed yet.
    assertFalse(write3.isDone());

    // Verify the state. Records are replayed. The previous batch
    // got flushed with all the records but the new one from #3.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        records(timer.time().milliseconds(), records.subList(0, 3))
    ), writer.entries(TP));

    // Advance past the 10ms linger time so the pending batch is flushed.
    timer.advanceClock(11);

    // Verify the state. The pending batch is flushed.
    assertEquals(4L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    // Timestamps are offset by -11 because the clock advanced after the batches
    // were built.
    assertEquals(List.of(
        records(timer.time().milliseconds() - 11, records.subList(0, 3)),
        records(timer.time().milliseconds() - 11, records.subList(3, 4))
    ), writer.entries(TP));

    // Commit and verify that writes are completed.
    writer.commit(TP);
    assertTrue(write1.isDone());
    assertTrue(write2.isDone());
    assertTrue(write3.isDone());
    assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
    assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
    assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies that a single write operation whose records cannot possibly fit
 * into one batch fails with {@code RecordTooLargeException}.
 */
@Test
public void testScheduleWriteOperationWithBatchingWhenRecordsTooLarge() {
    MockTimer mockTimer = new MockTimer();
    MockPartitionWriter partitionWriter = new MockPartitionWriter();
    // Batching is enabled by the 10ms append linger time.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(partitionWriter)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator shard for the partition.
    runtime.scheduleLoadOperation(TP, 10);

    // A freshly loaded shard has no written or committed offsets and no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
    assertNull(context.currentBatch);

    int maxBatchSize = partitionWriter.config(TP).maxMessageSize();

    // Four records of a quarter of the max batch size each. Because every batch
    // also carries a header, the four records cannot fit into a single batch.
    List<String> records = new ArrayList<>();
    for (char marker : new char[] {'1', '2', '3', '4'}) {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, marker);
        records.add(new String(payload));
    }

    // Attempt to write all four records in one operation; it must be rejected.
    CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records, "response1")
    );
    assertFutureThrows(RecordTooLargeException.class, write);
}
/**
 * Verifies that when flushing a batch fails at the partition writer, every
 * write pending in that batch fails, the triggering write fails too, and the
 * coordinator state is rolled back to the last committed snapshot.
 */
@Test
public void testScheduleWriteOperationWithBatchingWhenWriteFails() {
    MockTimer timer = new MockTimer();
    // The partition writer does not accept any writes.
    MockPartitionWriter writer = new MockPartitionWriter(0);
    // Batching is enabled by the 10ms append linger time.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state: nothing written or committed, no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each. Keep in mind that
    // each batch has a header so it is not possible to have those four records
    // in one single batch.
    List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Write #1. Goes into the open batch; nothing flushed yet.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(0, 1), "response1"));

    // Write #2. Goes into the open batch.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(1, 2), "response2"));

    // Write #3. Goes into the open batch.
    CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(2, 3), "response3"));

    // Verify the state. Records are replayed in the shard but nothing has been
    // written to the log yet.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Write #4. This write cannot make it in the current batch. So the current batch
    // is flushed. It will fail. So we expect all writes to fail.
    CompletableFuture<String> write4 = runtime.scheduleWriteOperation("write#4", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(3, 4), "response4"));

    // Verify the futures: the flush failure propagates to all batched writes.
    assertFutureThrows(KafkaException.class, write1);
    assertFutureThrows(KafkaException.class, write2);
    assertFutureThrows(KafkaException.class, write3);
    // Write #4 is also expected to fail.
    assertFutureThrows(KafkaException.class, write4);

    // Verify the state. The state should be reverted to the initial state.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));
}
/**
 * Verifies that when replaying a record into the current batch throws, both
 * the failing write and the earlier writes batched with it fail, and the
 * coordinator state is rolled back to the last committed snapshot.
 */
@Test
public void testScheduleWriteOperationWithBatchingWhenReplayFails() {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    // Batching is enabled by the 10ms append linger time.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state: nothing written or committed, no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Override the coordinator with a coordinator that throws
    // an exception when replay is called for any offset >= 1.
    SnapshotRegistry snapshotRegistry = ctx.coordinator.snapshotRegistry();
    ctx.coordinator = new SnapshottableCoordinator<>(
        new LogContext(),
        snapshotRegistry,
        new MockCoordinatorShard(snapshotRegistry, ctx.timer) {
            @Override
            public void replay(
                long offset,
                long producerId,
                short producerEpoch,
                String record
            ) throws RuntimeException {
                // Offset 0 replays normally; any later offset fails.
                if (offset >= 1) {
                    throw new IllegalArgumentException("error");
                }
                super.replay(
                    offset,
                    producerId,
                    producerEpoch,
                    record
                );
            }
        },
        TP
    );

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each.
    List<String> records = Stream.of('1', '2').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Write #1. Replayed at offset 0, so it succeeds and stays in the open batch.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(0, 1), "response1"));

    // Verify the state. The record is replayed but no batch is written yet.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Write #2. Replayed at offset 1, so replay throws and the batch is aborted.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(1, 2), "response2"));

    // Verify the futures: the replay failure fails both batched writes.
    assertFutureThrows(IllegalArgumentException.class, write1);
    assertFutureThrows(IllegalArgumentException.class, write2);

    // Verify the state. It is reverted to the initial state.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));
}
/**
 * Verifies the interaction between batching and transactional operations:
 * a transactional write or a transaction completion flushes the current
 * non-transactional batch before appending its own records/markers, and all
 * operations complete once the log is committed.
 */
@Test
public void testScheduleTransactionalWriteOperationWithBatching() throws ExecutionException, InterruptedException, TimeoutException {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    // Batching is enabled by the 10ms append linger time.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state: nothing written or committed, no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Write #1 with one record. It is batched, not written yet.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record#1"), "response1")
    );

    // Verify that the write is not committed yet.
    assertFalse(write1.isDone());

    // Verify the state. Records are replayed but no batch written.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(Set.of(), ctx.coordinator.coordinator().pendingRecords(100L));
    assertEquals(Set.of("record#1"), ctx.coordinator.coordinator().records());
    assertEquals(List.of(), writer.entries(TP));

    // Transactional write #2 with one record. This will flush the current batch.
    CompletableFuture<String> write2 = runtime.scheduleTransactionalWriteOperation(
        "txn-write#1",
        TP,
        "transactional-id",
        100L,
        (short) 50,
        Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record#2"), "response2"),
        TXN_OFFSET_COMMIT_LATEST_VERSION
    );

    // Verify that the write is not committed yet.
    assertFalse(write2.isDone());

    // Verify the state. The current batch and the transactional records are
    // written to the log; record#2 stays pending under producer id 100.
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(Set.of("record#2"), ctx.coordinator.coordinator().pendingRecords(100L));
    assertEquals(Set.of("record#1"), ctx.coordinator.coordinator().records());
    assertEquals(List.of(
        records(timer.time().milliseconds(), "record#1"),
        transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2")
    ), writer.entries(TP));

    // Write #3 with one record. It opens a fresh (non-transactional) batch.
    CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record#3"), "response3")
    );

    // Verify that the write is not committed yet.
    assertFalse(write3.isDone());

    // Verify the state. record#3 is only replayed; the log is unchanged.
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(Set.of("record#2"), ctx.coordinator.coordinator().pendingRecords(100L));
    assertEquals(Set.of("record#1", "record#3"), ctx.coordinator.coordinator().records());
    assertEquals(List.of(
        records(timer.time().milliseconds(), "record#1"),
        transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2")
    ), writer.entries(TP));

    // Complete transaction #1. It will flush the current batch if any.
    // Use TV_1 since this test doesn't check epoch validation.
    CompletableFuture<Void> complete1 = runtime.scheduleTransactionCompletion(
        "complete#1",
        TP,
        100L,
        (short) 50,
        10,
        TransactionResult.COMMIT,
        TransactionVersion.TV_1.featureLevel(),
        DEFAULT_WRITE_TIMEOUT
    );

    // Verify that the completion is not committed yet.
    assertFalse(complete1.isDone());

    // Verify the state. The batch with record#3 is flushed, then the COMMIT
    // marker is appended; record#2 moves from pending to committed records.
    assertEquals(4L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(Set.of(), ctx.coordinator.coordinator().pendingRecords(100L));
    assertEquals(Set.of("record#1", "record#2", "record#3"), ctx.coordinator.coordinator().records());
    assertEquals(List.of(
        records(timer.time().milliseconds(), "record#1"),
        transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2"),
        records(timer.time().milliseconds(), "record#3"),
        endTransactionMarker(100L, (short) 50, timer.time().milliseconds(), 10, ControlRecordType.COMMIT)
    ), writer.entries(TP));

    // Commit and verify that writes are completed.
    writer.commit(TP);
    assertTrue(write1.isDone());
    assertTrue(write2.isDone());
    assertTrue(write3.isDone());
    assertTrue(complete1.isDone());
    assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
    assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
    assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
    assertNull(complete1.get(5, TimeUnit.SECONDS));
}
/**
 * Tests writes with the adaptive append linger time (appendLingerMs empty):
 * each write appends to the current batch and enqueues a deferred flush
 * event instead of flushing synchronously.
 */
@Test
public void testAdaptiveAppendLingerTime() {
    MockTimer mockTimer = new MockTimer();
    ManualEventProcessor eventProcessor = new ManualEventProcessor();
    MockPartitionWriter partitionWriter = new MockPartitionWriter();
    CoordinatorRuntime<MockCoordinatorShard, String> coordinatorRuntime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(mockTimer.time())
            .withTimer(mockTimer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(eventProcessor)
            .withPartitionWriter(partitionWriter)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.empty())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Load the coordinator: one poll runs the load operation, a second one
    // completes it.
    coordinatorRuntime.scheduleLoadOperation(TP, 10);
    eventProcessor.poll();
    eventProcessor.poll();

    // Check the freshly loaded state: nothing written, no open batch.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = coordinatorRuntime.contextOrThrow(TP);
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
    assertNull(context.currentBatch);

    // Schedule write #1.
    coordinatorRuntime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")
    );

    // Schedule write #2.
    coordinatorRuntime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record3"), "response2")
    );

    // Run write #1.
    eventProcessor.poll();

    // Write #1 allocated a batch.
    assertNotNull(context.currentBatch);

    // Write #1 enqueued a flush event behind write #2.
    assertEquals(2, eventProcessor.size());

    // The records are replayed into the state machine, but nothing has
    // reached the log yet.
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
        new MockCoordinatorShard.RecordAndMetadata(1, "record2")
    ), context.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), partitionWriter.entries(TP));

    // Run write #2.
    eventProcessor.poll();
    assertEquals(1, eventProcessor.size());

    // The batch is still open.
    assertNotNull(context.currentBatch);

    // Still replayed only; the log remains empty.
    assertEquals(0L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
        new MockCoordinatorShard.RecordAndMetadata(1, "record2"),
        new MockCoordinatorShard.RecordAndMetadata(2, "record3")
    ), context.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), partitionWriter.entries(TP));

    // Run the pending flush event.
    eventProcessor.poll();

    // The flush closed and wrote the batch.
    assertNull(context.currentBatch);

    // All three records are now in a single batch in the log.
    assertEquals(3L, context.coordinator.lastWrittenOffset());
    assertEquals(0L, context.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), context.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
        new MockCoordinatorShard.RecordAndMetadata(1, "record2"),
        new MockCoordinatorShard.RecordAndMetadata(2, "record3")
    ), context.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(mockTimer.time().milliseconds(), "record1", "record2", "record3")
    ), partitionWriter.entries(TP));
}
/**
 * Tests a flush triggered by the max batch size with an adaptive append linger time.
 *
 * The flush for the first batch must not flush the second batch.
 */
@Test
@SuppressWarnings("checkstyle:MethodLength")
public void testAdaptiveAppendLingerWithMaxBatchSizeFlush() {
    MockTimer timer = new MockTimer();
    ManualEventProcessor processor = new ManualEventProcessor();
    MockPartitionWriter writer = new MockPartitionWriter();
    // OptionalInt.empty() enables the adaptive append linger: writes append to
    // the current batch and enqueue a deferred flush event.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(processor)
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.empty())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Loads the coordinator. Poll once to execute the load operation and once
    // to complete the load.
    runtime.scheduleLoadOperation(TP, 10);
    processor.poll();
    processor.poll();

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create five records with a quarter of the max batch size each. Keep in
    // mind that each batch has a header so it is not possible to have four of
    // those records in one single batch.
    List<String> records = Stream.of('1', '2', '3', '4', '5').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Write #1.
    runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(0, 1), "response1")
    );

    // Write #2.
    runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(1, 2), "response2")
    );

    // Write #3.
    runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(2, 3), "response3")
    );

    // Write #4.
    runtime.scheduleWriteOperation("write#4", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(3, 4), "response4")
    );

    // Execute write #1, write #2 and write #3.
    processor.poll();
    processor.poll();
    processor.poll();

    // A batch has been created.
    assertNotNull(ctx.currentBatch);

    // A flush event is queued after write #4.
    assertEquals(2, processor.size());

    // Verify the state. Records are replayed but no batch written.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Write #5.
    runtime.scheduleWriteOperation("write#5", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(4, 5), "response5")
    );

    // Execute write #4. This one cannot go into the existing batch
    // so the existing batch should be flushed and a new one should be created.
    processor.poll();

    // A batch has been created.
    assertNotNull(ctx.currentBatch);

    // Another flush event is queued after write #5.
    assertEquals(3, processor.size());

    // Verify the state. The first three records were flushed to the log as one
    // batch; the 4th record sits in the new, still-open batch.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), records.subList(0, 3))
    ), writer.entries(TP));

    // Execute the first flush. It was enqueued for the first batch, which the
    // max-batch-size flush already wrote.
    processor.poll();
    assertEquals(2, processor.size());

    // The flush does not belong to the current batch and is ignored.
    assertNotNull(ctx.currentBatch);

    // Verify the state: nothing changed since the ignored flush.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), records.subList(0, 3))
    ), writer.entries(TP));

    // Execute write #5. It joins the open batch.
    processor.poll();
    assertEquals(1, processor.size());

    // Verify the state. Record #5 is replayed but the second batch is still open.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)),
        new MockCoordinatorShard.RecordAndMetadata(4, records.get(4))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), records.subList(0, 3))
    ), writer.entries(TP));

    // Execute the second flush. It belongs to the current (second) batch.
    processor.poll();
    assertEquals(0, processor.size());

    // The batch is flushed.
    assertNull(ctx.currentBatch);

    // Verify the state. Both batches are now in the log.
    assertEquals(5L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L, 5L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)),
        new MockCoordinatorShard.RecordAndMetadata(4, records.get(4))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), records.subList(0, 3)),
        TestUtil.records(timer.time().milliseconds(), records.subList(3, 5))
    ), writer.entries(TP));
}
/**
 * Tests a transactional write with an adaptive append linger time.
 *
 * The transactional write must not enqueue a flush, since it flushes immediately.
 * The flush for the batch before the transactional write must not flush the batch after the
 * transactional write.
 */
@Test
public void testAdaptiveAppendLingerWithTransactionalWrite() {
    MockTimer timer = new MockTimer();
    ManualEventProcessor processor = new ManualEventProcessor();
    MockPartitionWriter writer = new MockPartitionWriter();
    // OptionalInt.empty() enables the adaptive append linger: non-transactional
    // writes append to the current batch and enqueue a deferred flush event.
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(processor)
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.empty())
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Loads the coordinator. Poll once to execute the load operation and once
    // to complete the load.
    runtime.scheduleLoadOperation(TP, 10);
    processor.poll();
    processor.poll();

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Write #1.
    runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record1"), "response1")
    );

    // Transactional write #2. This will flush the pending batch when executed.
    runtime.scheduleTransactionalWriteOperation(
        "txn-write#1",
        TP,
        "transactional-id",
        100L,
        (short) 50,
        Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record2"), "response2"),
        TXN_OFFSET_COMMIT_LATEST_VERSION
    );

    // Write #3.
    runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of("record3"), "response3")
    );
    assertEquals(3, processor.size());

    // Execute write #1.
    processor.poll();

    // A batch has been created.
    assertNotNull(ctx.currentBatch);

    // A flush event is queued after write #3.
    assertEquals(3, processor.size());

    // Verify the state. Records are replayed but no batch written.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1")
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Execute transactional write #2.
    processor.poll();

    // The batch is flushed.
    assertNull(ctx.currentBatch);

    // No flush event is queued because the transactional write flushed immediately.
    assertEquals(2, processor.size());

    // Verify the state. The current batch and the transactional records are
    // written to the log.
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1")
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), "record1"),
        TestUtil.transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record2")
    ), writer.entries(TP));

    // Execute write #3.
    processor.poll();

    // A batch has been created.
    assertNotNull(ctx.currentBatch);

    // A flush event is queued after the first flush.
    assertEquals(2, processor.size());

    // Verify the state. Records are replayed but no batch written.
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
        new MockCoordinatorShard.RecordAndMetadata(2, "record3")
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), "record1"),
        TestUtil.transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record2")
    ), writer.entries(TP));

    // Execute the first flush. It was enqueued for the first batch, which the
    // transactional write already flushed.
    processor.poll();
    assertEquals(1, processor.size());

    // The flush does not belong to the current batch and is ignored.
    assertNotNull(ctx.currentBatch);

    // Execute the second flush.
    processor.poll();
    assertEquals(0, processor.size());

    // The batch is flushed.
    assertNull(ctx.currentBatch);

    // Verify the state.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 1L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
        new MockCoordinatorShard.RecordAndMetadata(2, "record3")
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        TestUtil.records(timer.time().milliseconds(), "record1"),
        TestUtil.transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record2"),
        TestUtil.records(timer.time().milliseconds(), "record3")
    ), writer.entries(TP));
}
/**
 * Tests that the runtime reloads the state machine when the offsets returned
 * by the partition writer disagree with the runtime's own bookkeeping.
 */
@Test
public void testStateMachineIsReloadedWhenOutOfSync() {
    MockTimer timer = new MockTimer();
    MockCoordinatorLoader loader = spy(new MockCoordinatorLoader());
    // This writer deliberately reports wrong offsets so that the runtime's
    // state machine goes out of sync with the log on the first flush.
    MockPartitionWriter writer = new MockPartitionWriter() {
        @Override
        public long append(
            TopicPartition tp,
            VerificationGuard verificationGuard,
            MemoryRecords batch,
            short transactionVersion
        ) {
            // Add 1 to the returned offsets.
            return super.append(tp, verificationGuard, batch, transactionVersion) + 1;
        }
    };
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(loader)
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(ACTIVE, ctx.state);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Keep a reference to the current coordinator.
    SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = ctx.coordinator;

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each. Keep in mind that
    // each batch has a header so it is not possible to have those four records
    // in one single batch.
    List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Write #1.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(0, 1), "response1"));

    // Write #2.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(1, 2), "response2"));

    // Write #3.
    CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(2, 3), "response3"));

    // Write #4. This write cannot make it in the current batch. So the current batch
    // is flushed. It will fail. So we expect all writes to fail.
    CompletableFuture<String> write4 = runtime.scheduleWriteOperation("write#4", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records.subList(3, 4), "response4"));

    // Verify the futures. The offset mismatch fails the writes with
    // NotCoordinatorException.
    assertFutureThrows(NotCoordinatorException.class, write1);
    assertFutureThrows(NotCoordinatorException.class, write2);
    assertFutureThrows(NotCoordinatorException.class, write3);
    // Write #4 is also expected to fail.
    assertFutureThrows(NotCoordinatorException.class, write4);

    // Verify that the state machine was loaded twice (initial load + reload).
    verify(loader, times(2)).load(eq(TP), any());

    // Verify that the state is active and that the state machine
    // is actually a new one.
    assertEquals(ACTIVE, ctx.state);
    assertNotEquals(coordinator, ctx.coordinator);
}
/**
 * Tests that a write operation producing no records is not completed until
 * the last committed offset catches up with the last written offset.
 */
@Test
public void testWriteOpIsNotReleasedWhenStateMachineIsNotCaughtUpAfterLoad() throws ExecutionException, InterruptedException, TimeoutException {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    // A loader that leaves the state machine with lastWrittenOffset (2) ahead
    // of lastCommittedOffset (1), i.e. not fully caught up.
    CoordinatorLoader<String> loader = new CoordinatorLoader<String>() {
        @Override
        public CompletableFuture<LoadSummary> load(
            TopicPartition tp,
            CoordinatorPlayback<String> coordinator
        ) {
            coordinator.replay(
                0,
                RecordBatch.NO_PRODUCER_ID,
                RecordBatch.NO_PRODUCER_EPOCH,
                "record#0"
            );
            coordinator.replay(
                0,
                RecordBatch.NO_PRODUCER_ID,
                RecordBatch.NO_PRODUCER_EPOCH,
                "record#1"
            );
            coordinator.updateLastWrittenOffset(2L);
            coordinator.updateLastCommittedOffset(1L);
            return CompletableFuture.completedFuture(new LoadSummary(
                0L,
                0L,
                0L,
                2,
                1
            ));
        }

        @Override
        public void close() {}
    };
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(loader)
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(1L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(2L), ctx.coordinator.snapshotRegistry().epochsList());

    // Schedule a write operation that does not generate any records.
    CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of(), "response1"));

    // The write operation should not be done because the state machine has not
    // caught up with the end of the log yet.
    assertFalse(write.isDone());

    // Advance the last committed offset.
    ctx.highWatermarklistener.onHighWatermarkUpdated(TP, 2L);

    // Verify the state.
    assertEquals(2L, ctx.coordinator.lastWrittenOffset());
    assertEquals(2L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(2L), ctx.coordinator.snapshotRegistry().epochsList());

    // The write operation should be completed.
    assertEquals("response1", write.get(5, TimeUnit.SECONDS));
}
/**
 * Tests that a non-atomic write can spread its records over multiple batches
 * when they do not fit into a single one, while the default atomic write fails.
 */
@Test
public void testScheduleNonAtomicWriteOperation() throws ExecutionException, InterruptedException, TimeoutException {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each. Keep in mind that
    // each batch has a header so it is not possible to have those four records
    // in one single batch.
    List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Let's try to write all the records atomically (the default) to ensure
    // that it fails.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records, "write#1")
    );
    assertFutureThrows(RecordTooLargeException.class, write1);

    // Let's try to write the same records non-atomically.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records, "write#2", null, true, false)
    );

    // The write is pending.
    assertFalse(write2.isDone());

    // Verify the state.
    assertNotNull(ctx.currentBatch);
    // The last written offset is 3L because one batch was written to the log with
    // the first three records. The 4th one is pending.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(
        records(timer.time().milliseconds(), records.subList(0, 3))
    ), writer.entries(TP));

    // Commit up to 3L.
    writer.commit(TP, 3L);

    // The write is still pending because the 4th record has not been written yet.
    assertFalse(write2.isDone());

    // Advance past the linger time to flush the pending batch.
    timer.advanceClock(11);

    // Verify the state. The pending batch with the 4th record is now in the log.
    assertNull(ctx.currentBatch);
    assertEquals(4L, ctx.coordinator.lastWrittenOffset());
    assertEquals(3L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(3L, 4L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
        new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
    ), ctx.coordinator.coordinator().fullRecords());
    // The batches were created before the clock was advanced, hence the -11.
    assertEquals(List.of(
        records(timer.time().milliseconds() - 11, records.subList(0, 3)),
        records(timer.time().milliseconds() - 11, records.subList(3, 4))
    ), writer.entries(TP));

    // Commit up to 4L.
    writer.commit(TP, 4L);

    // Verify that the write is completed.
    assertTrue(write2.isDone());
    assertEquals("write#2", write2.get(5, TimeUnit.SECONDS));
}
/**
 * Tests that a non-atomic write with a record larger than the max batch size
 * fails on its own without affecting the previous, still-pending write.
 */
@Test
public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws InterruptedException {
    MockTimer timer = new MockTimer();
    MockPartitionWriter writer = new MockPartitionWriter();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withDefaultWriteTimeOut(Duration.ofMillis(20))
            .withLoader(new MockCoordinatorLoader())
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(writer)
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .withAppendLingerMs(OptionalInt.of(10))
            .withExecutorService(mock(ExecutorService.class))
            .build();

    // Schedule the loading.
    runtime.scheduleLoadOperation(TP, 10);

    // Verify the initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertNull(ctx.currentBatch);

    // Get the max batch size.
    int maxBatchSize = writer.config(TP).maxMessageSize();

    // Create records with a quarter of the max batch size each. Keep in mind that
    // each batch has a header so it is not possible to have those four records
    // in one single batch.
    List<String> records = Stream.of('1', '2', '3').map(c -> {
        char[] payload = new char[maxBatchSize / 4];
        Arrays.fill(payload, c);
        return new String(payload);
    }).collect(Collectors.toList());

    // Create another record larger than the max batch size.
    char[] payload = new char[maxBatchSize];
    Arrays.fill(payload, '4');
    String record = new String(payload);

    // Let's write the first three records.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(records, "write#1", null, true, false)
    );

    // Verify the state. The records are replayed but only buffered in the
    // pending batch; nothing is in the log yet.
    assertEquals(0L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2))
    ), ctx.coordinator.coordinator().fullRecords());
    assertEquals(List.of(), writer.entries(TP));

    // Let's write the 4th record which is too large. This will flush the current
    // pending batch, allocate a new batch, and put the record into it.
    // Note that the batch will fail only when the batch is written because the
    // MemoryBatchBuilder always accept one record.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
        state -> new CoordinatorResult<>(List.of(record), "write#2", null, true, false)
    );

    // Advance past the linger time to flush the pending batch.
    timer.advanceClock(11);

    // The write should have failed...
    assertFutureThrows(RecordTooLargeException.class, write2);

    // ... but write#1 should be left intact.
    assertFalse(write1.isDone());

    // Verify the state. Only the first batch with the first three records
    // made it to the log.
    assertEquals(3L, ctx.coordinator.lastWrittenOffset());
    assertEquals(0L, ctx.coordinator.lastCommittedOffset());
    assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList());
    assertEquals(List.of(
        new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
        new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
        new MockCoordinatorShard.RecordAndMetadata(2, records.get(2))
    ), ctx.coordinator.coordinator().fullRecords());
    // The batch was created before the clock was advanced, hence the -11.
    assertEquals(List.of(
        records(timer.time().milliseconds() - 11, records.subList(0, 3))
    ), writer.entries(TP));
}
@Test
public void testScheduleNonAtomicWriteOperationWhenWriteFails() {
MockTimer mockTimer = new MockTimer();
// Use a partition writer that rejects every append.
MockPartitionWriter failingWriter = new MockPartitionWriter(0);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(failingWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator shard.
runtime.scheduleLoadOperation(TP, 10);
// Check that nothing has been written or batched yet.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
assertEquals(0L, context.coordinator.lastWrittenOffset());
assertEquals(0L, context.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
assertNull(context.currentBatch);
// Look up the maximum batch size.
int maxMessageSize = failingWriter.config(TP).maxMessageSize();
// Build four records, each a quarter of the max batch size. Because every
// batch also carries a header, all four can never fit into one batch.
List<String> payloads = Stream.of('1', '2', '3', '4').map(ch -> {
    char[] chars = new char[maxMessageSize / 4];
    Arrays.fill(chars, ch);
    return new String(chars);
}).collect(Collectors.toList());
// Schedule write #1.
CompletableFuture<String> firstWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(payloads.subList(0, 1), "response1", null, true, false));
// Schedule write #2.
CompletableFuture<String> secondWrite = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(payloads.subList(1, 2), "response2", null, true, false));
// Schedule write #3.
CompletableFuture<String> thirdWrite = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(payloads.subList(2, 3), "response3", null, true, false));
// The records were replayed but nothing has reached the log yet.
assertEquals(0L, context.coordinator.lastWrittenOffset());
assertEquals(0L, context.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, payloads.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, payloads.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, payloads.get(2))
), context.coordinator.coordinator().fullRecords());
assertEquals(List.of(), failingWriter.entries(TP));
// Schedule write #4. It cannot fit into the current batch, which forces a
// flush. The flush fails, so every write is expected to fail.
CompletableFuture<String> fourthWrite = runtime.scheduleWriteOperation("write#4", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(payloads.subList(3, 4), "response4", null, true, false));
// All the earlier writes must have failed.
assertFutureThrows(KafkaException.class, firstWrite);
assertFutureThrows(KafkaException.class, secondWrite);
assertFutureThrows(KafkaException.class, thirdWrite);
// Write #4 fails as well.
assertFutureThrows(KafkaException.class, fourthWrite);
// The coordinator must have been rolled back to its initial state.
assertEquals(0L, context.coordinator.lastWrittenOffset());
assertEquals(0L, context.coordinator.lastCommittedOffset());
assertEquals(List.of(0L), context.coordinator.snapshotRegistry().epochsList());
assertEquals(List.of(), context.coordinator.coordinator().fullRecords());
assertEquals(List.of(), failingWriter.entries(TP));
}
@Test
public void testEmptyBatch() throws Exception {
MockTimer mockTimer = new MockTimer();
MockPartitionWriter partitionWriter = new MockPartitionWriter();
ThrowingSerializer<String> faultySerializer = new ThrowingSerializer<String>(new StringSerializer());
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(partitionWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(faultySerializer)
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator shard.
runtime.scheduleLoadOperation(TP, 10);
// No batch should exist right after loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
assertNull(context.currentBatch);
// Write #1 is made to fail by the serializer.
faultySerializer.throwOnNextOperation();
CompletableFuture<String> firstWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(List.of("1"), "response1"));
// The failed write leaves an empty batch behind.
assertFutureThrows(BufferOverflowException.class, firstWrite);
assertNotNull(context.currentBatch);
// Write #2 carries no records.
CompletableFuture<String> secondWrite = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
    shard -> new CoordinatorResult<>(List.of(), "response2"));
// A record-less write must not be attached to the empty batch.
assertTrue(secondWrite.isDone());
assertEquals("response2", secondWrite.get(5, TimeUnit.SECONDS));
// Completing transaction #1 flushes the current (empty) batch. The runtime
// must not try to write an empty batch; the mock partition writer would
// throw if it did.
// TV_1 is used because epoch validation is out of scope for this test.
CompletableFuture<Void> completion = runtime.scheduleTransactionCompletion(
    "complete#1",
    TP,
    100L,
    (short) 50,
    10,
    TransactionResult.COMMIT,
    TransactionVersion.TV_1.featureLevel(),
    DEFAULT_WRITE_TIMEOUT
);
// The completion is not committed yet.
assertFalse(completion.isDone());
// Committing the partition finishes the completion.
partitionWriter.commit(TP);
assertNull(completion.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies how the append linger time is reported to the runtime metrics:
 * a batch that is flushed early because the next record does not fit reports
 * a linger time of 0, while a batch flushed by the linger timer reports the
 * elapsed time since its creation.
 */
@Test
public void testRecordAppendLingerTime() throws Exception {
MockTimer timer = new MockTimer();
// Writer sleeps for 10ms before appending records.
MockPartitionWriter writer = new MockPartitionWriter(timer.time(), Integer.MAX_VALUE, false);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(timer.time())
        .withTimer(timer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(writer)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(runtimeMetrics)
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertNull(ctx.currentBatch);
// Get the max batch size.
int maxBatchSize = writer.config(TP).maxMessageSize();
// Create records with a quarter of the max batch size each. Keep in mind that
// each batch has a header so it is not possible to have those four records
// in one single batch.
List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
    char[] payload = new char[maxBatchSize / 4];
    Arrays.fill(payload, c);
    return new String(payload);
}).collect(Collectors.toList());
// Write #1 with two records.
long firstBatchTimestamp = timer.time().milliseconds();
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(0, 2), "response1")
);
// A batch has been created.
assertNotNull(ctx.currentBatch);
// Write #2 with one record.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(2, 3), "response2")
);
// Verify the state. Records are replayed but no batch written.
assertEquals(List.of(), writer.entries(TP));
verify(runtimeMetrics, times(0)).recordFlushTime(10);
// Write #3 with one record. This one cannot go into the existing batch
// so the existing batch should be flushed and a new one should be created.
long secondBatchTimestamp = timer.time().milliseconds();
CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(3, 4), "response3")
);
// Verify the state. Records are replayed. The previous batch
// got flushed with all the records but the new one from #3.
// The new batch's timestamp comes from before the flush.
assertEquals(3L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
    new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
), ctx.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(firstBatchTimestamp, records.subList(0, 3))
), writer.entries(TP));
// The batch was flushed early (forced by write #3), so no time was spent lingering.
verify(runtimeMetrics, times(1)).recordLingerTime(0);
// Advance past the linger time.
timer.advanceClock(11);
// Verify the state. The pending batch is flushed.
assertEquals(4L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
    new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
), ctx.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(secondBatchTimestamp, records.subList(0, 3)),
    records(secondBatchTimestamp, records.subList(3, 4))
), writer.entries(TP));
// NOTE(review): 21ms appears to be the 10ms writer sleep from the forced flush
// plus the 11ms clock advance, measured since the second batch was created —
// confirm against the runtime's linger accounting.
verify(runtimeMetrics, times(1)).recordLingerTime(21);
// Commit and verify that writes are completed.
writer.commit(TP);
assertTrue(write1.isDone());
assertTrue(write2.isDone());
assertTrue(write3.isDone());
assertEquals(4L, ctx.coordinator.lastCommittedOffset());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies that the time spent flushing a batch is reported to the runtime
 * metrics via {@code recordFlushTime}: the mock writer simulates a 10ms
 * append delay, so each flushed batch should record a flush time of 10ms —
 * once after the forced flush and again after the linger-timer flush.
 */
@Test
public void testRecordFlushTime() throws Exception {
MockTimer timer = new MockTimer();
// Writer sleeps for 10ms before appending records.
MockPartitionWriter writer = new MockPartitionWriter(timer.time(), Integer.MAX_VALUE, false);
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(timer.time())
        .withTimer(timer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(writer)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(runtimeMetrics)
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertNull(ctx.currentBatch);
// Get the max batch size.
int maxBatchSize = writer.config(TP).maxMessageSize();
// Create records with a quarter of the max batch size each. Keep in mind that
// each batch has a header so it is not possible to have those four records
// in one single batch.
List<String> records = Stream.of('1', '2', '3', '4').map(c -> {
    char[] payload = new char[maxBatchSize / 4];
    Arrays.fill(payload, c);
    return new String(payload);
}).collect(Collectors.toList());
// Write #1 with two records.
long firstBatchTimestamp = timer.time().milliseconds();
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(0, 2), "response1")
);
// A batch has been created.
assertNotNull(ctx.currentBatch);
// Write #2 with one record.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(2, 3), "response2")
);
// Verify the state. Records are replayed but no batch written, so no
// flush time has been recorded yet.
assertEquals(List.of(), writer.entries(TP));
verify(runtimeMetrics, times(0)).recordFlushTime(10);
// Write #3 with one record. This one cannot go into the existing batch
// so the existing batch should be flushed and a new one should be created.
long secondBatchTimestamp = timer.time().milliseconds();
CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records.subList(3, 4), "response3")
);
// Verify the state. Records are replayed. The previous batch
// got flushed with all the records but the new one from #3.
assertEquals(3L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
    new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
), ctx.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(firstBatchTimestamp, records.subList(0, 3))
), writer.entries(TP));
// The forced flush took the writer's simulated 10ms append delay.
verify(runtimeMetrics, times(1)).recordFlushTime(10);
// Advance past the linger time.
timer.advanceClock(11);
// Verify the state. The pending batch is flushed.
assertEquals(4L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)),
    new MockCoordinatorShard.RecordAndMetadata(3, records.get(3))
), ctx.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(secondBatchTimestamp, records.subList(0, 3)),
    records(secondBatchTimestamp, records.subList(3, 4))
), writer.entries(TP));
// The linger-timer flush records a second 10ms flush time.
verify(runtimeMetrics, times(2)).recordFlushTime(10);
// Commit and verify that writes are completed.
writer.commit(TP);
assertTrue(write1.isDone());
assertTrue(write2.isDone());
assertTrue(write3.isDone());
assertEquals(4L, ctx.coordinator.lastCommittedOffset());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
assertEquals("response2", write2.get(5, TimeUnit.SECONDS));
assertEquals("response3", write3.get(5, TimeUnit.SECONDS));
}
@Test
public void testCompressibleRecordTriggersFlushAndSucceeds() throws Exception {
MockTimer mockTimer = new MockTimer();
MockPartitionWriter partitionWriter = new MockPartitionWriter();
Compression gzip = Compression.gzip().build();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(partitionWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withCompression(gzip)
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator shard.
runtime.scheduleLoadOperation(TP, 10);
// Nothing should be buffered right after loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
assertNull(context.currentBatch);
// Look up the maximum batch size.
int maxMessageSize = partitionWriter.config(TP).maxMessageSize();
// Build two records, each a quarter of the maximum batch size.
List<String> smallRecords = Stream.of('1', '2').map(ch -> {
    char[] chars = new char[maxMessageSize / 4];
    Arrays.fill(chars, ch);
    return new String(chars);
}).collect(Collectors.toList());
// Write #1 appends the small records; the batch ends up roughly half full.
long firstBatchTime = mockTimer.time().milliseconds();
CompletableFuture<String> firstWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(50),
    shard -> new CoordinatorResult<>(smallRecords, "response1")
);
// A batch now exists.
assertNotNull(context.currentBatch);
// The batch has not been flushed yet.
assertEquals(List.of(), partitionWriter.entries(TP));
// Build a record made of highly compressible data.
List<String> compressibleRecord = List.of("a".repeat((int) (0.75 * maxMessageSize)));
// Write #2 appends the large record. Uncompressed it cannot join the previous
// batch, but it fits into a fresh buffer, so the previous batch should be
// flushed and a new one allocated.
CompletableFuture<String> secondWrite = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(50),
    shard -> new CoordinatorResult<>(compressibleRecord, "response2")
);
// The first batch is flushed while the second stays pending.
assertEquals(2L, context.coordinator.lastWrittenOffset());
assertEquals(0L, context.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, smallRecords.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, smallRecords.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, compressibleRecord.get(0))
), context.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(firstBatchTime, gzip, smallRecords)
), partitionWriter.entries(TP));
// Move past the linger time.
mockTimer.advanceClock(11);
// Commit and check that the second batch completed.
partitionWriter.commit(TP);
assertTrue(firstWrite.isDone());
assertTrue(secondWrite.isDone());
assertEquals(3L, context.coordinator.lastCommittedOffset());
assertEquals("response1", firstWrite.get(5, TimeUnit.SECONDS));
assertEquals("response2", secondWrite.get(5, TimeUnit.SECONDS));
}
@Test
public void testLargeCompressibleRecordTriggersFlushAndSucceeds() throws Exception {
MockTimer mockTimer = new MockTimer();
MockPartitionWriter partitionWriter = new MockPartitionWriter();
Compression gzip = Compression.gzip().build();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(partitionWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withCompression(gzip)
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator shard.
runtime.scheduleLoadOperation(TP, 10);
// Nothing should be buffered right after loading.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext context = runtime.contextOrThrow(TP);
assertNull(context.currentBatch);
// Look up the maximum batch size.
int maxMessageSize = partitionWriter.config(TP).maxMessageSize();
// Build two records, each a quarter of the maximum batch size.
List<String> smallRecords = Stream.of('1', '2').map(ch -> {
    char[] chars = new char[maxMessageSize / 4];
    Arrays.fill(chars, ch);
    return new String(chars);
}).collect(Collectors.toList());
// Write #1 appends the small records; the batch ends up roughly half full.
long firstBatchTime = mockTimer.time().milliseconds();
CompletableFuture<String> firstWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(50),
    shard -> new CoordinatorResult<>(smallRecords, "response1")
);
// A batch now exists.
assertNotNull(context.currentBatch);
// The batch has not been flushed yet.
assertEquals(List.of(), partitionWriter.entries(TP));
// Build a huge record of highly compressible data.
List<String> hugeRecord = List.of("a".repeat(3 * maxMessageSize));
// Write #2 appends the huge record. Uncompressed it cannot join the previous
// batch, but compressed it fits into the new buffer, so the previous batch
// should be flushed and a new batch allocated for the record. That new batch
// also triggers an immediate flush.
long secondBatchTime = mockTimer.time().milliseconds();
CompletableFuture<String> secondWrite = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(50),
    shard -> new CoordinatorResult<>(hugeRecord, "response2")
);
// Both batches reached the log.
assertEquals(3L, context.coordinator.lastWrittenOffset());
assertEquals(0L, context.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, smallRecords.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, smallRecords.get(1)),
    new MockCoordinatorShard.RecordAndMetadata(2, hugeRecord.get(0))
), context.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(firstBatchTime, gzip, smallRecords),
    records(secondBatchTime, gzip, hugeRecord)
), partitionWriter.entries(TP));
// Commit and check that both writes completed.
partitionWriter.commit(TP);
assertTrue(firstWrite.isDone());
assertTrue(secondWrite.isDone());
assertEquals(3L, context.coordinator.lastCommittedOffset());
assertEquals("response1", firstWrite.get(5, TimeUnit.SECONDS));
assertEquals("response2", secondWrite.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies that a record too large for the current batch forces a flush of
 * that batch, and that because the record's data is not compressible it also
 * cannot fit in a new batch, so the write fails with RecordTooLargeException
 * while the flushed batch still commits successfully.
 */
@Test
public void testLargeUncompressibleRecordTriggersFlushAndFails() throws Exception {
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
Compression compression = Compression.gzip().build();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(timer.time())
        .withTimer(timer)
        .withDefaultWriteTimeOut(Duration.ofMillis(20))
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(new DirectEventProcessor())
        .withPartitionWriter(writer)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withCompression(compression)
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(10))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertNull(ctx.currentBatch);
// Get the max batch size.
int maxBatchSize = writer.config(TP).maxMessageSize();
// Create 2 records with a quarter of the max batch size each.
List<String> records = Stream.of('1', '2').map(c -> {
    char[] payload = new char[maxBatchSize / 4];
    Arrays.fill(payload, c);
    return new String(payload);
}).collect(Collectors.toList());
// Write #1 with the small records, batch will be about half full.
long firstBatchTimestamp = timer.time().milliseconds();
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(records, "response1")
);
// A batch has been created.
assertNotNull(ctx.currentBatch);
// Verify the state - batch is not yet flushed.
assertEquals(List.of(), writer.entries(TP));
// Create a large record of random lowercase letters, which gzip cannot
// meaningfully compress. A fixed seed keeps the fixture deterministic
// across runs. nextInt(26) produces a uniform, always non-negative index;
// the previous `(char) rnd.nextInt() % 26` only stayed non-negative because
// of the unsigned char cast, which was easy to misread.
char[] payload = new char[3 * maxBatchSize];
Random random = new Random(13L);
for (int i = 0; i < payload.length; i++) {
    payload[i] = (char) ('a' + random.nextInt(26));
}
List<String> largeRecord = List.of(new String(payload));
// Write #2 with the large record. This record is too large to go into the previous batch
// and is not compressible so it should be flushed. It is also too large to fit in a new batch
// so the write should fail with RecordTooLargeException.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(50),
    state -> new CoordinatorResult<>(largeRecord, "response2")
);
// Check that write2 fails with RecordTooLargeException.
assertFutureThrows(RecordTooLargeException.class, write2);
// Verify the state. The first batch was flushed and the largeRecord
// write failed, so only the two small records were applied.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
    new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)),
    new MockCoordinatorShard.RecordAndMetadata(1, records.get(1))
), ctx.coordinator.coordinator().fullRecords());
assertEquals(List.of(
    records(firstBatchTimestamp, compression, records)
), writer.entries(TP));
// Commit and verify that writes are completed.
writer.commit(TP);
assertTrue(write1.isDone());
assertTrue(write2.isDone());
assertEquals(2L, ctx.coordinator.lastCommittedOffset());
assertEquals("response1", write1.get(5, TimeUnit.SECONDS));
}
/**
 * Verifies how time spent in the write purgatory is reported: a committed
 * write records the time until the high watermark advanced past it; a write
 * that times out records the full timeout; and a write that fails during
 * append never enters the purgatory and records nothing.
 */
@Test
public void testRecordEventPurgatoryTime() throws Exception {
Duration writeTimeout = Duration.ofMillis(1000);
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(timer.time())
        .withTimer(timer)
        .withDefaultWriteTimeOut(writeTimeout)
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(processor)
        .withPartitionWriter(writer)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(runtimeMetrics)
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(0))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Loads the coordinator. Poll once to execute the load operation and once
// to complete the load.
runtime.scheduleLoadOperation(TP, 10);
processor.poll();
processor.poll();
// write#1 will be committed and update the high watermark. Record time spent in purgatory.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout,
    state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
// write#2 will time out sitting in the purgatory. Record time spent in purgatory.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, writeTimeout,
    state -> new CoordinatorResult<>(List.of("record2"), "response2")
);
// write#3 will error while appending. Does not spend time in purgatory.
CompletableFuture<String> write3 = runtime.scheduleWriteOperation("write#3", TP, writeTimeout,
    state -> {
        throw new KafkaException("write#3 failed.");
    });
processor.poll();
processor.poll();
processor.poll();
// Confirm we do not record purgatory time for write#3.
assertTrue(write3.isCompletedExceptionally());
verify(runtimeMetrics, times(0)).recordEventPurgatoryTime(0L);
// Records have been written to the log.
long writeTimestamp = timer.time().milliseconds();
assertEquals(List.of(
    records(writeTimestamp, "record1"),
    records(writeTimestamp, "record2")
), writer.entries(TP));
// There is no pending high watermark.
assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Advance the clock then commit records from write#1.
timer.advanceClock(700);
writer.commit(TP, 1);
// We should still have one pending event and the pending high watermark should be updated.
assertEquals(1, processor.size());
assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Poll once to process the high watermark update and complete the writes.
processor.poll();
long purgatoryTimeMs = timer.time().milliseconds() - writeTimestamp;
// Advance the clock past write timeout. write#2 has now timed out.
// 700ms have already elapsed; 700 + 301 = 1001ms crosses the 1000ms timeout.
timer.advanceClock(300 + 1);
processor.poll();
assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(write1.isDone());
assertTrue(write2.isCompletedExceptionally());
// write#1 recorded the time until its commit; write#2 recorded the timeout.
verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(purgatoryTimeMs);
verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1);
}
@Test
public void testWriteEventCompletesOnlyOnce() throws Exception {
// The event first completes via timeout, then the HWM update arrives; the
// second completion must be a no-op.
Duration writeTimeout = Duration.ofMillis(1000L);
MockTimer mockTimer = new MockTimer();
MockPartitionWriter partitionWriter = new MockPartitionWriter();
ManualEventProcessor eventProcessor = new ManualEventProcessor();
CoordinatorRuntimeMetrics metrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(writeTimeout)
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(eventProcessor)
        .withPartitionWriter(partitionWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(metrics)
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withAppendLingerMs(OptionalInt.of(0))
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator: one poll runs the load operation, a second one
// completes it.
runtime.scheduleLoadOperation(TP, 10);
eventProcessor.poll();
eventProcessor.poll();
// Schedule write#1.
CompletableFuture<String> write = runtime.scheduleWriteOperation("write#1", TP, writeTimeout,
    shard -> new CoordinatorResult<>(List.of("record1"), "response1")
);
eventProcessor.poll();
// The record is now in the log.
long appendTime = mockTimer.time().milliseconds();
assertEquals(List.of(
    records(appendTime, "record1")
), partitionWriter.entries(TP));
// No high watermark update is pending.
assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Time out the write event and confirm write#1 completed exceptionally.
mockTimer.advanceClock(writeTimeout.toMillis() + 1L);
eventProcessor.poll();
verify(metrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1);
assertTrue(write.isCompletedExceptionally());
// Update the high watermark.
partitionWriter.commit(TP, 1);
assertEquals(1, eventProcessor.size());
assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Process the high watermark update; write#1 was already completed, so this
// must be a no-op.
eventProcessor.poll();
assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(write.isCompletedExceptionally());
verify(metrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L);
}
@Test
public void testCompleteTransactionEventCompletesOnlyOnce() throws Exception {
// The event first completes via timeout, then the HWM update arrives; the
// second completion must be a no-op.
Duration writeTimeout = Duration.ofMillis(1000L);
MockTimer mockTimer = new MockTimer();
MockPartitionWriter partitionWriter = new MockPartitionWriter();
ManualEventProcessor eventProcessor = new ManualEventProcessor();
CoordinatorRuntimeMetrics metrics = mock(CoordinatorRuntimeMetrics.class);
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
    new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
        .withTime(mockTimer.time())
        .withTimer(mockTimer)
        .withDefaultWriteTimeOut(writeTimeout)
        .withLoader(new MockCoordinatorLoader())
        .withEventProcessor(eventProcessor)
        .withPartitionWriter(partitionWriter)
        .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
        .withCoordinatorRuntimeMetrics(metrics)
        .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
        .withSerializer(new StringSerializer())
        .withExecutorService(mock(ExecutorService.class))
        .build();
// Load the coordinator: one poll runs the load operation, a second one
// completes it.
runtime.scheduleLoadOperation(TP, 10);
eventProcessor.poll();
eventProcessor.poll();
// Schedule the transaction completion.
// TV_1 is used because this test only exercises completion behavior, not
// epoch validation.
CompletableFuture<Void> completion = runtime.scheduleTransactionCompletion(
    "transactional-write",
    TP,
    100L,
    (short) 50,
    1,
    TransactionResult.COMMIT,
    TransactionVersion.TV_1.featureLevel(),
    writeTimeout
);
eventProcessor.poll();
// The control record is now in the log.
assertEquals(List.of(
    endTransactionMarker(100, (short) 50, mockTimer.time().milliseconds(), 1, ControlRecordType.COMMIT)
), partitionWriter.entries(TP));
// A write timeout task exists and the completion is still pending.
assertEquals(1, mockTimer.size());
assertFalse(completion.isDone());
// Time out the event and confirm it completed exceptionally.
mockTimer.advanceClock(writeTimeout.toMillis() + 1L);
eventProcessor.poll();
verify(metrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1);
assertTrue(completion.isCompletedExceptionally());
// Update the high watermark.
partitionWriter.commit(TP, 1);
assertEquals(1, eventProcessor.size());
assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
// Process the high watermark update; the event was already completed, so
// this must be a no-op.
eventProcessor.poll();
assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark());
assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset());
assertTrue(completion.isCompletedExceptionally());
verify(metrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L);
}
@Test
public void testCoordinatorExecutor() {
Duration writeTimeout = Duration.ofMillis(1000);
MockTimer timer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
ManualEventProcessor processor = new ManualEventProcessor();
CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class);
ExecutorService executorService = mock(ExecutorService.class);
when(executorService.submit(any(Runnable.class))).thenAnswer(args -> {
Runnable op = args.getArgument(0);
op.run();
return CompletableFuture.completedFuture(null);
});
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(timer.time())
.withTimer(timer)
.withDefaultWriteTimeOut(writeTimeout)
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(processor)
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(runtimeMetrics)
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(0))
.withExecutorService(executorService)
.build();
// Loads the coordinator. Poll once to execute the load operation and once
// to complete the load.
runtime.scheduleLoadOperation(TP, 10);
processor.poll();
processor.poll();
// Schedule a write which schedules an async tasks.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout,
state -> {
state.executor().schedule(
"write#1#task",
() -> "task result",
(result, exception) -> {
assertEquals("task result", result);
assertNull(exception);
return new CoordinatorResult<>(List.of("record2"), null);
}
);
return new CoordinatorResult<>(List.of("record1"), "response1");
}
);
// Execute the write.
processor.poll();
// We should have a new write event in the queue as a result of the
// task being executed immediately.
assertEquals(1, processor.size());
// Verify the state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(1L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
new MockCoordinatorShard.RecordAndMetadata(0, "record1")
), ctx.coordinator.coordinator().fullRecords());
// Execute the pending write.
processor.poll();
// The processor must be empty now.
assertEquals(0, processor.size());
// Verify the state.
assertEquals(2L, ctx.coordinator.lastWrittenOffset());
assertEquals(0L, ctx.coordinator.lastCommittedOffset());
assertEquals(List.of(
new MockCoordinatorShard.RecordAndMetadata(0, "record1"),
new MockCoordinatorShard.RecordAndMetadata(1, "record2")
), ctx.coordinator.coordinator().fullRecords());
// Commit.
writer.commit(TP);
processor.poll();
assertTrue(write1.isDone());
}
@Test
public void testLingerTimeComparisonInMaybeFlushCurrentBatch() throws Exception {
// Provides the runtime clock; we will advance it.
MockTimer clockTimer = new MockTimer();
// Used for scheduling timer tasks; we won't advance it to avoid a timer-triggered batch flush.
MockTimer schedulerTimer = new MockTimer();
MockPartitionWriter writer = new MockPartitionWriter();
CoordinatorRuntime<MockCoordinatorShard, String> runtime =
new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
.withTime(clockTimer.time())
.withTimer(schedulerTimer)
.withDefaultWriteTimeOut(Duration.ofMillis(20))
.withLoader(new MockCoordinatorLoader())
.withEventProcessor(new DirectEventProcessor())
.withPartitionWriter(writer)
.withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
.withCoordinatorMetrics(mock(CoordinatorMetrics.class))
.withSerializer(new StringSerializer())
.withAppendLingerMs(OptionalInt.of(10))
.withExecutorService(mock(ExecutorService.class))
.build();
// Schedule the loading.
runtime.scheduleLoadOperation(TP, 10);
// Verify the initial state.
CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
assertEquals(ACTIVE, ctx.state);
assertNull(ctx.currentBatch);
// Write #1.
CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of("record1"), "response1")
);
assertFalse(write1.isDone());
assertNotNull(ctx.currentBatch);
assertEquals(0, writer.entries(TP).size());
// Verify that the linger timeout task is created; there will also be a default write timeout task.
assertEquals(2, schedulerTimer.size());
// Advance past the linger time.
clockTimer.advanceClock(11);
// At this point, there are still two scheduled tasks; the linger task has not fired
// because we did not advance the schedulerTimer.
assertEquals(2, schedulerTimer.size());
// Write #2.
CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20),
state -> new CoordinatorResult<>(List.of("record2"), "response2")
);
// The batch should have been flushed.
assertEquals(1, writer.entries(TP).size());
// Because flushing the batch cancels the linger task, there should now be two write timeout tasks.
assertEquals(2, schedulerTimer.size());
// Verify batch contains both two records
MemoryRecords batch = writer.entries(TP).get(0);
RecordBatch recordBatch = batch.firstBatch();
assertEquals(2, recordBatch.countOrNull());
// Commit and verify that writes are completed.
writer.commit(TP);
assertTrue(write1.isDone());
assertTrue(write2.isDone());
// Now that all scheduled tasks have been cancelled, the scheduler queue should be empty.
assertEquals(0, schedulerTimer.size());
}
private static <S extends CoordinatorShard<U>, U> ArgumentMatcher<CoordinatorPlayback<U>> coordinatorMatcher(
CoordinatorRuntime<S, U> runtime,
TopicPartition tp
) {
return c -> c.equals(runtime.contextOrThrow(tp).coordinator);
}
}
| CoordinatorRuntimeTest |
java | quarkusio__quarkus | core/devmode-spi/src/main/java/io/quarkus/dev/console/DevConsoleManager.java | {
"start": 367,
"end": 1005
} | class ____ {
public static volatile String DEV_MANAGER_GLOBALS_ASSISTANT = "_assistant";
private static volatile Consumer<DevConsoleRequest> handler;
private static volatile Map<String, Map<String, Object>> templateInfo;
private static volatile HotReplacementContext hotReplacementContext;
private static volatile Object quarkusBootstrap;
private static volatile boolean doingHttpInitiatedReload;
/**
* Global map that can be used to share data between the runtime and deployment side
* to enable communication.
* <p>
* Key names should be namespaced.
* <p>
* As the | DevConsoleManager |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/search/SpellCheckArgs.java | {
"start": 331,
"end": 3034
} | class ____ implements RedisCommandExtraArguments {
private int distance;
private final List<String> includes = new ArrayList<>();
private final List<String> excludes = new ArrayList<>();
private int dialect = -1;
/**
* Sets the maximum Levenshtein distance for spelling suggestions (default: 1, max: 4).
*
* @param distance the distance
* @return the current {@code SpellCheckArgs}
*/
public SpellCheckArgs distance(int distance) {
if (distance < 1 || distance > 4) {
throw new IllegalArgumentException("`distance` must be in [1,4]");
}
this.distance = distance;
return this;
}
/**
* Specifies an inclusion of a custom dictionary named {@code dict}
*
* @param dict the dictionaries
* @return the current {@code SpellCheckArgs}
*/
public SpellCheckArgs includes(String... dict) {
ParameterValidation.doesNotContainNull(notNullOrEmpty(dict, "dict"), "dict");
Collections.addAll(includes, dict);
return this;
}
/**
* Specifies an exclusion of a custom dictionary named {@code dict}
*
* @param dict the dictionaries
* @return the current {@code SpellCheckArgs}
*/
public SpellCheckArgs excludes(String... dict) {
ParameterValidation.doesNotContainNull(notNullOrEmpty(dict, "dict"), "dict");
Collections.addAll(excludes, dict);
return this;
}
/**
* Selects the dialect version under which to execute the query. If not specified, the query will execute under the
* default dialect version set during module initial loading.
*
* @param dialect the dialect
* @return the current {@code SpellCheckArgs}
*/
public SpellCheckArgs dialect(int dialect) {
this.dialect = dialect;
return this;
}
@Override
public List<Object> toArgs() {
List<Object> list = new ArrayList<>();
if (distance != 0) {
list.add("DISTANCE");
list.add(Integer.toString(distance));
}
if (!includes.isEmpty() && !excludes.isEmpty()) {
throw new IllegalArgumentException("Cannot specify both `includes` and `excludes` terms");
}
if (!includes.isEmpty()) {
list.add("TERMS");
list.add("INCLUDE");
list.addAll(includes);
} else if (!excludes.isEmpty()) {
list.add("TERMS");
list.add("EXCLUDE");
list.addAll(excludes);
}
if (dialect != -1) {
list.add("DIALECT");
list.add(Integer.toString(dialect));
}
return list;
}
}
| SpellCheckArgs |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/ReactiveStreamsComponentBuilderFactory.java | {
"start": 1456,
"end": 2052
} | interface ____ {
/**
* Reactive Streams (camel-reactive-streams)
* Exchange messages with reactive stream processing libraries compatible
* with the reactive streams standard.
*
* Category: messaging
* Since: 2.19
* Maven coordinates: org.apache.camel:camel-reactive-streams
*
* @return the dsl builder
*/
static ReactiveStreamsComponentBuilder reactiveStreams() {
return new ReactiveStreamsComponentBuilderImpl();
}
/**
* Builder for the Reactive Streams component.
*/
| ReactiveStreamsComponentBuilderFactory |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/support/Spr8954Tests.java | {
"start": 3130,
"end": 3404
} | class ____ implements FactoryBean<Foo>, AnInterface {
@Override
public Foo getObject() {
return new Foo();
}
@Override
public Class<?> getObjectType() {
return Foo.class;
}
@Override
public boolean isSingleton() {
return true;
}
}
| FooFactoryBean |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/generic/GenericDatumReader.java | {
"start": 16960,
"end": 17524
} | class ____ used to represent a string Schema. By default uses
* {@link GenericData#STRING_PROP} to determine whether {@link Utf8} or
* {@link String} is used. Subclasses may override for alternate
* representations.
*/
protected Class findStringClass(Schema schema) {
String name = schema.getProp(GenericData.STRING_PROP);
if (name == null)
return CharSequence.class;
switch (GenericData.StringType.valueOf(name)) {
case String:
return String.class;
default:
return CharSequence.class;
}
}
/**
* This | to |
java | quarkusio__quarkus | extensions/mongodb-client/deployment/src/test/java/io/quarkus/mongodb/DefaultAndNamedMongoClientConfigTest.java | {
"start": 963,
"end": 3107
} | class ____ extends MongoWithReplicasTestBase {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(MongoTestBase.class))
.withConfigurationResource("application-default-and-named-mongoclient.properties");
@Inject
MongoClient client;
@Inject
@MongoClientName("cluster2")
MongoClient client2;
@Inject
@Any
MongoHealthCheck health;
@AfterEach
void cleanup() {
if (client != null) {
client.close();
}
if (client2 != null) {
client2.close();
}
}
@Test
public void testNamedDataSourceInjection() {
assertProperConnection(client, 27018);
assertProperConnection(client2, 27019);
assertThat(client.listDatabases().first()).isNotEmpty();
assertThat(client2.listDatabases().first()).isNotEmpty();
assertThat(Arc.container().instance(MongoClient.class).get()).isNotNull();
assertThat(Arc.container().instance(MongoClient.class, Default.Literal.INSTANCE).get()).isNotNull();
assertThat(Arc.container().instance(MongoClient.class, NamedLiteral.of("cluster2")).get()).isNotNull();
assertThat(Arc.container().instance(MongoClient.class, NamedLiteral.of("cluster3")).get()).isNull();
org.eclipse.microprofile.health.HealthCheckResponse response = health.call();
assertThat(response.getStatus()).isEqualTo(HealthCheckResponse.Status.UP);
assertThat(response.getData()).isNotEmpty();
assertThat(response.getData().get()).hasSize(2).contains(
entry("<default>", "OK"),
entry("cluster2", "OK"));
}
private void assertProperConnection(MongoClient client, int expectedPort) {
assertThat(ClientProxy.unwrap(client)).isInstanceOfSatisfying(MongoClientImpl.class, c -> {
assertThat(c.getCluster().getSettings().getHosts()).singleElement().satisfies(sa -> {
assertThat(sa.getPort()).isEqualTo(expectedPort);
});
});
}
}
| DefaultAndNamedMongoClientConfigTest |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/client/producer/async/AsyncSendWithMessageQueueSelectorIT.java | {
"start": 1688,
"end": 4436
} | class ____ extends BaseConf {
private static Logger logger = LoggerFactory.getLogger(TagMessageWith1ConsumerIT.class);
private RMQAsyncSendProducer producer = null;
private String topic = null;
@Before
public void setUp() {
topic = initTopic();
logger.info(String.format("user topic[%s]!", topic));
producer = getAsyncProducer(NAMESRV_ADDR, topic);
}
@After
public void tearDown() {
super.shutdown();
}
@Test
public void testSendWithSelector() {
int msgSize = 20;
final int queueId = 0;
RMQNormalConsumer consumer = getConsumer(NAMESRV_ADDR, topic, "*", new RMQNormalListener());
producer.asyncSend(msgSize, new MessageQueueSelector() {
@Override
public MessageQueue select(List<MessageQueue> list, Message message, Object o) {
for (MessageQueue mq : list) {
if (mq.getQueueId() == queueId && mq.getBrokerName().equals(BROKER1_NAME)) {
return mq;
}
}
return list.get(0);
}
});
producer.waitForResponse(5 * 1000);
assertThat(producer.getSuccessMsgCount()).isEqualTo(msgSize);
consumer.getListener().waitForMessageConsume(producer.getAllMsgBody(), CONSUME_TIME);
assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(),
consumer.getListener().getAllMsgBody()))
.containsExactlyElementsIn(producer.getAllMsgBody());
VerifyUtils.verifyMessageQueueId(queueId, consumer.getListener().getAllOriginMsg());
producer.clearMsg();
consumer.clearMsg();
producer.getSuccessSendResult().clear();
producer.asyncSend(msgSize, new MessageQueueSelector() {
@Override
public MessageQueue select(List<MessageQueue> list, Message message, Object o) {
for (MessageQueue mq : list) {
if (mq.getQueueId() == queueId && mq.getBrokerName().equals(BROKER2_NAME)) {
return mq;
}
}
return list.get(8);
}
});
producer.waitForResponse(5 * 1000);
assertThat(producer.getSuccessMsgCount()).isEqualTo(msgSize);
consumer.getListener().waitForMessageConsume(producer.getAllMsgBody(), CONSUME_TIME);
assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(),
consumer.getListener().getAllMsgBody()))
.containsExactlyElementsIn(producer.getAllMsgBody());
VerifyUtils.verifyMessageQueueId(queueId, consumer.getListener().getAllOriginMsg());
}
}
| AsyncSendWithMessageQueueSelectorIT |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/StrSubstitutor.java | {
"start": 1402,
"end": 1843
} | class ____ a piece of text and substitutes all the variables within it.
* The default definition of a variable is <code>${variableName}</code>.
* The prefix and suffix can be changed via constructors and set methods.
* </p>
* <p>
* Variable values are typically resolved from a map, but could also be resolved
* from system properties, or by supplying a custom variable resolver.
* </p>
* <p>
* The simplest example is to use this | takes |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/env/InlinedPropertiesTestPropertySourceTests.java | {
"start": 1865,
"end": 3099
} | class ____ {
@Autowired
ConfigurableEnvironment env;
@Test
void propertiesAreAvailableInEnvironment() {
// Simple key/value pairs
assertEnvironmentProperty("foo", "bar");
assertEnvironmentProperty("baz", "quux");
assertEnvironmentProperty("enigma", "42");
// Values containing key/value delimiters (":", "=", " ")
assertEnvironmentProperty("x.y.z", "a=b=c");
assertEnvironmentProperty("server.url", "https://example.com");
assertEnvironmentProperty("key.value.1", "key=value");
assertEnvironmentProperty("key.value.2", "key=value");
assertEnvironmentProperty("key.value.3", "key:value");
}
@Test
@SuppressWarnings("rawtypes")
void propertyNameOrderingIsPreservedInEnvironment() {
EnumerablePropertySource eps = (EnumerablePropertySource) env.getPropertySources().get(
INLINED_PROPERTIES_PROPERTY_SOURCE_NAME);
assertThat(eps.getPropertyNames()).containsExactly("foo", "baz", "enigma", "x.y.z", "server.url",
"key.value.1", "key.value.2", "key.value.3" );
}
private void assertEnvironmentProperty(String name, Object value) {
assertThat(this.env.getProperty(name)).as("environment property '%s'", name).isEqualTo(value);
}
@Configuration
static | InlinedPropertiesTestPropertySourceTests |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/support/TypeWrapper.java | {
"start": 9028,
"end": 9229
} | class ____", ex);
}
}
}
/**
* {@link TypeProvider} for {@link Type}s obtained from a {@link MethodParameter}.
*/
@SuppressWarnings("serial")
static | structure |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/NestedSerializersSnapshotDelegate.java | {
"start": 2083,
"end": 6277
} | class ____ {
/** Magic number for integrity checks during deserialization. */
private static final int MAGIC_NUMBER = 1333245;
/** Current version of the new serialization format. */
private static final int VERSION = 1;
/** The snapshots from the serializer that make up this composition. */
private final TypeSerializerSnapshot<?>[] nestedSnapshots;
/** Constructor to create a snapshot for writing. */
public NestedSerializersSnapshotDelegate(TypeSerializer<?>... serializers) {
this.nestedSnapshots = TypeSerializerUtils.snapshot(serializers);
}
/** Constructor to create a snapshot during deserialization. */
@Internal
NestedSerializersSnapshotDelegate(TypeSerializerSnapshot<?>[] snapshots) {
this.nestedSnapshots = checkNotNull(snapshots);
}
// ------------------------------------------------------------------------
// Nested Serializers and Compatibility
// ------------------------------------------------------------------------
/**
* Produces a restore serializer from each contained serializer configuration snapshot. The
* serializers are returned in the same order as the snapshots are stored.
*/
public TypeSerializer<?>[] getRestoredNestedSerializers() {
return snapshotsToRestoreSerializers(nestedSnapshots);
}
/** Creates the restore serializer from the pos-th config snapshot. */
public <T> TypeSerializer<T> getRestoredNestedSerializer(int pos) {
checkArgument(pos < nestedSnapshots.length);
@SuppressWarnings("unchecked")
TypeSerializerSnapshot<T> snapshot = (TypeSerializerSnapshot<T>) nestedSnapshots[pos];
return snapshot.restoreSerializer();
}
/**
* Returns the snapshots of the nested serializers.
*
* @return the snapshots of the nested serializers.
*/
public TypeSerializerSnapshot<?>[] getNestedSerializerSnapshots() {
return nestedSnapshots;
}
// ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
/** Writes the composite snapshot of all the contained serializers. */
public final void writeNestedSerializerSnapshots(DataOutputView out) throws IOException {
out.writeInt(MAGIC_NUMBER);
out.writeInt(VERSION);
out.writeInt(nestedSnapshots.length);
for (TypeSerializerSnapshot<?> snap : nestedSnapshots) {
TypeSerializerSnapshot.writeVersionedSnapshot(out, snap);
}
}
/** Reads the composite snapshot of all the contained serializers. */
public static NestedSerializersSnapshotDelegate readNestedSerializerSnapshots(
DataInputView in, ClassLoader cl) throws IOException {
final int magicNumber = in.readInt();
if (magicNumber != MAGIC_NUMBER) {
throw new IOException(
String.format(
"Corrupt data, magic number mismatch. Expected %8x, found %8x",
MAGIC_NUMBER, magicNumber));
}
final int version = in.readInt();
if (version != VERSION) {
throw new IOException("Unrecognized version: " + version);
}
final int numSnapshots = in.readInt();
final TypeSerializerSnapshot<?>[] nestedSnapshots =
new TypeSerializerSnapshot<?>[numSnapshots];
for (int i = 0; i < numSnapshots; i++) {
nestedSnapshots[i] = TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
}
return new NestedSerializersSnapshotDelegate(nestedSnapshots);
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static TypeSerializer<?>[] snapshotsToRestoreSerializers(
TypeSerializerSnapshot<?>... snapshots) {
return Arrays.stream(snapshots)
.map(TypeSerializerSnapshot::restoreSerializer)
.toArray(TypeSerializer[]::new);
}
}
| NestedSerializersSnapshotDelegate |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/metadata/SybaseCallMetaDataProvider.java | {
"start": 1022,
"end": 2038
} | class ____ extends GenericCallMetaDataProvider {
private static final String REMOVABLE_COLUMN_PREFIX = "@";
private static final String RETURN_VALUE_NAME = "RETURN_VALUE";
public SybaseCallMetaDataProvider(DatabaseMetaData databaseMetaData) throws SQLException {
super(databaseMetaData);
}
@Override
public @Nullable String parameterNameToUse(@Nullable String parameterName) {
if (parameterName == null) {
return null;
}
else if (parameterName.length() > 1 && parameterName.startsWith(REMOVABLE_COLUMN_PREFIX)) {
return super.parameterNameToUse(parameterName.substring(1));
}
else {
return super.parameterNameToUse(parameterName);
}
}
@Override
public String namedParameterBindingToUse(@Nullable String parameterName) {
return parameterName + " = ?";
}
@Override
public boolean byPassReturnParameter(String parameterName) {
return (RETURN_VALUE_NAME.equals(parameterName) ||
RETURN_VALUE_NAME.equals(parameterNameToUse(parameterName)));
}
}
| SybaseCallMetaDataProvider |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/criteria/JpaTreatedJoin.java | {
"start": 237,
"end": 512
} | interface ____<L,R,R1 extends R> extends JpaTreatedFrom<L,R,R1>, JpaJoin<L,R1> {
@Override
<S extends R1> JpaTreatedJoin<L, R1, S> treatAs(Class<S> treatJavaType);
@Override
<S extends R1> JpaTreatedJoin<L, R1, S> treatAs(EntityDomainType<S> treatJavaType);
}
| JpaTreatedJoin |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/ReactiveAuthenticationManagerResolver.java | {
"start": 727,
"end": 901
} | interface ____ resolving a {@link ReactiveAuthenticationManager} based on the
* provided context
*
* @author Rafiullah Hamedy
* @since 5.2
*/
@FunctionalInterface
public | for |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnit3TestNotRunTest.java | {
"start": 6567,
"end": 6949
} | class ____ extends TestCase {
// BUG: Diagnostic contains:
public void testDoesStuff(boolean param) {}
}
""")
.doTest();
}
@Test
public void suppressionWorks() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import junit.framework.TestCase;
public | Test |
java | hibernate__hibernate-orm | hibernate-testing/src/test/java/org/hibernate/testing/annotations/methods/EntityManagerFactoryScopeTesting.java | {
"start": 1107,
"end": 3630
} | class ____ {
@BeforeAll
public void setup(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
AnEntity ae = new AnEntity(1, "AnEntity_1");
entityManager.persist( ae );
}
);
}
@AfterAll
public void tearDown(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> entityManager.createQuery( "delete from AnEntity" ).executeUpdate()
);
}
@Test
public void testBasicUsage(EntityManagerFactoryScope scope) {
assertThat( scope, notNullValue() );
assertThat( scope.getEntityManagerFactory(), notNullValue() );
// check we can use the EMF to create EMs
scope.inTransaction(
(session) -> session.createQuery( "select a from AnEntity a" ).getResultList()
);
}
@Test
public void nonAnnotatedMethodTest(EntityManagerFactoryScope scope) {
Set<EntityType<?>> entities = scope.getEntityManagerFactory().getMetamodel().getEntities();
assertEquals( 1, entities.size() );
assertEquals( "AnEntity", entities.iterator().next().getName() );
scope.inEntityManager(
entityManager -> {
AnEntity ae = entityManager.find( AnEntity.class, 1 );
assertNotNull( ae );
assertEquals( 1, ae.getId() );
assertEquals( "AnEntity_1", ae.getName() );
}
);
}
@Jpa(
annotatedClasses = AnotherEntity.class,
integrationSettings = {@Setting(name = JpaComplianceSettings.JPA_QUERY_COMPLIANCE, value = "true")}
)
@Test
public void annotatedMethodTest(EntityManagerFactoryScope scope) {
assertThat( scope, notNullValue() );
assertThat( scope.getEntityManagerFactory(), notNullValue() );
Set<EntityType<?>> entities = scope.getEntityManagerFactory().getMetamodel().getEntities();
assertEquals( 1, entities.size() );
assertEquals( "AnotherEntity", entities.iterator().next().getName() );
assertEquals( "true", scope.getEntityManagerFactory().getProperties().get( "hibernate.jpa.compliance.query" ) );
scope.inTransaction(
entityManager -> {
AnotherEntity aoe = new AnotherEntity( 2, "AnotherEntity_1" );
entityManager.persist( aoe );
}
);
scope.inTransaction(
entityManager -> {
AnotherEntity aoe = entityManager.find( AnotherEntity.class, 2 );
assertNotNull( aoe );
assertEquals( 2, aoe.getId() );
assertEquals( "AnotherEntity_1", aoe.getName() );
}
);
Assertions.assertThrows(
IllegalArgumentException.class,
() -> scope.inTransaction(
entityManager -> entityManager.find( AnEntity.class, 1 )
)
);
}
}
| EntityManagerFactoryScopeTesting |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/BeanRecipientListTest.java | {
"start": 2557,
"end": 3078
} | class ____ {
private static final AtomicInteger counter = new AtomicInteger();
private final int id;
public MyBean() {
id = counter.incrementAndGet();
}
@Override
public String toString() {
return "MyBean:" + id;
}
@org.apache.camel.RecipientList
public String[] route(String body) {
LOG.debug("Called {} with body: {}", this, body);
return new String[] { "mock:a", "mock:b" };
}
}
}
| MyBean |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java | {
"start": 300052,
"end": 303934
} | class ____ extends UnquoteIdentifierContext {
public TerminalNode IDENTIFIER() {
return getToken(SqlBaseParser.IDENTIFIER, 0);
}
public NonReservedContext nonReserved() {
return getRuleContext(NonReservedContext.class, 0);
}
public UnquotedIdentifierContext(UnquoteIdentifierContext ctx) {
copyFrom(ctx);
}
@Override
public void enterRule(ParseTreeListener listener) {
if (listener instanceof SqlBaseListener) ((SqlBaseListener) listener).enterUnquotedIdentifier(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if (listener instanceof SqlBaseListener) ((SqlBaseListener) listener).exitUnquotedIdentifier(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof SqlBaseVisitor) return ((SqlBaseVisitor<? extends T>) visitor).visitUnquotedIdentifier(this);
else return visitor.visitChildren(this);
}
}
public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException {
UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState());
enterRule(_localctx, 110, RULE_unquoteIdentifier);
try {
setState(861);
_errHandler.sync(this);
switch (_input.LA(1)) {
case IDENTIFIER:
_localctx = new UnquotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 1); {
setState(858);
match(IDENTIFIER);
}
break;
case ANALYZE:
case ANALYZED:
case CATALOGS:
case COLUMNS:
case CURRENT_DATE:
case CURRENT_TIME:
case CURRENT_TIMESTAMP:
case DAY:
case DEBUG:
case EXECUTABLE:
case EXPLAIN:
case FIRST:
case FORMAT:
case FULL:
case FUNCTIONS:
case GRAPHVIZ:
case HOUR:
case INTERVAL:
case LAST:
case LIMIT:
case MAPPED:
case MINUTE:
case MONTH:
case OPTIMIZED:
case PARSED:
case PHYSICAL:
case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
case SCHEMAS:
case SECOND:
case SHOW:
case SYS:
case TABLES:
case TEXT:
case TOP:
case TYPE:
case TYPES:
case VERIFY:
case YEAR:
_localctx = new UnquotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 2); {
setState(859);
nonReserved();
}
break;
case DIGIT_IDENTIFIER:
_localctx = new DigitIdentifierContext(_localctx);
enterOuterAlt(_localctx, 3); {
setState(860);
match(DIGIT_IDENTIFIER);
}
break;
default:
throw new NoViableAltException(this);
}
} catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
} finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | UnquotedIdentifierContext |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java | {
"start": 106494,
"end": 107598
} | class ____ is loaded only for
// DEFAULT_NODE_SORTING_POLICY.
if (policyClassName == null) {
throw new YarnRuntimeException(
str.trim() + " Class is not configured or not an instance of "
+ MultiNodeLookupPolicy.class.getCanonicalName());
}
policyClassName = normalizePolicyName(policyClassName.trim());
long policySortingInterval = getLong(
MULTI_NODE_SORTING_POLICY_NAME + DOT + str.trim()
+ DOT + "sorting-interval.ms",
DEFAULT_MULTI_NODE_SORTING_INTERVAL);
if (policySortingInterval < 0) {
throw new YarnRuntimeException(
str.trim()
+ " multi-node policy is configured with invalid"
+ " sorting-interval:" + policySortingInterval);
}
set.add(
new MultiNodePolicySpec(policyClassName, policySortingInterval));
}
}
return Collections.unmodifiableSet(set);
}
private String normalizePolicyName(String policyName) {
// Ensure that custom node sorting algorithm | name |
java | quarkusio__quarkus | core/processor/src/main/java/io/quarkus/annotation/processor/documentation/config/merger/MergedModel.java | {
"start": 491,
"end": 2510
} | class ____ {
private final Map<Extension, Map<ConfigRootKey, ConfigRoot>> configRoots;
private final Map<String, ConfigRoot> configRootsInSpecificFile;
private final Map<Extension, List<ConfigSection>> generatedConfigSections;
MergedModel(Map<Extension, Map<ConfigRootKey, ConfigRoot>> configRoots,
Map<String, ConfigRoot> configRootsInSpecificFile,
Map<Extension, List<ConfigSection>> configSections) {
this.configRoots = Collections.unmodifiableMap(configRoots);
this.configRootsInSpecificFile = Collections.unmodifiableMap(configRootsInSpecificFile);
this.generatedConfigSections = Collections.unmodifiableMap(configSections);
}
public Map<Extension, Map<ConfigRootKey, ConfigRoot>> getConfigRoots() {
return configRoots;
}
public Map<String, ConfigRoot> getConfigRootsInSpecificFile() {
return configRootsInSpecificFile;
}
public Map<Extension, List<ConfigSection>> getGeneratedConfigSections() {
return generatedConfigSections;
}
public boolean isEmpty() {
return configRoots.isEmpty();
}
public record ConfigRootKey(String topLevelPrefix, String description) implements Comparable<ConfigRootKey> {
@Override
public final String toString() {
return topLevelPrefix;
}
@Override
public int compareTo(ConfigRootKey other) {
int compareTopLevelPrefix = this.topLevelPrefix.compareToIgnoreCase(other.topLevelPrefix);
if (compareTopLevelPrefix != 0) {
return compareTopLevelPrefix;
}
if (this.description == null && other.description == null) {
return 0;
}
if (this.description == null) {
return -1;
}
if (other.description == null) {
return 1;
}
return this.description.compareToIgnoreCase(other.description);
}
}
}
| MergedModel |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/BeanWrapperGenericsTests.java | {
"start": 22959,
"end": 23155
} | class ____ implements ObjectWithId<Long> {
private Long id;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long aId) {
this.id = aId;
}
}
}
| Promotion |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/CastFunctionMiscLegacyITCase.java | {
"start": 1521,
"end": 2810
} | class ____ extends BuiltInFunctionTestBase {
Configuration getConfiguration() {
return super.getConfiguration()
.set(
TABLE_EXEC_LEGACY_CAST_BEHAVIOUR,
ExecutionConfigOptions.LegacyCastBehaviour.ENABLED)
.set(
TABLE_EXEC_SINK_NOT_NULL_ENFORCER,
ExecutionConfigOptions.NotNullEnforcer.ERROR);
}
@Override
Stream<TestSetSpec> getTestSetSpecs() {
return Stream.of(
TestSetSpec.forFunction(
BuiltInFunctionDefinitions.CAST, "legacy cast failure returns null")
.onFieldsWithData("invalid")
.andDataTypes(STRING().notNull())
.testSqlRuntimeError(
"CAST(f0 AS BIGINT)",
"Column 'EXPR$0' is NOT NULL, however, a null value is "
+ "being written into it. You can set job configuration "
+ "'table.exec.sink.not-null-enforcer'='DROP' to suppress "
+ "this exception and drop such records silently."));
}
}
| CastFunctionMiscLegacyITCase |
java | apache__camel | components/camel-smooks/src/test/java/org/apache/camel/component/smooks/SmooksProcessorTest.java | {
"start": 20162,
"end": 20826
} | class ____ implements DataSource {
private final String string;
private StringDataSource(final String string) {
this.string = string;
}
public String getContentType() {
return "text/plain";
}
public InputStream getInputStream() throws IOException {
return new ByteArrayInputStream(string.getBytes());
}
public String getName() {
return "StringDataSource";
}
public OutputStream getOutputStream() throws IOException {
throw new IOException("Method 'getOutputStream' is not implemented");
}
}
}
| StringDataSource |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-panache/runtime/src/main/java/io/quarkus/hibernate/orm/panache/runtime/JpaOperations.java | {
"start": 224,
"end": 991
} | class ____ extends AbstractJpaOperations<PanacheQueryImpl<?>> {
/**
* Provides the default implementations for quarkus to wire up. Should not be used by third party developers.
*/
public static final JpaOperations INSTANCE = new JpaOperations();
@Override
protected PanacheQueryImpl<?> createPanacheQuery(Session session, String query, String originalQuery, String orderBy,
Object paramsArrayOrMap) {
return new PanacheQueryImpl<>(session, query, originalQuery, orderBy, paramsArrayOrMap);
}
@Override
public List<?> list(PanacheQueryImpl<?> query) {
return query.list();
}
@Override
public Stream<?> stream(PanacheQueryImpl<?> query) {
return query.stream();
}
}
| JpaOperations |
java | resilience4j__resilience4j | resilience4j-reactor/src/main/java/io/github/resilience4j/reactor/IllegalPublisherException.java | {
"start": 87,
"end": 349
} | class ____ extends IllegalStateException {
public IllegalPublisherException(Publisher publisher) {
super("Publisher of type <" + publisher.getClass().getSimpleName()
+ "> is not supported by this operator");
}
}
| IllegalPublisherException |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/component/ApiMethod.java | {
"start": 952,
"end": 1687
} | interface ____ {
/**
* Returns method name.
*
* @return name
*/
String getName();
/**
* Returns method result type.
*
* @return result type
*/
Class<?> getResultType();
/**
* Returns method argument names.
*
* @return argument names
*/
List<String> getArgNames();
/**
* Returns setter argument names.
*
* @return setter argument names
*/
List<String> getSetterArgNames();
/**
* Return method argument types.
*
* @return argument types
*/
List<Class<?>> getArgTypes();
/**
* Returns {@link Method} in proxy type.
*
* @return method
*/
Method getMethod();
}
| ApiMethod |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java | {
"start": 1090,
"end": 1227
} | class ____ using a {@link java.security.KeyStore} (with configurable {@link KeyStore#getType() type}) or PEM based files.
*/
public | supports |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/request/IbmWatsonxUtils.java | {
"start": 333,
"end": 688
} | class ____ {
public static final String ML = "ml";
public static final String V1 = "v1";
public static final String TEXT = "text";
public static final String EMBEDDINGS = "embeddings";
public static final String RERANKS = "reranks";
public static final String COMPLETIONS = "chat";
private IbmWatsonxUtils() {}
}
| IbmWatsonxUtils |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.