language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeInSameHourWindow.java | {
"start": 928,
"end": 1681
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldBeInSameHourWindow}</code>.
*
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeInSameHourWindow(Date actual, Date other) {
return new ShouldBeInSameHourWindow(actual, other);
}
private ShouldBeInSameHourWindow(Date actual, Date other) {
super("%nExpecting actual:%n %s%nto be close to:%n %s%nby less than one hour (strictly) but difference was: "
+ formatTimeDifference(actual, other), actual, other);
}
}
| ShouldBeInSameHourWindow |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/locking/LockRefreshReferencedAndCascadingTest.java | {
"start": 1374,
"end": 6468
} | class ____ {
@BeforeEach
public void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction(entityManager -> {
var anotherReferencedEntity = new AnotherReferencedEntity(1L, "another lazy" );
var e1 = new ReferencedEntity( 0L, "lazy", anotherReferencedEntity );
var e2 = new ReferencedEntity( 1L, "eager", null );
entityManager.persist( e1 );
entityManager.persist( e2 );
var e3 = new MainEntity( 0L, e1, e2 );
entityManager.persist( e3 );
} );
}
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.dropData();
}
@Test
@SkipForDialect(dialectClass = InformixDialect.class,
reason = "Informix disallows FOR UPDATE with multi-table queries")
public void testRefreshBeforeRead(EntityManagerFactoryScope scope) {
scope.inTransaction(entityManager -> {
var m = entityManager.find( MainEntity.class, 0L );
assertNotNull( m );
var lazyReference = m.referencedLazy();
var eagerReference = m.referencedEager();
assertNotNull( lazyReference );
assertNotNull( eagerReference );
assertFalse( Hibernate.isInitialized( lazyReference ) );
// First refresh, then access
entityManager.refresh( eagerReference, LockModeType.PESSIMISTIC_WRITE );
assertFalse( Hibernate.isInitialized( lazyReference ) );
entityManager.refresh( lazyReference, LockModeType.PESSIMISTIC_WRITE );
assertTrue( Hibernate.isInitialized( lazyReference ) );
assertTrue( Hibernate.isInitialized( lazyReference.anotherReferencedEntity ) );
assertEquals( "lazy", lazyReference.status() );
assertEquals( "eager", eagerReference.status() );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( lazyReference ) );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( lazyReference.getAnotherReferencedEntity() ) );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( eagerReference ) );
} );
}
@Test
public void testRefresh(EntityManagerFactoryScope scope) {
scope.inTransaction(entityManager -> {
var m = entityManager.find( MainEntity.class, 0L );
assertNotNull( m );
var lazyReference = m.referencedLazy();
var eagerReference = m.referencedEager();
assertNotNull( lazyReference );
assertNotNull( eagerReference );
assertFalse( Hibernate.isInitialized( lazyReference ) );
entityManager.refresh( m );
// CascadeType.REFRESH will trigger the initialization
assertTrue( Hibernate.isInitialized( lazyReference ) );
} );
}
@Test
@SkipForDialect(dialectClass = InformixDialect.class,
reason = "Informix disallows FOR UPDATE with multi-table queries")
public void testRefreshAfterRead(EntityManagerFactoryScope scope) {
scope.inTransaction(entityManager -> {
var m = entityManager.find( MainEntity.class, 0L );
assertNotNull( m );
var lazyReference = m.referencedLazy();
var eagerReference = m.referencedEager();
assertNotNull( lazyReference );
assertNotNull( eagerReference );
assertFalse( Hibernate.isInitialized( lazyReference ) );
// First access, the refresh
assertEquals( "lazy", lazyReference.status() );
assertEquals( "eager", eagerReference.status() );
entityManager.refresh( lazyReference, LockModeType.PESSIMISTIC_WRITE );
entityManager.refresh( eagerReference, LockModeType.PESSIMISTIC_WRITE );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( lazyReference ) );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( eagerReference ) );
} );
}
@Test
@SkipForDialect(dialectClass = InformixDialect.class,
reason = "Informix disallows FOR UPDATE with multi-table queries")
public void testRefreshLockMode(EntityManagerFactoryScope scope) {
scope.inTransaction(entityManager -> {
var m = entityManager.find( MainEntity.class, 0L );
assertNotNull( m );
var lazyReference = m.referencedLazy();
var eagerReference = m.referencedEager();
assertNotNull( lazyReference );
assertNotNull( eagerReference );
assertFalse( Hibernate.isInitialized( lazyReference ) );
entityManager.refresh( m, LockModeType.PESSIMISTIC_WRITE );
assertTrue( Hibernate.isInitialized( lazyReference ) );
var anotherReferencedEntity = lazyReference.getAnotherReferencedEntity();
assertTrue( Hibernate.isInitialized( anotherReferencedEntity ) );
assertEquals( LockModeType.PESSIMISTIC_WRITE, entityManager.getLockMode( lazyReference ) );
assertEquals(
LockModeType.PESSIMISTIC_WRITE,
entityManager.getLockMode( anotherReferencedEntity )
);
} );
}
@Test
@SkipForDialect(dialectClass = InformixDialect.class,
reason = "Informix disallows FOR UPDATE with multi-table queries")
public void testFindWithLockMode(EntityManagerFactoryScope scope) {
scope.inTransaction(session -> {
var mainEntity = session.find( MainEntity.class, 0L, LockModeType.PESSIMISTIC_WRITE );
assertThat( session.getLockMode( mainEntity.referencedEager() ) ).isEqualTo( LockModeType.PESSIMISTIC_WRITE );
} );
}
@Entity(name = "MainEntity")
public static | LockRefreshReferencedAndCascadingTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java | {
"start": 3962,
"end": 5066
} | class ____
implements AuthenticationHandler {
@Override
public String getType() {
return "dummy";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
if (request.getParameter("authenticated") != null) {
token = new AuthenticationToken(request.getParameter("authenticated"),
"U", "test");
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
}
return token;
}
}
public static | DummyAuthenticationHandler |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedbeans/mixed/_target/FishTankDto.java | {
"start": 249,
"end": 1368
} | class ____ {
private FishDto fish;
private WaterPlantDto plant;
private String name;
private MaterialDto material;
private OrnamentDto ornament;
private WaterQualityDto quality;
public FishDto getFish() {
return fish;
}
public void setFish(FishDto fish) {
this.fish = fish;
}
public WaterPlantDto getPlant() {
return plant;
}
public void setPlant(WaterPlantDto plant) {
this.plant = plant;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public MaterialDto getMaterial() {
return material;
}
public void setMaterial(MaterialDto material) {
this.material = material;
}
public OrnamentDto getOrnament() {
return ornament;
}
public void setOrnament(OrnamentDto ornament) {
this.ornament = ornament;
}
public WaterQualityDto getQuality() {
return quality;
}
public void setQuality(WaterQualityDto quality) {
this.quality = quality;
}
}
| FishTankDto |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/graal/GraalVM.java | {
"start": 802,
"end": 4298
} | class ____ {
private static final String VNUM = "(?<VNUM>[1-9][0-9]*(?:\\.(?:0|[1-9][0-9]*))*)";
private static final String PRE = "(?:-(?<PRE>[a-zA-Z0-9]+))?";
private static final String BUILD = "\\+(?<BUILD>0|[1-9][0-9]*(?:\\.(?:0|[1-9][0-9]*))*)?";
private static final String OPT = "(?:-(?<OPT>[-a-zA-Z0-9.]+))?";
private static final String VSTR_FORMAT = VNUM + "(?:" + PRE + BUILD + ")?" + OPT;
private static final String GRAALVM_CE_VERS_PREFIX = "GraalVM CE ";
private static final String LIBERICA_NIK_VERS_PREFIX = "Liberica-NIK-";
private static final String MANDREL_VERS_PREFIX = "Mandrel-";
private static final String ORACLE_GRAALVM_VERS_PREFIX = "Oracle GraalVM ";
private static final String VENDOR_PREFIX_GROUP = "VENDORPREFIX";
private static final String VENDOR_PREFIX = "(?<" + VENDOR_PREFIX_GROUP + ">" + GRAALVM_CE_VERS_PREFIX + "|"
+ LIBERICA_NIK_VERS_PREFIX + "|" + MANDREL_VERS_PREFIX + "|" + ORACLE_GRAALVM_VERS_PREFIX + ")";
private static final Pattern VENDOR_VERS_PATTERN = Pattern.compile(VENDOR_PREFIX + VSTR_FORMAT);
private static final String VERSION_GROUP = "VNUM";
static Version parse(String value) {
Matcher versionMatcher = VENDOR_VERS_PATTERN.matcher(value);
if (versionMatcher.find()) {
String vendor = versionMatcher.group(VENDOR_PREFIX_GROUP);
if (GRAALVM_CE_VERS_PREFIX.equals(vendor) || ORACLE_GRAALVM_VERS_PREFIX.equals(vendor)) {
String version = versionMatcher.group(VERSION_GROUP);
String tokens[] = version.split("\\.", 3);
String jdkFeature = tokens[0];
String jdkVers = jdkFeature;
if (tokens.length == 3 && !graalVMFutureVers(tokens)) {
String interim = tokens[1];
String update = tokens[2].split("\\+")[0];
jdkVers = String.format("%s.%s.%s", jdkFeature, interim, update);
}
// For JDK 26+ there is no more version mapping use the JDK version
String versionMapping = Version.GRAAL_MAPPING.getOrDefault(jdkFeature, version);
return new Version(value, versionMapping, jdkVers, Distribution.GRAALVM);
} else if (LIBERICA_NIK_VERS_PREFIX.equals(vendor)) {
return new Version(value, versionMatcher.group(VERSION_GROUP), Distribution.LIBERICA);
} else if (MANDREL_VERS_PREFIX.equals(vendor)) {
return new Version(value, versionMatcher.group(VERSION_GROUP), Distribution.MANDREL);
}
}
log.warnf("Failed to parse GraalVM version from: %s. Defaulting to currently supported version %s ", value,
Version.CURRENT);
return Version.CURRENT;
}
// Anything beyond 25.0 is a future GraalVM version not suitable for Runtime.Version.parse()
private static boolean graalVMFutureVers(String[] tokens) {
try {
int feature = Integer.valueOf(tokens[0]);
int interim = Integer.valueOf(tokens[1]);
return feature > 25 || (feature == 25 && interim > 0);
} catch (NumberFormatException e) {
return false;
}
}
}
public static | VersionParseHelper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/MappedSuperclassWithEmbeddableTest.java | {
"start": 2188,
"end": 2449
} | class ____ {
@Embedded
private EmbeddedValue superField;
public EmbeddedValue getSuperField() {
return superField;
}
public void setSuperField(EmbeddedValue superField) {
this.superField = superField;
}
}
@Embeddable
public static | BaseEntity |
java | apache__camel | components/camel-language/src/main/java/org/apache/camel/component/language/LanguageComponent.java | {
"start": 1458,
"end": 4404
} | class ____ extends DefaultComponent {
public static final String RESOURCE = "resource:";
@Metadata(defaultValue = "true", description = "Sets whether to use resource content cache or not")
private boolean contentCache = true;
@Metadata
private boolean allowTemplateFromHeader;
public LanguageComponent() {
}
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
String name = StringHelper.before(remaining, ":");
String script = StringHelper.after(remaining, ":");
// no script then remaining is the language name
if (name == null && script == null) {
name = remaining;
}
if (ObjectHelper.isEmpty(name)) {
throw new IllegalArgumentException("Illegal syntax. Name of language not given in uri: " + uri);
}
Language language = getCamelContext().resolveLanguage(name);
String resourceUri = null;
String resource = script;
if (resource != null) {
boolean resourcePrefix = false;
if (resource.startsWith(RESOURCE)) {
resourcePrefix = true;
resource = resource.substring(RESOURCE.length());
}
if (resourcePrefix) {
// the script is a uri for a resource
resourceUri = resource;
// then the script should be null
script = null;
} else {
// the script is provided as text in the uri, so decode to utf-8
script = URLDecoder.decode(script, StandardCharsets.UTF_8);
// then the resource should be null
resourceUri = null;
}
}
LanguageEndpoint endpoint = new LanguageEndpoint(uri, this, language, null, resourceUri);
endpoint.setScript(script);
endpoint.setAllowTemplateFromHeader(allowTemplateFromHeader);
endpoint.setContentCache(contentCache);
setProperties(endpoint, parameters);
return endpoint;
}
public boolean isContentCache() {
return contentCache;
}
/**
* Sets whether to use resource content cache or not
*/
public void setContentCache(boolean contentCache) {
this.contentCache = contentCache;
}
public boolean isAllowTemplateFromHeader() {
return allowTemplateFromHeader;
}
/**
* Whether to allow to use resource template from header or not (default false).
*
* Enabling this allows to specify dynamic templates via message header. However this can be seen as a potential
* security vulnerability if the header is coming from a malicious user, so use this with care.
*/
public void setAllowTemplateFromHeader(boolean allowTemplateFromHeader) {
this.allowTemplateFromHeader = allowTemplateFromHeader;
}
}
| LanguageComponent |
java | redisson__redisson | redisson-hibernate/redisson-hibernate-4/src/main/java/org/redisson/hibernate/region/RedissonQueryRegion.java | {
"start": 909,
"end": 1265
} | class ____ extends BaseRegion implements QueryResultsRegion {
public RedissonQueryRegion(RMapCache<Object, Object> mapCache, ServiceManager serviceManager,
RegionFactory regionFactory, Properties properties, String defaultKey) {
super(mapCache, serviceManager, regionFactory, null, properties, defaultKey);
}
}
| RedissonQueryRegion |
java | greenrobot__EventBus | EventBusTest/src/org/greenrobot/eventbus/AndroidComponentsAvailabilityTest.java | {
"start": 213,
"end": 414
} | class ____ {
@Test
public void shouldBeAvailable() {
assertTrue(AndroidComponents.areAvailable());
assertNotNull(AndroidComponents.get());
}
}
| AndroidComponentsAvailabilityTest |
java | spring-projects__spring-framework | spring-tx/src/test/java/org/springframework/transaction/interceptor/RuleBasedTransactionAttributeTests.java | {
"start": 6732,
"end": 6832
} | class ____ extends Exception {}
@SuppressWarnings("serial")
private static final | MyBusinessException |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableSampleWithObservable.java | {
"start": 932,
"end": 1653
} | class ____<T> extends AbstractObservableWithUpstream<T, T> {
final ObservableSource<?> other;
final boolean emitLast;
public ObservableSampleWithObservable(ObservableSource<T> source, ObservableSource<?> other, boolean emitLast) {
super(source);
this.other = other;
this.emitLast = emitLast;
}
@Override
public void subscribeActual(Observer<? super T> t) {
SerializedObserver<T> serial = new SerializedObserver<>(t);
if (emitLast) {
source.subscribe(new SampleMainEmitLast<>(serial, other));
} else {
source.subscribe(new SampleMainNoLast<>(serial, other));
}
}
abstract static | ObservableSampleWithObservable |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/SqmWindowFunction.java | {
"start": 375,
"end": 534
} | interface ____<T> extends JpaFunction<T>, SqmExpression<T> {
SqmPredicate getFilter();
Boolean getRespectNulls();
Boolean getFromFirst();
}
| SqmWindowFunction |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/transformer/CustomTransformerDefinition.java | {
"start": 2122,
"end": 2262
} | class ____ of the {@link Transformer}
*/
public void setClassName(String className) {
this.className = className;
}
}
| name |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/sink/SocketClientSinkTest.java | {
"start": 1898,
"end": 10824
} | class ____ {
private static final String TEST_MESSAGE = "testSocketSinkInvoke";
private static final String EXCEPTION_MESSGAE =
"Failed to send message '" + TEST_MESSAGE + "\n'";
private static final String host = "127.0.0.1";
private SerializationSchema<String> simpleSchema =
new SerializationSchema<String>() {
@Override
public byte[] serialize(String element) {
return element.getBytes(ConfigConstants.DEFAULT_CHARSET);
}
};
@Test
void testSocketSink() throws Exception {
final ServerSocket server = new ServerSocket(0);
final int port = server.getLocalPort();
CheckedThread sinkRunner =
new CheckedThread("Test sink runner") {
@Override
public void go() throws Exception {
SocketClientSink<String> simpleSink =
new SocketClientSink<>(host, port, simpleSchema, 0);
simpleSink.open(DefaultOpenContext.INSTANCE);
simpleSink.invoke(TEST_MESSAGE + '\n', SinkContextUtil.forTimestamp(0));
simpleSink.close();
}
};
sinkRunner.start();
Socket sk = NetUtils.acceptWithoutTimeout(server);
BufferedReader rdr = new BufferedReader(new InputStreamReader(sk.getInputStream()));
String value = rdr.readLine();
sinkRunner.sync();
server.close();
assertThat(value).isEqualTo(TEST_MESSAGE);
}
@Test
void testSinkAutoFlush() throws Exception {
final ServerSocket server = new ServerSocket(0);
final int port = server.getLocalPort();
final SocketClientSink<String> simpleSink =
new SocketClientSink<>(host, port, simpleSchema, 0, true);
simpleSink.open(DefaultOpenContext.INSTANCE);
CheckedThread sinkRunner =
new CheckedThread("Test sink runner") {
@Override
public void go() throws Exception {
// need two messages here: send a fin to cancel the client
// state:FIN_WAIT_2 while the server is CLOSE_WAIT
simpleSink.invoke(TEST_MESSAGE + '\n', SinkContextUtil.forTimestamp(0));
}
};
sinkRunner.start();
Socket sk = NetUtils.acceptWithoutTimeout(server);
BufferedReader rdr = new BufferedReader(new InputStreamReader(sk.getInputStream()));
String value = rdr.readLine();
sinkRunner.sync();
simpleSink.close();
server.close();
assertThat(value).isEqualTo(TEST_MESSAGE);
}
@Test
void testSocketSinkNoRetry() throws Exception {
final ServerSocket server = new ServerSocket(0);
final int port = server.getLocalPort();
try {
CheckedThread serverRunner =
new CheckedThread("Test server runner") {
@Override
public void go() throws Exception {
Socket sk = NetUtils.acceptWithoutTimeout(server);
sk.close();
}
};
serverRunner.start();
SocketClientSink<String> simpleSink =
new SocketClientSink<>(host, port, simpleSchema, 0, true);
simpleSink.open(DefaultOpenContext.INSTANCE);
// wait socket server to close
serverRunner.sync();
assertThatThrownBy(
() -> {
// socket should be closed, so this should trigger a re-try
// need two messages here: send a fin to cancel the client
// state:FIN_WAIT_2 while
// the server is CLOSE_WAIT
while (true) { // we have to do this more often as the server side
// closed is not
// guaranteed to be noticed immediately
simpleSink.invoke(
TEST_MESSAGE + '\n', SinkContextUtil.forTimestamp(0));
}
})
// check whether throw a exception that reconnect failed.
.isInstanceOf(IOException.class)
.hasMessageContaining(EXCEPTION_MESSGAE);
assertThat(simpleSink.getCurrentNumberOfRetries()).isZero();
} finally {
IOUtils.closeQuietly(server);
}
}
@Test
void testRetry() throws Exception {
final ServerSocket[] serverSocket = new ServerSocket[1];
final ExecutorService[] executor = new ExecutorService[1];
try {
serverSocket[0] = new ServerSocket(0);
executor[0] = Executors.newCachedThreadPool();
int port = serverSocket[0].getLocalPort();
Callable<Void> serverTask =
new Callable<Void>() {
@Override
public Void call() throws Exception {
Socket socket = NetUtils.acceptWithoutTimeout(serverSocket[0]);
BufferedReader reader =
new BufferedReader(
new InputStreamReader(socket.getInputStream()));
String value = reader.readLine();
assertThat(value).isEqualTo("0");
socket.close();
return null;
}
};
Future<Void> serverFuture = executor[0].submit(serverTask);
final SocketClientSink<String> sink =
new SocketClientSink<>(
host, serverSocket[0].getLocalPort(), simpleSchema, -1, true);
// Create the connection
sink.open(DefaultOpenContext.INSTANCE);
// Initial payload => this will be received by the server an then the socket will be
// closed.
sink.invoke("0\n", SinkContextUtil.forTimestamp(0));
// Get future an make sure there was no problem. This will rethrow any Exceptions from
// the server.
serverFuture.get();
// Shutdown the server socket
serverSocket[0].close();
assertThat(serverSocket[0].isClosed()).isTrue();
// No retries expected at this point
assertThat(sink.getCurrentNumberOfRetries()).isZero();
final CountDownLatch retryLatch = new CountDownLatch(1);
final CountDownLatch again = new CountDownLatch(1);
Callable<Void> sinkTask =
new Callable<Void>() {
@Override
public Void call() throws Exception {
// Send next payload => server is down, should try to reconnect.
// We need to send more than just one packet to notice the closed
// connection.
while (retryLatch.getCount() != 0) {
sink.invoke("1\n");
}
return null;
}
};
Future<Void> sinkFuture = executor[0].submit(sinkTask);
while (sink.getCurrentNumberOfRetries() == 0) {
// Wait for a retry
Thread.sleep(100);
}
// OK the poor guy retried to write
retryLatch.countDown();
// Restart the server
try {
serverSocket[0] = new ServerSocket(port);
} catch (BindException be) {
// some other process may be using this port now
throw new AssumptionViolatedException(
"Could not bind server to previous port.", be);
}
Socket socket = NetUtils.acceptWithoutTimeout(serverSocket[0]);
BufferedReader reader =
new BufferedReader(new InputStreamReader(socket.getInputStream()));
// Wait for the reconnect
String value = reader.readLine();
assertThat(value).isEqualTo("1");
// OK the sink re-connected. :)
} finally {
if (serverSocket[0] != null) {
serverSocket[0].close();
}
if (executor[0] != null) {
executor[0].shutdown();
}
}
}
}
| SocketClientSinkTest |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/context/properties/ConfigurationPropertiesReportEndpointAutoConfiguration.java | {
"start": 2064,
"end": 3074
} | class ____ {
@Bean
@ConditionalOnMissingBean
ConfigurationPropertiesReportEndpoint configurationPropertiesReportEndpoint(
ConfigurationPropertiesReportEndpointProperties properties,
ObjectProvider<SanitizingFunction> sanitizingFunctions) {
return new ConfigurationPropertiesReportEndpoint(sanitizingFunctions.orderedStream().toList(),
properties.getShowValues());
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnBean(ConfigurationPropertiesReportEndpoint.class)
@ConditionalOnAvailableEndpoint(exposure = EndpointExposure.WEB)
ConfigurationPropertiesReportEndpointWebExtension configurationPropertiesReportEndpointWebExtension(
ConfigurationPropertiesReportEndpoint configurationPropertiesReportEndpoint,
ConfigurationPropertiesReportEndpointProperties properties) {
return new ConfigurationPropertiesReportEndpointWebExtension(configurationPropertiesReportEndpoint,
properties.getShowValues(), properties.getRoles());
}
}
| ConfigurationPropertiesReportEndpointAutoConfiguration |
java | apache__flink | flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopInputFormatBase.java | {
"start": 2436,
"end": 2519
} | class ____ between the Java and Scala API of Flink. */
@Internal
public abstract | shared |
java | apache__camel | dsl/camel-jbang/camel-jbang-plugin-kubernetes/src/main/java/org/apache/camel/dsl/jbang/core/commands/kubernetes/traits/DeploymentTrait.java | {
"start": 1241,
"end": 3311
} | class ____ extends BaseTrait {
public DeploymentTrait() {
super("deployment", 900);
}
@Override
public boolean configure(Traits traitConfig, TraitContext context) {
// disable the deployment trait if cronjob is enabled
boolean cronjobDisabled = traitConfig.getCronjob() == null
|| !Optional.ofNullable(traitConfig.getCronjob().getEnabled()).orElse(false);
// disable the deployment trait if knative-service is enabled
boolean knEnabled = false;
if (traitConfig.getKnativeService() != null) {
knEnabled = Optional.ofNullable(traitConfig.getKnativeService().getEnabled()).orElse(false);
}
return cronjobDisabled && !knEnabled;
}
@Override
public void apply(Traits traitConfig, TraitContext context) {
DeploymentBuilder deployment = new DeploymentBuilder()
.withNewMetadata()
.withName(context.getName())
.endMetadata()
.withNewSpec()
.withSelector(new LabelSelectorBuilder()
.withMatchLabels(Map.of(KUBERNETES_LABEL_NAME, context.getName()))
.build())
.endSpec();
Container containerTrait = Optional.ofNullable(traitConfig.getContainer()).orElseGet(Container::new);
Optional.ofNullable(containerTrait.getImagePullSecrets()).orElseGet(List::of).forEach(sec -> deployment.editSpec()
.editOrNewTemplate()
.editOrNewSpec()
.addNewImagePullSecret(sec)
.endSpec()
.endTemplate()
.endSpec());
if (context.getServiceAccount() != null) {
deployment.editSpec()
.editOrNewTemplate()
.editOrNewSpec()
.withServiceAccountName(context.getServiceAccount())
.endSpec()
.endTemplate()
.endSpec();
}
context.add(deployment);
}
}
| DeploymentTrait |
java | google__error-prone | core/src/test/java/com/google/errorprone/ErrorProneCompilerIntegrationTest.java | {
"start": 9562,
"end": 10128
} | class ____ extends BugChecker implements MethodTreeMatcher {
@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
return describeMatch(tree);
}
}
@Test
public void ignoreGeneratedConstructors() {
compilerBuilder.report(ScannerSupplier.fromBugCheckerClasses(ConstructorMatcher.class));
compiler = compilerBuilder.build();
Result exitCode =
compiler.compile(
Arrays.asList(
forSourceLines(
"Test.java", //
"public | ConstructorMatcher |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/NoContentResponseTest.java | {
"start": 1566,
"end": 1714
} | interface ____ {
@GET
InputStream getStream();
@GET
Response getResponse();
}
@Path("/test")
static | Client |
java | alibaba__nacos | ai/src/test/java/com/alibaba/nacos/ai/service/McpServerValidationServiceTest.java | {
"start": 2248,
"end": 31235
} | class ____ {
private McpServerValidationService mcpServerValidationService;
@Mock
private McpServerOperationService mcpServerOperationService;
@BeforeEach
void setUp() {
mcpServerValidationService = new McpServerValidationService();
ReflectionTestUtils.setField(mcpServerValidationService, "mcpServerOperationService", mcpServerOperationService);
}
@Test
void validateServersWithNullServers() throws NacosException {
String namespaceId = "test-namespace";
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, null);
assertFalse(result.isValid());
assertEquals(0, result.getTotalCount());
assertEquals(0, result.getValidCount());
assertEquals(0, result.getInvalidCount());
assertEquals(0, result.getDuplicateCount());
assertNotNull(result.getErrors());
assertFalse(result.getErrors().isEmpty());
}
@Test
void validateServersWithEmptyServers() throws NacosException {
String namespaceId = "test-namespace";
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, new ArrayList<>());
assertTrue(result.isValid());
assertEquals(0, result.getTotalCount());
assertEquals(0, result.getValidCount());
assertEquals(0, result.getInvalidCount());
assertEquals(0, result.getDuplicateCount());
assertNotNull(result.getErrors());
assertTrue(result.getErrors().isEmpty());
}
@Test
void validateServersWithValidServer() throws NacosException {
String namespaceId = "test-namespace";
List<McpServerDetailInfo> servers = new ArrayList<>();
McpServerDetailInfo validServer = createValidServer();
servers.add(validServer);
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertTrue(result.isValid());
assertEquals(1, result.getTotalCount());
assertEquals(1, result.getValidCount());
assertEquals(0, result.getInvalidCount());
assertEquals(0, result.getDuplicateCount());
assertNotNull(result.getServers());
assertEquals(1, result.getServers().size());
assertEquals(McpServerValidationConstants.STATUS_VALID, result.getServers().get(0).getStatus());
}
@Test
void validateServersWithInvalidServer() throws NacosException {
final String namespaceId = "test-namespace";
final List<McpServerDetailInfo> servers = new ArrayList<>();
McpServerDetailInfo invalidServer = new McpServerDetailInfo();
initializeVersionDetail(invalidServer);
invalidServer.setName(""); // Empty name should be invalid
invalidServer.setProtocol("invalid-protocol");
invalidServer.setDescription("");
servers.add(invalidServer);
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertFalse(result.isValid());
assertEquals(1, result.getTotalCount());
assertEquals(0, result.getValidCount());
assertEquals(1, result.getInvalidCount());
assertEquals(0, result.getDuplicateCount());
assertNotNull(result.getServers());
assertEquals(1, result.getServers().size());
assertEquals(McpServerValidationConstants.STATUS_INVALID, result.getServers().get(0).getStatus());
}
@Test
void validateServersWithDuplicateServerInBatch() throws NacosException {
final String namespaceId = "test-namespace";
List<McpServerDetailInfo> servers = new ArrayList<>();
McpServerDetailInfo server1 = createValidServer();
server1.setName("duplicate-server");
McpServerDetailInfo server2 = createValidServer();
server2.setName("duplicate-server"); // Same name as server1
servers.add(server1);
servers.add(server2);
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertTrue(result.isValid());
assertEquals(2, result.getTotalCount());
assertEquals(1, result.getValidCount());
assertEquals(0, result.getInvalidCount());
assertEquals(1, result.getDuplicateCount());
assertNotNull(result.getServers());
assertEquals(2, result.getServers().size());
McpServerValidationItem item1 = result.getServers().get(0);
McpServerValidationItem item2 = result.getServers().get(1);
// First one should be valid
assertEquals(McpServerValidationConstants.STATUS_VALID, item1.getStatus());
// Second one should be duplicate
assertEquals(McpServerValidationConstants.STATUS_DUPLICATE, item2.getStatus());
}
@Test
void validateServersWithExistingServer() throws NacosException {
final String namespaceId = "test-namespace";
List<McpServerDetailInfo> servers = new ArrayList<>();
McpServerDetailInfo server = createValidServer();
server.setName("existing-server");
servers.add(server);
// Mock that server already exists
McpServerDetailInfo existingServer = createValidServer();
existingServer.setId(UUID.randomUUID().toString());
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(existingServer);
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertTrue(result.isValid());
assertEquals(1, result.getTotalCount());
assertEquals(0, result.getValidCount());
assertEquals(0, result.getInvalidCount());
assertEquals(1, result.getDuplicateCount());
assertNotNull(result.getServers());
assertEquals(1, result.getServers().size());
McpServerValidationItem item = result.getServers().get(0);
assertEquals(McpServerValidationConstants.STATUS_DUPLICATE, item.getStatus());
assertTrue(item.isExists());
}
@Test
void validateServersWithExceptionDuringValidation() throws NacosException {
String namespaceId = "test-namespace";
List<McpServerDetailInfo> servers = new ArrayList<>();
McpServerDetailInfo server = createValidServer();
servers.add(server);
// Mock exception during validation
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class)))
.thenThrow(new RuntimeException("Test exception"));
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertFalse(result.isValid());
assertEquals(1, result.getTotalCount());
assertEquals(0, result.getValidCount());
assertEquals(1, result.getInvalidCount());
assertEquals(0, result.getDuplicateCount());
assertNotNull(result.getErrors());
assertTrue(result.getErrors().isEmpty());
}
@Test
void validateSingleServerWithValidServer() throws Exception {
String namespaceId = "test-namespace";
McpServerDetailInfo server = createValidServer();
server.setName("valid-server");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("valid-server", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_VALID, item.getStatus());
assertNotNull(item.getErrors());
assertTrue(item.getErrors().isEmpty());
assertFalse(item.isExists());
assertEquals(server, item.getServer());
}
@Test
void validateSingleServerWithMissingName() throws Exception {
final String namespaceId = "test-namespace";
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName(""); // Empty name
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setDescription("Test description");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Server name is required"));
}
@Test
void validateSingleServerWithMissingProtocol() throws Exception {
final String namespaceId = "test-namespace";
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName("test-server");
server.setProtocol(""); // Empty protocol
server.setDescription("Test description");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("test-server", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Protocol is required"));
}
@Test
void validateSingleServerWithInvalidProtocol() throws Exception {
final String namespaceId = "test-namespace";
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName("test-server");
server.setProtocol("invalid-protocol");
server.setDescription("Test description");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("test-server", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Invalid protocol: invalid-protocol"));
}
@Test
void validateSingleServerWithMissingDescription() throws Exception {
final String namespaceId = "test-namespace";
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName("test-server");
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setDescription(""); // Empty description
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("test-server", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Description is required"));
}
@Test
void validateSingleServerWithDuplicateInBatch() throws Exception {
String namespaceId = "test-namespace";
McpServerDetailInfo server = createValidServer();
server.setName("duplicate-server");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// Add server name + version to existing names set to simulate duplicate in batch
java.util.Set<String> existingNames = new java.util.HashSet<>();
existingNames.add("duplicate-server" + server.getVersionDetail().getVersion());
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, existingNames);
assertNotNull(item);
assertEquals("duplicate-server", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_DUPLICATE, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Duplicate server name in import batch: duplicate-server"));
}
@Test
void validateSingleServerWithExceptionDuringExistenceCheck() throws Exception {
String namespaceId = "test-namespace";
McpServerDetailInfo server = createValidServer();
server.setName("test-server");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class)))
.thenThrow(new RuntimeException("Test exception"));
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("test-server", item.getServerName());
// Should still be invalid if existence check fails
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertNotNull(item.getErrors());
assertFalse(item.getErrors().isEmpty());
assertTrue(item.getErrors().contains("Error checking existing server: Test exception"));
}
@Test
void isValidProtocolWithValidProtocols() throws Exception {
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod("isValidProtocol", String.class);
method.setAccessible(true);
// Test all valid protocols
assertTrue((Boolean) method.invoke(mcpServerValidationService, AiConstants.Mcp.MCP_PROTOCOL_STDIO));
assertTrue((Boolean) method.invoke(mcpServerValidationService, AiConstants.Mcp.MCP_PROTOCOL_SSE));
assertTrue((Boolean) method.invoke(mcpServerValidationService, AiConstants.Mcp.MCP_PROTOCOL_STREAMABLE));
assertTrue((Boolean) method.invoke(mcpServerValidationService, AiConstants.Mcp.MCP_PROTOCOL_HTTP));
assertTrue((Boolean) method.invoke(mcpServerValidationService, AiConstants.Mcp.MCP_PROTOCOL_DUBBO));
}
@Test
void isValidProtocolWithInvalidProtocol() throws Exception {
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod("isValidProtocol", String.class);
method.setAccessible(true);
assertFalse((Boolean) method.invoke(mcpServerValidationService, "invalid-protocol"));
assertFalse((Boolean) method.invoke(mcpServerValidationService, ""));
assertFalse((Boolean) method.invoke(mcpServerValidationService, (String) null));
}
@Test
void validateProtocolSpecificConfigWithValidStdioConfig() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_STDIO);
// Valid stdio config with localServerConfig
Map<String, Object> localConfig = new HashMap<>();
localConfig.put("command", "test-command");
server.setLocalServerConfig(localConfig);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertTrue(errors.isEmpty());
}
@Test
void validateProtocolSpecificConfigWithValidStdioConfigWithPackages() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_STDIO);
// Valid stdio config with packages
List<Package> packages = new ArrayList<>();
packages.add(new Package());
server.setPackages(packages);
server.setLocalServerConfig(null); // No local config, but has packages
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertTrue(errors.isEmpty());
}
@Test
void validateProtocolSpecificConfigWithInvalidStdioConfig() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_STDIO);
// Invalid stdio config - no local config and no packages
server.setLocalServerConfig(null);
server.setPackages(null);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertFalse(errors.isEmpty());
assertTrue(errors.contains("Local server configuration or packages are required for stdio protocol"));
}
@Test
void validateProtocolSpecificConfigWithValidRemoteConfig() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
// Valid remote config
McpServerRemoteServiceConfig remoteConfig = new McpServerRemoteServiceConfig();
server.setRemoteServerConfig(remoteConfig);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertTrue(errors.isEmpty());
}
@Test
void validateProtocolSpecificConfigWithInvalidRemoteConfig() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
// Invalid remote config - no remote config
server.setRemoteServerConfig(null);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertFalse(errors.isEmpty());
assertTrue(errors.contains("Remote server configuration is required for " + AiConstants.Mcp.MCP_PROTOCOL_HTTP + " protocol"));
}
@Test
void validateProtocolSpecificConfigWithValidToolSpec() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setRemoteServerConfig(new McpServerRemoteServiceConfig());
// Valid tool spec with tools
McpToolSpecification toolSpec = new McpToolSpecification();
List<McpTool> tools = new ArrayList<>();
McpTool tool = new McpTool();
tool.setName("test-tool");
tools.add(tool);
toolSpec.setTools(tools);
server.setToolSpec(toolSpec);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertTrue(errors.isEmpty());
}
@Test
void validateProtocolSpecificConfigWithInvalidToolSpec() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setRemoteServerConfig(new McpServerRemoteServiceConfig());
// Invalid tool spec - empty tools list
McpToolSpecification toolSpec = new McpToolSpecification();
toolSpec.setTools(new ArrayList<>()); // Empty tools list
server.setToolSpec(toolSpec);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertFalse(errors.isEmpty());
assertTrue(errors.contains("Tool specification should contain at least one tool"));
}
@Test
void validateProtocolSpecificConfigWithNullToolSpec() throws Exception {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setRemoteServerConfig(new McpServerRemoteServiceConfig());
// No tool spec - should be valid
server.setToolSpec(null);
List<String> errors = new ArrayList<>();
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateProtocolSpecificConfig", McpServerDetailInfo.class, List.class);
method.setAccessible(true);
method.invoke(mcpServerValidationService, server, errors);
assertNotNull(errors);
assertTrue(errors.isEmpty());
}
@Test
void validateSingleServerDoesNotCallIndexWhenNameIsBlank() throws Exception {
final String namespaceId = "test-namespace";
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName(""); // Blank name
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setDescription("Test description");
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
// Should not call the index when name is blank
// 使用反射调用私有方法
Method method = McpServerValidationService.class.getDeclaredMethod(
"validateSingleServer", String.class, McpServerDetailInfo.class, Set.class);
method.setAccessible(true);
McpServerValidationItem item = (McpServerValidationItem) method.invoke(
mcpServerValidationService, namespaceId, server, new HashSet<>());
assertNotNull(item);
assertEquals("", item.getServerName());
assertEquals(McpServerValidationConstants.STATUS_INVALID, item.getStatus());
assertEquals("Server name is required", item.getErrors().get(0));
}
@Test
void validateServersWithMixedValidAndInvalidServers() throws NacosException {
final String namespaceId = "test-namespace";
List<McpServerDetailInfo> servers = new ArrayList<>();
// Add a valid server
McpServerDetailInfo validServer = createValidServer();
validServer.setName("valid-server");
servers.add(validServer);
// Add an invalid server
McpServerDetailInfo invalidServer = new McpServerDetailInfo();
initializeVersionDetail(invalidServer);
invalidServer.setName(""); // Missing name
invalidServer.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
invalidServer.setDescription("Test description");
servers.add(invalidServer);
// Add a duplicate server
McpServerDetailInfo duplicateServer = createValidServer();
duplicateServer.setName("valid-server"); // Same name as validServer
servers.add(duplicateServer);
when(mcpServerOperationService.getMcpServerDetail(anyString(), anyString(), anyString(), nullable(String.class))).thenReturn(null);
McpServerImportValidationResult result = mcpServerValidationService.validateServers(namespaceId, servers);
assertFalse(result.isValid());
assertEquals(3, result.getTotalCount());
assertEquals(1, result.getValidCount());
assertEquals(1, result.getInvalidCount());
assertEquals(1, result.getDuplicateCount());
assertNotNull(result.getServers());
assertEquals(3, result.getServers().size());
}
private void initializeVersionDetail(McpServerDetailInfo server) {
if (server.getVersionDetail() == null) {
ServerVersionDetail versionDetail = new ServerVersionDetail();
versionDetail.setVersion("1.0.0");
server.setVersionDetail(versionDetail);
}
}
private McpServerDetailInfo createValidServer() {
McpServerDetailInfo server = new McpServerDetailInfo();
initializeVersionDetail(server);
server.setName("test-server");
server.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_HTTP);
server.setDescription("Test description");
McpServerRemoteServiceConfig remoteConfig = new McpServerRemoteServiceConfig();
server.setRemoteServerConfig(remoteConfig);
return server;
}
} | McpServerValidationServiceTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/parameters/converters/ZonedDateTimeParamConverter.java | {
"start": 779,
"end": 1390
} | class ____ extends TemporalSupplier<ZonedDateTimeParamConverter> {
public Supplier() {
}
public Supplier(String pattern, String dateTimeFormatterProviderClassName) {
super(pattern, dateTimeFormatterProviderClassName);
}
@Override
protected ZonedDateTimeParamConverter createConverter(DateTimeFormatter dateTimeFormatter) {
return new ZonedDateTimeParamConverter(dateTimeFormatter);
}
@Override
public String getClassName() {
return ZonedDateTimeParamConverter.class.getName();
}
}
}
| Supplier |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/EventProcessor.java | {
"start": 1920,
"end": 2038
} | interface ____<T> {
/**
* Process an event that is received.
*/
void process(T event);
}
| EventProcessor |
java | quarkusio__quarkus | test-framework/junit5/src/test/java/io/quarkus/test/junit/util/QuarkusTestProfileAwareClassOrdererTest.java | {
"start": 14881,
"end": 14962
} | class ____ {
}
@WithTestResource(Manager3.class)
private static | Test02a |
java | google__guice | extensions/persist/test/com/google/inject/persist/jpa/ManualLocalTransactionsWithCustomMatcherTest.java | {
"start": 1334,
"end": 3390
} | class ____ extends TestCase {
private Injector injector;
private static final String UNIQUE_TEXT = "some unique text" + new Date();
private static final String UNIQUE_TEXT_2 = "some other unique text" + new Date();
@Override
public void setUp() {
injector = Guice.createInjector(new JpaPersistModule("testUnit"));
//startup persistence
injector.getInstance(PersistService.class).start();
}
@Override
public void tearDown() {
injector.getInstance(EntityManagerFactory.class).close();
}
public void testSimpleCrossTxnWork() {
// pretend that the request was started here
injector.getInstance(UnitOfWork.class).begin();
EntityManager em = injector.getInstance(EntityManager.class);
JpaTestEntity entity =
injector
.getInstance(ManualLocalTransactionsWithCustomMatcherTest.TransactionalObject.class)
.runOperationInTxn();
injector
.getInstance(ManualLocalTransactionsWithCustomMatcherTest.TransactionalObject.class)
.runOperationInTxn2();
//persisted entity should remain in the same em (which should still be open)
assertTrue(
"EntityManager appears to have been closed across txns!",
injector.getInstance(EntityManager.class).contains(entity));
assertTrue("EntityManager appears to have been closed across txns!", em.contains(entity));
assertTrue("EntityManager appears to have been closed across txns!", em.isOpen());
injector.getInstance(UnitOfWork.class).end();
// try to query them back out
injector.getInstance(UnitOfWork.class).begin();
em = injector.getInstance(EntityManager.class);
assertNotNull(
em.createQuery("from JpaTestEntity where text = :text")
.setParameter("text", UNIQUE_TEXT)
.getSingleResult());
assertNotNull(
em.createQuery("from JpaTestEntity where text = :text")
.setParameter("text", UNIQUE_TEXT_2)
.getSingleResult());
em.close();
}
public static | ManualLocalTransactionsWithCustomMatcherTest |
java | apache__hadoop | hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCommand.java | {
"start": 3305,
"end": 4776
} | class ____ " + suiteClassName + " must be an" +
" implementation of " + HdfsCompatSuite.class.getName());
}
if (suite.getSuiteName() == null || suite.getSuiteName().isEmpty()) {
throw new HdfsCompatIllegalArgumentException(
"suite " + suiteClassName + " suiteName is empty");
}
for (HdfsCompatSuite defaultSuite : defaultSuites.values()) {
if (suite.getSuiteName().equalsIgnoreCase(defaultSuite.getSuiteName())) {
throw new HdfsCompatIllegalArgumentException(
"suite " + suiteClassName + " suiteName" +
" conflicts with default suite " + defaultSuite.getSuiteName());
}
}
if (!hasApiCase() && !hasShellCase()) {
throw new HdfsCompatIllegalArgumentException(
"suite " + suiteClassName + " is empty for both API and SHELL");
}
}
private boolean hasApiCase() {
return (suite.getApiCases() != null) &&
(suite.getApiCases().length > 0);
}
private boolean hasShellCase() {
return (suite.getShellCases() != null) &&
(suite.getShellCases().length > 0);
}
@VisibleForTesting
protected Map<String, HdfsCompatSuite> getDefaultSuites() {
Map<String, HdfsCompatSuite> defaultSuites = new HashMap<>();
defaultSuites.put("all", new HdfsCompatSuiteForAll());
defaultSuites.put("shell", new HdfsCompatSuiteForShell());
defaultSuites.put("tpcds", new HdfsCompatSuiteForTpcds());
return defaultSuites;
}
} | name |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/PojoTypeExtractionTest.java | {
"start": 4560,
"end": 4649
} | class ____ extends ComplexHierarchy<Tuple1<String>> {}
public static | ComplexHierarchyTop |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java | {
"start": 6623,
"end": 11210
} | class ____ extends TokenRenewer {
private static int counter = 0;
private static Token<?> lastRenewed = null;
private static Token<?> tokenToRenewIn2Sec = null;
private static boolean cancelled = false;
private static void reset() {
counter = 0;
lastRenewed = null;
tokenToRenewIn2Sec = null;
cancelled = false;
}
@Override
public boolean handleKind(Text kind) {
return KIND.equals(kind);
}
@Override
public boolean isManaged(Token<?> token) throws IOException {
return true;
}
@Override
public long renew(Token<?> t, Configuration conf) throws IOException {
if ( !(t instanceof MyToken)) {
if(conf.get("override_token_expire_time") != null) {
return System.currentTimeMillis() +
Long.parseLong(conf.get("override_token_expire_time"));
} else {
// renew in 3 seconds
return System.currentTimeMillis() + 3000;
}
}
MyToken token = (MyToken)t;
if(token.isCanceled()) {
throw new InvalidToken("token has been canceled");
}
lastRenewed = token;
counter ++;
LOG.info("Called MYDFS.renewdelegationtoken " + token +
";this dfs=" + this.hashCode() + ";c=" + counter);
if(tokenToRenewIn2Sec == token) {
// this token first renewal in 2 seconds
LOG.info("RENEW in 2 seconds");
tokenToRenewIn2Sec=null;
return 2*1000 + System.currentTimeMillis();
} else {
return 86400*1000 + System.currentTimeMillis();
}
}
@Override
public void cancel(Token<?> t, Configuration conf) {
cancelled = true;
if (t instanceof MyToken) {
MyToken token = (MyToken) t;
LOG.info("Cancel token " + token);
token.cancelToken();
}
}
}
private static Configuration conf;
DelegationTokenRenewer delegationTokenRenewer;
private MockRM rm;
private MockRM rm1;
private MockRM rm2;
private DelegationTokenRenewer localDtr;
@BeforeAll
public static void setUpClass() throws Exception {
conf = new Configuration();
// create a fake FileSystem (MyFS) and assosiate it
// with "hdfs" schema.
URI uri = new URI(DelegationTokenRenewer.SCHEME+"://localhost:0");
System.out.println("scheme is : " + uri.getScheme());
conf.setClass("fs." + uri.getScheme() + ".impl", MyFS.class, DistributedFileSystem.class);
FileSystem.setDefaultUri(conf, uri);
LOG.info("filesystem uri = " + FileSystem.getDefaultUri(conf).toString());
}
@BeforeEach
public void setUp() throws Exception {
counter = new AtomicInteger(0);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set("override_token_expire_time", "3000");
conf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL,
false);
UserGroupInformation.setConfiguration(conf);
eventQueue = new LinkedBlockingQueue<Event>();
dispatcher = new AsyncDispatcher(eventQueue);
dispatcher.init(conf);
Renewer.reset();
delegationTokenRenewer = createNewDelegationTokenRenewer(conf, counter);
RMContext mockContext = mock(RMContext.class);
ClientRMService mockClientRMService = mock(ClientRMService.class);
when(mockContext.getSystemCredentialsForApps()).thenReturn(
new ConcurrentHashMap<ApplicationId, SystemCredentialsForAppsProto>());
when(mockContext.getDelegationTokenRenewer()).thenReturn(
delegationTokenRenewer);
when(mockContext.getDispatcher()).thenReturn(dispatcher);
when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
InetSocketAddress sockAddr =
InetSocketAddress.createUnresolved("localhost", 1234);
when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
delegationTokenRenewer.setDelegationTokenRenewerPoolTracker(false);
delegationTokenRenewer.setRMContext(mockContext);
delegationTokenRenewer.init(conf);
delegationTokenRenewer.start();
}
@AfterEach
public void tearDown() throws Exception {
try {
dispatcher.close();
} catch (IOException e) {
LOG.debug("Unable to close the dispatcher. " + e);
}
delegationTokenRenewer.stop();
if (rm != null) {
rm.close();
rm = null;
}
if (rm1 != null) {
rm1.close();
rm1 = null;
}
if (rm2 != null) {
rm2.close();
rm2 = null;
}
if (localDtr != null) {
localDtr.close();
localDtr = null;
}
}
private static | Renewer |
java | alibaba__nacos | client/src/main/java/com/alibaba/nacos/client/utils/LogUtils.java | {
"start": 902,
"end": 1221
} | class ____ {
public static final Logger NAMING_LOGGER;
static {
NacosLogging.getInstance().loadConfiguration();
NAMING_LOGGER = getLogger("com.alibaba.nacos.client.naming");
}
public static Logger logger(Class<?> clazz) {
return getLogger(clazz);
}
}
| LogUtils |
java | quarkusio__quarkus | integration-tests/hibernate-validator/src/test/java/io/quarkus/hibernate/validator/runtime/ArcProxyBeanMetaDataClassNormalizerTest.java | {
"start": 1922,
"end": 1975
} | class ____ extends FirstSubclass {
}
}
| SecondSubclass |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MustBeClosedCheckerTest.java | {
"start": 23281,
"end": 23939
} | class ____ {
@MustBeClosed
Closeable mustBeClosedMethod() {
return null;
}
}
void forLoopCondition() {
try (var closeable = new Foo().mustBeClosedMethod()) {
for (int i = 0; i < closeable.method(); ++i) {}
}
}
}
""")
.doTest();
}
@Ignore("b/236715080")
@Test
public void forLoopUnfixable() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.errorprone.annotations.MustBeClosed;
| Foo |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 15454,
"end": 15765
} | class ____ extends ParentDepModule {}");
Source parentDep = CompilerTests.javaSource("test.ParentDepModule",
"package test;",
"",
"import dagger.Module;",
"",
"@Module(includes = {ParentDepIncluded.class, AlwaysIncluded.class})",
" | RefByDep |
java | micronaut-projects__micronaut-core | function/src/main/java/io/micronaut/function/executor/AbstractExecutor.java | {
"start": 1342,
"end": 4903
} | class ____<C> implements ApplicationContextProvider, Closeable, AutoCloseable {
/**
* The current {@link ApplicationContext}.
*/
protected ApplicationContext applicationContext;
/**
* Resolve a function from the {@link LocalFunctionRegistry}.
*
* @param localFunctionRegistry The {@link LocalFunctionRegistry}
* @param functionName The function name
* @return The method
*/
protected ExecutableMethod<Object, Object> resolveFunction(LocalFunctionRegistry localFunctionRegistry, String functionName) {
Optional<? extends ExecutableMethod<Object, Object>> registeredMethod;
if (functionName == null) {
registeredMethod = localFunctionRegistry.findFirst();
} else {
registeredMethod = localFunctionRegistry.find(functionName);
}
return registeredMethod
.orElseThrow(() -> new IllegalStateException("No function found for name: " + functionName));
}
/**
* Resolves the function name to execution for the environment.
*
* @param env The environment
* @return The function name
*/
protected String resolveFunctionName(Environment env) {
return env.getProperty(LocalFunctionRegistry.FUNCTION_NAME, String.class, (String) null);
}
/**
* @param context A platform specific context object
* @return Build the {@link ApplicationContext} to use
*/
protected ApplicationContext buildApplicationContext(@Nullable C context) {
if (applicationContext == null) {
final ApplicationContextBuilder contextBuilder = newApplicationContextBuilder();
final Package pkg = getClass().getPackage();
if (pkg != null) {
final String name = pkg.getName();
if (StringUtils.isNotEmpty(name)) {
contextBuilder.packages(name);
}
}
applicationContext = contextBuilder.build();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
if (applicationContext != null && applicationContext.isRunning()) {
applicationContext.close();
applicationContext = null;
}
}));
}
return applicationContext;
}
/**
* Builds a new builder.
*
* @return The {@link ApplicationContextBuilder}
*/
protected @NonNull ApplicationContextBuilder newApplicationContextBuilder() {
return ApplicationContext.builder(Environment.FUNCTION);
}
/**
* Start the environment specified.
*
* @param applicationContext the application context with the environment
* @return The environment within the context
*/
protected Environment startEnvironment(ApplicationContext applicationContext) {
if (!applicationContext.isRunning()) {
if (this instanceof PropertySource source) {
applicationContext.getEnvironment().addPropertySource(source);
}
return applicationContext
.start()
.getEnvironment();
} else {
return applicationContext.getEnvironment();
}
}
@Override
public @NonNull ApplicationContext getApplicationContext() {
return this.applicationContext;
}
@Override
@PreDestroy
public void close() {
try {
applicationContext.close();
} catch (Exception e) {
// ignore
}
}
}
| AbstractExecutor |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/ClusterPushHandler.java | {
"start": 264,
"end": 912
} | interface ____ {
/**
* Add a new {@link RedisClusterPushListener listener}.
*
* @param listener the listener, must not be {@code null}.
*/
void addListener(RedisClusterPushListener listener);
/**
* Remove an existing {@link RedisClusterPushListener listener}.
*
* @param listener the listener, must not be {@code null}.
*/
void removeListener(RedisClusterPushListener listener);
/**
* Returns a collection of {@link RedisClusterPushListener}.
*
* @return the collection of listeners.
*/
Collection<RedisClusterPushListener> getPushListeners();
}
| ClusterPushHandler |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/result/MockMvcResultHandlers.java | {
"start": 2827,
"end": 3685
} | class ____ extends PrintingResultHandler {
public PrintWriterPrintingResultHandler(PrintWriter writer) {
super(new ResultValuePrinter() {
@Override
public void printHeading(String heading) {
writer.println();
writer.println(String.format("%s:", heading));
}
@Override
public void printValue(String label, @Nullable Object value) {
if (value != null && value.getClass().isArray()) {
value = CollectionUtils.arrayToList(value);
}
writer.println(String.format("%17s = %s", label, value));
}
});
}
}
/**
* A {@link ResultHandler} that logs {@link MvcResult} details at
* {@code DEBUG} level via Apache Commons Logging.
*
* <p>Delegates to a {@link PrintWriterPrintingResultHandler} for
* building the log message.
*
* @since 4.2
*/
private static | PrintWriterPrintingResultHandler |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/codegen-config-factory/acme-codegen/deployment/src/test/java/org/acme/codegen/test/AcmeCodegenTest.java | {
"start": 309,
"end": 850
} | class ____ {
// Start unit test with your extension loaded
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnUnitTest() {
// Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
}
}
| AcmeCodegenTest |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/StartupInfoLoggerTests.java | {
"start": 1299,
"end": 5121
} | class ____ {
private final Log log = mock(Log.class);
private MockEnvironment environment;
@BeforeEach
void setUp() {
this.environment = new MockEnvironment();
this.environment.setProperty("spring.application.version", "1.2.3");
this.environment.setProperty("spring.application.pid", "42");
}
@Test
void startingFormat() {
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarting(this.log);
then(this.log).should()
.info(assertArg(
(message) -> assertThat(message.toString()).contains("Starting " + getClass().getSimpleName()
+ " v1.2.3 using Java " + System.getProperty("java.version") + " with PID 42 (started by "
+ System.getProperty("user.name") + " in " + System.getProperty("user.dir") + ")")));
}
@Test
void startingFormatWhenVersionIsNotAvailable() {
this.environment.setProperty("spring.application.version", "");
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarting(this.log);
then(this.log).should()
.info(assertArg(
(message) -> assertThat(message.toString()).contains("Starting " + getClass().getSimpleName()
+ " using Java " + System.getProperty("java.version") + " with PID 42 (started by "
+ System.getProperty("user.name") + " in " + System.getProperty("user.dir") + ")")));
}
@Test
void startingFormatWhenPidIsNotAvailable() {
this.environment.setProperty("spring.application.pid", "");
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarting(this.log);
then(this.log).should()
.info(assertArg(
(message) -> assertThat(message.toString()).contains("Starting " + getClass().getSimpleName()
+ " v1.2.3 using Java " + System.getProperty("java.version") + " (started by "
+ System.getProperty("user.name") + " in " + System.getProperty("user.dir") + ")")));
}
@Test
void startingFormatInAotMode() {
System.setProperty("spring.aot.enabled", "true");
try {
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarting(this.log);
then(this.log).should()
.info(assertArg((message) -> assertThat(message.toString())
.contains("Starting AOT-processed " + getClass().getSimpleName() + " v1.2.3 using Java "
+ System.getProperty("java.version") + " with PID 42 (started by "
+ System.getProperty("user.name") + " in " + System.getProperty("user.dir") + ")")));
}
finally {
System.clearProperty("spring.aot.enabled");
}
}
@Test
void startedFormat() {
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarted(this.log, new TestStartup(1345L, "Started"));
then(this.log).should()
.info(assertArg((message) -> assertThat(message.toString()).matches("Started " + getClass().getSimpleName()
+ " in \\d+\\.\\d{1,3} seconds \\(process running for 1.345\\)")));
}
@Test
void startedWithoutUptimeFormat() {
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarted(this.log, new TestStartup(null, "Started"));
then(this.log).should()
.info(assertArg((message) -> assertThat(message.toString())
.matches("Started " + getClass().getSimpleName() + " in \\d+\\.\\d{1,3} seconds")));
}
@Test
void restoredFormat() {
given(this.log.isInfoEnabled()).willReturn(true);
new StartupInfoLogger(getClass(), this.environment).logStarted(this.log, new TestStartup(null, "Restored"));
then(this.log).should()
.info(assertArg((message) -> assertThat(message.toString())
.matches("Restored " + getClass().getSimpleName() + " in \\d+\\.\\d{1,3} seconds")));
}
static | StartupInfoLoggerTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/inheritedmappingmethod/FastCarMapper.java | {
"start": 443,
"end": 602
} | interface ____ extends BoundMappable<FastCarDto, FastCar> {
FastCarMapper INSTANCE = Mappers.getMapper( FastCarMapper.class );
}
// CHECKSTYLE:ON
| FastCarMapper |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MixedMutabilityReturnTypeTest.java | {
"start": 16610,
"end": 17356
} | class ____ {
ImmutableList<Object> foo() {
if (hashCode() > 0) {
return ImmutableList.of();
}
var ints = ImmutableList.builder();
ints.add(1);
return ints.build();
}
}
""")
.setFixChooser(FixChoosers.SECOND)
.doTest();
}
/** Regression test for b/192399621 */
@Test
public void biMap_doesNotCrash() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.ImmutableBiMap;
| Test |
java | apache__camel | components/camel-mllp/src/test/java/org/apache/camel/test/util/PayloadBuilder.java | {
"start": 962,
"end": 6554
} | class ____ {
ByteArrayOutputStream builderStream = new ByteArrayOutputStream();
public PayloadBuilder() {
}
public PayloadBuilder(byte b) throws IOException {
this.append(b);
}
public PayloadBuilder(byte[] bytes) throws IOException {
this.append(bytes);
}
public PayloadBuilder(char... chars) throws IOException {
this.append(chars);
}
public PayloadBuilder(String... strings) throws IOException {
this.append(strings);
}
public static byte[] build(byte b) {
try {
return new PayloadBuilder(b).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(byte) failure", e);
}
}
public static byte[] build(byte b, byte... bytes) {
try {
return new PayloadBuilder(b).append(bytes).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(byte) failure", e);
}
}
public static byte[] build(byte[] bytes) {
try {
return new PayloadBuilder(bytes).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(byte[]) failure", e);
}
}
public static byte[] build(char c) {
try {
return new PayloadBuilder(c).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(char...) failure", e);
}
}
public static byte[] build(char c, char... chars) {
try {
return new PayloadBuilder(c).append(chars).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(char...) failure", e);
}
}
public static byte[] build(char[] chars) {
try {
return new PayloadBuilder(chars).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(char...) failure", e);
}
}
public static byte[] build(String s) {
try {
return new PayloadBuilder(s).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(String) failure", e);
}
}
public static byte[] build(String[] strings) {
try {
return new PayloadBuilder(strings).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(String[]) failure", e);
}
}
public static byte[] build(char start, String s) {
try {
return new PayloadBuilder(start)
.append(s)
.build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(String) failure", e);
}
}
public static byte[] build(char start, String s, char... end) {
try {
return new PayloadBuilder(start)
.append(s)
.append(end)
.build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(char, String, char...) failure", e);
}
}
public static byte[] build(char start, byte[] bytes, char... end) {
try {
return new PayloadBuilder(start)
.append(bytes)
.append(end).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(char, byte[], char...) failure", e);
}
}
public static byte[] build(String s, char... end) {
try {
return new PayloadBuilder(s)
.append(end).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(String, char...) failure", e);
}
}
public static byte[] build(byte[] bytes, char... end) {
try {
return new PayloadBuilder(bytes)
.append(end).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(byte[], char...) failure", e);
}
}
public static byte[] build(byte[] bytes, String s) {
try {
return new PayloadBuilder(bytes)
.append(s).build();
} catch (IOException e) {
throw new RuntimeCamelException("PayloadBuilder.build(byte[], String) failure", e);
}
}
public PayloadBuilder append(byte b) throws IOException {
builderStream.write(b);
return this;
}
public PayloadBuilder append(byte[] bytes) throws IOException {
builderStream.write(bytes);
return this;
}
public PayloadBuilder append(char... chars) throws IOException {
if (chars != null) {
for (char c : chars) {
builderStream.write(c);
}
}
return this;
}
public PayloadBuilder append(String... strings) throws IOException {
if (strings != null) {
for (String s : strings) {
builderStream.write(s.getBytes());
}
}
return this;
}
public PayloadBuilder append(byte[] payload, int startPosition, int length) throws IOException {
builderStream.write(payload, startPosition, length);
return this;
}
public byte[] build() {
byte[] answer = builderStream.toByteArray();
builderStream.reset();
return answer;
}
}
| PayloadBuilder |
java | apache__camel | components/camel-univocity-parsers/src/main/java/org/apache/camel/dataformat/univocity/Unmarshaller.java | {
"start": 1138,
"end": 1241
} | class ____ the exchange body using an uniVocity parser.
*
* @param <P> Parser class
*/
final | unmarshalls |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/context/event/DubboModuleStateEvent.java | {
"start": 1092,
"end": 1780
} | class ____ extends ApplicationEvent {
private final DeployState state;
private Throwable cause;
public DubboModuleStateEvent(ModuleModel applicationModel, DeployState state) {
super(applicationModel);
this.state = state;
}
public DubboModuleStateEvent(ModuleModel applicationModel, DeployState state, Throwable cause) {
super(applicationModel);
this.state = state;
this.cause = cause;
}
public ModuleModel getModule() {
return (ModuleModel) getSource();
}
public DeployState getState() {
return state;
}
public Throwable getCause() {
return cause;
}
}
| DubboModuleStateEvent |
java | elastic__elasticsearch | modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java | {
"start": 2052,
"end": 11876
} | class ____ {
private RemoteRequestBuilders() {}
static Request initialSearch(SearchRequest searchRequest, BytesReference query, Version remoteVersion) {
// It is nasty to build paths with StringBuilder but we'll be careful....
StringBuilder path = new StringBuilder("/");
addIndices(path, searchRequest.indices());
path.append("_search");
Request request = new Request("POST", path.toString());
if (searchRequest.scroll() != null) {
TimeValue keepAlive = searchRequest.scroll();
// V_5_0_0
if (remoteVersion.before(Version.fromId(5000099))) {
/* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros
* so we toss out that resolution, rounding up because more scroll
* timeout seems safer than less. */
keepAlive = timeValueMillis((long) Math.ceil(keepAlive.millisFrac()));
}
request.addParameter("scroll", keepAlive.getStringRep());
}
request.addParameter("size", Integer.toString(searchRequest.source().size()));
if (searchRequest.source().version() == null || searchRequest.source().version() == false) {
request.addParameter("version", Boolean.FALSE.toString());
} else {
request.addParameter("version", Boolean.TRUE.toString());
}
if (searchRequest.source().sorts() != null) {
boolean useScan = false;
// Detect if we should use search_type=scan rather than a sort
if (remoteVersion.before(Version.fromId(2010099))) {
for (SortBuilder<?> sort : searchRequest.source().sorts()) {
if (sort instanceof FieldSortBuilder f) {
if (f.getFieldName().equals(FieldSortBuilder.DOC_FIELD_NAME)) {
useScan = true;
break;
}
}
}
}
if (useScan) {
request.addParameter("search_type", "scan");
} else {
StringBuilder sorts = new StringBuilder(sortToUri(searchRequest.source().sorts().get(0)));
for (int i = 1; i < searchRequest.source().sorts().size(); i++) {
sorts.append(',').append(sortToUri(searchRequest.source().sorts().get(i)));
}
request.addParameter("sort", sorts.toString());
}
}
if (remoteVersion.before(Version.fromId(2000099))) {
// Versions before 2.0.0 need prompting to return interesting fields. Note that timestamp isn't available at all....
searchRequest.source().storedField("_parent").storedField("_routing").storedField("_ttl");
if (remoteVersion.before(Version.fromId(1000099))) {
// Versions before 1.0.0 don't support `"_source": true` so we have to ask for the _source in a funny way.
if (false == searchRequest.source().storedFields().fieldNames().contains("_source")) {
searchRequest.source().storedField("_source");
}
}
}
if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().fieldNames().isEmpty()) {
StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().fieldNames().get(0));
for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) {
fields.append(',').append(searchRequest.source().storedFields().fieldNames().get(i));
}
// V_5_0_0
String storedFieldsParamName = remoteVersion.before(Version.fromId(5000099)) ? "fields" : "stored_fields";
request.addParameter(storedFieldsParamName, fields.toString());
}
if (remoteVersion.onOrAfter(Version.fromId(6030099))) {
// allow_partial_results introduced in 6.3, running remote reindex against earlier versions still silently discards RED shards.
request.addParameter("allow_partial_search_results", "false");
}
// EMPTY is safe here because we're not calling namedObject
try (
XContentBuilder entity = JsonXContent.contentBuilder();
XContentParser queryParser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, query)
) {
entity.startObject();
entity.field("query");
{
/* We're intentionally a bit paranoid here - copying the query
* as xcontent rather than writing a raw field. We don't want
* poorly written queries to escape. Ever. */
entity.copyCurrentStructure(queryParser);
XContentParser.Token shouldBeEof = queryParser.nextToken();
if (shouldBeEof != null) {
throw new ElasticsearchException(
"query was more than a single object. This first token after the object is [" + shouldBeEof + "]"
);
}
}
var fetchSource = searchRequest.source().fetchSource();
if (fetchSource == null) {
if (remoteVersion.onOrAfter(Version.fromId(1000099))) {
// Versions before 1.0 don't support `"_source": true` so we have to ask for the source as a stored field.
entity.field("_source", true);
}
} else {
if (remoteVersion.onOrAfter(Version.V_9_1_0) || fetchSource.excludeVectors() == null) {
entity.field("_source", fetchSource);
} else {
// Versions before 9.1.0 don't support "exclude_vectors" so we need to manually convert.
if (fetchSource.includes().length == 0 && fetchSource.excludes().length == 0) {
if (remoteVersion.onOrAfter(Version.fromId(1000099))) {
// Versions before 1.0 don't support `"_source": true` so we have to ask for the source as a stored field.
entity.field("_source", true);
}
} else {
entity.startObject("_source");
if (fetchSource.includes().length > 0) {
entity.field(FetchSourceContext.INCLUDES_FIELD.getPreferredName(), fetchSource.includes());
}
if (fetchSource.excludes().length > 0) {
entity.field(FetchSourceContext.EXCLUDES_FIELD.getPreferredName(), fetchSource.excludes());
}
entity.endObject();
}
}
}
entity.endObject();
request.setJsonEntity(Strings.toString(entity));
} catch (IOException e) {
throw new ElasticsearchException("unexpected error building entity", e);
}
return request;
}
private static void addIndices(StringBuilder path, String[] indices) {
if (indices == null || indices.length == 0) {
return;
}
path.append(Arrays.stream(indices).map(RemoteRequestBuilders::encodeIndex).collect(Collectors.joining(","))).append('/');
}
private static String encodeIndex(String s) {
return URLEncoder.encode(s, StandardCharsets.UTF_8);
}
private static String sortToUri(SortBuilder<?> sort) {
if (sort instanceof FieldSortBuilder f) {
return f.getFieldName() + ":" + f.order();
}
throw new IllegalArgumentException("Unsupported sort [" + sort + "]");
}
static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) {
Request request = new Request("POST", "/_search/scroll");
// V_5_0_0
if (remoteVersion.before(Version.fromId(5000099))) {
/* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros
* so we toss out that resolution, rounding up so we shouldn't end up
* with 0s. */
keepAlive = timeValueMillis((long) Math.ceil(keepAlive.millisFrac()));
}
request.addParameter("scroll", keepAlive.getStringRep());
if (remoteVersion.before(Version.fromId(2000099))) {
// Versions before 2.0.0 extract the plain scroll_id from the body
request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
return request;
}
try (XContentBuilder entity = JsonXContent.contentBuilder()) {
entity.startObject().field("scroll_id", scroll).endObject();
request.setJsonEntity(Strings.toString(entity));
} catch (IOException e) {
throw new ElasticsearchException("failed to build scroll entity", e);
}
return request;
}
static Request clearScroll(String scroll, Version remoteVersion) {
Request request = new Request("DELETE", "/_search/scroll");
if (remoteVersion.before(Version.fromId(2000099))) {
// Versions before 2.0.0 extract the plain scroll_id from the body
request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
return request;
}
try (XContentBuilder entity = JsonXContent.contentBuilder()) {
entity.startObject().array("scroll_id", scroll).endObject();
request.setJsonEntity(Strings.toString(entity));
} catch (IOException e) {
throw new ElasticsearchException("failed to build clear scroll entity", e);
}
return request;
}
}
| RemoteRequestBuilders |
java | apache__camel | components/camel-smooks/src/main/java/org/apache/camel/dataformat/smooks/SmooksDataFormat.java | {
"start": 2712,
"end": 6275
} | class ____ extends ServiceSupport implements DataFormat, CamelContextAware {
private Smooks smooks;
private CamelContext camelContext;
private String smooksConfig;
/**
* Marshals the Object 'fromBody' to an OutputStream 'toStream'
* </p>
* <p/>
* The Camel framework will call this method from {@link MarshalProcessor#process(Exchange)} and it will take care
* of setting the Out Message's body to the bytes written to the toStream OutputStream.
*
* @param exchange The Camel {@link Exchange}.
* @param fromBody The object to be marshalled into the output stream.
* @param toStream The output stream that will be written to.
*/
@Override
public void marshal(final Exchange exchange, final Object fromBody, final OutputStream toStream) throws Exception {
final ExecutionContext executionContext = smooks.createExecutionContext();
final TypeConverter typeConverter = exchange.getContext().getTypeConverter();
final JavaSource javaSource = typeConverter.mandatoryConvertTo(JavaSource.class, exchange, fromBody);
final StringSink stringSink = new StringSink();
smooks.filterSource(executionContext, javaSource, stringSink);
toStream.write(stringSink.getResult().getBytes(executionContext.getContentEncoding()));
}
/**
* Unmarshals the fromStream to an Object.
* </p>
* The Camel framework will call this method from {@link UnmarshalProcessor#process(Exchange)} and it will take care
* of setting the returned Object on the Out Message's body.
*
* @param exchange The Camel {@link Exchange}.
* @param fromStream The InputStream that will be unmarshalled into an Object instance.
*/
@Override
public Object unmarshal(final Exchange exchange, final InputStream fromStream) {
final ExecutionContext executionContext = smooks.createExecutionContext();
final Exports exports = smooks.getApplicationContext().getRegistry().lookup(new ExportsLookup());
final Sink[] sinks = exports.createSinks();
smooks.filterSource(executionContext, new StreamSource<>(fromStream), sinks);
return getResult(exports, sinks, exchange);
}
protected Object getResult(final Exports exports, final Sink[] sinks, final Exchange exchange) {
final List<Object> objects = Exports.extractSinks(sinks, exports);
if (objects.size() == 1) {
return objects.get(0);
} else {
return objects;
}
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void doStart() {
final SmooksFactory smooksFactory = CamelContextHelper.findSingleByType(camelContext, SmooksFactory.class);
try {
if (smooksFactory != null) {
smooks = smooksFactory.createInstance(smooksConfig);
} else {
smooks = new Smooks(smooksConfig);
}
} catch (IOException | SAXException e) {
throw new SmooksException(e.getMessage(), e);
}
}
@Override
public void doStop() {
if (smooks != null) {
smooks.close();
}
}
public String getSmooksConfig() {
return smooksConfig;
}
public void setSmooksConfig(String smooksConfig) {
this.smooksConfig = smooksConfig;
}
}
| SmooksDataFormat |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java | {
"start": 19624,
"end": 21791
} | class ____ implements Iterable<AclEntry> {
private final List<AclEntry> aclSpec;
/**
* Creates a ValidatedAclSpec by pre-validating and sorting the given ACL
* entries. Pre-validation checks that it does not exceed the maximum
* entries. This check is performed before modifying the ACL, and it's
* actually insufficient for enforcing the maximum number of entries.
* Transformation logic can create additional entries automatically,such as
* the mask and some of the default entries, so we also need additional
* checks during transformation. The up-front check is still valuable here
* so that we don't run a lot of expensive transformation logic while
* holding the namesystem lock for an attacker who intentionally sent a huge
* ACL spec.
*
* @param aclSpec List<AclEntry> containing unvalidated input ACL spec
* @throws AclException if validation fails
*/
public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException {
Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR);
checkMaxEntries(new ScopedAclEntries(aclSpec));
this.aclSpec = aclSpec;
}
/**
* Returns true if this contains an entry matching the given key. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return boolean true if found
*/
public boolean containsKey(AclEntry key) {
return Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR) >= 0;
}
/**
* Returns the entry matching the given key or null if not found. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return AclEntry entry matching the given key or null if not found
*/
public AclEntry findByKey(AclEntry key) {
int index = Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR);
if (index >= 0) {
return aclSpec.get(index);
}
return null;
}
@Override
public Iterator<AclEntry> iterator() {
return aclSpec.iterator();
}
}
}
| ValidatedAclSpec |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/factory/cache/LocalResponseCacheGatewayFilterFactoryTests.java | {
"start": 14312,
"end": 16602
} | class ____ extends BaseWebClientTests {
@Test
void shouldApplyMaxAgeFromPropertiesWhenFilterHasNoParams() throws InterruptedException {
String uri = "/" + UUID.randomUUID() + "/cache/headers";
Long maxAgeRequest1 = testClient.get()
.uri(uri)
.header("Host", "www.localresponsecache.org")
.exchange()
.expectBody()
.returnResult()
.getResponseHeaders()
.get(HttpHeaders.CACHE_CONTROL)
.stream()
.map(LocalResponseCacheGatewayFilterFactoryTests::parseMaxAge)
.filter(Objects::nonNull)
.findAny()
.orElse(null);
assertThat(maxAgeRequest1).isLessThanOrEqualTo(20L);
Thread.sleep(2000);
Long maxAgeRequest2 = testClient.get()
.uri(uri)
.header("Host", "www.localresponsecache.org")
.exchange()
.expectBody()
.returnResult()
.getResponseHeaders()
.get(HttpHeaders.CACHE_CONTROL)
.stream()
.map(LocalResponseCacheGatewayFilterFactoryTests::parseMaxAge)
.filter(Objects::nonNull)
.findAny()
.orElse(null);
assertThat(maxAgeRequest2).isLessThan(maxAgeRequest1);
}
@Test
void shouldNotCacheWhenPrivateDirectiveIsInRequest() {
testClient = testClient.mutate().responseTimeout(Duration.ofHours(1)).build();
String uri = "/" + UUID.randomUUID() + "/cache/headers";
testClient.get()
.uri(uri)
.header("Host", "www.localresponsecache.org")
.header(HttpHeaders.CACHE_CONTROL, CacheControl.noStore().getHeaderValue())
.header(CUSTOM_HEADER, "1")
.exchange()
.expectBody()
.jsonPath("$.headers." + CUSTOM_HEADER);
testClient.get()
.uri(uri)
.header("Host", "www.localresponsecache.org")
.header(CUSTOM_HEADER, "2")
.exchange()
.expectBody()
.jsonPath("$.headers." + CUSTOM_HEADER)
.isEqualTo("2");
testClient.get()
.uri(uri)
.header("Host", "www.localresponsecache.org")
.header(CUSTOM_HEADER, "3") // second
// request
// cached
// "2"
// ->
// "3"
// will
// be
// ignored
.exchange()
.expectBody()
.jsonPath("$.headers." + CUSTOM_HEADER)
.isEqualTo("2");
}
@EnableAutoConfiguration
@SpringBootConfiguration
@Import(DefaultTestConfig.class)
public static | UsingPropertiesAsDefault |
java | quarkusio__quarkus | integration-tests/main/src/main/java/io/quarkus/it/faulttolerance/FaultToleranceTestResource.java | {
"start": 194,
"end": 1030
} | class ____ {
@Inject
Service service;
@Inject
SecondService secondService;
@GET
public String getName() {
AtomicInteger counter = new AtomicInteger();
String name = service.getName(counter);
return counter + ":" + name;
}
@GET
@Path("/retried")
public String retried() {
AtomicInteger counter = new AtomicInteger();
String name = service.retriedMethod(counter);
return counter + ":" + name;
}
@GET
@Path("/fallback")
public String fallback() {
AtomicInteger counter = new AtomicInteger();
String name = service.fallbackMethod(counter);
return counter + ":" + name;
}
@GET
@Path("/hello")
public String hello() {
return secondService.publicHello();
}
}
| FaultToleranceTestResource |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/framework/adapter/AdvisorAdapterRegistrationTests.java | {
"start": 3452,
"end": 3772
} | class ____ implements MethodInterceptor {
private SimpleBeforeAdvice advice;
public SimpleBeforeAdviceInterceptor(SimpleBeforeAdvice advice) {
this.advice = advice;
}
@Override
public Object invoke(MethodInvocation mi) throws Throwable {
advice.before();
return mi.proceed();
}
}
| SimpleBeforeAdviceInterceptor |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java | {
"start": 1680,
"end": 2778
} | class ____ implements
* WritableComparable{@literal <MyWritableComparable>} {
* // Some data
* private int counter;
* private long timestamp;
*
* public void write(DataOutput out) throws IOException {
* out.writeInt(counter);
* out.writeLong(timestamp);
* }
*
* public void readFields(DataInput in) throws IOException {
* counter = in.readInt();
* timestamp = in.readLong();
* }
*
* public int compareTo(MyWritableComparable o) {
* int thisValue = this.value;
* int thatValue = o.value;
* return (thisValue < thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
* }
*
* public int hashCode() {
* final int prime = 31;
* int result = 1;
* result = prime * result + counter;
* result = prime * result + (int) (timestamp ^ (timestamp >>> 32));
* return result
* }
* }
* </pre></blockquote>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public | MyWritableComparable |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/path/JSONPath_list_field.java | {
"start": 1197,
"end": 1482
} | class ____ {
private String name;
public Entity(String name){
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| Entity |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/LevelPatternSelector.java | {
"start": 2223,
"end": 9790
} | class ____ implements org.apache.logging.log4j.core.util.Builder<LevelPatternSelector> {
@PluginElement("PatternMatch")
private PatternMatch[] properties;
@PluginBuilderAttribute("defaultPattern")
private String defaultPattern;
@PluginBuilderAttribute(value = "alwaysWriteExceptions")
private boolean alwaysWriteExceptions = true;
@PluginBuilderAttribute(value = "disableAnsi")
private boolean disableAnsi;
@PluginBuilderAttribute(value = "noConsoleNoAnsi")
private boolean noConsoleNoAnsi;
@PluginConfiguration
private Configuration configuration;
@Override
public LevelPatternSelector build() {
if (defaultPattern == null) {
defaultPattern = PatternLayout.DEFAULT_CONVERSION_PATTERN;
}
if (properties == null || properties.length == 0) {
LOGGER.warn("No marker patterns were provided with PatternMatch");
return null;
}
return new LevelPatternSelector(
properties, defaultPattern, alwaysWriteExceptions, disableAnsi, noConsoleNoAnsi, configuration);
}
public Builder setProperties(final PatternMatch[] properties) {
this.properties = properties;
return this;
}
public Builder setDefaultPattern(final String defaultPattern) {
this.defaultPattern = defaultPattern;
return this;
}
public Builder setAlwaysWriteExceptions(final boolean alwaysWriteExceptions) {
this.alwaysWriteExceptions = alwaysWriteExceptions;
return this;
}
public Builder setDisableAnsi(final boolean disableAnsi) {
this.disableAnsi = disableAnsi;
return this;
}
public Builder setNoConsoleNoAnsi(final boolean noConsoleNoAnsi) {
this.noConsoleNoAnsi = noConsoleNoAnsi;
return this;
}
public Builder setConfiguration(final Configuration configuration) {
this.configuration = configuration;
return this;
}
}
private final Map<String, PatternFormatter[]> formatterMap = new HashMap<>();
private final Map<String, String> patternMap = new HashMap<>();
private final PatternFormatter[] defaultFormatters;
private final String defaultPattern;
private static Logger LOGGER = StatusLogger.getLogger();
private final boolean requiresLocation;
/**
* @deprecated Use {@link #newBuilder()} instead. This will be private in a future version.
*/
@Deprecated
public LevelPatternSelector(
final PatternMatch[] properties,
final String defaultPattern,
final boolean alwaysWriteExceptions,
final boolean noConsoleNoAnsi,
final Configuration config) {
this(properties, defaultPattern, alwaysWriteExceptions, false, noConsoleNoAnsi, config);
}
private LevelPatternSelector(
final PatternMatch[] properties,
final String defaultPattern,
final boolean alwaysWriteExceptions,
final boolean disableAnsi,
final boolean noConsoleNoAnsi,
final Configuration config) {
boolean needsLocation = false;
final PatternParser parser = PatternLayout.createPatternParser(config);
for (final PatternMatch property : properties) {
try {
final List<PatternFormatter> list =
parser.parse(property.getPattern(), alwaysWriteExceptions, disableAnsi, noConsoleNoAnsi);
final PatternFormatter[] formatters = list.toArray(PatternFormatter.EMPTY_ARRAY);
formatterMap.put(property.getKey(), formatters);
for (int i = 0; !needsLocation && i < formatters.length; ++i) {
needsLocation = formatters[i].requiresLocation();
}
patternMap.put(property.getKey(), property.getPattern());
} catch (final RuntimeException ex) {
throw new IllegalArgumentException("Cannot parse pattern '" + property.getPattern() + "'", ex);
}
}
try {
final List<PatternFormatter> list =
parser.parse(defaultPattern, alwaysWriteExceptions, disableAnsi, noConsoleNoAnsi);
defaultFormatters = list.toArray(PatternFormatter.EMPTY_ARRAY);
this.defaultPattern = defaultPattern;
for (int i = 0; !needsLocation && i < defaultFormatters.length; ++i) {
needsLocation = defaultFormatters[i].requiresLocation();
}
} catch (final RuntimeException ex) {
throw new IllegalArgumentException("Cannot parse pattern '" + defaultPattern + "'", ex);
}
requiresLocation = needsLocation;
}
@Override
public boolean requiresLocation() {
return requiresLocation;
}
@Override
public PatternFormatter[] getFormatters(final LogEvent event) {
final Level level = event.getLevel();
if (level == null) {
return defaultFormatters;
}
for (final String key : formatterMap.keySet()) {
if (level.name().equalsIgnoreCase(key)) {
return formatterMap.get(key);
}
}
return defaultFormatters;
}
/**
* Creates a builder for a custom ScriptPatternSelector.
*
* @return a ScriptPatternSelector builder.
*/
@PluginBuilderFactory
public static Builder newBuilder() {
return new Builder();
}
/**
* Deprecated, use {@link #newBuilder()} instead.
* @param properties PatternMatch configuration items
* @param defaultPattern the default pattern
* @param alwaysWriteExceptions To always write exceptions even if the pattern contains no exception conversions.
* @param noConsoleNoAnsi Do not output ANSI escape codes if System.console() is null.
* @param configuration the current configuration
* @return a new MarkerPatternSelector.
* @deprecated Use {@link #newBuilder()} instead.
*/
@Deprecated
public static LevelPatternSelector createSelector(
final PatternMatch[] properties,
final String defaultPattern,
final boolean alwaysWriteExceptions,
final boolean noConsoleNoAnsi,
final Configuration configuration) {
final Builder builder = newBuilder();
builder.setProperties(properties);
builder.setDefaultPattern(defaultPattern);
builder.setAlwaysWriteExceptions(alwaysWriteExceptions);
builder.setNoConsoleNoAnsi(noConsoleNoAnsi);
builder.setConfiguration(configuration);
return builder.build();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
boolean first = true;
for (final Map.Entry<String, String> entry : patternMap.entrySet()) {
if (!first) {
sb.append(", ");
}
sb.append("key=\"")
.append(entry.getKey())
.append("\", pattern=\"")
.append(entry.getValue())
.append("\"");
first = false;
}
if (!first) {
sb.append(", ");
}
sb.append("default=\"").append(defaultPattern).append("\"");
return sb.toString();
}
}
| Builder |
java | elastic__elasticsearch | x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java | {
"start": 779,
"end": 1655
} | class ____ extends LocalStateCompositeXPackPlugin implements CircuitBreakerPlugin {
private final EqlPlugin eqlPlugin;
public LocalStateEQLXPackPlugin(final Settings settings, final Path configPath) {
super(settings, configPath);
LocalStateEQLXPackPlugin thisVar = this;
this.eqlPlugin = new EqlPlugin() {
@Override
protected XPackLicenseState getLicenseState() {
return thisVar.getLicenseState();
}
};
plugins.add(eqlPlugin);
plugins.add(new QlPlugin());
}
@Override
public BreakerSettings getCircuitBreaker(Settings settings) {
return eqlPlugin.getCircuitBreaker(settings);
}
@Override
public void setCircuitBreaker(CircuitBreaker circuitBreaker) {
eqlPlugin.setCircuitBreaker(circuitBreaker);
}
}
| LocalStateEQLXPackPlugin |
java | apache__flink | flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogTruncateHelper.java | {
"start": 1683,
"end": 3721
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(ChangelogTruncateHelper.class);
private final StateChangelogWriter<?> stateChangelogWriter;
private final NavigableMap<Long, SequenceNumber> checkpointedUpTo = new TreeMap<>();
private SequenceNumber subsumedUpTo;
private SequenceNumber materializedUpTo;
ChangelogTruncateHelper(StateChangelogWriter<?> stateChangelogWriter) {
this.stateChangelogWriter = stateChangelogWriter;
}
/**
* Set the highest {@link SequenceNumber} of changelog used by the given checkpoint.
*
* @param lastUploadedTo exclusive
*/
public void checkpoint(long checkpointId, SequenceNumber lastUploadedTo) {
checkpointedUpTo.put(checkpointId, lastUploadedTo);
}
/** Handle checkpoint subsumption, potentially {@link #truncate() truncating} the changelog. */
public void checkpointSubsumed(long checkpointId) {
SequenceNumber sqn = checkpointedUpTo.get(checkpointId);
LOG.debug("checkpoint {} subsumed, max sqn: {}", checkpointId, sqn);
if (sqn != null) {
subsumedUpTo = sqn;
checkpointedUpTo.headMap(checkpointId, true).clear();
truncate();
}
}
/**
* Handle changelog materialization, potentially {@link #truncate() truncating} the changelog.
*
* @param upTo exclusive
*/
public void materialized(SequenceNumber upTo) {
materializedUpTo = upTo;
truncate();
}
private void truncate() {
if (subsumedUpTo != null && materializedUpTo != null) {
SequenceNumber to =
subsumedUpTo.compareTo(materializedUpTo) < 0 ? subsumedUpTo : materializedUpTo;
LOG.debug(
"truncate changelog to {} (subsumed up to: {}, materialized up to: {})",
to,
subsumedUpTo,
materializedUpTo);
stateChangelogWriter.truncate(to);
}
}
}
| ChangelogTruncateHelper |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/json/ResourceIDDeserializer.java | {
"start": 1287,
"end": 1682
} | class ____ extends StdDeserializer<ResourceID> {
private static final long serialVersionUID = -9058463293913469849L;
protected ResourceIDDeserializer() {
super(ResourceID.class);
}
@Override
public ResourceID deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
return new ResourceID(p.getValueAsString());
}
}
| ResourceIDDeserializer |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/naming/remote/request/AbstractNamingRequest.java | {
"start": 893,
"end": 1913
} | class ____ extends Request {
private String namespace;
private String serviceName;
private String groupName;
public AbstractNamingRequest() {
}
public AbstractNamingRequest(String namespace, String serviceName, String groupName) {
this.namespace = namespace;
this.serviceName = serviceName;
this.groupName = groupName;
}
@Override
public String getModule() {
return NAMING_MODULE;
}
public String getNamespace() {
return namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public String getGroupName() {
return groupName;
}
public void setGroupName(String groupName) {
this.groupName = groupName;
}
}
| AbstractNamingRequest |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/configuration/ConfigCompatibleConfig.java | {
"start": 920,
"end": 1869
} | class ____ extends AbstractDynamicConfig {
private static final String CONFIG_NAME = "configCompatible";
private boolean namespaceCompatibleMode = true;
private static final ConfigCompatibleConfig INSTANCE = new ConfigCompatibleConfig();
protected ConfigCompatibleConfig() {
super(CONFIG_NAME);
resetConfig();
}
public boolean isNamespaceCompatibleMode() {
return namespaceCompatibleMode;
}
@Override
protected void getConfigFromEnv() {
namespaceCompatibleMode = EnvUtil.getProperty(PropertiesConstant.NAMESPACE_COMPATIBLE_MODE, Boolean.class, true);
}
@Override
protected String printConfig() {
return "ConfigCompatibleConfig{" + "namespaceCompatibleMode=" + namespaceCompatibleMode
+ '}';
}
public static ConfigCompatibleConfig getInstance() {
return INSTANCE;
}
}
| ConfigCompatibleConfig |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/StopWatch.java | {
"start": 11548,
"end": 12758
} | class ____ {
private final String taskName;
private final long timeNanos;
TaskInfo(String taskName, long timeNanos) {
this.taskName = taskName;
this.timeNanos = timeNanos;
}
/**
* Get the name of this task.
*/
public String getTaskName() {
return this.taskName;
}
/**
* Get the time this task took in nanoseconds.
* @since 5.2
* @see #getTime(TimeUnit)
*/
public long getTimeNanos() {
return this.timeNanos;
}
/**
* Get the time this task took in milliseconds.
* @see #getTime(TimeUnit)
*/
public long getTimeMillis() {
return TimeUnit.NANOSECONDS.toMillis(this.timeNanos);
}
/**
* Get the time this task took in seconds.
* @see #getTime(TimeUnit)
*/
public double getTimeSeconds() {
return getTime(TimeUnit.SECONDS);
}
/**
* Get the time this task took in the requested time unit
* (with decimal points in nanosecond precision).
* @param timeUnit the unit to use
* @since 6.1
* @see #getTimeNanos()
* @see #getTimeMillis()
* @see #getTimeSeconds()
*/
public double getTime(TimeUnit timeUnit) {
return (double) this.timeNanos / TimeUnit.NANOSECONDS.convert(1, timeUnit);
}
}
}
| TaskInfo |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/web/access/channel/ChannelProcessor.java | {
"start": 1513,
"end": 2429
} | interface ____ {
/**
* Decided whether the presented {@link FilterInvocation} provides the appropriate
* level of channel security based on the requested list of <tt>ConfigAttribute</tt>s.
*/
void decide(FilterInvocation invocation, Collection<ConfigAttribute> config) throws IOException, ServletException;
/**
* Indicates whether this <code>ChannelProcessor</code> is able to process the passed
* <code>ConfigAttribute</code>.
* <p>
* This allows the <code>ChannelProcessingFilter</code> to check every configuration
* attribute can be consumed by the configured <code>ChannelDecisionManager</code>.
* @param attribute a configuration attribute that has been configured against the
* <tt>ChannelProcessingFilter</tt>.
* @return true if this <code>ChannelProcessor</code> can support the passed
* configuration attribute
*/
boolean supports(ConfigAttribute attribute);
}
| ChannelProcessor |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsLookupCustomShuffle.java | {
"start": 2063,
"end": 2377
} | interface ____ responsible for providing custom partitioning logic for the RowData
* records. We didn't use {@link Partitioner} directly because the input data is always RowData
* type, and we need to extract all join keys from the input data before send it to partitioner.
*/
@PublicEvolving
| is |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvnup/goals/CompatibilityFixStrategy.java | {
"start": 10906,
"end": 12252
} | class ____ {
final Element element;
final String sectionName;
DependencyContainer(Element element, String sectionName) {
this.element = element;
this.sectionName = sectionName;
}
}
/**
* Fixes duplicate plugins in plugins and pluginManagement sections.
*/
private boolean fixDuplicatePlugins(Document pomDocument, UpgradeContext context) {
Element root = pomDocument.root();
// Collect all build elements to process
Stream<BuildContainer> buildContainers = Stream.concat(
// Root level build
Stream.of(new BuildContainer(root.child(BUILD).orElse(null), BUILD))
.filter(container -> container.element != null),
// Profile builds
root.child(PROFILES).stream()
.flatMap(profiles -> profiles.children(PROFILE))
.map(profile -> new BuildContainer(profile.child(BUILD).orElse(null), "profile build"))
.filter(container -> container.element != null));
return buildContainers
.map(container -> fixPluginsInBuildElement(container.element, context, container.sectionName))
.reduce(false, Boolean::logicalOr);
}
private static | DependencyContainer |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/IntFloatBucketedSort.java | {
"start": 1209,
"end": 15633
} | class ____ implements Releasable {
private final BigArrays bigArrays;
private final SortOrder order;
private final int bucketSize;
/**
* {@code true} if the bucket is in heap mode, {@code false} if
* it is still gathering.
*/
private final BitArray heapMode;
/**
* An array containing all the values on all buckets. The structure is as follows:
* <p>
* For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
* Then, for each bucket, it can be in 2 states:
* </p>
* <ul>
* <li>
* Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
* In gather mode, the elements are stored in the array from the highest index to the lowest index.
* The lowest index contains the offset to the next slot to be filled.
* <p>
* This allows us to insert elements in O(1) time.
* </p>
* <p>
* When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
* </p>
* </li>
* <li>
* Heap mode: The bucket slots are organized as a min heap structure.
* <p>
* The root of the heap is the minimum value in the bucket,
* which allows us to quickly discard new values that are not in the top N.
* </p>
* </li>
* </ul>
*/
private IntArray values;
private FloatArray extraValues;
public IntFloatBucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) {
this.bigArrays = bigArrays;
this.order = order;
this.bucketSize = bucketSize;
heapMode = new BitArray(0, bigArrays);
boolean success = false;
try {
values = bigArrays.newIntArray(0, false);
extraValues = bigArrays.newFloatArray(0, false);
success = true;
} finally {
if (success == false) {
close();
}
}
}
/**
* Collects a {@code value} into a {@code bucket}.
* <p>
* It may or may not be inserted in the heap, depending on if it is better than the current root.
* </p>
*/
public void collect(int value, float extraValue, int bucket) {
long rootIndex = (long) bucket * bucketSize;
if (inHeapMode(bucket)) {
if (betterThan(value, values.get(rootIndex), extraValue, extraValues.get(rootIndex))) {
values.set(rootIndex, value);
extraValues.set(rootIndex, extraValue);
downHeap(rootIndex, 0, bucketSize);
}
return;
}
// Gathering mode
long requiredSize = rootIndex + bucketSize;
if (values.size() < requiredSize) {
grow(bucket);
}
int next = getNextGatherOffset(rootIndex);
assert 0 <= next && next < bucketSize
: "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]";
long index = next + rootIndex;
values.set(index, value);
extraValues.set(index, extraValue);
if (next == 0) {
heapMode.set(bucket);
heapify(rootIndex, bucketSize);
} else {
setNextGatherOffset(rootIndex, next - 1);
}
}
/**
* The order of the sort.
*/
public SortOrder getOrder() {
return order;
}
/**
* The number of values to store per bucket.
*/
public int getBucketSize() {
return bucketSize;
}
/**
* Get the first and last indexes (inclusive, exclusive) of the values for a bucket.
* Returns [0, 0] if the bucket has never been collected.
*/
private Tuple<Long, Long> getBucketValuesIndexes(int bucket) {
long rootIndex = (long) bucket * bucketSize;
if (rootIndex >= values.size()) {
// We've never seen this bucket.
return Tuple.tuple(0L, 0L);
}
long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1);
long end = rootIndex + bucketSize;
return Tuple.tuple(start, end);
}
/**
* Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}.
*/
public void merge(int groupId, IntFloatBucketedSort other, int otherGroupId) {
var otherBounds = other.getBucketValuesIndexes(otherGroupId);
// TODO: This can be improved for heapified buckets by making use of the heap structures
for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) {
collect(other.values.get(i), other.extraValues.get(i), groupId);
}
}
/**
* Creates a block with the values from the {@code selected} groups.
*/
public void toBlocks(BlockFactory blockFactory, Block[] blocks, int offset, IntVector selected) {
// Check if the selected groups are all empty, to avoid allocating extra memory
if (allSelectedGroupsAreEmpty(selected)) {
Block constantNullBlock = blockFactory.newConstantNullBlock(selected.getPositionCount());
constantNullBlock.incRef();
blocks[offset] = constantNullBlock;
blocks[offset + 1] = constantNullBlock;
return;
}
try (
var builder = blockFactory.newIntBlockBuilder(selected.getPositionCount());
var extraBuilder = blockFactory.newFloatBlockBuilder(selected.getPositionCount())
) {
for (int s = 0; s < selected.getPositionCount(); s++) {
int bucket = selected.getInt(s);
var bounds = getBucketValuesIndexes(bucket);
var rootIndex = bounds.v1();
var size = bounds.v2() - bounds.v1();
if (size == 0) {
builder.appendNull();
extraBuilder.appendNull();
continue;
}
if (size == 1) {
builder.appendInt(values.get(rootIndex));
extraBuilder.appendFloat(extraValues.get(rootIndex));
continue;
}
// If we are in the gathering mode, we need to heapify before sorting.
if (inHeapMode(bucket) == false) {
heapify(rootIndex, (int) size);
}
heapSort(rootIndex, (int) size);
builder.beginPositionEntry();
extraBuilder.beginPositionEntry();
for (int i = 0; i < size; i++) {
builder.appendInt(values.get(rootIndex + i));
extraBuilder.appendFloat(extraValues.get(rootIndex + i));
}
builder.endPositionEntry();
extraBuilder.endPositionEntry();
}
blocks[offset] = builder.build();
blocks[offset + 1] = extraBuilder.build();
}
}
/**
* Checks if the selected groups are all empty.
*/
private boolean allSelectedGroupsAreEmpty(IntVector selected) {
return IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> {
var bounds = this.getBucketValuesIndexes(bucket);
var size = bounds.v2() - bounds.v1();
return size > 0;
});
}
/**
* Is this bucket a min heap {@code true} or in gathering mode {@code false}?
*/
private boolean inHeapMode(int bucket) {
return heapMode.get(bucket);
}
/**
* Get the next index that should be "gathered" for a bucket rooted
* at {@code rootIndex}.
*/
private int getNextGatherOffset(long rootIndex) {
return values.get(rootIndex);
}
/**
* Set the next index that should be "gathered" for a bucket rooted
* at {@code rootIndex}.
*/
private void setNextGatherOffset(long rootIndex, int offset) {
values.set(rootIndex, offset);
}
/**
* {@code true} if the entry at index {@code lhs} is "better" than
* the entry at {@code rhs}. "Better" in this means "lower" for
* {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}.
*/
private boolean betterThan(int lhs, int rhs, float lhsExtra, float rhsExtra) {
int res = Integer.compare(lhs, rhs);
if (res != 0) {
return getOrder().reverseMul() * res < 0;
}
res = Float.compare(lhsExtra, rhsExtra);
return getOrder().reverseMul() * res < 0;
}
/**
* Swap the data at two indices.
*/
private void swap(long lhs, long rhs) {
var tmp = values.get(lhs);
values.set(lhs, values.get(rhs));
values.set(rhs, tmp);
var tmpExtra = extraValues.get(lhs);
extraValues.set(lhs, extraValues.get(rhs));
extraValues.set(rhs, tmpExtra);
}
/**
* Allocate storage for more buckets and store the "next gather offset"
* for those new buckets. We always grow the storage by whole bucket's
* worth of slots at a time. We never allocate space for partial buckets.
*/
private void grow(int bucket) {
long oldMax = values.size();
assert oldMax % bucketSize == 0;
long newSize = BigArrays.overSize(((long) bucket + 1) * bucketSize, PageCacheRecycler.INT_PAGE_SIZE, Integer.BYTES);
// Round up to the next full bucket.
newSize = (newSize + bucketSize - 1) / bucketSize;
values = bigArrays.resize(values, newSize * bucketSize);
// Round up to the next full bucket.
extraValues = bigArrays.resize(extraValues, newSize * bucketSize);
// Set the next gather offsets for all newly allocated buckets.
fillGatherOffsets(oldMax);
}
/**
* Maintain the "next gather offsets" for newly allocated buckets.
*/
private void fillGatherOffsets(long startingAt) {
int nextOffset = getBucketSize() - 1;
for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) {
setNextGatherOffset(bucketRoot, nextOffset);
}
}
/**
* Heapify a bucket whose entries are in random order.
* <p>
* This works by validating the heap property on each node, iterating
* "upwards", pushing any out of order parents "down". Check out the
* <a href="https://en.wikipedia.org/w/index.php?title=Binary_heap&oldid=940542991#Building_a_heap">wikipedia</a>
* entry on binary heaps for more about this.
* </p>
* <p>
* While this *looks* like it could easily be {@code O(n * log n)}, it is
* a fairly well studied algorithm attributed to Floyd. There's
* been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
* case.
* </p>
* <ul>
* <li>Hayward, Ryan; McDiarmid, Colin (1991).
* <a href="https://web.archive.org/web/20160205023201/http://www.stats.ox.ac.uk/__data/assets/pdf_file/0015/4173/heapbuildjalg.pdf">
* Average Case Analysis of Heap Building byRepeated Insertion</a> J. Algorithms.
* <li>D.E. Knuth, ”The Art of Computer Programming, Vol. 3, Sorting and Searching”</li>
* </ul>
* @param rootIndex the index the start of the bucket
*/
private void heapify(long rootIndex, int heapSize) {
int maxParent = heapSize / 2 - 1;
for (int parent = maxParent; parent >= 0; parent--) {
downHeap(rootIndex, parent, heapSize);
}
}
/**
* Sorts all the values in the heap using heap sort algorithm.
* This runs in {@code O(n log n)} time.
* @param rootIndex index of the start of the bucket
* @param heapSize Number of values that belong to the heap.
* Can be less than bucketSize.
* In such a case, the remaining values in range
* (rootIndex + heapSize, rootIndex + bucketSize)
* are *not* considered part of the heap.
*/
private void heapSort(long rootIndex, int heapSize) {
while (heapSize > 0) {
swap(rootIndex, rootIndex + heapSize - 1);
heapSize--;
downHeap(rootIndex, 0, heapSize);
}
}
/**
* Correct the heap invariant of a parent and its children. This
* runs in {@code O(log n)} time.
* @param rootIndex index of the start of the bucket
* @param parent Index within the bucket of the parent to check.
* For example, 0 is the "root".
* @param heapSize Number of values that belong to the heap.
* Can be less than bucketSize.
* In such a case, the remaining values in range
* (rootIndex + heapSize, rootIndex + bucketSize)
* are *not* considered part of the heap.
*/
private void downHeap(long rootIndex, int parent, int heapSize) {
while (true) {
long parentIndex = rootIndex + parent;
int worst = parent;
long worstIndex = parentIndex;
int leftChild = parent * 2 + 1;
long leftIndex = rootIndex + leftChild;
if (leftChild < heapSize) {
if (betterThan(values.get(worstIndex), values.get(leftIndex), extraValues.get(worstIndex), extraValues.get(leftIndex))) {
worst = leftChild;
worstIndex = leftIndex;
}
int rightChild = leftChild + 1;
long rightIndex = rootIndex + rightChild;
if (rightChild < heapSize
&& betterThan(
values.get(worstIndex),
values.get(rightIndex),
extraValues.get(worstIndex),
extraValues.get(rightIndex)
)) {
worst = rightChild;
worstIndex = rightIndex;
}
}
if (worst == parent) {
break;
}
swap(worstIndex, parentIndex);
parent = worst;
}
}
@Override
public final void close() {
Releasables.close(values, extraValues, heapMode);
}
}
| IntFloatBucketedSort |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java | {
"start": 1464,
"end": 2243
} | class ____<R extends BaseRecord> {
private static final Logger LOG = LoggerFactory.getLogger(RecordStore.class);
/** Class of the record stored in this State Store. */
private final Class<R> recordClass;
/** State store driver backed by persistent storage. */
private final StateStoreDriver driver;
/**
* Create a new store for records.
*
* @param clazz Class of the record to store.
* @param stateStoreDriver Driver for the State Store.
*/
protected RecordStore(Class<R> clazz, StateStoreDriver stateStoreDriver) {
this.recordClass = clazz;
this.driver = stateStoreDriver;
}
/**
* Report a required record to the data store. The data store uses this to
* create/maintain storage for the record.
*
* @return The | RecordStore |
java | google__guava | android/guava-tests/test/com/google/common/base/FinalizableReferenceQueueTest.java | {
"start": 5262,
"end": 7244
} | class ____ implements Closeable {
private static final FinalizableReferenceQueue frq = new FinalizableReferenceQueue();
private static final Set<Reference<?>> references = Sets.newConcurrentHashSet();
private final ServerSocket serverSocket;
private MyServerExampleWithFrq() throws IOException {
this.serverSocket = new ServerSocket(0);
}
static MyServerExampleWithFrq create(AtomicBoolean finalizeReferentRan) throws IOException {
MyServerExampleWithFrq myServer = new MyServerExampleWithFrq();
ServerSocket serverSocket = myServer.serverSocket;
Reference<?> reference =
new FinalizablePhantomReference<MyServerExampleWithFrq>(myServer, frq) {
@Override
public void finalizeReferent() {
references.remove(this);
if (!serverSocket.isClosed()) {
try {
serverSocket.close();
finalizeReferentRan.set(true);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
};
references.add(reference);
return myServer;
}
@Override
public void close() throws IOException {
serverSocket.close();
}
}
private ServerSocket makeMyServerExampleWithFrq(AtomicBoolean finalizeReferentRan)
throws IOException {
MyServerExampleWithFrq myServer = MyServerExampleWithFrq.create(finalizeReferentRan);
assertThat(myServer.serverSocket.isClosed()).isFalse();
return myServer.serverSocket;
}
@Test
public void testMyServerExampleWithFrq() throws Exception {
AtomicBoolean finalizeReferentRan = new AtomicBoolean(false);
ServerSocket serverSocket = makeMyServerExampleWithFrq(finalizeReferentRan);
GcFinalization.awaitDone(finalizeReferentRan::get);
assertThat(serverSocket.isClosed()).isTrue();
}
@SuppressWarnings("Java8ApiChecker")
static | MyServerExampleWithFrq |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NullableConstructorTest.java | {
"start": 864,
"end": 1218
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(NullableConstructor.class, getClass());
@Test
public void positive() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import javax.annotation.Nullable;
| NullableConstructorTest |
java | apache__kafka | core/src/test/java/kafka/server/share/SharePartitionTest.java | {
"start": 6067,
"end": 710965
} | class ____ {
private static final String ACQUISITION_LOCK_NEVER_GOT_RELEASED = "Acquisition lock never got released.";
private static final String GROUP_ID = "test-group";
private static final int MAX_DELIVERY_COUNT = 5;
private static final TopicIdPartition TOPIC_ID_PARTITION = new TopicIdPartition(Uuid.randomUuid(), 0, "test-topic");
private static final String MEMBER_ID = "member-1";
private static final Time MOCK_TIME = new MockTime();
private static final short MAX_IN_FLIGHT_RECORDS = 200;
private static final int ACQUISITION_LOCK_TIMEOUT_MS = 100;
private static final int DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS = 120;
private static final int BATCH_SIZE = 500;
private static final int DEFAULT_FETCH_OFFSET = 0;
private static final int MAX_FETCH_RECORDS = Integer.MAX_VALUE;
private static final byte ACKNOWLEDGE_TYPE_GAP_ID = 0;
private static final FetchIsolation FETCH_ISOLATION_HWM = FetchIsolation.HIGH_WATERMARK;
private static Timer mockTimer;
private SharePartitionMetrics sharePartitionMetrics;
@BeforeEach
public void setUp() {
kafka.utils.TestUtils.clearYammerMetrics();
mockTimer = new MockTimer();
sharePartitionMetrics = new SharePartitionMetrics(GROUP_ID, TOPIC_ID_PARTITION.topic(), TOPIC_ID_PARTITION.partition());
}
@AfterEach
public void tearDown() throws Exception {
mockTimer.close();
sharePartitionMetrics.close();
}
// Happy path: the persister returns an initialized partition (state epoch 3, start offset 5)
// with two state batches - AVAILABLE 5-10 (delivery count 2) and ARCHIVED 11-15 (delivery
// count 3). Verifies the cached state, offsets, delivery-complete count and the yammer
// in-flight batch/message metrics after initialization.
@Test
public void testMaybeInitialize() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult))
;
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(5, sharePartition.startOffset());
assertEquals(15, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
// Next fetch starts at the first AVAILABLE offset (5), not past the ARCHIVED batch.
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
assertNotNull(sharePartition.cachedState().get(5L));
assertNotNull(sharePartition.cachedState().get(11L));
assertEquals(10, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
assertEquals(2, sharePartition.cachedState().get(5L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(5L).offsetState());
assertEquals(15, sharePartition.cachedState().get(11L).lastOffset());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
assertEquals(3, sharePartition.cachedState().get(11L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(11L).offsetState());
// deliveryCompleteCount is incremented by the number of ACKNOWLEDGED and ARCHIVED records in readState result.
assertEquals(5, sharePartition.deliveryCompleteCount());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 2,
"In-flight batch count should be 2.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 11,
"In-flight message count should be 11.");
// Histogram over the two batch sizes: 5-10 has 6 messages, 11-15 has 5.
assertEquals(11, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(2, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(6, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
// When the persisted start offset is uninitialized and the group config's
// share.auto.offset.reset strategy is EARLIEST, initialization must resolve the start
// offset via replicaManager.fetchOffsetForTimestamp(EARLIEST_TIMESTAMP) (mocked to 0 here).
@Test
public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsEarliest() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(ShareGroupAutoOffsetResetStrategy.EARLIEST);
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with "ListOffsetsRequest.EARLIEST_TIMESTAMP"
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(ListOffsetsRequest.EARLIEST_TIMESTAMP),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
// Start/end offsets collapse to the resolved earliest offset (0) with no cached batches.
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(0, sharePartition.startOffset());
assertEquals(0, sharePartition.endOffset());
assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
// When the persisted start offset is uninitialized and the group config's
// share.auto.offset.reset strategy is LATEST, initialization must resolve the start
// offset via replicaManager.fetchOffsetForTimestamp(LATEST_TIMESTAMP) (mocked to 15 here).
@Test
public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsLatest() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(ShareGroupAutoOffsetResetStrategy.LATEST);
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 15L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with "ListOffsetsRequest.LATEST_TIMESTAMP"
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(ListOffsetsRequest.LATEST_TIMESTAMP),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
// Start/end offsets collapse to the resolved latest offset (15) with no cached batches.
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(15, sharePartition.startOffset());
assertEquals(15, sharePartition.endOffset());
assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
// When the persisted start offset is uninitialized and the reset strategy is BY_DURATION,
// initialization must resolve the start offset via fetchOffsetForTimestamp using the
// strategy's timestamp (here: one hour before MOCK_TIME, with the strategy mocked because
// a real duration-based strategy's timestamp() is not deterministic).
@Test
public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration()
throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
// Since the timestamp() of duration based strategy is not deterministic, we need to mock the ShareGroupAutoOffsetResetStrategy.
// mock: final ShareGroupAutoOffsetResetStrategy resetStrategy = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H");
final ShareGroupAutoOffsetResetStrategy resetStrategy = Mockito.mock(ShareGroupAutoOffsetResetStrategy.class);
final long expectedTimestamp = MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1);
Mockito.when(resetStrategy.type()).thenReturn(ShareGroupAutoOffsetResetStrategy.StrategyType.BY_DURATION);
Mockito.when(resetStrategy.timestamp()).thenReturn(expectedTimestamp);
Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(resetStrategy);
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(
MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1), 15L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with the (current time - 1 hour)
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(expectedTimestamp),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(15, sharePartition.startOffset());
assertEquals(15, sharePartition.endOffset());
assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch());
assertEquals(0, sharePartition.deliveryCompleteCount());
// No state batches were returned, so the in-flight metrics must both read zero.
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 0,
"In-flight batch count should be 0.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 0,
"In-flight message count should be 0.");
}
// When no group config is present (GroupConfigManager returns Optional.empty()),
// initialization falls back to resolving the start offset with LATEST_TIMESTAMP,
// as asserted by the verify() below.
@Test
public void testMaybeInitializeDefaultStartEpochGroupConfigNotPresent() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.empty());
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 15L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with "ListOffsetsRequest.LATEST_TIMESTAMP"
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(ListOffsetsRequest.LATEST_TIMESTAMP),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(15, sharePartition.startOffset());
assertEquals(15, sharePartition.endOffset());
assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch());
}
// If resolving the default (latest) start offset throws, maybeInitialize must complete
// exceptionally and leave the partition in the FAILED state.
@Test
public void testMaybeInitializeFetchOffsetForLatestTimestampThrowsError() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.empty());
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
Mockito.when(replicaManager.fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()))
.thenThrow(new RuntimeException("fetch offsets exception"));
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with "ListOffsetsRequest.LATEST_TIMESTAMP"
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(ListOffsetsRequest.LATEST_TIMESTAMP),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
// Same failure path as the latest-timestamp variant, but with the EARLIEST reset strategy:
// a throwing fetchOffsetForTimestamp must fail initialization and mark the partition FAILED.
@Test
public void testMaybeInitializeFetchOffsetForEarliestTimestampThrowsError() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(ShareGroupAutoOffsetResetStrategy.EARLIEST);
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
Mockito.when(replicaManager.fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()))
.thenThrow(new RuntimeException("fetch offsets exception"));
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
// replicaManager.fetchOffsetForTimestamp should be called with "ListOffsetsRequest.EARLIEST_TIMESTAMP"
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(ListOffsetsRequest.EARLIEST_TIMESTAMP),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
// Same failure path for the BY_DURATION reset strategy (strategy mocked for a deterministic
// timestamp): a throwing fetchOffsetForTimestamp must fail initialization and mark the
// partition FAILED; the lookup must use the strategy's timestamp.
@Test
public void testMaybeInitializeFetchOffsetForByDurationThrowsError() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(
0, PartitionFactory.DEFAULT_STATE_EPOCH,
PartitionFactory.UNINITIALIZED_START_OFFSET,
PartitionFactory.DEFAULT_ERROR_CODE,
PartitionFactory.DEFAULT_ERR_MESSAGE,
List.of())))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
// We need to mock the ShareGroupAutoOffsetResetStrategy as the timestamp() of duration based strategy is not deterministic.
// final ShareGroupAutoOffsetResetStrategy resetStrategy = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H");
final ShareGroupAutoOffsetResetStrategy resetStrategy = Mockito.mock(ShareGroupAutoOffsetResetStrategy.class);
final long expectedTimestamp = MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1);
Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(resetStrategy);
Mockito.when(resetStrategy.type()).thenReturn(ShareGroupAutoOffsetResetStrategy.StrategyType.BY_DURATION);
Mockito.when(resetStrategy.timestamp()).thenReturn(expectedTimestamp);
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
Mockito.when(replicaManager.fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()))
.thenThrow(new RuntimeException("fetch offsets exception"));
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withGroupConfigManager(groupConfigManager)
.withReplicaManager(replicaManager)
.build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
Mockito.verify(replicaManager).fetchOffsetForTimestamp(
Mockito.any(TopicPartition.class),
Mockito.eq(expectedTimestamp),
Mockito.any(),
Mockito.any(),
Mockito.anyBoolean()
);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
/**
 * Calling maybeInitialize() on an already-initialized share partition is a no-op:
 * both calls succeed, the partition stays ACTIVE, and the persister's readState
 * is consulted exactly once.
 */
@Test
public void testMaybeInitializeSharePartitionAgain() {
    Persister statePersister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
    Mockito.when(statePersister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readStateResult));
    SharePartition partition = SharePartitionBuilder.builder().withPersister(statePersister).build();

    // First attempt initializes; the second finds the partition already ACTIVE and
    // needs no persister response, so both must complete successfully.
    for (int attempt = 0; attempt < 2; attempt++) {
        CompletableFuture<Void> initFuture = partition.maybeInitialize();
        assertTrue(initFuture.isDone());
        assertFalse(initFuture.isCompletedExceptionally());
        assertEquals(SharePartitionState.ACTIVE, partition.partitionState());
    }

    // Verify the persister read state is called only once.
    Mockito.verify(statePersister, Mockito.times(1)).readState(Mockito.any());
}
/**
 * Fires 10 concurrent maybeInitialize() calls and verifies that all succeed while the
 * persister's readState is invoked exactly once.
 *
 * Fixes over the previous version:
 * - {@code shutdown()} is now requested BEFORE {@code awaitTermination()}; previously the
 *   wait ran first and could never observe termination, so the assertions raced the tasks.
 * - The shared result list is guarded with {@code synchronized} — ArrayList is not safe
 *   for concurrent {@code add} from the 10 pool threads.
 * - Uses {@code noneMatch(isCompletedExceptionally)} instead of
 *   {@code assertFalse(allMatch(...))}, which passed even if 9 of 10 futures failed.
 */
@Test
public void testMaybeInitializeSharePartitionAgainConcurrentRequests() throws InterruptedException {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    // No need to send mock persister response again as only 1 thread should read state from persister.
    ExecutorService executorService = Executors.newFixedThreadPool(10);
    List<CompletableFuture<Void>> results = new ArrayList<>(10);
    try {
        for (int i = 0; i < 10; i++) {
            executorService.submit(() -> {
                CompletableFuture<Void> future = sharePartition.maybeInitialize();
                // Guard the non-thread-safe ArrayList against concurrent adds.
                synchronized (results) {
                    results.add(future);
                }
            });
        }
    } finally {
        // Request shutdown first; awaitTermination can only return true after shutdown.
        executorService.shutdown();
        assertTrue(executorService.awaitTermination(30, TimeUnit.SECONDS),
            "Timed out waiting for concurrent maybeInitialize() calls to finish.");
    }
    // awaitTermination established the happens-before edge, so results is safe to read here.
    assertEquals(10, results.size());
    assertTrue(results.stream().allMatch(CompletableFuture::isDone));
    assertTrue(results.stream().noneMatch(CompletableFuture::isCompletedExceptionally));
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    // Verify the persister read state is called only once.
    Mockito.verify(persister, Mockito.times(1)).readState(Mockito.any());
}
/**
 * The persister returns an initialized partition (state epoch 5, start offset 10) with no
 * state batches: initialization succeeds, the cache stays empty, and start/end/next-fetch
 * offsets all collapse to the persisted start offset.
 */
@Test
public void testMaybeInitializeWithEmptyStateBatches() {
    Persister statePersister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NONE.code(), Errors.NONE.message(), List.of()))))
    );
    Mockito.when(statePersister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readStateResult));

    SharePartition partition = SharePartitionBuilder.builder().withPersister(statePersister).build();
    CompletableFuture<Void> initFuture = partition.maybeInitialize();

    assertTrue(initFuture.isDone());
    assertFalse(initFuture.isCompletedExceptionally());
    assertEquals(SharePartitionState.ACTIVE, partition.partitionState());
    assertTrue(partition.cachedState().isEmpty());
    assertEquals(10, partition.startOffset());
    assertEquals(10, partition.endOffset());
    assertEquals(5, partition.stateEpoch());
    assertEquals(10, partition.nextFetchOffset());
    assertEquals(0, partition.deliveryCompleteCount());
}
/**
 * Maps persister partition-level error codes to the exception type maybeInitialize()
 * surfaces, and checks the partition ends up FAILED in every case. The nine identical
 * mock/build/assert copies of the previous version are collapsed into one parameterized
 * helper per error code.
 */
@Test
public void testMaybeInitializeWithErrorPartitionResponse() {
    assertMaybeInitializeFailsWith(Errors.NOT_COORDINATOR, CoordinatorNotAvailableException.class);
    assertMaybeInitializeFailsWith(Errors.COORDINATOR_NOT_AVAILABLE, CoordinatorNotAvailableException.class);
    assertMaybeInitializeFailsWith(Errors.COORDINATOR_LOAD_IN_PROGRESS, CoordinatorNotAvailableException.class);
    assertMaybeInitializeFailsWith(Errors.GROUP_ID_NOT_FOUND, GroupIdNotFoundException.class);
    assertMaybeInitializeFailsWith(Errors.UNKNOWN_TOPIC_OR_PARTITION, UnknownTopicOrPartitionException.class);
    assertMaybeInitializeFailsWith(Errors.FENCED_STATE_EPOCH, NotLeaderOrFollowerException.class);
    assertMaybeInitializeFailsWith(Errors.FENCED_LEADER_EPOCH, NotLeaderOrFollowerException.class);
    assertMaybeInitializeFailsWith(Errors.UNKNOWN_SERVER_ERROR, UnknownServerException.class);
    assertMaybeInitializeFailsWith(Errors.NETWORK_EXCEPTION, UnknownServerException.class);
}

/**
 * Builds a fresh SharePartition whose persister reports {@code error} for the partition
 * (state epoch 5, start offset 10, no batches) and asserts that maybeInitialize()
 * completes exceptionally with {@code expectedException} and moves the partition to FAILED.
 */
private void assertMaybeInitializeFailsWith(Errors error, Class<? extends Exception> expectedException) {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 5, 10L, error.code(), error.message(),
                List.of())))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertTrue(result.isCompletedExceptionally());
    assertFutureThrows(expectedException, result);
    assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
/**
 * The persisted start offset (6) does not line up with the first state batch's base
 * offset (5), i.e. the snapshot is internally inconsistent: initialization must fail
 * with IllegalStateException and mark the partition FAILED.
 */
@Test
public void testMaybeInitializeWithInvalidStartOffsetStateBatches() {
    Persister statePersister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 6L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
    Mockito.when(statePersister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readStateResult));

    SharePartition partition = SharePartitionBuilder.builder().withPersister(statePersister).build();
    CompletableFuture<Void> initFuture = partition.maybeInitialize();

    assertTrue(initFuture.isDone());
    assertTrue(initFuture.isCompletedExceptionally());
    assertFutureThrows(IllegalStateException.class, initFuture);
    assertEquals(SharePartitionState.FAILED, partition.partitionState());
}
@Test
public void testMaybeInitializeWithInvalidTopicIdResponse() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(Uuid.randomUuid(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithInvalidPartitionResponse() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(1, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithNoOpStatePersister() {
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
SharePartition sharePartition = SharePartitionBuilder.builder().withReplicaManager(replicaManager).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertTrue(sharePartition.cachedState().isEmpty());
assertEquals(0, sharePartition.startOffset());
assertEquals(0, sharePartition.endOffset());
assertEquals(0, sharePartition.stateEpoch());
assertEquals(0, sharePartition.nextFetchOffset());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithNullResponse() {
Persister persister = Mockito.mock(Persister.class);
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithNullTopicsData() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(null);
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithEmptyTopicsData() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of());
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testMaybeInitializeWithReadException() {
Persister persister = Mockito.mock(Persister.class);
// Complete the future exceptionally for read state.
Mockito.when(persister.readState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Read exception")));
SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition1.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(RuntimeException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition1.partitionState());
persister = Mockito.mock(Persister.class);
// Throw exception for read state.
Mockito.when(persister.readState(Mockito.any())).thenThrow(new RuntimeException("Read exception"));
SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build();
assertThrows(RuntimeException.class, sharePartition2::maybeInitialize);
}
@Test
public void testMaybeInitializeFencedSharePartition() {
SharePartition sharePartition = SharePartitionBuilder.builder().build();
// Mark the share partition as fenced.
sharePartition.markFenced();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(LeaderNotAvailableException.class, result);
assertEquals(SharePartitionState.FENCED, sharePartition.partitionState());
}
    @Test
    public void testMaybeInitializeStateBatchesWithGapAtBeginning() {
        // Persister returns startOffset 10 but the first state batch begins at 15,
        // leaving an un-tracked gap [10, 14] that must remain fetchable.
        Persister persister = Mockito.mock(Persister.class);
        ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
        Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
                    List.of(
                        new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14
                        new PersisterStateBatch(21L, 30L, RecordState.ARCHIVED.id, (short) 3)))))));
        Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
        SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
        CompletableFuture<Void> result = sharePartition.maybeInitialize();
        assertTrue(result.isDone());
        assertFalse(result.isCompletedExceptionally());
        assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
        assertFalse(sharePartition.cachedState().isEmpty());
        // Start and next-fetch offsets stay at 10 so the gap records can be fetched.
        assertEquals(10, sharePartition.startOffset());
        assertEquals(30, sharePartition.endOffset());
        assertEquals(3, sharePartition.stateEpoch());
        assertEquals(10, sharePartition.nextFetchOffset());
        // Both persisted batches are materialized in the cached state, batch-level
        // (no per-offset state yet).
        assertEquals(2, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(15L));
        assertNotNull(sharePartition.cachedState().get(21L));
        assertEquals(20, sharePartition.cachedState().get(15L).lastOffset());
        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState());
        assertEquals(2, sharePartition.cachedState().get(15L).batchDeliveryCount());
        assertNull(sharePartition.cachedState().get(15L).offsetState());
        assertEquals(30, sharePartition.cachedState().get(21L).lastOffset());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState());
        assertEquals(3, sharePartition.cachedState().get(21L).batchDeliveryCount());
        assertNull(sharePartition.cachedState().get(21L).offsetState());
        // The gap window spans from the first gap (10) to the read-result end (30).
        GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
        assertNotNull(persisterReadResultGapWindow);
        assertEquals(10, persisterReadResultGapWindow.gapStartOffset());
        assertEquals(30, persisterReadResultGapWindow.endOffset());
        // deliveryCompleteCount is incremented by the number of ACKNOWLEDGED and ARCHIVED records in readState result.
        // 6 ACKNOWLEDGED (15-20) + 10 ARCHIVED (21-30) = 16.
        assertEquals(16, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testMaybeInitializeStateBatchesWithMultipleGaps() {
        // Persister state leaves two gaps: [10, 14] before the first batch and
        // [21, 29] between the two batches. A single gap window must cover both.
        Persister persister = Mockito.mock(Persister.class);
        ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
        Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
                    List.of(
                        new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14
                        new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 3))))))); // There is a gap from 21 to 29
        Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
        SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
        CompletableFuture<Void> result = sharePartition.maybeInitialize();
        assertTrue(result.isDone());
        assertFalse(result.isCompletedExceptionally());
        assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
        assertFalse(sharePartition.cachedState().isEmpty());
        // Start and next-fetch offsets stay at 10 so the gap records can be fetched.
        assertEquals(10, sharePartition.startOffset());
        assertEquals(40, sharePartition.endOffset());
        assertEquals(3, sharePartition.stateEpoch());
        assertEquals(10, sharePartition.nextFetchOffset());
        assertEquals(2, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(15L));
        assertNotNull(sharePartition.cachedState().get(30L));
        assertEquals(20, sharePartition.cachedState().get(15L).lastOffset());
        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState());
        assertEquals(2, sharePartition.cachedState().get(15L).batchDeliveryCount());
        assertNull(sharePartition.cachedState().get(15L).offsetState());
        assertEquals(40, sharePartition.cachedState().get(30L).lastOffset());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(30L).batchState());
        assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount());
        assertNull(sharePartition.cachedState().get(30L).offsetState());
        // One window spans from the first gap (10) to the read-result end (40),
        // covering both gaps.
        GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
        assertNotNull(persisterReadResultGapWindow);
        assertEquals(10, persisterReadResultGapWindow.gapStartOffset());
        assertEquals(40, persisterReadResultGapWindow.endOffset());
        // deliveryCompleteCount is incremented by the number of ACKNOWLEDGED and ARCHIVED records in readState result.
        // 6 ACKNOWLEDGED (15-20) + 11 ARCHIVED (30-40) = 17.
        assertEquals(17, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testMaybeInitializeStateBatchesWithGapNotAtBeginning() {
        // startOffset (15) coincides with the first batch; the only gap is
        // [21, 29] between the two persisted batches.
        Persister persister = Mockito.mock(Persister.class);
        ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
        Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(),
                    List.of(
                        new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2),
                        new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 3))))))); // There is a gap from 21 to 29
        Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult))…
@Test
public void testMaybeInitializeStateBatchesWithoutGaps() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2),
new PersisterStateBatch(21L, 30L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertTrue(sharePartition.cachedState().isEmpty());
assertEquals(31, sharePartition.startOffset());
assertEquals(31, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(31, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.deliveryCompleteCount());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
// Since there are no gaps present in the readState response, persisterReadResultGapWindow should be null
assertNull(persisterReadResultGapWindow);
}
@Test
public void testMaybeInitializeAndAcquire() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(10, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create a single batch record that covers the entire range from 10 to 30 of initial read gap.
// The records in the batch are from 10 to 49.
MemoryRecords records = memoryRecords(10, 40);
// Set max fetch records to 1, records will be acquired till the first gap is encountered.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
1,
10,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
5);
assertArrayEquals(expectedAcquiredRecord(10, 14, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(15, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(10L).offsetState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(15L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Send the same batch again to acquire the next set of records.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
15,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
13);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 2));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(31, sharePartition.nextFetchOffset());
assertEquals(6, sharePartition.cachedState().size());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(1, sharePartition.cachedState().get(19L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(19L).offsetState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertEquals(1, sharePartition.cachedState().get(23L).batchDeliveryCount());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
assertEquals(30L, sharePartition.endOffset());
// As all the gaps are now filled, the persisterReadResultGapWindow should be null.
assertNull(sharePartition.persisterReadResultGapWindow());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Now initial read gap is filled, so the complete batch can be acquired despite max fetch records being 1.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
1,
31,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
19);
assertArrayEquals(expectedAcquiredRecord(31, 49, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(50, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.cachedState().size());
assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
assertEquals(1, sharePartition.cachedState().get(31L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(31L).offsetState());
assertEquals(49L, sharePartition.endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithHigherMaxFetchRecords() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(10, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create a single batch record that covers the entire range from 10 to 30 of initial read gap.
// The records in the batch are from 10 to 49.
MemoryRecords records = memoryRecords(10, 40);
// Set max fetch records to 500, all records should be acquired.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
10,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
37);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 2));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(50, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
assertEquals(49L, sharePartition.endOffset());
// As all the gaps are now filled, the persisterReadResultGapWindow should be null.
assertNull(sharePartition.persisterReadResultGapWindow());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithFetchBatchLastOffsetWithinCachedBatch() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(10, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create a single batch record that ends in between the cached batch and the fetch offset is
// post startOffset.
MemoryRecords records = memoryRecords(12, 16);
// Set max fetch records to 500, records should be acquired till the last offset of the fetched batch.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
10,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
13);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(12, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 2));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(28, sharePartition.nextFetchOffset());
assertEquals(6, sharePartition.cachedState().size());
assertEquals(12, sharePartition.cachedState().get(12L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(12L).lastOffset());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(12L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state());
assertEquals(30L, sharePartition.endOffset());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithFetchBatchPriorStartOffset() {
    // Persister reports startOffset 10 with three state batches (15-18 AVAILABLE, 20-22 ARCHIVED,
    // 26-30 AVAILABLE). The ranges 10-14, 19 and 23-25 are read-result gaps that must be
    // (re-)acquired from fetched log data.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
                    new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());
    // Verify the initialized cached state mirrors the persister batches and the gap window
    // spans the full range from startOffset (10) to endOffset (30).
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertEquals(3, sharePartition.cachedState().size());
    assertEquals(10, sharePartition.startOffset());
    assertEquals(30, sharePartition.endOffset());
    assertEquals(10, sharePartition.nextFetchOffset());
    assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
    assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
    assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
    assertNotNull(sharePartition.persisterReadResultGapWindow());
    assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
    assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
    assertEquals(3, sharePartition.deliveryCompleteCount());
    // Create a single batch record where first offset is prior startOffset.
    MemoryRecords records = memoryRecords(6, 16);
    // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        10,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        10);
    // Expect the 10-14 gap (delivery count 1), the cached 15-18 batch (bumped to 3) and the
    // single-offset gap at 19 to be acquired; the ARCHIVED 20-22 batch is skipped.
    List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
    assertEquals(23, sharePartition.nextFetchOffset());
    assertEquals(5, sharePartition.cachedState().size());
    assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
    assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
    assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
    assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
    assertEquals(30L, sharePartition.endOffset());
    // The gap window now starts at 20, the first offset not yet covered by fetched data.
    assertNotNull(sharePartition.persisterReadResultGapWindow());
    assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
    assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithMultipleBatches() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(5, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create multiple batch records that covers the entire range from 5 to 30 of initial read gap.
// The records in the batch are from 5 to 49.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 5, 2).close();
memoryRecordsBuilder(buffer, 8, 1).close();
memoryRecordsBuilder(buffer, 10, 2).close();
memoryRecordsBuilder(buffer, 13, 6).close();
memoryRecordsBuilder(buffer, 19, 3).close();
memoryRecordsBuilder(buffer, 22, 9).close();
memoryRecordsBuilder(buffer, 31, 19).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Set max fetch records to 1, records will be acquired till the first gap is encountered.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
1,
5L,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
2);
assertArrayEquals(expectedAcquiredRecord(5, 6, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(7, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.cachedState().size());
assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
assertEquals(6, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(7L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Remove first batch from the records as the fetch offset has moved forward to 7 offset.
List<RecordBatch> batch = TestUtils.toList(records.batches());
records = records.slice(batch.get(0).sizeInBytes(), records.sizeInBytes() - batch.get(0).sizeInBytes());
// Send the batch again to acquire the next set of records.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
3,
7L,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
4);
assertArrayEquals(expectedAcquiredRecord(8, 11, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(5, sharePartition.cachedState().size());
assertEquals(8, sharePartition.cachedState().get(8L).firstOffset());
assertEquals(11, sharePartition.cachedState().get(8L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(8L).batchState());
assertEquals(1, sharePartition.cachedState().get(8L).batchDeliveryCount());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertEquals(30L, sharePartition.endOffset());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(12L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Remove the next 2 batches from the records as the fetch offset has moved forward to 12 offset.
int size = batch.get(1).sizeInBytes() + batch.get(2).sizeInBytes();
records = records.slice(size, records.sizeInBytes() - size);
// Send the records with 8 as max fetch records to acquire new and existing cached batches.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
8,
12,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
10);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(13, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(26, sharePartition.nextFetchOffset());
assertEquals(8, sharePartition.cachedState().size());
assertEquals(13, sharePartition.cachedState().get(13L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(13L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(13L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertEquals(30L, sharePartition.endOffset());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(26L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Remove the next 2 batches from the records as the fetch offset has moved forward to 26 offset.
// Do not remove the 5th batch as it's only partially acquired.
size = batch.get(3).sizeInBytes() + batch.get(4).sizeInBytes();
records = records.slice(size, records.sizeInBytes() - size);
// Send the records with 10 as max fetch records to acquire the existing and till end of the
// fetched data.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
26,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
24);
expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(26, 30, 2));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(50, sharePartition.nextFetchOffset());
assertEquals(9, sharePartition.cachedState().size());
assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
assertEquals(49L, sharePartition.endOffset());
// As all the gaps are now filled, the persisterReadResultGapWindow should be null.
assertNull(sharePartition.persisterReadResultGapWindow());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithMultipleBatchesAndLastOffsetWithinCachedBatch() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(5, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create multiple batch records that ends in between the cached batch and the fetch offset is
// post startOffset.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 7, 2).close();
memoryRecordsBuilder(buffer, 10, 2).close();
memoryRecordsBuilder(buffer, 13, 6).close();
// Though 19 offset is a gap but still be acquired.
memoryRecordsBuilder(buffer, 20, 8).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Set max fetch records to 500, records should be acquired till the last offset of the fetched batch.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
5,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
18);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(7, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 2));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(28, sharePartition.nextFetchOffset());
assertEquals(6, sharePartition.cachedState().size());
assertEquals(7, sharePartition.cachedState().get(7L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(7L).lastOffset());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state());
assertEquals(30L, sharePartition.endOffset());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeAndAcquireWithMultipleBatchesPriorStartOffset() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 1)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(10, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// Create multiple batch records where multiple batches base offsets are prior startOffset.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 3, 2).close();
memoryRecordsBuilder(buffer, 6, 1).close();
memoryRecordsBuilder(buffer, 8, 4).close();
memoryRecordsBuilder(buffer, 13, 10).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Set max fetch records to 500, records should be acquired till the last offset of the fetched batch.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
10,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
10);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(23, sharePartition.nextFetchOffset());
assertEquals(5, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertEquals(30L, sharePartition.endOffset());
assertNotNull(sharePartition.persisterReadResultGapWindow());
assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeInitializeWithInvalidOffsetInBatch() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(11L, 10L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
assertEquals(SharePartitionState.FAILED, sharePartition.partitionState());
}
@Test
public void testAcquireSingleRecord() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
MemoryRecords records = memoryRecords(1);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 1);
assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(1, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(0, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(0L).offsetState());
// deliveryCompleteCount will not be changed because no record went to a Terminal state.
assertEquals(0, sharePartition.deliveryCompleteCount());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 1,
"In-flight message count should be 1.");
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().sum());
}
@Test
public void testAcquireMultipleRecords() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
MemoryRecords records = memoryRecords(10, 5);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3L, 5);
assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(15, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(10L).offsetState());
// deliveryCompleteCount will not be changed because no record went to a Terminal state.
assertEquals(0, sharePartition.deliveryCompleteCount());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5,
"In-flight message count should be 5.");
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum());
}
@Test
public void testAcquireWithMaxFetchRecords() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
// Less-number of records than max fetch records.
MemoryRecords records = memoryRecords(5);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
5);
assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(0L).offsetState());
// More-number of records than max fetch records, but from 0 offset hence previous 10 records
// should be ignored and new full batch till end should be acquired.
records = memoryRecords(25);
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
20);
assertArrayEquals(expectedAcquiredRecord(5, 24, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(25, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
assertEquals(24, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(5L).offsetState());
}
@Test
public void testAcquireWithMultipleBatchesAndMaxFetchRecords() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
// Create 3 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 15).close();
memoryRecordsBuilder(buffer, 30, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire 10 records.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
20);
// Validate 2 batches are fetched one with 5 records and other till end of batch, third batch
// should be skipped.
assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(30, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(29, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(10L).offsetState());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 20,
"In-flight message count should be 20.");
assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().sum());
}
@Test
public void testAcquireMultipleRecordsWithOverlapAndNewBatch() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
MemoryRecords records = memoryRecords(5);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5);
assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(5, sharePartition.nextFetchOffset());
// Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored.
records = memoryRecords(10);
acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5);
assertArrayEquals(expectedAcquiredRecords(memoryRecords(5, 5), 1).toArray(), acquiredRecordsList.toArray());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
}
@Test
public void testAcquireSameBatchAgain() {
    // A batch that is already in ACQUIRED state must not be handed out again,
    // neither when the identical batch nor an overlapping slice is re-fetched.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords batch = memoryRecords(10, 5);
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition, batch, 3, 5);
    assertArrayEquals(expectedAcquiredRecords(batch, 1).toArray(), acquired.toArray());
    assertEquals(15, partition.nextFetchOffset());
    // Re-fetch the identical batch: nothing new should be acquired.
    acquired = fetchAcquiredRecords(partition, batch, 3, 0);
    assertEquals(0, acquired.size());
    assertEquals(15, partition.nextFetchOffset());
    // Re-fetch a subset of the same batch: still nothing should be acquired.
    MemoryRecords subsetBatch = memoryRecords(10, 2);
    acquired = fetchAcquiredRecords(partition, subsetBatch, 3, 0);
    assertEquals(0, acquired.size());
    assertEquals(15, partition.nextFetchOffset());
    // The cache should keep batch-level state only; no per-offset tracking.
    assertNull(partition.cachedState().get(10L).offsetState());
}
@Test
public void testAcquireWithEmptyFetchRecords() {
    // Acquiring on top of an empty record set is a no-op: nothing is acquired
    // and the next fetch offset stays at its initial position.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    List<AcquiredRecords> acquired = fetchAcquiredRecords(
        partition.acquire(
            MEMBER_ID,
            ShareAcquireMode.BATCH_OPTIMIZED,
            BATCH_SIZE,
            MAX_FETCH_RECORDS,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(MemoryRecords.EMPTY),
            FETCH_ISOLATION_HWM),
        0
    );
    assertTrue(acquired.isEmpty());
    assertEquals(0, partition.nextFetchOffset());
}
@Test
public void testAcquireWithBatchSizeAndSingleBatch() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // The single fetched batch holds more records than the requested batch
    // size; the whole batch, exceeding the batch size, should still be
    // acquired as one unit.
    MemoryRecords batch = memoryRecords(5);
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        2 /* Batch size */,
        10,
        DEFAULT_FETCH_OFFSET,
        fetchPartitionData(batch),
        FETCH_ISOLATION_HWM),
        5);
    assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquired.toArray());
    assertEquals(5, partition.nextFetchOffset());
    // Cached state should track the batch as a whole, not per offset.
    assertEquals(1, partition.cachedState().size());
    assertEquals(0, partition.cachedState().get(0L).firstOffset());
    assertEquals(4, partition.cachedState().get(0L).lastOffset());
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(0L).batchState());
    assertEquals(MEMBER_ID, partition.cachedState().get(0L).batchMemberId());
    assertEquals(1, partition.cachedState().get(0L).batchDeliveryCount());
    assertNull(partition.cachedState().get(0L).offsetState());
}
// Verifies that a fetch spanning several record batches acquires each batch
// individually, and that offsets in front of the first batch's base offset
// (the gap before offset 2) are folded into the first acquired batch.
@Test
public void testAcquireWithBatchSizeAndMultipleBatches() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
// Create 4 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 2, 5).close();
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 7).close();
memoryRecordsBuilder(buffer, 22, 6).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
5 /* Batch size */,
100,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
26 /* Gap of 3 records will also be added to first batch */);
// Fetch expected records from 4 batches, but change the first expected record to include gap offsets.
List<AcquiredRecords> expectedAcquiredRecords = expectedAcquiredRecords(records, 1);
expectedAcquiredRecords.remove(0);
// First acquired batch starts at the fetch offset (2) and absorbs the 7-9 gap, hence 2-9.
expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(2, 9, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(28, sharePartition.nextFetchOffset());
// One cache entry per acquired batch, keyed by the batch's first offset.
assertEquals(4, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().containsKey(2L));
assertTrue(sharePartition.cachedState().containsKey(10L));
assertTrue(sharePartition.cachedState().containsKey(15L));
assertTrue(sharePartition.cachedState().containsKey(22L));
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(22L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(2L).batchDeliveryCount());
assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount());
assertEquals(1, sharePartition.cachedState().get(22L).batchDeliveryCount());
// Batch-level tracking only: no per-offset state for any cached batch.
assertNull(sharePartition.cachedState().get(2L).offsetState());
assertNull(sharePartition.cachedState().get(10L).offsetState());
assertNull(sharePartition.cachedState().get(15L).offsetState());
assertNull(sharePartition.cachedState().get(22L).offsetState());
}
// Verifies that the max-fetch-records limit cuts off acquisition at batch
// boundaries: a batch that would exceed the limit is skipped entirely.
@Test
public void testAcquireWithBatchSizeAndMaxFetchRecords() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
// Create 3 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 0, 5).close();
memoryRecordsBuilder(buffer, 5, 15).close();
memoryRecordsBuilder(buffer, 20, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
2 /* Batch size */,
10,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
20);
List<AcquiredRecords> expectedAcquiredRecords = expectedAcquiredRecords(records, 1);
// The last batch should be ignored as it exceeds the max fetch records.
expectedAcquiredRecords.remove(2);
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(20, sharePartition.nextFetchOffset());
// Two cache entries, one per acquired batch (offsets 0-4 and 5-19).
assertEquals(2, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
assertEquals(19, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
// Batch-level tracking only: no per-offset state.
assertNull(sharePartition.cachedState().get(0L).offsetState());
assertNull(sharePartition.cachedState().get(5L).offsetState());
}
@Test
public void testAcquireSingleBatchWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset() {
    // When the partition's end offset (8) lies inside the fetched batch
    // (10-14 plus leading offsets), acquisition must start at the end offset
    // rather than at the batch's first offset.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    partition.updateCacheAndOffsets(8L);
    MemoryRecords batch = memoryRecords(5, 10);
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        5 /* Batch size */,
        100,
        DEFAULT_FETCH_OFFSET,
        fetchPartitionData(batch),
        FETCH_ISOLATION_HWM),
        7 /* Acquisition of records starts post endOffset */);
    // Expect the single batch with its first offset moved up to the endOffset.
    assertArrayEquals(expectedAcquiredRecord(8, 14, 1).toArray(), acquired.toArray());
    assertEquals(15, partition.nextFetchOffset());
    assertEquals(1, partition.cachedState().size());
    assertTrue(partition.cachedState().containsKey(8L));
}
// Verifies acquisition across two batches when the partition end offset (4)
// falls inside the first batch, and checks the in-flight batch/message
// metrics that result from the acquisition.
@Test
public void testAcquireWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset()
throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
sharePartition.updateCacheAndOffsets(4L);
// Create 2 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 2, 8).close();
memoryRecordsBuilder(buffer, 10, 7).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
5 /* Batch size */,
100,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
13 /* Acquisition of records starts post endOffset */);
// Fetch expected records from 2 batches, but change the first batch's first offset as per endOffset.
List<AcquiredRecords> expectedAcquiredRecords = expectedAcquiredRecords(records, 1);
expectedAcquiredRecords.remove(0);
expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(4, 9, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(17, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().containsKey(4L));
assertTrue(sharePartition.cachedState().containsKey(10L));
// Metrics are updated asynchronously, hence wait for the yammer gauges.
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 2,
"In-flight batch count should be 2.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 13,
"In-flight message count should be 13.");
// 13 total in-flight messages across 2 batches: 6 (offsets 4-9) and 7 (offsets 10-16).
assertEquals(13, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(2, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(6, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(7, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
@Test
public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() {
    // With the partition end offset (12) past the entire first batch, the
    // first batch must be skipped and acquisition starts inside the second.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    partition.updateCacheAndOffsets(12L);
    // Build 2 batches of records into a single buffer.
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 2, 8).close();
    memoryRecordsBuilder(buffer, 10, 7).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        5 /* Batch size */,
        100,
        DEFAULT_FETCH_OFFSET,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        5 /* Acquisition of records starts post endOffset */);
    // Only the second batch is acquired, with its first offset moved up to
    // the endOffset.
    assertArrayEquals(expectedAcquiredRecord(12, 16, 1).toArray(), acquired.toArray());
    assertEquals(17, partition.nextFetchOffset());
    assertEquals(1, partition.cachedState().size());
    assertTrue(partition.cachedState().containsKey(12L));
}
// Verifies the max-in-flight-records cap (20): acquisition may overshoot up to
// the end of the batch that crosses the cap, but no further batches are
// acquired once the partition is at capacity.
@Test
public void testAcquireWithMaxInFlightRecordsAndTryAcquireNewBatch() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.withMaxInflightRecords(20)
.build();
// Acquire records, all 10 records should be acquired as within maxInflightRecords limit.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(memoryRecords(10), 0),
FETCH_ISOLATION_HWM),
10);
// Validate all 10 records will be acquired as the maxInFlightRecords is 20.
assertArrayEquals(expectedAcquiredRecord(0, 9, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(10, sharePartition.nextFetchOffset());
// Create 4 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 10).close();
memoryRecordsBuilder(buffer, 25, 5).close();
memoryRecordsBuilder(buffer, 30, 2).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire records, should be acquired till maxInFlightRecords i.e. 20 records. As second batch
// is ending at 24 offset, hence additional 15 records will be acquired.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 0),
FETCH_ISOLATION_HWM),
15);
// Validate 2 batches are fetched one with 5 records and other till end of batch, third batch
// should be skipped.
assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(25, sharePartition.nextFetchOffset());
// Should not acquire any records as the share partition is at capacity and fetch offset is beyond
// the end offset.
fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
25 /* Fetch Offset */,
fetchPartitionData(memoryRecords(25, 10), 10),
FETCH_ISOLATION_HWM),
0);
// Next fetch offset is unchanged since nothing was acquired.
assertEquals(25, sharePartition.nextFetchOffset());
}
// Verifies that once the max-in-flight cap (20) is hit, releasing records
// (middle batch, then the last offset) re-opens exactly the released range
// for re-acquisition, with the delivery count incremented to 2.
@Test
public void testAcquireWithMaxInFlightRecordsAndReleaseLastOffset() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.withMaxInflightRecords(20)
.build();
// Create 4 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 10).close();
memoryRecordsBuilder(buffer, 25, 5).close();
memoryRecordsBuilder(buffer, 30, 3).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire records, should be acquired till maxInFlightRecords i.e. 20 records till 29 offset.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
20);
// Validate 3 batches are fetched and fourth batch should be skipped. Max in-flight records
// limit is reached.
assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(30, sharePartition.nextFetchOffset());
// Release middle batch.
CompletableFuture<Void> ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.RELEASE.id))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
// Validate the nextFetchOffset is updated to 15.
assertEquals(15, sharePartition.nextFetchOffset());
// The complete released batch should be acquired but not the last batch, starting at offset 30,
// as the lastOffset is adjusted according to the endOffset.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
15 /* Fetch Offset */,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
5);
// Validate 1 batch is fetched, with 5 records till end of batch, last available batch should
// not be acquired. Delivery count is now 2 for the re-acquired range.
assertArrayEquals(expectedAcquiredRecords(15, 19, 2).toArray(), acquiredRecordsList.toArray());
assertEquals(30, sharePartition.nextFetchOffset());
// Release last offset of the acquired batch. Only 1 record should be released and later acquired.
ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(new ShareAcknowledgementBatch(29, 29, List.of(AcknowledgeType.RELEASE.id))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
// Validate the nextFetchOffset is updated to 29.
assertEquals(29, sharePartition.nextFetchOffset());
// Only the last record of the acquired batch should be acquired again.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
29 /* Fetch Offset */,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
1);
// Validate 1 record is acquired, again with delivery count 2.
assertArrayEquals(expectedAcquiredRecord(29, 29, 2).toArray(), acquiredRecordsList.toArray());
assertEquals(30, sharePartition.nextFetchOffset());
}
// Verifies that after releasing a middle batch, a subsequent acquire is
// clamped to the released range (up to the minimum of the fetched batch and
// the endOffset) and does not spill into batches beyond it.
@Test
public void testAcquireWithMaxInFlightRecordsReleaseBatchAndAcquireSubsetRecords() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.withMaxInflightRecords(20)
.build();
// Acquire records, should be acquired till maxInFlightRecords i.e. 25 records till 24 offset.
fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 10);
// Validate 3 batches are fetched and fourth batch should be skipped. Max in-flight records
// limit is reached.
assertEquals(3, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(15, sharePartition.cachedState().get(15L).firstOffset());
assertEquals(24, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(25, sharePartition.nextFetchOffset());
// Release middle batch.
CompletableFuture<Void> ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.RELEASE.id))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
// Validate the nextFetchOffset is updated to 5.
assertEquals(5, sharePartition.nextFetchOffset());
// The complete released batch should be acquired but not any other batch as the lastOffset
// is adjusted according to the minimum of fetched batch and endOffset.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500 /* Max fetch records */,
5 /* Fetch Offset */,
fetchPartitionData(memoryRecords(5, 10), 0),
FETCH_ISOLATION_HWM),
10);
// Validate 1 batch is fetched, with 10 records till end of batch; delivery count is now 2.
assertArrayEquals(expectedAcquiredRecord(5, 14, 2).toArray(), acquiredRecordsList.toArray());
assertEquals(25, sharePartition.nextFetchOffset());
}
// Regression test: with a small max-fetch-records just above the released
// batch size, re-acquiring the released batch must not leak records from the
// following (still acquired) batch due to incorrect limit calculation.
@Test
public void testAcquireWithMaxInFlightRecordsReleaseBatchAndAcquireSubsetRecordsOverlap() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.withMaxInflightRecords(20)
.build();
// Acquire records, should be acquired till maxInFlightRecords i.e. 25 records till 24 offset.
fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 10);
// Validate 4 batches are fetched and fourth batch should be skipped. Max in-flight records
// limit is reached.
assertEquals(4, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
assertEquals(9, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(15, sharePartition.cachedState().get(15L).firstOffset());
assertEquals(24, sharePartition.cachedState().get(15L).lastOffset());
assertEquals(25, sharePartition.nextFetchOffset());
// Release only 1 middle batch.
CompletableFuture<Void> ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
// Validate the nextFetchOffset is updated to 5.
assertEquals(5, sharePartition.nextFetchOffset());
// Adjust the max fetch records to 6 so it's just 1 record more than the released batch size.
// This shall not impact the acquired records as only the released batch should be acquired.
// However, this previously caused an issue where the subset of records were acquired from the
// next batch due to incorrect calculation.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
6 /* Max fetch records */,
5 /* Fetch Offset */,
fetchPartitionData(memoryRecords(5, 5), 0),
FETCH_ISOLATION_HWM),
5);
// Validate 1 batch is fetched, with 5 records till end of batch; delivery count is now 2.
assertArrayEquals(expectedAcquiredRecord(5, 9, 2).toArray(), acquiredRecordsList.toArray());
assertEquals(25, sharePartition.nextFetchOffset());
}
@Test
public void testNextFetchOffsetInitialState() {
    // A freshly activated partition starts fetching from offset 0.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    assertEquals(0, partition.nextFetchOffset());
}
@Test
public void testNextFetchOffsetWithCachedStateAcquired() {
    // After acquiring a 5-record batch, the next fetch offset advances past it.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(partition, memoryRecords(5), 2, 5);
    assertEquals(5, partition.nextFetchOffset());
}
@Test
public void testNextFetchOffsetWithFindAndCachedStateEmpty() {
    // With the find-next-fetch-offset flag set and an empty cache, computing
    // the next fetch offset yields 0 and clears the flag.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    partition.updateFindNextFetchOffset(true);
    assertTrue(partition.findNextFetchOffset());
    assertEquals(0, partition.nextFetchOffset());
    assertFalse(partition.findNextFetchOffset());
}
@Test
public void testNextFetchOffsetWithFindAndCachedState() {
    // With the find-next-fetch-offset flag set, acquiring a batch computes the
    // next fetch offset from the cached state and clears the flag.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    partition.updateFindNextFetchOffset(true);
    assertTrue(partition.findNextFetchOffset());
    fetchAcquiredRecords(partition, memoryRecords(5), 5);
    assertEquals(5, partition.nextFetchOffset());
    assertFalse(partition.findNextFetchOffset());
}
@Test
public void testCanAcquireRecordsWithEmptyCache() {
    // With nothing in flight, records can be acquired even at the tightest limit.
    SharePartition partition = SharePartitionBuilder.builder().withMaxInflightRecords(1).build();
    assertTrue(partition.canAcquireRecords());
}
@Test
public void testCanAcquireRecordsWithCachedDataAndLimitNotReached() {
    // 5 in-flight records against a limit of 6: still below capacity.
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(6)
        .withState(SharePartitionState.ACTIVE)
        .build();
    fetchAcquiredRecords(partition, memoryRecords(5), 5);
    assertTrue(partition.canAcquireRecords());
}
@Test
public void testCanAcquireRecordsWithCachedDataAndLimitReached() {
    // 5 in-flight records against a limit of 1: the partition is at capacity.
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(1)
        .withState(SharePartitionState.ACTIVE)
        .build();
    fetchAcquiredRecords(partition, memoryRecords(5), 5);
    assertFalse(partition.canAcquireRecords());
}
// Verifies fetch-lock acquire/release semantics and the derived fetch-lock
// time and ratio metrics. The mocked clock values are consumed strictly in
// call order, so the statement sequence below must not be reordered.
@Test
public void testMaybeAcquireAndReleaseFetchLock() {
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty());
Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).
when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean());
Time time = mock(Time.class);
when(time.hiResClockMs())
.thenReturn(100L) // for tracking loadTimeMs
.thenReturn(110L) // for time when lock is acquired
.thenReturn(120L) // for time when lock is released
.thenReturn(140L) // for subsequent lock acquire
.thenReturn(170L); // for subsequent lock release
SharePartition sharePartition = SharePartitionBuilder.builder()
.withReplicaManager(replicaManager)
.withTime(time)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
Uuid fetchId = Uuid.randomUuid();
sharePartition.maybeInitialize();
assertTrue(sharePartition.maybeAcquireFetchLock(fetchId));
// Lock cannot be acquired again, as already acquired.
assertFalse(sharePartition.maybeAcquireFetchLock(fetchId));
// Release the lock.
sharePartition.releaseFetchLock(fetchId);
// First hold lasted 120 - 110 = 10ms.
assertEquals(1, sharePartitionMetrics.fetchLockTimeMs().count());
assertEquals(10, sharePartitionMetrics.fetchLockTimeMs().sum());
assertEquals(1, sharePartitionMetrics.fetchLockRatio().count());
// Since first request didn't have any lock idle wait time, the ratio should be 1.
assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean());
// Lock can be acquired again.
assertTrue(sharePartition.maybeAcquireFetchLock(fetchId));
// Release lock to update metrics and verify.
sharePartition.releaseFetchLock(fetchId);
// Second hold lasted 170 - 140 = 30ms, total 40ms across both holds.
assertEquals(2, sharePartitionMetrics.fetchLockTimeMs().count());
assertEquals(40, sharePartitionMetrics.fetchLockTimeMs().sum());
assertEquals(2, sharePartitionMetrics.fetchLockRatio().count());
// Since the second request had 20ms of idle wait time, the ratio should be 0.6 and mean as 0.8.
assertEquals(80, sharePartitionMetrics.fetchLockRatio().mean());
}
// Verifies the fetch-lock ratio metric (recorded as a percentage) across a
// mix of direct recordFetchLockRatioMetric calls and real acquire/release
// cycles. The mocked clock values are consumed strictly in call order.
@Test
public void testRecordFetchLockRatioMetric() {
Time time = mock(Time.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withTime(time)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
// Acquired time and last lock acquisition time is 0;
sharePartition.recordFetchLockRatioMetric(0);
assertEquals(1, sharePartitionMetrics.fetchLockRatio().count());
assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean());
when(time.hiResClockMs())
.thenReturn(10L) // for time when lock is acquired
.thenReturn(80L) // for time when lock is released
.thenReturn(160L); // to update lock idle duration while acquiring lock again.
Uuid fetchId = Uuid.randomUuid();
assertTrue(sharePartition.maybeAcquireFetchLock(fetchId));
sharePartition.releaseFetchLock(fetchId);
// Acquired time is 70 but last lock acquisition time was still 0, as it's the first request
// when last acquisition time was recorded. The last acquisition time should be updated to 80.
assertEquals(2, sharePartitionMetrics.fetchLockRatio().count());
assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean());
assertTrue(sharePartition.maybeAcquireFetchLock(fetchId));
// Update metric again with 0 as acquire time and 80 as idle duration ms.
sharePartition.recordFetchLockRatioMetric(0);
assertEquals(3, sharePartitionMetrics.fetchLockRatio().count());
// Mean should be (100+100+1)/3 = 67, as when idle duration is 80, the ratio should be 1.
assertEquals(67, sharePartitionMetrics.fetchLockRatio().mean());
// Update metric again with 10 as acquire time and 80 as idle duration ms.
sharePartition.recordFetchLockRatioMetric(10);
assertEquals(4, sharePartitionMetrics.fetchLockRatio().count());
// Mean should be (100+100+1+11)/4 = 53, as when idle time is 80 and acquire time 10, the ratio should be 11.
assertEquals(53, sharePartitionMetrics.fetchLockRatio().mean());
}
@Test
public void testAcknowledgeSingleRecordBatch() {
    ReplicaManager mockReplicaManager = Mockito.mock(ReplicaManager.class);
    SharePartition partition = SharePartitionBuilder.builder()
        .withReplicaManager(mockReplicaManager)
        .withState(SharePartitionState.ACTIVE)
        .build();
    MemoryRecords firstRecord = memoryRecords(1);
    MemoryRecords secondRecord = memoryRecords(1, 1);
    // Acquire a second batch as well: if the only batch were acknowledged, it
    // would be removed from cachedState and there would be nothing to assert on.
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition, firstRecord, 1);
    assertEquals(1, acquired.size());
    acquired = fetchAcquiredRecords(partition, secondRecord, 1);
    assertEquals(1, acquired.size());
    // Accept only the second record (offset 1).
    CompletableFuture<Void> ackResult = partition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(2, partition.nextFetchOffset());
    assertEquals(2, partition.cachedState().size());
    assertEquals(RecordState.ACKNOWLEDGED, partition.cachedState().get(1L).batchState());
    assertEquals(1, partition.cachedState().get(1L).batchDeliveryCount());
    assertNull(partition.cachedState().get(1L).offsetState());
    // Should not invoke completeDelayedShareFetchRequest as the first offset is not acknowledged yet.
    Mockito.verify(mockReplicaManager, Mockito.times(0))
        .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION));
    assertEquals(1, partition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeMultipleRecordBatch() {
    ReplicaManager mockReplicaManager = Mockito.mock(ReplicaManager.class);
    SharePartition partition = SharePartitionBuilder.builder()
        .withReplicaManager(mockReplicaManager)
        .withState(SharePartitionState.ACTIVE)
        .build();
    MemoryRecords batch = memoryRecords(5, 10);
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition, batch, 10);
    assertEquals(1, acquired.size());
    // Accept the full acquired range 5-14; the batch becomes fully
    // acknowledged and is dropped from the cache.
    CompletableFuture<Void> ackResult = partition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.ACCEPT.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(15, partition.nextFetchOffset());
    assertEquals(0, partition.cachedState().size());
    // Should invoke completeDelayedShareFetchRequest as the start offset is moved.
    Mockito.verify(mockReplicaManager, Mockito.times(1))
        .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION));
    assertEquals(0, partition.deliveryCompleteCount());
}
// Verifies acknowledgement of batches containing gap offsets: RELEASE moves
// records back to AVAILABLE, GAP offsets become ARCHIVED, ACCEPT becomes
// ACKNOWLEDGED, and a batch with mixed per-offset outcomes switches from
// batch-level to per-offset state tracking.
@Test
public void testAcknowledgeMultipleRecordBatchWithGapOffsets() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
MemoryRecords records1 = memoryRecords(5, 2);
// Untracked gap of 3 offsets from 7-9.
MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 5);
// Gap from 15-17 offsets.
recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
MemoryRecords records2 = recordsBuilder.build();
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 2);
assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(7, sharePartition.nextFetchOffset());
acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 9);
assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(19, sharePartition.nextFetchOffset());
// Release 5-6 entirely; for 10-18 the per-offset acknowledge types are:
// 10-14 RELEASE, 15-17 GAP, 18 ACCEPT.
CompletableFuture<Void> ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(
new ShareAcknowledgementBatch(5, 6, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(10, 18, List.of(
AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id,
AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id, ACKNOWLEDGE_TYPE_GAP_ID,
ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id
))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
// Released records make offset 5 fetchable again.
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
// Batch 10 now has mixed per-offset states, so batch-level state access throws.
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
assertNull(sharePartition.cachedState().get(5L).offsetState());
assertNotNull(sharePartition.cachedState().get(10L).offsetState());
// Check cached state.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
// Out of the 11 records acquired, 3 are GAP and 1 is ACCEPTED, which are Terminal. Thus deliveryCompleteCount
// will be 4
assertEquals(4, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // First batch: 2 records, cached under key 5L.
    MemoryRecords records1 = memoryRecords(5, 2);
    // Untracked gap of 3 offsets from 7-9.
    MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
    // Gap from 12-13 offsets.
    recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap for 15 offset.
    recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap from 17-19 offsets.
    recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    MemoryRecords records2 = recordsBuilder.build();
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 2);
    assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(7, sharePartition.nextFetchOffset());
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 11);
    assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(21, sharePartition.nextFetchOffset());
    // Acknowledging over subset of both batch with subset of gap offsets.
    // The 13 types map one-to-one onto offsets 6..18: ACCEPT for real records
    // (6, 10, 11, 14, 16, 18) and GAP markers for 12-13, 15, and 17.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(6, 18, List.of(
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
            AcknowledgeType.ACCEPT.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // Nothing was released, so the next fetch offset stays past both batches.
    assertEquals(21, sharePartition.nextFetchOffset());
    assertEquals(2, sharePartition.cachedState().size());
    // Both batches were acknowledged partially / with mixed types, so each has been
    // exploded into per-offset state (batchState() throws in that case).
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(5L).batchState());
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
    // Check cached state.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    // Offset 5 was not acknowledged, hence remains ACQUIRED by the member.
    expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState());
    expectedOffsetStateMap.clear();
    // Accepts become ACKNOWLEDGED, gap markers become ARCHIVED; 19-20 were outside
    // the acknowledge range and remain ACQUIRED.
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    // After acknowledgements, records at offsets 6, and 10 -> 18 are in Terminal state.
    assertEquals(10, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeOutOfRangeCachedData() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();

    // Acknowledging while the cache is empty must fail with InvalidRecordStateException.
    CompletableFuture<Void> emptyCacheAck = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(0, 15, List.of(AcknowledgeType.REJECT.id))));
    assertTrue(emptyCacheAck.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, emptyCacheAck);

    // Acquire a batch so the cache holds exactly one entry covering offsets 5-9.
    List<AcquiredRecords> acquired = fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    assertEquals(1, acquired.size());
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(5L));

    // An acknowledgement entirely beyond the cached range (20-25) is an invalid request.
    CompletableFuture<Void> outOfRangeAck = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(20, 25, List.of(AcknowledgeType.REJECT.id))));
    assertTrue(outOfRangeAck.isCompletedExceptionally());
    assertFutureThrows(InvalidRequestException.class, outOfRangeAck);
    // No record reached a terminal state, hence no completed deliveries.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeOutOfRangeCachedDataFirstBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();

    // Acquire the batch with offsets 0-4.
    List<AcquiredRecords> acquired = fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
    assertEquals(1, acquired.size());
    // Acquire the batch with offsets 20-24, leaving 5-19 untracked.
    acquired = fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
    assertEquals(1, acquired.size());

    // The first acknowledge batch (0-10) spills past the acquired range, so the
    // whole request is rejected up front.
    List<ShareAcknowledgementBatch> acknowledgeBatches = List.of(
        new ShareAcknowledgementBatch(0, 10, List.of(AcknowledgeType.ACCEPT.id)),
        new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.ACCEPT.id)));
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID, acknowledgeBatches);
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRequestException.class, ackResult);

    // Acquire the batch with offsets 5-10 to fill the hole.
    acquired = fetchAcquiredRecords(sharePartition, memoryRecords(5, 6), 6);
    assertEquals(1, acquired.size());
    assertEquals(0, sharePartition.deliveryCompleteCount());

    // The previously failing request now covers only acquired offsets and succeeds.
    ackResult = sharePartition.acknowledge(MEMBER_ID, acknowledgeBatches);
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // Every batch reached a terminal state, so the cache was pruned and the
    // delivery-complete counter reset to 0.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeWithAnotherMember() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();

    // Acquire offsets 5-9 on behalf of MEMBER_ID.
    List<AcquiredRecords> acquired = fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    assertEquals(1, acquired.size());
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(5L));

    // A different member must not be able to acknowledge records it does not own.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        "member-2",
        List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.REJECT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
}
@Test
public void testAcknowledgeWhenOffsetNotAcquired() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords records = memoryRecords(5, 5);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5);
    assertEquals(1, acquiredRecordsList.size());
    // Cached data with offset 5-9 should exist.
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(5L));
    // Release the whole batch, returning all offsets to AVAILABLE.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // All records are RELEASED, so none of them moved to a Terminal state.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Acknowledge the same batch again but with ACCEPT type.
    // This must fail: released records are AVAILABLE, not ACQUIRED.
    ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
    // Re-acquire the same batch and then acknowledge subset with ACCEPT type.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5);
    assertEquals(1, acquiredRecordsList.size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Reject the subset 6-8; those three offsets move to a terminal state.
    ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(6, 8, List.of(AcknowledgeType.REJECT.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(3, sharePartition.deliveryCompleteCount());
    // Re-acknowledge the subset batch with REJECT type.
    // Already-rejected offsets cannot be acknowledged again.
    ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(6, 8, List.of(AcknowledgeType.REJECT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
    // The terminal count is unchanged by the failed re-acknowledgement.
    assertEquals(3, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeRollbackWithFullBatchError() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();

    // Acquire three consecutive batches covering offsets 5-19.
    for (int startOffset : new int[] {5, 10, 15}) {
        List<AcquiredRecords> acquired = fetchAcquiredRecords(sharePartition, memoryRecords(startOffset, 5), 5);
        assertEquals(1, acquired.size());
    }
    // Cached data with offsets 5-19 should exist as three entries.
    assertEquals(3, sharePartition.cachedState().size());

    // The duplicated (15, 19) batch makes the request fail, which must roll back
    // the acknowledgements of all preceding batches too.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(
            new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);

    // After rollback every batch is still ACQUIRED and nothing counts as delivered.
    assertEquals(3, sharePartition.cachedState().size());
    for (long batchOffset : new long[] {5L, 10L, 15L}) {
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(batchOffset).batchState());
    }
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcknowledgeRollbackWithSubsetError() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();

    // Acquire three consecutive batches covering offsets 5-19.
    for (int startOffset : new int[] {5, 10, 15}) {
        List<AcquiredRecords> acquired = fetchAcquiredRecords(sharePartition, memoryRecords(startOffset, 5), 5);
        assertEquals(1, acquired.size());
    }
    // Cached data with offsets 5-19 should exist as three entries.
    assertEquals(3, sharePartition.cachedState().size());

    // The overlapping subset batch (16, 19) fails because 15-19 was already
    // acknowledged earlier in the same request; the whole request rolls back.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(
            new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(16, 19, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);

    // After rollback every batch is still ACQUIRED. Even though the failing batch
    // was a subset (16-19), the 15-19 entry keeps batch-level state: it was no
    // longer in ACQUIRED state mid-request (due to the prior acknowledgement), so
    // its offset state map was never exploded.
    assertEquals(3, sharePartition.cachedState().size());
    for (long batchOffset : new long[] {5L, 10L, 15L}) {
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(batchOffset).batchState());
    }
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcquireReleasedRecord() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Acquire the batch with offsets 10-14 (cached under key 10L).
    MemoryRecords records = memoryRecords(10, 5);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5);
    assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(15, sharePartition.nextFetchOffset());
    // Release only the middle offsets 12-13.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(12, 13, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // The released offsets are fetchable again, so the next fetch restarts at 12.
    assertEquals(12, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    // The partial acknowledgement exploded the batch into per-offset state,
    // so batch-level state is no longer accessible.
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    // RELEASE is not a terminal state, so no deliveries have completed.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Send the same fetch request batch again but only 2 offsets should come as acquired.
    // Offsets 12-13 are re-delivered with delivery count bumped to 2.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 2);
    assertArrayEquals(expectedAcquiredRecords(12, 13, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(15, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcquireReleasedRecordMultipleBatches() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // First fetch request with 5 records starting from offset 10.
    MemoryRecords records1 = memoryRecords(10, 5);
    // Second fetch request with 5 records starting from offset 15.
    MemoryRecords records2 = memoryRecords(15, 5);
    // Third fetch request with 5 records starting from offset 23, gap of 3 offsets.
    MemoryRecords records3 = memoryRecords(23, 5);
    // Fourth fetch request with 5 records starting from offset 28.
    MemoryRecords records4 = memoryRecords(28, 5);
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 5);
    assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(15, sharePartition.nextFetchOffset());
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 5);
    assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(20, sharePartition.nextFetchOffset());
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5);
    assertArrayEquals(expectedAcquiredRecords(records3, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(28, sharePartition.nextFetchOffset());
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records4, 5);
    assertArrayEquals(expectedAcquiredRecords(records4, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(33, sharePartition.nextFetchOffset());
    // All four batches are fully acquired and keep batch-level state (no per-offset maps).
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(28L).batchState());
    assertNull(sharePartition.cachedState().get(10L).offsetState());
    assertNull(sharePartition.cachedState().get(15L).offsetState());
    assertNull(sharePartition.cachedState().get(23L).offsetState());
    assertNull(sharePartition.cachedState().get(28L).offsetState());
    // Release 12-30: a range that partially overlaps the first and fourth batches
    // and fully covers the second and third.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(12, 30, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(12, sharePartition.nextFetchOffset());
    assertEquals(4, sharePartition.cachedState().size());
    // Partially-covered batches (first and fourth) are exploded into per-offset state...
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
    assertNotNull(sharePartition.cachedState().get(10L).offsetState());
    // ...while fully-covered batches flip to AVAILABLE at the batch level.
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
    assertNull(sharePartition.cachedState().get(15L).offsetState())
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(23L).batchState());
    assertNull(sharePartition.cachedState().get(23L).offsetState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(23L).batchMemberId());
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(28L).batchState());
    assertNotNull(sharePartition.cachedState().get(28L).offsetState());
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    expectedOffsetStateMap.clear();
    expectedOffsetStateMap.put(28L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(29L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(30L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(31L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    expectedOffsetStateMap.put(32L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(28L).offsetState());
    // Send next batch from offset 12, only 3 records should be acquired.
    // They are re-delivered, so their delivery count becomes 2.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 3);
    assertArrayEquals(expectedAcquiredRecords(12, 14, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(15, sharePartition.nextFetchOffset());
    // Though record2 batch exists to acquire but send batch record3, it should be acquired but
    // next fetch offset should not move.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5);
    assertArrayEquals(expectedAcquiredRecords(records3, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(15, sharePartition.nextFetchOffset());
    // Acquire partial records from batch 2.
    MemoryRecords subsetRecords = memoryRecords(17, 2);
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, subsetRecords, 2);
    assertArrayEquals(expectedAcquiredRecords(17, 18, 2).toArray(), acquiredRecordsList.toArray());
    // Next fetch offset should not move.
    assertEquals(15, sharePartition.nextFetchOffset());
    // Acquire partial records from record 4 to further test if the next fetch offset move
    // accordingly once complete record 2 is also acquired.
    subsetRecords = memoryRecords(28, 1);
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, subsetRecords, 1);
    assertArrayEquals(expectedAcquiredRecords(28, 28, 2).toArray(), acquiredRecordsList.toArray());
    // Next fetch offset should not move.
    assertEquals(15, sharePartition.nextFetchOffset());
    // Try to acquire complete record 2 though it's already partially acquired, the next fetch
    // offset should move.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 3);
    // Offset 15,16 and 19 should be acquired.
    List<AcquiredRecords> expectedAcquiredRecords = expectedAcquiredRecords(15, 16, 2);
    expectedAcquiredRecords.addAll(expectedAcquiredRecords(19, 19, 2));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
    // Next fetch offset moves to 29, the first still-available offset (29-30 of
    // batch 4 remain released and un-acquired).
    assertEquals(29, sharePartition.nextFetchOffset());
}
@Test
public void testAcquireGapAtBeginningAndRecordsFetchedFromGap() {
    // Persister reports start offset 11 with state batches 21-30 (ACKNOWLEDGED) and
    // 31-40 (ARCHIVED), leaving a gap from 11 to 20 at the beginning.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // Both persisted batches (20 offsets in total) are in terminal states.
    assertEquals(20, sharePartition.deliveryCompleteCount());
    // All records fetched are part of the gap. The gap is from 11 to 20, fetched offsets are 11 to 15.
    MemoryRecords records = memoryRecords(11, 5);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5);
    // Gap records are acquired for the first time, hence delivery count 1.
    assertArrayEquals(expectedAcquiredRecord(11, 15, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(16, sharePartition.nextFetchOffset());
    // Acquiring gap records does not change the terminal-state count.
    assertEquals(20, sharePartition.deliveryCompleteCount());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // After records are acquired, the persisterReadResultGapWindow should be updated:
    // the remaining gap now starts at 16 (offsets 11-15 were just acquired).
    assertEquals(16, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightBatches() {
    // Persister reports start offset 11 with state batches 21-30 (ACKNOWLEDGED) and
    // 31-40 (ARCHIVED), leaving a gap from 11 to 20 at the beginning.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // Both persisted batches (20 offsets in total) are in terminal states.
    assertEquals(20, sharePartition.deliveryCompleteCount());
    // Fetched offsets overlap the inFlight batches. The gap is from 11 to 20, but fetched records are from 11 to 25.
    MemoryRecords records = memoryRecords(11, 15);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10);
    // Only the gap portion 11-20 is acquired; the overlapping offsets 21-25 belong to
    // terminal-state batches and are skipped.
    assertArrayEquals(expectedAcquiredRecord(11, 20, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    // Everything up to the end offset is either acquired or terminal, so the next
    // fetch starts past the end offset.
    assertEquals(41, sharePartition.nextFetchOffset());
    assertEquals(20, sharePartition.deliveryCompleteCount());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // After records are acquired, the persisterReadResultGapWindow should be updated:
    // the leading gap was fully consumed, so the window now starts at 21.
    assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightAvailableBatches() {
    // Persister reports start offset 11 with batches 21-30 (AVAILABLE, delivery
    // count 2) and 31-40 (ARCHIVED), leaving a gap from 11 to 20 at the beginning.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));

    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // Only the ARCHIVED batch (31-40) is terminal, giving 10 completed deliveries.
    assertEquals(10, sharePartition.deliveryCompleteCount());

    // Fetch 11-25: the leading gap 11-20 plus an overlap into the AVAILABLE batch.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, memoryRecords(11, 15), 15);

    // The gap 11-20 is acquired as one fresh batch (delivery count 1); the overlapping
    // AVAILABLE offsets 21-25 are re-acquired individually with delivery count 3.
    List<AcquiredRecords> expected = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
    for (int offset = 21; offset <= 25; offset++) {
        expected.addAll(expectedAcquiredRecord(offset, offset, 3));
    }
    assertArrayEquals(expected.toArray(), acquiredRecordsList.toArray());

    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(26, sharePartition.nextFetchOffset());
    assertEquals(10, sharePartition.deliveryCompleteCount());

    // After records are acquired, the persister-read gap window advances past the
    // acquired range: it now starts at 26 and still ends at 40.
    GapWindow gapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(gapWindow);
    assertEquals(26, gapWindow.gapStartOffset());
    assertEquals(40, gapWindow.endOffset());
}
@Test
public void testAcquireWhenCachedStateContainsGapsAndRecordsFetchedFromNonGapOffset() {
    // Persister reports start offset 11 with batches 11-20 (AVAILABLE, delivery
    // count 2) and 31-40 (ARCHIVED), leaving a gap from 21 to 30 in the middle.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-30
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // Only the ARCHIVED batch (31-40) is terminal, giving 10 completed deliveries.
    assertEquals(10, sharePartition.deliveryCompleteCount());
    // Fetched records are part of inFlightBatch 11-20 with state AVAILABLE. Fetched offsets also overlap the
    // inFlight batches. The gap is from 11 to 20, but fetched records are from 11 to 25.
    MemoryRecords records = memoryRecords(11, 15);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 15);
    // 2 different batches will be acquired this time (11-20 and 21-25). The first batch will have delivery count 3
    // as previous deliveryCount was 2. The second batch will have delivery count 1 as it is acquired for the first time.
    List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 3));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 25, 1));
    assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(26, sharePartition.nextFetchOffset());
    // Acquiring records does not change the terminal-state count.
    assertEquals(10, sharePartition.deliveryCompleteCount());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // After records are acquired, the persisterReadResultGapWindow should be updated:
    // the remaining (unfetched) portion of the gap now starts at 26.
    assertEquals(26, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
}
/**
 * Verifies that when the fetched records (11-85) span several in-flight batches, every
 * interleaved gap and every AVAILABLE batch is acquired, while ACKNOWLEDGED and ARCHIVED
 * batches are skipped. A partially covered AVAILABLE batch (81-90) is split per offset.
 */
@Test
public void testAcquireGapAtBeginningAndFetchedRecordsOverlapMultipleInFlightBatches() {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(41L, 50L, RecordState.AVAILABLE.id, (short) 1), // There is a gap from 31 to 40
                    new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1), // There is a gap from 51 to 60
                    new PersisterStateBatch(81L, 90L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 71 to 80
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // The ACKNOWLEDGED (21-30) and ARCHIVED (61-70) batches account for the 20 delivery-complete records.
    assertEquals(20, sharePartition.deliveryCompleteCount());
    MemoryRecords records = memoryRecords(11, 75);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 55);
    // Acquired batches will contain the following ->
    // 1. 11-20 (gap offsets)
    // 2. 31-40 (gap offsets)
    // 3. 41-50 (AVAILABLE batch in cachedState)
    // 4. 51-60 (gap offsets)
    // 5. 71-80 (gap offsets)
    // 6. 81-85 (AVAILABLE batch in cachedState). These will be acquired as separate batches because we are breaking a batch in the cachedState
    List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(71, 80, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(81, 81, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(82, 82, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(83, 83, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(84, 84, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(85, 85, 2));
    assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(90, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(86, sharePartition.nextFetchOffset());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    assertEquals(20, sharePartition.deliveryCompleteCount());
    // After records are acquired, the persisterReadResultGapWindow should be updated
    assertEquals(86, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(90, persisterReadResultGapWindow.endOffset());
}
/**
 * Verifies acquisition when the fetched records cover the leading gap (11-20) plus the first
 * AVAILABLE cached batch (21-30), ending just before the next gap at 31-40.
 */
@Test
public void testAcquireGapAtBeginningAndFetchedRecordsEndJustBeforeGap() {
    // Cached state holds 21-30 (AVAILABLE), 41-50 (ACKNOWLEDGED) and 61-70 (ARCHIVED),
    // which leaves gaps at 11-20, 31-40 and 51-60.
    Persister statePersister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(41L, 50L, RecordState.ACKNOWLEDGED.id, (short) 1), // There is a gap from 31 to 40
                    new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 51 to 60
                ))))));
    Mockito.when(statePersister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readResult));
    SharePartition partition = SharePartitionBuilder.builder().withPersister(statePersister).build();
    partition.maybeInitialize();
    assertEquals(20, partition.deliveryCompleteCount());
    List<AcquiredRecords> acquired = fetchAcquiredRecords(partition, memoryRecords(11, 20), 20);
    // Expected acquisitions:
    // 1. 11-20 (gap offsets), first delivery.
    // 2. 21-30 (AVAILABLE batch in cachedState), delivery count bumped from 2 to 3.
    List<AcquiredRecords> expected = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
    expected.addAll(expectedAcquiredRecord(21, 30, 3));
    assertArrayEquals(expected.toArray(), acquired.toArray());
    assertEquals(SharePartitionState.ACTIVE, partition.partitionState());
    assertFalse(partition.cachedState().isEmpty());
    assertEquals(11, partition.startOffset());
    assertEquals(70, partition.endOffset());
    assertEquals(3, partition.stateEpoch());
    assertEquals(31, partition.nextFetchOffset());
    assertEquals(20, partition.deliveryCompleteCount());
    // After the acquisition the gap window derived from the persister read result should have
    // advanced past the acquired records.
    GapWindow gapWindow = partition.persisterReadResultGapWindow();
    assertNotNull(gapWindow);
    assertEquals(31, gapWindow.gapStartOffset());
    assertEquals(70, gapWindow.endOffset());
}
/**
 * Verifies acquisition when the fetched records (11-75) end in the middle of a gap (71-80):
 * only the fetched portion of that trailing gap (71-75) is acquired.
 */
@Test
public void testAcquireGapAtBeginningAndFetchedRecordsIncludeGapOffsetsAtEnd() {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(41L, 50L, RecordState.AVAILABLE.id, (short) 1), // There is a gap from 31 to 40
                    new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1), // There is a gap from 51 to 60
                    new PersisterStateBatch(81L, 90L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 71 to 80
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // The ACKNOWLEDGED (21-30) and ARCHIVED (61-70) batches account for the 20 delivery-complete records.
    assertEquals(20, sharePartition.deliveryCompleteCount());
    MemoryRecords records = memoryRecords(11, 65);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 45);
    // Acquired batches will contain the following ->
    // 1. 11-20 (gap offsets)
    // 2. 31-40 (gap offsets)
    // 3. 41-50 (AVAILABLE batch in cachedState)
    // 4. 51-60 (gap offsets)
    // 5. 71-75 (gap offsets). The gap is from 71 to 80, but the fetched records end at 75. These gap offsets will be acquired as a single batch
    List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 2));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1));
    expectedAcquiredRecord.addAll(expectedAcquiredRecord(71, 75, 1));
    assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(90, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(76, sharePartition.nextFetchOffset());
    assertEquals(20, sharePartition.deliveryCompleteCount());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // After records are acquired, the persisterReadResultGapWindow should be updated
    assertEquals(76, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(90, persisterReadResultGapWindow.endOffset());
}
/**
 * Verifies that when records are fetched from a gap and maxFetchRecords (6) is smaller than the
 * number of fetched records (8), acquisition stops at the maxFetchRecords boundary (21-26).
 */
@Test
public void testAcquireWhenRecordsFetchedFromGapAndMaxFetchRecordsIsExceeded() {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2),
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-30
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // After initialization is successful, the startOffset can move ahead because the very first batch in cached state
    // is in a Terminal state (11 -> 20 ACKNOWLEDGED). Thus, start offset will move past it and the only batch remaining
    // in cached state will be (31 -> 40) ARCHIVED. This, instead of 20, deliveryCompleteCount is 10.
    assertEquals(10, sharePartition.deliveryCompleteCount());
    // Creating 3 batches of records with a total of 8 records
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 21, 3).close();
    memoryRecordsBuilder(buffer, 24, 3).close();
    memoryRecordsBuilder(buffer, 27, 2).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        6, // maxFetchRecords is less than the number of records fetched
        DEFAULT_FETCH_OFFSET,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        6);
    // Since max fetch records (6) is less than the number of records fetched (8), only 6 records will be acquired
    assertArrayEquals(expectedAcquiredRecord(21, 26, 1).toArray(), acquiredRecordsList.toArray());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertFalse(sharePartition.cachedState().isEmpty());
    assertEquals(21, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(3, sharePartition.stateEpoch());
    assertEquals(27, sharePartition.nextFetchOffset());
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // The unacquired remainder of the gap (27 onwards) is still tracked in the gap window.
    assertEquals(27, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testAcquireMaxFetchRecordsExceededAfterAcquiringGaps() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2), // There is a gap from 11-20
new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1)
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(10, sharePartition.deliveryCompleteCount());
// Creating 3 batches of records with a total of 8 records
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 11, 10).close();
memoryRecordsBuilder(buffer, 21, 10).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
8, // maxFetchRecords is less than the number of records fetched
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
10);
assertArrayEquals(expectedAcquiredRecord(11, 20, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(11, sharePartition.startOffset());
assertEquals(40, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(21, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNotNull(persisterReadResultGapWindow);
assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testAcquireMaxFetchRecordsExceededBeforeAcquiringGaps() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(31L, 40L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 21-30
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(0, sharePartition.deliveryCompleteCount());
// Creating 3 batches of records with a total of 8 records
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 11, 10).close();
memoryRecordsBuilder(buffer, 21, 20).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
8, // maxFetchRecords is less than the number of records fetched
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
10);
assertArrayEquals(expectedAcquiredRecord(11, 20, 3).toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(11, sharePartition.startOffset());
assertEquals(40, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(21, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNotNull(persisterReadResultGapWindow);
assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testAcquireWhenRecordsFetchedFromGapAndPartitionContainsNaturalGaps() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14
new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-29
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(17, sharePartition.deliveryCompleteCount());
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 11).close();
memoryRecordsBuilder(buffer, 30, 21).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 24);
// Acquired batches will contain the following ->
// 1. 10-14 (gap offsets)
// 2. 21-29 (gap offsets)
// 3. 41-50 (gap offsets)
// The offsets fetched from partition include a natural gap from 21 to 29. The cached state also contain the
// gap from 21 to 29. But since the broker does not parse the fetched records, the broker is not aware of this
// natural gap. In this case, the gap will be acquired, and it is the client's responsibility to inform the
// broker about this gap.
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 29, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(10, sharePartition.startOffset());
assertEquals(50, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(51, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
}
@Test
public void testAcquireCachedStateInitialGapMatchesWithActualPartitionGap() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(20, sharePartition.deliveryCompleteCount());
// Creating 2 batches starting from 21, such that there is a natural gap from 11 to 20
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 21, 15).close();
memoryRecordsBuilder(buffer, 36, 25).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 20);
// Acquired batches will contain the following ->
// 1. 31-40 (gap offsets)
// 2. 51-60 (new offsets)
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(31, 40, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(11, sharePartition.startOffset());
assertEquals(60, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
}
@Test
public void testAcquireCachedStateInitialGapOverlapsWithActualPartitionGap() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(20, sharePartition.deliveryCompleteCount());
// Creating 2 batches starting from 16, such that there is a natural gap from 11 to 15
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 16, 20).close();
memoryRecordsBuilder(buffer, 36, 25).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 25);
// Acquired batches will contain the following ->
// 1. 16-20 (gap offsets)
// 2. 31-40 (gap offsets)
// 3. 51-60 (new offsets)
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(16, 20, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(16, sharePartition.startOffset());
assertEquals(60, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
}
@Test
public void testAcquireCachedStateGapInBetweenOverlapsWithActualPartitionGap() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(20, sharePartition.deliveryCompleteCount());
// Creating 3 batches starting from 11, such that there is a natural gap from 26 to 30
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 11, 10).close();
memoryRecordsBuilder(buffer, 21, 15).close();
memoryRecordsBuilder(buffer, 41, 20).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 30);
// Acquired batches will contain the following ->
// 1. 11-20 (gap offsets)
// 2. 31-40 (gap offsets)
// 3. 51-60 (new offsets)
// The entire gap of 31 to 40 will be acquired even when the fetched records only contain offsets 31 to 36 because
// we rely on the client to inform the broker about these natural gaps in the partition log
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(11, sharePartition.startOffset());
assertEquals(60, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
}
@Test
public void testAcquireWhenRecordsFetchedAfterGapsAreFetched() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(11L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2),
new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21 to 30
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(10, sharePartition.deliveryCompleteCount());
// Fetched records are from 21 to 35
MemoryRecords records = memoryRecords(21, 15);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10);
// Since the gap if only from 21 to 30 and the next batch is ARCHIVED, only 10 gap offsets will be acquired as a single batch
assertArrayEquals(expectedAcquiredRecord(21, 30, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(21, sharePartition.startOffset());
assertEquals(40, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(41, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNotNull(persisterReadResultGapWindow);
assertEquals(31, persisterReadResultGapWindow.gapStartOffset());
assertEquals(40, persisterReadResultGapWindow.endOffset());
// Fetching from the nextFetchOffset so that endOffset moves ahead
records = memoryRecords(41, 15);
acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 15);
assertArrayEquals(expectedAcquiredRecord(41, 55, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(21, sharePartition.startOffset());
assertEquals(55, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(56, sharePartition.nextFetchOffset());
// Since the endOffset is now moved ahead, the persisterReadResultGapWindow should be empty
persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
}
@Test
public void testAcquisitionLockForAcquiringSingleRecord() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(1), 1);
assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
assertEquals(1, sharePartition.timer().size());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Allowing acquisition lock to expire.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.nextFetchOffset() == 0 &&
sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 &&
sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null &&
sharePartition.timer().size() == 0,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
assertEquals(1, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0);
// Since the delivery attempts are not exhausted, the deliveryCompleteCount will still be 0 as the state
// of the record is AVAILABLE.
assertEquals(0, sharePartition.deliveryCompleteCount());
}
    /**
     * Verifies that one acquisition lock timeout task covers a whole multi-record batch:
     * a single timer entry is registered for the 5-record batch starting at offset 10, and
     * when the lock expires the entire batch returns to AVAILABLE (delivery count 1, no
     * timeout task), nextFetchOffset rewinds to 10, and the timeout metric counts one
     * event per record (5 total).
     */
    @Test
    public void testAcquisitionLockForAcquiringMultipleRecords() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .withSharePartitionMetrics(sharePartitionMetrics)
            .build();
        // Acquire a batch of 5 records starting at offset 10; a single lock timeout task guards the batch.
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        assertEquals(1, sharePartition.timer().size());
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS)
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0
                && sharePartition.nextFetchOffset() == 10
                && sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE
                && sharePartition.cachedState().get(10L).batchDeliveryCount() == 1
                && sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
        // One timeout event is recorded per record in the expired batch.
        assertEquals(5, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
        assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0);
        // Since the delivery attempts are not exhausted, the deliveryCompleteCount will still be 0 as the state
        // of the record is AVAILABLE.
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies acquisition lock handling when a second fetch overlaps an already-acquired
     * batch: offsets 0-4 are acquired first, then a fetch of 0-9 acquires only the new
     * offsets 5-9 (the overlap is ignored), yielding two independent timeout tasks. When
     * the lock expires, both batches return to AVAILABLE and 10 timeout events are metered.
     */
    @Test
    public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .withSharePartitionMetrics(sharePartitionMetrics)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored.
        fetchAcquiredRecords(sharePartition, memoryRecords(10), 5);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        // Two timer entries: one per acquired batch (0-4 and 5-9).
        assertEquals(2, sharePartition.timer().size());
        // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for all the acquired records.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null &&
                sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(), 5L, List.of())));
        assertEquals(10, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
        assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0);
        // Since the delivery attempts are not exhausted, the deliveryCompleteCount will still be 0 as the state
        // of the record is AVAILABLE.
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that re-acquiring a batch after its acquisition lock expired creates a
     * fresh timeout task: the batch at offset 10 is acquired, released by lock expiry
     * (back to AVAILABLE), then acquired again — a new timer entry must exist afterwards.
     */
    @Test
    public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 10 &&
                sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acquire the same batch again.
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        // Acquisition lock timeout task should be created on re-acquire action.
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that acknowledging (RELEASE) a single-record batch cancels its acquisition
     * lock timeout task immediately: after the acknowledge the timer is empty, the record
     * is AVAILABLE with delivery count 1, and a later lock-expiry pass changes nothing
     * because the task was already cancelled.
     */
    @Test
    public void testAcquisitionLockOnAcknowledgingSingleRecordBatch() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        fetchAcquiredRecords(sharePartition, memoryRecords(1), 1);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size())
        // RELEASE acknowledgement returns the record to AVAILABLE and cancels the timeout task.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RELEASE.id))));
        assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        assertEquals(0, sharePartition.nextFetchOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState());
        assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
        // No per-offset state is maintained for a batch acknowledged as a whole.
        assertNull(sharePartition.cachedState().get(0L).offsetState());
        // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged.
        // Hence, the acquisition lock timeout task would be cancelled already.
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 &&
                sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that acknowledging (RELEASE) a full multi-record batch (offsets 5-14)
     * cancels the batch's acquisition lock timeout task: the timer empties, the batch
     * becomes AVAILABLE with delivery count 1, nextFetchOffset rewinds to 5, and a later
     * lock-expiry pass is a no-op.
     */
    @Test
    public void testAcquisitionLockOnAcknowledgingMultipleRecordBatch() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
        assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Release the whole batch 5-14 in one acknowledgement.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.RELEASE.id))));
        assertEquals(5, sharePartition.nextFetchOffset());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
        assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
        assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged.
        // Hence, the acquisition lock timeout task would be cancelled already.
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 5 &&
                sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(5L).batchDeliveryCount() == 1 &&
                sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies acknowledgement over multiple batches that contain offset gaps: three
     * batches are acquired (1-2, 5-6, and 10-18 with internal gaps), then offsets 5-18
     * are ACCEPTed without the client listing the gap offsets explicitly. The timeout
     * tasks of the acknowledged batches are cancelled while the unacknowledged batch
     * (starting at 1) keeps its task until lock expiry releases it back to AVAILABLE.
     */
    @Test
    public void testAcquisitionLockOnAcknowledgingMultipleRecordBatchWithGapOffsets() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        MemoryRecords records1 = memoryRecords(5, 2);
        // Untracked gap of 3 offsets from 7-9.
        MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 5);
        // Gap from 15-17 offsets.
        recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
        MemoryRecords records2 = recordsBuilder.build();
        MemoryRecords records3 = memoryRecords(1, 2);
        fetchAcquiredRecords(sharePartition, records3, 2);
        assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        fetchAcquiredRecords(sharePartition, records1, 2);
        assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(2, sharePartition.timer().size());
        fetchAcquiredRecords(sharePartition, records2, 9);
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(3, sharePartition.timer().size());
        sharePartition.acknowledge(MEMBER_ID,
            // Do not send gap offsets to verify that they are ignored and accepted as per client ack.
            List.of(new ShareAcknowledgementBatch(5, 18, List.of(AcknowledgeType.ACCEPT.id))));
        // Acknowledged batches (5-6 and 10-18) lose their timeout tasks; batch 1-2 keeps its task.
        assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // All the acquired records except 1 -> 2 have been acknowledged.
        assertEquals(11, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for batch with starting offset 1.
        // Since, other records have been acknowledged.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 1 &&
                sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask() == null &&
                sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null &&
                sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(1L, List.of(), 5L, List.of(), 10L, List.of())));
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(1L).batchState());
        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(10L).batchState());
        assertEquals(11, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies per-offset acquisition lock tasks when only a subset of an expired batch
     * is re-acquired: after batch 10-17 expires and offsets 12-14 are fetched again, the
     * batch switches to per-offset tracking where only 12-14 carry timeout tasks (and
     * delivery count 2); the remaining offsets stay AVAILABLE at delivery count 1. A
     * second expiry releases 12-14 and clears every per-offset task.
     */
    @Test
    public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 8), 8);
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 10 &&
                sharePartition.cachedState().size() == 1 &&
                sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acquire subset of records again.
        fetchAcquiredRecords(sharePartition, memoryRecords(12, 3), 3);
        // Acquisition lock timeout task should be created only on offsets which have been acquired again.
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask());
        // One timer entry per re-acquired offset (12, 13, 14).
        assertEquals(3, sharePartition.timer().size());
        // Allowing acquisition lock to expire for the acquired subset batch.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> {
                Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
                expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                return sharePartition.timer().size() == 0 &&
                    sharePartition.nextFetchOffset() == 10 &&
                    expectedOffsetStateMap.equals(sharePartition.cachedState().get(10L).offsetState());
            },
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of(10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L))));
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies per-offset timeout handling when a partial acknowledgement (with explicit
     * gap markers) covers subsets of two batches: batches 5-6 and 10-20 (with gaps at
     * 12-13, 15, 17-19) are acquired, then offsets 6-18 are acknowledged mixing ACCEPT
     * and GAP. Only the unacknowledged offsets (5, 19, 20) retain timeout tasks; after
     * lock expiry they become AVAILABLE while accepted offsets stay ACKNOWLEDGED and gap
     * offsets are ARCHIVED.
     */
    @Test
    public void testAcquisitionLockOnAcknowledgingMultipleSubsetRecordBatchWithGapOffsets() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        MemoryRecords records1 = memoryRecords(5, 2);
        // Untracked gap of 3 offsets from 7-9.
        MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
        // Gap from 12-13 offsets.
        recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
        // Gap for 15 offset.
        recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
        // Gap from 17-19 offsets.
        recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
        MemoryRecords records2 = recordsBuilder.build();
        fetchAcquiredRecords(sharePartition, records1, 2);
        assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        fetchAcquiredRecords(sharePartition, records2, 11);
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(2, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acknowledging over subset of both batch with subset of gap offsets.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(
            6, 18, List.of(
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
            AcknowledgeType.ACCEPT.id))));
        // Only offsets outside the acknowledged 6-18 range (5, 19, 20) still hold timeout tasks.
        assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask());
        assertEquals(3, sharePartition.timer().size());
        assertEquals(10, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire for the offsets that have not been acknowledged yet.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> {
                Map<Long, InFlightState> expectedOffsetStateMap1 = new HashMap<>();
                expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                Map<Long, InFlightState> expectedOffsetStateMap2 = new HashMap<>();
                expectedOffsetStateMap2.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                return sharePartition.timer().size() == 0 &&
                    sharePartition.nextFetchOffset() == 5 &&
                    expectedOffsetStateMap1.equals(sharePartition.cachedState().get(5L).offsetState()) &&
                    expectedOffsetStateMap2.equals(sharePartition.cachedState().get(10L).offsetState());
            },
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L), 10L, List.of(10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L, 20L))));
        assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask());
        assertEquals(10, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that repeated acquisition lock expiries exhaust the delivery attempts:
     * with maxDeliveryCount = 2, the batch at offset 10 is released once (delivery count 1,
     * AVAILABLE), re-acquired (delivery count 2), and on the second expiry is ARCHIVED and
     * counted in deliveryCompleteCount. A batch at offset 0 is kept in-flight so that the
     * SPSO cannot move forward during the test.
     */
    @Test
    public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records
            .withState(SharePartitionState.ACTIVE)
            .build();
        // Adding memoryRecords(10) in the sharePartition to make sure that SPSO doesn't move forward when delivery count of records2
        // exceed the max delivery count.
        fetchAcquiredRecords(sharePartition, memoryRecords(10), 10);
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 10), 10);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(2, sharePartition.timer().size());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(10L).batchDeliveryCount() == 1 &&
                sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Second (and final) delivery attempt for batch 10-19.
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 10), 10);
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
        assertEquals(2, sharePartition.cachedState().get(10L).batchDeliveryCount());
        assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Allowing acquisition lock to expire to archive the records that reach max delivery count.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                // After the second delivery attempt fails to acknowledge the record correctly, the record should be archived.
                sharePartition.cachedState().get(10L).batchState() == RecordState.ARCHIVED &&
                sharePartition.cachedState().get(10L).batchDeliveryCount() == 2 &&
                sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
        // After the acquisition lock expires for the second time, the records should be archived as the max delivery count is reached.
        assertEquals(10, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that the share partition start offset (SPSO) advances past records that
     * reach the max delivery count: batch 0-9 expires once, then only offsets 0-4 are
     * re-acquired; on the second expiry 0-4 are ARCHIVED (delivery count 2) while 5-9
     * remain AVAILABLE (delivery count 1), so the start offset moves to 5 but the batch
     * entry survives in the cached state.
     */
    @Test
    public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(10), 10);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 &&
                sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Re-acquire only offsets 0-4; the batch switches to per-offset state tracking.
        fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
        assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask());
        // Allowing acquisition lock to expire to archive the records that reach max delivery count.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> {
                Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
                expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(9L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 &&
                    expectedOffsetStateMap.equals(sharePartition.cachedState().get(0L).offsetState());
            },
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L))));
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask());
        // Since only first 5 records from the batch are archived, the batch remains in the cachedState, but the
        // start offset is updated
        assertEquals(5, sharePartition.startOffset());
        // NOTE(review): deliveryCompleteCount stays 0 here even though 0-4 are ARCHIVED —
        // presumably the counter tracks whole-batch completion; confirm against SharePartition.
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that when an entire leading batch reaches the max delivery count, the SPSO
     * moves past it and the cached state is cleared: batch 0-9 expires twice with
     * maxDeliveryCount = 2, after which the cached state is empty and nextFetchOffset is 10.
     */
    @Test
    public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(10), 10);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 0 &&
                sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Second (final) delivery attempt for the whole batch 0-9.
        fetchAcquiredRecords(sharePartition, memoryRecords(10), 10);
        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Allowing acquisition lock to expire to archive the records that reach max delivery count.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                // After the second failed attempt to acknowledge the record batch successfully, the record batch is archived.
                // Since this is the first batch in the share partition, SPSO moves forward and the cachedState is cleared
                sharePartition.cachedState().isEmpty() &&
                sharePartition.nextFetchOffset() == 10,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of()));
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    /**
     * Verifies that acknowledging records after their acquisition lock already expired
     * fails with {@link InvalidRecordStateException}: batch 5-9 is released back to
     * AVAILABLE by lock expiry, so subsequent ACCEPT and REJECT acknowledgements both
     * complete exceptionally and leave state and timers untouched.
     */
    @Test
    public void testAcknowledgeAfterAcquisitionLockTimeout() throws InterruptedException {
        sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
        assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.timer().size() == 0 &&
                sharePartition.nextFetchOffset() == 5 &&
                sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE &&
                sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of())));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acknowledge with ACCEPT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout.
        CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID,
            List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.ACCEPT.id))));
        assertTrue(ackResult.isCompletedExceptionally());
        assertFutureThrows(InvalidRecordStateException.class, ackResult);
        assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Try acknowledging with REJECT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout.
        ackResult = sharePartition.acknowledge(MEMBER_ID,
            List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.REJECT.id))));
        assertTrue(ackResult.isCompletedExceptionally());
        assertFutureThrows(InvalidRecordStateException.class, ackResult);
        assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.timer().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
/**
 * Verifies that acquisition lock handling becomes per-offset after partial acknowledgements:
 * released/accepted offsets drop their lock tasks, and a later lock expiry only transitions
 * the offsets that were never acknowledged.
 */
@Test
public void testAcquisitionLockAfterDifferentAcknowledges() throws InterruptedException {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire one batch; a single batch-level acquisition lock task is scheduled for it.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
    assertEquals(1, sharePartition.timer().size());
    // Acknowledge with RELEASE type. Acknowledging a subset (5-6) splits the batch into
    // per-offset state: released offsets lose their lock tasks, 7-9 keep per-offset tasks.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 6, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
    assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask());
    assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask());
    assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask());
    assertEquals(3, sharePartition.timer().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Acknowledge with ACCEPT type. Offsets 8-9 drop their lock tasks and the
    // delivery-complete count rises to 2; only offset 7 still holds a timer task.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(8, 9, List.of(AcknowledgeType.ACCEPT.id))));
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
    assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask());
    assertEquals(1, sharePartition.timer().size());
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Allowing acquisition lock to expire will only affect the offsets that have not been acknowledged yet.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> {
            // Check cached state: 5-7 become AVAILABLE again while accepted 8-9 stay ACKNOWLEDGED.
            Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
            expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 &&
                expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState());
        },
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L))));
    // After expiry no offset retains an acquisition lock task and the terminal count is unchanged.
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask());
    assertEquals(2, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when the acquisition lock on a whole batch expires while the write share group
 * state RPC is failing, the in-memory state transition (ACQUIRED -> AVAILABLE) still happens.
 */
@Test
public void testAcquisitionLockOnBatchWithWriteShareGroupStateFailure() throws InterruptedException {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister)
        .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
        .withState(SharePartitionState.ACTIVE)
        .build();

    // Stub the persister so every writeState call reports GROUP_ID_NOT_FOUND, which makes
    // sharePartition.isWriteShareGroupStateSuccessful() evaluate to false.
    WriteShareGroupStateResult writeResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeResult));

    fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
    assertEquals(1, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());

    // Expire the acquisition lock. Even though the write share group state RPC fails, the
    // batch still transitions back to AVAILABLE and its lock task is cleared.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> sharePartition.timer().size() == 0 &&
            sharePartition.nextFetchOffset() == 5 &&
            sharePartition.cachedState().size() == 1 &&
            sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE &&
            sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null,
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of())));
}
/**
 * Verifies that {@code deliveryCompleteCount} only increases once a batch reaches a terminal
 * state: lock expiry on a non-final delivery leaves the count at 0, while lock expiry on the
 * last permitted delivery (max delivery count = 2) archives the batch and adds its 10 records
 * to the count — even though the write share group state RPC fails on both expiries.
 */
@Test
public void testDeliveryCompleteCountOnLockExpiryAndWriteFailureOnBatchLastDelivery() throws InterruptedException {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister)
        .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
        .withState(SharePartitionState.ACTIVE)
        .withMaxDeliveryCount(2)
        .build();
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Acquire two batches; each gets its own batch-level acquisition lock task.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
    fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 10);
    assertEquals(2, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
    assertNotNull(sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> sharePartition.timer().size() == 0 &&
            sharePartition.nextFetchOffset() == 5 &&
            sharePartition.cachedState().size() == 2 &&
            sharePartition.cachedState().get(15L).batchState() == RecordState.AVAILABLE &&
            sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask() == null,
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of())));
    // First delivery attempt ended in lock expiry: records are AVAILABLE again, not terminal,
    // so the delivery-complete count remains 0.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Re-acquire the batch at offset 15; with max delivery count = 2 this is its last delivery.
    fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 10);
    assertEquals(1, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> sharePartition.timer().size() == 0 &&
            sharePartition.nextFetchOffset() == 5 &&
            sharePartition.cachedState().size() == 2 &&
            sharePartition.cachedState().get(15L).batchState() == RecordState.ARCHIVED &&
            sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask() == null,
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of())));
    // Even though the write state call failed, the records are still archived and deliveryCompleteCount is updated.
    assertEquals(10, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that {@code deliveryCompleteCount} is tracked at per-offset granularity: accepted
 * offsets count immediately, lock expiry on a non-final delivery does not count, and archival
 * of a single offset on its last permitted delivery counts even when the write share group
 * state RPC fails.
 */
@Test
public void testDeliveryCompleteCountOnLockExpiryAndWriteFailureOnOffsetLastDelivery() throws InterruptedException {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister)
        .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
        .withState(SharePartitionState.ACTIVE)
        .withMaxDeliveryCount(2)
        .build();
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 6), 6);
    assertEquals(1, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
    // Accepting offsets 8-9 makes them terminal, so the delivery-complete count becomes 2.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(8, 9, List.of(AcknowledgeType.ACCEPT.id))));
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> {
            // Unacknowledged offsets (5-7 and 10) revert to AVAILABLE; accepted 8-9 stay ACKNOWLEDGED.
            Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
            expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            return sharePartition.timer().size() == 0 && sharePartition.cachedState().size() == 1 &&
                expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState());
        },
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L, 10L))));
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(10L).acquisitionLockTimeoutTask());
    // Lock expiry is not a terminal transition, so the count is unchanged.
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Re-acquire only offset 10; with max delivery count = 2 this is its last delivery.
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 1), 1);
    assertEquals(1, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(10L).acquisitionLockTimeoutTask());
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> {
            // Offset 10 has exhausted its deliveries and is ARCHIVED with delivery count 2.
            Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
            expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
            return sharePartition.timer().size() == 0 && sharePartition.cachedState().size() == 1 &&
                expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState());
        },
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 10L))));
    // Even though the write state call failed, the record is still archived and deliveryCompleteCount is updated.
    assertEquals(3, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when the acquisition lock expires while the write share group state RPC is
 * failing, the per-offset state transition still happens in memory: unacknowledged offsets
 * become AVAILABLE while previously accepted offsets remain ACKNOWLEDGED, and no offset keeps
 * a lock task afterwards.
 */
@Test
public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws InterruptedException {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister)
        .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 6), 6);
    assertEquals(1, sharePartition.timer().size());
    assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
    // Accept offsets 8-9 while persister writes still succeed.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(8, 9, List.of(AcknowledgeType.ACCEPT.id))));
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
    TestUtils.waitForCondition(
        () -> {
            // Unacknowledged offsets (5-7 and 10) revert to AVAILABLE; accepted 8-9 stay ACKNOWLEDGED.
            Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
            expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
            expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
            return sharePartition.timer().size() == 0 && sharePartition.cachedState().size() == 1 &&
                expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState());
        },
        DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
        () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L, 10L))));
    // No offset retains an acquisition lock task once the lock has expired.
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask());
    assertNull(sharePartition.cachedState().get(5L).offsetState().get(10L).acquisitionLockTimeoutTask());
}
/**
 * Verifies that releasing a single acquired record makes it AVAILABLE again with its delivery
 * count rolled back, so nothing counts as delivery-complete.
 */
@Test
public void testReleaseSingleRecordBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(1), 1);
    assertEquals(0, sharePartition.deliveryCompleteCount());

    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());

    // The batch is AVAILABLE again with its delivery count rolled back to 0, so the next
    // fetch restarts at offset 0 and the delivery-complete count stays 0.
    assertEquals(0, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    var releasedBatch = sharePartition.cachedState().get(0L);
    assertEquals(RecordState.AVAILABLE, releasedBatch.batchState());
    assertEquals(0, releasedBatch.batchDeliveryCount());
    assertNull(releasedBatch.offsetState());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that releasing a multi-record acquired batch reverts the whole batch to AVAILABLE
 * in one step (no per-offset split) with its delivery count rolled back.
 */
@Test
public void testReleaseMultipleRecordBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
    assertEquals(0, sharePartition.deliveryCompleteCount());

    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());

    // The whole batch is AVAILABLE again; fetching restarts at its first offset.
    assertEquals(5, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    var releasedBatch = sharePartition.cachedState().get(5L);
    assertEquals(RecordState.AVAILABLE, releasedBatch.batchState());
    assertEquals(0, releasedBatch.batchDeliveryCount());
    assertNull(releasedBatch.offsetState());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that a release only affects unacknowledged batches: batches already moved to
 * ACKNOWLEDGED keep their terminal state and the delivery-complete count is unchanged.
 */
@Test
public void testReleaseMultipleAcknowledgedRecordBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Three acquired batches, with an untracked gap of 3 offsets from 7-9 between the
    // second and third batch.
    fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 2);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 9), 9);

    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 18, List.of(AcknowledgeType.ACCEPT.id))));
    // Terminal after the acknowledgement: (5 -> 6) and (10 -> 18), i.e. 11 records.
    assertEquals(11, sharePartition.deliveryCompleteCount());

    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());

    // Only the unacknowledged first batch is affected; acknowledged batches stay terminal.
    assertEquals(0, sharePartition.nextFetchOffset());
    assertEquals(3, sharePartition.cachedState().size());
    var acknowledgedBatchAtFive = sharePartition.cachedState().get(5L);
    var acknowledgedBatchAtTen = sharePartition.cachedState().get(10L);
    assertEquals(RecordState.ACKNOWLEDGED, acknowledgedBatchAtFive.batchState());
    assertEquals(RecordState.ACKNOWLEDGED, acknowledgedBatchAtTen.batchState());
    assertNull(acknowledgedBatchAtFive.offsetState());
    assertNull(acknowledgedBatchAtTen.offsetState());
    assertEquals(11, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies a release after partial acknowledgements that include gap offsets: terminal
 * (ACKNOWLEDGED/ARCHIVED) offsets keep their state while only the never-acknowledged offsets
 * revert to AVAILABLE with a rolled-back delivery count.
 */
@Test
public void testReleaseAcknowledgedMultipleSubsetRecordBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // First batch, followed by an untracked gap of 3 offsets from 7-9.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 2);
    // Second batch with internal gaps: 12-13, 15 and 17-19 around appended offsets 14, 16 and 20.
    MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
    recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    fetchAcquiredRecords(sharePartition, recordsBuilder.build(), 11);

    // Acknowledge a subset of both batches, marking the gap offsets as gaps.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(6, 18, List.of(
        AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
        AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
        ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id,
        ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
        AcknowledgeType.ACCEPT.id))));
    // Terminal after the acknowledgements: offset 6 plus offsets 10-18, i.e. 10 records.
    assertEquals(10, sharePartition.deliveryCompleteCount());

    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());
    assertEquals(5, sharePartition.nextFetchOffset());

    // First batch: unacknowledged offset 5 is AVAILABLE again, accepted offset 6 stays terminal.
    assertEquals(
        Map.of(
            5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
            6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        sharePartition.cachedState().get(5L).offsetState());

    // Second batch: terminal offsets are untouched; unacknowledged 19-20 revert to AVAILABLE.
    assertEquals(
        Map.ofEntries(
            Map.entry(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
            Map.entry(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)),
            Map.entry(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID))),
        sharePartition.cachedState().get(10L).offsetState());
    assertEquals(10, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that releasing acquired records is scoped to a single member: releasing for one
 * member leaves the other member's acquired batch untouched, and terminal offsets
 * (ACKNOWLEDGED/ARCHIVED) are never reverted by a release.
 */
@Test
public void testReleaseAcquiredRecordsWithAnotherMember() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords records1 = memoryRecords(5, 1);
    // Untracked gap of 3 offsets from 7-9.
    MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
    // Gap from 12-13 offsets.
    recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap for 15 offset.
    recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap from 17-19 offsets.
    recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    MemoryRecords records2 = recordsBuilder.build();
    // First batch is acquired by "member-2", the second batch by MEMBER_ID.
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM);
    sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(records2), FETCH_ISOLATION_HWM);
    // Acknowledging over subset of second batch with subset of gap offsets.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 18, List.of(
        AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID,
        AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
        AcknowledgeType.ACCEPT.id))));
    // After the acknowledgements, the cached state has 9 Terminal records ->
    // (10 -> 18)
    assertEquals(9, sharePartition.deliveryCompleteCount());
    // Release acquired records for MEMBER_ID; "member-2"'s batch at offset 5 must stay ACQUIRED.
    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());
    // Only the still-acquired offsets 19-20 reverted to AVAILABLE, hence next fetch is 19.
    assertEquals(19, sharePartition.nextFetchOffset());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    // Check cached state.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    assertEquals(9, sharePartition.deliveryCompleteCount());
    // Release acquired records for "member-2".
    releaseResult = sharePartition.releaseAcquiredRecords("member-2");
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());
    // Now the first batch is AVAILABLE again and fetching restarts from it.
    assertEquals(5, sharePartition.nextFetchOffset());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    // Check cached state: the second batch is unchanged by "member-2"'s release.
    expectedOffsetStateMap.clear();
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    assertEquals(9, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that releasing acquired records for one member does not disturb another
 * member's acknowledged/archived per-offset state, and that the delivery-complete
 * count only reflects records that reached a terminal state (ACKNOWLEDGED/ARCHIVED).
 */
@Test
public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
MemoryRecords records1 = memoryRecords(5, 2);
// Untracked gap of 3 offsets from 7-9.
MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
// Gap from 12-13 offsets.
recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
// Gap for 15 offset.
recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
// Gap from 17-19 offsets.
recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
MemoryRecords records2 = recordsBuilder.build();
// First batch is acquired by "member-2", second by the default member.
sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM);
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(records2), FETCH_ISOLATION_HWM);
// Acknowledging over subset of second batch with subset of gap offsets.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 18, List.of(
AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID,
AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
AcknowledgeType.ACCEPT.id))));
// After the acknowledgements, the cached state has 9 Terminal records ->
// (10 -> 18)
assertEquals(9, sharePartition.deliveryCompleteCount());
// Release acquired records for "member-1".
CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertNull(releaseResult.join());
assertFalse(releaseResult.isCompletedExceptionally());
assertEquals(19, sharePartition.nextFetchOffset());
// "member-2"'s batch at offset 5 is untouched by the other member's release.
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
// Check cached state.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
assertEquals(9, sharePartition.deliveryCompleteCount());
// Ack subset of records by "member-2".
sharePartition.acknowledge("member-2",
List.of(new ShareAcknowledgementBatch(5, 5, List.of(AcknowledgeType.ACCEPT.id))));
// After the acknowledgements, the startOffset will be updated to 6, since offset 5 is Terminal. Hence
// deliveryCompleteCount will remain 9.
assertEquals(9, sharePartition.deliveryCompleteCount());
// Release acquired records for "member-2".
releaseResult = sharePartition.releaseAcquiredRecords("member-2");
assertNull(releaseResult.join());
assertFalse(releaseResult.isCompletedExceptionally());
assertEquals(6, sharePartition.nextFetchOffset());
// Check cached state.
expectedOffsetStateMap.clear();
expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState());
expectedOffsetStateMap.clear();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
assertEquals(9, sharePartition.deliveryCompleteCount());
}
/**
 * Releasing when nothing was ever acquired must succeed as a no-op: the future
 * completes normally, no cached entries appear, and the next fetch offset stays at 0.
 */
@Test
public void testReleaseAcquiredRecordsForEmptyCachedData() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Releasing against an empty cache should complete successfully without effect.
    CompletableFuture<Void> releaseFuture = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseFuture.join());
    assertFalse(releaseFuture.isCompletedExceptionally());
    assertEquals(0, sharePartition.nextFetchOffset());
    assertTrue(sharePartition.cachedState().isEmpty());
}
/**
 * Release after mixed acknowledge types: previously-released offsets return to AVAILABLE
 * with delivery count 1, an unacknowledged offset returns to AVAILABLE at count 0, and
 * accepted offsets remain ACKNOWLEDGED.
 */
@Test
public void testReleaseAcquiredRecordsAfterDifferentAcknowledges() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    sharePartition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 6, List.of(AcknowledgeType.RELEASE.id))));
    sharePartition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(8, 9, List.of(AcknowledgeType.ACCEPT.id))));
    // Only offsets 8-9 reached a terminal state.
    assertEquals(2, sharePartition.deliveryCompleteCount());
    CompletableFuture<Void> releaseFuture = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseFuture.join());
    assertFalse(releaseFuture.isCompletedExceptionally());
    assertEquals(5, sharePartition.nextFetchOffset());
    // Verify the per-offset state after the release.
    Map<Long, InFlightState> expectedStates = Map.of(
        5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID),
        6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID),
        7L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
        8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedStates, sharePartition.cachedState().get(5L).offsetState());
    assertEquals(2, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that releasing a batch whose offsets were previously RELEASE-acknowledged
 * does not count that release against the max delivery count limit: the batch remains
 * AVAILABLE and no offset-level state is materialized.
 */
@Test
public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecords() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withMaxDeliveryCount(2)
.withState(SharePartitionState.ACTIVE)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(10), 10);
MemoryRecords records2 = memoryRecords(10, 5);
fetchAcquiredRecords(sharePartition, records2, 5);
// RELEASE acknowledgement does not put records into a terminal state.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.RELEASE.id))));
assertEquals(0, sharePartition.deliveryCompleteCount());
// Re-acquire the released records (second delivery attempt).
fetchAcquiredRecords(sharePartition, records2, 5);
CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertNull(releaseResult.join());
assertFalse(releaseResult.isCompletedExceptionally());
assertEquals(0, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
// Released batch stays AVAILABLE (not archived) despite maxDeliveryCount of 2.
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState());
assertNull(sharePartition.cachedState().get(10L).offsetState());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that releasing re-acquired record subsets does not push delivery counts over
 * the max delivery count limit, and that per-offset state (instead of batch state) is
 * materialized for batches whose offsets diverged.
 *
 * <p>Bug fix: the offset-state presence check after asserting batch 15L's
 * {@code batchState()} throws previously re-checked batch 10L's offset state (a
 * copy-paste slip), leaving batch 15L's offset-state presence unverified.
 */
@Test
public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecordsSubset() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withMaxDeliveryCount(2)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // First fetch request with 5 records starting from offset 10.
    MemoryRecords records1 = memoryRecords(10, 5);
    // Second fetch request with 5 records starting from offset 15.
    MemoryRecords records2 = memoryRecords(15, 5);
    // Third fetch request with 5 records starting from offset 20.
    MemoryRecords records3 = memoryRecords(20, 5);
    fetchAcquiredRecords(sharePartition, records1, 5);
    fetchAcquiredRecords(sharePartition, records2, 5);
    fetchAcquiredRecords(sharePartition, records3, 5);
    sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of(
        new ShareAcknowledgementBatch(13, 16, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(17, 19, List.of(AcknowledgeType.REJECT.id)),
        new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id))
    )));
    // Only the rejected offsets 17-19 are terminal at this point.
    assertEquals(3, sharePartition.deliveryCompleteCount());
    // Send next batch from offset 13, only 2 records should be acquired.
    fetchAcquiredRecords(sharePartition, records1, 2);
    // Send next batch from offset 15, only 2 records should be acquired.
    fetchAcquiredRecords(sharePartition, records2, 2);
    fetchAcquiredRecords(sharePartition, records3, 5);
    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());
    assertEquals(10, sharePartition.nextFetchOffset());
    assertEquals(3, sharePartition.cachedState().size());
    // Batches 10L and 15L have diverged per-offset state, so batchState() must throw
    // and offsetState() must be populated for each of them.
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
    assertNotNull(sharePartition.cachedState().get(10L).offsetState());
    assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(15L).batchState());
    assertNotNull(sharePartition.cachedState().get(15L).offsetState());
    // Batch 20L was released as a whole, so batch-level state applies.
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId());
    assertNull(sharePartition.cachedState().get(20L).offsetState());
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    expectedOffsetStateMap.clear();
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(15L).offsetState());
    assertEquals(3, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that once released records hit the max delivery count limit (2), they are
 * archived and the cache is cleared entirely, resetting the delivery-complete count.
 */
@Test
public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withMaxDeliveryCount(2)
.withState(SharePartitionState.ACTIVE)
.build();
// First fetch request with 5 records starting from offset 10.
MemoryRecords records1 = memoryRecords(10, 5);
// Second fetch request with 5 records starting from offset 15.
MemoryRecords records2 = memoryRecords(15, 5);
// Third fetch request with 5 records starting from offset 20.
MemoryRecords records3 = memoryRecords(20, 5);
fetchAcquiredRecords(sharePartition, records1, 5);
fetchAcquiredRecords(sharePartition, records2, 5);
fetchAcquiredRecords(sharePartition, records3, 5);
sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of(
new ShareAcknowledgementBatch(10, 12, List.of(AcknowledgeType.ACCEPT.id)),
new ShareAcknowledgementBatch(13, 16, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(17, 19, List.of(AcknowledgeType.REJECT.id)),
new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id))
)));
// After acknowledgements, since offsets 10 -> 12 are at the start of the cached state and are in Terminal state,
// the start offset will be updated to 13. From the remaining offsets in flight, only records (17 -> 19) are in Terminal state.
assertEquals(3, sharePartition.deliveryCompleteCount());
// Send next batch from offset 13, only 2 records should be acquired.
fetchAcquiredRecords(sharePartition, records1, 2);
// Send next batch from offset 15, only 2 records should be acquired.
fetchAcquiredRecords(sharePartition, records2, 2);
fetchAcquiredRecords(sharePartition, records3, 5);
// Second RELEASE exhausts maxDeliveryCount (2) for these offsets, archiving them.
sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of(
new ShareAcknowledgementBatch(13, 16, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id))
)));
// Everything in flight is now terminal, so the cache is fully cleared.
assertEquals(25, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.cachedState().size());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * A release issued by a member that owns no acquired records must be a no-op: the
 * original member's acquired offsets and the acknowledged offsets are left untouched.
 */
@Test
public void testReleaseAcquiredRecordsSubsetWithAnotherMember() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 7);
    sharePartition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 7, List.of(AcknowledgeType.ACCEPT.id))));
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Release acquired records subset with a different member id.
    CompletableFuture<Void> releaseFuture = sharePartition.releaseAcquiredRecords("member-2");
    assertNull(releaseFuture.join());
    assertFalse(releaseFuture.isCompletedExceptionally());
    // Offsets 5-7 stay acknowledged; 8-11 remain acquired by the original member.
    Map<Long, InFlightState> expectedStates = Map.of(
        5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        8L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedStates, sharePartition.cachedState().get(5L).offsetState());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when the persister's writeState fails for a batch release, the release
 * future completes exceptionally and the in-memory cached state is left unchanged.
 */
@Test
public void testReleaseBatchWithWriteShareGroupStateFailure() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withState(SharePartitionState.ACTIVE)
.build();
// Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertTrue(releaseResult.isCompletedExceptionally());
assertFutureThrows(GroupIdNotFoundException.class, releaseResult);
// Due to failure in writeShareGroupState, the cached state should not be updated.
assertEquals(1, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when the persister's writeState fails for an offset-level release
 * (after a successful partial acknowledgement), the release future completes
 * exceptionally and the per-offset cached state remains untouched.
 */
@Test
public void testReleaseOffsetWithWriteShareGroupStateFailure() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withState(SharePartitionState.ACTIVE)
.build();
// Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult))
fetchAcquiredRecords(sharePartition, memoryRecords(5, 6), 6);
sharePartition.acknowledge(MEMBER_ID,
List.of(new ShareAcknowledgementBatch(8, 9, List.of(AcknowledgeType.ACCEPT.id))));
assertEquals(2, sharePartition.deliveryCompleteCount());
// Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertTrue(releaseResult.isCompletedExceptionally());
assertFutureThrows(GroupIdNotFoundException.class, releaseResult);
// Due to failure in writeShareGroupState, the cached state should not be updated.
assertEquals(1, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(5L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(6L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(7L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(8L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(9L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(10L).state());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(5L).memberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(6L).memberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(7L).memberId());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(8L).memberId());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(9L).memberId());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(10L).memberId());
assertEquals(2, sharePartition.deliveryCompleteCount());
}
/**
 * Releasing an acquired batch must make it AVAILABLE again with delivery count 0 and
 * cancel its acquisition lock timer task.
 */
@Test
public void testAcquisitionLockOnReleasingMultipleRecordBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
    CompletableFuture<Void> releaseFuture = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseFuture.join());
    assertFalse(releaseFuture.isCompletedExceptionally());
    assertEquals(5, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    var releasedBatch = sharePartition.cachedState().get(5L);
    assertEquals(RecordState.AVAILABLE, releasedBatch.batchState());
    assertEquals(0, releasedBatch.batchDeliveryCount());
    assertNull(releasedBatch.offsetState());
    // Acquisition lock timer task would be cancelled by the release acquired records operation.
    assertNull(releasedBatch.batchAcquisitionLockTimeoutTask());
    assertEquals(0, sharePartition.timer().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * After acknowledging a subset (including gap offsets) of two batches and releasing the
 * remaining acquired offsets, the per-offset cached state must reflect the
 * acknowledgements and every per-offset acquisition lock timer task must be cancelled.
 */
@Test
public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchWithGapOffsets() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build();
    MemoryRecords records1 = memoryRecords(5, 2);
    // Untracked gap of 3 offsets from 7-9.
    MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
    // Gap from 12-13 offsets.
    recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap for 15 offset.
    recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    // Gap from 17-19 offsets.
    recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    MemoryRecords records2 = recordsBuilder.build();
    fetchAcquiredRecords(sharePartition, records1, 2);
    fetchAcquiredRecords(sharePartition, records2, 11);
    // Acknowledging over subset of both batch with subset of gap offsets.
    sharePartition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(6, 18, List.of(
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id,
            ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID,
            AcknowledgeType.ACCEPT.id))));
    assertEquals(10, sharePartition.deliveryCompleteCount());
    CompletableFuture<Void> releaseFuture = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseFuture.join());
    assertFalse(releaseFuture.isCompletedExceptionally());
    assertEquals(5, sharePartition.nextFetchOffset());
    // Batch at 5: offset 5 is re-available, offset 6 stays acknowledged.
    assertEquals(
        Map.of(
            5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
            6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        sharePartition.cachedState().get(5L).offsetState());
    // Batch at 10: accepted offsets are ACKNOWLEDGED, gap offsets ARCHIVED, and the
    // un-acknowledged tail (19-20) is back to AVAILABLE with delivery count 0.
    Map<Long, InFlightState> expectedOffsetStateMap = Map.ofEntries(
        Map.entry(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)),
        Map.entry(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)),
        Map.entry(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    // Acquisition lock timer tasks would be cancelled by the release acquired records operation.
    for (long offset = 5L; offset <= 6L; offset++) {
        assertNull(sharePartition.cachedState().get(5L).offsetState().get(offset).acquisitionLockTimeoutTask());
    }
    for (long offset = 10L; offset <= 20L; offset++) {
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(offset).acquisitionLockTimeoutTask());
    }
    assertEquals(0, sharePartition.timer().size());
    assertEquals(10, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when a fetch reveals a gap covering an entire persisted batch (the log
 * no longer contains those offsets), the stale batch is archived during acquire and its
 * size is added to the delivery-complete count.
 */
@Test
public void testDeliveryCompleteCountWhenStaleBatchesAreArchived() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 1)
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(11, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Fetched records are from 21 to 30
MemoryRecords records = memoryRecords(21, 10);
// The fetch offset is set as 11, which is the next fetch offset, but the returned records are from 21 onwards.
// This means there is a gap in the partition from 11 to 20. In this case, the batch 11 to 20 will be archived
// during the acquire operation.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
MAX_FETCH_RECORDS,
11,
fetchPartitionData(records),
FETCH_ISOLATION_HWM), 10);
assertArrayEquals(expectedAcquiredRecord(21, 30, 2).toArray(), acquiredRecordsList.toArray());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
// Since the records 11 -> 20 are ARCHIVED, deliveryCompleteCount will be 10.
assertEquals(10, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that when a fetch reveals a gap covering only part of a persisted batch, the
 * stale offsets within that batch are archived during acquire and counted towards the
 * delivery-complete count.
 */
@Test
public void testDeliveryCompleteCountWhenStaleOffsetsAreArchived() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 1)
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
sharePartition.maybeInitialize();
assertEquals(11, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Fetched records are from 16 to 30
MemoryRecords records = memoryRecords(16, 15);
// The fetch offset is set as 11, which is the next fetch offset, but the returned records are from 16 onwards.
// This means there is a gap in the partition from 11 to 15. In this case, the offsets 11 to 15 will be archived
// during the acquire operation.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
MAX_FETCH_RECORDS,
11,
fetchPartitionData(records),
FETCH_ISOLATION_HWM), 15);
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(16, 16, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(17, 17, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(18, 18, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(19, 19, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(20, 20, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 30, 2));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(11L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(12L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(13L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(14L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(15L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
// Since the records 11 -> 15 are ARCHIVED, deliveryCompleteCount will be 5.
assertEquals(5, sharePartition.deliveryCompleteCount());
}
/**
 * Verifies that moving the log start offset (LSO) on a freshly initialized partition
 * advances the start, end and next-fetch offsets together.
 */
@Test
public void testLsoMovementOnInitializationSharePartition() {
// LSO is at 0.
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
sharePartition.updateCacheAndOffsets(0);
assertEquals(0, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.startOffset());
assertEquals(0, sharePartition.endOffset());
// LSO is at 5.
sharePartition.updateCacheAndOffsets(5);
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(5, sharePartition.startOffset());
assertEquals(5, sharePartition.endOffset());
}
    @Test
    public void testLsoMovementForArchivingBatches() {
        // Verifies that when the LSO moves into the cached batches, AVAILABLE batches before the
        // new LSO are ARCHIVED, ACQUIRED batches are left untouched, and deliveryCompleteCount
        // only counts Terminal records at or after the current start offset.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        // Acquire 7 consecutive batches of 5 records each, covering offsets 2 -> 36.
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(12, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(17, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(22, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(27, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(32, 5), 5);
        // ACCEPT 2 -> 6, RELEASE 12 -> 16 and 22 -> 26 (back to AVAILABLE), REJECT 27 -> 31 (ARCHIVED).
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(12, 16, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(22, 26, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(27, 31, List.of(AcknowledgeType.REJECT.id))
        ));
        // After the acknowledgements, the records in Terminal state are ->
        // 27 -> 31: ARCHIVED
        // Records 2 -> 6 are ACKNOWLEDGED, but since they are at the start of the cache, the start offset will be moved to 7.
        assertEquals(5, sharePartition.deliveryCompleteCount());
        // LSO is at 20.
        sharePartition.updateCacheAndOffsets(20);
        // Next fetch resumes at 22 since 22 -> 26 became AVAILABLE again via RELEASE.
        assertEquals(22, sharePartition.nextFetchOffset());
        assertEquals(20, sharePartition.startOffset());
        assertEquals(36, sharePartition.endOffset());
        // For cached state corresponding to entry 2, the batch state will be ACKNOWLEDGED, hence it will be cleared as part of acknowledgement.
        assertEquals(6, sharePartition.cachedState().size());
        // 7 -> 11: still ACQUIRED by the member, with an active acquisition lock timer task.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
        assertNotNull(sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask());
        // 12 -> 16: was AVAILABLE and before the new LSO, hence ARCHIVED; no lock task remains.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(12L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(12L).batchState());
        assertNull(sharePartition.cachedState().get(12L).batchAcquisitionLockTimeoutTask());
        // 17 -> 21: still ACQUIRED (LSO movement never touches ACQUIRED batches).
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(17L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).batchState());
        assertNotNull(sharePartition.cachedState().get(17L).batchAcquisitionLockTimeoutTask());
        // 22 -> 26: AVAILABLE and after the new LSO, so it stays AVAILABLE.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(22L).batchState());
        assertNull(sharePartition.cachedState().get(22L).batchAcquisitionLockTimeoutTask());
        // 27 -> 31: REJECTed earlier, remains ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(27L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(27L).batchState());
        assertNull(sharePartition.cachedState().get(27L).batchAcquisitionLockTimeoutTask());
        // 32 -> 36: still ACQUIRED.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(32L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(32L).batchState());
        assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask());
        // After the LSO is moved, AVAILABLE batches are ARCHIVED. Thus, the records 12 -> 16 will be ARCHIVED. Since
        // these are prior to the new startOffset, deliveryCompleteCount remains the same.
        assertEquals(5, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testLsoMovementPostArchivedBatches() {
        // Verifies that when the LSO moves past already-ARCHIVED batches, deliveryCompleteCount is
        // decremented by the number of Terminal records that fall between the old and new start offsets.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        // Acquire 7 consecutive batches of 5 records each, covering offsets 2 -> 36.
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(12, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(17, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(22, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(27, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(32, 5), 5);
        // RELEASE 2 -> 6 and 22 -> 26 (back to AVAILABLE); REJECT 12 -> 16 and 27 -> 31 (ARCHIVED).
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(12, 16, List.of(AcknowledgeType.REJECT.id)),
            new ShareAcknowledgementBatch(22, 26, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(27, 31, List.of(AcknowledgeType.REJECT.id))
        ));
        // After the acknowledgements, the records in Terminal state are ->
        // 12 -> 16: ARCHIVED
        // 27 -> 31: ARCHIVED
        assertEquals(10, sharePartition.deliveryCompleteCount());
        // LSO is at 20.
        sharePartition.updateCacheAndOffsets(20);
        // Next fetch resumes at 22 since 22 -> 26 is AVAILABLE and after the new LSO.
        assertEquals(22, sharePartition.nextFetchOffset());
        assertEquals(20, sharePartition.startOffset());
        assertEquals(36, sharePartition.endOffset());
        assertEquals(7, sharePartition.cachedState().size());
        // 2 -> 6: was AVAILABLE (via RELEASE) and before the new LSO, hence ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
        assertNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask());
        // 7 -> 11: still ACQUIRED (LSO movement never touches ACQUIRED batches).
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
        assertNotNull(sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask());
        // 12 -> 16: already ARCHIVED via REJECT, remains ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(12L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(12L).batchState());
        assertNull(sharePartition.cachedState().get(12L).batchAcquisitionLockTimeoutTask());
        // 17 -> 21: still ACQUIRED.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(17L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).batchState());
        assertNotNull(sharePartition.cachedState().get(17L).batchAcquisitionLockTimeoutTask());
        // 22 -> 26: AVAILABLE and after the new LSO, so it stays AVAILABLE.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(22L).batchState());
        assertNull(sharePartition.cachedState().get(22L).batchAcquisitionLockTimeoutTask());
        // 27 -> 31: remains ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(27L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(27L).batchState());
        assertNull(sharePartition.cachedState().get(27L).batchAcquisitionLockTimeoutTask());
        // 32 -> 36: still ACQUIRED.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(32L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(32L).batchState());
        assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask());
        // After the LSO is moved, the number of Terminal records between old and new start offsets is calculated.
        // In this case it is 5 (for the records 12 -> 16). Thus, deliveryCompleteCount is decremented by 5.
        assertEquals(5, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testLsoMovementPostArchivedRecords() {
        // Same scenario as testLsoMovementPostArchivedBatches but with a partial-batch acknowledgement
        // (19 -> 21 inside batch 17 -> 21), so per-offset state is exercised as well.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        // Acquire 7 consecutive batches of 5 records each, covering offsets 2 -> 36.
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(12, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(17, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(22, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(27, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(32, 5), 5);
        // RELEASE 2 -> 6 and 22 -> 26; REJECT 12 -> 16, 19 -> 21 (splits batch 17 -> 21) and 27 -> 31.
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(12, 16, List.of(AcknowledgeType.REJECT.id)),
            new ShareAcknowledgementBatch(19, 21, List.of(AcknowledgeType.REJECT.id)),
            new ShareAcknowledgementBatch(22, 26, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(27, 31, List.of(AcknowledgeType.REJECT.id))
        ));
        // After the acknowledgements, the records in Terminal state are ->
        // 12 -> 16: ARCHIVED
        // 19 -> 21: ARCHIVED
        // 27 -> 31: ARCHIVED
        assertEquals(13, sharePartition.deliveryCompleteCount());
        // LSO is at 20.
        sharePartition.updateCacheAndOffsets(20);
        // Next fetch resumes at 22 since 22 -> 26 is AVAILABLE and after the new LSO.
        assertEquals(22, sharePartition.nextFetchOffset());
        assertEquals(20, sharePartition.startOffset());
        assertEquals(36, sharePartition.endOffset());
        assertEquals(7, sharePartition.cachedState().size());
        // 2 -> 6: was AVAILABLE (via RELEASE) and before the new LSO, hence ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
        assertNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask());
        // 7 -> 11: still ACQUIRED (LSO movement never touches ACQUIRED batches).
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
        assertNotNull(sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask());
        // 12 -> 16: already ARCHIVED via REJECT, remains ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(12L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(12L).batchState());
        assertNull(sharePartition.cachedState().get(12L).batchAcquisitionLockTimeoutTask());
        // Batch 17 -> 21 tracks per-offset state: 17, 18 ACQUIRED; 19 -> 21 ARCHIVED via REJECT.
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).offsetState().get(17L).state());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).offsetState().get(18L).state());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(17L).offsetState().get(19L).state());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(17L).offsetState().get(20L).state());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(17L).offsetState().get(21L).state());
        // 22 -> 26: AVAILABLE and after the new LSO, so it stays AVAILABLE.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(22L).batchState());
        assertNull(sharePartition.cachedState().get(22L).batchAcquisitionLockTimeoutTask());
        // 27 -> 31: remains ARCHIVED.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(27L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(27L).batchState());
        assertNull(sharePartition.cachedState().get(27L).batchAcquisitionLockTimeoutTask());
        // 32 -> 36: still ACQUIRED.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(32L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(32L).batchState());
        assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask());
        // After the LSO is moved, the number of Terminal records between old and new start offsets is calculated.
        // In this case it is 6 (for the records 12 -> 16 and 19). Thus, deliveryCompleteCount is decremented by 6.
        assertEquals(7, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testLsoMovementForArchivingAllAvailableBatches() {
        // Verifies that an LSO move past all cached batches archives every AVAILABLE batch, and that
        // subsequent acknowledgements / lock expirations before the LSO do not affect deliveryCompleteCount.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50.
        fetchAcquiredRecords(sharePartition, memoryRecords(11, 10), 10);
        fetchAcquiredRecords(sharePartition, memoryRecords(21, 10), 10);
        fetchAcquiredRecords(sharePartition, memoryRecords(31, 10), 10);
        fetchAcquiredRecords(sharePartition, memoryRecords(41, 10), 10);
        // After the acknowledgements, the state of share partition will be:
        // 1. 11 -> 20: AVAILABLE
        // 2. 21 -> 30: ACQUIRED
        // 3. 31 -> 40: AVAILABLE
        // 4. 41 -> 50: ACQUIRED
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(11, 20, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(31, 40, List.of(AcknowledgeType.RELEASE.id))
        ));
        // RELEASE leaves no records in a Terminal state.
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Move the LSO to 41. When the LSO moves ahead, all batches that are AVAILABLE before the new LSO will be ARCHIVED.
        // Thus, the state of the share partition will be:
        // 1. 11 -> 20: ARCHIVED
        // 2. 21 -> 30: ACQUIRED
        // 3. 31 -> 40: ARCHIVED
        // 4. 41 -> 50: ACQUIRED
        // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal
        // state when the corresponding acquisition lock timer task expires.
        sharePartition.updateCacheAndOffsets(41);
        assertEquals(51, sharePartition.nextFetchOffset());
        assertEquals(41, sharePartition.startOffset());
        assertEquals(50, sharePartition.endOffset());
        assertEquals(4, sharePartition.cachedState().size());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).batchState());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState());
        // There are no records in flight in Terminal state.
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these
        // records will remain in the ACQUIRED state.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of(AcknowledgeType.RELEASE.id))));
        // The acknowledgements make no difference to in flight records.
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // The batch is still in ACQUIRED state.
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
        // Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be
        // ARCHIVED.
        sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run();
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState());
        // Even when the acquisition lock expires, this happens for records before the LSO, hence in flight terminal records remain 0.
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
@Test
public void testLsoMovementForArchivingAllAvailableOffsets() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
// A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50.
fetchAcquiredRecords(sharePartition, memoryRecords(11, 10), 10);
fetchAcquiredRecords(sharePartition, memoryRecords(21, 10), 10);
fetchAcquiredRecords(sharePartition, memoryRecords(31, 10), 10);
fetchAcquiredRecords(sharePartition, memoryRecords(41, 10), 10);
// After the acknowledgements, the share partition state will be:
// 1. 11 -> 20: AVAILABLE
// 2. 21 -> 30: ACQUIRED
// 3. 31 -> 40: AVAILABLE
// 4. 41 -> 50: ACQUIRED
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(11, 20, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(31, 40, List.of(AcknowledgeType.RELEASE.id))
));
assertEquals(0, sharePartition.deliveryCompleteCount());
// Move the LSO to 36. When the LSO moves ahead, all records that are AVAILABLE before the new LSO will be ARCHIVED.
// Thus, the state of the share partition will be:
// 1. 11 -> 20: ARCHIVED
// 2. 21 -> 30: ACQUIRED
// 3. 31 -> 35: ARCHIVED
// 3. 36 -> 40: AVAILABLE
// 4. 41 -> 50: ACQUIRED
// Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal
// state when the corresponding acquisition lock timer task expires.
sharePartition.updateCacheAndOffsets(36);
assertEquals(36, sharePartition.nextFetchOffset());
assertEquals(36, sharePartition.startOffset());
assertEquals(50, sharePartition.endOffset());
assertEquals(4, sharePartition.cachedState().size());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(31L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(32L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(33L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(34L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(35L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(36L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(37L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(38L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(39L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(40L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState());
assertEquals(0, sharePartition.deliveryCompleteCount());
// The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these
// records will remain in the ACQUIRED state.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of(AcknowledgeType.RELEASE.id))));
assertEquals(0, sharePartition.deliveryCompleteCount());
// The batch is still in ACQUIRED state.
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be
// ARCHIVED.
sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run();
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
    @Test
    public void testLsoMovementForArchivingOffsets() {
        // Verifies that an LSO move landing inside a batch with mixed per-offset state keeps the
        // offset-level ACKNOWLEDGED/ACQUIRED split intact (no archiving happens here since the
        // offsets before the LSO are ACKNOWLEDGED or ACQUIRED, not AVAILABLE).
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        // Acquire two batches: 2 -> 6 and 7 -> 11.
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
        // ACCEPT offsets 4 -> 8, spanning the tail of the first batch and head of the second.
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(4, 8, List.of(AcknowledgeType.ACCEPT.id))));
        // LSO is at 5.
        sharePartition.updateCacheAndOffsets(5);
        assertEquals(12, sharePartition.nextFetchOffset());
        assertEquals(5, sharePartition.startOffset());
        assertEquals(11, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        // Check cached offset state map.
        // Second batch: 7, 8 ACKNOWLEDGED (no lock task); 9 -> 11 still ACQUIRED (lock task active).
        Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
        assertNull(sharePartition.cachedState().get(7L).offsetState().get(7L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(7L).offsetState().get(8L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(9L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(11L).acquisitionLockTimeoutTask());
        // Check cached offset state map.
        // First batch: 2, 3 still ACQUIRED (lock task active); 4 -> 6 ACKNOWLEDGED (no lock task).
        expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(2L).offsetState());
        assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(2L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(2L).offsetState().get(4L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(2L).offsetState().get(5L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(2L).offsetState().get(6L).acquisitionLockTimeoutTask());
    }
@Test
public void testLsoMovementForArchivingOffsetsWithStartAndEndBatchesNotFullMatches() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// LSO is at 4.
sharePartition.updateCacheAndOffsets(4);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
// LSO is at 8.
sharePartition.updateCacheAndOffsets(8);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(8, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
}
@Test
public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatches() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// LSO is at 4.
sharePartition.updateCacheAndOffsets(4);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// LSO is at 7.
sharePartition.updateCacheAndOffsets(7);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
}
@Test
public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostAcceptAcknowledgement() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// LSO is at 4.
sharePartition.updateCacheAndOffsets(4);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Acknowledge with ACCEPT action.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(7, 8, List.of(AcknowledgeType.ACCEPT.id))));
// LSO is at 7.
sharePartition.updateCacheAndOffsets(7);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Check cached offset state map.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
}
@Test
public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostReleaseAcknowledgement() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// LSO is at 4.
sharePartition.updateCacheAndOffsets(4);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(4, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Acknowledge with RELEASE action.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(7, 8, List.of(AcknowledgeType.RELEASE.id))));
// LSO is at 7.
sharePartition.updateCacheAndOffsets(7);
assertEquals(7, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Check cached offset state map.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
}
@Test
public void testLsoMovementToEndOffset() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// Acknowledge with RELEASE action.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(7, 8, List.of(AcknowledgeType.RELEASE.id))));
// LSO is at 11.
sharePartition.updateCacheAndOffsets(11);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(11, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Check cached offset state map.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
}
@Test
public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// Acknowledge with RELEASE action.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(7, 8, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(11, 11, List.of(AcknowledgeType.RELEASE.id))));
// LSO is at 11.
sharePartition.updateCacheAndOffsets(11);
assertEquals(11, sharePartition.nextFetchOffset());
assertEquals(11, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
// Check cached offset state map.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
}
@Test
public void testLsoMovementAheadOfEndOffsetPostAcknowledgement() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Acquire two in-flight batches: 2-6 and 7-11.
    fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
    // Release offsets 7-8 back to the partition.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(7, 8, List.of(AcknowledgeType.RELEASE.id))));
    // Move the LSO past the current end offset (11) to 12.
    sharePartition.updateCacheAndOffsets(12);
    // Start, end and next-fetch offsets all collapse onto the new LSO.
    assertEquals(12, sharePartition.nextFetchOffset());
    assertEquals(12, sharePartition.startOffset());
    assertEquals(12, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    // The fully acquired first batch stays acquired by the member.
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
    // Released offsets (7-8) were archived by the LSO move; the remaining
    // acquired offsets (9-11) are untouched.
    Map<Long, InFlightState> expectedOffsetStateMap = Map.of(
        7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID),
        8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID),
        9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState());
}
@Test
public void testLsoMovementAheadOfEndOffset() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Acquire two in-flight batches: 2-6 and 7-11.
    fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
    // Advance the LSO beyond the end offset without any acknowledgements.
    sharePartition.updateCacheAndOffsets(14);
    assertEquals(14, sharePartition.nextFetchOffset());
    assertEquals(14, sharePartition.startOffset());
    assertEquals(14, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    // Both batches remain acquired by the member even though they now trail the LSO.
    for (long baseOffset : new long[] {2L, 7L}) {
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(baseOffset).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(baseOffset).batchState());
    }
}
@Test
public void testLsoMovementWithGapsInCachedStateMap() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Three acquired batches with untracked gaps between them:
    // 2-6, (gap 7-9), 10-14, (gap 15-19), 20-24.
    fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
    // Move the LSO into the second gap (15-19).
    sharePartition.updateCacheAndOffsets(18);
    assertEquals(25, sharePartition.nextFetchOffset());
    assertEquals(18, sharePartition.startOffset());
    assertEquals(24, sharePartition.endOffset());
    assertEquals(3, sharePartition.cachedState().size());
    // No batch was acknowledged, so every batch stays acquired by the member.
    for (long baseOffset : new long[] {2L, 10L, 20L}) {
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(baseOffset).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(baseOffset).batchState());
    }
}
@Test
public void testLsoMovementWithGapsInCachedStateMapAndAcknowledgedBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Batches 2-6 and 10-14 with an untracked gap at 7-9.
    fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    // Release the second batch so it becomes available for re-delivery.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.RELEASE.id))));
    // Move the LSO to the base offset of the released batch.
    sharePartition.updateCacheAndOffsets(10);
    assertEquals(10, sharePartition.nextFetchOffset());
    assertEquals(10, sharePartition.startOffset());
    assertEquals(14, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    // The first batch trails the LSO but stays acquired; the released batch is available again.
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState());
}
@Test
public void testLsoMovementPostGapsInAcknowledgements() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords records1 = memoryRecords(5, 2);
    // Untracked gap of 3 offsets from 7-9, then a batch 10-14 with an in-batch
    // gap at 15-17 followed by a single record at 18.
    MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 5);
    recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
    MemoryRecords records2 = recordsBuilder.build();
    fetchAcquiredRecords(sharePartition, records1, 2);
    fetchAcquiredRecords(sharePartition, records2, 9);
    // Release all records, marking the in-batch gap (15-17) explicitly with gap acks.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 6, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(10, 18, List.of(
            AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id,
            AcknowledgeType.RELEASE.id, AcknowledgeType.RELEASE.id, ACKNOWLEDGE_TYPE_GAP_ID,
            ACKNOWLEDGE_TYPE_GAP_ID, ACKNOWLEDGE_TYPE_GAP_ID, AcknowledgeType.RELEASE.id
        ))));
    // Move the LSO to 18, the only offset still deliverable.
    sharePartition.updateCacheAndOffsets(18);
    assertEquals(18, sharePartition.nextFetchOffset());
    assertEquals(18, sharePartition.startOffset());
    assertEquals(18, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    // The first batch trails the LSO and ends up archived.
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState());
    // Offsets 10-17 (released records plus gap offsets) are archived; offset 18
    // is available again after its release.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    for (long offset = 10L; offset <= 17L; offset++) {
        expectedOffsetStateMap.put(offset, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    }
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
}
@Test
public void testTerminalRecordsNotUpdatedWhenBatchesBeforeStartOffsetAreExpired() {
    // Mock a persister whose initial read reports start offset 11 and two AVAILABLE
    // batches (11-20 and 21-30), each already delivered once.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
                    new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult))
;
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    assertEquals(11, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Fetched records are from 11 to 20
    MemoryRecords records = memoryRecords(10, 11);
    // A member acquired the available records 11 -> 20.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        MAX_FETCH_RECORDS,
        records.batches().iterator().next().baseOffset(),
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM), 10);
    // Expected delivery count is 2: the persister already recorded one delivery and
    // this acquisition adds another.
    assertArrayEquals(expectedAcquiredRecord(11, 20, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Move the LSO to 21.
    sharePartition.updateCacheAndOffsets(21);
    // After the LSO is moved to 21, all the records after new Start offset are in non-Terminal states. Thus,
    // deliveryCompleteCount is not changed.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Acknowledge the acquired records. Since these records are present before the startOffset, these acknowledgements
    // will simply be ignored.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(11L, 20L, List.of(AcknowledgeType.RELEASE.id))));
    // Since the acknowledgements are ignored, the deliveryCompleteCount should not change.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Allow state writes so the lock-timeout handler can persist its result.
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null));
    // Expiring the acquisition lock timer task of the ACQUIRED batch.
    sharePartition.cachedState().get(11L).batchAcquisitionLockTimeoutTask().run();
    // After the acquisition lock timer task is expired, the records present before the startOffset are directly
    // moved to the ARCHIVED state.
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Since these records are present before the start offset, the deliveryCompleteCount should not change.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testTerminalRecordsNotUpdatedWhenOffsetsBeforeStartOffsetAreExpired() {
    // Mock a persister whose initial read reports start offset 11 and two AVAILABLE
    // batches (11-20 and 21-30), each already delivered once.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
                    new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    assertEquals(11, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Fetched records are from 11 to 20
    MemoryRecords records = memoryRecords(10, 11);
    // A member acquired the available records 11 -> 20.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        MAX_FETCH_RECORDS,
        records.batches().iterator().next().baseOffset(),
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM), 10);
    // Expected delivery count is 2: one prior delivery from the persister plus this acquisition.
    assertArrayEquals(expectedAcquiredRecord(11, 20, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Move the LSO to 16.
    sharePartition.updateCacheAndOffsets(16);
    // After the LSO is moved to 16, all the records after new Start offset are in non-Terminal states. Thus,
    // deliveryCompleteCount is not changed.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(16, sharePartition.startOffset());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Allow state writes so the lock-timeout handler can persist its result.
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null));
    // Expiring the acquisition lock timer task of the ACQUIRED batch.
    sharePartition.cachedState().get(11L).batchAcquisitionLockTimeoutTask().run();
    // Since these records are present before the start offset, the deliveryCompleteCount should not change.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // The LSO split the batch: 11-15 (before start offset 16) are archived on expiry,
    // while 16-20 become available again.
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(11L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(12L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(13L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(14L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(15L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(16L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(17L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(18L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(19L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(20L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
}
@Test
public void testTerminalRecordsNotUpdatedWhenOffsetsBeforeStartOffsetAreExpiredAfterAcknowledgements() {
    // Mock a persister whose initial read reports start offset 11 and two AVAILABLE
    // batches (11-20 and 21-30), each already delivered once.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
                    new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    assertEquals(11, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Fetched records are from 11 to 20
    MemoryRecords records = memoryRecords(10, 11);
    // A member acquired the available records 11 -> 20.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        MAX_FETCH_RECORDS,
        records.batches().iterator().next().baseOffset(),
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM), 10);
    // Expected delivery count is 2: one prior delivery from the persister plus this acquisition.
    assertArrayEquals(expectedAcquiredRecord(11, 20, 2).toArray(), acquiredRecordsList.toArray());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Move the LSO to 16.
    sharePartition.updateCacheAndOffsets(16);
    // There are no Terminal records between start offset and end offset.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(16, sharePartition.startOffset());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    // Stub a successful state write so the acknowledgement below can complete.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Acknowledge the acquired records. Only those records that are after the startOffset will be acknowledged.
    // In this case, records 11 -> 15 will remain in the ACQUIRED state, while records 16 -> 20 will be RELEASED.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(11L, 20L, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).offsetState().get(11L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).offsetState().get(12L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).offsetState().get(13L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).offsetState().get(14L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(11L).offsetState().get(15L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(16L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(17L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(18L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(19L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(20L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null));
    // Expiring the acquisition lock timer task of the ACQUIRED records.
    sharePartition.cachedState().get(11L).offsetState().get(11L).acquisitionLockTimeoutTask().run();
    sharePartition.cachedState().get(11L).offsetState().get(12L).acquisitionLockTimeoutTask().run();
    sharePartition.cachedState().get(11L).offsetState().get(13L).acquisitionLockTimeoutTask().run();
    sharePartition.cachedState().get(11L).offsetState().get(14L).acquisitionLockTimeoutTask().run();
    sharePartition.cachedState().get(11L).offsetState().get(15L).acquisitionLockTimeoutTask().run();
    // Since these records are present before the start offset, the deliveryCompleteCount should not change.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // On expiry the offsets before the start offset (11-15) move straight to ARCHIVED;
    // the released offsets (16-20) stay available.
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(11L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(12L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(13L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(14L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).offsetState().get(15L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(16L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(17L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(18L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(19L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(11L).offsetState().get(20L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
}
@Test
public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Acquire batches 5-9, 10-14, 20-24, 25-29, 30-34 and 35-39 for MEMBER_ID;
    // batch 15-19 is acquired by a second member and must survive MEMBER_ID's release.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(15, 5)), FETCH_ISOLATION_HWM);
    fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(25, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(30, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(35, 5), 5);
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Acknowledge records.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(6, 7, List.of(AcknowledgeType.ACCEPT.id)),
        new ShareAcknowledgementBatch(8, 8, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(25, 29, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(35, 37, List.of(AcknowledgeType.RELEASE.id))
    ));
    // Offsets 6-7 were ACCEPTed, hence a delivery-complete count of 2.
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // LSO is at 24.
    sharePartition.updateCacheAndOffsets(24);
    assertEquals(25, sharePartition.nextFetchOffset());
    assertEquals(24, sharePartition.startOffset());
    assertEquals(39, sharePartition.endOffset());
    assertEquals(7, sharePartition.cachedState().size());
    // The accepted offsets now trail the start offset, so the count drops back to 0.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Release acquired records for MEMBER_ID.
    CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(releaseResult.join());
    assertFalse(releaseResult.isCompletedExceptionally());
    // Check cached state. Batch 5-9 trails the start offset: acquired/released offsets
    // were archived, acknowledged offsets keep their terminal state.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState());
    // member-2's batch is untouched by MEMBER_ID's release.
    assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    // Batch 20-24 straddles the start offset: 20-23 archived, 24 available with its
    // delivery count rolled back to 0 by the release.
    expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(0, sharePartition.deliveryCompleteCount());
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(20L).offsetState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(25L).batchMemberId());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(25L).batchState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(30L).batchMemberId());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(30L).batchState());
    // Batch 35-39: 35-37 were already released (delivery count stays 1), 38-39 are
    // released by this call (delivery count rolled back to 0).
    expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(35L).offsetState());
}
@Test
public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToStartOfBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Two acquired batches: 5-9 and 10-14.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    // Move the LSO exactly to the base offset of the second batch.
    sharePartition.updateCacheAndOffsets(10);
    assertEquals(15, sharePartition.nextFetchOffset());
    assertEquals(10, sharePartition.startOffset());
    assertEquals(14, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Release every record still held by the member.
    CompletableFuture<Void> released = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(released.join());
    assertFalse(released.isCompletedExceptionally());
    // The batch before the start offset is archived; the batch at the start offset
    // becomes available for re-delivery.
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState());
    // No record between start and end offset reached a terminal state, so the
    // delivery-complete count is unchanged.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToMiddleOfBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Two acquired batches: 5-9 and 10-14.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    // Move the LSO into the middle of the second batch.
    sharePartition.updateCacheAndOffsets(11);
    assertEquals(15, sharePartition.nextFetchOffset());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(14, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Release every record still held by the member.
    CompletableFuture<Void> released = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(released.join());
    assertFalse(released.isCompletedExceptionally());
    // The whole first batch trails the start offset and is archived.
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState());
    // Within the second batch, offset 10 trails the start offset (archived) while
    // 11-14 become available with their delivery count rolled back to 0.
    Map<Long, InFlightState> expectedStates = Map.of(
        10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID),
        11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
        12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
        13L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
        14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(expectedStates, sharePartition.cachedState().get(10L).offsetState());
    // No record between start and end offset reached a terminal state.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testReleaseAcquiredRecordsDecreaseDeliveryCount() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Two acquired batches: 5-9 and 10-14.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    // Accept offsets 12-13, moving them to the terminal ACKNOWLEDGED state.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(12, 13, List.of(AcknowledgeType.ACCEPT.id))));
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Move the LSO into the middle of the second batch.
    sharePartition.updateCacheAndOffsets(11);
    assertEquals(15, sharePartition.nextFetchOffset());
    assertEquals(11, sharePartition.startOffset());
    assertEquals(14, sharePartition.endOffset());
    assertEquals(2, sharePartition.cachedState().size());
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Before the release, every offset of the second batch carries a delivery count of 1.
    Map<Long, InFlightState> statesBeforeRelease = Map.of(
        10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID),
        12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(statesBeforeRelease, sharePartition.cachedState().get(10L).offsetState());
    // Release every record still held by the member.
    CompletableFuture<Void> released = sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertNull(released.join());
    assertFalse(released.isCompletedExceptionally());
    // The first batch trails the start offset: archived with its delivery count untouched.
    assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
    // Released offsets (11, 14) get their delivery count rolled back to 0; the
    // acknowledged offsets (12-13) and the archived offset (10) are untouched.
    Map<Long, InFlightState> statesAfterRelease = Map.of(
        10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID),
        11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID),
        12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID),
        14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
    assertEquals(statesAfterRelease, sharePartition.cachedState().get(10L).offsetState());
    assertEquals(2, sharePartition.deliveryCompleteCount());
}
    @Test
    public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovement() throws InterruptedException {
        // Verifies that when the start offset (LSO) moves past in-flight batches and the
        // acquisition locks then expire, records below the LSO are ARCHIVED while records
        // at/after the LSO revert to AVAILABLE for re-delivery.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        // Acquire seven 5-record batches covering offsets 5-39; the batch at 15 is
        // acquired by a second member using batch-optimized acquire mode.
        fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(15, 5)), FETCH_ISOLATION_HWM);
        fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(25, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(30, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(35, 5), 5);
        // Acknowledge records.
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(6, 7, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(8, 8, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(25, 29, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(35, 37, List.of(AcknowledgeType.RELEASE.id))
        ));
        // Only offsets 6 and 7 reached a terminal (ACCEPT) state.
        assertEquals(2, sharePartition.deliveryCompleteCount());
        // LSO is at 24.
        sharePartition.updateCacheAndOffsets(24);
        assertEquals(25, sharePartition.nextFetchOffset());
        assertEquals(24, sharePartition.startOffset());
        assertEquals(39, sharePartition.endOffset());
        assertEquals(7, sharePartition.cachedState().size());
        // Delivery-complete count is reset because the completed offsets now lie before the LSO.
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> {
                // Batch 5-9: everything below LSO is archived; prior terminal states are kept.
                Map<Long, InFlightState> expectedOffsetStateMap1 = new HashMap<>();
                expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap1.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap1.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap1.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                // Batch 20-24 straddles the LSO: 20-23 archived, 24 (== startOffset) available again.
                Map<Long, InFlightState> expectedOffsetStateMap2 = new HashMap<>();
                expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap2.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                // Batch 35-39 is fully after the LSO: all offsets available for re-delivery.
                Map<Long, InFlightState> expectedOffsetStateMap3 = new HashMap<>();
                expectedOffsetStateMap3.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap3.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap3.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap3.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap3.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                return sharePartition.cachedState().get(5L).offsetState().equals(expectedOffsetStateMap1) &&
                    sharePartition.cachedState().get(20L).offsetState().equals(expectedOffsetStateMap2) &&
                    sharePartition.cachedState().get(25L).batchState() == RecordState.AVAILABLE &&
                    sharePartition.cachedState().get(30L).batchState() == RecordState.AVAILABLE &&
                    sharePartition.cachedState().get(35L).offsetState().equals(expectedOffsetStateMap3);
            },
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L), 20L, List.of(20L, 21L, 22L, 23L, 24L), 25L, List.of(), 30L, List.of(), 35L, List.of(35L, 36L, 37L, 38L, 39L))));
        // Batches 10-14 and 15-19 lie entirely below the LSO and are archived as whole batches.
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState());
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId());
        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(15L).batchState());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
@Test
public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToStartOfBatch() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withState(SharePartitionState.ACTIVE)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
// LSO is at 10.
sharePartition.updateCacheAndOffsets(10);
assertEquals(15, sharePartition.nextFetchOffset());
assertEquals(10, sharePartition.startOffset());
assertEquals(14, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Allowing acquisition lock to expire.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) &&
sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED &&
sharePartition.cachedState().get(10L).batchMemberId().equals(EMPTY_MEMBER_ID) &&
sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(), 10L, List.of())));
// All records after startOffset are in non-Terminal states. Thus, deliveryCompleteCount is not changed.
assertEquals(0, sharePartition.deliveryCompleteCount());
}
    @Test
    public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToMiddleOfBatch() throws InterruptedException {
        // Verifies lock expiry after the LSO lands in the middle of a batch:
        // the offset below the LSO is archived, the rest become available.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        // Acquire two 5-record batches at offsets 5 and 10.
        fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        // LSO is at 11.
        sharePartition.updateCacheAndOffsets(11);
        assertEquals(15, sharePartition.nextFetchOffset());
        assertEquals(11, sharePartition.startOffset());
        assertEquals(14, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Allowing acquisition lock to expire.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> {
                // Batch 10-14 is tracked per offset: 10 (below LSO) archived, 11-14 available.
                Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
                expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
                return sharePartition.cachedState().get(10L).offsetState().equals(expectedOffsetStateMap) &&
                    sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) &&
                    sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED;
            },
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(), 10L, List.of(10L, 11L, 12L, 13L, 14L))));
        // All records after startOffset are in non-Terminal states. Thus, deliveryCompleteCount is not changed.
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
@Test
public void testScheduleAcquisitionLockTimeoutValueFromGroupConfig() {
GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
int expectedDurationMs = 500;
Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
Mockito.when(groupConfig.shareRecordLockDurationMs()).thenReturn(expectedDurationMs);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withGroupConfigManager(groupConfigManager).build();
AcquisitionLockTimerTask timerTask = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L);
Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID);
Mockito.verify(groupConfig).shareRecordLockDurationMs();
assertEquals(expectedDurationMs, timerTask.delayMs);
}
    @Test
    public void testScheduleAcquisitionLockTimeoutValueUpdatesSuccessfully() {
        // Verifies the lock duration is re-read from the group config on every
        // scheduling, so dynamic config changes take effect immediately.
        GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class);
        GroupConfig groupConfig = Mockito.mock(GroupConfig.class);
        int expectedDurationMs1 = 500;
        int expectedDurationMs2 = 1000;
        Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig));
        // First invocation of shareRecordLockDurationMs() returns 500, and the second invocation returns 1000
        Mockito.when(groupConfig.shareRecordLockDurationMs())
            .thenReturn(expectedDurationMs1)
            .thenReturn(expectedDurationMs2);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withGroupConfigManager(groupConfigManager).build();
        // First scheduling picks up the first configured duration.
        AcquisitionLockTimerTask timerTask1 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L);
        Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID);
        Mockito.verify(groupConfig).shareRecordLockDurationMs();
        assertEquals(expectedDurationMs1, timerTask1.delayMs);
        // Second scheduling re-reads the config and picks up the updated duration.
        AcquisitionLockTimerTask timerTask2 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L);
        Mockito.verify(groupConfigManager, Mockito.times(4)).groupConfig(GROUP_ID);
        Mockito.verify(groupConfig, Mockito.times(2)).shareRecordLockDurationMs();
        assertEquals(expectedDurationMs2, timerTask2.delayMs);
    }
    @Test
    public void testAcknowledgeBatchAndOffsetPostLsoMovement() {
        // Verifies RELEASE acknowledgements after an LSO move: offsets below the LSO
        // keep their acquired state (and lock tasks), offsets at/after it become available.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        // LSO is at 12.
        sharePartition.updateCacheAndOffsets(12);
        assertEquals(15, sharePartition.nextFetchOffset());
        assertEquals(12, sharePartition.startOffset());
        assertEquals(14, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Check cached state map.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
        // Acknowledge with RELEASE action.
        CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.RELEASE.id))));
        assertNull(ackResult.join());
        assertFalse(ackResult.isCompletedExceptionally());
        // No record is moved to Terminal state, thus deliveryCompleteCount is not changed.
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Next fetch restarts at the LSO since released records are re-deliverable.
        assertEquals(12, sharePartition.nextFetchOffset());
        assertEquals(12, sharePartition.startOffset());
        assertEquals(14, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        // The batch entirely below the LSO is untouched by the release: still acquired,
        // still holding its acquisition lock task.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
        assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask());
        // Check cached offset state map.
        Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
        // Lock tasks remain only for the offsets that stayed acquired (10, 11).
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
        assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
    @Test
    public void testAcknowledgeBatchPostLsoMovement() {
        // Verifies an ACCEPT acknowledgement that spans the LSO: only offsets at/after
        // the LSO count toward delivery completion.
        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
        fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
        fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
        // LSO is at 14.
        sharePartition.updateCacheAndOffsets(14);
        assertEquals(25, sharePartition.nextFetchOffset());
        assertEquals(14, sharePartition.startOffset());
        assertEquals(24, sharePartition.endOffset());
        assertEquals(3, sharePartition.cachedState().size());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acknowledge with ACCEPT action.
        CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(2, 14, List.of(AcknowledgeType.ACCEPT.id))));
        assertNull(ackResult.join());
        assertFalse(ackResult.isCompletedExceptionally());
        // Only record 14 is post startOffset and in a Terminal state. Thus, only that is considered for deliveryCompleteCount.
        assertEquals(1, sharePartition.deliveryCompleteCount());
        assertEquals(25, sharePartition.nextFetchOffset());
        // NOTE(review): the batch at offset 2 lies entirely before the start offset (14);
        // the assertions below show the acknowledgement leaves it untouched (still ACQUIRED)
        // and the cache still holds 3 entries — confirm against SharePartition.acknowledge.
        assertEquals(14, sharePartition.startOffset());
        assertEquals(24, sharePartition.endOffset());
        assertEquals(3, sharePartition.cachedState().size());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
        // Check cached state offset map.
        // Batch 10-14 straddles the LSO: 10-13 stay acquired, 14 is acknowledged.
        Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
    }
@Test
public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withState(SharePartitionState.ACTIVE)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
// LSO is at 7.
sharePartition.updateCacheAndOffsets(7);
assertEquals(7, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(7, sharePartition.endOffset());
assertEquals(1, sharePartition.cachedState().size());
// Check cached state map.
assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Allowing acquisition lock to expire.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.nextFetchOffset() == 7 && sharePartition.cachedState().isEmpty() &&
sharePartition.startOffset() == 7 && sharePartition.endOffset() == 7,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of()));
assertEquals(0, sharePartition.deliveryCompleteCount());
fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
assertEquals(15, sharePartition.nextFetchOffset());
assertEquals(10, sharePartition.startOffset());
assertEquals(14, sharePartition.endOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.deliveryCompleteCount());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Acknowledge with RELEASE action. This contains a batch that doesn't exist at all.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(2, 14, List.of(AcknowledgeType.RELEASE.id))));
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(10, sharePartition.startOffset());
assertEquals(14, sharePartition.endOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState());
assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
    @Test
    public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOffsetAheadOfStartOffsetBatch() throws InterruptedException {
        // Same scenario as above but the later RELEASE ends inside the live range,
        // covering two existing batches plus a range that no longer exists.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withState(SharePartitionState.ACTIVE)
            .build();
        fetchAcquiredRecords(sharePartition, memoryRecords(1, 2), 2);
        // LSO is at 3.
        sharePartition.updateCacheAndOffsets(3);
        assertEquals(3, sharePartition.nextFetchOffset());
        assertEquals(3, sharePartition.startOffset());
        assertEquals(3, sharePartition.endOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Check cached state map.
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(1L).batchMemberId());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(1L).batchState());
        assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask());
        // Allowing acquisition lock to expire. The whole batch is below the LSO, so the
        // cache empties once the lock fires.
        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
        TestUtils.waitForCondition(
            () -> sharePartition.nextFetchOffset() == 3 && sharePartition.cachedState().isEmpty() &&
                sharePartition.startOffset() == 3 && sharePartition.endOffset() == 3,
            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
            () -> assertionFailedMessage(sharePartition, Map.of()));
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acquire two fresh batches covering offsets 3-4 and 5-7.
        fetchAcquiredRecords(sharePartition, memoryRecords(3, 2), 2);
        fetchAcquiredRecords(sharePartition, memoryRecords(5, 3), 3);
        assertEquals(8, sharePartition.nextFetchOffset());
        assertEquals(3, sharePartition.startOffset());
        assertEquals(7, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        assertEquals(0, sharePartition.deliveryCompleteCount());
        // Acknowledge with RELEASE action. This contains a batch that doesn't exist at all.
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(1, 7, List.of(AcknowledgeType.RELEASE.id))));
        // Both live batches are released (AVAILABLE, lock tasks cancelled); the stale
        // portion (1-2) is ignored.
        assertEquals(3, sharePartition.nextFetchOffset());
        assertEquals(3, sharePartition.startOffset());
        assertEquals(7, sharePartition.endOffset());
        assertEquals(2, sharePartition.cachedState().size());
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(3L).batchMemberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(3L).batchState());
        assertNull(sharePartition.cachedState().get(3L).batchAcquisitionLockTimeoutTask());
        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
        assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
        assertEquals(0, sharePartition.deliveryCompleteCount());
    }
@Test
public void testWriteShareGroupStateWithNullResponse() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null));
CompletableFuture<Void> result = sharePartition.writeShareGroupState(List.of());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
}
@Test
public void testWriteShareGroupStateWithNullTopicsData() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(null);
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
CompletableFuture<Void> result = sharePartition.writeShareGroupState(anyList());
assertTrue(result.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, result);
}
    @Test
    public void testWriteShareGroupStateWithInvalidTopicsData() {
        // Exercises every malformed shape of the persister write response; each one
        // must complete the write future exceptionally with IllegalStateException.
        Persister persister = Mockito.mock(Persister.class);
        mockPersisterReadStateMethod(persister);
        SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        // TopicsData is empty.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of());
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        CompletableFuture<Void> writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
        // TopicsData contains more results than expected.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of()),
            new TopicData<>(Uuid.randomUuid(), List.of())));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
        // TopicsData contains no partition data.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of())));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
        // TopicsData contains wrong topicId.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(Uuid.randomUuid(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
        // TopicsData contains more partition data than expected.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()),
                PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
        // TopicsData contains wrong partition.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        writeResult = sharePartition.writeShareGroupState(anyList());
        assertTrue(writeResult.isCompletedExceptionally());
        assertFutureThrows(IllegalStateException.class, writeResult);
    }
@Test
public void testWriteShareGroupStateWithWriteException() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Write exception")));
CompletableFuture<Void> writeResult = sharePartition1.writeShareGroupState(anyList());
assertTrue(writeResult.isCompletedExceptionally());
assertFutureThrows(IllegalStateException.class, writeResult);
persister = Mockito.mock(Persister.class);
// Throw exception for write state.
mockPersisterReadStateMethod(persister);
SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build();
Mockito.when(persister.writeState(Mockito.any())).thenThrow(new RuntimeException("Write exception"));
assertThrows(RuntimeException.class, () -> sharePartition2.writeShareGroupState(anyList()));
}
@Test
public void testWriteShareGroupState() {
Persister persister = Mockito.mock(Persister.class);
mockPersisterReadStateMethod(persister);
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
CompletableFuture<Void> result = sharePartition.writeShareGroupState(anyList());
assertNull(result.join());
assertFalse(result.isCompletedExceptionally());
}
    @Test
    public void testWriteShareGroupStateFailure() {
        // Verifies the mapping from each persister error code to the exception type
        // surfaced on the write future.
        Persister persister = Mockito.mock(Persister.class);
        mockPersisterReadStateMethod(persister);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withPersister(persister)
            .withState(SharePartitionState.ACTIVE)
            .build();
        // Mock Write state RPC to return error response, NOT_COORDINATOR.
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NOT_COORDINATOR.code(), Errors.NOT_COORDINATOR.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        CompletableFuture<Void> result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(CoordinatorNotAvailableException.class, result);
        // Mock Write state RPC to return error response, COORDINATOR_NOT_AVAILABLE.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_NOT_AVAILABLE.code(), Errors.COORDINATOR_NOT_AVAILABLE.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(CoordinatorNotAvailableException.class, result);
        // Mock Write state RPC to return error response, COORDINATOR_LOAD_IN_PROGRESS.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_LOAD_IN_PROGRESS.code(), Errors.COORDINATOR_LOAD_IN_PROGRESS.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(CoordinatorNotAvailableException.class, result);
        // Mock Write state RPC to return error response, GROUP_ID_NOT_FOUND.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(GroupIdNotFoundException.class, result);
        // Mock Write state RPC to return error response, UNKNOWN_TOPIC_OR_PARTITION.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(UnknownTopicOrPartitionException.class, result);
        // Mock Write state RPC to return error response, FENCED_STATE_EPOCH.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.FENCED_STATE_EPOCH.code(), Errors.FENCED_STATE_EPOCH.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(NotLeaderOrFollowerException.class, result);
        // Mock Write state RPC to return error response, FENCED_LEADER_EPOCH.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.FENCED_LEADER_EPOCH.code(), Errors.FENCED_LEADER_EPOCH.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(NotLeaderOrFollowerException.class, result);
        // Mock Write state RPC to return error response, UNKNOWN_SERVER_ERROR.
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_SERVER_ERROR.code(), Errors.UNKNOWN_SERVER_ERROR.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        result = sharePartition.writeShareGroupState(anyList());
        assertTrue(result.isCompletedExceptionally());
        assertFutureThrows(UnknownServerException.class, result);
    }
@Test
public void testWriteShareGroupStateWithNoOpStatePersister() {
    // With the default (no-op) state persister, a write should complete normally:
    // no value, no exception.
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    PersisterStateBatch availableBatch = new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2);
    PersisterStateBatch archivedBatch = new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3);
    CompletableFuture<Void> future = partition.writeShareGroupState(List.of(availableBatch, archivedBatch));
    assertNull(future.join());
    assertFalse(future.isCompletedExceptionally());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgeTypeAccept() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(partition, memoryRecords(250), 250);
    assertFalse(partition.canAcquireRecords());
    // Accept the full acquired range 0-249.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 249, List.of(AcknowledgeType.ACCEPT.id))));
    assertEquals(250, partition.nextFetchOffset());
    // Accepting the leading records moves the SPSO past them.
    assertEquals(250, partition.startOffset());
    assertEquals(250, partition.endOffset());
    assertTrue(partition.canAcquireRecords());
    // Accepted records are removed from the cached state entirely.
    assertEquals(0, partition.cachedState().size());
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgeTypeReject() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(partition, memoryRecords(250), 250);
    assertFalse(partition.canAcquireRecords());
    // Reject the full acquired range 0-249.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 249, List.of(AcknowledgeType.REJECT.id))));
    assertEquals(250, partition.nextFetchOffset());
    // Rejecting (archiving) the leading records moves the SPSO past them.
    assertEquals(250, partition.startOffset());
    assertEquals(250, partition.endOffset());
    assertTrue(partition.canAcquireRecords());
    // Rejected records are removed from the cached state entirely.
    assertEquals(0, partition.cachedState().size());
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgeTypeRelease() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(partition, memoryRecords(250), 250);
    assertFalse(partition.canAcquireRecords());
    // Release the full acquired range 0-249.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 249, List.of(AcknowledgeType.RELEASE.id))));
    // Releasing does NOT advance the SPSO; the records are simply re-acquirable.
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    assertTrue(partition.canAcquireRecords());
    // Released records stay in the cached state as AVAILABLE with no owning member.
    assertEquals(1, partition.cachedState().size());
    assertEquals(RecordState.AVAILABLE, partition.cachedState().get(0L).batchState());
    assertEquals(EMPTY_MEMBER_ID, partition.cachedState().get(0L).batchMemberId());
    assertEquals(1, partition.cachedState().get(0L).batchDeliveryCount());
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchSubset() {
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(20)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire two 15-record batches; the second fill exhausts the in-flight limit.
    fetchAcquiredRecords(partition, memoryRecords(15), 15);
    assertTrue(partition.canAcquireRecords());
    fetchAcquiredRecords(partition, memoryRecords(15, 15), 15);
    assertFalse(partition.canAcquireRecords());
    // Accept only a prefix (0-12) of the first batch.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 12, List.of(AcknowledgeType.ACCEPT.id))));
    // The first batch is tracked per-offset: 12 acknowledged, 13 still acquired.
    assertEquals(RecordState.ACKNOWLEDGED, partition.cachedState().get(0L).offsetState().get(12L).state());
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(0L).offsetState().get(13L).state());
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(15L).batchState());
    assertTrue(partition.canAcquireRecords());
    // SPSO advances to the first un-acknowledged offset.
    assertEquals(13, partition.startOffset());
    assertEquals(29, partition.endOffset());
    assertEquals(30, partition.nextFetchOffset());
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntireBatch() {
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(20)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire two 15-record batches; the second fill exhausts the in-flight limit.
    fetchAcquiredRecords(partition, memoryRecords(15), 15);
    assertTrue(partition.canAcquireRecords());
    fetchAcquiredRecords(partition, memoryRecords(15, 15), 15);
    assertFalse(partition.canAcquireRecords());
    // Reject the whole first batch (0-14).
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 14, List.of(AcknowledgeType.REJECT.id))));
    // Second batch is untouched and still owned by the acknowledging member.
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(15L).batchState());
    assertEquals(MEMBER_ID, partition.cachedState().get(15L).batchMemberId());
    assertEquals(1, partition.cachedState().get(15L).batchDeliveryCount());
    assertTrue(partition.canAcquireRecords());
    // SPSO advances past the fully rejected leading batch.
    assertEquals(15, partition.startOffset());
    assertEquals(29, partition.endOffset());
    assertEquals(30, partition.nextFetchOffset());
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() {
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(20)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire two 15-record batches; the second fill exhausts the in-flight limit.
    fetchAcquiredRecords(partition, memoryRecords(15), 15);
    assertTrue(partition.canAcquireRecords());
    fetchAcquiredRecords(partition, memoryRecords(15, 15), 15);
    assertFalse(partition.canAcquireRecords());
    // Reject only the middle slice 10-14 of the first batch.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.REJECT.id))));
    // Offsets before the slice stay ACQUIRED; the slice itself is ARCHIVED.
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(0L).offsetState().get(9L).state());
    assertEquals(RecordState.ARCHIVED, partition.cachedState().get(0L).offsetState().get(10L).state());
    assertEquals(RecordState.ACQUIRED, partition.cachedState().get(15L).batchState());
    assertEquals(MEMBER_ID, partition.cachedState().get(15L).batchMemberId());
    assertEquals(1, partition.cachedState().get(15L).batchDeliveryCount());
    // Leading records are still unacknowledged, so the SPSO does not move and the
    // partition remains full.
    assertFalse(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(29, partition.endOffset());
    assertEquals(30, partition.nextFetchOffset());
    // Records 10 -> 14 are in ARCHIVED state, and so deliveryCompleteCount is 5.
    assertEquals(5, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged() {
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(20)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire two 15-record batches; the second fill exhausts the in-flight limit.
    fetchAcquiredRecords(partition, memoryRecords(15), 15);
    assertTrue(partition.canAcquireRecords());
    fetchAcquiredRecords(partition, memoryRecords(15, 15), 15);
    assertFalse(partition.canAcquireRecords());
    // Accept everything in flight (0-29).
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 29, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(partition.canAcquireRecords());
    // All offsets collapse to the next fetch position once the cache is drained.
    assertEquals(30, partition.startOffset());
    assertEquals(30, partition.endOffset());
    assertEquals(30, partition.nextFetchOffset());
    // Cache state is empty.
    assertEquals(0, partition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() {
    // Interleaves several acquisitions with partial acknowledgements and verifies
    // that startOffset/endOffset/nextFetchOffset track the cached state correctly
    // at each step. In-flight limit of 100 records.
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(100)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire three consecutive 20-record batches: 0-19, 20-39, 40-59.
    fetchAcquiredRecords(sharePartition, memoryRecords(20), 20);
    assertTrue(sharePartition.canAcquireRecords());
    fetchAcquiredRecords(sharePartition, memoryRecords(20, 20), 20);
    assertTrue(sharePartition.canAcquireRecords());
    fetchAcquiredRecords(sharePartition, memoryRecords(40, 20), 20);
    assertTrue(sharePartition.canAcquireRecords());
    // First Acknowledgement for the first batch of records 0-19.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 19, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(sharePartition.canAcquireRecords());
    // SPSO moves past the fully accepted first batch.
    assertEquals(20, sharePartition.startOffset());
    assertEquals(59, sharePartition.endOffset());
    assertEquals(60, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Acquire a fourth batch 60-79, then accept 20-49 (all of the second batch and
    // half of the third).
    fetchAcquiredRecords(sharePartition, memoryRecords(60, 20), 20);
    assertTrue(sharePartition.canAcquireRecords());
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(20, 49, List.of(AcknowledgeType.ACCEPT.id))));
    // The 40-59 batch is now tracked per-offset: 49 acknowledged, 50 still acquired.
    assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(40L).offsetState().get(49L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(40L).offsetState().get(50L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(60L).batchState());
    assertTrue(sharePartition.canAcquireRecords());
    assertEquals(50, sharePartition.startOffset());
    assertEquals(79, sharePartition.endOffset());
    assertEquals(80, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Acquire 80-179, which hits the in-flight limit of 100 (50-179 now in flight).
    fetchAcquiredRecords(sharePartition, memoryRecords(80, 100), 100);
    assertFalse(sharePartition.canAcquireRecords());
    // Final Acknowledgement, all records are acknowledged here.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(50, 179, List.of(AcknowledgeType.REJECT.id))));
    // Everything is terminal, so the cache is cleared and all offsets collapse to 180.
    assertEquals(0, sharePartition.cachedState().size());
    assertTrue(sharePartition.canAcquireRecords());
    assertEquals(180, sharePartition.startOffset());
    assertEquals(180, sharePartition.endOffset());
    assertEquals(180, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // A fresh acquisition after the collapse behaves like a first acquisition.
    fetchAcquiredRecords(sharePartition, memoryRecords(180, 20), 20);
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(180L).batchState());
    assertTrue(sharePartition.canAcquireRecords());
    assertEquals(180, sharePartition.startOffset());
    assertEquals(199, sharePartition.endOffset());
    assertEquals(200, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testMaybeUpdateCachedStateGapAfterLastOffsetAcknowledged() {
    // Persister read state reports two batches with a hole between them: 11-20
    // AVAILABLE and 31-40 ARCHIVED (offsets 21-30 have no persisted state), with
    // start offset 11 and state epoch 3.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 1),
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21 to 30
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    // Write state RPC succeeds, so acknowledgements complete normally.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    // The ARCHIVED batch 31-40 counts as delivery-complete right after initialization.
    assertEquals(10, sharePartition.deliveryCompleteCount());
    // Acquiring the first AVAILABLE batch from 11 to 20
    fetchAcquiredRecords(sharePartition, memoryRecords(11, 10), 10);
    assertTrue(sharePartition.canAcquireRecords());
    // Sending acknowledgement for the first batch from 11 to 20
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(11, 20, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(sharePartition.canAcquireRecords());
    // After the acknowledgement is done successfully, maybeUpdateCachedStateAndOffsets method is invoked to see
    // if the start offset can be moved ahead. The last offset acknowledged is 20. But instead of moving start
    // offset to the next batch in the cached state (31 to 40), it is moved to the next offset of the last
    // acknowledged offset (21). This is because there is an acquirable gap in the cached state from 21 to 30.
    assertEquals(21, sharePartition.startOffset());
    assertEquals(40, sharePartition.endOffset());
    assertEquals(21, sharePartition.nextFetchOffset());
    assertEquals(10, sharePartition.deliveryCompleteCount());
    // The partition must remember the gap window (21..40) discovered from the
    // persister read result so the gap can still be fetched.
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
public void testCanAcquireRecordsReturnsTrue() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // A fresh partition starts with both offsets at zero.
    assertEquals(0, partition.startOffset());
    assertEquals(0, partition.endOffset());
    // Acquiring 150 records keeps the partition acquirable and stretches the end offset.
    fetchAcquiredRecords(partition, memoryRecords(150), 150);
    assertTrue(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(149, partition.endOffset());
}
@Test
public void testCanAcquireRecordsChangeResponsePostAcknowledgement() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    assertEquals(0, partition.startOffset());
    assertEquals(0, partition.endOffset());
    // Fill the partition with 250 in-flight records across two acquisitions.
    fetchAcquiredRecords(partition, memoryRecords(150), 150);
    assertTrue(partition.canAcquireRecords());
    fetchAcquiredRecords(partition, memoryRecords(150, 100), 100);
    assertFalse(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    // Accepting everything frees the in-flight window again.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 249, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(partition.canAcquireRecords());
    assertEquals(250, partition.startOffset());
    assertEquals(250, partition.endOffset());
}
@Test
public void testCanAcquireRecordsAfterReleaseAcknowledgement() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Fill the partition with 250 in-flight records across two acquisitions.
    fetchAcquiredRecords(partition, memoryRecords(150), 150);
    assertTrue(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(149, partition.endOffset());
    fetchAcquiredRecords(partition, memoryRecords(150, 100), 100);
    assertFalse(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    // Release the leading 90 records.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 89, List.of(AcknowledgeType.RELEASE.id))));
    // RELEASE is not a terminal state, so the SPSO stays put...
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    // ...but the released records are available again, so acquisition is possible.
    assertTrue(partition.canAcquireRecords());
}
@Test
public void testCanAcquireRecordsAfterArchiveAcknowledgement() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Fill the partition with 250 in-flight records across two acquisitions.
    fetchAcquiredRecords(partition, memoryRecords(150), 150);
    assertTrue(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(149, partition.endOffset());
    fetchAcquiredRecords(partition, memoryRecords(150, 100), 100);
    assertFalse(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    // Reject (archive) the leading 90 records.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 89, List.of(AcknowledgeType.REJECT.id))));
    // REJECT is terminal, so the SPSO advances past the archived prefix and
    // acquisition becomes possible again.
    assertEquals(90, partition.startOffset());
    assertEquals(249, partition.endOffset());
    assertTrue(partition.canAcquireRecords());
}
@Test
public void testCanAcquireRecordsAfterAcceptAcknowledgement() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    // Fill the partition with 250 in-flight records across two acquisitions.
    fetchAcquiredRecords(partition, memoryRecords(150), 150);
    assertTrue(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(149, partition.endOffset());
    fetchAcquiredRecords(partition, memoryRecords(150, 100), 100);
    assertFalse(partition.canAcquireRecords());
    assertEquals(0, partition.startOffset());
    assertEquals(249, partition.endOffset());
    // Accept the leading 90 records.
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 89, List.of(AcknowledgeType.ACCEPT.id))));
    // ACCEPT is terminal, so the SPSO advances past the accepted prefix and
    // acquisition becomes possible again.
    assertEquals(90, partition.startOffset());
    assertEquals(249, partition.endOffset());
    assertTrue(partition.canAcquireRecords());
}
@Test
public void testAcknowledgeBatchWithWriteShareGroupStateFailure() {
    // Verifies that a failed persister write rolls back the acknowledgement and
    // leaves the cached batch state untouched.
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withPersister(persister)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    // Acquire offsets 5-14, then try to accept the whole range; the stubbed write
    // failure surfaces as an exceptionally completed future.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 10);
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(UnknownTopicOrPartitionException.class, ackResult);
    // Due to failure in writeShareGroupState, the cached state should not be updated.
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
}
@Test
public void testAcknowledgeOffsetWithWriteShareGroupStateFailure() {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition partition = SharePartitionBuilder.builder()
        .withPersister(persister)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Stub the write-state RPC to report GROUP_ID_NOT_FOUND so every
    // acknowledgement attempt fails.
    WriteShareGroupStateResult writeResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeResult));
    // Acquire offsets 5-10, then reject the 8-10 subset; the stubbed write failure
    // must complete the future exceptionally.
    fetchAcquiredRecords(partition, memoryRecords(5, 6), 6);
    CompletableFuture<Void> ackResult = partition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(8, 10, List.of(AcknowledgeType.REJECT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    // The failed write leaves the cached state untouched: each offset in 5..10 is
    // still ACQUIRED and still owned by the original member.
    assertEquals(1, partition.cachedState().size());
    for (long offset = 5L; offset <= 10L; offset++) {
        assertEquals(RecordState.ACQUIRED, partition.cachedState().get(5L).offsetState().get(offset).state());
        assertEquals(MEMBER_ID, partition.cachedState().get(5L).offsetState().get(offset).memberId());
    }
}
@Test
public void testAcknowledgeSubsetWithAnotherMember() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(partition, memoryRecords(5, 7), 7);
    partition.acknowledge(MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(5, 7, List.of(AcknowledgeType.ACCEPT.id))));
    // A different member acknowledging offsets it never acquired must be rejected
    // with InvalidRecordStateException.
    CompletableFuture<Void> ackResult = partition.acknowledge("member-2",
        List.of(new ShareAcknowledgementBatch(9, 11, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
}
@Test
public void testAcknowledgeWithAnotherMemberRollbackBatchError() {
    // Three batches are acquired by alternating members: 5-9 by MEMBER_ID, 10-14 by
    // "member-2", 15-19 by MEMBER_ID. MEMBER_ID then tries to acknowledge all three;
    // the middle batch belongs to another member, so the whole request must fail and
    // every batch must be rolled back to its pre-acknowledge state.
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(10, 5)), FETCH_ISOLATION_HWM);
    fetchAcquiredRecords(sharePartition, memoryRecords(15, 5), 5);
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)),
        // Acknowledging batch with another member will cause failure and rollback.
        new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.ACCEPT.id)),
        new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
    // State should be rolled back to the previous state for any changes.
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
    assertEquals("member-2", sharePartition.cachedState().get(10L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount());
}
@Test
public void testAcknowledgeWithAnotherMemberRollbackSubsetError() {
    // Batches 5-9 and 10-14 are acquired by MEMBER_ID and 15-19 by "member-2".
    // MEMBER_ID acknowledges its own two batches plus a subset (16-18) of the other
    // member's batch; the subset ownership mismatch must fail the whole request and
    // roll back all three batches.
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(15, 5)), FETCH_ISOLATION_HWM);
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.ACCEPT.id)),
        // Acknowledging subset with another member will cause failure and rollback.
        new ShareAcknowledgementBatch(16, 18, List.of(AcknowledgeType.ACCEPT.id))));
    assertTrue(ackResult.isCompletedExceptionally());
    assertFutureThrows(InvalidRecordStateException.class, ackResult);
    assertEquals(3, sharePartition.cachedState().size());
    // Check the state of the cache. State should be rolled back to the previous state for any changes.
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
    assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId());
    assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount());
}
@Test
public void testMaxDeliveryCountLimitExceededForRecordBatch() {
    SharePartition partition = SharePartitionBuilder.builder()
        .withMaxDeliveryCount(2)
        .withState(SharePartitionState.ACTIVE)
        .build();
    MemoryRecords batch = memoryRecords(5, 10);
    // Deliver and release the 5-14 batch twice; the second release pushes every
    // record past the max delivery count of 2.
    fetchAcquiredRecords(partition, batch, 10);
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.RELEASE.id))));
    fetchAcquiredRecords(partition, batch, 10);
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 14, List.of(AcknowledgeType.RELEASE.id))));
    // All the records in the batch reached the max delivery count, hence they got
    // archived and the cached state cleared.
    assertEquals(15, partition.nextFetchOffset());
    assertEquals(15, partition.startOffset());
    assertEquals(15, partition.endOffset());
    assertEquals(0, partition.cachedState().size());
}
@Test
public void testMaxDeliveryCountLimitExceededForRecordsSubset() {
    // Records released twice exceed the max delivery count of 2 and are archived;
    // once every record is terminal (ACKNOWLEDGED or ARCHIVED) the cache empties.
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withMaxDeliveryCount(2)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // First fetch request with 5 records starting from offset 10.
    MemoryRecords records1 = memoryRecords(10, 5);
    // Second fetch request with 5 records starting from offset 15.
    MemoryRecords records2 = memoryRecords(15, 5);
    fetchAcquiredRecords(sharePartition, records1, 5);
    fetchAcquiredRecords(sharePartition, records2, 5);
    // Pass the batches as an immutable List.of directly, consistent with every
    // other acknowledge call site in this class (no mutable copy is needed).
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(10, 12, List.of(AcknowledgeType.ACCEPT.id)),
        new ShareAcknowledgementBatch(13, 16, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(17, 19, List.of(AcknowledgeType.ACCEPT.id))));
    // Send next batch from offset 13, only 2 records should be acquired.
    fetchAcquiredRecords(sharePartition, records1, 2);
    // Send next batch from offset 15, only 2 records should be acquired.
    fetchAcquiredRecords(sharePartition, records2, 2);
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(13, 16, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(20, sharePartition.nextFetchOffset());
    // Cached state will be empty because after the second release, the acquired records will now have moved to
    // ARCHIVE state, since their max delivery count exceeded. Also, now since all the records are either in ACKNOWLEDGED or ARCHIVED
    // state, cached state should be empty.
    assertEquals(0, sharePartition.cachedState().size());
}
@Test
public void testMaxDeliveryCountLimitExceededForRecordsSubsetAndCachedStateNotCleared() {
    // Only a subset of the batch exceeds the max delivery count; the archived subset
    // stays in the per-offset cached state alongside the still-available records.
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withMaxDeliveryCount(2)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // First fetch request with 5 records starting from offset 0.
    MemoryRecords records1 = memoryRecords(5);
    fetchAcquiredRecords(sharePartition, records1, 5);
    // Pass the batch as an immutable List.of directly, consistent with every other
    // acknowledge call site in this class (no mutable copy is needed).
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 1, List.of(AcknowledgeType.RELEASE.id))));
    // Send next batch from offset 0, only 2 records should be acquired.
    fetchAcquiredRecords(sharePartition, memoryRecords(2), 2);
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(0, 4, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(2, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    // Offsets 0-1 were released twice (delivery count 2) and are archived; 2-4 were
    // released once and remain available.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(2L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(3L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(4L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(0L).offsetState());
}
@Test
public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() {
    SharePartition partition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords firstBatch = memoryRecords(10);
    String member1 = "memberId-1";
    String member2 = "memberId-2";
    // Sequential acquisitions by two members do not require a next-fetch-offset scan.
    partition.acquire(member1, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(firstBatch), FETCH_ISOLATION_HWM);
    assertFalse(partition.findNextFetchOffset());
    assertEquals(10, partition.nextFetchOffset());
    partition.acquire(member2, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(10, 10)), FETCH_ISOLATION_HWM);
    assertFalse(partition.findNextFetchOffset());
    assertEquals(20, partition.nextFetchOffset());
    // Releasing 5-9 creates a hole before the end offset, so a scan is required and
    // the next fetch offset drops back to the released position.
    partition.acknowledge(member1, List.of(
        new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id))));
    assertTrue(partition.findNextFetchOffset());
    assertEquals(5, partition.nextFetchOffset());
    // Re-acquiring the released records restores the next fetch offset to the end.
    partition.acquire(member1, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(firstBatch), FETCH_ISOLATION_HWM);
    assertTrue(partition.findNextFetchOffset());
    assertEquals(20, partition.nextFetchOffset());
    assertFalse(partition.findNextFetchOffset());
}
@Test
public void testNextFetchOffsetWithMultipleConsumers() {
    // Interleaved acquire/release by two consumers: the next fetch offset must always
    // point at the earliest AVAILABLE offset, regardless of which member released it.
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withMaxInflightRecords(100)
        .withState(SharePartitionState.ACTIVE)
        .build();
    MemoryRecords firstBatch = memoryRecords(3);
    String consumerA = MEMBER_ID;
    String consumerB = "member-2";

    // Consumer A acquires offsets 0-2.
    sharePartition.acquire(consumerA, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(firstBatch), FETCH_ISOLATION_HWM);
    assertEquals(3, sharePartition.nextFetchOffset());

    // Releasing 0-2 rewinds the next fetch offset to 0; nothing has reached a terminal state.
    sharePartition.acknowledge(consumerA, List.of(
        new ShareAcknowledgementBatch(0, 2, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(0, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());

    // Consumer B acquires 2-4, but offset 0 is still the earliest available.
    sharePartition.acquire(consumerB, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 3, fetchPartitionData(memoryRecords(3, 2)), FETCH_ISOLATION_HWM);
    assertEquals(0, sharePartition.nextFetchOffset());

    // Consumer A re-acquires 0-2, so the next fetch offset jumps past everything acquired.
    sharePartition.acquire(consumerA, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(firstBatch), FETCH_ISOLATION_HWM);
    assertEquals(5, sharePartition.nextFetchOffset());

    // Consumer B releases 3-4, rewinding the next fetch offset to 3.
    sharePartition.acknowledge(consumerB, List.of(
        new ShareAcknowledgementBatch(3, 4, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(3, sharePartition.nextFetchOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testNumberOfWriteCallsOnUpdates() {
    // Spy on the partition so the number of writeShareGroupState invocations can be counted.
    SharePartition partition = Mockito.spy(SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build());

    fetchAcquiredRecords(partition, memoryRecords(2, 5), 5);
    partition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.ACCEPT.id))));
    // The acknowledgement triggers exactly one write-state RPC (via isWriteShareGroupStateSuccessful).
    Mockito.verify(partition, Mockito.times(1)).writeShareGroupState(anyList());

    partition.releaseAcquiredRecords(MEMBER_ID);
    // The in-flight batch was already acknowledged, so releasing acquired records performs
    // no additional write-state RPC; the total call count stays at 1.
    Mockito.verify(partition, Mockito.times(1)).writeShareGroupState(anyList());
}
// Verifies that offsets released by one member can be re-acquired by a different member,
// that gap (ARCHIVED) offsets are never re-delivered, and that offsets still held by the
// first member keep their original ownership and delivery count.
@Test
public void testReacquireSubsetWithAnotherMember() {
    SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
    MemoryRecords records1 = memoryRecords(5, 5);
    // Acquire offsets 5-9 and 10-21 with the default member.
    fetchAcquiredRecords(sharePartition, records1, 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 12), 12);
    // Release 5-11, 14-15 and 17-20; report 12-13 as gaps (no data at those offsets).
    // Offsets 16 and 21 are intentionally left acquired by MEMBER_ID.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 11, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(12, 13, List.of(ACKNOWLEDGE_TYPE_GAP_ID)),
        new ShareAcknowledgementBatch(14, 15, List.of(AcknowledgeType.RELEASE.id)),
        new ShareAcknowledgementBatch(17, 20, List.of(AcknowledgeType.RELEASE.id))));
    // Records 12-13 have been identified as gaps, hence they are kept in the cache as ARCHIVED state.
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Reacquire with another member.
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM);
    assertEquals(10, sharePartition.nextFetchOffset());
    // Reacquire with another member.
    sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(10, 7)), FETCH_ISOLATION_HWM);
    assertEquals(17, sharePartition.nextFetchOffset());
    // The whole 5-9 batch transferred to member-2 with an incremented delivery count.
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    assertEquals("member-2", sharePartition.cachedState().get(5L).batchMemberId());
    assertEquals(2, sharePartition.cachedState().get(5L).batchDeliveryCount());
    // Check cached state.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    // Records 10-11, 14-15 were reacquired by member-2.
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2"));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2"));
    // Records 12-13 were kept as gapOffsets, hence they are not reacquired and are kept in ARCHIVED state.
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2"));
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2"));
    // Record 16 was not released in the acknowledgements. It was included in the reacquire by member-2,
    // still its ownership is with member-1 and delivery count is 1.
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    // Records 17-20 were released but fall outside the second reacquire's fetch range (10-16),
    // so they remain AVAILABLE.
    expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
}
@Test
public void testMaybeInitializeWhenReadStateRpcReturnsZeroAvailableRecords() {
    // Persister returns one ARCHIVED batch at 233 followed by 500 single-offset
    // ACKNOWLEDGED batches (234..733): every record is terminal, none is available.
    List<PersisterStateBatch> persistedBatches = new ArrayList<>();
    persistedBatches.add(new PersisterStateBatch(233L, 233L, RecordState.ARCHIVED.id, (short) 1));
    for (long offset = 234L; offset < 734L; offset++) {
        persistedBatches.add(new PersisterStateBatch(offset, offset, RecordState.ACKNOWLEDGED.id, (short) 1));
    }

    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 233L, Errors.NONE.code(), Errors.NONE.message(),
                persistedBatches)))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();

    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());

    // With no available records, the cache stays empty and all offsets collapse to 734
    // (one past the last terminal record).
    assertTrue(sharePartition.cachedState().isEmpty());
    assertEquals(734, sharePartition.nextFetchOffset());
    assertEquals(734, sharePartition.startOffset());
    assertEquals(734, sharePartition.endOffset());
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
// Verifies that offsets acknowledged with RELEASE stay un-acquirable while the persister's
// writeState RPC is in flight (the state transition is uncommitted), and become acquirable
// only once the RPC future completes.
@Test
public void testAcquireWithWriteShareGroupStateDelay() {
    Persister persister = Mockito.mock(Persister.class);
    mockPersisterReadStateMethod(persister);
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withPersister(persister)
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true with a delay of 5 sec.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
    // persister.writeState RPC will not complete instantaneously due to which commit won't happen for acknowledged offsets.
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
    // Acquire offsets 0-4 and 5-9.
    fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    List<ShareAcknowledgementBatch> acknowledgementBatches = new ArrayList<>();
    acknowledgementBatches.add(new ShareAcknowledgementBatch(2, 3, List.of(AcknowledgeType.RELEASE.id)));
    acknowledgementBatches.add(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)));
    // Acknowledge 2-3, 5-9 offsets with RELEASE acknowledge type.
    sharePartition.acknowledge(MEMBER_ID, acknowledgementBatches);
    assertEquals(2, sharePartition.cachedState().size());
    // Offsets 2-3 show AVAILABLE but the transition is not yet committed (writeState pending).
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
    // Even though offsets 2-3, 5-9 are in available state, but they won't be acquired since they are still in transition from ACQUIRED
    // to AVAILABLE state as the write state RPC has not completed yet, so the commit hasn't happened yet.
    fetchAcquiredRecords(sharePartition, memoryRecords(15), 5);
    assertEquals(3, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
    // Only the fresh batch 10-14 was acquired by the fetch above.
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
    // persister.writeState RPC will complete now. This is going to commit all the acknowledged batches. Hence, their
    // rollBack state will become null and they will be available for acquire again.
    future.complete(writeShareGroupStateResult);
    // Re-fetch acquires the previously released 7 records (2-3 and 5-9).
    fetchAcquiredRecords(sharePartition, memoryRecords(15), 7);
    assertEquals(3, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(3L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
}
// Verifies that when the persisted state has a gap at the very beginning (start offset 11,
// first persisted batch starting at 21), no offset can be counted as acknowledged and the
// start offset cannot advance.
@Test
public void testComputeStartOffsetAdvanceResultWhenGapAtBeginning() {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
                    new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1)
                ))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    sharePartition.maybeInitialize();
    GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
    assertNotNull(persisterReadResultGapWindow);
    // Since there is a gap in the beginning, the persisterReadResultGapWindow window is same as the cachedState
    assertEquals(11, persisterReadResultGapWindow.gapStartOffset());
    assertEquals(40, persisterReadResultGapWindow.endOffset());
    SharePartition.OffsetAndMetadata result = sharePartition.findLastOffsetAcknowledgedAndMetadata();
    // Since the persisterReadResultGapWindow window begins at startOffset, we cannot count any of the offsets as acknowledged.
    // Thus, lastAckedOffset should be -1 and numTerminalRecords should be 0.
    assertEquals(-1, result.lastAcknowledgedOffset());
    assertEquals(0, result.numTerminalRecords());
}
// Verifies that the start offset cannot move while a whole batch has an uncommitted
// (ongoing) state transition, and that completing the pending writeState future commits
// the transition, advances the start offset and empties the cache.
@Test
public void testCacheUpdateWhenBatchHasOngoingTransition() {
    Persister persister = Mockito.mock(Persister.class);
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .withPersister(persister)
        .build();
    // Acquire a single batch.
    fetchAcquiredRecords(
        sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 21,
            fetchPartitionData(memoryRecords(21, 10)), FETCH_ISOLATION_HWM
        ), 10
    );
    // Validate that there is no ongoing transition.
    assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition());
    // Return a future which will be completed later, so the batch state has ongoing transition.
    CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
    // Acknowledge batch to create ongoing transition.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.ACCEPT.id))));
    // Assert the start offset has not moved and batch has ongoing transition.
    assertEquals(21L, sharePartition.startOffset());
    assertEquals(1, sharePartition.cachedState().size());
    assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition());
    // Validate that offset can't be moved because batch has ongoing transition.
    assertFalse(sharePartition.canMoveStartOffset());
    SharePartition.OffsetAndMetadata result = sharePartition.findLastOffsetAcknowledgedAndMetadata();
    assertEquals(-1, result.lastAcknowledgedOffset());
    assertEquals(0, result.numTerminalRecords());
    // Complete the future so acknowledge API can be completed, which updates the cache.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    future.complete(writeShareGroupStateResult);
    // Validate the cache has been updated: the whole accepted batch 21-30 is committed,
    // so the start offset jumps past it and the cache entry is removed.
    assertEquals(31L, sharePartition.startOffset());
    assertTrue(sharePartition.cachedState().isEmpty());
}
// Verifies the per-offset variant of the ongoing-transition check: acknowledging only a
// subset of a batch switches the batch to offset-level tracking, blocks start offset
// movement while the writeState RPC is pending, and advances the start offset past the
// acknowledged prefix once the RPC completes.
@Test
public void testCacheUpdateWhenOffsetStateHasOngoingTransition() {
    Persister persister = Mockito.mock(Persister.class);
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .withPersister(persister)
        .build();
    // Acquire a single batch.
    fetchAcquiredRecords(
        sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 21,
            fetchPartitionData(memoryRecords(21, 10)), FETCH_ISOLATION_HWM
        ), 10
    );
    // Validate that there is no ongoing transition and batch-level state is in use.
    assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition());
    assertNull(sharePartition.cachedState().get(21L).offsetState());
    // Return a future which will be completed later, so the batch state has ongoing transition.
    CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
    // Acknowledge offsets to create ongoing transition.
    sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 23, List.of(AcknowledgeType.ACCEPT.id))));
    // Assert the start offset has not moved and offset state is now maintained. Offset state should
    // have ongoing transition.
    assertEquals(21L, sharePartition.startOffset());
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(21L).offsetState());
    assertTrue(sharePartition.cachedState().get(21L).offsetState().get(21L).hasOngoingStateTransition());
    assertTrue(sharePartition.cachedState().get(21L).offsetState().get(22L).hasOngoingStateTransition());
    assertTrue(sharePartition.cachedState().get(21L).offsetState().get(23L).hasOngoingStateTransition());
    // Only 21, 22 and 23 offsets should have ongoing state transition as the acknowledge request
    // contains 21-23 offsets.
    assertFalse(sharePartition.cachedState().get(21L).offsetState().get(24L).hasOngoingStateTransition());
    // Validate that offset can't be moved because batch has ongoing transition.
    assertFalse(sharePartition.canMoveStartOffset());
    SharePartition.OffsetAndMetadata result = sharePartition.findLastOffsetAcknowledgedAndMetadata();
    assertEquals(-1, result.lastAcknowledgedOffset());
    assertEquals(0, result.numTerminalRecords());
    // Complete the future so acknowledge API can be completed, which updates the cache.
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    future.complete(writeShareGroupStateResult);
    // Validate the cache has been updated: the start offset moves past the committed
    // prefix 21-23, but the batch entry remains because 24-30 are still in flight.
    assertEquals(24L, sharePartition.startOffset());
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(21L));
}
/**
 * Test the case where the fetch batch has first record offset greater than the record batch start offset.
 * Such batches can exist for compacted topics. The whole declared batch range (including the
 * leading compacted-away offsets) must be acquired, and per-offset acknowledgements must be honored.
 */
@Test
public void testAcquireAndAcknowledgeWithRecordsAheadOfRecordBatchStartOffset() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build();
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    // Set the base offset at 5.
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
        TimestampType.CREATE_TIME, 5, 2)) {
        // Append records from offset 10.
        memoryRecords(10, 2).records().forEach(builder::append);
        // Append records from offset 15.
        memoryRecords(15, 2).records().forEach(builder::append);
    }
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // Complete batch from 5-16 will be acquired, hence 12 records.
    fetchAcquiredRecords(sharePartition, records, 12);
    // Partially acknowledge the batch from 5-16: the leading 5-9 are reported as gaps
    // (compacted away), while 10-16 get a mix of ACCEPT/REJECT/RELEASE.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        new ShareAcknowledgementBatch(5, 9, List.of(ACKNOWLEDGE_TYPE_GAP_ID)),
        new ShareAcknowledgementBatch(10, 11, List.of(AcknowledgeType.ACCEPT.id)),
        new ShareAcknowledgementBatch(12, 14, List.of(AcknowledgeType.REJECT.id)),
        new ShareAcknowledgementBatch(15, 16, List.of(AcknowledgeType.RELEASE.id))));
    // Only the released offsets 15-16 are fetchable again.
    assertEquals(15, sharePartition.nextFetchOffset());
    assertEquals(1, sharePartition.cachedState().size());
    assertNotNull(sharePartition.cachedState().get(5L));
    assertNotNull(sharePartition.cachedState().get(5L).offsetState());
    // after acknowledgements, the start offset moves to 15, and thus there are no Terminal records post that.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Check cached state.
    Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
    expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
    assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState());
}
/**
 * Test the case where the available cached batches never appear again in fetch response within the
 * previous fetch offset range. Also remove records from the previous fetch batches.
 * <p>
 * Such case can arise with compacted topics where complete batches are removed or records within
 * batches are removed.
 */
@Test
public void testAcquireWhenBatchesAreRemovedFromBetweenInSubsequentFetchData() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Create 3 batches of records for a single acquire.
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 0, 5).close();
    memoryRecordsBuilder(buffer, 5, 15).close();
    memoryRecordsBuilder(buffer, 20, 15).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // Acquire batch (0-34) which shall create single cache entry.
    fetchAcquiredRecords(sharePartition, records, 35);
    // Acquire another 3 individual batches of records.
    fetchAcquiredRecords(sharePartition, memoryRecords(40, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(45, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(50, 15), 15);
    // Release all batches in the cache so they are AVAILABLE for re-acquisition.
    sharePartition.releaseAcquiredRecords(MEMBER_ID);
    // Validate cache has 4 entries.
    assertEquals(4, sharePartition.cachedState().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Compact all batches and remove some of the batches from the fetch response.
    buffer = ByteBuffer.allocate(4096);
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
        TimestampType.CREATE_TIME, 0, 2)) {
        // Append only 2 records for 0 offset batch starting from offset 1.
        memoryRecords(1, 2).records().forEach(builder::append);
    }
    // Do not include batch from offset 5. And compact batch starting at offset 20.
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
        TimestampType.CREATE_TIME, 20, 2)) {
        // Append 2 records for 20 offset batch starting from offset 20.
        memoryRecords(20, 2).records().forEach(builder::append);
        // And append 2 records matching the end offset of the batch.
        memoryRecords(33, 2).records().forEach(builder::append);
    }
    // Send the full batch at offset 40.
    memoryRecordsBuilder(buffer, 40, 5).close();
    // Do not include batch from offset 45. And compact the batch at offset 50.
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
        TimestampType.CREATE_TIME, 50, 2)) {
        // Append 5 records for 50 offset batch starting from offset 51.
        memoryRecords(51, 5).records().forEach(builder::append);
        // Append 2 records for in middle of the batch.
        memoryRecords(58, 2).records().forEach(builder::append);
        // And append 1 record prior to the end offset.
        memoryRecords(63, 1).records().forEach(builder::append);
    }
    buffer.flip();
    records = MemoryRecords.readableRecords(buffer);
    // Acquire the new compacted batches. The acquire method determines the acquisition range using
    // the first and last offsets of the fetched batches and acquires all available cached batches
    // within that range. That means the batch from offset 45-49 which is not included in the
    // fetch response will also be acquired. Similarly, for the batch from offset 5-19 which is
    // anyway in the bigger cached batch of 0-34, will also be acquired. This avoids iterating
    // through individual fetched batch boundaries; the client is responsible for reporting any
    // data gaps via acknowledgements. This test also covers the edge case where the last fetched
    // batch is compacted, and its last offset is before the previously cached version's last offset.
    // In this situation, the last batch's offset state tracking is initialized. This is handled
    // correctly because the client will send individual offset acknowledgements, which require offset
    // state tracking anyway. While this last scenario is unlikely in practice (as a batch's reported
    // last offset should remain correct even after compaction), the test verifies its proper handling.
    fetchAcquiredRecords(sharePartition, records, 59);
    assertEquals(64, sharePartition.nextFetchOffset());
    assertEquals(4, sharePartition.cachedState().size());
    sharePartition.cachedState().forEach((offset, inFlightState) -> {
        // All batches other than the last batch should have batch state maintained.
        if (offset < 50) {
            assertNotNull(inFlightState.batchState());
            assertEquals(RecordState.ACQUIRED, inFlightState.batchState());
        } else {
            // The last batch (50-64) is tracked per offset because its fetched last offset (63)
            // is before the cached batch's last offset (64).
            assertNotNull(inFlightState.offsetState());
            inFlightState.offsetState().forEach((recordOffset, offsetState) -> {
                // All offsets other than the last offset should be acquired.
                RecordState recordState = recordOffset < 64 ? RecordState.ACQUIRED : RecordState.AVAILABLE;
                assertEquals(recordState, offsetState.state(), "Incorrect state for offset: " + recordOffset);
            });
        }
    });
    // All in flight records are in a non-Terminal state.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * This test verifies that cached batches which are no longer returned in fetch responses (starting
 * from the fetchOffset) are correctly archived. Archiving these batches is crucial for the SPSO
 * and the next fetch offset to advance. Without archiving, these offsets would be stuck, as the
 * cached batches would remain available.
 * <p>
 * This scenario can occur with compacted topics when entire batches, previously held in the cache,
 * are removed from the log at the offset where reading occurs.
 */
@Test
public void testAcquireWhenBatchesRemovedForFetchOffset() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire offsets 0-4, 5-9 and 10-24 as three separate batches.
    fetchAcquiredRecords(sharePartition, memoryRecords(5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 15), 15);
    // Release the batches in the cache.
    sharePartition.releaseAcquiredRecords(MEMBER_ID);
    // Validate cache has 3 entries.
    assertEquals(3, sharePartition.cachedState().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Compact second batch and remove first batch from the fetch response.
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
        TimestampType.CREATE_TIME, 5, 2)) {
        // Append only 4 records for 5th offset batch starting from offset 6.
        memoryRecords(6, 4).records().forEach(builder::append);
    }
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // Only second batch should be acquired and first batch offsets should be archived. Send
    // fetchOffset as 0.
    fetchAcquiredRecords(sharePartition, records, 0, 0, 5);
    assertEquals(10, sharePartition.nextFetchOffset());
    // The next fetch offset has been updated, but the start offset should remain unchanged since
    // the acquire operation only marks offsets as archived. The start offset will be correctly
    // updated once any records are acknowledged.
    assertEquals(0, sharePartition.startOffset());
    // Releasing acquired records updates the cache and moves the start offset.
    sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertEquals(5, sharePartition.startOffset());
    assertEquals(5, sharePartition.nextFetchOffset());
    // Validate first batch has been removed from the cache.
    assertEquals(2, sharePartition.cachedState().size());
    sharePartition.cachedState().forEach((offset, inFlightState) -> {
        assertNotNull(inFlightState.batchState());
        assertEquals(RecordState.AVAILABLE, inFlightState.batchState());
    });
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
 * This test verifies that cached batches which are no longer returned in fetch responses are
 * correctly archived, when fetchOffset is within an already cached batch. Archiving these batches/offsets
 * is crucial for the SPSO and the next fetch offset to advance.
 * <p>
 * This scenario can occur with compacted topics when fetch triggers from an offset which is within
 * a cached batch, and respective batch is removed from the log.
 */
@Test
public void testAcquireWhenBatchesRemovedForFetchOffsetWithinBatch() {
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withState(SharePartitionState.ACTIVE)
        .build();
    // Acquire offsets 5-9 and 10-24 as two batches.
    fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 15), 15);
    // Acknowledge subset of the first batch offsets.
    sharePartition.acknowledge(MEMBER_ID, List.of(
        // Accept the 3 offsets of first batch.
        new ShareAcknowledgementBatch(5, 7, List.of(AcknowledgeType.ACCEPT.id)))).join();
    // After acknowledgements, the start offset moves past Terminal records, hence deliveryCompleteCount is 0.
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Release the remaining batches/offsets in the cache.
    sharePartition.releaseAcquiredRecords(MEMBER_ID).join();
    // Validate cache has 2 entries.
    assertEquals(2, sharePartition.cachedState().size());
    assertEquals(0, sharePartition.deliveryCompleteCount());
    // Mark fetch offset within the first batch to 8, first available offset. The fetched
    // data however starts at offset 10, so offsets 8-9 no longer exist in the log.
    fetchAcquiredRecords(sharePartition, memoryRecords(10, 15), 8, 0, 15);
    assertEquals(25, sharePartition.nextFetchOffset());
    // The next fetch offset has been updated, but the start offset should remain unchanged since
    // the acquire operation only marks offsets as archived. The start offset will be correctly
    // updated once any records are acknowledged.
    assertEquals(8, sharePartition.startOffset());
    // Since the fetchOffset in the acquire request was prior to the actual records fetched, the records 8 and 9 are marked
    // as ARCHIVED. Thus, there are 2 Terminal records in the cache.
    assertEquals(2, sharePartition.deliveryCompleteCount());
    // Releasing acquired records updates the cache and moves the start offset.
    sharePartition.releaseAcquiredRecords(MEMBER_ID);
    assertEquals(10, sharePartition.startOffset());
    assertEquals(10, sharePartition.nextFetchOffset());
    // Validate first batch has been removed from the cache.
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState());
    // Since the start offset has moved past all Terminal records, the count is 0.
    assertEquals(0, sharePartition.deliveryCompleteCount());
}
/**
* This test verifies that when cached batch consists of multiple fetched batches but batches are
* removed from the log, starting at fetch offset, then cached batch is updated.
* <p>
* This scenario can occur with compacted topics when entire batches, previously held in the cache,
* are removed from the log at the offset where reading occurs.
*/
@Test
public void testAcquireWhenBatchesRemovedForFetchOffsetForSameCachedBatch() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build();
// Create 3 batches of records for a single acquire.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 0, 5).close();
memoryRecordsBuilder(buffer, 5, 15).close();
memoryRecordsBuilder(buffer, 20, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire batch (0-34) which shall create single cache entry.
fetchAcquiredRecords(sharePartition, records, 35);
// Release the batches in the cache.
sharePartition.releaseAcquiredRecords(MEMBER_ID);
// Validate cache has 1 entry.
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Compact second batch and remove first batch from the fetch response.
buffer = ByteBuffer.allocate(4096);
try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
TimestampType.CREATE_TIME, 5, 2)) {
// Append only 4 records for 5th offset batch starting from offset 6.
memoryRecords(6, 4).records().forEach(builder::append);
}
buffer.flip();
records = MemoryRecords.readableRecords(buffer);
// Only second batch should be acquired and first batch offsets should be archived. Send
// fetchOffset as 0.
fetchAcquiredRecords(sharePartition, records, 0, 0, 5);
assertEquals(10, sharePartition.nextFetchOffset());
// The next fetch offset has been updated, but the start offset should remain unchanged since
// the acquire operation only marks offsets as archived. The start offset will be correctly
// updated once any records are acknowledged.
assertEquals(0, sharePartition.startOffset());
assertEquals(5, sharePartition.deliveryCompleteCount());
// Releasing acquired records updates the cache and moves the start offset.
sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertEquals(5, sharePartition.startOffset());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
sharePartition.cachedState().forEach((offset, inFlightState) -> {
assertNotNull(inFlightState.offsetState());
inFlightState.offsetState().forEach((recordOffset, offsetState) -> {
RecordState recordState = recordOffset < 5 ? RecordState.ARCHIVED : RecordState.AVAILABLE;
assertEquals(recordState, offsetState.state());
});
});
assertEquals(0, sharePartition.deliveryCompleteCount());
}
private String assertionFailedMessage(SharePartition sharePartition, Map<Long, List<Long>> offsets) {
StringBuilder errorMessage = new StringBuilder(ACQUISITION_LOCK_NEVER_GOT_RELEASED + String.format(
" timer size: %d, next fetch offset: %d\n",
sharePartition.timer().size(),
sharePartition.nextFetchOffset()));
for (Map.Entry<Long, List<Long>> entry : offsets.entrySet()) {
if (entry.getValue() != null && !entry.getValue().isEmpty()) {
errorMessage.append(String.format("batch start offset: %d\n", entry.getKey()));
for (Long offset : entry.getValue()) {
errorMessage.append(String.format("\toffset: %d, offset state: %s, offset acquisition lock timeout task present: %b\n",
offset, sharePartition.cachedState().get(entry.getKey()).offsetState().get(offset).state().id(),
sharePartition.cachedState().get(entry.getKey()).offsetState().get(offset).acquisitionLockTimeoutTask() != null));
}
} else {
errorMessage.append(String.format("batch start offset: %d, batch state: %s, batch acquisition lock timeout task present: %b\n",
entry.getKey(), sharePartition.cachedState().get(entry.getKey()).batchState().id(),
sharePartition.cachedState().get(entry.getKey()).batchAcquisitionLockTimeoutTask() != null));
}
}
return errorMessage.toString();
}
    @Test
    public void testFilterRecordBatchesFromAcquiredRecords() {
        // Verifies that filterRecordBatchesFromAcquiredRecords subtracts the offset ranges covered by
        // the given record batches from the acquired records, splitting acquired ranges where a batch
        // falls strictly inside them and preserving the delivery count of the split pieces.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .build();

        // Case 1: batches (2-4) and (12-14) each fall inside an acquired range, splitting the first
        // two ranges; the third acquired range (20-25) is untouched.
        List<AcquiredRecords> acquiredRecords1 = List.of(
            new AcquiredRecords().setFirstOffset(1).setLastOffset(5).setDeliveryCount((short) 1),
            new AcquiredRecords().setFirstOffset(10).setLastOffset(15).setDeliveryCount((short) 2),
            new AcquiredRecords().setFirstOffset(20).setLastOffset(25).setDeliveryCount((short) 1)
        );
        List<RecordBatch> recordBatches1 = List.of(
            memoryRecordsBuilder(2, 3).build().batches().iterator().next(),
            memoryRecordsBuilder(12, 3).build().batches().iterator().next()
        );
        assertEquals(
            List.of(
                new AcquiredRecords().setFirstOffset(1).setLastOffset(1).setDeliveryCount((short) 1),
                new AcquiredRecords().setFirstOffset(5).setLastOffset(5).setDeliveryCount((short) 1),
                new AcquiredRecords().setFirstOffset(10).setLastOffset(11).setDeliveryCount((short) 2),
                new AcquiredRecords().setFirstOffset(15).setLastOffset(15).setDeliveryCount((short) 2),
                new AcquiredRecords().setFirstOffset(20).setLastOffset(25).setDeliveryCount((short) 1)),
            sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords1, recordBatches1));

        // Case 2: batch (5-25) fully covers the second acquired range and part of the third; batch
        // (31-35) trims the front of the fourth range.
        List<AcquiredRecords> acquiredRecords2 = List.of(
            new AcquiredRecords().setFirstOffset(1).setLastOffset(4).setDeliveryCount((short) 3),
            new AcquiredRecords().setFirstOffset(5).setLastOffset(8).setDeliveryCount((short) 3),
            new AcquiredRecords().setFirstOffset(9).setLastOffset(30).setDeliveryCount((short) 2),
            new AcquiredRecords().setFirstOffset(31).setLastOffset(40).setDeliveryCount((short) 3)
        );
        List<RecordBatch> recordBatches2 = List.of(
            memoryRecordsBuilder(5, 21).build().batches().iterator().next(),
            memoryRecordsBuilder(31, 5).build().batches().iterator().next()
        );
        assertEquals(
            List.of(
                new AcquiredRecords().setFirstOffset(1).setLastOffset(4).setDeliveryCount((short) 3),
                new AcquiredRecords().setFirstOffset(26).setLastOffset(30).setDeliveryCount((short) 2),
                new AcquiredRecords().setFirstOffset(36).setLastOffset(40).setDeliveryCount((short) 3)
            ), sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords2, recordBatches2)
        );

        // Record batches is empty. The acquired records are returned unchanged.
        assertEquals(acquiredRecords2, sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords2, List.of()));

        // Case 3: two single-record batches (offset 8 and offset 18) punch two holes into one large
        // acquired range (0-19), producing three remaining pieces.
        List<AcquiredRecords> acquiredRecords3 = List.of(
            new AcquiredRecords().setFirstOffset(0).setLastOffset(19).setDeliveryCount((short) 1)
        );
        List<RecordBatch> recordBatches3 = List.of(
            memoryRecordsBuilder(8, 1).build().batches().iterator().next(),
            memoryRecordsBuilder(18, 1).build().batches().iterator().next()
        );
        assertEquals(
            List.of(
                new AcquiredRecords().setFirstOffset(0).setLastOffset(7).setDeliveryCount((short) 1),
                new AcquiredRecords().setFirstOffset(9).setLastOffset(17).setDeliveryCount((short) 1),
                new AcquiredRecords().setFirstOffset(19).setLastOffset(19).setDeliveryCount((short) 1)
            ), sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords3, recordBatches3)
        );
    }
@Test
public void testAcquireWithReadCommittedIsolationLevel() {
SharePartition sharePartition = Mockito.spy(SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build());
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 5).close();
memoryRecordsBuilder(buffer, 20, 15).close();
memoryRecordsBuilder(buffer, 50, 8).close();
memoryRecordsBuilder(buffer, 58, 10).close();
memoryRecordsBuilder(buffer, 70, 5).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
FetchPartitionData fetchPartitionData = fetchPartitionData(records, newAbortedTransactions());
// We are mocking the result of function fetchAbortedTransactionRecordBatches. The records present at these offsets need to be archived.
// We won't be utilizing the aborted transactions passed in fetchPartitionData.
when(sharePartition.fetchAbortedTransactionRecordBatches(fetchPartitionData.records.batches(), fetchPartitionData.abortedTransactions.get())).thenReturn(
List.of(
memoryRecordsBuilder(10, 5).build().batches().iterator().next(),
memoryRecordsBuilder(58, 10).build().batches().iterator().next(),
memoryRecordsBuilder(70, 5).build().batches().iterator().next()
)
);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(
sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
10 /* Batch size */,
100,
DEFAULT_FETCH_OFFSET,
fetchPartitionData,
FetchIsolation.TXN_COMMITTED),
45 /* Gap of 15 records will be added to second batch, gap of 2 records will also be added to fourth batch */);
assertEquals(List.of(
new AcquiredRecords().setFirstOffset(15).setLastOffset(19).setDeliveryCount((short) 1),
new AcquiredRecords().setFirstOffset(20).setLastOffset(49).setDeliveryCount((short) 1),
new AcquiredRecords().setFirstOffset(50).setLastOffset(57).setDeliveryCount((short) 1),
new AcquiredRecords().setFirstOffset(68).setLastOffset(69).setDeliveryCount((short) 1)
), acquiredRecordsList);
assertEquals(75, sharePartition.nextFetchOffset());
// Checking cached state.
assertEquals(4, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().containsKey(10L));
assertTrue(sharePartition.cachedState().containsKey(20L));
assertTrue(sharePartition.cachedState().containsKey(50L));
assertTrue(sharePartition.cachedState().containsKey(70L));
assertNotNull(sharePartition.cachedState().get(10L).offsetState());
assertNotNull(sharePartition.cachedState().get(50L).offsetState());
assertEquals(19L, sharePartition.cachedState().get(10L).lastOffset());
assertEquals(49L, sharePartition.cachedState().get(20L).lastOffset());
assertEquals(69L, sharePartition.cachedState().get(50L).lastOffset());
assertEquals(74L, sharePartition.cachedState().get(70L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(70L).batchState());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(70L).batchMemberId());
assertNotNull(sharePartition.cachedState().get(20L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(70L).batchAcquisitionLockTimeoutTask());
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask());
expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(50L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(51L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(52L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(53L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(54L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(55L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(56L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(57L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(58L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(59L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(60L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(61L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(62L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(63L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(64L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(65L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(66L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(67L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(68L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
expectedOffsetStateMap.put(69L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(50L).offsetState());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(50L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(51L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(52L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(53L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(54L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(55L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(56L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(57L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(58L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(59L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(60L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(61L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(62L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(63L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(64L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(65L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(66L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(50L).offsetState().get(67L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(68L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(69L).acquisitionLockTimeoutTask());
}
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
public void testContainsAbortMarker() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build();
// Record batch is not a control batch.
RecordBatch recordBatch = mock(RecordBatch.class);
when(recordBatch.isControlBatch()).thenReturn(false);
assertFalse(sharePartition.containsAbortMarker(recordBatch));
// Record batch is a control batch but doesn't contain any records.
recordBatch = mock(RecordBatch.class);
Iterator batchIterator = mock(Iterator.class);
when(batchIterator.hasNext()).thenReturn(false);
when(recordBatch.iterator()).thenReturn(batchIterator);
when(recordBatch.isControlBatch()).thenReturn(true);
assertFalse(sharePartition.containsAbortMarker(recordBatch));
// Record batch is a control batch which contains a record of type ControlRecordType.ABORT.
recordBatch = mock(RecordBatch.class);
batchIterator = mock(Iterator.class);
when(batchIterator.hasNext()).thenReturn(true);
DefaultRecord record = mock(DefaultRecord.class);
ByteBuffer buffer = ByteBuffer.allocate(4096);
// Buffer has to be created in a way that ControlRecordType.parse(buffer) returns ControlRecordType.ABORT.
buffer.putShort((short) 5);
buffer.putShort(ControlRecordType.ABORT.type());
buffer.putInt(23432); // some field added in version 5
buffer.flip();
when(record.key()).thenReturn(buffer);
when(batchIterator.next()).thenReturn(record);
when(recordBatch.iterator()).thenReturn(batchIterator);
when(recordBatch.isControlBatch()).thenReturn(true);
assertTrue(sharePartition.containsAbortMarker(recordBatch));
// Record batch is a control batch which contains a record of type ControlRecordType.COMMIT.
recordBatch = mock(RecordBatch.class);
batchIterator = mock(Iterator.class);
when(batchIterator.hasNext()).thenReturn(true);
record = mock(DefaultRecord.class);
buffer = ByteBuffer.allocate(4096);
// Buffer has to be created in a way that ControlRecordType.parse(buffer) returns ControlRecordType.COMMIT.
buffer.putShort((short) 5);
buffer.putShort(ControlRecordType.COMMIT.type());
buffer.putInt(23432); // some field added in version 5
buffer.flip();
when(record.key()).thenReturn(buffer);
when(batchIterator.next()).thenReturn(record);
when(recordBatch.iterator()).thenReturn(batchIterator);
when(recordBatch.isControlBatch()).thenReturn(true);
assertFalse(sharePartition.containsAbortMarker(recordBatch));
}
    @Test
    public void testFetchAbortedTransactionRecordBatchesForOnlyAbortedTransactions() {
        // Exercises fetchAbortedTransactionRecordBatches where the fetched data contains only aborted
        // transactions: a batch is returned when its producer id and offset range match an entry in
        // the aborted transactions list.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .build();

        // Case 1 - Creating 10 transactional records in a single batch followed by a ABORT marker record for producerId 1.
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 10, 1, 0);
        buffer.flip();
        Records records = MemoryRecords.readableRecords(buffer);
        List<FetchResponseData.AbortedTransaction> abortedTransactions = List.of(
            new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1)
        );
        // records from 0 to 9 should be archived because they are a part of aborted transactions.
        List<RecordBatch> actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions);
        assertEquals(1, actual.size());
        assertEquals(0, actual.get(0).baseOffset());
        assertEquals(9, actual.get(0).lastOffset());
        assertEquals(1, actual.get(0).producerId());

        // Case 2: 3 individual batches each followed by a ABORT marker record for producerId 1.
        // Each single-record batch (at offsets 0, 2 and 4) should be returned separately.
        buffer = ByteBuffer.allocate(1024);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 0);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 2);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 4);
        buffer.flip();
        records = MemoryRecords.readableRecords(buffer);
        abortedTransactions = List.of(
            new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(2).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(4).setProducerId(1)
        );
        actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions);
        assertEquals(3, actual.size());
        assertEquals(0, actual.get(0).baseOffset());
        assertEquals(0, actual.get(0).lastOffset());
        assertEquals(1, actual.get(0).producerId());
        assertEquals(2, actual.get(1).baseOffset());
        assertEquals(2, actual.get(1).lastOffset());
        assertEquals(1, actual.get(1).producerId());
        assertEquals(4, actual.get(2).baseOffset());
        assertEquals(4, actual.get(2).lastOffset());
        assertEquals(1, actual.get(2).producerId());

        // Case 3: The producer id of records is different, so they should not be archived,
        buffer = ByteBuffer.allocate(1024);
        // We are creating 10 transactional records followed by a ABORT marker record for producerId 2.
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 10, 2, 0);
        buffer.flip();
        records = MemoryRecords.readableRecords(buffer);
        // The aborted transaction references producerId 1, which does not match the batch's producerId 2.
        abortedTransactions = List.of(
            new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1)
        );
        actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions);
        assertEquals(0, actual.size());
    }
    @Test
    public void testFetchAbortedTransactionRecordBatchesForAbortedAndCommittedTransactions() {
        // Exercises fetchAbortedTransactionRecordBatches with a mix of aborted and committed
        // transactions from two producers; only batches matched by producer id and offset against
        // the aborted transactions list should be returned.
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .build();

        // Layout (2 records per transaction, marker after each):
        //  0-1  ABORT  producer 1
        //  3-4  COMMIT producer 2
        //  6-7  ABORT  producer 2
        //  9-10 ABORT  producer 1
        // 12-13 COMMIT producer 1
        // 15-16 ABORT  producer 1
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 0);
        newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 2, 3);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 2, 6);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 9);
        newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 1, 12);
        newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 15);
        buffer.flip();
        Records records = MemoryRecords.readableRecords(buffer);

        // Case 1 - Aborted transactions does not contain the record batch from offsets 6-7 with producer id 2.
        // The entry with firstOffset 6 uses producer id 1, so batch 6-7 (producer 2) is not matched.
        List<FetchResponseData.AbortedTransaction> abortedTransactions = List.of(
            new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(6).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(9).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(15).setProducerId(1)
        );
        List<RecordBatch> actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions);
        assertEquals(3, actual.size());
        assertEquals(0, actual.get(0).baseOffset());
        assertEquals(1, actual.get(0).lastOffset());
        assertEquals(1, actual.get(0).producerId());
        assertEquals(9, actual.get(1).baseOffset());
        assertEquals(10, actual.get(1).lastOffset());
        assertEquals(1, actual.get(1).producerId());
        assertEquals(15, actual.get(2).baseOffset());
        assertEquals(16, actual.get(2).lastOffset());
        assertEquals(1, actual.get(2).producerId());

        // Case 2 - Aborted transactions contains the record batch from offsets 6-7 with producer id 2.
        // With matching producer id 2 at firstOffset 6, batch 6-7 is now also returned.
        abortedTransactions = List.of(
            new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(6).setProducerId(2),
            new FetchResponseData.AbortedTransaction().setFirstOffset(9).setProducerId(1),
            new FetchResponseData.AbortedTransaction().setFirstOffset(15).setProducerId(1)
        );
        actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions);
        assertEquals(4, actual.size());
        assertEquals(0, actual.get(0).baseOffset());
        assertEquals(1, actual.get(0).lastOffset());
        assertEquals(1, actual.get(0).producerId());
        assertEquals(6, actual.get(1).baseOffset());
        assertEquals(7, actual.get(1).lastOffset());
        assertEquals(2, actual.get(1).producerId());
        assertEquals(9, actual.get(2).baseOffset());
        assertEquals(10, actual.get(2).lastOffset());
        assertEquals(1, actual.get(2).producerId());
        assertEquals(15, actual.get(3).baseOffset());
        assertEquals(16, actual.get(3).lastOffset());
        assertEquals(1, actual.get(3).producerId());
    }
@Test
public void testTerminalRecordsUpdatedWhenAbortedTransactionBatchesAreArchived() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build();
// Create 3 batches: first batch (0-8), middle batch with ABORTED transactions (10-18), last batch (20-28), each having
// a transaction marker at the end.
ByteBuffer buffer = ByteBuffer.allocate(2048);
// First batch: normal records (0-8)
newTransactionalRecords(buffer, ControlRecordType.COMMIT, 9, 1, 0);
// Middle batch: ABORTED transaction records (10-18)
newTransactionalRecords(buffer, ControlRecordType.ABORT, 9, 2, 10);
// Last batch: normal records (20-28)
newTransactionalRecords(buffer, ControlRecordType.COMMIT, 9, 3, 20);
buffer.flip();
Records records = MemoryRecords.readableRecords(buffer);
// Create aborted transactions list for the middle batch
List<FetchResponseData.AbortedTransaction> abortedTransactions = List.of(
new FetchResponseData.AbortedTransaction().setFirstOffset(10).setProducerId(2)
);
FetchPartitionData fetchPartitionData = fetchPartitionData(records, abortedTransactions);
// The batchSize is set to 1 to make sure that the batch with ABORTED transactions don't contain the transaction end
// marker. During the acquire methodology, initially all 30 records will be acquired. But when the aborted transactions
// are filtered, records 10 -> 18 will be filtered out of acquired records, leaving the acquired records count to be 21.
ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire(
MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, 1, MAX_FETCH_RECORDS, 0, fetchPartitionData, FetchIsolation.TXN_COMMITTED
);
// Verify that 21 records were acquired.
assertEquals(21, shareAcquiredRecords.count());
assertEquals(6, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(9L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(29L).batchState());
// Records 10 -> 18 are ARCHIVED, hence deliveryCompleteCount should be 9.
assertEquals(9, sharePartition.deliveryCompleteCount());
}
@Test
public void testTerminalRecordsUpdatedWhenAbortedTransactionOffsetsAreArchived() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build();
// Create 3 batches: first batch (0-1), middle batch with ABORTED transactions (3-4), last batch (6-7), each having
// a transaction marker at the end.
ByteBuffer buffer = ByteBuffer.allocate(2048);
// First batch: normal records (0-1)
newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 1, 0);
// Middle batch: ABORTED transaction records (3-4)
newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 2, 3);
// Last batch: normal records (6-7)
newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 3, 6);
buffer.flip();
Records records = MemoryRecords.readableRecords(buffer);
// Create aborted transactions list for the middle batch
List<FetchResponseData.AbortedTransaction> abortedTransactions = List.of(
new FetchResponseData.AbortedTransaction().setFirstOffset(3).setProducerId(2)
);
FetchPartitionData fetchPartitionData = fetchPartitionData(records, abortedTransactions);
// All the 9 records will be acquired as a single cached state batch. During the acquire code flow, initially all
// 9 records will be acquired. But when the aborted transactions are filtered, records 3 -> 4 will be filtered
// out of acquired records, leaving the acquired records count to be 7.
ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire(
MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 0, fetchPartitionData, FetchIsolation.TXN_COMMITTED
);
// Verify that 7 records were acquired.
assertEquals(7, shareAcquiredRecords.count());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(0L).offsetState().get(3L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(0L).offsetState().get(4L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(5L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(6L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(7L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(8L).state());
// Records 3 -> 4 are ARCHIVED, hence deliveryCompleteCount should be 2.
assertEquals(2, sharePartition.deliveryCompleteCount());
}
@Test
public void testFetchLockReleasedByDifferentId() {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.build();
Uuid fetchId1 = Uuid.randomUuid();
Uuid fetchId2 = Uuid.randomUuid();
// Initially, fetch lock is not acquired.
assertNull(sharePartition.fetchLock());
// fetchId1 acquires the fetch lock.
assertTrue(sharePartition.maybeAcquireFetchLock(fetchId1));
// If we release fetch lock by fetchId2, it will work. Currently, we have kept the release of fetch lock as non-strict
// such that even if the caller's id for releasing fetch lock does not match the id that holds the lock, we will
// still release it. This has been done to avoid the scenarios where we hold the fetch lock for a share partition
// forever due to faulty code. In the future, we plan to make the locks handling strict, then this test case needs to be updated.
sharePartition.releaseFetchLock(fetchId2);
assertNull(sharePartition.fetchLock()); // Fetch lock has been released.
}
@Test
public void testAcquireWhenBatchHasOngoingTransition() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withPersister(persister)
.build();
// Acquire a single batch with member-1.
fetchAcquiredRecords(
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 21,
fetchPartitionData(memoryRecords(21, 10)), FETCH_ISOLATION_HWM
), 10
);
// Validate that there is no ongoing transition.
assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition());
// Return a future which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
// Acknowledge batch to create ongoing transition.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.RELEASE.id))));
// Since the future is not yet completed, deliveryCompleteCount will not be updated yet.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Assert the start offset has not moved and batch has ongoing transition.
assertEquals(21L, sharePartition.startOffset());
assertEquals(1, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId());
// Acquire the same batch with member-2. This function call will return with 0 records since there is an ongoing
// transition for this batch.
fetchAcquiredRecords(
sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 21,
fetchPartitionData(memoryRecords(21, 10)), FETCH_ISOLATION_HWM
), 0
);
// Since no new records are acquired, deliveryCompleteCount will remain the same.
assertEquals(0, sharePartition.deliveryCompleteCount());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId());
// Complete the future so acknowledge API can be completed, which updates the cache. Now the records can be acquired.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
future.complete(writeShareGroupStateResult);
// Since the records successfully acknowledged are moved to AVAILABLE state, deliveryCompleteCount will still not change.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Acquire the same batch with member-2. 10 records will be acquired.
fetchAcquiredRecords(
sharePartition.acquire("member-2", ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 21,
fetchPartitionData(memoryRecords(21, 10)), FETCH_ISOLATION_HWM
), 10
);
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
assertEquals("member-2", sharePartition.cachedState().get(21L).batchMemberId());
assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testNextFetchOffsetWhenBatchHasOngoingTransition() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withPersister(persister)
.build();
// Acquire a single batch 0-9 with member-1.
fetchAcquiredRecords(
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 0,
fetchPartitionData(memoryRecords(10)), FETCH_ISOLATION_HWM
), 10
);
// Acquire a single batch 10-19 with member-1.
fetchAcquiredRecords(
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 10,
fetchPartitionData(memoryRecords(10, 10)), FETCH_ISOLATION_HWM
), 10
);
// Validate that there is no ongoing transition.
assertEquals(2, sharePartition.cachedState().size());
assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
// Return futures which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future1 = new CompletableFuture<>();
CompletableFuture<WriteShareGroupStateResult> future2 = new CompletableFuture<>();
// Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for
// offsets 0-9 and 10-19 respectively.
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
// Acknowledge batch to create ongoing transition.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 9, List.of(AcknowledgeType.RELEASE.id))));
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 19, List.of(AcknowledgeType.RELEASE.id))));
// deliveryCompleteCount will not be updated, because the acknowledgment type is RELEASE.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Complete future2 so second acknowledge API can be completed, which updates the cache.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
future2.complete(writeShareGroupStateResult);
// Since the records successfully acknowledged are moved to AVAILABLE state, deliveryCompleteCount will still not change.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Offsets 0-9 will have ongoing state transition since future1 is not complete yet.
// Offsets 10-19 won't have ongoing state transition since future2 has been completed.
assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition());
// nextFetchOffset should return 10 and not 0 because batch 0-9 is undergoing state transition.
assertEquals(10, sharePartition.nextFetchOffset());
}
@Test
public void testNextFetchOffsetWhenOffsetsHaveOngoingTransition() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withPersister(persister)
.build();
// Acquire a single batch 0-50 with member-1.
fetchAcquiredRecords(
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 0,
fetchPartitionData(memoryRecords(50)), FETCH_ISOLATION_HWM
), 50
);
// Validate that there is no ongoing transition.
assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition());
// Return futures which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future1 = new CompletableFuture<>();
CompletableFuture<WriteShareGroupStateResult> future2 = new CompletableFuture<>();
// Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for
// offsets 5-9 and 20-24 respectively.
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
// Acknowledge batch to create ongoing transition.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id))));
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id))));
// deliveryCompleteCount will not be updated, because the acknowledgment type is RELEASE.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Complete future2 so second acknowledge API can be completed, which updates the cache.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
future2.complete(writeShareGroupStateResult);
// Since the records successfully acknowledged are moved to AVAILABLE state, deliveryCompleteCount will still not change.
assertEquals(0, sharePartition.deliveryCompleteCount());
// Offsets 5-9 will have ongoing state transition since future1 is not complete yet.
// Offsets 20-24 won't have ongoing state transition since future2 has been completed.
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(5L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(6L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(7L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(8L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(9L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(20L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(21L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(22L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(23L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(24L).hasOngoingStateTransition());
// nextFetchOffset should return 20 and not 5 because offsets 5-9 is undergoing state transition.
assertEquals(20, sharePartition.nextFetchOffset());
}
@Test
public void testAcquisitionLockTimeoutWithConcurrentAcknowledgement() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withPersister(persister)
.build();
// Create 2 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 0, 5).close();
memoryRecordsBuilder(buffer, 5, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire 10 records.
fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
5, /* Batch size of 5 so cache can have 2 entries */
10,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 0),
FETCH_ISOLATION_HWM),
20);
assertEquals(2, sharePartition.cachedState().size());
assertEquals(2, sharePartition.timer().size());
// Return 2 future which will be completed later.
CompletableFuture<WriteShareGroupStateResult> future1 = new CompletableFuture<>();
CompletableFuture<WriteShareGroupStateResult> future2 = new CompletableFuture<>();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
// Store the corresponding batch timer tasks.
TimerTask timerTask1 = sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask();
TimerTask timerTask2 = sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask();
// Acknowledge 1 offset in first batch as Accept to create offset tracking, accept complete
// sencond batch. And mark offset 0 as release so cached state do not move ahead.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id)),
new ShareAcknowledgementBatch(5, 19, List.of(AcknowledgeType.ACCEPT.id))));
// Assert the start offset has not moved.
assertEquals(0L, sharePartition.startOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
// Verify ongoing transition states.
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(0L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(0L).offsetState().get(1L).hasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(2L).hasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(5L).batchHasOngoingStateTransition());
// Records 1 and 5 -> 19 are acked with ACKNOWLEDGE type, thus deliveryCompleteCount will account for these.
assertEquals(16, sharePartition.deliveryCompleteCount());
// Validate first timer task is already cancelled.
assertTrue(timerTask1.isCancelled());
assertFalse(timerTask2.isCancelled());
// Fetch offset state timer tasks.
TimerTask timerTaskOffsetState1 = sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask();
TimerTask timerTaskOffsetState2 = sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask();
TimerTask timerTaskOffsetState3 = sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask();
// Complete futures.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
future1.complete(writeShareGroupStateResult);
future2.complete(writeShareGroupStateResult);
// Now that the futures are completed, offsets 1 and 5 -> 19 are all committed to the final ACKNOWLEDGED state.
// The deliveryCompleteCount will remain same as before the future is completed.
assertEquals(16, sharePartition.deliveryCompleteCount());
// Verify timer tasks are now cancelled, except unacknowledged offsets.
assertEquals(2, sharePartition.cachedState().size());
assertTrue(timerTask2.isCancelled());
assertTrue(timerTaskOffsetState1.isCancelled());
assertTrue(timerTaskOffsetState2.isCancelled());
assertFalse(timerTaskOffsetState3.isCancelled());
// Verify the state prior executing the timer tasks.
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
// Running expired timer tasks should not mark offsets available, except for offset 2.
timerTask1.run();
// State should remain same.
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
timerTask2.run();
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
timerTaskOffsetState2.run();
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
// Should update the state to available as the timer task is not yet expired.
timerTaskOffsetState3.run();
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
assertEquals(16, sharePartition.deliveryCompleteCount());
}
@Test
public void testLsoMovementWithWriteStateRPCFailuresInAcknowledgement() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withPersister(persister)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// Validate that there is no ongoing transition.
assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
assertFalse(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
// Return futures which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future1 = new CompletableFuture<>();
CompletableFuture<WriteShareGroupStateResult> future2 = new CompletableFuture<>();
// Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for
// offsets 2-6 and 7-11 respectively.
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
// Acknowledge batch to create ongoing transition.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id))));
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id))));
// Validate that there is no ongoing transition.
assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
// Records 7 -> 11 are acked with ACCEPT type, thus deliveryCompleteCount will account for these.
assertEquals(5, sharePartition.deliveryCompleteCount());
// Move LSO to 7, so some records/offsets can be marked archived for the first batch.
sharePartition.updateCacheAndOffsets(7L);
// Start offset will be moved.
assertEquals(12L, sharePartition.nextFetchOffset());
assertEquals(7L, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
assertEquals(5, sharePartition.deliveryCompleteCount());
// Complete future1 exceptionally so acknowledgement for 2-6 offsets will be completed.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
future1.complete(writeShareGroupStateResult);
// The completion of future1 with exception should not impact the cached state since those records have already
// been archived.
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
assertEquals(5, sharePartition.deliveryCompleteCount());
future2.complete(writeShareGroupStateResult);
assertEquals(12L, sharePartition.nextFetchOffset());
assertEquals(7, sharePartition.startOffset());
assertEquals(11, sharePartition.endOffset());
assertEquals(2, sharePartition.cachedState().size());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
// After the write RPC failure, the record states are rolled back and deliveryCompleteCount is calculated
// from scratch. Since there is no Terminal record now in flight, deliveryCompleteCount becomes 0.
assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testAcquisitionLockTimeoutWithWriteStateRPCFailure() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withPersister(persister)
.build();
fetchAcquiredRecords(
sharePartition.acquire(MEMBER_ID, ShareAcquireMode.BATCH_OPTIMIZED, BATCH_SIZE, MAX_FETCH_RECORDS, 0,
fetchPartitionData(memoryRecords(2)), FETCH_ISOLATION_HWM
), 2
);
assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
assertEquals(1, sharePartition.timer().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
// Return a future which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
// Acknowledge batch to create ongoing transition.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 1, List.of(AcknowledgeType.ACCEPT.id))));
// Assert the start offset has not moved and batch has ongoing transition.
assertEquals(0L, sharePartition.startOffset());
assertEquals(1, sharePartition.cachedState().size());
assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).batchState());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
// Timer task has not been expired yet.
assertFalse(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired());
// Record are acked with ACKNOWLEDGED type, thus deliveryCompleteCount will account for these.
assertEquals(2, sharePartition.deliveryCompleteCount());
// Allowing acquisition lock to expire. This will not cause any change because the record is not in ACQUIRED state.
// This will remove the entry of the timer task from timer.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(0L).batchState() == RecordState.ACKNOWLEDGED &&
sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 &&
sharePartition.timer().size() == 0,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
// Acquisition lock timeout task has run already and is not null.
assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
// Timer task should be expired now.
assertTrue(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired());
assertEquals(2, sharePartition.deliveryCompleteCount());
// Complete future exceptionally so acknowledgement for 0-1 offsets will be completed.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
future.complete(writeShareGroupStateResult);
// Even though write state RPC has failed and corresponding acquisition lock timeout task has expired,
// the record should not stuck in ACQUIRED state with no acquisition lock timeout task.
assertEquals(1, sharePartition.cachedState().size());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState());
assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
// After the write RPC failure, the record states are rolled back and deliveryCompleteCount is calculated
// from scratch. Since there is no Terminal record now in flight, deliveryCompleteCount becomes 0.
assertEquals(0, sharePartition.deliveryCompleteCount());
}
@Test
public void testRecordArchivedWithWriteStateRPCFailure() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
// Futures which will be completed later, so the batch state has ongoing transition.
CompletableFuture<WriteShareGroupStateResult> future1 = new CompletableFuture<>();
CompletableFuture<WriteShareGroupStateResult> future2 = new CompletableFuture<>();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
// Acknowledge batches.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id))));
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id))));
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
// Records 3 and 7 -> 11 are acked with ACKNOWLEDGE type, thus deliveryCompleteCount will account for these.
assertEquals(6, sharePartition.deliveryCompleteCount());
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
future1.complete(writeShareGroupStateResult);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
assertEquals(5, sharePartition.deliveryCompleteCount());
future2.complete(writeShareGroupStateResult);
assertEquals(12L, sharePartition.nextFetchOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Allowing acquisition lock to expire. This will also ensure that acquisition lock timeout task
// is run successfully post write state RPC failure.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(7L).batchState() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount() == 1 &&
sharePartition.cachedState().get(7L).batchDeliveryCount() == 1 &&
sharePartition.timer().size() == 0,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of())));
// Acquisition lock timeout task has run already and next fetch offset is moved to 2.
assertEquals(2, sharePartition.nextFetchOffset());
assertEquals(0, sharePartition.deliveryCompleteCount());
// Send the same batches again.
fetchAcquiredRecords(sharePartition, memoryRecords(2, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 5);
future1 = new CompletableFuture<>();
future2 = new CompletableFuture<>();
Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id))));
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id))));
assertEquals(6, sharePartition.deliveryCompleteCount());
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
// Verify the timer tasks have run and the state is archived for the offsets which are not acknowledged,
// but the acquisition lock timeout task should be just expired for acknowledged offsets, though
// the state should not be archived.
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(2L).offsetState().get(2L).state() == RecordState.ARCHIVED &&
sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.ACKNOWLEDGED &&
sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask().hasExpired() &&
sharePartition.cachedState().get(7L).batchState() == RecordState.ACKNOWLEDGED &&
sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask().hasExpired(),
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of())));
// After the acquisition lock timeout task has expired, records 2, 4 -> 6 are archived, and thus deliveryCompleteCount
// increases by 4.
assertEquals(10, sharePartition.deliveryCompleteCount());
future1.complete(writeShareGroupStateResult);
// Now the state should be archived for the offsets despite the write state RPC failure, as the
// delivery count has reached the max delivery count and the acquisition lock timeout task
// has already expired for the offsets which were acknowledged.
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
assertEquals(2, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount());
future2.complete(writeShareGroupStateResult);
assertEquals(12L, sharePartition.nextFetchOffset());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(7L).batchState());
assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount());
// At this point, the batch 2 -> 6 is removed from the cached state and startOffset is moved to 7. Thus, in flight
// contains records 7 -> 11 which are archived. Therefore, deliveryCompleteCount becomes 5.
assertEquals(5, sharePartition.deliveryCompleteCount());
}
@Test
public void testAckTypeToRecordStateMapping() {
// This test will help catch bugs if the map changes.
Map<Byte, RecordState> actualMap = SharePartition.ackTypeToRecordStateMapping();
assertEquals(4, actualMap.size());
Map<Byte, RecordState> expected = Map.of(
(byte) 0, RecordState.ARCHIVED,
AcknowledgeType.ACCEPT.id, RecordState.ACKNOWLEDGED,
AcknowledgeType.RELEASE.id, RecordState.AVAILABLE,
AcknowledgeType.REJECT.id, RecordState.ARCHIVED
);
for (byte key : expected.keySet()) {
assertEquals(expected.get(key), actualMap.get(key));
}
}
@Test
public void testFetchAckTypeMapForBatch() {
ShareAcknowledgementBatch batch = mock(ShareAcknowledgementBatch.class);
when(batch.acknowledgeTypes()).thenReturn(List.of((byte) -1));
assertThrows(IllegalArgumentException.class, () -> SharePartition.fetchAckTypeMapForBatch(batch));
}
    // Verifies a RENEW acknowledgement on a complete single-record batch: the batch
    // stays ACQUIRED with a fresh acquisition lock and no persister write, and a
    // subsequent lock expiry transitions it to AVAILABLE via exactly one persister write.
    @Test
    public void testRenewAcknowledgeWithCompleteBatchAck() throws InterruptedException {
        Persister persister = Mockito.mock(Persister.class);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2)
            .withPersister(persister)
            .build();
        // Acquire a single record at offset 0.
        List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 1), 1);
        assertEquals(1, records.size());
        assertEquals(records.get(0).firstOffset(), records.get(0).lastOffset());
        assertEquals(1, sharePartition.cachedState().size());
        InFlightBatch batch = sharePartition.cachedState().get(0L);
        AcquisitionLockTimerTask taskOrig = batch.batchAcquisitionLockTimeoutTask();
        // RENEW the whole batch: record state must remain ACQUIRED.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RENEW.id))));
        assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
        assertNotEquals(taskOrig, batch.batchAcquisitionLockTimeoutTask()); // Lock changes.
        assertEquals(1, sharePartition.timer().size()); // Timer jobs
        assertEquals(RecordState.ACQUIRED, batch.batchState());
        Mockito.verify(persister, Mockito.times(0)).writeState(Mockito.any()); // No persister call.
        // Expire timer
        // On expiration state will transition to AVAILABLE resulting in persister write RPC
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
        when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1); // Trigger expire
        assertNull(batch.batchAcquisitionLockTimeoutTask());
        assertEquals(RecordState.AVAILABLE, batch.batchState()); // Verify batch record state
        assertEquals(0, sharePartition.timer().size()); // Timer jobs
        Mockito.verify(persister, Mockito.times(1)).writeState(Mockito.any()); // 1 persister call.
    }
@Test
public void testRenewAcknowledgeOnExpiredBatch() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 1), 1);
assertEquals(1, records.size());
assertEquals(records.get(0).firstOffset(), records.get(0).lastOffset());
assertEquals(1, sharePartition.cachedState().size());
InFlightBatch batch = sharePartition.cachedState().get(0L);
AcquisitionLockTimerTask taskOrig = batch.batchAcquisitionLockTimeoutTask();
// Expire acq lock timeout.
// Persister mocking for recordState transition.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1);
TestUtils.waitForCondition(() -> batch.batchAcquisitionLockTimeoutTask() == null, "Acq lock timeout not cancelled.");
CompletableFuture<Void> future = sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RENEW.id))));
assertTrue(future.isCompletedExceptionally());
try {
future.get();
fail("No exception thrown");
} catch (Exception e) {
assertNotNull(e);
assertInstanceOf(InvalidRecordStateException.class, e.getCause());
}
assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
assertNotEquals(taskOrig, batch.batchAcquisitionLockTimeoutTask()); // Lock changes.
assertEquals(0, sharePartition.timer().size()); // Timer jobs
assertEquals(RecordState.AVAILABLE, batch.batchState());
Mockito.verify(persister, Mockito.times(1)).writeState(Mockito.any()); // 1 persister call to update record state.
}
    // Verifies a mixed per-offset acknowledgement (RENEW + ACCEPT) on a two-record
    // batch: the RENEWed offset stays ACQUIRED with a new acquisition lock, the
    // ACCEPTed offset moves to ACKNOWLEDGED (one persister write), and a later lock
    // expiry releases only the renewed offset (second persister write).
    @Test
    public void testRenewAcknowledgeWithPerOffsetAck() throws InterruptedException {
        Persister persister = Mockito.mock(Persister.class);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2)
            .withPersister(persister)
            .build();
        // Acquire a batch of two records (offsets 0-1).
        List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 2), 2);
        assertEquals(1, records.size());
        assertEquals(records.get(0).firstOffset() + 1, records.get(0).lastOffset());
        assertEquals(1, sharePartition.cachedState().size());
        InFlightBatch batch = sharePartition.cachedState().get(0L);
        assertEquals(RecordState.ACQUIRED, batch.batchState());
        AcquisitionLockTimerTask taskOrig = batch.batchAcquisitionLockTimeoutTask();
        // For ACCEPT ack call.
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
        when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        // Offset 0 -> RENEW, offset 1 -> ACCEPT; this splits the batch into per-offset state.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 1,
            List.of(AcknowledgeType.RENEW.id, AcknowledgeType.ACCEPT.id))));
        assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
        assertNotEquals(taskOrig, sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState());
        InFlightState offset0 = sharePartition.cachedState().get(0L).offsetState().get(0L);
        InFlightState offset1 = sharePartition.cachedState().get(0L).offsetState().get(1L);
        assertEquals(RecordState.ACQUIRED, offset0.state());
        assertNotNull(offset0.acquisitionLockTimeoutTask());
        assertEquals(1, sharePartition.timer().size()); // Timer jobs
        assertEquals(RecordState.ACKNOWLEDGED, offset1.state());
        assertNull(offset1.acquisitionLockTimeoutTask());
        Mockito.verify(persister, Mockito.times(1)).writeState(Mockito.any());
        // Expire timer
        mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1); // Trigger expire
        assertNull(offset0.acquisitionLockTimeoutTask());
        assertEquals(RecordState.AVAILABLE, offset0.state()); // Verify batch record state
        assertEquals(0, sharePartition.timer().size()); // Timer jobs
        Mockito.verify(persister, Mockito.times(2)).writeState(Mockito.any()); // 1 more persister call.
    }
@Test
public void testLsoMovementWithBatchRenewal() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 10), 10);
assertEquals(1, records.size());
assertNotEquals(records.get(0).firstOffset(), records.get(0).lastOffset());
assertEquals(1, sharePartition.cachedState().size());
InFlightBatch batch = sharePartition.cachedState().get(0L);
AcquisitionLockTimerTask taskOrig = batch.batchAcquisitionLockTimeoutTask();
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 9, List.of(AcknowledgeType.RENEW.id))));
sharePartition.updateCacheAndOffsets(5);
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(5, sharePartition.startOffset());
assertEquals(9, sharePartition.endOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
assertNotEquals(taskOrig, batch.batchAcquisitionLockTimeoutTask()); // Lock changes.
assertEquals(1, sharePartition.timer().size()); // Timer jobs
Mockito.verify(persister, Mockito.times(0)).writeState(Mockito.any()); // No persister call.
}
    // Verifies alternating per-offset RENEW/ACCEPT acknowledgements on a 5-record
    // batch followed by an LSO move into the batch: renewed offsets stay ACQUIRED
    // with individual lock tasks, accepted offsets are ACKNOWLEDGED, and on lock
    // expiry the offsets behind the new start offset are ARCHIVED while the rest
    // become AVAILABLE.
    @Test
    public void testLsoMovementWithPerOffsetRenewal() throws InterruptedException {
        Persister persister = Mockito.mock(Persister.class);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withMaxDeliveryCount(2)
            .withPersister(persister)
            .build();
        // Acquire a batch of five records (offsets 0-4).
        List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 5), 5);
        assertEquals(1, records.size());
        assertEquals(records.get(0).firstOffset() + 4, records.get(0).lastOffset());
        assertEquals(1, sharePartition.cachedState().size());
        InFlightBatch batch = sharePartition.cachedState().get(0L);
        assertEquals(RecordState.ACQUIRED, batch.batchState());
        AcquisitionLockTimerTask taskOrig = batch.batchAcquisitionLockTimeoutTask();
        // For ACCEPT ack call.
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
        when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        // Offsets 0,2,4 -> RENEW; offsets 1,3 -> ACCEPT.
        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 4,
            List.of(AcknowledgeType.RENEW.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.RENEW.id, AcknowledgeType.ACCEPT.id, AcknowledgeType.RENEW.id))));
        // Move the log start offset into the batch.
        sharePartition.updateCacheAndOffsets(3);
        assertEquals(5, sharePartition.nextFetchOffset());
        assertEquals(3, sharePartition.startOffset());
        assertEquals(4, sharePartition.endOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
        assertNotEquals(taskOrig, sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState());
        InFlightState offset0 = sharePartition.cachedState().get(0L).offsetState().get(0L);
        InFlightState offset1 = sharePartition.cachedState().get(0L).offsetState().get(1L);
        InFlightState offset2 = sharePartition.cachedState().get(0L).offsetState().get(2L);
        InFlightState offset3 = sharePartition.cachedState().get(0L).offsetState().get(3L);
        InFlightState offset4 = sharePartition.cachedState().get(0L).offsetState().get(4L);
        assertEquals(RecordState.ACQUIRED, offset0.state());
        assertNotNull(offset0.acquisitionLockTimeoutTask());
        assertEquals(RecordState.ACKNOWLEDGED, offset1.state());
        assertNull(offset1.acquisitionLockTimeoutTask());
        assertEquals(RecordState.ACQUIRED, offset2.state());
        assertNotNull(offset2.acquisitionLockTimeoutTask());
        assertEquals(RecordState.ACKNOWLEDGED, offset3.state());
        assertNull(offset3.acquisitionLockTimeoutTask());
        assertEquals(RecordState.ACQUIRED, offset4.state());
        assertNotNull(offset4.acquisitionLockTimeoutTask());
        assertEquals(3, sharePartition.timer().size()); // Timer jobs - 3 because the renewed offsets are non-contiguous.
        // Expire timer
        mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1); // Trigger expire
        // Offsets 0 and 2 are behind the moved start offset (3) so they are ARCHIVED;
        // renewed offset 4 becomes AVAILABLE for re-delivery.
        List<RecordState> expectedStates = List.of(RecordState.ARCHIVED, RecordState.ACKNOWLEDGED, RecordState.ARCHIVED, RecordState.ACKNOWLEDGED, RecordState.AVAILABLE);
        for (long i = 0; i <= 4; i++) {
            InFlightState offset = sharePartition.cachedState().get(0L).offsetState().get(i);
            assertNull(offset.acquisitionLockTimeoutTask());
            assertEquals(expectedStates.get((int) i), offset.state());
        }
        assertEquals(0, sharePartition.timer().size()); // Timer jobs
        Mockito.verify(persister, Mockito.times(4)).writeState(Mockito.any());
    }
@Test
public void testRenewAcknowledgeWithPerOffsetAndBatchMix() {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
// Batch
List<AcquiredRecords> recordsB = fetchAcquiredRecords(sharePartition, memoryRecords(0, 1), 1);
assertEquals(1, recordsB.size());
assertEquals(recordsB.get(0).firstOffset(), recordsB.get(0).lastOffset());
assertEquals(1, sharePartition.cachedState().size());
InFlightBatch batchB = sharePartition.cachedState().get(0L);
AcquisitionLockTimerTask taskOrigB = batchB.batchAcquisitionLockTimeoutTask();
// Per offset
List<AcquiredRecords> recordsO = fetchAcquiredRecords(sharePartition, memoryRecords(1, 2), 2);
assertEquals(1, recordsO.size());
assertEquals(recordsO.get(0).firstOffset() + 1, recordsO.get(0).lastOffset());
assertEquals(2, sharePartition.cachedState().size());
InFlightBatch batchO = sharePartition.cachedState().get(0L);
assertEquals(RecordState.ACQUIRED, batchO.batchState());
AcquisitionLockTimerTask taskOrigO = batchO.batchAcquisitionLockTimeoutTask();
// For ACCEPT ack call.
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RENEW.id)),
new ShareAcknowledgementBatch(1, 2, List.of(AcknowledgeType.RENEW.id, AcknowledgeType.ACCEPT.id))
));
// Batch checks
assertTrue(taskOrigB.isCancelled()); // Original acq lock cancelled.
assertNotEquals(taskOrigB, batchB.batchAcquisitionLockTimeoutTask()); // Lock changes.
// Per offset checks
assertTrue(taskOrigO.isCancelled()); // Original acq lock cancelled.
assertNotEquals(taskOrigO, sharePartition.cachedState().get(1L).offsetState().get(1L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(1L).offsetState());
InFlightState offset1 = sharePartition.cachedState().get(1L).offsetState().get(1L);
InFlightState offset2 = sharePartition.cachedState().get(1L).offsetState().get(2L);
assertEquals(RecordState.ACQUIRED, offset1.state());
assertNotNull(offset1.acquisitionLockTimeoutTask());
assertEquals(RecordState.ACKNOWLEDGED, offset2.state());
assertNull(offset2.acquisitionLockTimeoutTask());
assertEquals(2, sharePartition.timer().size()); // Timer jobs one for batch and one for single renewal in per offset.
Mockito.verify(persister, Mockito.times(1)).writeState(Mockito.any());
}
@Test
public void testAcquireSingleBatchInRecordLimitMode() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
// Member-1 attempts to acquire records in strict mode with a maximum fetch limit of 5 records.
MemoryRecords records = memoryRecords(10);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
2,
5,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
5);
assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(9, sharePartition.cachedState().get(0L).lastOffset());
assertNotNull(sharePartition.cachedState().get(0L).offsetState());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(0L).batchState());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(0L).batchMemberId());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(0L).batchDeliveryCount());
assertEquals(10, sharePartition.cachedState().get(0L).offsetState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).offsetState().get(0L).memberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).offsetState().get(0L).memberId());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(5L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(9L).state());
// Acquire the same batch with member-2. 5 records will be acquired.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
"member-2",
ShareAcquireMode.RECORD_LIMIT,
2,
5,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
5);
// Should acquire the subset of records in InflightBatch which are still available.
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(5, 5, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(6, 6, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(7, 7, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(8, 8, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(9, 9, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(10, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
assertEquals(9, sharePartition.cachedState().get(0L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(5L).state());
assertEquals("member-2", sharePartition.cachedState().get(0L).offsetState().get(5L).memberId());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(9L).state());
assertEquals("member-2", sharePartition.cachedState().get(0L).offsetState().get(5L).memberId());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 10,
"In-flight message count should be 10.");
assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
    // Verifies RECORD_LIMIT acquisition spanning multiple log batches: a 10-record
    // limit acquires offsets 10-19 out of batches covering 10-29; the cached entry
    // tracks per-offset state with 20-29 left AVAILABLE, and metrics report one
    // in-flight batch of 20 messages.
    @Test
    public void testAcquireMultipleBatchesInRecordLimitMode() throws InterruptedException {
        Persister persister = Mockito.mock(Persister.class);
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withPersister(persister)
            .withSharePartitionMetrics(sharePartitionMetrics)
            .build();
        // Create 3 batches of records.
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        memoryRecordsBuilder(buffer, 10, 5).close();
        memoryRecordsBuilder(buffer, 15, 15).close();
        memoryRecordsBuilder(buffer, 30, 15).close();
        buffer.flip();
        MemoryRecords records = MemoryRecords.readableRecords(buffer);
        // Acquire 10 records.
        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            BATCH_SIZE,
            10,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records, 10),
            FETCH_ISOLATION_HWM),
            10);
        assertArrayEquals(expectedAcquiredRecord(10, 19, 1).toArray(), acquiredRecordsList.toArray());
        assertEquals(20, sharePartition.nextFetchOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
        assertEquals(29, sharePartition.cachedState().get(10L).lastOffset());
        // Partial acquisition forces per-offset tracking; batch-level accessors must throw.
        assertNotNull(sharePartition.cachedState().get(10L).offsetState());
        assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState());
        assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchMemberId());
        assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchDeliveryCount());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).offsetState().get(19L).state());
        assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).offsetState().get(19L).memberId());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).offsetState().get(20L).state());
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
            "In-flight batch count should be 1.");
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 20,
            "In-flight message count should be 20.");
        assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().sum());
        assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
        assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().min());
        assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().max());
    }
@Test
public void testAcquireWhenInsufficientRecordsInRecordLimitMode() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
// Create 3 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 5).close();
memoryRecordsBuilder(buffer, 20, 5).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Requested 20 records, but only 15 available.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
2,
20,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
15);
assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(25, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
assertEquals(24, sharePartition.cachedState().get(10L).lastOffset());
// Since all the records in 3 batches are acquired, the offset state of the InFlight batch should be null and batch state should be ACQUIRED.
assertNull(sharePartition.cachedState().get(10L).offsetState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(1, sharePartition.timer().size());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 15,
"In-flight message count should be 15.");
assertEquals(15, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(15, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(15, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
    // Verifies RECORD_LIMIT acquisition interleaved with partial acknowledgements
    // (GAP/RELEASE/ACCEPT): after member-1 acknowledges a subset, member-2 acquires
    // the released offset (delivery count 2) plus the remaining new records, and the
    // per-offset cached state matches the expected mix of ARCHIVED / ACKNOWLEDGED /
    // ACQUIRED entries.
    @Test
    public void testAcquireAndAcknowledgeMultipleSubsetRecordInRecordLimitMode() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withSharePartitionMetrics(sharePartitionMetrics)
            .build();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE,
            TimestampType.CREATE_TIME, 5, 2)) {
            // Append records from offset 10.
            memoryRecords(10, 4).records().forEach(builder::append);
            // Append records from offset 19.
            // NOTE(review): the two offset comments above look stale relative to the
            // memoryRecords(...) arguments (10, 4) and (16, 5) — confirm intended offsets.
            memoryRecords(16, 5).records().forEach(builder::append);
        }
        buffer.flip();
        MemoryRecords records = MemoryRecords.readableRecords(buffer);
        // Acquire up to 10 records for member-1: offsets 5-14.
        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            2,
            10,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records),
            FETCH_ISOLATION_HWM),
            10);
        assertArrayEquals(expectedAcquiredRecord(5, 14, 1).toArray(), acquiredRecordsList.toArray());
        assertEquals(15, sharePartition.nextFetchOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(5L).offsetState());
        // Partially acknowledge the batch from 5-12.
        sharePartition.acknowledge(MEMBER_ID, List.of(
            new ShareAcknowledgementBatch(5, 9, List.of(ACKNOWLEDGE_TYPE_GAP_ID)),
            new ShareAcknowledgementBatch(10, 10, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(11, 11, List.of(AcknowledgeType.ACCEPT.id)),
            new ShareAcknowledgementBatch(12, 12, List.of(AcknowledgeType.ACCEPT.id))));
        // The RELEASE of offset 10 pulls the next fetch offset back to 10.
        assertEquals(10, sharePartition.nextFetchOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(5L));
        assertNotNull(sharePartition.cachedState().get(5L).offsetState());
        acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            "member-2",
            ShareAcquireMode.RECORD_LIMIT,
            2,
            10,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records),
            FETCH_ISOLATION_HWM),
            7);
        // Acquired batches will contain the following ->
        // 1. 10-10 (released offsets)
        // 2. 15-20 (new records)
        assertEquals(1, sharePartition.cachedState().size());
        List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 10, 2));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 15, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(16, 16, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(17, 17, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(18, 18, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(20, 20, 1));
        assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
        // Check cached state.
        Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2"));
        expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
        expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ACQUIRED, (short) 1, "member-2"));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState());
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
            "In-flight batch count should be 1.");
        // End offset(20) - Start offset(10) + 1 = 11
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 11,
            "In-flight message count should be 11.");
        // 16 messages(5-20)
        assertEquals(16, sharePartitionMetrics.inFlightBatchMessageCount().sum());
        assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
        assertEquals(16, sharePartitionMetrics.inFlightBatchMessageCount().min());
        assertEquals(16, sharePartitionMetrics.inFlightBatchMessageCount().max());
    }
    // Verifies RECORD_LIMIT acquisition when a later fetch overlaps already-acquired
    // offsets: the overlapping prefix (0-2) is skipped and only offsets 3-5 are
    // acquired, producing two cached batches and the expected per-offset state.
    @Test
    public void testAcquireMultipleRecordsWithOverlapAndNewBatchInRecordLimitMode() throws InterruptedException {
        SharePartition sharePartition = SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withSharePartitionMetrics(sharePartitionMetrics)
            .build();
        MemoryRecords records = memoryRecords(5);
        // First fetch: 3-record limit acquires offsets 0-2.
        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            BATCH_SIZE,
            3,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records, 0),
            FETCH_ISOLATION_HWM),
            3);
        List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(0, 2, 1));
        assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
        assertEquals(3, sharePartition.nextFetchOffset());
        // Add records from 0-9 offsets, 3-5 should be acquired and 0-2 should be ignored.
        records = memoryRecords(10);
        acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            BATCH_SIZE,
            3,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records, 0),
            FETCH_ISOLATION_HWM),
            3);
        expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(3, 3, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(4, 4, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(5, 5, 1));
        assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
        assertEquals(6, sharePartition.nextFetchOffset());
        assertEquals(2, sharePartition.cachedState().size());
        // Check cached state.
        Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
        expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID));
        assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(0L).offsetState());
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 2,
            "In-flight batch count should be 2.");
        TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 10,
            "In-flight message count should be 10.");
        assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().sum());
        assertEquals(2, sharePartitionMetrics.inFlightBatchMessageCount().count());
        assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
        assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().max());
    }
    // Verifies acknowledgement semantics under RECORD_LIMIT acquisition: accepting
    // a fully-acquired prefix advances the start offset, while accepting only part
    // of an acquired range leaves the remaining offsets ACQUIRED/AVAILABLE in
    // per-offset state.
    @Test
    public void testAcknowledgeInRecordLimitMode() {
        Persister persister = Mockito.mock(Persister.class);
        SharePartition sharePartition = Mockito.spy(SharePartitionBuilder.builder()
            .withState(SharePartitionState.ACTIVE)
            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
            .withPersister(persister)
            .build());
        MemoryRecords records = memoryRecords(10);
        // Acquire 1 record.
        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            2,
            1,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records),
            FETCH_ISOLATION_HWM),
            1);
        assertArrayEquals(expectedAcquiredRecord(0, 0, 1).toArray(), acquiredRecordsList.toArray());
        assertEquals(1, sharePartition.nextFetchOffset());
        // Persister must accept the state write triggered by ACCEPT.
        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
                PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
        Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
        CompletableFuture<Void> ackResult = sharePartition.acknowledge(
            MEMBER_ID,
            List.of(new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.ACCEPT.id))));
        assertNull(ackResult.join());
        assertFalse(ackResult.isCompletedExceptionally());
        // Accepting offset 0 moves the start offset to 1; offset 1 remains AVAILABLE.
        assertEquals(1, sharePartition.nextFetchOffset());
        assertEquals(1, sharePartition.startOffset());
        assertEquals(9, sharePartition.endOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
        // Acquire 2 records.
        acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.RECORD_LIMIT,
            2,
            2,
            DEFAULT_FETCH_OFFSET,
            fetchPartitionData(records),
            FETCH_ISOLATION_HWM),
            2);
        List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(1, 1, 1));
        expectedAcquiredRecords.addAll(expectedAcquiredRecord(2, 2, 1));
        assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
        // Ack only 1 record
        ackResult = sharePartition.acknowledge(
            MEMBER_ID,
            List.of(new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id))));
        assertNull(ackResult.join());
        assertFalse(ackResult.isCompletedExceptionally());
        // Offset 2 stays ACQUIRED, so the start offset only advances to 2.
        assertEquals(3, sharePartition.nextFetchOffset());
        assertEquals(2, sharePartition.startOffset());
        assertEquals(9, sharePartition.endOffset());
        assertEquals(1, sharePartition.cachedState().size());
        assertNotNull(sharePartition.cachedState().get(0L).offsetState());
        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state());
    }
@Test
public void testAcquisitionLockSingleRecordBatchInRecordLimitMode() throws InterruptedException {
SharePartition sharePartition = SharePartitionBuilder.builder()
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withState(SharePartitionState.ACTIVE)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
// Allowing acquisition lock to expire.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.timer().size() == 0 &&
sharePartition.nextFetchOffset() == 10 &&
sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(10L, List.of())));
assertEquals(5, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
2,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(memoryRecords(10, 5), 10),
FETCH_ISOLATION_HWM),
2);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 10, 2));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(11, 11, 2));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(12, sharePartition.nextFetchOffset());
// Check cached state.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 2, MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 2, MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5,
"In-flight message count should be 5.");
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
@Test
public void testAcquisitionLockTimeoutMultipleRecordBatchInRecordLimitMode() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
// Create 3 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 0, 5).close();
memoryRecordsBuilder(buffer, 5, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire 3 records.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
2,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
2);
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
assertArrayEquals(expectedAcquiredRecord(0, 1, 1).toArray(), acquiredRecordsList.toArray());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
// There should be 2 timer tasks for 2 offsets.
assertEquals(2, sharePartition.timer().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask().hasExpired());
assertFalse(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask().hasExpired());
// Allowing acquisition lock to expire.
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(0L).offsetState().get(0L).state() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(0L).offsetState().get(1L).state() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask() == null &&
sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask() == null &&
sharePartition.timer().size() == 0,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(0L, 1L))));
assertEquals(2, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().max());
// Acquisition lock timeout task has run already and next fetch offset is moved to 0.
assertEquals(0, sharePartition.nextFetchOffset());
assertEquals(1, sharePartition.cachedState().get(0L).offsetState().get(0L).deliveryCount());
assertEquals(1, sharePartition.cachedState().get(0L).offsetState().get(1L).deliveryCount());
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
2,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 10),
FETCH_ISOLATION_HWM),
2);
// delivery count increased to 2
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(0, 0, 2));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(1, 1, 2));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
// Ack offset at 1 and let the other offset to expire again.
CompletableFuture<Void> ackResult = sharePartition.acknowledge(
MEMBER_ID,
List.of(new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.RELEASE.id))));
assertNull(ackResult.join());
assertFalse(ackResult.isCompletedExceptionally());
mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
TestUtils.waitForCondition(
() -> sharePartition.cachedState().get(0L).offsetState().get(0L).state() == RecordState.AVAILABLE &&
sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask() == null &&
sharePartition.timer().size() == 0,
DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
() -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(0L, 1L))));
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
"In-flight batch count should be 1.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5,
"In-flight message count should be 5.");
assertEquals(3, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().max());
assertEquals(0, sharePartition.nextFetchOffset());
}
@Test
public void testAcquireCachedStateInitialGapOverlapsWithActualPartitionGapInRecordLimitMode() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
sharePartition.maybeInitialize();
// Creating 2 batches starting from 16, such that there is a natural gap from 11 to 15
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 16, 20).close();
memoryRecordsBuilder(buffer, 36, 25).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Acquire 20 records.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
20,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 0),
FETCH_ISOLATION_HWM),
20);
// Acquired batches will contain the following ->
// 1. 16-20 (gap offsets)
// 2. 31-40 (gap offsets)
// 3. 51-55 (new offsets)
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(16, 20, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 55, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(16, sharePartition.startOffset());
assertEquals(60, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(56, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNull(persisterReadResultGapWindow);
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 5,
"In-flight batch count should be 5.");
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 45,
"In-flight message count should be 45.");
assertEquals(45, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
@Test
public void testAcquireCachedStateGapInBetweenOverlapsWithActualPartitionGapInRecordLimitMode() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20
new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40
))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder()
.withPersister(persister)
.withSharePartitionMetrics(sharePartitionMetrics)
.build();
sharePartition.maybeInitialize();
// Creating 3 batches starting from 11, such that there is a natural gap from 36 to 40
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 11, 10).close();
memoryRecordsBuilder(buffer, 21, 15).close();
memoryRecordsBuilder(buffer, 41, 20).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
15,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records, 0),
FETCH_ISOLATION_HWM),
15);
// Acquired batches will contain the following ->
// 1. 11-20 (gap offsets)
// 2. 31-35 (gap offsets)
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 35, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(11, sharePartition.startOffset());
assertEquals(50, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(36, sharePartition.nextFetchOffset());
GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
assertNotNull(persisterReadResultGapWindow);
// Gap still exists from 36 to 40
assertEquals(36L, persisterReadResultGapWindow.gapStartOffset());
assertEquals(50L, persisterReadResultGapWindow.endOffset());
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 4,
"In-flight batch count should be 4.");
// End offset(50) - Start offset(11) + 1 = 40
TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 40,
"In-flight message count should be 40.");
// 35 messages: 10 (11-20) + 10 (21-30) + 5 (31-35) + 10 (41-50)
assertEquals(35, sharePartitionMetrics.inFlightBatchMessageCount().sum());
assertEquals(4, sharePartitionMetrics.inFlightBatchMessageCount().count());
assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min());
assertEquals(10, sharePartitionMetrics.inFlightBatchMessageCount().max());
}
@Test
public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchWithGapOffsetsInRecordLimitMode() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
MemoryRecords records1 = memoryRecords(5, 2);
// Untracked gap of 3 offsets from 7-9.
MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(10, 2);
// Gap from 12-13 offsets.
recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
// Gap for 15 offset.
recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
// Gap from 17-19 offsets.
recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes());
MemoryRecords records2 = recordsBuilder.build();
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
2,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records1, 0),
FETCH_ISOLATION_HWM),
2);
assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray());
assertEquals(7, sharePartition.nextFetchOffset());
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
5,
DEFAULT_FETCH_OFFSET,
fetchPartitionData(records2, 0),
FETCH_ISOLATION_HWM),
5);
// Acquired batches will contain the following ->
// 1. 10-14, including 12-13 (gap offsets)
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(15, sharePartition.nextFetchOffset());
assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
// 6 timer tasks for 10-14 offsets and batch 0-1
assertEquals(6, sharePartition.timer().size());
for (int i = 10; i <= 14; i++) {
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).offsetState().get((long) i).state());
assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).offsetState().get((long) i).memberId());
assertFalse(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask().hasExpired());
}
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).offsetState().get(15L).state());
// Acknowledging over subset of second batch with subset of gap offsets.
sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 12, List.of(AcknowledgeType.ACCEPT.id, AcknowledgeType.ACCEPT.id, ACKNOWLEDGE_TYPE_GAP_ID))));
// Release acquired records for "member-1".
CompletableFuture<Void> releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID);
assertNull(releaseResult.join());
assertFalse(releaseResult.isCompletedExceptionally());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
assertNull(sharePartition.cachedState().get(5L).offsetState());
// Check cached state.
Map<Long, InFlightState> expectedOffsetStateMap = new HashMap<>();
expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID));
assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState());
}
@Test
public void testMultipleMemberAcquireInDifferentAcquireModes() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
// Create 3 batches of records.
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 10, 5).close();
memoryRecordsBuilder(buffer, 15, 15).close();
memoryRecordsBuilder(buffer, 30, 15).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Member-1 acquires two full batches in batch_optimized mode.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
5,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
20);
// Acknowledge a subset of records from member-1.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(10, 14, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(15, 20, List.of(AcknowledgeType.ACCEPT.id)),
new ShareAcknowledgementBatch(21, 22, List.of(AcknowledgeType.RELEASE.id)),
new ShareAcknowledgementBatch(23, 28, List.of(AcknowledgeType.ACCEPT.id))));
// Member-2 acquires records in record_limit mode.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
"member-2",
ShareAcquireMode.RECORD_LIMIT,
BATCH_SIZE,
2,
5,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
2);
List<AcquiredRecords> expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(10, 10, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(11, 11, 2));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(12, sharePartition.nextFetchOffset());
// Member-3 acquires records in batch_optimized mode.
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
"member-3",
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
10,
5,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
20);
// Acquired batches will contain the following ->
// 1. 12-14 (released offsets)
// 2. 21-22 (released offsets)
// 3. 30-44 (new offsets)
expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(12, 12, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(13, 13, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(14, 14, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 21, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(22, 22, 2));
expectedAcquiredRecord.addAll(expectedAcquiredRecord(30, 44, 1));
assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray());
assertEquals(45, sharePartition.nextFetchOffset());
}
@Test
public void testLsoMovementWithPendingAcknowledgements() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
List<AcquiredRecords> records = fetchAcquiredRecords(sharePartition, memoryRecords(0, 5), 5);
assertEquals(1, records.size());
assertEquals(0, records.get(0).firstOffset());
assertEquals(4, records.get(0).lastOffset());
assertEquals(1, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
AcquisitionLockTimerTask taskOrig = sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask();
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
// Acknowledge offsets 1 and 3 out of 0-4 with ACCEPT.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id)),
new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id))));
// Move LSO to 3.
sharePartition.updateCacheAndOffsets(3);
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(3, sharePartition.startOffset());
assertEquals(4, sharePartition.endOffset());
assertEquals(1, sharePartition.cachedState().size());
assertTrue(taskOrig.isCancelled()); // Original acq lock cancelled.
assertNotEquals(taskOrig, sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(0L).offsetState());
InFlightState offset0 = sharePartition.cachedState().get(0L).offsetState().get(0L);
InFlightState offset1 = sharePartition.cachedState().get(0L).offsetState().get(1L);
InFlightState offset2 = sharePartition.cachedState().get(0L).offsetState().get(2L);
InFlightState offset3 = sharePartition.cachedState().get(0L).offsetState().get(3L);
InFlightState offset4 = sharePartition.cachedState().get(0L).offsetState().get(4L);
assertEquals(RecordState.ACQUIRED, offset0.state());
assertNotNull(offset0.acquisitionLockTimeoutTask());
assertEquals(RecordState.ACKNOWLEDGED, offset1.state());
assertNull(offset1.acquisitionLockTimeoutTask());
assertEquals(RecordState.ACQUIRED, offset2.state());
assertNotNull(offset2.acquisitionLockTimeoutTask());
assertEquals(RecordState.ACKNOWLEDGED, offset3.state());
assertNull(offset3.acquisitionLockTimeoutTask());
assertEquals(RecordState.ACQUIRED, offset4.state());
assertNotNull(offset4.acquisitionLockTimeoutTask());
assertEquals(3, sharePartition.timer().size()); // offsets 0,2 and 4 are still in ACQUIRED state.
// Expire acquisition lock timeout
mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1);
List<RecordState> expectedStates = List.of(RecordState.ARCHIVED, RecordState.ACKNOWLEDGED, RecordState.ARCHIVED, RecordState.ACKNOWLEDGED, RecordState.AVAILABLE);
for (long i = 0; i <= 4; i++) {
InFlightState offset = sharePartition.cachedState().get(0L).offsetState().get(i);
assertNull(offset.acquisitionLockTimeoutTask());
assertEquals(expectedStates.get((int) i), offset.state());
}
assertEquals(0, sharePartition.timer().size()); // All timer jobs have completed
Mockito.verify(persister, Mockito.times(4)).writeState(Mockito.any());
}
@Test
public void testLsoMovementWithPendingAcknowledgementsForBatches() throws InterruptedException {
Persister persister = Mockito.mock(Persister.class);
SharePartition sharePartition = SharePartitionBuilder.builder()
.withState(SharePartitionState.ACTIVE)
.withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
.withMaxDeliveryCount(2)
.withPersister(persister)
.build();
fetchAcquiredRecords(sharePartition, memoryRecords(0, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(15, 5), 5);
fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
assertEquals(5, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
// Acknowledge batches 5-9 and 15-19 with ACCEPT.
sharePartition.acknowledge(MEMBER_ID, List.of(
new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.ACCEPT.id)),
new ShareAcknowledgementBatch(15, 19, List.of(AcknowledgeType.ACCEPT.id))));
// Move LSO to 12.
sharePartition.updateCacheAndOffsets(12);
assertEquals(25, sharePartition.nextFetchOffset());
assertEquals(12, sharePartition.startOffset());
assertEquals(24, sharePartition.endOffset());
assertEquals(5, sharePartition.cachedState().size());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask());
assertNotNull(sharePartition.cachedState().get(20L).batchAcquisitionLockTimeoutTask());
assertEquals(3, sharePartition.timer().size());
// Expire acquisition lock timeout.
mockTimer.advanceClock(ACQUISITION_LOCK_TIMEOUT_MS + 1);
assertEquals(12, sharePartition.nextFetchOffset());
assertEquals(12, sharePartition.startOffset());
assertEquals(24, sharePartition.endOffset());
assertEquals(5, sharePartition.cachedState().size());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(0L).batchState());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState());
// Batch 10-14 will now be tracked on a per-offset basis.
assertNotNull(sharePartition.cachedState().get(10L).offsetState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).offsetState().get(10L).state());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).offsetState().get(11L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).offsetState().get(12L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).offsetState().get(13L).state());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).offsetState().get(14L).state());
assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState());
assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(15L).batchAcquisitionLockTimeoutTask());
assertNull(sharePartition.cachedState().get(20L).batchAcquisitionLockTimeoutTask());
assertEquals(0, sharePartition.timer().size()); // All timer jobs have completed
Mockito.verify(persister, Mockito.times(4)).writeState(Mockito.any());
}
@Test
public void testThrottleRecordsWhenPendingDeliveriesExist() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
List.of(
new PersisterStateBatch(15L, 19L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 3),
new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
CompletableFuture<Void> result = sharePartition.maybeInitialize();
assertTrue(result.isDone());
assertFalse(result.isCompletedExceptionally());
assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
assertEquals(3, sharePartition.cachedState().size());
assertEquals(5, sharePartition.startOffset());
assertEquals(30, sharePartition.endOffset());
assertEquals(5, sharePartition.nextFetchOffset());
ByteBuffer buffer = ByteBuffer.allocate(4096);
memoryRecordsBuilder(buffer, 7, 13).close();
memoryRecordsBuilder(buffer, 20, 8).close();
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
// Set max fetch records to 500, records should be acquired till the offset 26 of the fetched batch.
// 16 records should be returned: 7-19, 23-25
// The record at offset 26 has a delivery count of 3 and is a subject to be throttled; it should be skipped.
List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
5,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
16);
List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(7, 14, 1));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 19, 3));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
assertEquals(26, sharePartition.nextFetchOffset());
assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
assertEquals(30L, sharePartition.endOffset());
assertEquals(3, sharePartition.deliveryCompleteCount());
// The record at offset 26 has a delivery count of 3 and is a subject to be throttled;
// First acquisition attempt fails: batch size should be halved (5 -> 2)
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
MEMBER_ID,
ShareAcquireMode.BATCH_OPTIMIZED,
BATCH_SIZE,
500,
26,
fetchPartitionData(records),
FETCH_ISOLATION_HWM),
2);
expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(26, 26, 4));
expectedAcquiredRecords.addAll(expectedAcquiredRecord(27, 27, 4));
assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
}
/**
 * Verifies that for throttled records the acquisition batch size is halved on each failed
 * delivery attempt (20 -> 10 -> 5), until the final attempt (max delivery count 7 here)
 * delivers records strictly one at a time.
 */
@Test
public void testAcquireRecordsHalvesBatchSizeOnEachFailureUntilSingleRecordOnLastAttempt() throws InterruptedException {
    // Persisted state: a single batch 15-34 AVAILABLE with delivery count already at 4.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(15L, 34L, RecordState.AVAILABLE.id, (short) 4)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder()
        .withPersister(persister)
        .withMaxDeliveryCount(7)
        .withDefaultAcquisitionLockTimeoutMs(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS)
        .build();
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult))
;
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(5, sharePartition.startOffset());
    assertEquals(34, sharePartition.endOffset());
    assertEquals(5, sharePartition.nextFetchOffset());
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 15, 20).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // The record at offset 15 has a delivery count of 4 and is a subject to be throttled
    // First acquisition attempt fails: batch size should be halved (20 -> 10)
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        10);
    // Offsets 15-24 are delivered individually (per-offset records) with delivery count 5.
    final List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 15, 5));
    IntStream.range(1, 10).forEach(i -> expectedAcquiredRecords.addAll(expectedAcquiredRecord(15 + i, 15 + i, 5)));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
    // Allowing acquisition lock to expire.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS + 1);
    // Second failure: batch size halved again (now ~1/4 of original, 20 -> 5)
    acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        5);
    final List<AcquiredRecords> expectedAcquiredRecords2 = new ArrayList<>(expectedAcquiredRecord(15, 15, 6));
    IntStream.range(1, 5).forEach(i -> expectedAcquiredRecords2.addAll(expectedAcquiredRecord(15 + i, 15 + i, 6)));
    assertArrayEquals(expectedAcquiredRecords2.toArray(), acquiredRecordsList.toArray());
    // Allowing acquisition lock to expire.
    mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS + 1);
    List<AcquiredRecords> expectedLastAttemptAcquiredRecords;
    // Last delivery attempt, records are delivered individually.
    for (int i = 0; i < 5; i++) {
        acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.BATCH_OPTIMIZED,
            BATCH_SIZE,
            500,
            5,
            fetchPartitionData(records),
            FETCH_ISOLATION_HWM),
            1);
        // Each single-record delivery carries the max delivery count of 7.
        expectedLastAttemptAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15 + i, 15 + i, 7));
        assertArrayEquals(expectedLastAttemptAcquiredRecords.toArray(), acquiredRecordsList.toArray());
    }
    // The record at offset 20 has a delivery count of 5 and is a subject to be throttled;
    // Second acquisition attempt fails: batch size should be ~1/4 of original, 20 -> 5
    acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        5);
    final List<AcquiredRecords> expectedAcquiredRecords3 = new ArrayList<>(expectedAcquiredRecord(20, 20, 6));
    IntStream.range(1, 5).forEach(i -> expectedAcquiredRecords3.addAll(expectedAcquiredRecord(20 + i, 20 + i, 6)));
    assertArrayEquals(expectedAcquiredRecords3.toArray(), acquiredRecordsList.toArray());
    // The record at offset 25 has a delivery count of 4 and is a subject to be throttled;
    // First acquisition attempt fails: batch size should be ~1/2 of original, 20 -> 10
    acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        10);
    final List<AcquiredRecords> expectedAcquiredRecords4 = new ArrayList<>(expectedAcquiredRecord(25, 25, 5));
    IntStream.range(1, 10).forEach(i -> expectedAcquiredRecords4.addAll(expectedAcquiredRecord(25 + i, 25 + i, 5)));
    assertArrayEquals(expectedAcquiredRecords4.toArray(), acquiredRecordsList.toArray());
}
/**
 * Verifies that within a range of mixed delivery counts, the record on its last delivery
 * attempt (highest count) is isolated: the preceding records are acquired together and
 * the last-attempt record is then delivered alone.
 */
@Test
public void testLastAttemptRecordIsolationWithMixedDeliveryCount() {
    // Persisted state: a single batch 15-34 AVAILABLE with delivery count 2.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(15L, 34L, RecordState.AVAILABLE.id, (short) 2)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(15, sharePartition.startOffset());
    assertEquals(34, sharePartition.endOffset());
    assertEquals(15, sharePartition.nextFetchOffset());
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 15, 20).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        20);
    // Release only offsets 17 and 20 so their delivery counts advance ahead of the rest.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(17, 17, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(20, 20, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // Re-acquire the two released offsets, bumping their delivery counts again.
    fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        2);
    // Release the entire range 15-34 so everything is AVAILABLE with mixed delivery counts.
    ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(15, 34, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(15L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(16L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(17L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(18L).state());
    // Offset 17 is one delivery ahead of its neighbours.
    assertEquals(3, sharePartition.cachedState().get(15L).offsetState().get(15L).deliveryCount());
    assertEquals(3, sharePartition.cachedState().get(15L).offsetState().get(16L).deliveryCount());
    assertEquals(4, sharePartition.cachedState().get(15L).offsetState().get(17L).deliveryCount());
    assertEquals(3, sharePartition.cachedState().get(15L).offsetState().get(18L).deliveryCount());
    // The record at offset 17 (delivery count of 4) is on its last attempt and should be delivered alone,
    // so this acquisition correctly stops at offset 16.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        2);
    List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 15, 4));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(16, 16, 4));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
    // The record at offset 17 should be delivered alone.
    acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        1);
    expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(17, 17, 5));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
}
/**
 * Verifies that acquisition is not throttled when the high-delivery-count record in the
 * range is currently ACQUIRED and therefore not part of the new acquisition.
 */
@Test
public void testAcquisitionNotThrottledIfHighDeliveryCountRecordNotAcquired() {
    // Persisted state: a single batch 15-19 AVAILABLE with delivery count 1.
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(15L, 19L, RecordState.AVAILABLE.id, (short) 1)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
    Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message())))));
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult));
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());
    assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(15, sharePartition.startOffset());
    assertEquals(19, sharePartition.endOffset());
    assertEquals(15, sharePartition.nextFetchOffset());
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 15, 5).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        5);
    // Release only offset 15 so its delivery count advances past its neighbours.
    CompletableFuture<Void> ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(15, 15, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    // Re-acquire offset 15; it is now ACQUIRED with a delivery count of 3.
    fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        1);
    // Release offsets 16-19 so they become AVAILABLE again.
    ackResult = sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(16, 19, List.of(AcknowledgeType.RELEASE.id))));
    assertNull(ackResult.join());
    assertFalse(ackResult.isCompletedExceptionally());
    assertEquals(1, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).offsetState().get(15L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(16L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(17L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(18L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).offsetState().get(19L).state());
    assertEquals(3, sharePartition.cachedState().get(15L).offsetState().get(15L).deliveryCount());
    assertEquals(2, sharePartition.cachedState().get(15L).offsetState().get(16L).deliveryCount());
    assertEquals(2, sharePartition.cachedState().get(15L).offsetState().get(17L).deliveryCount());
    assertEquals(2, sharePartition.cachedState().get(15L).offsetState().get(18L).deliveryCount());
    assertEquals(2, sharePartition.cachedState().get(15L).offsetState().get(19L).deliveryCount());
    // This acquisition should not be throttled, as the high-delivery-count record (offset 15) was not acquired.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        5,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        4);
    List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(16, 16, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(17, 17, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(18, 18, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 3));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
}
/**
 * Verifies that records which would normally be throttled on delivery count are not
 * throttled while they have an ongoing (pending) persister state transition, both for
 * batch-level (20-24) and offset-level (36-37) cached state.
 */
@Test
public void testAcquisitionThrottlingWithOngoingStateTransition() {
    Persister persister = Mockito.mock(Persister.class);
    ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(),
                List.of(
                    new PersisterStateBatch(15L, 19L, RecordState.AVAILABLE.id, (short) 1),
                    // Batch of 20-24 has been set to delivery count of 2 so in next acquisition it will be 3,
                    // and post that it should be throttled but because of pending state transition it
                    // should not be throttled.
                    new PersisterStateBatch(20L, 24L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(25L, 29L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(30L, 34L, RecordState.AVAILABLE.id, (short) 2),
                    // Similarly, batch of 35-39 has been set to delivery count of 2 so in next offset
                    // acquisition, some offsets will be at 3 delivery count, and post that offsets
                    // should be throttled but because of pending state transition they will not be throttled.
                    new PersisterStateBatch(35, 39L, RecordState.AVAILABLE.id, (short) 2),
                    new PersisterStateBatch(40, 44L, RecordState.ARCHIVED.id, (short) 5),
                    new PersisterStateBatch(45, 49L, RecordState.AVAILABLE.id, (short) 1)))))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
    SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
    CompletableFuture<Void> result = sharePartition.maybeInitialize();
    assertTrue(result.isDone());
    assertFalse(result.isCompletedExceptionally());
    // Acquire batches 20-24 and 36-37 (offset based) and create a pending state transition.
    fetchAcquiredRecords(sharePartition, memoryRecords(20, 5), 5);
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState());
    fetchAcquiredRecords(sharePartition, memoryRecords(36, 2), 2);
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(35L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(35L).offsetState().get(36L).state());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(35L).offsetState().get(37L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(38L).state());
    // Create a pending future which will block state updates.
    CompletableFuture<WriteShareGroupStateResult> future = new CompletableFuture<>();
    Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
    // Release batch of 20-24 and offset 36-37, which will have pending state transition.
    sharePartition.acknowledge(
        MEMBER_ID,
        List.of(new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id)),
            new ShareAcknowledgementBatch(36, 37, List.of(AcknowledgeType.RELEASE.id))));
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState())
;
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(36L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(37L).state());
    // The writeState future has not completed, so the released entries still have
    // an ongoing state transition.
    assertTrue(sharePartition.cachedState().get(20L).batchHasOngoingStateTransition());
    assertFalse(sharePartition.cachedState().get(35L).offsetState().get(35L).hasOngoingStateTransition());
    assertTrue(sharePartition.cachedState().get(35L).offsetState().get(36L).hasOngoingStateTransition());
    assertTrue(sharePartition.cachedState().get(35L).offsetState().get(37L).hasOngoingStateTransition());
    assertFalse(sharePartition.cachedState().get(35L).offsetState().get(38L).hasOngoingStateTransition());
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    memoryRecordsBuilder(buffer, 15, 5).close();
    memoryRecordsBuilder(buffer, 20, 5).close();
    memoryRecordsBuilder(buffer, 25, 5).close();
    memoryRecordsBuilder(buffer, 30, 5).close();
    memoryRecordsBuilder(buffer, 35, 5).close();
    memoryRecordsBuilder(buffer, 40, 5).close();
    memoryRecordsBuilder(buffer, 45, 5).close();
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // Acquire batches and batch 15-19, 25-29 will be acquired as batch 20-24 has pending state transition.
    // Without pending transition, the acquisition would have happened only for 20-24 batch as the batch
    // 20-24 would have marked to be throttled but eventually couldn't be acquired because of state transition.
    fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        10,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        10);
    assertEquals(7, sharePartition.cachedState().size());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState());
    assertTrue(sharePartition.cachedState().get(20L).batchHasOngoingStateTransition());
    assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(25L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(30L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(35L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(36L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(37L).state());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(35L).offsetState().get(38L).state());
    assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(40L).batchState());
    assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(45L).batchState());
    // Re-trigger the acquisition and rest all the records will be acquired, including the offsets
    // ones. The throttling should not happen because of pending state transition.
    List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
        MEMBER_ID,
        ShareAcquireMode.BATCH_OPTIMIZED,
        BATCH_SIZE,
        500,
        15,
        fetchPartitionData(records),
        FETCH_ISOLATION_HWM),
        13);
    List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(30, 34, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(35, 35, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(38, 38, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(39, 39, 3));
    expectedAcquiredRecords.addAll(expectedAcquiredRecord(45, 49, 2));
    assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
}
/**
 * Produces transactional data: {@code numRecords} records appended at {@code baseOffset}
 * for the given producer, followed by a transaction control marker (COMMIT/ABORT) written
 * directly after the last data record.
 *
 * @param buffer buffer the records and the marker are written into
 * @param controlRecordType COMMIT or ABORT marker terminating the transaction
 * @param numRecords number of data records to append
 * @param producerId transactional producer id used for both the records and the marker
 * @param baseOffset offset of the first record; the marker lands at {@code baseOffset + numRecords}
 */
private void newTransactionalRecords(ByteBuffer buffer, ControlRecordType controlRecordType, int numRecords, long producerId, long baseOffset) {
    // Producer epoch 0, sequence 0, transactional=true, no partition leader epoch.
    try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer,
        RecordBatch.CURRENT_MAGIC_VALUE,
        Compression.NONE,
        TimestampType.CREATE_TIME,
        baseOffset,
        MOCK_TIME.milliseconds(),
        producerId,
        (short) 0,
        0,
        true,
        RecordBatch.NO_PARTITION_LEADER_EPOCH)) {
        for (int i = 0; i < numRecords; i++)
            builder.append(new SimpleRecord(MOCK_TIME.milliseconds(), "key".getBytes(), "value".getBytes()));
        builder.build();
    }
    writeTransactionMarker(buffer, controlRecordType, (int) baseOffset + numRecords, producerId);
}
/**
 * Appends an end-of-transaction control marker (COMMIT or ABORT) for the given producer
 * at the supplied offset into {@code buffer}.
 */
private void writeTransactionMarker(ByteBuffer buffer, ControlRecordType controlRecordType, int offset, long producerId) {
    // Epoch values of 0 match the transactional batches built by newTransactionalRecords.
    EndTransactionMarker marker = new EndTransactionMarker(controlRecordType, 0);
    MemoryRecords.writeEndTransactionalMarker(
        buffer, offset, MOCK_TIME.milliseconds(), 0, producerId, (short) 0, marker);
}
/**
 * Builds a single aborted-transaction entry (producer id 1000, first offset 0) as it
 * would appear in a fetch response for aborted transactional data.
 */
private List<FetchResponseData.AbortedTransaction> newAbortedTransactions() {
    FetchResponseData.AbortedTransaction aborted = new FetchResponseData.AbortedTransaction();
    aborted.setProducerId(1000L);
    aborted.setFirstOffset(0);
    return List.of(aborted);
}
/**
 * Wraps the given records in a {@link FetchPartitionData} with a log start offset of zero.
 */
private FetchPartitionData fetchPartitionData(Records records) {
    return fetchPartitionData(records, 0L);
}
/**
 * Wraps the given records and aborted transactions in a {@link FetchPartitionData}
 * with a log start offset of zero.
 */
private FetchPartitionData fetchPartitionData(Records records, List<FetchResponseData.AbortedTransaction> abortedTransactions) {
    return fetchPartitionData(records, 0L, abortedTransactions);
}
/**
 * Builds a successful {@link FetchPartitionData} for the given records with the supplied
 * log start offset and no aborted transactions; the remaining response fields are left empty.
 */
private FetchPartitionData fetchPartitionData(Records records, long logStartOffset) {
    return new FetchPartitionData(Errors.NONE, 5, logStartOffset, records,
        Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false);
}
/**
 * Builds a successful {@link FetchPartitionData} for the given records with the supplied
 * log start offset and the provided aborted transactions attached.
 */
private FetchPartitionData fetchPartitionData(Records records, long logStartOffset, List<FetchResponseData.AbortedTransaction> abortedTransactions) {
    return new FetchPartitionData(Errors.NONE, 5, logStartOffset, records,
        Optional.empty(), OptionalLong.empty(), Optional.of(abortedTransactions), OptionalInt.empty(), false);
}
/**
 * Acquires the given records using the first batch's base offset as the fetch offset and
 * asserts the acquired offset count.
 */
private List<AcquiredRecords> fetchAcquiredRecords(SharePartition sharePartition, Records records, long logStartOffset, int expectedOffsetCount) {
    long fetchOffset = records.batches().iterator().next().baseOffset();
    return fetchAcquiredRecords(sharePartition, records, fetchOffset, logStartOffset, expectedOffsetCount);
}
/**
 * Acquires the given records in BATCH_OPTIMIZED mode at {@code fetchOffset} and asserts
 * the acquired offset count before returning the acquired records.
 */
private List<AcquiredRecords> fetchAcquiredRecords(SharePartition sharePartition, Records records, long fetchOffset, long logStartOffset, int expectedOffsetCount) {
    return fetchAcquiredRecords(
        sharePartition.acquire(
            MEMBER_ID,
            ShareAcquireMode.BATCH_OPTIMIZED,
            BATCH_SIZE,
            MAX_FETCH_RECORDS,
            fetchOffset,
            fetchPartitionData(records, logStartOffset),
            FETCH_ISOLATION_HWM),
        expectedOffsetCount);
}
/**
 * Acquires the given records starting at the first batch's base offset with a log start
 * offset of zero, asserting the acquired offset count.
 */
private List<AcquiredRecords> fetchAcquiredRecords(SharePartition sharePartition, Records records, int expectedOffsetCount) {
    long fetchOffset = records.batches().iterator().next().baseOffset();
    return fetchAcquiredRecords(sharePartition, records, fetchOffset, 0, expectedOffsetCount);
}
/**
 * Asserts that the acquisition result is non-null and covers exactly
 * {@code expectedOffsetCount} offsets, then returns its acquired records.
 */
private List<AcquiredRecords> fetchAcquiredRecords(ShareAcquiredRecords acquired, int expectedOffsetCount) {
    assertNotNull(acquired);
    assertEquals(expectedOffsetCount, acquired.count());
    return acquired.acquiredRecords();
}
/**
 * Builds {@code numOfRecords} records starting at offset zero.
 */
private MemoryRecords memoryRecords(int numOfRecords) {
    return memoryRecords(0L, numOfRecords);
}
/**
 * Builds {@code numOfRecords} records starting at {@code startOffset}, closing the
 * underlying builder via try-with-resources.
 */
private MemoryRecords memoryRecords(long startOffset, int numOfRecords) {
    try (MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(startOffset, numOfRecords)) {
        return recordsBuilder.build();
    }
}
/**
 * Builds a single expected {@link AcquiredRecords} entry spanning
 * [{@code baseOffset}, {@code lastOffset}] with the given delivery count.
 */
private List<AcquiredRecords> expectedAcquiredRecord(long baseOffset, long lastOffset, int deliveryCount) {
    AcquiredRecords acquired = new AcquiredRecords();
    acquired.setFirstOffset(baseOffset);
    acquired.setLastOffset(lastOffset);
    acquired.setDeliveryCount((short) deliveryCount);
    return List.of(acquired);
}
/**
 * Builds one expected {@link AcquiredRecords} entry per batch in {@code memoryRecords},
 * each carrying the given delivery count.
 */
private List<AcquiredRecords> expectedAcquiredRecords(MemoryRecords memoryRecords, int deliveryCount) {
    List<AcquiredRecords> expected = new ArrayList<>();
    for (RecordBatch batch : memoryRecords.batches()) {
        expected.add(new AcquiredRecords()
            .setFirstOffset(batch.baseOffset())
            .setLastOffset(batch.lastOffset())
            .setDeliveryCount((short) deliveryCount));
    }
    return expected;
}
/**
 * Builds one expected single-offset {@link AcquiredRecords} entry for every offset in
 * [{@code baseOffset}, {@code lastOffset}], each with the given delivery count.
 */
private List<AcquiredRecords> expectedAcquiredRecords(long baseOffset, long lastOffset, int deliveryCount) {
    List<AcquiredRecords> expected = new ArrayList<>();
    for (long offset = baseOffset; offset <= lastOffset; offset++) {
        expected.add(new AcquiredRecords()
            .setFirstOffset(offset)
            .setLastOffset(offset)
            .setDeliveryCount((short) deliveryCount));
    }
    return expected;
}
/**
 * Stubs {@code persister.readState} to return an empty, error-free share-group state
 * (state epoch 0, start offset 0, no persisted batches).
 */
public void mockPersisterReadStateMethod(Persister persister) {
    ReadShareGroupStateResult readResult = Mockito.mock(ReadShareGroupStateResult.class);
    Mockito.when(readResult.topicsData()).thenReturn(List.of(
        new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
            PartitionFactory.newPartitionAllData(0, 0, 0L, Errors.NONE.code(), Errors.NONE.message(), List.of())))));
    Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readResult));
}
private static | SharePartitionTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsSetters.java | {
"start": 1869,
"end": 2163
} | interface ____ through
* a parameterized run with each implementation.
* For each of the setters, the value is set, verified,
* updated, verified again.
* An option known to be undefined in all created IOStatisticsStore instances
* is set, to verify it is harmless.
*/
public | implementations |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/JarEntriesStream.java | {
"start": 985,
"end": 1132
} | class ____ iterate entries in a jar file and check that content matches a related
* entry.
*
* @author Phillip Webb
* @author Andy Wilkinson
*/
| to |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/converter/xml/Jaxb2CollectionHttpMessageConverterTests.java | {
"start": 1785,
"end": 9402
} | class ____ {
private Jaxb2CollectionHttpMessageConverter<?> converter;
private Type rootElementListType;
private Type rootElementSetType;
private Type typeListType;
private Type typeSetType;
@BeforeEach
void setup() {
    // Raw converter instance; the generic parameter is irrelevant for these tests.
    converter = new Jaxb2CollectionHttpMessageConverter<Collection<Object>>();
    // Capture the generic collection types via ParameterizedTypeReference so the
    // converter can inspect the element type at runtime.
    rootElementListType = new ParameterizedTypeReference<List<RootElement>>() {}.getType();
    rootElementSetType = new ParameterizedTypeReference<Set<RootElement>>() {}.getType();
    typeListType = new ParameterizedTypeReference<List<TestType>>() {}.getType();
    typeSetType = new ParameterizedTypeReference<Set<TestType>>() {}.getType();
}
@Test
void canRead() {
    // All four generic collection types prepared in setup() should be readable.
    // typeListType was previously missing here even though it is initialized in
    // setup() and exercised by readXmlTypeList.
    assertThat(converter.canRead(rootElementListType, null, null)).isTrue();
    assertThat(converter.canRead(rootElementSetType, null, null)).isTrue();
    assertThat(converter.canRead(typeListType, null, null)).isTrue();
    assertThat(converter.canRead(typeSetType, null, null)).isTrue();
}
@Test
@SuppressWarnings("unchecked")
public void readXmlRootElementList() throws Exception {
    // Two @XmlRootElement-annotated elements wrapped in a <list> element; order must
    // be preserved when deserialized into a List.
    String content = "<list><rootElement><type s=\"1\"/></rootElement><rootElement><type s=\"2\"/></rootElement></list>";
    MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
    List<RootElement> result = (List<RootElement>) converter.read(rootElementListType, null, inputMessage);
    assertThat(result).as("Invalid result").hasSize(2);
    assertThat(result.get(0).type.s).as("Invalid result").isEqualTo("1");
    assertThat(result.get(1).type.s).as("Invalid result").isEqualTo("2");
}
@Test
@SuppressWarnings("unchecked")
public void readXmlRootElementSet() throws Exception {
String content = "<set><rootElement><type s=\"1\"/></rootElement><rootElement><type s=\"2\"/></rootElement></set>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
Set<RootElement> result = (Set<RootElement>) converter.read(rootElementSetType, null, inputMessage);
assertThat(result).as("Invalid result").hasSize(2);
assertThat(result.contains(new RootElement("1"))).as("Invalid result").isTrue();
assertThat(result.contains(new RootElement("2"))).as("Invalid result").isTrue();
}
@Test
@SuppressWarnings("unchecked")
public void readXmlTypeList() throws Exception {
String content = "<list><foo s=\"1\"/><bar s=\"2\"/></list>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
List<TestType> result = (List<TestType>) converter.read(typeListType, null, inputMessage);
assertThat(result).as("Invalid result").hasSize(2);
assertThat(result.get(0).s).as("Invalid result").isEqualTo("1");
assertThat(result.get(1).s).as("Invalid result").isEqualTo("2");
}
@Test
@SuppressWarnings("unchecked")
public void readXmlTypeSet() throws Exception {
String content = "<set><foo s=\"1\"/><bar s=\"2\"/></set>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
Set<TestType> result = (Set<TestType>) converter.read(typeSetType, null, inputMessage);
assertThat(result).as("Invalid result").hasSize(2);
assertThat(result.contains(new TestType("1"))).as("Invalid result").isTrue();
assertThat(result.contains(new TestType("2"))).as("Invalid result").isTrue();
}
@Test
@SuppressWarnings("unchecked")
public void readXmlRootElementExternalEntityDisabled() throws Exception {
Resource external = new ClassPathResource("external.txt", getClass());
String content = "<!DOCTYPE root [" +
" <!ELEMENT external ANY >\n" +
" <!ENTITY ext SYSTEM \"" + external.getURI() + "\" >]>" +
" <list><rootElement><type s=\"1\"/><external>&ext;</external></rootElement></list>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
converter = new Jaxb2CollectionHttpMessageConverter<Collection<Object>>() {
@Override
protected XMLInputFactory createXmlInputFactory() {
XMLInputFactory inputFactory = super.createXmlInputFactory();
inputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, true);
return inputFactory;
}
};
try {
Collection<RootElement> result = converter.read(rootElementListType, null, inputMessage);
assertThat(result).hasSize(1);
assertThat(result.iterator().next().external).isEmpty();
}
catch (HttpMessageNotReadableException ex) {
// Some parsers raise an exception
}
}
@Test
@SuppressWarnings("unchecked")
public void readXmlRootElementExternalEntityEnabled() throws Exception {
Resource external = new ClassPathResource("external.txt", getClass());
String content = "<!DOCTYPE root [" +
" <!ELEMENT external ANY >\n" +
" <!ENTITY ext SYSTEM \"" + external.getURI() + "\" >]>" +
" <list><rootElement><type s=\"1\"/><external>&ext;</external></rootElement></list>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
Jaxb2CollectionHttpMessageConverter<?> c = new Jaxb2CollectionHttpMessageConverter<Collection<Object>>() {
@Override
protected XMLInputFactory createXmlInputFactory() {
XMLInputFactory inputFactory = XMLInputFactory.newInstance();
inputFactory.setProperty(XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES, true);
return inputFactory;
}
};
Collection<RootElement> result = c.read(rootElementListType, null, inputMessage);
assertThat(result).hasSize(1);
assertThat(result.iterator().next().external).isEqualTo("Foo Bar");
}
@Test
void testXmlBomb() {
// https://en.wikipedia.org/wiki/Billion_laughs
// https://msdn.microsoft.com/en-us/magazine/ee335713.aspx
String content = """
<?xml version="1.0"?>
<!DOCTYPE lolz [
<!ENTITY lol "lol">
<!ELEMENT lolz (#PCDATA)>
<!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
<!ENTITY lol2 "&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;">
<!ENTITY lol3 "&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;">
<!ENTITY lol4 "&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;">
<!ENTITY lol5 "&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;">
<!ENTITY lol6 "&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;">
<!ENTITY lol7 "&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;">
<!ENTITY lol8 "&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;">
<!ENTITY lol9 "&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;">
]>
<list><rootElement><external>&lol9;</external></rootElement></list>""";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.UTF_8));
assertThatExceptionOfType(HttpMessageNotReadableException.class)
.isThrownBy(() -> this.converter.read(this.rootElementListType, null, inputMessage))
.withMessageContaining("\"lol9\"");
}
@Test
@SuppressWarnings("unchecked")
public void readXmlRootElementListHeaderCharset() throws Exception {
String content = "<list><rootElement><type s=\"Hellø Wørld\"/></rootElement></list>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(content.getBytes(StandardCharsets.ISO_8859_1));
inputMessage.getHeaders().setContentType(MediaType.parseMediaType("application/xml;charset=iso-8859-1"));
List<RootElement> result = (List<RootElement>) converter.read(rootElementListType, null, inputMessage);
assertThat(result).as("Invalid result").hasSize(1);
assertThat(result.get(0).type.s).as("Invalid result").isEqualTo("Hellø Wørld");
}
@XmlRootElement
public static | Jaxb2CollectionHttpMessageConverterTests |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ArrayRecordComponent.java | {
"start": 1248,
"end": 1696
} | class ____ extends BugChecker implements VariableTreeMatcher {
@Override
public Description matchVariable(VariableTree tree, VisitorState state) {
var sym = ASTHelpers.getSymbol(tree);
// isRecord(VarSymbol) is true iff the symbol represents a record component
if (ASTHelpers.isRecord(sym) && sym.asType().getKind() == TypeKind.ARRAY) {
return describeMatch(tree);
}
return Description.NO_MATCH;
}
}
| ArrayRecordComponent |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryParenthesesTest.java | {
"start": 2776,
"end": 3304
} | class ____ {
Iterable<Integer> f(List<Integer> l) {
return Iterables.transform(
l,
new Function<Integer, Integer>() {
public Integer apply(Integer a) {
return a * 2;
}
});
}
}
""")
.doTest();
}
@Test
public void binaryTrees() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RRateLimiter.java | {
"start": 787,
"end": 8903
} | interface ____ extends RRateLimiterAsync, RExpirable {
/**
* Use {@link #trySetRate(RateType, long, Duration)} instead
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
* @param rateIntervalUnit rate time interval unit
* @return {@code true} if rate was set and {@code false}
* otherwise
*/
@Deprecated
boolean trySetRate(RateType mode, long rate, long rateInterval, RateIntervalUnit rateIntervalUnit);
/**
* Sets the rate limit only if it hasn't been set before.
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
* @return {@code true} if rate was set and {@code false}
* otherwise
*/
boolean trySetRate(RateType mode, long rate, Duration rateInterval);
/**
* Sets the rate limit only if it hasn't been set before.
* Time to live is applied only if rate limit has been set successfully.
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
* @param keepAliveTime this is the maximum time that the limiter will wait for a new acquisition before deletion
* @return {@code true} if rate was set and {@code false}
* otherwise
*/
boolean trySetRate(RateType mode, long rate, Duration rateInterval, Duration keepAliveTime);
/**
* Use {@link #setRate(RateType, long, Duration)} instead.
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
* @param rateIntervalUnit rate time interval unit
*/
@Deprecated
void setRate(RateType mode, long rate, long rateInterval, RateIntervalUnit rateIntervalUnit);
/**
* Sets the rate limit and clears the state.
* Overrides both limit and state if they haven't been set before.
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
*/
void setRate(RateType mode, long rate, Duration rateInterval);
/**
* Sets time to live, the rate limit, and clears the state.
* Overrides both limit and state if they haven't been set before.
*
* @param mode rate mode
* @param rate rate
* @param rateInterval rate time interval
* @param keepAliveTime this is the maximum time that the limiter will wait for a new acquisition before deletion
*/
void setRate(RateType mode, long rate, Duration rateInterval, Duration keepAliveTime);
/**
* Acquires a permit only if one is available at the
* time of invocation.
*
* <p>Acquires a permit, if one is available and returns immediately,
* with the value {@code true},
* reducing the number of available permits by one.
*
* <p>If no permit is available then this method will return
* immediately with the value {@code false}.
*
* @return {@code true} if a permit was acquired and {@code false}
* otherwise
*/
boolean tryAcquire();
/**
* Acquires the given number of <code>permits</code> only if all are available at the
* time of invocation.
*
* <p>Acquires a permits, if all are available and returns immediately,
* with the value {@code true},
* reducing the number of available permits by given number of permits.
*
* <p>If no permits are available then this method will return
* immediately with the value {@code false}.
*
* @param permits the number of permits to acquire
* @return {@code true} if a permit was acquired and {@code false}
* otherwise
*/
boolean tryAcquire(long permits);
/**
* Acquires a permit from this RateLimiter, blocking until one is available.
*
* <p>Acquires a permit, if one is available and returns immediately,
* reducing the number of available permits by one.
*
*/
void acquire();
/**
* Acquires a specified <code>permits</code> from this RateLimiter,
* blocking until one is available.
*
* <p>Acquires the given number of permits, if they are available
* and returns immediately, reducing the number of available permits
* by the given amount.
*
* @param permits the number of permits to acquire
*/
void acquire(long permits);
/**
* Use {@link #tryAcquire(Duration)} instead.
*
* @param timeout the maximum time to wait for a permit
* @param unit the time unit of the {@code timeout} argument
* @return {@code true} if a permit was acquired and {@code false}
* if the waiting time elapsed before a permit was acquired
*/
@Deprecated
boolean tryAcquire(long timeout, TimeUnit unit);
/**
* Acquires a permit from this RateLimiter, if one becomes available
* within the given waiting time.
*
* <p>Acquires a permit, if one is available and returns immediately,
* with the value {@code true},
* reducing the number of available permits by one.
*
* <p>If no permit is available then the current thread becomes
* disabled for thread scheduling purposes and lies dormant until
* specified waiting time elapses.
*
* <p>If a permit is acquired then the value {@code true} is returned.
*
* <p>If the specified waiting time elapses then the value {@code false}
* is returned. If the time is less than or equal to zero, the method
* will not wait at all.
*
* @param timeout the maximum time to wait for a permit
* @return {@code true} if a permit was acquired and {@code false}
* if the waiting time elapsed before a permit was acquired
*/
boolean tryAcquire(Duration timeout);
/**
* Use {@link #tryAcquire(long, Duration)} instead.
*
* @param permits amount
* @param timeout the maximum time to wait for a permit
* @param unit the time unit of the {@code timeout} argument
* @return {@code true} if a permit was acquired and {@code false}
* if the waiting time elapsed before a permit was acquired
*/
@Deprecated
boolean tryAcquire(long permits, long timeout, TimeUnit unit);
/**
* Acquires the given number of <code>permits</code> only if all are available
* within the given waiting time.
*
* <p>Acquires the given number of permits, if all are available and returns immediately,
* with the value {@code true}, reducing the number of available permits by one.
*
* <p>If no permit is available then the current thread becomes
* disabled for thread scheduling purposes and lies dormant until
* the specified waiting time elapses.
*
* <p>If a permits is acquired then the value {@code true} is returned.
*
* <p>If the specified waiting time elapses then the value {@code false}
* is returned. If the time is less than or equal to zero, the method
* will not wait at all.
*
* @param permits amount
* @param timeout the maximum time to wait for a permit
* @return {@code true} if a permit was acquired and {@code false}
* if the waiting time elapsed before a permit was acquired
*/
boolean tryAcquire(long permits, Duration timeout);
/**
* Releases the given number of <code>permits</code>.
*
* <p>Increases the number of available permits by the specified amount and completes
* immediately, causing any waiting acquirers that can now obtain permits to proceed.
*
* <p>The returned future completes when the release has been applied.
*
* @param permits amount to release; must be greater than or equal to zero
*/
void release(long permits);
/**
* Returns current configuration of this RateLimiter object.
*
* @return config object
*/
RateLimiterConfig getConfig();
/**
* Returns amount of available permits.
*
* @return number of permits
*/
long availablePermits();
}
| RRateLimiter |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java | {
"start": 1735,
"end": 1838
} | class ____ extends HttpClientBase implements HttpClientInternal, MetricsProvider {
static | HttpClientImpl |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java | {
"start": 1710,
"end": 4126
} | class ____ {
private String start = "";
private String end = "";
private Builder() {}
public Builder startTime(String start) {
this.start = ExceptionsHelper.requireNonNull(start, "start");
return this;
}
public Builder endTime(String end) {
this.end = ExceptionsHelper.requireNonNull(end, "end");
return this;
}
/**
* Create a new TimeRange instance after validating the start and end params.
* Throws {@link ElasticsearchStatusException} if the validation fails
* @return The time range
*/
public TimeRange build() {
return createTimeRange(start, end);
}
private static TimeRange createTimeRange(String start, String end) {
Long epochStart = null;
Long epochEnd = null;
if (start.isEmpty() == false) {
epochStart = paramToEpochIfValidOrThrow(START_PARAM, start) / MILLISECONDS_IN_SECOND;
epochEnd = paramToEpochIfValidOrThrow(END_PARAM, end) / MILLISECONDS_IN_SECOND;
if (end.isEmpty() || epochEnd.equals(epochStart)) {
epochEnd = epochStart + 1;
}
if (epochEnd < epochStart) {
String msg = Messages.getMessage(Messages.REST_START_AFTER_END, end, start);
throw new IllegalArgumentException(msg);
}
} else {
if (end.isEmpty() == false) {
epochEnd = paramToEpochIfValidOrThrow(END_PARAM, end) / MILLISECONDS_IN_SECOND;
}
}
return new TimeRange(epochStart, epochEnd);
}
/**
* Returns epoch milli seconds
*/
private static long paramToEpochIfValidOrThrow(String paramName, String date) {
if (NOW.equals(date)) {
return System.currentTimeMillis();
}
long epoch = 0;
if (date.isEmpty() == false) {
epoch = TimeUtils.dateStringToEpoch(date);
if (epoch < 0) {
String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName, date);
throw new ElasticsearchParseException(msg);
}
}
return epoch;
}
}
}
| Builder |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/view/freemarker/FreeMarkerConfigurer.java | {
"start": 1399,
"end": 2607
} | class ____ to specify just a "templateLoaderPath"
* (for example, "classpath:templates"); you do not need any further configuration then.
*
* <p>This bean must be included in the application context of any application
* using {@link FreeMarkerView}. It exists purely to configure FreeMarker.
* It is not meant to be referenced by application components but just internally
* by {@code FreeMarkerView}. Implements {@link FreeMarkerConfig} to be found by
* {@code FreeMarkerView} without depending on the bean name of the configurer.
*
* <p>Note that you can also refer to a pre-configured FreeMarker {@code Configuration}
* instance via the "configuration" property. This allows to share a FreeMarker
* {@code Configuration} for web and email usage for example.
*
* <p>This configurer registers a template loader for this package, allowing to
* reference the "spring.ftl" macro library contained in this package:
*
* <pre class="code">
* <#import "/spring.ftl" as spring/>
* <@spring.bind "person.age"/>
* age is ${spring.status.value}</pre>
*
* <p>Note: Spring's FreeMarker support requires FreeMarker 2.3.33 or higher.
*
* @author Rossen Stoyanchev
* @since 5.0
*/
public | is |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/AnnotationIntrospector.java | {
"start": 49781,
"end": 54397
} | class ____ one.
*
* @return Default Creator to possibly use for {@code valueClass}, if one can be
* determined; {@code null} if not.
*/
public PotentialCreator findPreferredCreator(MapperConfig<?> config,
AnnotatedClass valueClass,
List<PotentialCreator> declaredConstructors,
List<PotentialCreator> declaredFactories,
Optional<PotentialCreator> zeroParamsConstructor) {
return null;
}
/*
/**********************************************************************
/* Deserialization: other property annotations
/**********************************************************************
*/
/**
* Method for checking whether given property accessors (method,
* field) has an annotation that suggests property name to use
* for deserialization (reading JSON into POJOs).
* Should return null if no annotation
* is found; otherwise a non-null name (possibly
* {@link PropertyName#USE_DEFAULT}, which means "use default heuristics").
*
* @param ann Annotated entity to check
*
* @return Name to use if found; null if not.
*/
public PropertyName findNameForDeserialization(MapperConfig<?> config, Annotated ann) {
return null;
}
/**
* Method for checking whether given method has an annotation
* that suggests that the method is to serve as "any setter";
* method to be used for setting values of any properties for
* which no dedicated setter method is found.
*
* @param ann Annotated entity to check
*
* @return True if such annotation is found (and is not disabled),
* false otherwise
*/
public Boolean hasAnySetter(MapperConfig<?> config, Annotated ann) {
return null;
}
/**
* Method for finding possible settings for property, given annotations
* on an accessor.
*/
public JsonSetter.Value findSetterInfo(MapperConfig<?> config, Annotated a) {
return JsonSetter.Value.empty();
}
/**
* Method for finding merge settings for property, if any.
*/
public Boolean findMergeInfo(MapperConfig<?> config, Annotated a) {
return null;
}
/*
/**********************************************************************
/* Overridable methods: may be used as low-level extension points.
/**********************************************************************
*/
/**
* Method that should be used by sub-classes for ALL
* annotation access;
* overridable so
* that sub-classes may, if they choose to, mangle actual access to
* block access ("hide" annotations) or perhaps change it.
*<p>
* Default implementation is simply:
*<code>
* return annotated.getAnnotation(annoClass);
*</code>
*
* @param ann Annotated entity to check for specified annotation
* @param annoClass Type of annotation to find
*
* @return Value of given annotation (as per {@code annoClass}), if entity
* has one; {@code null} otherwise
*/
protected <A extends Annotation> A _findAnnotation(Annotated ann,
Class<A> annoClass) {
return ann.getAnnotation(annoClass);
}
/**
* Method that should be used by sub-classes for ALL
* annotation existence access;
* overridable so that sub-classes may, if they choose to, mangle actual access to
* block access ("hide" annotations) or perhaps change value seen.
*<p>
* Default implementation is simply:
*<code>
* return annotated.hasAnnotation(annoClass);
*</code>
*
* @param ann Annotated entity to check for specified annotation
* @param annoClass Type of annotation to find
*
* @return {@code true} if specified annotation exists in given entity; {@code false} if not
*/
protected boolean _hasAnnotation(Annotated ann, Class<? extends Annotation> annoClass) {
return ann.hasAnnotation(annoClass);
}
/**
* Alternative lookup method that is used to see if annotation has at least one of
* annotations of types listed in second argument.
*
* @param ann Annotated entity to check for specified annotation
* @param annoClasses Types of annotation to find
*
* @return {@code true} if at least one of specified annotation exists in given entity;
* {@code false} otherwise
*/
protected boolean _hasOneOf(Annotated ann, Class<? extends Annotation>[] annoClasses) {
return ann.hasOneOf(annoClasses);
}
}
| has |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/DesensitizationTest_delete.java | {
"start": 213,
"end": 892
} | class ____ extends TestCase {
public void test_for_desensitization() throws Exception {
String sql = "DELETE FROM customers\n" +
"WHERE last_name = 'Smith';";
SQLUtils.FormatOption option = new SQLUtils.FormatOption();
option.setDesensitize(true);
option.setParameterized(true);
String desens_Sql = SQLUtils.format(sql, JdbcConstants.ORACLE, option);
System.out.println(sql);
System.out.println("-------------------");
System.out.println(desens_Sql);
assertEquals("DELETE FROM T_0C9879D1E6FFA3BE\n" +
"WHERE last_name = ?;", desens_Sql);
}
}
| DesensitizationTest_delete |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/test/websocket/WebSocketIntegrationTests.java | {
"start": 13641,
"end": 14155
} | class ____ implements WebSocketHandler {
@Override
public Mono<Void> handle(WebSocketSession session) {
HttpHeaders headers = session.getHandshakeInfo().getHeaders();
if (!headers.containsHeader("my-header")) {
return Mono.error(new IllegalStateException("Missing my-header"));
}
String payload = "my-header:" + headers.getFirst("my-header");
WebSocketMessage message = session.textMessage(payload);
return session.send(Mono.just(message));
}
}
private static final | CustomHeaderHandler |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/engine/DefaultSupervisingRouteControllerTest.java | {
"start": 9201,
"end": 9647
} | class ____ extends SedaEndpoint {
private final String name;
public MyJmsEndpoint(String name) {
this.name = name;
}
@Override
public Consumer createConsumer(Processor processor) {
return new MyJmsConsumer(this, processor);
}
@Override
protected String createEndpointUri() {
return "jms:" + name;
}
}
private static | MyJmsEndpoint |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/event/jfr/EventRecorder.java | {
"start": 552,
"end": 1421
} | interface ____ {
/**
* Obtain an instance of the {@link EventRecorder}.
*
* @return an instance of the {@link EventRecorder}.
*/
static EventRecorder getInstance() {
return EventRecorderHolder.EVENT_RECORDER;
}
/**
* Record an event.
*
* @param event the event to record, must not be {@code null}.
*/
void record(Event event);
/**
* Start recording an event. This method returns a {@link RecordableEvent} that can be recorded by calling
* {@link RecordableEvent#record()}. These events can be used to measure time between start and record.
*
* @param event the event to record, must not be {@code null}.
*/
RecordableEvent start(Event event);
/**
* Interface defining a recordable event that is recorded on calling {@link #record()}.
*/
| EventRecorder |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/AnalyticsEvent.java | {
"start": 2251,
"end": 6287
} | enum ____ {
PAGE_VIEW("page_view"),
SEARCH("search"),
SEARCH_CLICK("search_click");
private final String typeName;
Type(String typeName) {
this.typeName = typeName;
}
@Override
public String toString() {
return typeName.toLowerCase(Locale.ROOT);
}
}
private final String eventCollectionName;
private final Type eventType;
private final long eventTime;
private final BytesReference payload;
private final XContentType xContentType;
protected AnalyticsEvent(
String eventCollectionName,
long eventTime,
Type eventType,
XContentType xContentType,
BytesReference payload
) {
this.eventCollectionName = Strings.requireNonBlank(eventCollectionName, "eventCollectionName cannot be null");
this.eventTime = eventTime;
this.eventType = eventType;
this.xContentType = Objects.requireNonNull(xContentType, "xContentType cannot be null");
this.payload = Objects.requireNonNull(payload, "payload cannot be null");
}
public AnalyticsEvent(StreamInput in) throws IOException {
this(in.readString(), in.readLong(), in.readEnum(Type.class), in.readEnum(XContentType.class), in.readBytesReference());
}
public static Builder builder(AnalyticsEvent.Context context) {
return new AnalyticsEvent.Builder(context);
}
public String eventCollectionName() {
return eventCollectionName;
}
public long eventTime() {
return eventTime;
}
public Type eventType() {
return eventType;
}
public XContentType xContentType() {
return xContentType;
}
public BytesReference payload() {
return payload;
}
public Map<String, Object> payloadAsMap() {
return XContentHelper.convertToMap(payload(), true, xContentType()).v2();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(eventCollectionName);
out.writeLong(eventTime);
out.writeEnum(eventType);
XContentHelper.writeTo(out, xContentType);
out.writeBytesReference(payload);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field(TIMESTAMP_FIELD.getPreferredName(), eventTime());
builder.startObject(EVENT_FIELD.getPreferredName());
{
builder.field(EVENT_ACTION_FIELD.getPreferredName(), eventType());
}
builder.endObject();
builder.startObject(DATA_STREAM_FIELD.getPreferredName());
{
builder.field(DATA_STREAM_TYPE_FIELD.getPreferredName(), EVENT_DATA_STREAM_TYPE);
builder.field(DATA_STREAM_DATASET_FIELD.getPreferredName(), EVENT_DATA_STREAM_DATASET);
builder.field(DATA_STREAM_NAMESPACE_FIELD.getPreferredName(), eventCollectionName());
}
builder.endObject();
builder.mapContents(payloadAsMap());
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AnalyticsEvent that = (AnalyticsEvent) o;
return eventCollectionName.equals(that.eventCollectionName)
&& eventTime == that.eventTime
&& eventType == that.eventType
&& xContentType.equals(that.xContentType)
&& payloadAsMap().equals(that.payloadAsMap());
}
@Override
public int hashCode() {
return Objects.hash(eventCollectionName, eventTime, xContentType, payloadAsMap());
}
@Override
public String toString() {
return Strings.toString(this);
}
/**
* Analytics context. Used to carry information to parsers.
*/
public | Type |
java | netty__netty | common/src/main/java/io/netty/util/ResourceLeakDetectorFactory.java | {
"start": 4173,
"end": 5862
} | class ____ extends ResourceLeakDetectorFactory {
private final Constructor<?> obsoleteCustomClassConstructor;
private final Constructor<?> customClassConstructor;
DefaultResourceLeakDetectorFactory() {
String customLeakDetector;
try {
customLeakDetector = SystemPropertyUtil.get("io.netty.customResourceLeakDetector");
} catch (Throwable cause) {
logger.error("Could not access System property: io.netty.customResourceLeakDetector", cause);
customLeakDetector = null;
}
if (customLeakDetector == null) {
obsoleteCustomClassConstructor = customClassConstructor = null;
} else {
obsoleteCustomClassConstructor = obsoleteCustomClassConstructor(customLeakDetector);
customClassConstructor = customClassConstructor(customLeakDetector);
}
}
private static Constructor<?> obsoleteCustomClassConstructor(String customLeakDetector) {
try {
final Class<?> detectorClass = Class.forName(customLeakDetector, true,
PlatformDependent.getSystemClassLoader());
if (ResourceLeakDetector.class.isAssignableFrom(detectorClass)) {
return detectorClass.getConstructor(Class.class, int.class, long.class);
} else {
logger.error("Class {} does not inherit from ResourceLeakDetector.", customLeakDetector);
}
} catch (Throwable t) {
logger.error("Could not load custom resource leak detector | DefaultResourceLeakDetectorFactory |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestPostProcessorsCsrfTests.java | {
"start": 8273,
"end": 8374
} | class ____ {
@RequestMapping("/")
String index() {
return "Hi";
}
}
}
}
| TheController |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java | {
"start": 1338,
"end": 8928
} | class ____ {
private static String villain = "Dr. Heinz Doofenshmirtz";
private static String special =
". * | ? + \t ( ) [ ] { } ^ $ \\ \" %";
/**
*
*/
@Test
void testEncodeDecodeString() {
for (Separator separator : Separator.values()) {
testEncodeDecode(separator, "");
testEncodeDecode(separator, " ");
testEncodeDecode(separator, "!");
testEncodeDecode(separator, "?");
testEncodeDecode(separator, "&");
testEncodeDecode(separator, "+");
testEncodeDecode(separator, "\t");
testEncodeDecode(separator, "Dr.");
testEncodeDecode(separator, "Heinz");
testEncodeDecode(separator, "Doofenshmirtz");
testEncodeDecode(separator, villain);
testEncodeDecode(separator, special);
assertNull(separator.encode(null));
}
}
private void testEncodeDecode(Separator separator, String token) {
String encoded = separator.encode(token);
String decoded = separator.decode(encoded);
String msg = "token:" + token + " separator:" + separator + ".";
assertEquals(token, decoded, msg);
}
@Test
void testEncodeDecode() {
testEncodeDecode("Dr.", Separator.QUALIFIERS);
testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
Separator.QUALIFIERS);
testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
testEncodeDecode("Platypus...", (Separator) null);
testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
Separator.VALUES, Separator.SPACE);
}
@Test
void testEncodedValues() {
testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor %%%2$" +
"= no problem!",
Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
}
@Test
void testSplits() {
byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
for (Separator separator : Separator.values()) {
String str1 = "cl" + separator.getValue() + "us";
String str2 = separator.getValue() + "rst";
byte[] sepByteArr = Bytes.toBytes(separator.getValue());
byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
byte[] arr = separator.join(
Bytes.toBytes(separator.encode(str1)), longVal1Arr,
Bytes.toBytes(separator.encode(str2)), intVal1Arr);
int[] sizes = {Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
Separator.VARIABLE_SIZE, Bytes.SIZEOF_INT};
byte[][] splits = separator.split(arr, sizes);
assertEquals(4, splits.length);
assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
longVal1Arr = Bytes.add(Bytes.copy(maxLongBytes, 0, Bytes.SIZEOF_LONG -
sepByteArr.length), sepByteArr);
intVal1Arr = Bytes.add(Bytes.copy(maxIntBytes, 0, Bytes.SIZEOF_INT -
sepByteArr.length), sepByteArr);
arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
Bytes.toBytes(separator.encode(str2)), intVal1Arr);
splits = separator.split(arr, sizes);
assertEquals(4, splits.length);
assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
sepByteArr.length, 4 - sepByteArr.length), sepByteArr);
longVal1Arr = Bytes.add(longVal1Arr, Bytes.copy(maxLongBytes, 4, 3 -
sepByteArr.length), sepByteArr);
arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
Bytes.toBytes(separator.encode(str2)), intVal1Arr);
splits = separator.split(arr, sizes);
assertEquals(4, splits.length);
assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
arr = separator.join(Bytes.toBytes(separator.encode(str1)),
Bytes.toBytes(separator.encode(str2)), intVal1Arr, longVal1Arr);
int[] sizes1 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
Bytes.SIZEOF_INT, Bytes.SIZEOF_LONG};
splits = separator.split(arr, sizes1);
assertEquals(4, splits.length);
assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
assertEquals(str2, separator.decode(Bytes.toString(splits[1])));
assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[2]));
assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[3]));
try {
int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
Bytes.SIZEOF_INT, 7};
splits = separator.split(arr, sizes2);
fail("Exception should have been thrown.");
} catch (IllegalArgumentException e) {
}
try {
int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, 2,
Bytes.SIZEOF_LONG};
splits = separator.split(arr, sizes2);
fail("Exception should have been thrown.");
} catch (IllegalArgumentException e) {
}
}
}
/**
* Simple test to encode and decode using the same separators and confirm that
* we end up with the same as what we started with.
*
* @param token
* @param separators
*/
private static void testEncodeDecode(String token, Separator... separators) {
byte[] encoded = Separator.encode(token, separators);
String decoded = Separator.decode(encoded, separators);
assertEquals(token, decoded);
}
@Test
void testJoinStripped() {
List<String> stringList = new ArrayList<String>(0);
stringList.add("nothing");
String joined = Separator.VALUES.joinEncoded(stringList);
Iterable<String> split = Separator.VALUES.splitEncoded(joined);
assertTrue(Iterables.elementsEqual(stringList, split));
stringList = new ArrayList<String>(3);
stringList.add("a");
stringList.add("b?");
stringList.add("c");
joined = Separator.VALUES.joinEncoded(stringList);
split = Separator.VALUES.splitEncoded(joined);
assertTrue(Iterables.elementsEqual(stringList, split));
String[] stringArray1 = {"else"};
joined = Separator.VALUES.joinEncoded(stringArray1);
split = Separator.VALUES.splitEncoded(joined);
assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray1), split));
String[] stringArray2 = {"d", "e?", "f"};
joined = Separator.VALUES.joinEncoded(stringArray2);
split = Separator.VALUES.splitEncoded(joined);
assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray2), split));
List<String> empty = new ArrayList<String>(0);
split = Separator.VALUES.splitEncoded(null);
assertTrue(Iterables.elementsEqual(empty, split));
}
}
| TestSeparator |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/AbstractNestedCondition.java | {
"start": 2678,
"end": 3597
} | class ____ {
private final List<ConditionOutcome> all;
private final List<ConditionOutcome> matches;
private final List<ConditionOutcome> nonMatches;
public MemberMatchOutcomes(MemberConditions memberConditions) {
this.all = Collections.unmodifiableList(memberConditions.getMatchOutcomes());
List<ConditionOutcome> matches = new ArrayList<>();
List<ConditionOutcome> nonMatches = new ArrayList<>();
for (ConditionOutcome outcome : this.all) {
(outcome.isMatch() ? matches : nonMatches).add(outcome);
}
this.matches = Collections.unmodifiableList(matches);
this.nonMatches = Collections.unmodifiableList(nonMatches);
}
public List<ConditionOutcome> getAll() {
return this.all;
}
public List<ConditionOutcome> getMatches() {
return this.matches;
}
public List<ConditionOutcome> getNonMatches() {
return this.nonMatches;
}
}
private static | MemberMatchOutcomes |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceSettings.java | {
"start": 1360,
"end": 6906
} | class ____ extends FilteredXContentObject
implements
ServiceSettings,
AlibabaCloudSearchRateLimitServiceSettings {
public static final String NAME = "alibabacloud_search_service_settings";
public static final String SERVICE_ID = "service_id";
public static final String HOST = "host";
public static final String WORKSPACE_NAME = "workspace";
public static final String HTTP_SCHEMA_NAME = "http_schema";
private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(1_000);
public static AlibabaCloudSearchServiceSettings fromMap(Map<String, Object> map, ConfigurationParseContext context) {
ValidationException validationException = new ValidationException();
String modelId = extractRequiredString(map, SERVICE_ID, ModelConfigurations.SERVICE_SETTINGS, validationException);
String host = extractRequiredString(map, HOST, ModelConfigurations.SERVICE_SETTINGS, validationException);
var workspaceName = extractRequiredString(map, WORKSPACE_NAME, ModelConfigurations.SERVICE_SETTINGS, validationException);
var httpSchema = extractOptionalString(map, HTTP_SCHEMA_NAME, ModelConfigurations.SERVICE_SETTINGS, validationException);
if (httpSchema != null) {
var validSchemas = Set.of("https", "http");
if (validSchemas.contains(httpSchema) == false) {
validationException.addValidationError("Invalid value for [http_schema]. Must be one of [https, http]");
}
}
RateLimitSettings rateLimitSettings = RateLimitSettings.of(
map,
DEFAULT_RATE_LIMIT_SETTINGS,
validationException,
AlibabaCloudSearchService.NAME,
context
);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new AlibabaCloudSearchServiceSettings(modelId, host, workspaceName, httpSchema, rateLimitSettings);
}
private final String serviceId;
private final String host;
private final String workspaceName;
private final String httpSchema;
private final RateLimitSettings rateLimitSettings;
public AlibabaCloudSearchServiceSettings(
String serviceId,
String host,
String workspaceName,
@Nullable String httpSchema,
@Nullable RateLimitSettings rateLimitSettings
) {
this.serviceId = serviceId;
this.host = host;
this.workspaceName = workspaceName;
this.httpSchema = httpSchema;
this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS);
}
public AlibabaCloudSearchServiceSettings(StreamInput in) throws IOException {
serviceId = in.readString();
host = in.readString();
workspaceName = in.readString();
httpSchema = in.readOptionalString();
rateLimitSettings = new RateLimitSettings(in);
}
@Override
public String modelId() {
return serviceId;
}
public String getHost() {
return host;
}
public String getWorkspaceName() {
return workspaceName;
}
public String getHttpSchema() {
return httpSchema;
}
@Override
public RateLimitSettings rateLimitSettings() {
return rateLimitSettings;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
toXContentFragment(builder, params);
builder.endObject();
return builder;
}
public XContentBuilder toXContentFragment(XContentBuilder builder, Params params) throws IOException {
return toXContentFragmentOfExposedFields(builder, params);
}
@Override
public XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException {
if (serviceId != null) {
builder.field(SERVICE_ID, serviceId);
}
builder.field(HOST, host);
builder.field(WORKSPACE_NAME, workspaceName);
if (httpSchema != null) {
builder.field(HTTP_SCHEMA_NAME, httpSchema);
}
rateLimitSettings.toXContent(builder, params);
return builder;
}
@Override
public ToXContentObject getFilteredXContentObject() {
return this;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_16_0;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(serviceId);
out.writeString(host);
out.writeString(workspaceName);
out.writeOptionalString(httpSchema);
rateLimitSettings.writeTo(out);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AlibabaCloudSearchServiceSettings that = (AlibabaCloudSearchServiceSettings) o;
return Objects.equals(serviceId, that.serviceId)
&& Objects.equals(host, that.host)
&& Objects.equals(workspaceName, that.workspaceName)
&& Objects.equals(httpSchema, that.httpSchema);
}
@Override
public int hashCode() {
return Objects.hash(serviceId, host, workspaceName, httpSchema);
}
}
| AlibabaCloudSearchServiceSettings |
java | grpc__grpc-java | benchmarks/src/jmh/java/io/grpc/benchmarks/netty/AbstractBenchmark.java | {
"start": 3402,
"end": 3692
} | enum ____ {
NIO, LOCAL;
}
private static final CallOptions CALL_OPTIONS = CallOptions.DEFAULT;
private static final InetAddress BENCHMARK_ADDR = buildBenchmarkAddr();
/**
* Resolve the address bound to the benchmark interface. Currently we assume it's a
* child | ChannelType |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataImpl.java | {
"start": 20006,
"end": 20460
} | class ____ known: " + entityName );
}
if ( !persistentClass.hasIdentifierProperty() ) {
return null;
}
return persistentClass.getIdentifierProperty().getName();
}
@Override
public org.hibernate.type.Type getReferencedPropertyType(String entityName, String propertyName) throws MappingException {
final var persistentClass = entityBindingMap.get( entityName );
if ( persistentClass == null ) {
throw new MappingException( "Persistent | not |
java | apache__camel | components/camel-huawei/camel-huaweicloud-imagerecognition/src/test/java/org/apache/camel/component/huaweicloud/image/TagRecognitionWithImageUrlAndMockClientTest.java | {
"start": 1410,
"end": 3734
} | class ____ extends CamelTestSupport {
TestConfiguration testConfiguration = new TestConfiguration();
@BindToRegistry("imageClient")
ImageClientMock imageClient = new ImageClientMock(null);
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:trigger_route")
.setProperty(ImageRecognitionProperties.IMAGE_URL,
constant(testConfiguration.getProperty("imageUrl")))
.setProperty(ImageRecognitionProperties.THRESHOLD,
constant(testConfiguration.getProperty("tagThreshold")))
.to("hwcloud-imagerecognition:tagRecognition?accessKey=" + testConfiguration.getProperty("accessKey")
+ "&secretKey=" + testConfiguration.getProperty("secretKey") + "&projectId="
+ testConfiguration.getProperty("projectId") + "®ion="
+ testConfiguration.getProperty("region") + "&ignoreSslVerification=true"
+ "&imageClient=#imageClient")
.log("perform tag recognition successful")
.to("mock:perform_tag_recognition_result");
}
};
}
@Test
public void testTagRecognition() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:perform_tag_recognition_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:trigger_route", "");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertTrue(responseExchange.getIn().getBody() instanceof ImageTaggingResponseResult);
ImageTaggingResponseResult response = responseExchange.getIn().getBody(ImageTaggingResponseResult.class);
assertEquals(1, response.getTags().size());
assertEquals(MockResult.TAG_RECOGNITION_RESULT_TAG, response.getTags().get(0).getTag());
assertEquals(MockResult.TAG_RECOGNITION_RESULT_TYPE, response.getTags().get(0).getType());
assertEquals(MockResult.TAG_RECOGNITION_RESULT_CONFIDENCE,
response.getTags().get(0).getConfidence());
}
}
| TagRecognitionWithImageUrlAndMockClientTest |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/auth/AuthConfig.java | {
"start": 1092,
"end": 2579
} | class ____ {
@Bean
public FilterRegistrationBean<AuthFilter> authFilterRegistration(AuthFilter authFilter) {
FilterRegistrationBean<AuthFilter> registration = new FilterRegistrationBean<>();
registration.setFilter(authFilter);
registration.addUrlPatterns("/*");
registration.setName("authFilter");
registration.setOrder(6);
return registration;
}
@Bean
public FilterRegistrationBean<AuthAdminFilter> authAdminFilterRegistration(AuthAdminFilter authAdminFilter) {
FilterRegistrationBean<AuthAdminFilter> registration = new FilterRegistrationBean<>();
registration.setFilter(authAdminFilter);
registration.addUrlPatterns("/*");
registration.setName("authAdminFilter");
registration.setOrder(6);
return registration;
}
@Bean
public AuthFilter authFilter(ControllerMethodsCache methodsCache, InnerApiAuthEnabled innerApiAuthEnabled) {
return new AuthFilter(NacosAuthConfigHolder.getInstance()
.getNacosAuthConfigByScope(NacosServerAuthConfig.NACOS_SERVER_AUTH_SCOPE), methodsCache,
innerApiAuthEnabled);
}
@Bean
public AuthAdminFilter authAdminFilter(ControllerMethodsCache methodsCache) {
return new AuthAdminFilter(NacosAuthConfigHolder.getInstance()
.getNacosAuthConfigByScope(NacosServerAdminAuthConfig.NACOS_SERVER_ADMIN_AUTH_SCOPE), methodsCache);
}
} | AuthConfig |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/AbstractField.java | {
"start": 481,
"end": 1293
} | class ____ implements ExtractedField {
private final String name;
private final Set<String> types;
AbstractField(String name, Set<String> types) {
this.name = Objects.requireNonNull(name);
this.types = Objects.requireNonNull(types);
}
@Override
public String getName() {
return name;
}
@Override
public String getSearchField() {
return name;
}
@Override
public Set<String> getTypes() {
return types;
}
protected Object[] getFieldValue(SearchHit hit) {
DocumentField keyValue = hit.field(getSearchField());
if (keyValue != null) {
List<Object> values = keyValue.getValues();
return values.toArray(new Object[0]);
}
return new Object[0];
}
}
| AbstractField |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ContentLocation.java | {
"start": 356,
"end": 798
} | class ____ {
public static final ContentLocation UNKNOWN = new ContentLocation(-1, -1);
public final int lineNumber;
public final int columnNumber;
public ContentLocation(int lineNumber, int columnNumber) {
super();
this.lineNumber = lineNumber;
this.columnNumber = columnNumber;
}
@Override
public String toString() {
return lineNumber + ":" + columnNumber;
}
}
| ContentLocation |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/AnnotationBasedAuthMechanismSelectionTest.java | {
"start": 1940,
"end": 11511
} | class ____ {
private static final List<AuthMechRequest> REQUESTS = List.of(
new AuthMechRequest("annotated-http-permissions/no-roles-allowed-basic").basic().noRbacAnnotation(),
new AuthMechRequest("unannotated-http-permissions/no-roles-allowed-basic").basic().noRbacAnnotation(),
new AuthMechRequest("annotated-http-permissions/roles-allowed-annotation-basic-auth").basic(),
new AuthMechRequest("unannotated-http-permissions/roles-allowed-annotation-basic-auth").basic(),
new AuthMechRequest("annotated-http-permissions/unauthenticated-form").form().noRbacAnnotation(),
new AuthMechRequest("unannotated-http-permissions/unauthenticated-form").form().noRbacAnnotation(),
new AuthMechRequest("annotated-http-permissions/authenticated-form").form().authRequest(),
new AuthMechRequest("unannotated-http-permissions/authenticated-form").form().authRequest(),
new AuthMechRequest("annotated-http-permissions/custom-inherited").custom(),
new AuthMechRequest("annotated-http-permissions/basic-inherited").basic().authRequest(),
new AuthMechRequest("annotated-http-permissions/form-default").form().defaultAuthMech().authRequest(),
new AuthMechRequest("annotated-http-permissions/custom").custom().noRbacAnnotation(),
new AuthMechRequest("annotated-http-permissions/custom-roles-allowed").custom(),
new AuthMechRequest("unannotated-http-permissions/deny-custom").custom().denyPolicy(),
new AuthMechRequest("annotated-http-permissions/roles-allowed-jax-rs-policy").form());
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(TestIdentityProvider.class, TestIdentityController.class,
CustomBasicAuthMechanism.class, AbstractHttpPermissionsResource.class,
AnnotatedHttpPermissionsResource.class, AbstractAnnotatedHttpPermissionsResource.class,
UnannotatedHttpPermissionsResource.class, AuthMechRequest.class,
TestTrustedIdentityProvider.class)
.addAsResource(
new StringAsset(
"""
quarkus.http.auth.proactive=false
quarkus.http.auth.form.enabled=true
quarkus.http.auth.form.login-page=
quarkus.http.auth.form.error-page=
quarkus.http.auth.form.landing-page=
quarkus.http.auth.basic=true
quarkus.http.auth.permission.roles1.paths=/annotated-http-permissions/roles-allowed,/unannotated-http-permissions/roles-allowed
quarkus.http.auth.permission.roles1.policy=roles1
quarkus.http.auth.permission.jax-rs.paths=/annotated-http-permissions/roles-allowed-jax-rs-policy
quarkus.http.auth.permission.jax-rs.policy=roles1
quarkus.http.auth.permission.jax-rs.applies-to=JAXRS
quarkus.http.auth.policy.roles1.roles-allowed=admin
quarkus.http.auth.permission.authenticated.auth-mechanism=basic
quarkus.http.auth.permission.authenticated.paths=/annotated-http-permissions/authenticated,/unannotated-http-permissions/authenticated
quarkus.http.auth.permission.authenticated.policy=authenticated
quarkus.http.auth.permission.same-mechanism.paths=/annotated-http-permissions/same-mech
quarkus.http.auth.permission.same-mechanism.policy=authenticated
quarkus.http.auth.permission.same-mechanism.auth-mechanism=custom
quarkus.http.auth.permission.diff-mechanism.paths=/annotated-http-permissions/diff-mech
quarkus.http.auth.permission.diff-mechanism.policy=authenticated
quarkus.http.auth.permission.diff-mechanism.auth-mechanism=basic
quarkus.http.auth.permission.permit1.paths=/annotated-http-permissions/permit,/unannotated-http-permissions/permit
quarkus.http.auth.permission.permit1.policy=permit
quarkus.http.auth.permission.deny1.paths=/annotated-http-permissions/deny,/unannotated-http-permissions/deny
quarkus.http.auth.permission.deny1.policy=deny
"""),
"application.properties"));
@BeforeAll
public static void setupUsers() {
TestIdentityController.resetRoles()
.add("admin", "admin", "admin")
.add("user", "user", "user");
RestAssured.enableLoggingOfRequestAndResponseIfValidationFails();
}
@MethodSource("authMechanismRequestsIdxs")
@ParameterizedTest
public void testAuthMechanismSelection(final int idx) {
var in = REQUESTS.get(idx);
in.requestSpecification.get()
.get(in.path)
.then()
.statusCode(in.expectedStatus)
.body(is(in.expectedBody))
.header(in.expectedHeaderKey, in.expectedHeaderVal);
if (in.authRequired && in.unauthorizedRequestSpec != null) {
in.unauthorizedRequestSpec.get().get(in.path).then().statusCode(403).header(in.expectedHeaderKey,
in.expectedHeaderVal);
}
if (in.authRequired && in.unauthenticatedRequestSpec != null) {
in.unauthenticatedRequestSpec.get().get(in.path).then().statusCode(401).header(in.expectedHeaderKey,
in.expectedHeaderVal);
}
if (in.requestUsingOtherAuthMech != null) {
if (in.authRequired) {
in.requestUsingOtherAuthMech.get().get(in.path).then().statusCode(401).header(in.expectedHeaderKey,
in.expectedHeaderVal);
} else {
// anonymous request - principal name is empty
in.requestUsingOtherAuthMech.get().get(in.path).then().header(in.expectedHeaderKey,
in.expectedHeaderVal).body(is(""));
}
}
}
@Test
public void testHttpPolicyApplied() {
given().get("/annotated-http-permissions/authenticated").then().statusCode(401);
given().get("/unannotated-http-permissions/authenticated").then().statusCode(401);
given().get("/annotated-http-permissions/deny").then().statusCode(401);
given().get("/unannotated-http-permissions/deny").then().statusCode(401);
// both basic and form auth mechanism can be used even though the resource is annotated with 'form'
// because HTTP policies are applied before the mechanism is selected
requestWithBasicAuthUser().get("/annotated-http-permissions/roles-allowed").then().statusCode(403);
requestWithFormAuth("user").get("/unannotated-http-permissions/roles-allowed").then().statusCode(403);
requestWithFormAuth("admin").get("/annotated-http-permissions/roles-allowed").then().statusCode(200);
requestWithFormAuth("admin").get("/unannotated-http-permissions/roles-allowed").then().statusCode(200);
requestWithFormAuth("user").get("/unannotated-http-permissions/authenticated").then().statusCode(401);
// works because no authentication is performed by HTTP permissions policy 'permit', but for @Form is applied
// @Authenticated by default
given().get("/annotated-http-permissions/permit").then().statusCode(401);
given().get("/unannotated-http-permissions/permit").then().statusCode(401);
}
@Test
public void testBothHttpSecPolicyAndAnnotationApplied() {
// HTTP policy requires basic, but resource method inherits class-level `@Form` annotation
requestWithBasicAuthUser().get("/annotated-http-permissions/authenticated").then().statusCode(401);
requestWithFormAuth("user").get("/annotated-http-permissions/authenticated").then().statusCode(401);
// send both form & basic credentials
requestWithFormAuth("user").auth().preemptive().basic("admin", "admin").get("/annotated-http-permissions/authenticated")
.then().statusCode(401);
}
@Test
public void testAuthenticatedHttpPolicyUsingSameMechanism() {
requestWithBasicAuthUser().get("/annotated-http-permissions/same-mech").then().statusCode(200);
}
@Test
public void testAuthenticatedHttpPolicyUsingDiffMechanism() {
requestWithBasicAuthUser().get("/annotated-http-permissions/diff-mech").then().statusCode(401);
}
private static IntStream authMechanismRequestsIdxs() {
return IntStream.range(0, REQUESTS.size());
}
@Path("unannotated-http-permissions")
public static | AnnotationBasedAuthMechanismSelectionTest |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/GatewayMetricsFilterTests.java | {
"start": 4784,
"end": 5245
} | class ____ {
@Value("${test.uri}")
protected String testUri;
@Bean
public RouteLocator myRouteLocator(RouteLocatorBuilder builder) {
return builder.routes()
.route("test_custom_http_status_metrics",
r -> r.host("*.setcustomstatusmetrics.org").filters(f -> f.setStatus(432)).uri(testUri))
.build();
}
@GetMapping("/httpbin/badtargeturi")
public String exception() {
throw new RuntimeException("an error");
}
}
}
| CustomConfig |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/startup/StartupEndpoint.java | {
"start": 1933,
"end": 2795
} | class ____ {
private final BufferingApplicationStartup applicationStartup;
/**
* Creates a new {@code StartupEndpoint} that will describe the timeline of buffered
* application startup events.
* @param applicationStartup the application startup
*/
public StartupEndpoint(BufferingApplicationStartup applicationStartup) {
this.applicationStartup = applicationStartup;
}
@ReadOperation
public StartupDescriptor startupSnapshot() {
StartupTimeline startupTimeline = this.applicationStartup.getBufferedTimeline();
return new StartupDescriptor(startupTimeline);
}
@WriteOperation
public StartupDescriptor startup() {
StartupTimeline startupTimeline = this.applicationStartup.drainBufferedTimeline();
return new StartupDescriptor(startupTimeline);
}
/**
* Description of an application startup.
*/
public static final | StartupEndpoint |
java | spring-projects__spring-security | webauthn/src/main/java/org/springframework/security/web/webauthn/jackson/AuthenticatorAttestationResponseJackson2Mixin.java | {
"start": 1671,
"end": 1888
} | class ____ {
@JsonSetter
abstract AuthenticatorAttestationResponse.AuthenticatorAttestationResponseBuilder transports(
List<AuthenticatorTransport> transports);
}
}
| AuthenticatorAttestationResponseBuilderMixin |
java | apache__spark | resource-managers/yarn/src/main/java/org/apache/spark/deploy/yarn/AmIpServletRequestWrapper.java | {
"start": 985,
"end": 1172
} | class ____ copied from Hadoop 3.4.0
// org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper
//
// Modification:
// Migrate from javax.servlet to jakarta.servlet
public | is |
java | quarkusio__quarkus | extensions/hibernate-reactive/deployment/src/test/java/io/quarkus/hibernate/reactive/config/datasource/MultiplePUAsAlternativesWithBeanProducerTest.java | {
"start": 6095,
"end": 6848
} | class ____ {
@Inject
@PersistenceUnit("pu-1")
InjectableInstance<Mutiny.SessionFactory> pu1SessionFactoryBean;
@Inject
@PersistenceUnit("pu-2")
InjectableInstance<Mutiny.SessionFactory> pu2SessionFactoryBean;
@Produces
@ApplicationScoped
public Mutiny.SessionFactory sessionFactory() {
if (pu1SessionFactoryBean.getHandle().getBean().isActive()) {
return pu1SessionFactoryBean.get();
} else if (pu2SessionFactoryBean.getHandle().getBean().isActive()) {
return pu2SessionFactoryBean.get();
} else {
throw new RuntimeException("No active persistence unit!");
}
}
}
}
| MyProducer |
java | elastic__elasticsearch | client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java | {
"start": 6769,
"end": 15093
} | class ____ implements HttpHandler {
private final int sniffTimeoutMillis;
private final SniffResponse sniffResponse;
ResponseHandler(int sniffTimeoutMillis, SniffResponse sniffResponse) {
this.sniffTimeoutMillis = sniffTimeoutMillis;
this.sniffResponse = sniffResponse;
}
@Override
public void handle(HttpExchange httpExchange) throws IOException {
if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) {
if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeoutMillis + "ms")) {
String nodesInfoBody = sniffResponse.nodesInfoBody;
httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length());
try (OutputStream out = httpExchange.getResponseBody()) {
out.write(nodesInfoBody.getBytes(Consts.UTF_8));
return;
}
}
}
httpExchange.sendResponseHeaders(404, 0);
httpExchange.close();
}
}
private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException {
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
List<Node> nodes = new ArrayList<>(numNodes);
JsonFactory jsonFactory = new JsonFactory();
StringWriter writer = new StringWriter();
JsonGenerator generator = jsonFactory.createGenerator(writer);
generator.writeStartObject();
if (getRandom().nextBoolean()) {
generator.writeStringField("cluster_name", "elasticsearch");
}
if (getRandom().nextBoolean()) {
generator.writeObjectFieldStart("bogus_object");
generator.writeEndObject();
}
generator.writeObjectFieldStart("nodes");
for (int i = 0; i < numNodes; i++) {
String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10);
String host = "host" + i;
int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
HttpHost publishHost = new HttpHost(host, port, scheme.toString());
Set<HttpHost> boundHosts = new HashSet<>();
boundHosts.add(publishHost);
if (randomBoolean()) {
int bound = between(1, 5);
for (int b = 0; b < bound; b++) {
boundHosts.add(new HttpHost(host + b, port, scheme.toString()));
}
}
int numAttributes = between(0, 5);
Map<String, List<String>> attributes = new HashMap<>(numAttributes);
for (int j = 0; j < numAttributes; j++) {
int numValues = frequently() ? 1 : between(2, 5);
List<String> values = new ArrayList<>();
for (int v = 0; v < numValues; v++) {
values.add(j + "value" + v);
}
attributes.put("attr" + j, values);
}
final Set<String> nodeRoles = new TreeSet<>();
if (randomBoolean()) {
nodeRoles.add("master");
}
if (randomBoolean()) {
nodeRoles.add("data");
}
if (randomBoolean()) {
nodeRoles.add("data_content");
}
if (randomBoolean()) {
nodeRoles.add("data_hot");
}
if (randomBoolean()) {
nodeRoles.add("data_warm");
}
if (randomBoolean()) {
nodeRoles.add("data_cold");
}
if (randomBoolean()) {
nodeRoles.add("data_frozen");
}
if (randomBoolean()) {
nodeRoles.add("ingest");
}
Node node = new Node(
publishHost,
boundHosts,
randomAsciiAlphanumOfLength(5),
randomAsciiAlphanumOfLength(5),
new Node.Roles(nodeRoles),
attributes
);
generator.writeObjectFieldStart(nodeId);
if (getRandom().nextBoolean()) {
generator.writeObjectFieldStart("bogus_object");
generator.writeEndObject();
}
if (getRandom().nextBoolean()) {
generator.writeArrayFieldStart("bogus_array");
generator.writeStartObject();
generator.writeEndObject();
generator.writeEndArray();
}
boolean isHttpEnabled = rarely() == false;
if (isHttpEnabled) {
nodes.add(node);
generator.writeObjectFieldStart("http");
generator.writeArrayFieldStart("bound_address");
for (HttpHost bound : boundHosts) {
generator.writeString(bound.toHostString());
}
generator.writeEndArray();
if (getRandom().nextBoolean()) {
generator.writeObjectFieldStart("bogus_object");
generator.writeEndObject();
}
generator.writeStringField("publish_address", publishHost.toHostString());
if (getRandom().nextBoolean()) {
generator.writeNumberField("max_content_length_in_bytes", 104857600);
}
generator.writeEndObject();
}
List<String> roles = Arrays.asList(
new String[] { "master", "data", "ingest", "data_content", "data_hot", "data_warm", "data_cold", "data_frozen" }
);
Collections.shuffle(roles, getRandom());
generator.writeArrayFieldStart("roles");
for (String role : roles) {
if ("master".equals(role) && node.getRoles().isMasterEligible()) {
generator.writeString("master");
}
if ("data".equals(role) && node.getRoles().hasDataRole()) {
generator.writeString("data");
}
if ("data_content".equals(role) && node.getRoles().hasDataContentRole()) {
generator.writeString("data_content");
}
if ("data_hot".equals(role) && node.getRoles().hasDataHotRole()) {
generator.writeString("data_hot");
}
if ("data_warm".equals(role) && node.getRoles().hasDataWarmRole()) {
generator.writeString("data_warm");
}
if ("data_cold".equals(role) && node.getRoles().hasDataColdRole()) {
generator.writeString("data_cold");
}
if ("data_frozen".equals(role) && node.getRoles().hasDataFrozenRole()) {
generator.writeString("data_frozen");
}
if ("ingest".equals(role) && node.getRoles().isIngest()) {
generator.writeString("ingest");
}
}
generator.writeEndArray();
generator.writeFieldName("version");
generator.writeString(node.getVersion());
generator.writeFieldName("name");
generator.writeString(node.getName());
if (numAttributes > 0) {
generator.writeObjectFieldStart("attributes");
for (Map.Entry<String, List<String>> entry : attributes.entrySet()) {
if (entry.getValue().size() == 1) {
generator.writeStringField(entry.getKey(), entry.getValue().get(0));
} else {
for (int v = 0; v < entry.getValue().size(); v++) {
generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v));
}
}
}
generator.writeEndObject();
}
generator.writeEndObject();
}
generator.writeEndObject();
generator.writeEndObject();
generator.close();
return SniffResponse.buildResponse(writer.toString(), nodes);
}
private static | ResponseHandler |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/CoBroadcastWithKeyedOperatorTest.java | {
"start": 34662,
"end": 38284
} | class ____<T> implements KeySelector<T, T> {
private static final long serialVersionUID = 1L;
@Override
public T getKey(T value) throws Exception {
return value;
}
}
private static <KEY, IN1, IN2, OUT>
TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> getInitializedTestHarness(
final TypeInformation<KEY> keyTypeInfo,
final KeySelector<IN1, KEY> keyKeySelector,
final KeyedBroadcastProcessFunction<KEY, IN1, IN2, OUT> function)
throws Exception {
return getInitializedTestHarness(keyTypeInfo, keyKeySelector, function, 1, 1, 0);
}
private static <KEY, IN1, IN2, OUT>
TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> getInitializedTestHarness(
final TypeInformation<KEY> keyTypeInfo,
final KeySelector<IN1, KEY> keyKeySelector,
final KeyedBroadcastProcessFunction<KEY, IN1, IN2, OUT> function,
final int maxParallelism,
final int numTasks,
final int taskIdx)
throws Exception {
return getInitializedTestHarness(
keyTypeInfo, keyKeySelector, function, maxParallelism, numTasks, taskIdx, null);
}
private static OperatorSubtaskState repartitionInitState(
final OperatorSubtaskState initState,
final int numKeyGroups,
final int oldParallelism,
final int newParallelism,
final int subtaskIndex) {
return AbstractStreamOperatorTestHarness.repartitionOperatorState(
initState, numKeyGroups, oldParallelism, newParallelism, subtaskIndex);
}
private static <KEY, IN1, IN2, OUT>
TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> getInitializedTestHarness(
final TypeInformation<KEY> keyTypeInfo,
final KeySelector<IN1, KEY> keyKeySelector,
final KeyedBroadcastProcessFunction<KEY, IN1, IN2, OUT> function,
final int maxParallelism,
final int numTasks,
final int taskIdx,
final OperatorSubtaskState initState)
throws Exception {
final TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> testHarness =
new KeyedTwoInputStreamOperatorTestHarness<>(
new CoBroadcastWithKeyedOperator<>(
Preconditions.checkNotNull(function),
Collections.singletonList(STATE_DESCRIPTOR)),
keyKeySelector,
null,
keyTypeInfo,
maxParallelism,
numTasks,
taskIdx);
testHarness.setup();
testHarness.initializeState(initState);
testHarness.open();
return testHarness;
}
private static String mapToString(List<Map.Entry<String, Integer>> entries) {
entries.sort(
Comparator.comparing(
(Function<Map.Entry<String, Integer>, String>) Map.Entry::getKey)
.thenComparingInt(Map.Entry::getValue));
final StringBuilder builder = new StringBuilder();
for (Map.Entry<String, Integer> entry : entries) {
builder.append(' ').append(entry.getKey()).append('=').append(entry.getValue());
}
return builder.toString();
}
}
| IdentityKeySelector |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/client/RedisClientName.java | {
"start": 930,
"end": 1040
} | interface ____ {
/**
* The Redis client name.
*/
String value() default "";
| RedisClientName |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetoone/bidirectional/BidirectionalOneToOneWithIdClassesTest.java | {
"start": 2159,
"end": 2343
} | class ____ implements Serializable {
@ManyToOne
private Operator operator;
private String price;
}
@Entity( name = "Product" )
@IdClass( ProductPK.class )
public static | PricePK |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.