language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
grpc__grpc-java
|
istio-interop-testing/src/generated/main/grpc/io/istio/test/EchoTestServiceGrpc.java
|
{
"start": 6474,
"end": 7086
}
|
interface ____ {
/**
*/
default void echo(io.istio.test.Echo.EchoRequest request,
io.grpc.stub.StreamObserver<io.istio.test.Echo.EchoResponse> responseObserver) {
io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getEchoMethod(), responseObserver);
}
/**
*/
default void forwardEcho(io.istio.test.Echo.ForwardEchoRequest request,
io.grpc.stub.StreamObserver<io.istio.test.Echo.ForwardEchoResponse> responseObserver) {
io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getForwardEchoMethod(), responseObserver);
}
}
/**
* Base
|
AsyncService
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/RawTypeInjectionTest.java
|
{
"start": 3885,
"end": 4146
}
|
class ____<T> {",
" @Inject Foo() {}",
"}");
Source bar =
CompilerTests.javaSource(
"test.Bar",
"package test;",
"",
"import javax.inject.Inject;",
"",
"
|
Foo
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java
|
{
"start": 879,
"end": 1875
}
|
class ____ extends TransportAction<DeleteConnectorAction.Request, AcknowledgedResponse> {
protected final ConnectorIndexService connectorIndexService;
@Inject
public TransportDeleteConnectorAction(TransportService transportService, ActionFilters actionFilters, Client client) {
super(DeleteConnectorAction.NAME, actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.connectorIndexService = new ConnectorIndexService(client);
}
@Override
protected void doExecute(Task task, DeleteConnectorAction.Request request, ActionListener<AcknowledgedResponse> listener) {
String connectorId = request.getConnectorId();
boolean hardDelete = request.isHardDelete();
boolean shouldDeleteSyncJobs = request.shouldDeleteSyncJobs();
connectorIndexService.deleteConnector(connectorId, hardDelete, shouldDeleteSyncJobs, listener.map(v -> AcknowledgedResponse.TRUE));
}
}
|
TransportDeleteConnectorAction
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/beans/BeanGenerator.java
|
{
"start": 1840,
"end": 1983
}
|
class ____ extend. The class
* must not be declared as final, and must have a non-private
* no-argument constructor.
* @param superclass
|
will
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/IndexInputStats.java
|
{
"start": 883,
"end": 7213
}
|
class ____ {
/* A threshold beyond which an index input seeking is counted as "large" */
static final ByteSizeValue SEEKING_THRESHOLD = ByteSizeValue.of(8, ByteSizeUnit.MB);
private final long numFiles;
private final long totalSize;
private final long minSize;
private final long maxSize;
private final long seekingThreshold;
private final LongSupplier currentTimeNanos;
private final LongAdder opened = new LongAdder();
private final LongAdder closed = new LongAdder();
private final Counter forwardSmallSeeks = new Counter();
private final Counter backwardSmallSeeks = new Counter();
private final Counter forwardLargeSeeks = new Counter();
private final Counter backwardLargeSeeks = new Counter();
private final Counter contiguousReads = new Counter();
private final Counter nonContiguousReads = new Counter();
private final TimedCounter directBytesRead = new TimedCounter();
private final TimedCounter optimizedBytesRead = new TimedCounter();
private final Counter cachedBytesRead = new Counter();
private final Counter indexCacheBytesRead = new Counter();
private final TimedCounter cachedBytesWritten = new TimedCounter();
private final Counter blobStoreBytesRequested = new Counter();
private final AtomicLong currentIndexCacheFills = new AtomicLong();
private final Counter luceneBytesRead = new Counter();
public IndexInputStats(long numFiles, long totalSize, long minSize, long maxSize, LongSupplier currentTimeNanos) {
this(numFiles, totalSize, minSize, maxSize, SEEKING_THRESHOLD.getBytes(), currentTimeNanos);
}
public IndexInputStats(
long numFiles,
long totalSize,
long minSize,
long maxSize,
long seekingThreshold,
LongSupplier currentTimeNanos
) {
this.numFiles = numFiles;
this.totalSize = totalSize;
this.minSize = minSize;
this.maxSize = maxSize;
this.seekingThreshold = seekingThreshold;
this.currentTimeNanos = currentTimeNanos;
}
/**
* @return the current time in nanoseconds that should be used to measure statistics.
*/
public long currentTimeNanos() {
return currentTimeNanos.getAsLong();
}
public void incrementOpenCount() {
opened.increment();
}
public void incrementCloseCount() {
closed.increment();
}
public void addCachedBytesRead(int bytesRead) {
cachedBytesRead.add(bytesRead);
}
public void addIndexCacheBytesRead(int bytesRead) {
indexCacheBytesRead.add(bytesRead);
}
public void addCachedBytesWritten(long bytesWritten, long nanoseconds) {
cachedBytesWritten.add(bytesWritten, nanoseconds);
}
public void addDirectBytesRead(int bytesRead, long nanoseconds) {
directBytesRead.add(bytesRead, nanoseconds);
}
public void addOptimizedBytesRead(int bytesRead, long nanoseconds) {
optimizedBytesRead.add(bytesRead, nanoseconds);
}
public void incrementBytesRead(long previousPosition, long currentPosition, int bytesRead) {
LongConsumer incBytesRead = (previousPosition == currentPosition) ? contiguousReads::add : nonContiguousReads::add;
incBytesRead.accept(bytesRead);
}
public void incrementSeeks(long currentPosition, long newPosition) {
final long delta = newPosition - currentPosition;
if (delta == 0L) {
return;
}
final boolean isLarge = isLargeSeek(delta);
if (delta > 0) {
if (isLarge) {
forwardLargeSeeks.add(delta);
} else {
forwardSmallSeeks.add(delta);
}
} else {
if (isLarge) {
backwardLargeSeeks.add(-delta);
} else {
backwardSmallSeeks.add(-delta);
}
}
}
public void addBlobStoreBytesRequested(long bytesRequested) {
blobStoreBytesRequested.add(bytesRequested);
}
public Releasable addIndexCacheFill() {
final long openValue = currentIndexCacheFills.incrementAndGet();
assert openValue > 0 : openValue;
return () -> {
final long closeValue = currentIndexCacheFills.decrementAndGet();
assert closeValue >= 0 : closeValue;
};
}
public void addLuceneBytesRead(int bytesRead) {
luceneBytesRead.add(bytesRead);
}
public long getNumFiles() {
return numFiles;
}
public long getTotalSize() {
return totalSize;
}
public long getMinSize() {
return minSize;
}
public long getMaxSize() {
return maxSize;
}
public LongAdder getOpened() {
return opened;
}
public LongAdder getClosed() {
return closed;
}
public Counter getForwardSmallSeeks() {
return forwardSmallSeeks;
}
public Counter getBackwardSmallSeeks() {
return backwardSmallSeeks;
}
public Counter getForwardLargeSeeks() {
return forwardLargeSeeks;
}
public Counter getBackwardLargeSeeks() {
return backwardLargeSeeks;
}
public Counter getContiguousReads() {
return contiguousReads;
}
public Counter getNonContiguousReads() {
return nonContiguousReads;
}
public TimedCounter getDirectBytesRead() {
return directBytesRead;
}
public TimedCounter getOptimizedBytesRead() {
return optimizedBytesRead;
}
public Counter getCachedBytesRead() {
return cachedBytesRead;
}
public Counter getIndexCacheBytesRead() {
return indexCacheBytesRead;
}
public TimedCounter getCachedBytesWritten() {
return cachedBytesWritten;
}
public Counter getBlobStoreBytesRequested() {
return blobStoreBytesRequested;
}
public Counter getLuceneBytesRead() {
return luceneBytesRead;
}
@SuppressForbidden(reason = "Handles Long.MIN_VALUE before using Math.abs()")
public boolean isLargeSeek(long delta) {
return delta != Long.MIN_VALUE && Math.abs(delta) > seekingThreshold;
}
public long getCurrentIndexCacheFills() {
return currentIndexCacheFills.get();
}
public static
|
IndexInputStats
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1400/Issue1405.java
|
{
"start": 2435,
"end": 3447
}
|
class ____ {
@RequestMapping(value = "/test1405", method = RequestMethod.GET)
public
@ResponseBody
ModelAndView test7() {
AuthIdentityRequest authRequest = new AuthIdentityRequest();
authRequest.setAppId("cert01");
authRequest.setUserId(2307643);
authRequest.setIdNumber("34324324234234");
authRequest.setRealName("杨力");
authRequest.setBusinessLine("");
authRequest.setIgnoreIdNumberRepeat(false);
authRequest.setOffline(false);
ModelAndView modelAndView = new ModelAndView();
modelAndView.addObject("message", authRequest);
modelAndView.addObject("title", "testPage");
modelAndView.setViewName("test");
return modelAndView;
}
}
@ComponentScan(basePackages = "com.alibaba.json.bvt.issue_1400")
@Configuration
@Order(Ordered.LOWEST_PRECEDENCE + 1)
@EnableWebMvc
public static
|
BeanController
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cdi/lifecycle/ExtendedBeanManagerNotAvailableDuringTypeResolutionTest.java
|
{
"start": 1949,
"end": 3363
}
|
class ____=\"" + TheEntity.class.getName() + "\" entity-name=\"" + ENTITY_NAME + "\" table=\"" + TABLE_NAME + "\">\n" +
" <id name=\"id\">\n" +
" <generator class=\"org.hibernate.id.enhanced.SequenceStyleGenerator\">\n" +
" <param name=\"sequence_name\">" + TABLE_NAME + "_GENERATOR</param>\n" +
" <param name=\"table_name\">" + TABLE_NAME + "_GENERATOR</param>\n" +
" <param name=\"initial_value\">1</param>\n" +
" <param name=\"increment_size\">1</param>\n" +
" </generator>\n" +
" </id>\n" +
" <property name=\"name\" />\n" +
" <property name=\"myEnum\" >\n" +
" <type name=\"org.hibernate.orm.test.EnumType\">\n" +
" <param name=\"enumClass\">" + MyEnum.class.getName() + "</param>\n" +
" </type>\n" +
" </property>\n" +
" </class>\n" +
"</hibernate-mapping>\n";
private Integer id;
private String name;
private MyEnum myEnum;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public MyEnum getMyEnum() {
return myEnum;
}
public void setMyEnum(
MyEnum myEnum) {
this.myEnum = myEnum;
}
}
private
|
name
|
java
|
apache__logging-log4j2
|
log4j-api-test/src/test/java/org/apache/logging/log4j/AbstractLoggerTest.java
|
{
"start": 37561,
"end": 38384
}
|
class ____ implements Message {
private final FormattedMessageSupplier formattedMessageSupplier;
private final String format;
TestMessage(final FormattedMessageSupplier formattedMessageSupplier, final String format) {
this.formattedMessageSupplier = formattedMessageSupplier;
this.format = format;
}
@Override
public String getFormattedMessage() {
return formattedMessageSupplier.getFormattedMessage();
}
@Override
public String getFormat() {
return format;
}
@Override
public Object[] getParameters() {
return Constants.EMPTY_OBJECT_ARRAY;
}
@Override
public Throwable getThrowable() {
return null;
}
|
TestMessage
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_2800/Issue2830.java
|
{
"start": 124,
"end": 508
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSONObject jsonObject = JSONObject.parseObject("{\"qty\":\"10\",\"qty1\":\"10.0\",\"qty2\":\"10.000\"}");
assertEquals(10, jsonObject.getIntValue("qty"));
assertEquals(10, jsonObject.getIntValue("qty1"));
assertEquals(10, jsonObject.getIntValue("qty2"));
}
}
|
Issue2830
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java
|
{
"start": 1477,
"end": 9690
}
|
class ____ extends AbstractQueryTestCase<MatchPhraseQueryBuilder> {
@Override
protected MatchPhraseQueryBuilder doCreateTestQueryBuilder() {
String fieldName = randomFrom(
TEXT_FIELD_NAME,
TEXT_ALIAS_FIELD_NAME,
BOOLEAN_FIELD_NAME,
INT_FIELD_NAME,
DOUBLE_FIELD_NAME,
DATE_FIELD_NAME
);
Object value;
if (isTextField(fieldName)) {
int terms = randomIntBetween(0, 3);
StringBuilder builder = new StringBuilder();
for (int i = 0; i < terms; i++) {
builder.append(randomAlphaOfLengthBetween(1, 10)).append(" ");
}
value = builder.toString().trim();
} else {
value = getRandomValueForFieldName(fieldName);
}
MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder(fieldName, value);
if (randomBoolean() && isTextField(fieldName)) {
matchQuery.analyzer(randomFrom("simple", "keyword", "whitespace"));
}
if (randomBoolean()) {
matchQuery.slop(randomIntBetween(0, 10));
}
if (randomBoolean()) {
matchQuery.zeroTermsQuery(randomFrom(ZeroTermsQueryOption.ALL, ZeroTermsQueryOption.NONE));
}
return matchQuery;
}
@Override
protected Map<String, MatchPhraseQueryBuilder> getAlternateVersions() {
Map<String, MatchPhraseQueryBuilder> alternateVersions = new HashMap<>();
MatchPhraseQueryBuilder matchPhraseQuery = new MatchPhraseQueryBuilder(
randomAlphaOfLengthBetween(1, 10),
randomAlphaOfLengthBetween(1, 10)
);
String contentString = Strings.format("""
{
"match_phrase" : {
"%s" : "%s"
}
}""", matchPhraseQuery.fieldName(), matchPhraseQuery.value());
alternateVersions.put(contentString, matchPhraseQuery);
return alternateVersions;
}
@Override
protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query query, SearchExecutionContext context)
throws IOException {
assertThat(query, notNullValue());
if (query instanceof MatchAllDocsQuery) {
assertThat(queryBuilder.zeroTermsQuery(), equalTo(ZeroTermsQueryOption.ALL));
return;
}
assertThat(
query,
either(instanceOf(BooleanQuery.class)).or(instanceOf(PhraseQuery.class))
.or(instanceOf(TermQuery.class))
.or(instanceOf(PointRangeQuery.class))
.or(instanceOf(IndexOrDocValuesQuery.class))
.or(instanceOf(MatchNoDocsQuery.class))
.or(instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class))
);
}
public void testIllegalValues() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchPhraseQueryBuilder(null, "value"));
assertEquals("[match_phrase] requires fieldName", e.getMessage());
e = expectThrows(IllegalArgumentException.class, () -> new MatchPhraseQueryBuilder("fieldName", null));
assertEquals("[match_phrase] requires query value", e.getMessage());
}
public void testBadAnalyzer() throws IOException {
MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder("fieldName", "text");
matchQuery.analyzer("bogusAnalyzer");
QueryShardException e = expectThrows(QueryShardException.class, () -> matchQuery.toQuery(createSearchExecutionContext()));
assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found"));
}
public void testFromSimpleJson() throws IOException {
String json1 = """
{
"match_phrase" : {
"message" : "this is a test"
}
}""";
String expected = """
{
"match_phrase" : {
"message" : {
"query" : "this is a test"
}
}
}""";
MatchPhraseQueryBuilder qb = (MatchPhraseQueryBuilder) parseQuery(json1);
checkGeneratedJson(expected, qb);
}
public void testFromJson() throws IOException {
String json = """
{
"match_phrase" : {
"message" : {
"query" : "this is a test",
"slop" : 2,
"zero_terms_query" : "ALL",
"boost" : 2.0
}
}
}""";
MatchPhraseQueryBuilder parsed = (MatchPhraseQueryBuilder) parseQuery(json);
checkGeneratedJson(json, parsed);
assertEquals(json, "this is a test", parsed.value());
assertEquals(json, 2, parsed.slop());
assertEquals(json, ZeroTermsQueryOption.ALL, parsed.zeroTermsQuery());
}
public void testParseFailsWithMultipleFields() throws IOException {
String json = """
{
"match_phrase" : {
"message1" : {
"query" : "this is a test"
},
"message2" : {
"query" : "this is a test"
}
}
}""";
ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
assertEquals("[match_phrase] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage());
String shortJson = """
{
"match_phrase" : {
"message1" : "this is a test",
"message2" : "this is a test"
}
}""";
e = expectThrows(ParsingException.class, () -> parseQuery(shortJson));
assertEquals("[match_phrase] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage());
}
public void testRewriteToTermQueries() throws IOException {
QueryBuilder queryBuilder = new MatchPhraseQueryBuilder(KEYWORD_FIELD_NAME, "value");
for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) {
QueryBuilder rewritten = queryBuilder.rewrite(context);
assertThat(rewritten, instanceOf(TermQueryBuilder.class));
TermQueryBuilder tqb = (TermQueryBuilder) rewritten;
assertEquals(KEYWORD_FIELD_NAME, tqb.fieldName);
assertEquals(new BytesRef("value"), tqb.value);
}
}
public void testRewriteToTermQueryWithAnalyzer() throws IOException {
MatchPhraseQueryBuilder queryBuilder = new MatchPhraseQueryBuilder(TEXT_FIELD_NAME, "value");
queryBuilder.analyzer("keyword");
for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) {
QueryBuilder rewritten = queryBuilder.rewrite(context);
assertThat(rewritten, instanceOf(TermQueryBuilder.class));
TermQueryBuilder tqb = (TermQueryBuilder) rewritten;
assertEquals(TEXT_FIELD_NAME, tqb.fieldName);
assertEquals(new BytesRef("value"), tqb.value);
}
}
public void testRewriteIndexQueryToMatchNone() throws IOException {
QueryBuilder query = new MatchPhraseQueryBuilder("_index", "does_not_exist");
for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) {
QueryBuilder rewritten = query.rewrite(context);
assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class));
}
}
public void testRewriteIndexQueryToNotMatchNone() throws IOException {
QueryBuilder query = new MatchPhraseQueryBuilder("_index", getIndex().getName());
for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) {
QueryBuilder rewritten = query.rewrite(context);
assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class));
}
}
}
|
MatchPhraseQueryBuilderTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java
|
{
"start": 1071,
"end": 1289
}
|
class ____ validations of header, claims and signatures against the incoming {@link JwtAuthenticationToken}.
* It returns the {@link JWTClaimsSet} associated to the token if validation is successful.
* Note this
|
performs
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/Throttler.java
|
{
"start": 1191,
"end": 1768
}
|
class ____ throttles calls to a logger. The first unique log message is permitted to emit a message. Any subsequent log messages
* matching a message that has already been emitted will only increment a counter. A thread runs on an interval
* to emit any log messages that have been repeated beyond the initial emitted message. Once the thread emits a repeated
* message the counter is reset. If another message is received matching a previously emitted message by the thread, it will be consider
* the first time a unique message is received and will be logged.
*/
public
|
that
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/internal/progress/TimesTest.java
|
{
"start": 377,
"end": 748
}
|
class ____ {
@Test
public void shouldNotAllowNegativeNumberOfInvocations() {
assertThatThrownBy(
() -> {
VerificationModeFactory.times(-50);
})
.isInstanceOf(MockitoException.class)
.hasMessage("Negative value is not allowed here");
}
}
|
TimesTest
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/processor/PluginProcessor.java
|
{
"start": 11421,
"end": 12424
}
|
class ____ extends SimpleElementVisitor7<PluginEntry, Plugin> {
private final Elements elements;
private PluginElementVisitor(final Elements elements) {
this.elements = elements;
}
@Override
public PluginEntry visitType(final TypeElement e, final Plugin plugin) {
Objects.requireNonNull(plugin, "Plugin annotation is null.");
final PluginEntry entry = new PluginEntry();
entry.setKey(toRootLowerCase(plugin.name()));
entry.setClassName(elements.getBinaryName(e).toString());
entry.setName(Plugin.EMPTY.equals(plugin.elementType()) ? plugin.name() : plugin.elementType());
entry.setPrintable(plugin.printObject());
entry.setDefer(plugin.deferChildren());
entry.setCategory(plugin.category());
return entry;
}
}
/**
* ElementVisitor to scan the PluginAliases annotation.
*/
private static final
|
PluginElementVisitor
|
java
|
apache__flink
|
flink-yarn/src/test/java/org/apache/flink/yarn/TestingContainerStatus.java
|
{
"start": 1146,
"end": 2089
}
|
class ____ extends ContainerStatusPBImpl {
private final ContainerId containerId;
private final ContainerState containerState;
private final String diagnostics;
private final int exitStatus;
TestingContainerStatus(
final ContainerId containerId,
final ContainerState containerState,
final String diagnostics,
final int exitStatus) {
this.containerId = containerId;
this.containerState = containerState;
this.diagnostics = diagnostics;
this.exitStatus = exitStatus;
}
@Override
public ContainerId getContainerId() {
return containerId;
}
@Override
public ContainerState getState() {
return containerState;
}
@Override
public int getExitStatus() {
return exitStatus;
}
@Override
public String getDiagnostics() {
return diagnostics;
}
}
|
TestingContainerStatus
|
java
|
apache__spark
|
sql/catalyst/src/main/java/org/apache/spark/sql/connector/metric/CustomSumMetric.java
|
{
"start": 1125,
"end": 1374
}
|
class ____ implements CustomMetric {
@Override
public String aggregateTaskMetrics(long[] taskMetrics) {
long sum = 0L;
for (long taskMetric : taskMetrics) {
sum += taskMetric;
}
return String.valueOf(sum);
}
}
|
CustomSumMetric
|
java
|
quarkusio__quarkus
|
extensions/smallrye-fault-tolerance/deployment/src/main/java/io/quarkus/smallrye/faulttolerance/deployment/FaultToleranceScanner.java
|
{
"start": 2035,
"end": 16079
}
|
class ____ {
private final IndexView index;
private final AnnotationStore annotationStore;
private final AnnotationProxyBuildItem proxy;
private final ClassOutput output;
private final RecorderContext recorderContext;
private final BuildProducer<ReflectiveMethodBuildItem> reflectiveMethod;
private final FaultToleranceMethodSearch methodSearch;
FaultToleranceScanner(IndexView index, AnnotationStore annotationStore, AnnotationProxyBuildItem proxy,
ClassOutput output, RecorderContext recorderContext, BuildProducer<ReflectiveMethodBuildItem> reflectiveMethod) {
this.index = index;
this.annotationStore = annotationStore;
this.proxy = proxy;
this.output = output;
this.recorderContext = recorderContext;
this.reflectiveMethod = reflectiveMethod;
this.methodSearch = new FaultToleranceMethodSearch(index);
}
boolean hasFTAnnotations(ClassInfo clazz) {
// first check annotations on type
if (annotationStore.hasAnyAnnotation(clazz, DotNames.FT_ANNOTATIONS)) {
return true;
}
// then check on the methods
for (MethodInfo method : clazz.methods()) {
if (annotationStore.hasAnyAnnotation(method, DotNames.FT_ANNOTATIONS)) {
return true;
}
}
// then check on the parent
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return false;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return false;
}
return hasFTAnnotations(parentClass);
}
void forEachMethod(ClassInfo clazz, Consumer<MethodInfo> action) {
for (MethodInfo method : clazz.methods()) {
if (method.name().startsWith("<")) {
// constructors and static initializers can't be intercepted
continue;
}
if (method.isSynthetic()) {
// synthetic methods can't be intercepted
continue;
}
if (Modifier.isPrivate(method.flags())) {
// private methods can't be intercepted
continue;
}
if (annotationStore.hasAnnotation(method, io.quarkus.arc.processor.DotNames.NO_CLASS_INTERCEPTORS)
&& !annotationStore.hasAnyAnnotation(method, DotNames.FT_ANNOTATIONS)) {
// methods annotated @NoClassInterceptors and not annotated with an interceptor binding are not intercepted
continue;
}
action.accept(method);
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return;
}
forEachMethod(parentClass, action);
}
FaultToleranceMethod createFaultToleranceMethod(ClassInfo beanClass, MethodInfo method) {
Set<Class<? extends Annotation>> annotationsPresentDirectly = new HashSet<>();
FaultToleranceMethod result = new FaultToleranceMethod();
result.beanClass = getClassProxy(beanClass);
result.method = createMethodDescriptor(method);
result.applyFaultTolerance = getAnnotation(ApplyFaultTolerance.class, DotNames.APPLY_FAULT_TOLERANCE,
method, beanClass, annotationsPresentDirectly);
result.applyGuard = getAnnotation(ApplyGuard.class, DotNames.APPLY_GUARD,
method, beanClass, annotationsPresentDirectly);
result.asynchronous = getAnnotation(Asynchronous.class, DotNames.ASYNCHRONOUS,
method, beanClass, annotationsPresentDirectly);
result.asynchronousNonBlocking = getAnnotation(AsynchronousNonBlocking.class, DotNames.ASYNCHRONOUS_NON_BLOCKING,
method, beanClass, annotationsPresentDirectly);
result.blocking = getAnnotation(Blocking.class, DotNames.BLOCKING,
method, beanClass, annotationsPresentDirectly);
result.nonBlocking = getAnnotation(NonBlocking.class, DotNames.NON_BLOCKING,
method, beanClass, annotationsPresentDirectly);
result.bulkhead = getAnnotation(Bulkhead.class, DotNames.BULKHEAD,
method, beanClass, annotationsPresentDirectly);
result.circuitBreaker = getAnnotation(CircuitBreaker.class, DotNames.CIRCUIT_BREAKER,
method, beanClass, annotationsPresentDirectly);
result.circuitBreakerName = getAnnotation(CircuitBreakerName.class, DotNames.CIRCUIT_BREAKER_NAME,
method, beanClass, annotationsPresentDirectly);
result.fallback = getAnnotation(Fallback.class, DotNames.FALLBACK,
method, beanClass, annotationsPresentDirectly);
result.rateLimit = getAnnotation(RateLimit.class, DotNames.RATE_LIMIT,
method, beanClass, annotationsPresentDirectly);
result.retry = getAnnotation(Retry.class, DotNames.RETRY,
method, beanClass, annotationsPresentDirectly);
result.timeout = getAnnotation(Timeout.class, DotNames.TIMEOUT,
method, beanClass, annotationsPresentDirectly);
result.customBackoff = getAnnotation(CustomBackoff.class, DotNames.CUSTOM_BACKOFF,
method, beanClass, annotationsPresentDirectly);
result.exponentialBackoff = getAnnotation(ExponentialBackoff.class, DotNames.EXPONENTIAL_BACKOFF,
method, beanClass, annotationsPresentDirectly);
result.fibonacciBackoff = getAnnotation(FibonacciBackoff.class, DotNames.FIBONACCI_BACKOFF,
method, beanClass, annotationsPresentDirectly);
result.retryWhen = getAnnotation(RetryWhen.class, DotNames.RETRY_WHEN,
method, beanClass, annotationsPresentDirectly);
result.beforeRetry = getAnnotation(BeforeRetry.class, DotNames.BEFORE_RETRY,
method, beanClass, annotationsPresentDirectly);
result.annotationsPresentDirectly = annotationsPresentDirectly;
searchForMethods(result, beanClass, method, annotationsPresentDirectly);
return result;
}
private MethodDescriptor createMethodDescriptor(MethodInfo method) {
MethodDescriptor result = new MethodDescriptor();
result.declaringClass = getClassProxy(method.declaringClass());
result.name = method.name();
Class<?>[] parameterTypes = new Class<?>[method.parametersCount()];
for (int i = 0; i < method.parametersCount(); i++) {
parameterTypes[i] = getClassProxy(method.parameterType(i));
}
result.parameterTypes = parameterTypes;
result.returnType = getClassProxy(method.returnType());
return result;
}
private <A extends Annotation> A getAnnotation(Class<A> annotationType, DotName annotationName,
MethodInfo method, ClassInfo beanClass, Set<Class<? extends Annotation>> directlyPresent) {
if (annotationStore.hasAnnotation(method, annotationName)) {
directlyPresent.add(annotationType);
AnnotationInstance annotation = annotationStore.getAnnotation(method, annotationName);
return createAnnotation(annotationType, annotation);
}
return getAnnotationFromClass(annotationType, annotationName, beanClass);
}
// ---
private void searchForMethods(FaultToleranceMethod result, ClassInfo beanClass, MethodInfo method,
Set<Class<? extends Annotation>> annotationsPresentDirectly) {
if (result.fallback != null) {
String fallbackMethod = getMethodNameFromConfig(method, annotationsPresentDirectly,
Fallback.class, "fallbackMethod");
if (fallbackMethod == null) {
fallbackMethod = result.fallback.fallbackMethod();
}
if (fallbackMethod != null && !fallbackMethod.isEmpty()) {
ClassInfo declaringClass = method.declaringClass();
Type[] parameterTypes = method.parameterTypes().toArray(new Type[0]);
Type returnType = method.returnType();
MethodInfo foundMethod = methodSearch.findFallbackMethod(beanClass,
declaringClass, fallbackMethod, parameterTypes, returnType);
Set<MethodInfo> foundMethods = methodSearch.findFallbackMethodsWithExceptionParameter(beanClass,
declaringClass, fallbackMethod, parameterTypes, returnType);
result.fallbackMethod = createMethodDescriptorIfNotNull(foundMethod);
result.fallbackMethodsWithExceptionParameter = createMethodDescriptorsIfNotEmpty(foundMethods);
if (foundMethod != null) {
reflectiveMethod.produce(new ReflectiveMethodBuildItem("@Fallback method", foundMethod));
}
for (MethodInfo m : foundMethods) {
reflectiveMethod.produce(new ReflectiveMethodBuildItem("@Fallback method", m));
}
}
}
if (result.beforeRetry != null) {
String beforeRetryMethod = getMethodNameFromConfig(method, annotationsPresentDirectly,
BeforeRetry.class, "methodName");
if (beforeRetryMethod == null) {
beforeRetryMethod = result.beforeRetry.methodName();
}
if (beforeRetryMethod != null && !beforeRetryMethod.isEmpty()) {
MethodInfo foundMethod = methodSearch.findBeforeRetryMethod(beanClass,
method.declaringClass(), beforeRetryMethod);
result.beforeRetryMethod = createMethodDescriptorIfNotNull(foundMethod);
if (foundMethod != null) {
reflectiveMethod.produce(new ReflectiveMethodBuildItem("@BeforeRetry method", foundMethod));
}
}
}
}
// copy of generated code to obtain a config value and translation from reflection to Jandex
// no need to check whether `ftAnnotation` is enabled, this will happen at runtime
private String getMethodNameFromConfig(MethodInfo method, Set<Class<? extends Annotation>> annotationsPresentDirectly,
Class<? extends Annotation> ftAnnotation, String memberName) {
String result;
org.eclipse.microprofile.config.Config config = ConfigProvider.getConfig();
if (annotationsPresentDirectly.contains(ftAnnotation)) {
// smallrye.faulttolerance."<classname>/<methodname>".<annotation>.<member>
String newKey = ConfigUtilJandex.newKey(ftAnnotation, memberName, method);
// <classname>/<methodname>/<annotation>/<member>
String oldKey = ConfigUtilJandex.oldKey(ftAnnotation, memberName, method);
result = config.getOptionalValue(newKey, String.class)
.or(() -> config.getOptionalValue(oldKey, String.class))
.orElse(null);
} else {
// smallrye.faulttolerance."<classname>".<annotation>.<member>
String newKey = ConfigUtilJandex.newKey(ftAnnotation, memberName, method.declaringClass());
// <classname>/<annotation>/<member>
String oldKey = ConfigUtilJandex.oldKey(ftAnnotation, memberName, method.declaringClass());
result = config.getOptionalValue(newKey, String.class)
.or(() -> config.getOptionalValue(oldKey, String.class))
.orElse(null);
}
if (result == null) {
// smallrye.faulttolerance.global.<annotation>.<member>
String newKey = ConfigUtilJandex.newKey(ftAnnotation, memberName);
// <annotation>/<member>
String oldKey = ConfigUtilJandex.oldKey(ftAnnotation, memberName);
result = config.getOptionalValue(newKey, String.class)
.or(() -> config.getOptionalValue(oldKey, String.class))
.orElse(null);
}
return result;
}
private MethodDescriptor createMethodDescriptorIfNotNull(MethodInfo method) {
return method == null ? null : createMethodDescriptor(method);
}
private List<MethodDescriptor> createMethodDescriptorsIfNotEmpty(Collection<MethodInfo> methods) {
if (methods.isEmpty()) {
return null;
}
List<MethodDescriptor> result = new ArrayList<>(methods.size());
for (MethodInfo method : methods) {
result.add(createMethodDescriptor(method));
}
return result;
}
// ---
// almost all FT annotations are inherited (except `@Blocking` and `@NonBlocking`, which we'll remove soon,
// and `@CircuitBreakerName`, which can only be put on methods), so no need to test for that here
private <A extends Annotation> A getAnnotationFromClass(Class<A> annotationType, DotName annotationName, ClassInfo clazz) {
if (annotationStore.hasAnnotation(clazz, annotationName)) {
AnnotationInstance annotation = annotationStore.getAnnotation(clazz, annotationName);
return createAnnotation(annotationType, annotation);
}
// then check on the parent
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return null;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return null;
}
return getAnnotationFromClass(annotationType, annotationName, parentClass);
}
private <A extends Annotation> A createAnnotation(Class<A> annotationType, AnnotationInstance instance) {
return proxy.builder(instance, annotationType).build(output);
}
// using
|
FaultToleranceScanner
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
|
{
"start": 23596,
"end": 24203
}
|
class ____ implements ClusterStateTaskListener {
private final DesiredBalance desiredBalance;
private ReconcileDesiredBalanceTask(DesiredBalance desiredBalance) {
this.desiredBalance = desiredBalance;
}
@Override
public void onFailure(Exception e) {
assert MasterService.isPublishFailureException(e) : e;
}
@Override
public String toString() {
return "ReconcileDesiredBalanceTask[lastConvergedIndex=" + desiredBalance.lastConvergedIndex() + "]";
}
}
private final
|
ReconcileDesiredBalanceTask
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2901/ConditionWithTargetTypeOnCollectionMapper.java
|
{
"start": 325,
"end": 549
}
|
interface ____ {
Target map(Source source);
@Condition
default boolean check(List<String> test, @TargetType Class<?> type) {
return type.isInstance( test );
}
}
|
ConditionWithTargetTypeOnCollectionMapper
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/exponentialhistogram/fielddata/ExponentialHistogramValuesReader.java
|
{
"start": 429,
"end": 2144
}
|
interface ____ {
/**
* Advances to the exact document id, returning true if the document has a value for this field.
* @param docId the document id
* @return true if the document has a value for this field, false otherwise
*/
boolean advanceExact(int docId) throws IOException;
/**
* Returns the histogram value for the current document. Must be called only after a successful call to {@link #advanceExact(int)}.
* The returned histogram instance may be reused across calls, so if you need to hold on to it, make a copy.
*
* @return the histogram value for the current document
*/
ExponentialHistogram histogramValue() throws IOException;
/**
* A shortcut for invoking {@link ExponentialHistogram#valueCount()} on the return value of {@link #histogramValue()}.
* This method is more performant because it avoids loading the unnecessary parts of the histogram.
* Must be called only after a successful call to {@link #advanceExact(int)}.
*
* @return the count of values in the histogram for the current document
*/
long valuesCountValue() throws IOException;
/**
* A shortcut for invoking {@link ExponentialHistogram#sum()} on the return value of {@link #histogramValue()}.
* This method is more performant because it avoids loading the unnecessary parts of the histogram.
* Must be called only after a successful call to {@link #advanceExact(int)}.
*
* @return the sum of values in the histogram for the current document
*/
double sumValue() throws IOException;
// TODO: add accessors for min/max/sum which don't load the entire histogram
}
|
ExponentialHistogramValuesReader
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2221/Issue2221Test.java
|
{
"start": 555,
"end": 1107
}
|
class ____ {
@ProcessorTest
public void multiSourceInheritConfigurationShouldWork() {
SiteDto site = RestSiteMapper.INSTANCE.convert(
new RestSiteDto( "restTenant", "restSite", "restCti" ),
"parameterTenant",
"parameterSite"
);
assertThat( site ).isNotNull();
assertThat( site.getTenantId() ).isEqualTo( "parameterTenant" );
assertThat( site.getSiteId() ).isEqualTo( "parameterSite" );
assertThat( site.getCtiId() ).isEqualTo( "restCti" );
}
}
|
Issue2221Test
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MissingBindingValidationTest.java
|
{
"start": 40756,
"end": 42200
}
|
interface ____ {",
" String string();",
" Foo foo();",
"}");
CompilerTests.daggerCompiler(foo, component)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
String.join(
"\n",
"String cannot be provided without an @Inject constructor or an "
+ "@Provides-annotated method.",
"",
" String is requested at",
" [TestComponent] TestComponent.string()",
"It is also requested at:",
" Foo(one, …)",
" Foo(…, two, …)",
" Foo(…, three, …)",
" Foo(…, four, …)",
" Foo(…, five, …)",
" Foo(…, six, …)",
" Foo(…, seven, …)",
" Foo(…, eight, …)",
" Foo(…, nine, …)",
" Foo(…, ten, …)",
" and 3 others",
"The following other entry points also depend on it:",
" TestComponent.foo()"))
.onSource(component)
.onLineContaining("
|
TestComponent
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
|
{
"start": 3288,
"end": 3468
}
|
interface ____ the underlying storage that
* stores replicas for a data node.
* The default implementation stores replicas on local drives.
*/
@InterfaceAudience.Private
public
|
for
|
java
|
quarkusio__quarkus
|
integration-tests/openapi/src/main/java/io/quarkus/it/openapi/spring/FileResource.java
|
{
"start": 717,
"end": 2530
}
|
class ____ {
@GetMapping("/justFile/{fileName}")
public File justFile(@PathVariable("fileName") String filename) {
return toFile(filename);
}
@PostMapping("/justFile")
public File justFile(File file) {
return file;
}
@GetMapping("/responseEntityFile/{fileName}")
public ResponseEntity<File> restResponseFile(@PathVariable("fileName") String filename) {
return ResponseEntity.ok(toFile(filename));
}
@PostMapping("/responseEntityFile")
public ResponseEntity<File> restResponseFile(File file) {
return ResponseEntity.ok(file);
}
@GetMapping("/optionalFile/{fileName}")
public Optional<File> optionalFile(@PathVariable("fileName") String filename) {
return Optional.of(toFile(filename));
}
@PostMapping("/optionalFile")
public Optional<File> optionalFile(Optional<File> file) {
return file;
}
@GetMapping("/uniFile/{fileName}")
public Uni<File> uniFile(@PathVariable("fileName") String filename) {
return Uni.createFrom().item(toFile(filename));
}
@GetMapping("/completionStageFile/{fileName}")
public CompletionStage<File> completionStageFile(@PathVariable("fileName") String filename) {
return CompletableFuture.completedStage(toFile(filename));
}
@GetMapping("/completedFutureFile/{fileName}")
public CompletableFuture<File> completedFutureFile(@PathVariable("fileName") String filename) {
return CompletableFuture.completedFuture(toFile(filename));
}
private File toFile(String filename) {
try {
String f = URLDecoder.decode(filename, "UTF-8");
return new File(f);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
}
|
FileResource
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/model/RoutePropertiesTest.java
|
{
"start": 1188,
"end": 2743
}
|
class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testRouteProperties() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("route-id").routeProperty("key1", "val1").routeProperty("key2", "val2")
.to("mock:output");
}
});
context.start();
RouteDefinition definition = context.getRouteDefinition("route-id");
Route route = context.getRoute("route-id");
assertNotNull(definition.getRouteProperties());
assertEquals(2, definition.getRouteProperties().size());
assertNotNull(route.getProperties());
assertEquals("val1", route.getProperties().get("key1"));
assertEquals("val2", route.getProperties().get("key2"));
}
@DisplayName("Checks that trying to use a reserved property leads to failure")
@Test
public void testRoutePropertiesFailure() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("route-id").routeProperty(Route.ID_PROPERTY, "the id").to("mock:output");
}
});
Assertions.assertThrows(FailedToCreateRouteException.class, () -> context.start(),
"Should have prevented setting a property with a reserved name");
}
}
|
RoutePropertiesTest
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/server/exception/OutOfStockExceptionHandler.java
|
{
"start": 1239,
"end": 1903
}
|
class ____ implements ExceptionHandler<OutOfStockException, HttpResponse> {
private final ErrorResponseProcessor<?> errorResponseProcessor;
public OutOfStockExceptionHandler(ErrorResponseProcessor<?> errorResponseProcessor) {
this.errorResponseProcessor = errorResponseProcessor;
}
@Override
public HttpResponse handle(HttpRequest request, OutOfStockException e) {
return errorResponseProcessor.processResponse(ErrorContext.builder(request)
.cause(e)
.errorMessage("No stock available")
.build(), HttpResponse.badRequest()); // <1>
}
}
//end::clazz[]
|
OutOfStockExceptionHandler
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/fetch/subphase/PopulateFieldLookupTests.java
|
{
"start": 1261,
"end": 3677
}
|
class ____ extends MapperServiceTestCase {
public void testPopulateFieldLookup() throws IOException {
final XContentBuilder mapping = createMapping();
final MapperService mapperService = createMapperService(mapping);
withLuceneIndex(mapperService, iw -> {
final Document doc = new Document();
doc.add(new StoredField("integer", 101));
doc.add(new StoredField("keyword", new BytesRef("foobar")));
iw.addDocument(doc);
}, reader -> {
final StoredFields storedFields = reader.storedFields();
final Document document = storedFields.document(0);
final List<String> documentFields = document.getFields().stream().map(IndexableField::name).toList();
assertThat(documentFields, Matchers.containsInAnyOrder("integer", "keyword"));
final IndexSearcher searcher = newSearcher(reader);
final LeafReaderContext readerContext = searcher.getIndexReader().leaves().get(0);
final LeafFieldLookupProvider provider = LeafFieldLookupProvider.fromStoredFields().apply(readerContext);
final FieldLookup integerFieldLookup = new FieldLookup(mapperService.fieldType("integer"));
final FieldLookup keywordFieldLookup = new FieldLookup(mapperService.fieldType("keyword"));
provider.populateFieldLookup(integerFieldLookup, 0);
provider.populateFieldLookup(keywordFieldLookup, 0);
assertEquals(List.of(101), integerFieldLookup.getValues());
assertEquals(List.of("foobar"), keywordFieldLookup.getValues());
});
}
private static XContentBuilder createMapping() throws IOException {
final XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc");
{
mapping.startObject("properties");
{
mapping.startObject("integer");
{
mapping.field("type", "integer").field("store", "true");
}
mapping.endObject();
mapping.startObject("keyword");
{
mapping.field("type", "keyword").field("store", "true");
}
mapping.endObject();
}
mapping.endObject();
}
return mapping.endObject().endObject();
}
}
|
PopulateFieldLookupTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/internal/ListAttributeImpl.java
|
{
"start": 778,
"end": 2733
}
|
class ____<X, E>
extends AbstractPluralAttribute<X, List<E>, E>
implements SqmListPersistentAttribute<X, E> {
private final SqmPathSource<Integer> indexPathSource;
public ListAttributeImpl(PluralAttributeBuilder<X, List<E>, E, ?> builder) {
super( builder );
//noinspection unchecked
this.indexPathSource = (SqmPathSource<Integer>) SqmMappingModelHelper.resolveSqmKeyPathSource(
builder.getListIndexOrMapKeyType(),
BindableType.PLURAL_ATTRIBUTE,
false
);
}
@Override
public CollectionType getCollectionType() {
return CollectionType.LIST;
}
@Override
public SqmPathSource<Integer> getIndexPathSource() {
return indexPathSource;
}
@Override
public @Nullable SqmPathSource<?> findSubPathSource(String name) {
final CollectionPart.Nature nature = CollectionPart.Nature.fromNameExact( name );
if ( nature != null ) {
switch ( nature ) {
case INDEX:
return indexPathSource;
case ELEMENT:
return getElementPathSource();
}
}
return getElementPathSource().findSubPathSource( name );
}
@Override
public @Nullable SqmPathSource<?> findSubPathSource(String name, boolean includeSubtypes) {
return CollectionPart.Nature.INDEX.getName().equals( name )
? indexPathSource
: super.findSubPathSource( name, includeSubtypes );
}
@Override
public SqmPathSource<?> getIntermediatePathSource(SqmPathSource<?> pathSource) {
final String pathName = pathSource.getPathName();
return pathName.equals( getElementPathSource().getPathName() )
|| pathName.equals( indexPathSource.getPathName() ) ? null : getElementPathSource();
}
@Override
public SqmAttributeJoin<X,E> createSqmJoin(
SqmFrom<?,X> lhs,
SqmJoinType joinType,
@Nullable String alias,
boolean fetched,
SqmCreationState creationState) {
return new SqmListJoin<>(
lhs,
this,
alias,
joinType,
fetched,
creationState.getCreationContext().getNodeBuilder()
);
}
}
|
ListAttributeImpl
|
java
|
google__guava
|
android/guava/src/com/google/common/hash/AbstractNonStreamingHashFunction.java
|
{
"start": 3324,
"end": 3843
}
|
class ____ extends ByteArrayOutputStream {
ExposedByteArrayOutputStream(int expectedInputSize) {
super(expectedInputSize);
}
void write(ByteBuffer input) {
int remaining = input.remaining();
if (count + remaining > buf.length) {
buf = Arrays.copyOf(buf, count + remaining);
}
input.get(buf, count, remaining);
count += remaining;
}
byte[] byteArray() {
return buf;
}
int length() {
return count;
}
}
}
|
ExposedByteArrayOutputStream
|
java
|
apache__dubbo
|
dubbo-configcenter/dubbo-configcenter-nacos/src/main/java/org/apache/dubbo/configcenter/support/nacos/NacosConfigServiceWrapper.java
|
{
"start": 1163,
"end": 3323
}
|
class ____ {
private static final String INNERCLASS_SYMBOL = "$";
private static final String INNERCLASS_COMPATIBLE_SYMBOL = "___";
private static final long DEFAULT_TIMEOUT = 3000L;
private final ConfigService configService;
public NacosConfigServiceWrapper(ConfigService configService) {
this.configService = configService;
}
public ConfigService getConfigService() {
return configService;
}
public void addListener(String dataId, String group, Listener listener) throws NacosException {
configService.addListener(handleInnerSymbol(dataId), handleInnerSymbol(group), listener);
}
public String getConfig(String dataId, String group) throws NacosException {
return configService.getConfig(handleInnerSymbol(dataId), handleInnerSymbol(group), DEFAULT_TIMEOUT);
}
public String getConfig(String dataId, String group, long timeout) throws NacosException {
return configService.getConfig(handleInnerSymbol(dataId), handleInnerSymbol(group), timeout);
}
public boolean publishConfig(String dataId, String group, String content) throws NacosException {
return configService.publishConfig(handleInnerSymbol(dataId), handleInnerSymbol(group), content);
}
public boolean publishConfigCas(String dataId, String group, String content, String casMd5) throws NacosException {
return configService.publishConfigCas(handleInnerSymbol(dataId), handleInnerSymbol(group), content, casMd5);
}
public boolean removeConfig(String dataId, String group) throws NacosException {
return configService.removeConfig(handleInnerSymbol(dataId), handleInnerSymbol(group));
}
public void shutdown() throws NacosException {
configService.shutDown();
}
/**
* see {@link com.alibaba.nacos.client.config.utils.ParamUtils#isValid(java.lang.String)}
*/
private String handleInnerSymbol(String data) {
if (data == null) {
return null;
}
return data.replace(INNERCLASS_SYMBOL, INNERCLASS_COMPATIBLE_SYMBOL).replace(SLASH_CHAR, HYPHEN_CHAR);
}
}
|
NacosConfigServiceWrapper
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ImmutableMemberCollectionTest.java
|
{
"start": 2619,
"end": 3029
}
|
class ____ {
private final List<String> myList;
private List<String> doNotTouchThisList;
Test() {
myList = ImmutableList.of("a");
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
import java.util.List;
|
Test
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
|
{
"start": 6935,
"end": 201840
}
|
class ____ {
private final String topic1 = "test1";
private final String topic2 = "test2";
private final TopicPartition t1p = new TopicPartition(topic1, 0);
private final TopicPartition t2p = new TopicPartition(topic2, 0);
private final String groupId = "test-group";
private final Optional<String> groupInstanceId = Optional.of("test-instance");
private final int rebalanceTimeoutMs = 60000;
private final int sessionTimeoutMs = 10000;
private final int heartbeatIntervalMs = 5000;
private final long retryBackoffMs = 100;
private final long retryBackoffMaxMs = 1000;
private final int autoCommitIntervalMs = 2000;
private final int requestTimeoutMs = 30000;
private final int throttleMs = 10;
private final MockTime time = new MockTime();
private GroupRebalanceConfig rebalanceConfig;
private final ConsumerPartitionAssignor.RebalanceProtocol protocol;
private final MockPartitionAssignor partitionAssignor;
private final ThrowOnAssignmentAssignor throwOnAssignmentAssignor;
private final ThrowOnAssignmentAssignor throwFatalErrorOnAssignmentAssignor;
private final List<ConsumerPartitionAssignor> assignors;
private final Map<String, MockPartitionAssignor> assignorMap;
private final String consumerId = "consumer";
private final String consumerId2 = "consumer2";
private MockClient client;
private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap<>() {
{
put(topic1, 1);
put(topic2, 1);
}
});
private final Node node = metadataResponse.brokers().iterator().next();
private SubscriptionState subscriptions;
private ConsumerMetadata metadata;
private Metrics metrics;
private ConsumerNetworkClient consumerClient;
private MockRebalanceListener rebalanceListener;
private MockCommitCallback mockOffsetCommitCallback;
private ConsumerCoordinator coordinator;
public ConsumerCoordinatorTest(final ConsumerPartitionAssignor.RebalanceProtocol protocol) {
this.protocol = protocol;
this.partitionAssignor = new MockPartitionAssignor(Collections.singletonList(protocol));
this.throwOnAssignmentAssignor = new ThrowOnAssignmentAssignor(Collections.singletonList(protocol),
new KafkaException("Kaboom for assignment!"),
"throw-on-assignment-assignor");
this.throwFatalErrorOnAssignmentAssignor = new ThrowOnAssignmentAssignor(Collections.singletonList(protocol),
new IllegalStateException("Illegal state for assignment!"),
"throw-fatal-error-on-assignment-assignor");
this.assignors = Arrays.asList(partitionAssignor, throwOnAssignmentAssignor, throwFatalErrorOnAssignmentAssignor);
this.assignorMap = mkMap(mkEntry(partitionAssignor.name(), partitionAssignor),
mkEntry(throwOnAssignmentAssignor.name(), throwOnAssignmentAssignor),
mkEntry(throwFatalErrorOnAssignmentAssignor.name(), throwFatalErrorOnAssignmentAssignor));
}
@BeforeEach
public void setup() {
LogContext logContext = new LogContext();
this.subscriptions = new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST);
this.metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false,
false, subscriptions, logContext, new ClusterResourceListeners());
this.client = new MockClient(time, metadata);
this.client.updateMetadata(metadataResponse);
this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100,
requestTimeoutMs, Integer.MAX_VALUE);
this.metrics = new Metrics(time);
this.rebalanceListener = new MockRebalanceListener();
this.mockOffsetCommitCallback = new MockCommitCallback();
this.partitionAssignor.clear();
this.rebalanceConfig = buildRebalanceConfig(Optional.empty(), null);
this.coordinator = buildCoordinator(rebalanceConfig,
metrics,
assignors,
false,
subscriptions);
}
private GroupRebalanceConfig buildRebalanceConfig(Optional<String> groupInstanceId, String rackId) {
return new GroupRebalanceConfig(sessionTimeoutMs,
rebalanceTimeoutMs,
heartbeatIntervalMs,
groupId,
groupInstanceId,
rackId,
retryBackoffMs,
retryBackoffMaxMs);
}
@AfterEach
public void teardown() {
this.metrics.close();
this.coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT);
}
@Test
public void testMetrics() {
assertNotNull(getMetric("commit-latency-avg"));
assertNotNull(getMetric("commit-latency-max"));
assertNotNull(getMetric("commit-rate"));
assertNotNull(getMetric("commit-total"));
assertNotNull(getMetric("partition-revoked-latency-avg"));
assertNotNull(getMetric("partition-revoked-latency-max"));
assertNotNull(getMetric("partition-assigned-latency-avg"));
assertNotNull(getMetric("partition-assigned-latency-max"));
assertNotNull(getMetric("partition-lost-latency-avg"));
assertNotNull(getMetric("partition-lost-latency-max"));
assertNotNull(getMetric("assigned-partitions"));
metrics.sensor("commit-latency").record(1.0d);
metrics.sensor("commit-latency").record(6.0d);
metrics.sensor("commit-latency").record(2.0d);
assertEquals(3.0d, getMetric("commit-latency-avg").metricValue());
assertEquals(6.0d, getMetric("commit-latency-max").metricValue());
assertEquals(0.1d, getMetric("commit-rate").metricValue());
assertEquals(3.0d, getMetric("commit-total").metricValue());
metrics.sensor("partition-revoked-latency").record(1.0d);
metrics.sensor("partition-revoked-latency").record(2.0d);
metrics.sensor("partition-assigned-latency").record(1.0d);
metrics.sensor("partition-assigned-latency").record(2.0d);
metrics.sensor("partition-lost-latency").record(1.0d);
metrics.sensor("partition-lost-latency").record(2.0d);
assertEquals(1.5d, getMetric("partition-revoked-latency-avg").metricValue());
assertEquals(2.0d, getMetric("partition-revoked-latency-max").metricValue());
assertEquals(1.5d, getMetric("partition-assigned-latency-avg").metricValue());
assertEquals(2.0d, getMetric("partition-assigned-latency-max").metricValue());
assertEquals(1.5d, getMetric("partition-lost-latency-avg").metricValue());
assertEquals(2.0d, getMetric("partition-lost-latency-max").metricValue());
assertEquals(0.0d, getMetric("assigned-partitions").metricValue());
subscriptions.assignFromUser(Collections.singleton(t1p));
assertEquals(1.0d, getMetric("assigned-partitions").metricValue());
subscriptions.assignFromUser(Set.of(t1p, t2p));
assertEquals(2.0d, getMetric("assigned-partitions").metricValue());
}
private KafkaMetric getMetric(final String name) {
return metrics.metrics().get(metrics.metricName(name, consumerId + groupId + "-coordinator-metrics"));
}
@SuppressWarnings("unchecked")
@Test
public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfNeeded() {
SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
// the consumer only subscribed to "topic1"
Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
for (Map.Entry<String, List<String>> subscriptionEntry : memberSubscriptions.entrySet()) {
ConsumerPartitionAssignor.Subscription subscription = new ConsumerPartitionAssignor.Subscription(subscriptionEntry.getValue());
ByteBuffer buf = ConsumerProtocol.serializeSubscription(subscription);
metadata.add(new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId(subscriptionEntry.getKey())
.setMetadata(buf.array()));
}
// normal case: the assignment result will have partitions for only the subscribed topic: "topic1"
partitionAssignor.prepare(Collections.singletonMap(consumerId, singletonList(t1p)));
try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
// groupSubscribe should be only called 1 time, which is before assignment,
// because the assigned topics are the same as the subscribed topics
Mockito.verify(mockSubscriptionState, Mockito.times(1)).groupSubscribe(topicsCaptor.capture());
List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
// expected the final group subscribed topics to be updated to "topic1"
assertEquals(Collections.singleton(topic1), capturedTopics.get(0));
}
Mockito.clearInvocations(mockSubscriptionState);
// unsubscribed topic partition assigned case: the assignment result will have partitions for (1) subscribed topic: "topic1"
// and (2) the additional unsubscribed topic: "topic2". We should add "topic2" into group subscription list
partitionAssignor.prepare(Collections.singletonMap(consumerId, Arrays.asList(t1p, t2p)));
try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
// groupSubscribe should be called 2 times, once before assignment, once after assignment
// (because the assigned topics are not the same as the subscribed topics)
Mockito.verify(mockSubscriptionState, Mockito.times(2)).groupSubscribe(topicsCaptor.capture());
List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
// expected the final group subscribed topics to be updated to "topic1" and "topic2"
Set<String> expectedTopicsGotCalled = Set.of(topic1, topic2);
assertEquals(expectedTopicsGotCalled, capturedTopics.get(1));
}
}
public ByteBuffer subscriptionUserData(int generation) {
final String generationKeyName = "generation";
final Schema cooperativeStickyAssignorUserDataV0 = new Schema(
new Field(generationKeyName, Type.INT32));
Struct struct = new Struct(cooperativeStickyAssignorUserDataV0);
struct.set(generationKeyName, generation);
ByteBuffer buffer = ByteBuffer.allocate(cooperativeStickyAssignorUserDataV0.sizeOf(struct));
cooperativeStickyAssignorUserDataV0.write(buffer, struct);
buffer.flip();
return buffer;
}
private List<JoinGroupResponseData.JoinGroupResponseMember> validateCooperativeAssignmentTestSetup() {
// consumer1 and consumer2 subscribed to "topic1" with 2 partitions: t1p, t2p
Map<String, List<String>> memberSubscriptions = new HashMap<>();
List<String> subscribedTopics = singletonList(topic1);
memberSubscriptions.put(consumerId, subscribedTopics);
memberSubscriptions.put(consumerId2, subscribedTopics);
// the ownedPartition for consumer1 is t1p, t2p
ConsumerPartitionAssignor.Subscription subscriptionConsumer1 = new ConsumerPartitionAssignor.Subscription(
subscribedTopics, subscriptionUserData(1), Arrays.asList(t1p, t2p));
// the ownedPartition for consumer2 is empty
ConsumerPartitionAssignor.Subscription subscriptionConsumer2 = new ConsumerPartitionAssignor.Subscription(
subscribedTopics, subscriptionUserData(1), emptyList());
List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
for (Map.Entry<String, List<String>> subscriptionEntry : memberSubscriptions.entrySet()) {
ByteBuffer buf;
if (subscriptionEntry.getKey().equals(consumerId)) {
buf = ConsumerProtocol.serializeSubscription(subscriptionConsumer1);
} else {
buf = ConsumerProtocol.serializeSubscription(subscriptionConsumer2);
}
metadata.add(new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId(subscriptionEntry.getKey())
.setMetadata(buf.array()));
}
return metadata;
}
@Test
public void testPerformAssignmentShouldValidateCooperativeAssignment() {
SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
List<JoinGroupResponseData.JoinGroupResponseMember> metadata = validateCooperativeAssignmentTestSetup();
// simulate the custom cooperative assignor didn't revoke the partition first before assign to other consumer
Map<String, List<TopicPartition>> assignment = new HashMap<>();
assignment.put(consumerId, singletonList(t1p));
assignment.put(consumerId2, singletonList(t2p));
partitionAssignor.prepare(assignment);
try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
if (protocol == COOPERATIVE) {
// in cooperative protocol, we should throw exception when validating cooperative assignment
Exception e = assertThrows(IllegalStateException.class,
() -> coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false));
assertTrue(e.getMessage().contains("Assignor supporting the COOPERATIVE protocol violates its requirements"));
} else {
// in eager protocol, we should not validate assignment
coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
}
}
}
@Test
public void testOnLeaderElectedShouldSkipAssignment() {
SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
ConsumerPartitionAssignor assignor = Mockito.mock(ConsumerPartitionAssignor.class);
String assignorName = "mock-assignor";
Mockito.when(assignor.name()).thenReturn(assignorName);
Mockito.when(assignor.supportedProtocols()).thenReturn(Collections.singletonList(protocol));
Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
for (Map.Entry<String, List<String>> subscriptionEntry : memberSubscriptions.entrySet()) {
ConsumerPartitionAssignor.Subscription subscription = new ConsumerPartitionAssignor.Subscription(subscriptionEntry.getValue());
ByteBuffer buf = ConsumerProtocol.serializeSubscription(subscription);
metadata.add(new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId(subscriptionEntry.getKey())
.setMetadata(buf.array()));
}
try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), Collections.singletonList(assignor), false, mockSubscriptionState)) {
assertEquals(Collections.emptyMap(), coordinator.onLeaderElected("1", assignorName, metadata, true));
assertTrue(coordinator.isLeader());
}
Mockito.verify(assignor, Mockito.never()).assign(Mockito.any(), Mockito.any());
}
// Verifies that cooperative-assignment validation is skipped when the assignor's name
// matches the built-in cooperative-sticky assignor, even if that assignor hands back
// partitions computed from out-of-date ownedPartitions (no exception is expected here).
@Test
public void testPerformAssignmentShouldSkipValidateCooperativeAssignmentForBuiltInCooperativeStickyAssignor() {
    SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
    List<JoinGroupResponseData.JoinGroupResponseMember> metadata = validateCooperativeAssignmentTestSetup();

    List<ConsumerPartitionAssignor> assignorsWithCooperativeStickyAssignor = new ArrayList<>(assignors);
    // create a mockPartitionAssignor with the same name as cooperative sticky assignor
    MockPartitionAssignor mockCooperativeStickyAssignor = new MockPartitionAssignor(Collections.singletonList(protocol)) {
        @Override
        public String name() {
            return COOPERATIVE_STICKY_ASSIGNOR_NAME;
        }
    };
    assignorsWithCooperativeStickyAssignor.add(mockCooperativeStickyAssignor);

    // simulate the cooperative sticky assignor do the assignment with out-of-date ownedPartition
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    assignment.put(consumerId, singletonList(t1p));
    assignment.put(consumerId2, singletonList(t2p));
    mockCooperativeStickyAssignor.prepare(assignment);

    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignorsWithCooperativeStickyAssignor, false, mockSubscriptionState)) {
        // should not validate assignment for built-in cooperative sticky assignor
        coordinator.onLeaderElected("1", mockCooperativeStickyAssignor.name(), metadata, false);
    }
}
// Verifies rebalance-protocol selection across the configured assignors:
// construction must fail when the assignors share no common protocol, and when
// they do share protocols the coordinator picks the most advanced (highest-id) one.
@Test
public void testSelectRebalanceProtocol() {
    List<ConsumerPartitionAssignor> assignors = new ArrayList<>();
    assignors.add(new MockPartitionAssignor(Collections.singletonList(ConsumerPartitionAssignor.RebalanceProtocol.EAGER)));
    assignors.add(new MockPartitionAssignor(Collections.singletonList(COOPERATIVE)));

    // no commonly supported protocols
    assertThrows(IllegalArgumentException.class, () -> buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, subscriptions));

    assignors.clear();
    assignors.add(new MockPartitionAssignor(Arrays.asList(ConsumerPartitionAssignor.RebalanceProtocol.EAGER, COOPERATIVE)));
    assignors.add(new MockPartitionAssignor(Arrays.asList(ConsumerPartitionAssignor.RebalanceProtocol.EAGER, COOPERATIVE)));

    // select higher indexed (more advanced) protocols
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, subscriptions)) {
        assertEquals(COOPERATIVE, coordinator.getProtocol());
    }
}
// Happy-path heartbeat: after the coordinator is located, a heartbeat request is sent
// and a NONE-error response completes the future successfully.
@Test
public void testNormalHeartbeat() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // normal heartbeat
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.succeeded());
}
// A GROUP_AUTHORIZATION_FAILED error from the FindCoordinator lookup must surface
// to the caller as a GroupAuthorizationException.
@Test
public void testGroupDescribeUnauthorized() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.GROUP_AUTHORIZATION_FAILED));
    assertThrows(GroupAuthorizationException.class, () -> coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)));
}
// A GROUP_AUTHORIZATION_FAILED error on the JoinGroup response (after coordinator
// discovery succeeds) must surface from poll() as a GroupAuthorizationException.
@Test
public void testGroupReadUnauthorized() {
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    client.prepareResponse(joinGroupLeaderResponse(0, "memberId", Collections.emptyMap(),
            Errors.GROUP_AUTHORIZATION_FAILED));
    assertThrows(GroupAuthorizationException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE)));
}
// With manual (user) partition assignment and no auto-commit in flight, the coordinator
// is marked unknown on COORDINATOR_NOT_AVAILABLE and poll() does NOT attempt rediscovery.
@Test
public void testCoordinatorNotAvailableWithUserAssignedType() {
    subscriptions.assignFromUser(Collections.singleton(t1p));
    // should mark coordinator unknown after COORDINATOR_NOT_AVAILABLE error
    client.prepareResponse(groupCoordinatorResponse(node, Errors.COORDINATOR_NOT_AVAILABLE));
    // set timeout to 0 because we don't want to retry after the error
    coordinator.poll(time.timer(0));
    assertTrue(coordinator.coordinatorUnknown());

    // should not try to find coordinator since we are in manual assignment
    // hence the prepared response should not be returned
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertTrue(coordinator.coordinatorUnknown());
}
// With manual assignment but auto-commit enabled, a due auto-commit forces coordinator
// discovery, and the commit request is sent as soon as the coordinator is found.
@Test
public void testAutoCommitAsyncWithUserAssignedType() {
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.assignFromUser(Collections.singleton(t1p));
        // set timeout to 0 because we expect no requests sent
        coordinator.poll(time.timer(0));
        assertTrue(coordinator.coordinatorUnknown());
        assertFalse(client.hasInFlightRequests());

        // elapse auto commit interval and set committable position
        time.sleep(autoCommitIntervalMs);
        subscriptions.seekUnvalidated(t1p, new SubscriptionState.FetchPosition(100L));

        // should try to find coordinator since we are auto committing
        coordinator.poll(time.timer(0));
        assertTrue(coordinator.coordinatorUnknown());
        assertTrue(client.hasInFlightRequests());

        client.respond(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.poll(time.timer(0));
        assertFalse(coordinator.coordinatorUnknown());
        // after we've discovered the coordinator we should send
        // out the commit request immediately
        assertTrue(client.hasInFlightRequests());
    }
}
// With manual assignment, an explicit async commit forces coordinator discovery; the
// commit request goes out right after discovery and is tracked in inFlightAsyncCommits.
// The callback must not fire within this test (no commit response is prepared).
@Test
public void testCommitAsyncWithUserAssignedType() {
    subscriptions.assignFromUser(Collections.singleton(t1p));
    // set timeout to 0 because we expect no requests sent
    coordinator.poll(time.timer(0));
    assertTrue(coordinator.coordinatorUnknown());
    assertFalse(client.hasInFlightRequests());

    // should try to find coordinator since we are commit async
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), (offsets, exception) ->
        fail("Commit should not get responses, but got offsets:" + offsets + ", and exception:" + exception)
    );
    coordinator.poll(time.timer(0));
    assertTrue(coordinator.coordinatorUnknown());
    assertTrue(client.hasInFlightRequests());
    assertEquals(0, coordinator.inFlightAsyncCommits.get());

    client.respond(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.poll(time.timer(0));
    assertFalse(coordinator.coordinatorUnknown());
    // after we've discovered the coordinator we should send
    // out the commit request immediately
    assertTrue(client.hasInFlightRequests());
    assertEquals(1, coordinator.inFlightAsyncCommits.get());
}
// A heartbeat that fails with COORDINATOR_NOT_AVAILABLE fails the request future with
// that error and marks the coordinator unknown.
@Test
public void testCoordinatorNotAvailable() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // COORDINATOR_NOT_AVAILABLE will mark coordinator as unknown
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.COORDINATOR_NOT_AVAILABLE));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.COORDINATOR_NOT_AVAILABLE.exception(), future.exception());
    assertTrue(coordinator.coordinatorUnknown());
}
// A sync commit with an empty offset map must still wait for (and complete) previously
// issued async commits: it fails while the async commit is unanswered, then succeeds and
// drives the async callback once the offset-commit response arrives.
@Test
public void testEnsureCompletingAsyncCommitsWhenSyncCommitWithoutOffsets() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    TopicPartition tp = new TopicPartition("foo", 0);
    Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(tp, new OffsetAndMetadata(123));

    final AtomicBoolean committed = new AtomicBoolean();
    coordinator.commitOffsetsAsync(offsets, (committedOffsets, exception) -> committed.set(true));

    assertFalse(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(100L)), "expected sync commit to fail");
    assertFalse(committed.get());
    assertEquals(1, coordinator.inFlightAsyncCommits.get());

    prepareOffsetCommitRequest(singletonMap(tp, 123L), Errors.NONE);
    assertTrue(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(Long.MAX_VALUE)), "expected sync commit to succeed");
    assertTrue(committed.get(), "expected commit callback to be invoked");
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
// When the coordinator is marked unknown while many async commits are in flight, every
// pending commit's callback fires exactly once with a DisconnectException cause, and the
// in-flight counter drains back to zero.
@Test
public void testManyInFlightAsyncCommitsWithCoordinatorDisconnect() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    int numRequests = 1000;
    TopicPartition tp = new TopicPartition("foo", 0);
    final AtomicInteger responses = new AtomicInteger(0);

    for (int i = 0; i < numRequests; i++) {
        Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(tp, new OffsetAndMetadata(i));
        coordinator.commitOffsetsAsync(offsets, (offsets1, exception) -> {
            responses.incrementAndGet();
            Throwable cause = exception.getCause();
            assertInstanceOf(DisconnectException.class, cause,
                "Unexpected exception cause type: " + (cause == null ? null : cause.getClass()));
        });
    }
    assertEquals(numRequests, coordinator.inFlightAsyncCommits.get());

    coordinator.markCoordinatorUnknown("test cause");
    consumerClient.pollNoWakeup();
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertEquals(numRequests, responses.get());
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
// Callbacks of requests cancelled by a coordinator-dead event must already observe the
// coordinator as unknown (preventing retries to the same node) and see a
// DisconnectException as the failure cause.
@Test
public void testCoordinatorUnknownInUnsentCallbacksAfterCoordinatorDead() {
    // When the coordinator is marked dead, all unsent or in-flight requests are cancelled
    // with a disconnect error. This test case ensures that the corresponding callbacks see
    // the coordinator as unknown which prevents additional retries to the same coordinator.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    final AtomicBoolean asyncCallbackInvoked = new AtomicBoolean(false);

    // Hand-built offset commit sent directly through the network client so we can attach
    // our own failure adapter (bypasses the coordinator's commit bookkeeping on purpose).
    OffsetCommitRequestData offsetCommitRequestData = new OffsetCommitRequestData()
            .setGroupId(groupId)
            .setTopics(Collections.singletonList(new
                    OffsetCommitRequestData.OffsetCommitRequestTopic()
                            .setName("foo")
                            .setPartitions(Collections.singletonList(
                                    new OffsetCommitRequestData.OffsetCommitRequestPartition()
                                            .setPartitionIndex(0)
                                            .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
                                            .setCommittedMetadata("")
                                            .setCommittedOffset(13L)
                            ))
                    )
            );

    consumerClient.send(coordinator.checkAndGetCoordinator(), OffsetCommitRequest.Builder.forTopicNames(offsetCommitRequestData))
            .compose(new RequestFutureAdapter<>() {
                @Override
                public void onSuccess(ClientResponse value, RequestFuture<Object> future) {}

                @Override
                public void onFailure(RuntimeException e, RequestFuture<Object> future) {
                    assertInstanceOf(DisconnectException.class, e, "Unexpected exception type: " + e.getClass());
                    assertTrue(coordinator.coordinatorUnknown());
                    asyncCallbackInvoked.set(true);
                }
            });

    coordinator.markCoordinatorUnknown("test cause");
    consumerClient.pollNoWakeup();
    assertTrue(asyncCallbackInvoked.get());
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
// A heartbeat that fails with NOT_COORDINATOR fails the request future with that error
// and marks the coordinator unknown, forcing rediscovery.
@Test
public void testNotCoordinator() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // not_coordinator will mark coordinator as unknown
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.NOT_COORDINATOR.exception(), future.exception());
    assertTrue(coordinator.coordinatorUnknown());
}
// A heartbeat failing with ILLEGAL_GENERATION triggers a rejoin; on the next poll the
// previously owned partitions are reported to the listener as lost.
@Test
public void testIllegalGeneration() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // illegal_generation will cause re-partition
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    subscriptions.assignFromSubscribed(Collections.singletonList(t1p));

    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.ILLEGAL_GENERATION));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.ILLEGAL_GENERATION.exception(), future.exception());
    assertTrue(coordinator.rejoinNeededOrPending());

    coordinator.poll(time.timer(0));

    assertEquals(1, rebalanceListener.lostCount);
    assertEquals(Collections.singleton(t1p), rebalanceListener.lost);
}
// Leaving the group with a valid generation still reports partitions via
// onPartitionsLost (not onPartitionsRevoked) during onLeavePrepare.
@Test
public void testUnsubscribeWithValidGeneration() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(
        new ConsumerPartitionAssignor.Assignment(Collections.singletonList(t1p), ByteBuffer.wrap(new byte[0])));
    coordinator.onJoinComplete(1, "memberId", partitionAssignor.name(), buffer);

    coordinator.onLeavePrepare();
    assertEquals(1, rebalanceListener.lostCount);
    assertEquals(0, rebalanceListener.revokedCount);
}
// When both the revoke callback and the assignor throw, the first failure wins: under
// COOPERATIVE the revoke-callback exception surfaces; under EAGER no revocation happens,
// so the assignor's exception surfaces instead.
@Test
public void testRevokeExceptionThrownFirstNonBlockingSubCallbacks() {
    MockRebalanceListener throwOnRevokeListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            super.onPartitionsRevoked(partitions);
            throw new KafkaException("Kaboom on revoke!");
        }
    };

    if (protocol == COOPERATIVE) {
        verifyOnCallbackExceptions(throwOnRevokeListener,
            throwOnAssignmentAssignor.name(), "Kaboom on revoke!", null);
    } else {
        // Eager protocol doesn't revoke partitions.
        verifyOnCallbackExceptions(throwOnRevokeListener,
            throwOnAssignmentAssignor.name(), "Kaboom for assignment!", null);
    }
}
// When both the assignor's onAssignment and the listener's onPartitionsAssigned throw,
// the assignor's exception (raised first) is the one that surfaces.
@Test
public void testOnAssignmentExceptionThrownFirstNonBlockingSubCallbacks() {
    MockRebalanceListener throwOnAssignListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            super.onPartitionsAssigned(partitions);
            throw new KafkaException("Kaboom on partition assign!");
        }
    };
    verifyOnCallbackExceptions(throwOnAssignListener,
        throwOnAssignmentAssignor.name(), "Kaboom for assignment!", null);
}
// With a well-behaved assignor, a KafkaException thrown from the listener's
// onPartitionsAssigned propagates unchanged.
@Test
public void testOnPartitionsAssignExceptionThrownWhenNoPreviousThrownCallbacks() {
    MockRebalanceListener throwOnAssignListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            super.onPartitionsAssigned(partitions);
            throw new KafkaException("Kaboom on partition assign!");
        }
    };
    verifyOnCallbackExceptions(throwOnAssignListener,
        partitionAssignor.name(), "Kaboom on partition assign!", null);
}
// A non-KafkaException thrown from onPartitionsRevoked must be wrapped in a
// KafkaException with a generic message and the original as the cause (COOPERATIVE
// only — EAGER performs no revocation, so the assignor's exception surfaces instead).
@Test
public void testOnRevokeExceptionShouldBeRenderedIfNotKafkaException() {
    MockRebalanceListener throwOnRevokeListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            super.onPartitionsRevoked(partitions);
            throw new IllegalStateException("Illegal state on partition revoke!");
        }
    };

    if (protocol == COOPERATIVE) {
        verifyOnCallbackExceptions(throwOnRevokeListener,
            throwOnAssignmentAssignor.name(),
            "User rebalance callback throws an error", "Illegal state on partition revoke!");
    } else {
        // Eager protocol doesn't revoke partitions.
        verifyOnCallbackExceptions(throwOnRevokeListener,
            throwOnAssignmentAssignor.name(), "Kaboom for assignment!", null);
    }
}
// A non-KafkaException thrown from the assignor's onAssignment (here an
// IllegalStateException from throwFatalErrorOnAssignmentAssignor) must be wrapped in a
// KafkaException with a generic message and the original as the cause; it takes
// precedence over the listener's own exception.
@Test
public void testOnAssignmentExceptionShouldBeRenderedIfNotKafkaException() {
    MockRebalanceListener throwOnAssignListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            super.onPartitionsAssigned(partitions);
            throw new KafkaException("Kaboom on partition assign!");
        }
    };
    verifyOnCallbackExceptions(throwOnAssignListener,
        throwFatalErrorOnAssignmentAssignor.name(),
        "User rebalance callback throws an error", "Illegal state for assignment!");
}
// A non-KafkaException thrown from the listener's onPartitionsAssigned must be wrapped
// in a KafkaException with a generic message and the original as the cause.
@Test
public void testOnPartitionsAssignExceptionShouldBeRenderedIfNotKafkaException() {
    MockRebalanceListener throwOnAssignListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            super.onPartitionsAssigned(partitions);
            throw new IllegalStateException("Illegal state on partition assign!");
        }
    };
    verifyOnCallbackExceptions(throwOnAssignListener,
        partitionAssignor.name(), "User rebalance callback throws an error",
        "Illegal state on partition assign!");
}
/**
 * Shared driver for the callback-exception tests above: runs onJoinComplete with the
 * given listener and assignor, asserts the thrown KafkaException's message (and cause
 * message, when expected), and then verifies the listener/assignor invocation counts.
 *
 * @param rebalanceListener listener whose callbacks may throw
 * @param assignorName      name of the assignor to drive the assignment through
 * @param exceptionMessage  expected KafkaException message (null to skip the throw check)
 * @param causeMessage      expected cause message, or null when no cause is expected
 */
private void verifyOnCallbackExceptions(final MockRebalanceListener rebalanceListener,
                                        final String assignorName,
                                        final String exceptionMessage,
                                        final String causeMessage) {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(
        new ConsumerPartitionAssignor.Assignment(Collections.singletonList(t1p), ByteBuffer.wrap(new byte[0])));
    // t2p is currently owned so that a COOPERATIVE rebalance has something to revoke.
    subscriptions.assignFromSubscribed(singleton(t2p));

    if (exceptionMessage != null) {
        final Exception exception = assertThrows(KafkaException.class,
            () -> coordinator.onJoinComplete(1, "memberId", assignorName, buffer));
        assertEquals(exceptionMessage, exception.getMessage());
        if (causeMessage != null) {
            assertEquals(causeMessage, exception.getCause().getMessage());
        }
    }

    // Eager doesn't trigger on partition revoke.
    assertEquals(protocol == COOPERATIVE ? 1 : 0, rebalanceListener.revokedCount);
    assertEquals(0, rebalanceListener.lostCount);
    assertEquals(1, rebalanceListener.assignedCount);
    assertTrue(assignorMap.containsKey(assignorName), "Unknown assignor name: " + assignorName);
    assertEquals(1, assignorMap.get(assignorName).numAssignment());
}
// Leaving the group without having completed a join (no valid generation) reports the
// assigned partitions via onPartitionsLost, never onPartitionsRevoked.
@Test
public void testUnsubscribeWithInvalidGeneration() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    subscriptions.assignFromSubscribed(Collections.singletonList(t1p));

    coordinator.onLeavePrepare();
    assertEquals(1, rebalanceListener.lostCount);
    assertEquals(0, rebalanceListener.revokedCount);
}
// A heartbeat failing with UNKNOWN_MEMBER_ID triggers a rejoin; on the next poll the
// previously owned partitions are reported to the listener as lost.
@Test
public void testUnknownMemberId() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // illegal_generation will cause re-partition
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    subscriptions.assignFromSubscribed(Collections.singletonList(t1p));

    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.UNKNOWN_MEMBER_ID));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.UNKNOWN_MEMBER_ID.exception(), future.exception());
    assertTrue(coordinator.rejoinNeededOrPending());

    coordinator.poll(time.timer(0));

    assertEquals(1, rebalanceListener.lostCount);
    assertEquals(Collections.singleton(t1p), rebalanceListener.lost);
}
// A network disconnect while a heartbeat is in flight fails the future with a
// DisconnectException and marks the coordinator unknown.
@Test
public void testCoordinatorDisconnect() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // coordinator disconnect will mark coordinator as unknown
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.NONE), true); // return disconnected
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertInstanceOf(DisconnectException.class, future.exception());
    assertTrue(coordinator.coordinatorUnknown());
}
// An INVALID_GROUP_ID error on the JoinGroup response must surface from poll() as an
// ApiException.
@Test
public void testJoinGroupInvalidGroupId() {
    final String consumerId = "leader";

    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));

    // ensure metadata is up-to-date for leader
    client.updateMetadata(metadataResponse);

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    client.prepareResponse(joinGroupLeaderResponse(0, consumerId, Collections.emptyMap(),
            Errors.INVALID_GROUP_ID));
    assertThrows(ApiException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE)));
}
// Happy-path join as group leader: the leader performs the assignment, the SyncGroup
// request carries the group assignments, and the listener sees one assigned callback
// with no revocations.
@Test
public void testNormalJoinGroupLeader() {
    final String consumerId = "leader";
    final Set<String> subscription = singleton(topic1);
    final List<TopicPartition> owned = Collections.emptyList();
    final List<TopicPartition> assigned = singletonList(t1p);

    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));

    // ensure metadata is up-to-date for leader
    client.updateMetadata(metadataResponse);

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // normal join group
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, assigned));

    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    // the leader's SyncGroup must carry its member id, generation, and group assignments
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(assigned, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(Set.copyOf(assigned), subscriptions.assignedPartitions());
    assertEquals(subscription, subscriptions.metadataTopics());
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
// If the subscription changes while a join/sync round for the old subscription is in
// flight, the stale assignment must be discarded and a second round must install the
// assignment matching the new subscription.
@Test
public void testOutdatedCoordinatorAssignment() {
    createMockHeartbeatThreadCoordinator();
    final String consumerId = "outdated_assignment";
    final List<TopicPartition> owned = Collections.emptyList();
    final List<String> oldSubscription = singletonList(topic2);
    final List<TopicPartition> oldAssignment = singletonList(t2p);
    final List<String> newSubscription = singletonList(topic1);
    final List<TopicPartition> newAssignment = singletonList(t1p);

    subscriptions.subscribe(Set.copyOf(oldSubscription), Optional.of(rebalanceListener));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // Test coordinator returning unsubscribed partitions
    partitionAssignor.prepare(singletonMap(consumerId, newAssignment));

    // First incorrect assignment for subscription
    client.prepareResponse(
            joinGroupLeaderResponse(
                1, consumerId, singletonMap(consumerId, oldSubscription), Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(oldAssignment, Errors.NONE));

    // Second correct assignment for subscription
    client.prepareResponse(
            joinGroupLeaderResponse(
                1, consumerId, singletonMap(consumerId, newSubscription), Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(newAssignment, Errors.NONE));

    // Poll once so that the join group future gets created and complete
    coordinator.poll(time.timer(0));

    // Before the sync group response gets completed change the subscription
    subscriptions.subscribe(Set.copyOf(newSubscription), Optional.of(rebalanceListener));
    coordinator.poll(time.timer(0));

    coordinator.poll(time.timer(Long.MAX_VALUE));

    final Collection<TopicPartition> assigned = getAdded(owned, newAssignment);

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(Set.copyOf(newAssignment), subscriptions.assignedPartitions());
    assertEquals(Set.copyOf(newSubscription), subscriptions.metadataTopics());
    // EAGER revokes everything before rejoining; COOPERATIVE revokes nothing here
    assertEquals(protocol == EAGER ? 1 : 0, rebalanceListener.revokedCount);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(assigned, rebalanceListener.assigned);
}
// While a subscription change is mid-rebalance, metadataTopics() must cover the union of
// the old and new subscriptions; after the second rebalance completes it shrinks to just
// the new subscription.
@Test
public void testMetadataTopicsDuringSubscriptionChange() {
    final String consumerId = "subscription_change";
    final List<String> oldSubscription = singletonList(topic1);
    final List<TopicPartition> oldAssignment = Collections.singletonList(t1p);
    final List<String> newSubscription = singletonList(topic2);
    final List<TopicPartition> newAssignment = Collections.singletonList(t2p);

    subscriptions.subscribe(Set.copyOf(oldSubscription), Optional.of(rebalanceListener));
    assertEquals(Set.copyOf(oldSubscription), subscriptions.metadataTopics());

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareJoinAndSyncResponse(consumerId, 1, oldSubscription, oldAssignment);

    coordinator.poll(time.timer(0));
    assertEquals(Set.copyOf(oldSubscription), subscriptions.metadataTopics());

    subscriptions.subscribe(Set.copyOf(newSubscription), Optional.of(rebalanceListener));
    // both old and new topics are tracked until the rebalance for the new subscription completes
    assertEquals(Set.of(topic1, topic2), subscriptions.metadataTopics());

    prepareJoinAndSyncResponse(consumerId, 2, newSubscription, newAssignment);
    coordinator.poll(time.timer(Long.MAX_VALUE));

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(Set.copyOf(newAssignment), subscriptions.assignedPartitions());
    assertEquals(Set.copyOf(newSubscription), subscriptions.metadataTopics());
}
// Pattern-subscribed leader join: the leader starts with partial metadata (one topic),
// is forced to refresh metadata during assignment, and ends up subscribed/assigned to
// both topics matching the pattern.
@Test
public void testPatternJoinGroupLeader() {
    final String consumerId = "leader";
    final List<TopicPartition> assigned = Arrays.asList(t1p, t2p);
    final List<TopicPartition> owned = Collections.emptyList();

    subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener));

    // partially update the metadata with one topic first,
    // let the leader to refresh metadata during assignment
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // normal join group
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, assigned));

    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId);
    }, syncGroupResponse(assigned, Errors.NONE));
    // expect client to force updating the metadata, if yes gives it both topics
    client.prepareMetadataUpdate(metadataResponse);

    coordinator.poll(time.timer(Long.MAX_VALUE));

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(2, subscriptions.numAssignedPartitions());
    assertEquals(2, subscriptions.metadataTopics().size());
    assertEquals(2, subscriptions.subscription().size());
    // callback not triggered at all since there's nothing to be revoked
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
// Drives three consecutive rebalances for a pattern subscription while topic metadata
// changes in flight (topic added during sync, then removed again), verifying after each
// round the subscription set, assigned partitions, revoke/assign callback counts, and
// that nothing is ever reported as lost.
@Test
public void testMetadataRefreshDuringRebalance() {
    final String consumerId = "leader";
    final List<TopicPartition> owned = Collections.emptyList();
    final List<TopicPartition> oldAssigned = singletonList(t1p);
    subscriptions.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener));
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    coordinator.maybeUpdateSubscriptionMetadata();

    assertEquals(singleton(topic1), subscriptions.subscription());

    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    Map<String, List<String>> initialSubscription = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, oldAssigned));

    // the metadata will be updated in flight with a new topic added
    final List<String> updatedSubscription = Arrays.asList(topic1, topic2);

    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, initialSubscription, Errors.NONE));
    // sneak a metadata update (topic2 appears) in while the SyncGroup response is delivered
    client.prepareResponse(body -> {
        final Map<String, Integer> updatedPartitions = new HashMap<>();
        for (String topic : updatedSubscription)
            updatedPartitions.put(topic, 1);
        client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, updatedPartitions));
        return true;
    }, syncGroupResponse(oldAssigned, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));

    // rejoin will only be set in the next poll call
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(singleton(topic1), subscriptions.subscription());
    assertEquals(Set.copyOf(oldAssigned), subscriptions.assignedPartitions());
    // nothing to be revoked and hence no callback triggered
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, oldAssigned), rebalanceListener.assigned);

    List<TopicPartition> newAssigned = Arrays.asList(t1p, t2p);

    final Map<String, List<String>> updatedSubscriptions = singletonMap(consumerId, Arrays.asList(topic1, topic2));
    partitionAssignor.prepare(singletonMap(consumerId, newAssigned));

    // we expect to see a second rebalance with the new-found topics
    client.prepareResponse(body -> {
        JoinGroupRequest join = (JoinGroupRequest) body;
        Iterator<JoinGroupRequestData.JoinGroupRequestProtocol> protocolIterator =
                join.data().protocols().iterator();
        assertTrue(protocolIterator.hasNext());
        JoinGroupRequestData.JoinGroupRequestProtocol protocolMetadata = protocolIterator.next();

        ByteBuffer metadata = ByteBuffer.wrap(protocolMetadata.metadata());
        ConsumerPartitionAssignor.Subscription subscription = ConsumerProtocol.deserializeSubscription(metadata);
        metadata.rewind();
        return subscription.topics().containsAll(updatedSubscription);
    }, joinGroupLeaderResponse(2, consumerId, updatedSubscriptions, Errors.NONE));
    // update the metadata again back to topic1
    client.prepareResponse(body -> {
        client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
        return true;
    }, syncGroupResponse(newAssigned, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));

    Collection<TopicPartition> revoked = getRevoked(oldAssigned, newAssigned);
    int revokedCount = revoked.isEmpty() ? 0 : 1;

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(Set.copyOf(updatedSubscription), subscriptions.subscription());
    assertEquals(Set.copyOf(newAssigned), subscriptions.assignedPartitions());
    assertEquals(revokedCount, rebalanceListener.revokedCount);
    assertEquals(revoked.isEmpty() ? null : revoked, rebalanceListener.revoked);
    assertEquals(2, rebalanceListener.assignedCount);
    assertEquals(getAdded(oldAssigned, newAssigned), rebalanceListener.assigned);

    // we expect to see a third rebalance with the new-found topics
    partitionAssignor.prepare(singletonMap(consumerId, oldAssigned));

    client.prepareResponse(body -> {
        JoinGroupRequest join = (JoinGroupRequest) body;
        Iterator<JoinGroupRequestData.JoinGroupRequestProtocol> protocolIterator =
                join.data().protocols().iterator();
        assertTrue(protocolIterator.hasNext());
        JoinGroupRequestData.JoinGroupRequestProtocol protocolMetadata = protocolIterator.next();

        ByteBuffer metadata = ByteBuffer.wrap(protocolMetadata.metadata());
        ConsumerPartitionAssignor.Subscription subscription = ConsumerProtocol.deserializeSubscription(metadata);
        metadata.rewind();

        return subscription.topics().contains(topic1);
    }, joinGroupLeaderResponse(3, consumerId, initialSubscription, Errors.NONE));
    client.prepareResponse(syncGroupResponse(oldAssigned, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));

    revoked = getRevoked(newAssigned, oldAssigned);
    assertFalse(revoked.isEmpty());
    revokedCount += 1;
    Collection<TopicPartition> added = getAdded(newAssigned, oldAssigned);

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(singleton(topic1), subscriptions.subscription());
    assertEquals(Set.copyOf(oldAssigned), subscriptions.assignedPartitions());
    assertEquals(revokedCount, rebalanceListener.revokedCount);
    assertEquals(revoked.isEmpty() ? null : revoked, rebalanceListener.revoked);
    assertEquals(3, rebalanceListener.assignedCount);
    assertEquals(added, rebalanceListener.assigned);
    assertEquals(0, rebalanceListener.lostCount);
}
    /**
     * Verifies that a metadata refresh forced during a rebalance updates a pattern subscription:
     * the refresh delivers a second matching topic, so the subscription grows from one topic to
     * two, and a subsequent refresh with unchanged metadata does not trigger another rebalance.
     */
    @Test
    public void testForceMetadataRefreshForPatternSubscriptionDuringRebalance() {
        // Set up a non-leader consumer with pattern subscription and a cluster containing one topic matching the
        // pattern.
        subscriptions.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener));
        client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
        coordinator.maybeUpdateSubscriptionMetadata();
        assertEquals(singleton(topic1), subscriptions.subscription());
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // Instrument the test so that metadata will contain two topics after next refresh.
        client.prepareMetadataUpdate(metadataResponse);
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        // Follower SyncGroup: correct member id and generation, and no assignments attached.
        client.prepareResponse(body -> {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().isEmpty();
        }, syncGroupResponse(singletonList(t1p), Errors.NONE));
        partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
        // This will trigger rebalance.
        coordinator.poll(time.timer(Long.MAX_VALUE));
        // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain two topics.
        final Set<String> updatedSubscriptionSet = Set.of(topic1, topic2);
        assertEquals(updatedSubscriptionSet, subscriptions.subscription());
        // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger
        // rebalance again.
        metadata.requestUpdate(true);
        consumerClient.poll(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
    }
    /**
     * Complement of the refresh case: when a forced metadata refresh during a rebalance drops a
     * previously matched topic, the pattern subscription shrinks accordingly, and an unchanged
     * follow-up refresh does not re-trigger a rebalance.
     */
    @Test
    public void testForceMetadataDeleteForPatternSubscriptionDuringRebalance() {
        try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
            subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener));
            // Start with metadata for both topics so the pattern matches both.
            client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap<>() {
                {
                    put(topic1, 1);
                    put(topic2, 1);
                }
            }));
            coordinator.maybeUpdateSubscriptionMetadata();
            assertEquals(Set.of(topic1, topic2), subscriptions.subscription());
            client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
            coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
            MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, Map.of(topic1, 1));
            // Instrument the test so that metadata will contain only one topic after next refresh.
            client.prepareMetadataUpdate(deletedMetadataResponse);
            client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
            // Follower SyncGroup: correct member id and generation, and no assignments attached.
            client.prepareResponse(body -> {
                SyncGroupRequest sync = (SyncGroupRequest) body;
                return sync.data().memberId().equals(consumerId) &&
                    sync.data().generationId() == 1 &&
                    sync.groupAssignments().isEmpty();
            }, syncGroupResponse(singletonList(t1p), Errors.NONE));
            partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
            // This will trigger rebalance.
            coordinator.poll(time.timer(Long.MAX_VALUE));
            // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain only one topic.
            assertEquals(singleton(topic1), subscriptions.subscription());
            // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger
            // rebalance again.
            metadata.requestUpdate(true);
            consumerClient.poll(time.timer(Long.MAX_VALUE));
            assertFalse(coordinator.rejoinNeededOrPending());
        }
    }
    /**
     * onJoinPrepare must report incomplete (false) while the auto-commit it triggers fails with a
     * retriable error, and complete (true) once a later attempt commits successfully.
     */
    @Test
    public void testOnJoinPrepareWithOffsetCommitShouldSuccessAfterRetry() {
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), false)) {
            int generationId = 42;
            String memberId = "consumer-42";
            Timer pollTimer = time.timer(100L);
            // First attempt: commit fails with retriable UNKNOWN_TOPIC_OR_PARTITION.
            client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION)));
            boolean res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
            assertFalse(res);
            pollTimer = time.timer(100L);
            // Second attempt: commit succeeds, so join preparation completes.
            client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.NONE)));
            res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
            assertTrue(res);
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
            assertFalse(coordinator.coordinatorUnknown());
        }
    }
@Test
public void testOnJoinPrepareWithOffsetCommitShouldKeepJoinAfterNonRetryableException() {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), false)) {
int generationId = 42;
String memberId = "consumer-42";
Timer pollTimer = time.timer(100L);
client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.UNKNOWN_MEMBER_ID)));
boolean res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
assertTrue(res);
assertFalse(client.hasPendingResponses());
assertFalse(client.hasInFlightRequests());
assertFalse(coordinator.coordinatorUnknown());
}
}
    /**
     * If the commit attempted during onJoinPrepare cannot finish before the rebalance timeout
     * elapses, the member proceeds with the join anyway (onJoinPrepare returns true).
     */
    @Test
    public void testOnJoinPrepareWithOffsetCommitShouldKeepJoinAfterRebalanceTimeout() {
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), false)) {
            int generationId = 42;
            String memberId = "consumer-42";
            // Already-expired timer: the first attempt cannot complete the in-flight commit.
            Timer pollTimer = time.timer(0L);
            boolean res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
            assertFalse(res);
            pollTimer = time.timer(100L);
            // Advance past the rebalance timeout before answering the outstanding commit.
            time.sleep(rebalanceTimeoutMs);
            client.respond(offsetCommitResponse(singletonMap(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION)));
            res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
            assertTrue(res);
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
            assertFalse(coordinator.coordinatorUnknown());
        }
    }
@Test
public void testJoinPrepareWithDisableAutoCommit() {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) {
coordinator.ensureActiveGroup();
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
int generationId = 42;
String memberId = "consumer-42";
boolean res = coordinator.onJoinPrepare(time.timer(0L), generationId, memberId);
assertTrue(res);
assertTrue(client.hasPendingResponses());
assertFalse(client.hasInFlightRequests());
assertFalse(coordinator.coordinatorUnknown());
}
}
    /**
     * onJoinPrepare completes in one pass when the auto-commit succeeds; the commit callback is
     * then delivered via invokeCompletedOffsetCommitCallbacks().
     */
    @Test
    public void testJoinPrepareAndCommitCompleted() {
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.of("group-id"), true)) {
            coordinator.ensureActiveGroup();
            // Commit triggered by onJoinPrepare succeeds.
            prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
            int generationId = 42;
            String memberId = "consumer-42";
            boolean res = coordinator.onJoinPrepare(time.timer(0L), generationId, memberId);
            coordinator.invokeCompletedOffsetCommitCallbacks();
            assertTrue(res);
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
            assertFalse(coordinator.coordinatorUnknown());
        }
    }
@Test
public void testJoinPrepareAndCommitWithCoordinatorNotAvailable() {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.of("group-id"), true)) {
coordinator.ensureActiveGroup();
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE);
int generationId = 42;
String memberId = "consumer-42";
boolean res = coordinator.onJoinPrepare(time.timer(0L), generationId, memberId);
coordinator.invokeCompletedOffsetCommitCallbacks();
assertFalse(res);
assertFalse(client.hasPendingResponses());
assertFalse(client.hasInFlightRequests());
assertTrue(coordinator.coordinatorUnknown());
}
}
    /**
     * A fatal UNKNOWN_MEMBER_ID commit error during onJoinPrepare does not block the rejoin:
     * preparation still completes (true) and the coordinator remains known.
     */
    @Test
    public void testJoinPrepareAndCommitWithUnknownMemberId() {
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.of("group-id"), true)) {
            coordinator.ensureActiveGroup();
            // Commit triggered by onJoinPrepare fails with a non-retriable member error.
            prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_MEMBER_ID);
            int generationId = 42;
            String memberId = "consumer-42";
            boolean res = coordinator.onJoinPrepare(time.timer(0L), generationId, memberId);
            coordinator.invokeCompletedOffsetCommitCallbacks();
            assertTrue(res);
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
            assertFalse(coordinator.coordinatorUnknown());
        }
    }
/**
* Verifies that the consumer re-joins after a metadata change. If JoinGroup fails
* and metadata reverts to its original value, the consumer should still retry JoinGroup.
*/
@Test
public void testRebalanceWithMetadataChange() {
MetadataResponse metadataResponse1 = RequestTestUtils.metadataUpdateWith(1,
Utils.mkMap(Utils.mkEntry(topic1, 1), Utils.mkEntry(topic2, 1)));
MetadataResponse metadataResponse2 = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1));
verifyRebalanceWithMetadataChange(Optional.empty(), partitionAssignor, metadataResponse1, metadataResponse2, true);
}
@Test
public void testRackAwareConsumerRebalanceWithDifferentRacks() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 2), Arrays.asList(1, 2), Arrays.asList(2, 0)),
true, true);
}
@Test
public void testNonRackAwareConsumerRebalanceWithDifferentRacks() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 2), Arrays.asList(1, 2), Arrays.asList(2, 0)),
false, false);
}
@Test
public void testRackAwareConsumerRebalanceWithAdditionalRacks() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 1, 2), Arrays.asList(1, 2), Arrays.asList(2, 0)),
true, true);
}
@Test
public void testRackAwareConsumerRebalanceWithLessRacks() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Collections.singletonList(2)),
true, true);
}
@Test
public void testRackAwareConsumerRebalanceWithNewPartitions() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0), Arrays.asList(0, 1)),
true, true);
}
@Test
public void testRackAwareConsumerRebalanceWithNoMetadataChange() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
true, false);
}
@Test
public void testRackAwareConsumerRebalanceWithNoRackChange() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(3, 4), Arrays.asList(4, 5), Arrays.asList(5, 3)),
true, false);
}
@Test
public void testRackAwareConsumerRebalanceWithNewReplicasOnSameRacks() {
verifyRackAwareConsumerRebalance(
Arrays.asList(Arrays.asList(0, 1), Arrays.asList(1, 2), Arrays.asList(2, 0)),
Arrays.asList(Arrays.asList(0, 1, 3), Arrays.asList(1, 2, 5), Arrays.asList(2, 0, 3)),
true, false);
}
    /**
     * Drives {@link #verifyRebalanceWithMetadataChange} with rack-aware metadata built from the
     * given per-partition replica lists for topic1.
     *
     * @param partitionReplicas1 replica broker ids per partition in the first metadata snapshot
     * @param partitionReplicas2 replica broker ids per partition in the second metadata snapshot
     * @param rackAwareConsumer  if true, configure a consumer rack id and a rack-aware assignor
     * @param expectRebalance    whether the metadata change is expected to trigger a rebalance
     */
    private void verifyRackAwareConsumerRebalance(List<List<Integer>> partitionReplicas1,
                                                  List<List<Integer>> partitionReplicas2,
                                                  boolean rackAwareConsumer,
                                                  boolean expectRebalance) {
        List<String> racks = Arrays.asList("rack-a", "rack-b", "rack-c");
        MockPartitionAssignor assignor = partitionAssignor;
        String consumerRackId = null;
        if (rackAwareConsumer) {
            // Place the consumer on the first rack and rebuild the coordinator with a rack-aware assignor.
            consumerRackId = racks.get(0);
            assignor = new RackAwareAssignor(protocol);
            createRackAwareCoordinator(consumerRackId, assignor);
        }
        MetadataResponse metadataResponse1 = rackAwareMetadata(6, racks, Collections.singletonMap(topic1, partitionReplicas1));
        MetadataResponse metadataResponse2 = rackAwareMetadata(6, racks, Collections.singletonMap(topic1, partitionReplicas2));
        verifyRebalanceWithMetadataChange(Optional.ofNullable(consumerRackId), assignor, metadataResponse1, metadataResponse2, expectRebalance);
    }
    /**
     * Joins the group as leader with the first metadata snapshot, switches to the second
     * snapshot, and asserts whether that change triggers a rebalance. When it does, also
     * verifies that the consumer keeps retrying JoinGroup after a failure even though metadata
     * reverts to its original value, and checks the listener callbacks after completion.
     */
    private void verifyRebalanceWithMetadataChange(Optional<String> rackId,
                                                   MockPartitionAssignor partitionAssignor,
                                                   MetadataResponse metadataResponse1,
                                                   MetadataResponse metadataResponse2,
                                                   boolean expectRebalance) {
        final String consumerId = "leader";
        final List<String> topics = Arrays.asList(topic1, topic2);
        // All partitions present in the first metadata snapshot.
        final List<TopicPartition> partitions = metadataResponse1.topicMetadata().stream()
            .flatMap(t -> t.partitionMetadata().stream().map(p -> new TopicPartition(t.topic(), p.partition())))
            .collect(Collectors.toList());
        subscriptions.subscribe(Set.copyOf(topics), Optional.of(rebalanceListener));
        client.updateMetadata(metadataResponse1);
        coordinator.maybeUpdateSubscriptionMetadata();
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // Initial join: this consumer leads and is assigned every partition from snapshot 1.
        Map<String, List<String>> initialSubscription = singletonMap(consumerId, topics);
        partitionAssignor.prepare(singletonMap(consumerId, partitions));
        client.prepareResponse(joinGroupLeaderResponse(1, consumerId, initialSubscription, false, Errors.NONE, rackId));
        client.prepareResponse(syncGroupResponse(partitions, Errors.NONE));
        coordinator.poll(time.timer(Long.MAX_VALUE));
        // rejoin will only be set in the next poll call
        assertFalse(coordinator.rejoinNeededOrPending());
        assertEquals(Set.copyOf(topics), subscriptions.subscription());
        assertEquals(Set.copyOf(partitions), subscriptions.assignedPartitions());
        assertEquals(0, rebalanceListener.revokedCount);
        assertNull(rebalanceListener.revoked);
        assertEquals(1, rebalanceListener.assignedCount);
        // Change metadata to trigger rebalance.
        client.updateMetadata(metadataResponse2);
        coordinator.poll(time.timer(0));
        if (!expectRebalance) {
            // No request may have been sent when the change is not rebalance-worthy.
            assertEquals(0, client.requests().size());
            return;
        }
        assertEquals(1, client.requests().size());
        // Revert metadata to original value. Fail pending JoinGroup. Another
        // JoinGroup should be sent, which will be completed successfully.
        client.updateMetadata(metadataResponse1);
        client.respond(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NOT_COORDINATOR));
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        assertFalse(client.hasInFlightRequests());
        coordinator.poll(time.timer(1));
        assertTrue(coordinator.rejoinNeededOrPending());
        // The retried JoinGroup must reuse the same member id.
        client.respond(request -> {
            if (!(request instanceof JoinGroupRequest)) {
                return false;
            } else {
                JoinGroupRequest joinRequest = (JoinGroupRequest) request;
                return consumerId.equals(joinRequest.data().memberId());
            }
        }, joinGroupLeaderResponse(2, consumerId, initialSubscription, false, Errors.NONE, rackId));
        client.prepareResponse(syncGroupResponse(partitions, Errors.NONE));
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
        Collection<TopicPartition> revoked = getRevoked(partitions, partitions);
        assertEquals(revoked.isEmpty() ? 0 : 1, rebalanceListener.revokedCount);
        assertEquals(revoked.isEmpty() ? null : revoked, rebalanceListener.revoked);
        // No partitions have been lost since the rebalance failure was not fatal
        assertEquals(0, rebalanceListener.lostCount);
        assertNull(rebalanceListener.lost);
        Collection<TopicPartition> added = getAdded(partitions, partitions);
        assertEquals(2, rebalanceListener.assignedCount);
        assertEquals(added.isEmpty() ? Collections.emptySet() : Set.copyOf(partitions), rebalanceListener.assigned);
        assertEquals(Set.copyOf(partitions), subscriptions.assignedPartitions());
    }
    /**
     * A wakeup raised between the JoinGroup and SyncGroup phases aborts poll() with a
     * WakeupException; the next poll resumes the join and completes the assignment normally.
     */
    @Test
    public void testWakeupDuringJoin() {
        final String consumerId = "leader";
        final List<TopicPartition> owned = Collections.emptyList();
        final List<TopicPartition> assigned = singletonList(t1p);
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        // ensure metadata is up-to-date for leader
        client.updateMetadata(metadataResponse);
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
        partitionAssignor.prepare(singletonMap(consumerId, assigned));
        // prepare only the first half of the join and then trigger the wakeup
        client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
        consumerClient.wakeup();
        try {
            coordinator.poll(time.timer(Long.MAX_VALUE));
        } catch (WakeupException e) {
            // ignore
        }
        // now complete the second half
        client.prepareResponse(syncGroupResponse(assigned, Errors.NONE));
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
        assertEquals(Set.copyOf(assigned), subscriptions.assignedPartitions());
        assertEquals(0, rebalanceListener.revokedCount);
        assertNull(rebalanceListener.revoked);
        assertEquals(1, rebalanceListener.assignedCount);
        assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
    }
    /**
     * A follower's happy path: the JoinGroup names another member as leader, the SyncGroup
     * carries no assignments, and the returned assignment is applied with a single
     * onPartitionsAssigned callback and no revocation.
     */
    @Test
    public void testNormalJoinGroupFollower() {
        final Set<String> subscription = singleton(topic1);
        final List<TopicPartition> owned = Collections.emptyList();
        final List<TopicPartition> assigned = singletonList(t1p);
        subscriptions.subscribe(subscription, Optional.of(rebalanceListener));
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // normal join group
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        // Follower SyncGroup: correct member id and generation, no assignments attached.
        client.prepareResponse(body -> {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().isEmpty();
        }, syncGroupResponse(assigned, Errors.NONE));
        coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
        assertEquals(Set.copyOf(assigned), subscriptions.assignedPartitions());
        assertEquals(subscription, subscriptions.metadataTopics());
        assertEquals(0, rebalanceListener.revokedCount);
        assertNull(rebalanceListener.revoked);
        assertEquals(1, rebalanceListener.assignedCount);
        assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
    }
@Test
public void testUpdateLastHeartbeatPollWhenCoordinatorUnknown() throws Exception {
// If we are part of an active group and we cannot find the coordinator, we should nevertheless
// continue to update the last poll time so that we do not expire the consumer
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// Join the group, but signal a coordinator change after the first heartbeat
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR));
coordinator.poll(time.timer(Long.MAX_VALUE));
time.sleep(heartbeatIntervalMs);
// Await the first heartbeat which forces us to find a new coordinator
TestUtils.waitForCondition(() -> !client.hasPendingResponses(),
"Failed to observe expected heartbeat from background thread");
assertTrue(coordinator.coordinatorUnknown());
assertFalse(coordinator.poll(time.timer(0)));
assertEquals(time.milliseconds(), coordinator.heartbeat().lastPollTime());
time.sleep(rebalanceTimeoutMs - 1);
assertFalse(coordinator.heartbeat().pollTimeoutExpired(time.milliseconds()));
}
@Test
public void testPatternJoinGroupFollower() {
final Set<String> subscription = Set.of(topic1, topic2);
final List<TopicPartition> owned = Collections.emptyList();
final List<TopicPartition> assigned = Arrays.asList(t1p, t2p);
subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener));
// partially update the metadata with one topic first,
// let the leader to refresh metadata during assignment
client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// normal join group
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(body -> {
SyncGroupRequest sync = (SyncGroupRequest) body;
return sync.data().memberId().equals(consumerId) &&
sync.data().generationId() == 1 &&
sync.groupAssignments().isEmpty();
}, syncGroupResponse(assigned, Errors.NONE));
// expect client to force updating the metadata, if yes gives it both topics
client.prepareMetadataUpdate(metadataResponse);
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(assigned.size(), subscriptions.numAssignedPartitions());
assertEquals(subscription, subscriptions.subscription());
assertEquals(0, rebalanceListener.revokedCount);
assertNull(rebalanceListener.revoked);
assertEquals(1, rebalanceListener.assignedCount);
assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
@Test
public void testLeaveGroupOnClose() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
final AtomicBoolean received = new AtomicBoolean(false);
client.prepareResponse(body -> {
received.set(true);
LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
return validateLeaveGroup(groupId, consumerId, leaveRequest);
}, new LeaveGroupResponse(
new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())));
coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT);
assertTrue(received.get());
}
@Test
public void testMaybeLeaveGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
final AtomicBoolean received = new AtomicBoolean(false);
client.prepareResponse(body -> {
received.set(true);
LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
return validateLeaveGroup(groupId, consumerId, leaveRequest);
}, new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())));
coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "test maybe leave group");
assertTrue(received.get());
AbstractCoordinator.Generation generation = coordinator.generationIfStable();
assertNull(generation);
}
private boolean validateLeaveGroup(String groupId,
String consumerId,
LeaveGroupRequest leaveRequest) {
List<MemberIdentity> members = leaveRequest.data().members();
return leaveRequest.data().groupId().equals(groupId) &&
members.size() == 1 &&
members.get(0).memberId().equals(consumerId);
}
/**
* This test checks if a consumer that has a valid member ID but an invalid generation
* ({@link org.apache.kafka.clients.consumer.internals.AbstractCoordinator.Generation#NO_GENERATION})
* can still execute a leave group request. Such a situation may arise when a consumer has initiated a JoinGroup
* request without a memberId, but is shutdown or restarted before it has a chance to initiate and complete the
* second request.
*/
@Test
public void testPendingMemberShouldLeaveGroup() {
final String consumerId = "consumer-id";
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// here we return a DEFAULT_GENERATION_ID, but valid member id and leader id.
client.prepareResponse(joinGroupFollowerResponse(-1, consumerId, "leader-id", Errors.MEMBER_ID_REQUIRED));
// execute join group
coordinator.joinGroupIfNeeded(time.timer(0));
final AtomicBoolean received = new AtomicBoolean(false);
client.prepareResponse(body -> {
received.set(true);
LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
return validateLeaveGroup(groupId, consumerId, leaveRequest);
}, new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())));
coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "pending member leaves");
assertTrue(received.get());
}
@Test
public void testUnexpectedErrorOnSyncGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// join initially, but let coordinator rebalance on sync
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.UNKNOWN_SERVER_ERROR));
assertThrows(KafkaException.class, () -> coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE)));
}
@Test
public void testUnknownMemberIdOnSyncGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// join initially, but let coordinator returns unknown member id
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.UNKNOWN_MEMBER_ID));
// now we should see a new join with the empty UNKNOWN_MEMBER_ID
client.prepareResponse(body -> {
JoinGroupRequest joinRequest = (JoinGroupRequest) body;
return joinRequest.data().memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID);
}, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}
@Test
public void testRebalanceInProgressOnSyncGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// join initially, but let coordinator rebalance on sync
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.REBALANCE_IN_PROGRESS));
// then let the full join/sync finish successfully
client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}
@Test
public void testIllegalGenerationOnSyncGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// join initially, but let coordinator rebalance on sync
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.ILLEGAL_GENERATION));
// then let the full join/sync finish successfully
client.prepareResponse(body -> {
JoinGroupRequest joinRequest = (JoinGroupRequest) body;
// member ID should not be reset under ILLEGAL_GENERATION error
return joinRequest.data().memberId().equals(consumerId);
}, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}
@Test
public void testMetadataChangeTriggersRebalance() {
// ensure metadata is up-to-date for leader
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.updateMetadata(metadataResponse);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
// the leader is responsible for picking up metadata changes and forcing a group rebalance
client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.poll(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
// a new partition is added to the topic
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 2)), false, time.milliseconds());
coordinator.maybeUpdateSubscriptionMetadata();
// we should detect the change and ask for reassignment
assertTrue(coordinator.rejoinNeededOrPending());
}
@Test
public void testStaticLeaderRejoinsGroupAndCanTriggersRebalance() {
// ensure metadata is up-to-date for leader
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.updateMetadata(metadataResponse);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// the leader is responsible for picking up metadata changes and forcing a group rebalance.
// note that `MockPartitionAssignor.prepare` is not called therefore calling `MockPartitionAssignor.assign`
// will throw a IllegalStateException. this indirectly verifies that `assign` is correctly skipped.
Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, true, Errors.NONE, Optional.empty()));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.poll(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(topic1), coordinator.subscriptionState().metadataTopics());
// a new partition is added to the topic
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 2)), false, time.milliseconds());
coordinator.maybeUpdateSubscriptionMetadata();
// we should detect the change and ask for reassignment
assertTrue(coordinator.rejoinNeededOrPending());
}
@Test
public void testStaticLeaderRejoinsGroupAndCanDetectMetadataChangesForOtherMembers() {
// ensure metadata is up-to-date for leader
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.updateMetadata(metadataResponse);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// the leader is responsible for picking up metadata changes and forcing a group rebalance.
// note that `MockPartitionAssignor.prepare` is not called therefore calling `MockPartitionAssignor.assign`
// will throw a IllegalStateException. this indirectly verifies that `assign` is correctly skipped.
Map<String, List<String>> memberSubscriptions = mkMap(
mkEntry(consumerId, singletonList(topic1)),
mkEntry(consumerId2, singletonList(topic2))
);
client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, true, Errors.NONE, Optional.empty()));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.poll(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(Set.of(topic1, topic2), coordinator.subscriptionState().metadataTopics());
// a new partition is added to the topic2 that only consumerId2 is subscribed to
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic2, 2)), false, time.milliseconds());
coordinator.maybeUpdateSubscriptionMetadata();
// we should detect the change and ask for reassignment
assertTrue(coordinator.rejoinNeededOrPending());
}
@Test
public void testUpdateMetadataDuringRebalance() {
    // Verifies that a metadata update arriving while a SyncGroup request is in
    // flight triggers a second rebalance that picks up the newly visible topic.
    // Note: these locals intentionally shadow the test fixture's topic1/topic2.
    final String topic1 = "topic1";
    final String topic2 = "topic2";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    final String consumerId = "leader";
    List<String> topics = Arrays.asList(topic1, topic2);
    subscriptions.subscribe(new HashSet<>(topics), Optional.of(rebalanceListener));
    // we only have metadata for one topic initially
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // prepare initial rebalance
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, topics);
    partitionAssignor.prepare(singletonMap(consumerId, singletonList(tp1)));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    // the matcher fires the metadata update only once the SyncGroup request
    // for generation 1 has actually been sent, i.e. mid-rebalance
    client.prepareResponse(body -> {
        SyncGroupRequest sync = (SyncGroupRequest) body;
        if (sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId)) {
            // trigger the metadata update including both topics after the sync group request has been sent
            Map<String, Integer> topicPartitionCounts = new HashMap<>();
            topicPartitionCounts.put(topic1, 1);
            topicPartitionCounts.put(topic2, 1);
            client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, topicPartitionCounts));
            return true;
        }
        return false;
    }, syncGroupResponse(Collections.singletonList(tp1), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    // the metadata update should trigger a second rebalance
    client.prepareResponse(joinGroupLeaderResponse(2, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(Arrays.asList(tp1, tp2), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    // both partitions are assigned after the second rebalance
    assertEquals(Set.of(tp1, tp2), subscriptions.assignedPartitions());
}
/**
 * Verifies that a subscription change updates SubscriptionState correctly even after JoinGroup
 * failures that don't re-invoke onJoinPrepare. In particular, references to a previously
 * subscribed (unauthorized) topic must be removed once the subscription no longer includes it.
 */
@Test
public void testSubscriptionChangeWithAuthorizationFailure() {
    // Subscribe to two topics of which only one is authorized and verify that metadata failure is propagated.
    subscriptions.subscribe(Set.of(topic1, topic2), Optional.of(rebalanceListener));
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWith("kafka-cluster", 1,
            Collections.singletonMap(topic2, Errors.TOPIC_AUTHORIZATION_FAILED), singletonMap(topic1, 1)));
    assertThrows(TopicAuthorizationException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE)));
    client.respond(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // Fail the first JoinGroup request
    client.prepareResponse(joinGroupLeaderResponse(0, consumerId, Collections.emptyMap(),
            Errors.GROUP_AUTHORIZATION_FAILED));
    assertThrows(GroupAuthorizationException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE)));
    // Change subscription to include only the authorized topic. Complete rebalance and check that
    // references to topic2 have been removed from SubscriptionState.
    subscriptions.subscribe(Set.of(topic1), Optional.of(rebalanceListener));
    assertEquals(Collections.singleton(topic1), subscriptions.metadataTopics());
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWith("kafka-cluster", 1,
            Collections.emptyMap(), singletonMap(topic1, 1)));
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    // only the authorized topic remains anywhere in SubscriptionState
    assertEquals(singleton(topic1), subscriptions.subscription());
    assertEquals(singleton(topic1), subscriptions.metadataTopics());
}
@Test
public void testWakeupFromAssignmentCallback() {
    // Verifies that a WakeupException raised from the user's onPartitionsAssigned
    // callback aborts the first poll, and that the next poll retries the callback
    // and completes the rebalance (so the callback runs twice in total).
    final String topic = "topic1";
    TopicPartition partition = new TopicPartition(topic, 0);
    final String consumerId = "follower";
    Set<String> topics = Collections.singleton(topic);
    // Listener wakes up only on its first invocation so the retry can succeed.
    MockRebalanceListener rebalanceListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            boolean raiseWakeup = this.assignedCount == 0;
            super.onPartitionsAssigned(partitions);
            if (raiseWakeup)
                throw new WakeupException();
        }
    };
    subscriptions.subscribe(topics, Optional.of(rebalanceListener));
    // we only have metadata for one topic initially
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // prepare initial rebalance
    partitionAssignor.prepare(singletonMap(consumerId, Collections.singletonList(partition)));
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.singletonList(partition), Errors.NONE));
    // The first call to poll should raise the exception from the rebalance listener
    assertThrows(WakeupException.class, () -> coordinator.poll(time.timer(Long.MAX_VALUE)));
    // The second call should retry the assignment callback and succeed
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(0, rebalanceListener.revokedCount);
    assertEquals(2, rebalanceListener.assignedCount);
}
@Test
public void testRebalanceAfterTopicUnavailableWithSubscribe() {
    // Explicit subscribe; all topics available in the second metadata response.
    unavailableTopicTest(false, Collections.emptySet());
}
@Test
public void testRebalanceAfterTopicUnavailableWithPatternSubscribe() {
    // Pattern subscribe; all topics available in the second metadata response.
    unavailableTopicTest(true, Collections.emptySet());
}
@Test
public void testRebalanceAfterNotMatchingTopicUnavailableWithPatternSubscribe() {
    // Pattern subscribe; a topic that does NOT match the pattern stays unavailable,
    // which must not block the rebalance for the matching topic.
    unavailableTopicTest(true, Collections.singleton("notmatching"));
}
/**
 * Shared scenario: the first metadata response reports the subscribed topic as
 * UNKNOWN_TOPIC_OR_PARTITION, so the first rebalance completes with an empty
 * assignment and a metadata refresh is requested. Once the topic becomes
 * available, a second rebalance assigns its partition.
 *
 * @param patternSubscribe               subscribe via regex pattern instead of explicit topic
 * @param unavailableTopicsInLastMetadata topics still reported unavailable in the second
 *                                        metadata response (must not block the rebalance)
 */
private void unavailableTopicTest(boolean patternSubscribe, Set<String> unavailableTopicsInLastMetadata) {
    if (patternSubscribe)
        subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener));
    else
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWith("kafka-cluster", 1,
            Collections.singletonMap(topic1, Errors.UNKNOWN_TOPIC_OR_PARTITION), Collections.emptyMap()));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    // empty assignment since the topic is not yet available
    partitionAssignor.prepare(Collections.emptyMap());
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    // callback not triggered since there's nothing to be assigned
    assertEquals(Collections.emptySet(), rebalanceListener.assigned);
    assertTrue(metadata.updateRequested(), "Metadata refresh not requested for unavailable partitions");
    Map<String, Errors> topicErrors = new HashMap<>();
    for (String topic : unavailableTopicsInLastMetadata)
        topicErrors.put(topic, Errors.UNKNOWN_TOPIC_OR_PARTITION);
    // second metadata response: topic1 now available (others possibly still not)
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWith("kafka-cluster", 1,
            topicErrors, singletonMap(topic1, 1)));
    consumerClient.poll(time.timer(0));
    client.prepareResponse(joinGroupLeaderResponse(2, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertFalse(metadata.updateRequested(), "Metadata refresh requested unnecessarily");
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(singleton(t1p), rebalanceListener.assigned);
}
@Test
public void testExcludeInternalTopicsConfigOption() {
    // exclude.internal.topics = true (i.e. do NOT include internal topics)
    testInternalTopicInclusion(false);
}
@Test
public void testIncludeInternalTopicsConfigOption() {
    // exclude.internal.topics = false (internal topics may match pattern subscriptions)
    testInternalTopicInclusion(true);
}
/**
 * Pattern-subscribes to ".*" and checks that the internal __consumer_offsets topic
 * is matched if and only if {@code includeInternalTopics} is enabled on the
 * consumer metadata.
 */
private void testInternalTopicInclusion(boolean includeInternalTopics) {
    // rebuild metadata/client so the includeInternalTopics flag takes effect
    metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, includeInternalTopics,
            false, subscriptions, new LogContext(), new ClusterResourceListeners());
    client = new MockClient(time, metadata);
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, subscriptions)) {
        subscriptions.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener));
        Node node = new Node(0, "localhost", 9999);
        // metadata response containing only the internal group-metadata topic
        MetadataResponse.PartitionMetadata partitionMetadata =
                new MetadataResponse.PartitionMetadata(Errors.NONE, new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0),
                        Optional.of(node.id()), Optional.empty(), singletonList(node.id()), singletonList(node.id()),
                        singletonList(node.id()));
        MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(Errors.NONE,
                Topic.GROUP_METADATA_TOPIC_NAME, true, singletonList(partitionMetadata));
        client.updateMetadata(RequestTestUtils.metadataResponse(singletonList(node), "clusterId", node.id(),
                singletonList(topicMetadata)));
        coordinator.maybeUpdateSubscriptionMetadata();
        assertEquals(includeInternalTopics, subscriptions.subscription().contains(Topic.GROUP_METADATA_TOPIC_NAME));
    }
}
@Test
public void testRejoinGroup() {
    // Joins once, then changes the subscription to force a rejoin; verifies
    // the rebalance listener's revoked/assigned callbacks for both joins.
    String otherTopic = "otherTopic";
    final List<TopicPartition> owned = Collections.emptyList();
    final List<TopicPartition> assigned = singletonList(t1p);
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    // join the group once
    joinAsFollowerAndReceiveAssignment(coordinator, assigned);
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
    // and join the group again
    rebalanceListener.revoked = null;
    rebalanceListener.assigned = null;
    subscriptions.subscribe(Set.of(topic1, otherTopic), Optional.of(rebalanceListener));
    client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(assigned, Errors.NONE));
    coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
    // expected revoked/added sets depend on the assignor protocol (eager vs cooperative)
    Collection<TopicPartition> revoked = getRevoked(assigned, assigned);
    Collection<TopicPartition> added = getAdded(assigned, assigned);
    assertEquals(revoked.isEmpty() ? 0 : 1, rebalanceListener.revokedCount);
    assertEquals(revoked.isEmpty() ? null : revoked, rebalanceListener.revoked);
    assertEquals(2, rebalanceListener.assignedCount);
    assertEquals(added, rebalanceListener.assigned);
}
@Test
public void testDisconnectInJoin() {
    // A disconnect during JoinGroup should trigger coordinator rediscovery and
    // a successful retry of the join, without invoking the revocation callback.
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    final List<TopicPartition> owned = Collections.emptyList();
    final List<TopicPartition> assigned = singletonList(t1p);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // disconnected from original coordinator will cause re-discover and join again
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE), true);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(assigned, Errors.NONE));
    coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(Set.copyOf(assigned), subscriptions.assignedPartitions());
    // nothing to be revoked hence callback not triggered
    assertEquals(0, rebalanceListener.revokedCount);
    assertNull(rebalanceListener.revoked);
    assertEquals(1, rebalanceListener.assignedCount);
    assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
@Test
public void testInvalidSessionTimeout() {
    // INVALID_SESSION_TIMEOUT is fatal (configuration error) and must surface
    // to the caller as an ApiException rather than being retried.
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // coordinator doesn't like the session timeout
    client.prepareResponse(joinGroupFollowerResponse(0, consumerId, "", Errors.INVALID_SESSION_TIMEOUT));
    assertThrows(ApiException.class, () -> coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetOnly() {
    // Basic happy path: an async commit with manual assignment succeeds and
    // the user callback is invoked.
    subscriptions.assignFromUser(singleton(t1p));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    AtomicBoolean success = new AtomicBoolean(false);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), callback(success));
    // callbacks run on the caller thread; drain them explicitly
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(success.get());
}
@Test
public void testCoordinatorDisconnectAfterNotCoordinatorError() {
    // NOT_COORDINATOR must mark the coordinator dead and fail in-flight requests.
    testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors.NOT_COORDINATOR);
}
@Test
public void testCoordinatorDisconnectAfterCoordinatorNotAvailableError() {
    // COORDINATOR_NOT_AVAILABLE must mark the coordinator dead and fail in-flight requests.
    testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors.COORDINATOR_NOT_AVAILABLE);
}
/**
 * Shared scenario: a coordinator-level error on the first of two in-flight async
 * commits marks the coordinator dead, which cancels the second request; both
 * callbacks must then receive a retriable commit failure.
 */
private void testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors error) {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // Send two async commits and fail the first one with an error.
    // This should cause a coordinator disconnect which will cancel the second request.
    MockCommitCallback firstCommitCallback = new MockCommitCallback();
    MockCommitCallback secondCommitCallback = new MockCommitCallback();
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), firstCommitCallback);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), secondCommitCallback);
    assertEquals(2, coordinator.inFlightAsyncCommits.get());
    respondToOffsetCommitRequest(singletonMap(t1p, 100L), error);
    consumerClient.pollNoWakeup();
    consumerClient.pollNoWakeup(); // second poll since coordinator disconnect is async
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(coordinator.coordinatorUnknown());
    // both commits fail with a retriable error so the caller may retry
    assertInstanceOf(RetriableCommitFailedException.class, firstCommitCallback.exception);
    assertInstanceOf(RetriableCommitFailedException.class, secondCommitCallback.exception);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
@Test
public void testAutoCommitDynamicAssignment() {
    // Auto-commit fires on poll() once the auto-commit interval has elapsed,
    // using the position set via seek().
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
        subscriptions.seek(t1p, 100);
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        time.sleep(autoCommitIntervalMs);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        // the prepared OffsetCommit response was consumed, i.e. the commit was sent
        assertFalse(client.hasPendingResponses());
    }
}
@Test
public void testAutoCommitRetryBackoff() {
    // After a retriable auto-commit failure, the retry must wait out the full
    // retry backoff before a new OffsetCommit request is sent.
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
        subscriptions.seek(t1p, 100);
        time.sleep(autoCommitIntervalMs);
        // Send an offset commit, but let it fail with a retriable error
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertTrue(coordinator.coordinatorUnknown());
        // After the disconnect, we should rediscover the coordinator
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.poll(time.timer(Long.MAX_VALUE));
        subscriptions.seek(t1p, 200);
        // Until the retry backoff has expired, we should not retry the offset commit
        time.sleep(retryBackoffMs / 2);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(0, client.inFlightRequestCount());
        // Once the backoff expires, we should retry
        time.sleep(retryBackoffMs / 2);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(1, client.inFlightRequestCount());
        // the retried commit carries the updated position (200)
        respondToOffsetCommitRequest(singletonMap(t1p, 200L), Errors.NONE);
    }
}
@Test
public void testAutoCommitAwaitsInterval() {
    // Auto-commit must not send a new request while one is in flight, and must
    // wait the full auto-commit interval between successive commits.
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
        subscriptions.seek(t1p, 100);
        time.sleep(autoCommitIntervalMs);
        // Send the offset commit request, but do not respond
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(1, client.inFlightRequestCount());
        time.sleep(autoCommitIntervalMs / 2);
        // Ensure that no additional offset commit is sent
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(1, client.inFlightRequestCount());
        respondToOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(0, client.inFlightRequestCount());
        subscriptions.seek(t1p, 200);
        // If we poll again before the auto-commit interval, there should be no new sends
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(0, client.inFlightRequestCount());
        // After the remainder of the interval passes, we send a new offset commit
        time.sleep(autoCommitIntervalMs / 2);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertEquals(1, client.inFlightRequestCount());
        respondToOffsetCommitRequest(singletonMap(t1p, 200L), Errors.NONE);
    }
}
@Test
public void testAutoCommitDynamicAssignmentRebalance() {
    // Auto-commit must not fire before the consumer has joined the group and
    // received an assignment; once joined, the commit proceeds normally.
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // haven't joined, so should not cause a commit
        time.sleep(autoCommitIntervalMs);
        consumerClient.poll(time.timer(0));
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
        coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
        subscriptions.seek(t1p, 100);
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        time.sleep(autoCommitIntervalMs);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(client.hasPendingResponses());
    }
}
@Test
public void testAutoCommitManualAssignment() {
    // Auto-commit also applies with user-assigned partitions (no group rebalance).
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.assignFromUser(singleton(t1p));
        subscriptions.seek(t1p, 100);
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        time.sleep(autoCommitIntervalMs);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(client.hasPendingResponses());
    }
}
@Test
public void testAutoCommitManualAssignmentCoordinatorUnknown() {
    // With manual assignment, auto-commit is deferred while the coordinator is
    // unknown and sent once the coordinator is found (after the retry backoff).
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.assignFromUser(singleton(t1p));
        subscriptions.seek(t1p, 100);
        // no commit initially since coordinator is unknown
        consumerClient.poll(time.timer(0));
        time.sleep(autoCommitIntervalMs);
        consumerClient.poll(time.timer(0));
        // now find the coordinator
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // sleep only for the retry backoff
        time.sleep(retryBackoffMs);
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(client.hasPendingResponses());
    }
}
@Test
public void testCommitOffsetMetadataAsync() {
    // Async commit carrying user-supplied offset metadata ("hello"); the
    // callback must receive the same offsets map that was committed.
    subscriptions.assignFromUser(singleton(t1p));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    AtomicBoolean success = new AtomicBoolean(false);
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(100L, "hello");
    Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p, offsetAndMetadata);
    coordinator.commitOffsetsAsync(offsets, callback(offsets, success));
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(success.get());
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
@Test
public void testCommitOffsetMetadataSync() {
    // Sync commit carrying user-supplied offset metadata completes successfully.
    subscriptions.assignFromUser(singleton(t1p));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(100L, "hello");
    Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p, offsetAndMetadata);
    boolean success = coordinator.commitOffsetsSync(offsets, time.timer(Long.MAX_VALUE));
    assertTrue(success);
}
@Test
public void testCommitOffsetAsyncWithDefaultCallback() {
    // A successful async commit invokes the supplied callback exactly once with
    // no exception.
    int invokedBeforeTest = mockOffsetCommitCallback.invoked;
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked);
    assertNull(mockOffsetCommitCallback.exception);
}
@Test
public void testCommitAfterLeaveGroup() {
    // After leaving the group and switching to manual assignment, commits must
    // use the default (empty) member id and generation — not the stale values
    // from the previous dynamic membership.
    // enable auto-assignment
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
    // now switch to manual assignment
    client.prepareResponse(new LeaveGroupResponse(new LeaveGroupResponseData()
            .setErrorCode(Errors.NONE.code())));
    subscriptions.unsubscribe();
    coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "test commit after leave");
    subscriptions.assignFromUser(singleton(t1p));
    // the client should not reuse generation/memberId from auto-subscribed generation
    client.prepareResponse(body -> {
        OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
        return commitRequest.data().memberId().equals(OffsetCommitRequest.DEFAULT_MEMBER_ID) &&
                commitRequest.data().generationIdOrMemberEpoch() == OffsetCommitRequest.DEFAULT_GENERATION_ID;
    }, offsetCommitResponse(singletonMap(t1p, Errors.NONE)));
    AtomicBoolean success = new AtomicBoolean(false);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), callback(success));
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(success.get());
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
}
@Test
public void testCommitOffsetAsyncFailedWithDefaultCallback() {
    // A retriable failure (COORDINATOR_NOT_AVAILABLE) still invokes the callback
    // once, with a RetriableCommitFailedException.
    int invokedBeforeTest = mockOffsetCommitCallback.invoked;
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked);
    assertInstanceOf(RetriableCommitFailedException.class, mockOffsetCommitCallback.exception);
}
@Test
public void testCommitOffsetAsyncCoordinatorNotAvailable() {
    // COORDINATOR_NOT_AVAILABLE on an async commit marks the coordinator
    // unknown and surfaces a retriable failure to the callback.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // async commit with coordinator not available
    MockCommitCallback cb = new MockCommitCallback();
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(coordinator.coordinatorUnknown());
    assertEquals(1, cb.invoked);
    assertInstanceOf(RetriableCommitFailedException.class, cb.exception);
}
@Test
public void testCommitOffsetAsyncNotCoordinator() {
    // NOT_COORDINATOR on an async commit marks the coordinator unknown and
    // surfaces a retriable failure to the callback.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // async commit with not coordinator
    MockCommitCallback cb = new MockCommitCallback();
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR);
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(coordinator.coordinatorUnknown());
    assertEquals(1, cb.invoked);
    assertInstanceOf(RetriableCommitFailedException.class, cb.exception);
}
@Test
public void testCommitOffsetAsyncDisconnected() {
    // A network disconnect during an async commit marks the coordinator unknown
    // and surfaces a retriable failure to the callback.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // async commit with coordinator disconnected
    MockCommitCallback cb = new MockCommitCallback();
    prepareOffsetCommitRequestDisconnect(singletonMap(t1p, 100L));
    coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb);
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    coordinator.invokeCompletedOffsetCommitCallbacks();
    assertTrue(coordinator.coordinatorUnknown());
    assertEquals(1, cb.invoked);
    assertInstanceOf(RetriableCommitFailedException.class, cb.exception);
}
@Test
public void testCommitOffsetSyncNotCoordinator() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // sync commit hitting NOT_COORDINATOR: should rediscover the coordinator
    // and then retry the commit request successfully
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
@Test
public void testCommitOffsetSyncCoordinatorNotAvailable() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // sync commit hitting COORDINATOR_NOT_AVAILABLE: should rediscover the
    // coordinator and then retry the commit request successfully
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
@Test
public void testCommitOffsetSyncCoordinatorDisconnected() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // sync commit with coordinator disconnected (should connect, get metadata, and then submit the commit request)
    prepareOffsetCommitRequestDisconnect(singletonMap(t1p, 100L));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
@Test
public void testAsyncCommitCallbacksInvokedPriorToSyncCommitCompletion() throws Exception {
    // Verifies ordering: when an async commit and a sync commit complete around
    // the same time, the async commit's callback must run before the sync
    // commit returns, so the callbacks list ends up [firstOffset, secondOffset].
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    final List<OffsetAndMetadata> committedOffsets = Collections.synchronizedList(new ArrayList<>());
    final OffsetAndMetadata firstOffset = new OffsetAndMetadata(0L);
    final OffsetAndMetadata secondOffset = new OffsetAndMetadata(1L);
    coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), (offsets, exception) -> committedOffsets.add(firstOffset));
    // Do a synchronous commit in the background so that we can send both responses at the same time
    Thread thread = new Thread(() -> {
        coordinator.commitOffsetsSync(singletonMap(t1p, secondOffset), time.timer(10000));
        committedOffsets.add(secondOffset);
    });
    assertEquals(1, coordinator.inFlightAsyncCommits.get());
    thread.start();
    // wait until both commit requests are in flight, then answer them together
    client.waitForRequests(2, 5000);
    respondToOffsetCommitRequest(singletonMap(t1p, firstOffset.offset()), Errors.NONE);
    respondToOffsetCommitRequest(singletonMap(t1p, secondOffset.offset()), Errors.NONE);
    thread.join();
    assertEquals(0, coordinator.inFlightAsyncCommits.get());
    assertEquals(Arrays.asList(firstOffset, secondOffset), committedOffsets);
}
@Test
public void testRetryCommitUnknownTopicOrPartition() {
    // UNKNOWN_TOPIC_OR_PARTITION is retriable for sync commits: the second
    // (successful) response is consumed on retry and the commit returns true.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION)));
    client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.NONE)));
    assertTrue(coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(10000)));
}
@Test
public void testCommitOffsetMetadataTooLarge() {
    // since offset metadata is provided by the user, we have to propagate the exception so they can handle it
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.OFFSET_METADATA_TOO_LARGE);
    assertThrows(OffsetMetadataTooLarge.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetIllegalGeneration() {
    // we cannot retry if a rebalance occurs before the commit completed:
    // ILLEGAL_GENERATION surfaces as a (non-retriable) CommitFailedException
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.ILLEGAL_GENERATION);
    assertThrows(CommitFailedException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetUnknownMemberId() {
    // we cannot retry if a rebalance occurs before the commit completed:
    // UNKNOWN_MEMBER_ID surfaces as a (non-retriable) CommitFailedException
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_MEMBER_ID);
    assertThrows(CommitFailedException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetIllegalGenerationWithNewGeneration() {
    // If the generation changes while an ILLEGAL_GENERATION commit response is in
    // flight (i.e. a rebalance is already underway), the failure is reported as
    // REBALANCE_IN_PROGRESS and the newer generation must NOT be reset.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
        1,
        "memberId",
        null);
    coordinator.setNewGeneration(currGen);
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.ILLEGAL_GENERATION);
    RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
        new OffsetAndMetadata(100L, "metadata")));
    // change the generation
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        2,
        "memberId-new",
        null);
    coordinator.setNewGeneration(newGen);
    coordinator.setNewState(AbstractCoordinator.MemberState.PREPARING_REBALANCE);
    assertTrue(consumerClient.poll(future, time.timer(30000)));
    // expected type first, actual object second (original had the arguments swapped,
    // which made the assertion weaker than intended)
    assertInstanceOf(Errors.REBALANCE_IN_PROGRESS.exception().getClass(), future.exception());
    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
    @Test
    public void testCommitOffsetIllegalGenerationShouldResetGenerationId() {
        // After a completed join, an ILLEGAL_GENERATION commit error resets the generation
        // id and protocol name to NO_GENERATION but keeps the member id for rejoining.
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE))
        // Join as a follower and sync with an empty assignment before committing.
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.NONE));
        coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.ILLEGAL_GENERATION);
        RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")));
        assertTrue(consumerClient.poll(future, time.timer(30000)));
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.generationId, coordinator.generation().generationId);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.protocolName, coordinator.generation().protocolName);
        // member ID should not be reset
        assertEquals(consumerId, coordinator.generation().memberId);
    }
@Test
public void testCommitOffsetIllegalGenerationWithResetGeneration() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
1,
"memberId",
null);
coordinator.setNewGeneration(currGen);
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.ILLEGAL_GENERATION);
RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")));
// reset the generation
coordinator.setNewGeneration(AbstractCoordinator.Generation.NO_GENERATION);
assertTrue(consumerClient.poll(future, time.timer(30000)));
assertInstanceOf(future.exception().getClass(), new CommitFailedException());
// the generation should not be reset
assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
}
@Test
public void testCommitOffsetUnknownMemberWithNewGeneration() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
1,
"memberId",
null);
coordinator.setNewGeneration(currGen);
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_MEMBER_ID);
RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")));
// change the generation
final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
2,
"memberId-new",
null);
coordinator.setNewGeneration(newGen);
coordinator.setNewState(AbstractCoordinator.MemberState.PREPARING_REBALANCE);
assertTrue(consumerClient.poll(future, time.timer(30000)));
assertInstanceOf(future.exception().getClass(), Errors.REBALANCE_IN_PROGRESS.exception());
// the generation should not be reset
assertEquals(newGen, coordinator.generation());
}
@Test
public void testCommitOffsetUnknownMemberWithResetGeneration() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
1,
"memberId",
null);
coordinator.setNewGeneration(currGen);
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_MEMBER_ID);
RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")));
// reset the generation
coordinator.setNewGeneration(AbstractCoordinator.Generation.NO_GENERATION);
assertTrue(consumerClient.poll(future, time.timer(30000)));
assertInstanceOf(future.exception().getClass(), new CommitFailedException());
// the generation should be reset
assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
}
    @Test
    public void testCommitOffsetUnknownMemberShouldResetToNoGeneration() {
        // UNKNOWN_MEMBER_ID on a commit after a completed join resets the whole
        // generation (id, protocol, and member id) back to NO_GENERATION.
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // Join as a follower and sync with an empty assignment before committing.
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.NONE));
        coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_MEMBER_ID);
        RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")));
        assertTrue(consumerClient.poll(future, time.timer(30000)));
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
    }
@Test
public void testCommitOffsetFencedInstanceWithRebalancingGeneration() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
1,
"memberId",
null);
coordinator.setNewGeneration(currGen);
coordinator.setNewState(AbstractCoordinator.MemberState.PREPARING_REBALANCE);
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID);
RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")));
// change the generation
final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
2,
"memberId-new",
null);
coordinator.setNewGeneration(newGen);
assertTrue(consumerClient.poll(future, time.timer(30000)));
assertInstanceOf(future.exception().getClass(), Errors.REBALANCE_IN_PROGRESS.exception());
// the generation should not be reset
assertEquals(newGen, coordinator.generation());
}
@Test
public void testCommitOffsetFencedInstanceWithNewGeneration() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
final AbstractCoordinator.Generation currGen = new AbstractCoordinator.Generation(
1,
"memberId",
null);
coordinator.setNewGeneration(currGen);
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID);
RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")));
// change the generation
final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
2,
"memberId-new",
null);
coordinator.setNewGeneration(newGen);
assertTrue(consumerClient.poll(future, time.timer(30000)));
assertInstanceOf(future.exception().getClass(), new CommitFailedException());
// the generation should not be reset
assertEquals(newGen, coordinator.generation());
}
    @Test
    public void testCommitOffsetShouldNotSetInstanceIdIfMemberIdIsUnknown() {
        // A static-member configuration (groupInstanceId) must not be sent on the commit
        // request while the member id is still unknown (i.e. the member never joined).
        rebalanceConfig = buildRebalanceConfig(groupInstanceId, null);
        ConsumerCoordinator coordinator = buildCoordinator(
            rebalanceConfig,
            new Metrics(),
            assignors,
            false,
            subscriptions
        );
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(5000));
        // Request matcher: verify the wire request carries no instance id and an empty member id.
        client.prepareResponse(body -> {
            OffsetCommitRequestData data = ((OffsetCommitRequest) body).data();
            return data.groupInstanceId() == null && data.memberId().isEmpty();
        }, offsetCommitResponse(Collections.emptyMap()));
        RequestFuture<Void> future = coordinator.sendOffsetCommitRequest(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")));
        assertTrue(consumerClient.poll(future, time.timer(5000)));
        assertFalse(future.failed());
    }
    @Test
    public void testCommitOffsetRebalanceInProgress() {
        // we cannot retry if a rebalance occurs before the commit completed
        final String consumerId = "leader";
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        // ensure metadata is up-to-date for leader
        client.updateMetadata(metadataResponse);
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        // normal join group
        Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
        partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
        // A zero-duration timer leaves the join incomplete: rejoin is pending, no stable generation.
        coordinator.ensureActiveGroup(time.timer(0L));
        assertTrue(coordinator.rejoinNeededOrPending());
        assertNull(coordinator.generationIfStable());
        // when the state is REBALANCING, we would not even send out the request but fail immediately
        assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
        final Node coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
        client.respondFrom(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE), coordinatorNode);
        // Matcher: the sync-group request must come from this member with generation 1
        // and include its own assignment (it is the leader).
        client.prepareResponse(body -> {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.data().memberId().equals(consumerId) &&
                sync.data().generationId() == 1 &&
                sync.groupAssignments().containsKey(consumerId);
        }, syncGroupResponse(singletonList(t1p), Errors.NONE));
        coordinator.poll(time.timer(Long.MAX_VALUE));
        AbstractCoordinator.Generation expectedGeneration = new AbstractCoordinator.Generation(1, consumerId, partitionAssignor.name());
        assertFalse(coordinator.rejoinNeededOrPending());
        assertEquals(expectedGeneration, coordinator.generationIfStable());
        // REBALANCE_IN_PROGRESS on the commit itself: commit fails and a rejoin is
        // required, but the stable generation is retained.
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
        assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
            new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
        assertTrue(coordinator.rejoinNeededOrPending());
        assertEquals(expectedGeneration, coordinator.generationIfStable());
    }
@Test
public void testCommitOffsetSyncCallbackWithNonRetriableException() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// sync commit with invalid partitions should throw if we have no callback
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.UNKNOWN_SERVER_ERROR);
assertThrows(KafkaException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetSyncWithoutFutureGetsCompleted() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(0)));
}
    @Test
    public void testRefreshOffset() {
        // Fetching a committed offset of 100 initializes the fetch position for t1p.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L));
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        // No partitions left initializing; the position matches the committed offset.
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertTrue(subscriptions.hasAllFetchPositions());
        assertEquals(100L, subscriptions.position(t1p).offset);
    }
    @Test
    public void testRefreshOffsetWithValidation() {
        // A committed offset from an older leader epoch (3) than the current one (4)
        // is loaded but requires leader-epoch validation before becoming a valid position.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        // Initial leader epoch of 4
        MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("kafka-cluster", 1,
                Collections.emptyMap(), singletonMap(topic1, 1), tp -> 4);
        client.updateMetadata(metadataResponse);
        // Load offsets from previous epoch
        client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L, Optional.of(3)));
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        // Offset gets loaded, but requires validation
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertFalse(subscriptions.hasAllFetchPositions());
        assertTrue(subscriptions.awaitingValidation(t1p));
        assertEquals(100L, subscriptions.position(t1p).offset);
        assertNull(subscriptions.validPosition(t1p));
    }
@Test
public void testFetchCommittedOffsets() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
long offset = 500L;
String metadata = "blahblah";
Optional<Integer> leaderEpoch = Optional.of(15);
client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of(
new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setName(t1p.topic())
.setPartitions(List.of(
new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(t1p.partition())
.setCommittedOffset(offset)
.setCommittedLeaderEpoch(leaderEpoch.get())
.setMetadata(metadata)
))
)));
Map<TopicPartition, OffsetAndMetadata> fetchedOffsets = coordinator.fetchCommittedOffsets(singleton(t1p),
time.timer(Long.MAX_VALUE));
assertNotNull(fetchedOffsets);
assertEquals(new OffsetAndMetadata(offset, leaderEpoch, metadata), fetchedOffsets.get(t1p));
}
    @Test
    public void testTopicAuthorizationFailedInOffsetFetch() {
        // A per-partition TOPIC_AUTHORIZATION_FAILED in the OffsetFetch response must be
        // raised as TopicAuthorizationException naming the unauthorized topic.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of(
            new OffsetFetchResponseData.OffsetFetchResponseTopics()
                .setName(t1p.topic())
                .setPartitions(List.of(
                    new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                        .setPartitionIndex(t1p.partition())
                        .setCommittedOffset(-1)
                        .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code())
                ))
        )));
        TopicAuthorizationException exception = assertThrows(TopicAuthorizationException.class, () ->
            coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE)));
        assertEquals(singleton(topic1), exception.unauthorizedTopics());
    }
@Test
public void testRefreshOffsetsGroupNotAuthorized() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
subscriptions.assignFromUser(singleton(t1p));
client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED, List.of()));
try {
coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
fail("Expected group authorization error");
} catch (GroupAuthorizationException e) {
assertEquals(groupId, e.groupId());
}
}
    @Test
    public void testRefreshOffsetWithPendingTransactions() {
        // UNSTABLE_OFFSET_COMMIT (pending transaction) leaves the partition
        // initializing on the first attempt; the retry loads the committed offset.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        client.prepareResponse(offsetFetchResponse(t1p, Errors.UNSTABLE_OFFSET_COMMIT, "", -1L));
        client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L));
        assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
        // First attempt: unstable commit, position not established yet.
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(0L));
        assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
        // Second attempt succeeds and establishes the position.
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(0L));
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertTrue(subscriptions.hasAllFetchPositions());
        assertEquals(100L, subscriptions.position(t1p).offset);
    }
    @Test
    public void testRefreshOffsetUnknownTopicOrPartition() {
        // UNKNOWN_TOPIC_OR_PARTITION on the offset fetch is treated as fatal for
        // position initialization and surfaces as a KafkaException.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        client.prepareResponse(offsetFetchResponse(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION, "", 100L));
        assertThrows(KafkaException.class, () -> coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE)));
    }
    @ParameterizedTest
    @CsvSource({
        "NOT_COORDINATOR, true",
        "COORDINATOR_NOT_AVAILABLE, true",
        "COORDINATOR_LOAD_IN_PROGRESS, false",
        "NETWORK_EXCEPTION, false",
    })
    public void testRefreshOffsetRetriableErrorCoordinatorLookup(Errors error, boolean expectCoordinatorRelookup) {
        // Retriable offset-fetch errors are retried; some of them (NOT_COORDINATOR,
        // COORDINATOR_NOT_AVAILABLE) additionally force a coordinator re-discovery first.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        client.prepareResponse(offsetFetchResponse(error, List.of()));
        if (expectCoordinatorRelookup) {
            // These errors invalidate the coordinator, so a FindCoordinator round-trip is expected.
            client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        }
        client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L))
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertTrue(subscriptions.hasAllFetchPositions());
        assertEquals(100L, subscriptions.position(t1p).offset);
    }
    @Test
    public void testRefreshOffsetWithNoFetchableOffsets() {
        // A committed offset of -1 means nothing was committed: the partition stays
        // initializing, no reset is scheduled, and no position is established.
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        subscriptions.assignFromUser(singleton(t1p));
        client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", -1L));
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
        assertEquals(Collections.emptySet(), subscriptions.partitionsNeedingReset(time.milliseconds()));
        assertFalse(subscriptions.hasAllFetchPositions());
        assertNull(subscriptions.position(t1p));
    }
    @Test
    public void testNoCoordinatorDiscoveryIfPositionsKnown() {
        // When every assigned partition already has a valid position (via seek),
        // initWithCommittedOffsetsIfNeeded must not trigger coordinator discovery.
        assertTrue(coordinator.coordinatorUnknown());
        subscriptions.assignFromUser(singleton(t1p));
        subscriptions.seek(t1p, 500L);
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertTrue(subscriptions.hasAllFetchPositions());
        assertEquals(500L, subscriptions.position(t1p).offset);
        // The coordinator must still be unknown — no lookup happened.
        assertTrue(coordinator.coordinatorUnknown());
    }
    @Test
    public void testNoCoordinatorDiscoveryIfPartitionAwaitingReset() {
        // A partition awaiting an offset reset does not need committed offsets, so
        // initWithCommittedOffsetsIfNeeded must not trigger coordinator discovery.
        assertTrue(coordinator.coordinatorUnknown());
        subscriptions.assignFromUser(singleton(t1p));
        subscriptions.requestOffsetReset(t1p, AutoOffsetResetStrategy.EARLIEST);
        coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
        assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
        assertFalse(subscriptions.hasAllFetchPositions());
        assertEquals(Collections.singleton(t1p), subscriptions.partitionsNeedingReset(time.milliseconds()));
        assertEquals(AutoOffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(t1p));
        // The coordinator must still be unknown — no lookup happened.
        assertTrue(coordinator.coordinatorUnknown());
    }
    @Test
    public void testAuthenticationFailureInEnsureActiveGroup() {
        // A pending authentication error on the coordinator connection must surface
        // as AuthenticationException rather than being retried silently.
        client.createPendingAuthenticationError(node, 300);
        assertThrows(AuthenticationException.class,
            () -> coordinator.ensureActiveGroup(),
            "Expected an authentication error.");
    }
@Test
public void testThreadSafeAssignedPartitionsMetric() throws Exception {
// Get the assigned-partitions metric
final Metric metric = metrics.metric(new MetricName("assigned-partitions", consumerId + groupId + "-coordinator-metrics",
"", Collections.emptyMap()));
// Start polling the metric in the background
final AtomicBoolean doStop = new AtomicBoolean();
final AtomicReference<Exception> exceptionHolder = new AtomicReference<>();
final AtomicInteger observedSize = new AtomicInteger();
Thread poller = new Thread() {
@Override
public void run() {
// Poll as fast as possible to reproduce ConcurrentModificationException
while (!doStop.get()) {
try {
int size = ((Double) metric.metricValue()).intValue();
observedSize.set(size);
} catch (Exception e) {
exceptionHolder.set(e);
return;
}
}
}
};
poller.start();
// Assign two partitions to trigger a metric change that can lead to ConcurrentModificationException
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// Change the assignment several times to increase likelihood of concurrent updates
Set<TopicPartition> partitions = new HashSet<>();
int totalPartitions = 10;
for (int partition = 0; partition < totalPartitions; partition++) {
partitions.add(new TopicPartition(topic1, partition));
subscriptions.assignFromUser(partitions);
}
// Wait for the metric poller to observe the final assignment change or raise an error
TestUtils.waitForCondition(
() -> observedSize.get() == totalPartitions ||
exceptionHolder.get() != null, "Failed to observe expected assignment change");
doStop.set(true);
poller.join();
assertNull(exceptionHolder.get(), "Failed fetching the metric at least once");
}
@Test
public void testCloseDynamicAssignment() {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), true)) {
gracefulCloseTest(coordinator, true);
}
}
@Test
public void testCloseManualAssignment() {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(false, true, Optional.empty(), true)) {
gracefulCloseTest(coordinator, false);
}
}
@Test
public void testCloseCoordinatorNotKnownManualAssignment() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(false, true, Optional.empty(), true)) {
makeCoordinatorUnknown(coordinator, Errors.NOT_COORDINATOR);
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, 1000, 1000, 1000);
}
}
@Test
public void testCloseCoordinatorNotKnownNoCommits() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.empty(), true)) {
makeCoordinatorUnknown(coordinator, Errors.NOT_COORDINATOR);
closeVerifyTimeout(coordinator, 1000, 0, 0);
}
}
@Test
public void testCloseCoordinatorNotKnownWithCommits() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), true)) {
makeCoordinatorUnknown(coordinator, Errors.NOT_COORDINATOR);
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, 1000, 1000, 1000);
}
}
@Test
public void testCloseCoordinatorUnavailableNoCommits() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.empty(), true)) {
makeCoordinatorUnknown(coordinator, Errors.COORDINATOR_NOT_AVAILABLE);
closeVerifyTimeout(coordinator, 1000, 0, 0);
}
}
@Test
public void testCloseTimeoutCoordinatorUnavailableForCommit() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
makeCoordinatorUnknown(coordinator, Errors.COORDINATOR_NOT_AVAILABLE);
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, 1000, 1000, 1000);
}
}
@Test
public void testCloseMaxWaitCoordinatorUnavailableForCommit() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
makeCoordinatorUnknown(coordinator, Errors.COORDINATOR_NOT_AVAILABLE);
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, Long.MAX_VALUE, requestTimeoutMs, requestTimeoutMs);
}
}
@Test
public void testCloseNoResponseForCommit() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, Long.MAX_VALUE, requestTimeoutMs, requestTimeoutMs);
}
}
@Test
public void testCloseNoResponseForLeaveGroup() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.empty(), true)) {
closeVerifyTimeout(coordinator, Long.MAX_VALUE, requestTimeoutMs, requestTimeoutMs);
}
}
@Test
public void testCloseNoWait() throws Exception {
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
time.sleep(autoCommitIntervalMs);
closeVerifyTimeout(coordinator, 0, 0, 0);
}
}
    @Test
    public void testHeartbeatThreadClose() throws Exception {
        // Closing the coordinator must terminate its heartbeat thread: after close,
        // no live thread may carry the group id in its name.
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
            coordinator.ensureActiveGroup();
            time.sleep(heartbeatIntervalMs + 100);
            Thread.yield(); // Give heartbeat thread a chance to attempt heartbeat
            closeVerifyTimeout(coordinator, Long.MAX_VALUE, requestTimeoutMs, requestTimeoutMs);
            // Enumerate all live threads and verify none belongs to this group's heartbeat.
            Thread[] threads = new Thread[Thread.activeCount()];
            int threadCount = Thread.enumerate(threads);
            for (int i = 0; i < threadCount; i++) {
                assertFalse(threads[i].getName().contains(groupId), "Heartbeat thread active after close");
            }
        }
    }
    @Test
    public void testAutoCommitAfterCoordinatorBackToService() {
        // After the coordinator was marked unknown, an async auto-commit must first
        // rediscover the coordinator and then complete the commit successfully.
        try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
            subscriptions.assignFromUser(Collections.singleton(t1p));
            subscriptions.seek(t1p, 100L);
            coordinator.markCoordinatorUnknown("test cause");
            assertTrue(coordinator.coordinatorUnknown());
            client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
            prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
            // async commit offset should find coordinator
            time.sleep(autoCommitIntervalMs); // sleep for a while to ensure auto commit does happen
            coordinator.maybeAutoCommitOffsetsAsync(time.milliseconds());
            assertFalse(coordinator.coordinatorUnknown());
            assertEquals(100L, subscriptions.position(t1p).offset);
        }
    }
@Test
public void testCommitOffsetRequestSyncWithFencedInstanceIdException() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// sync commit with invalid partitions should throw if we have no callback
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID);
assertThrows(FencedInstanceIdException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));
}
@Test
public void testCommitOffsetRequestAsyncWithFencedInstanceIdException() {
assertThrows(FencedInstanceIdException.class, this::receiveFencedInstanceIdException);
}
    @Test
    public void testCommitOffsetRequestAsyncAlwaysReceiveFencedException() {
        // Once we get fenced exception once, we should always hit fencing case.
        assertThrows(FencedInstanceIdException.class, this::receiveFencedInstanceIdException);
        // Both subsequent async and sync commits keep throwing without sending a request.
        assertThrows(FencedInstanceIdException.class, () ->
            coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback()));
        assertEquals(0, coordinator.inFlightAsyncCommits.get());
        assertThrows(FencedInstanceIdException.class, () ->
            coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));
    }
    @Test
    public void testGetGroupMetadata() {
        // Before joining, group metadata reports unknown generation/member ids;
        // after joining with a static instance id, it reflects the joined state.
        final ConsumerGroupMetadata groupMetadata = coordinator.groupMetadata();
        assertNotNull(groupMetadata);
        assertEquals(groupId, groupMetadata.groupId());
        assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
        assertEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
        assertFalse(groupMetadata.groupInstanceId().isPresent());
        try (final ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, groupInstanceId, true)) {
            coordinator.ensureActiveGroup();
            final ConsumerGroupMetadata joinedGroupMetadata = coordinator.groupMetadata();
            assertNotNull(joinedGroupMetadata);
            assertEquals(groupId, joinedGroupMetadata.groupId());
            assertEquals(1, joinedGroupMetadata.generationId());
            assertEquals(consumerId, joinedGroupMetadata.memberId());
            assertEquals(groupInstanceId, joinedGroupMetadata.groupInstanceId());
        }
    }
    @Test
    public void shouldUpdateConsumerGroupMetadataBeforeCallbacks() {
        // The listener asserts that by the time onPartitionsRevoked fires for the
        // second join, the coordinator's group metadata already shows generation 2.
        final MockRebalanceListener rebalanceListener = new MockRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                assertEquals(2, coordinator.groupMetadata().generationId());
            }
        };
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        // First join (generation 1): assigns t1p so the next join has something to revoke.
        {
            ByteBuffer buffer = ConsumerProtocol.serializeAssignment(
                new ConsumerPartitionAssignor.Assignment(Collections.singletonList(t1p), ByteBuffer.wrap(new byte[0])));
            coordinator.onJoinComplete(1, "memberId", partitionAssignor.name(), buffer);
        }
        // Second join (generation 2) with an empty assignment triggers the revocation callback.
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(
            new ConsumerPartitionAssignor.Assignment(Collections.emptyList(), ByteBuffer.wrap(new byte[0])));
        coordinator.onJoinComplete(2, "memberId", partitionAssignor.name(), buffer);
    }
    @Test
    public void testPrepareJoinAndRejoinAfterFailedRebalance() {
        // After a commit fails with REBALANCE_IN_PROGRESS, the member rejoins; if the
        // generation is cleared mid-rejoin (as the heartbeat thread may do), the join
        // result is discarded and a subsequent retry must succeed cleanly.
        final List<TopicPartition> partitions = singletonList(t1p);
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) {
            coordinator.ensureActiveGroup();
            prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
            assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
                singletonMap(t1p, new OffsetAndMetadata(100L)),
                time.timer(Long.MAX_VALUE)));
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
            int generationId = 42;
            String memberId = "consumer-42";
            client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
            MockTime time = new MockTime(1);
            // onJoinPrepare will be executed and onJoinComplete will not.
            boolean res = coordinator.joinGroupIfNeeded(time.timer(100));
            assertFalse(res);
            assertFalse(client.hasPendingResponses());
            // SynGroupRequest not responded.
            assertEquals(1, client.inFlightRequestCount());
            assertEquals(generationId, coordinator.generation().generationId);
            assertEquals(memberId, coordinator.generation().memberId);
            // Imitating heartbeat thread that clears generation data.
            coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "Clear generation data.");
            assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
            client.respond(syncGroupResponse(partitions, Errors.NONE));
            // Join future should succeed but generation already cleared so result of join is false.
            res = coordinator.joinGroupIfNeeded(time.timer(1));
            assertFalse(res);
            // should have retried sending a join group request already
            assertFalse(client.hasPendingResponses());
            assertEquals(1, client.inFlightRequestCount());
            // Retry join should then succeed
            client.respond(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
            client.prepareResponse(syncGroupResponse(partitions, Errors.NONE));
            res = coordinator.joinGroupIfNeeded(time.timer(3000));
            assertTrue(res);
            assertFalse(client.hasPendingResponses());
            assertFalse(client.hasInFlightRequests());
        }
        // Partitions owned before the cleared generation must have been reported as lost.
        Collection<TopicPartition> lost = getLost(partitions);
        assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
        assertEquals(lost.size(), rebalanceListener.lostCount);
    }
    @Test
    public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterDroppingOutOfTheGroup() {
        // When the sync-group response returns UNKNOWN_MEMBER_ID the member has dropped
        // out of the group: all owned partitions must be reported lost before rejoining.
        final List<TopicPartition> partitions = singletonList(t1p);
        try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) {
            final Time realTime = Time.SYSTEM;
            coordinator.ensureActiveGroup();
            prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
            assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
                singletonMap(t1p, new OffsetAndMetadata(100L)),
                time.timer(Long.MAX_VALUE)));
            int generationId = 42;
            String memberId = "consumer-42";
            client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
            client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.UNKNOWN_MEMBER_ID));
            boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
            assertFalse(res);
            // Dropping out resets the generation completely, including the member id.
            assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
            assertEquals("", coordinator.generation().memberId);
            res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
            assertFalse(res);
        }
        Collection<TopicPartition> lost = getLost(partitions);
        assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
        assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
    }
@Test
public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId() {
final List<TopicPartition> partitions = singletonList(t1p);
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) {
final Time realTime = Time.SYSTEM;
coordinator.ensureActiveGroup();
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
singletonMap(t1p, new OffsetAndMetadata(100L)),
time.timer(Long.MAX_VALUE)));
int generationId = 42;
String memberId = "consumer-42";
client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.ILLEGAL_GENERATION));
boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
assertFalse(res);
assertEquals(AbstractCoordinator.Generation.NO_GENERATION.generationId, coordinator.generation().generationId);
assertEquals(AbstractCoordinator.Generation.NO_GENERATION.protocolName, coordinator.generation().protocolName);
// member ID should not be reset
assertEquals(memberId, coordinator.generation().memberId);
res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
assertFalse(res);
}
Collection<TopicPartition> lost = getLost(partitions);
assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
}
@Test
public void testSubscriptionRackId() {
String rackId = "rack-a";
RackAwareAssignor assignor = new RackAwareAssignor(protocol);
createRackAwareCoordinator(rackId, assignor);
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.updateMetadata(metadataResponse);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
assignor.prepare(singletonMap(consumerId, singletonList(t1p)));
client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, false, Errors.NONE, Optional.of(rackId)));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.poll(time.timer(Long.MAX_VALUE));
assertEquals(singleton(t1p), coordinator.subscriptionState().assignedPartitions());
assertEquals(singleton(rackId), assignor.rackIds);
}
@Test
public void testThrowOnUnsupportedStableFlag() {
supportStableFlag((short) 6, true);
}
@Test
public void testNoThrowWhenStableFlagIsSupported() {
supportStableFlag((short) 7, false);
}
private void supportStableFlag(final short upperVersion, final boolean expectThrows) {
ConsumerCoordinator coordinator = new ConsumerCoordinator(
rebalanceConfig,
new LogContext(),
consumerClient,
assignors,
metadata,
subscriptions,
new Metrics(time),
consumerId + groupId,
time,
false,
autoCommitIntervalMs,
null,
true,
Optional.empty());
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.OFFSET_FETCH.id, (short) 0, upperVersion));
long offset = 500L;
String metadata = "blahblah";
Optional<Integer> leaderEpoch = Optional.of(15);
client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of(
new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setName(t1p.topic())
.setPartitions(List.of(
new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(t1p.partition())
.setCommittedOffset(offset)
.setCommittedLeaderEpoch(leaderEpoch.get())
.setMetadata(metadata)
))
)));
if (expectThrows) {
assertThrows(UnsupportedVersionException.class,
() -> coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE)));
} else {
Map<TopicPartition, OffsetAndMetadata> fetchedOffsets = coordinator.fetchCommittedOffsets(singleton(t1p),
time.timer(Long.MAX_VALUE));
assertNotNull(fetchedOffsets);
assertEquals(new OffsetAndMetadata(offset, leaderEpoch, metadata), fetchedOffsets.get(t1p));
}
}
private void receiveFencedInstanceIdException() {
subscriptions.assignFromUser(singleton(t1p));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID);
coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback());
assertEquals(0, coordinator.inFlightAsyncCommits.get());
coordinator.invokeCompletedOffsetCommitCallbacks();
}
private ConsumerCoordinator prepareCoordinatorForCloseTest(final boolean useGroupManagement,
final boolean autoCommit,
final Optional<String> groupInstanceId,
final boolean shouldPoll) {
rebalanceConfig = buildRebalanceConfig(groupInstanceId, null);
ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig,
new Metrics(),
assignors,
autoCommit,
subscriptions);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
if (useGroupManagement) {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
} else {
subscriptions.assignFromUser(singleton(t1p));
}
subscriptions.seek(t1p, 100);
if (shouldPoll) {
coordinator.poll(time.timer(Long.MAX_VALUE));
}
return coordinator;
}
private void makeCoordinatorUnknown(ConsumerCoordinator coordinator, Errors error) {
time.sleep(sessionTimeoutMs);
coordinator.sendHeartbeatRequest();
client.prepareResponse(heartbeatResponse(error));
time.sleep(sessionTimeoutMs);
consumerClient.poll(time.timer(0));
assertTrue(coordinator.coordinatorUnknown());
}
private void closeVerifyTimeout(final ConsumerCoordinator coordinator,
final long closeTimeoutMs,
final long expectedMinTimeMs,
final long expectedMaxTimeMs) throws Exception {
ExecutorService executor = Executors.newSingleThreadExecutor();
try {
boolean coordinatorUnknown = coordinator.coordinatorUnknown();
// Run close on a different thread. Coordinator is locked by this thread, so it is
// not safe to use the coordinator from the main thread until the task completes.
Future<?> future = executor.submit(
() -> coordinator.close(time.timer(Math.min(closeTimeoutMs, requestTimeoutMs)), CloseOptions.GroupMembershipOperation.DEFAULT));
// Wait for close to start. If coordinator is known, wait for close to queue
// at least one request. Otherwise, sleep for a short time.
if (!coordinatorUnknown)
client.waitForRequests(1, 1000);
else
Thread.sleep(200);
if (expectedMinTimeMs > 0) {
time.sleep(expectedMinTimeMs - 1);
try {
future.get(500, TimeUnit.MILLISECONDS);
fail("Close completed ungracefully without waiting for timeout");
} catch (TimeoutException e) {
// Expected timeout
}
}
if (expectedMaxTimeMs >= 0)
time.sleep(expectedMaxTimeMs - expectedMinTimeMs + 2);
future.get(2000, TimeUnit.MILLISECONDS);
} finally {
executor.shutdownNow();
}
}
private void gracefulCloseTest(ConsumerCoordinator coordinator, boolean shouldLeaveGroup) {
final AtomicBoolean commitRequested = new AtomicBoolean();
final AtomicBoolean leaveGroupRequested = new AtomicBoolean();
client.prepareResponse(body -> {
commitRequested.set(true);
OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
return commitRequest.data().groupId().equals(groupId);
}, new OffsetCommitResponse(new OffsetCommitResponseData()));
if (shouldLeaveGroup)
client.prepareResponse(body -> {
leaveGroupRequested.set(true);
LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
return leaveRequest.data().groupId().equals(groupId);
}, new LeaveGroupResponse(new LeaveGroupResponseData()
.setErrorCode(Errors.NONE.code())));
client.prepareResponse(body -> {
commitRequested.set(true);
OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
return commitRequest.data().groupId().equals(groupId);
}, new OffsetCommitResponse(new OffsetCommitResponseData()));
coordinator.close();
assertTrue(commitRequested.get(), "Commit not requested");
assertEquals(shouldLeaveGroup, leaveGroupRequested.get(), "leaveGroupRequested should be " + shouldLeaveGroup);
if (shouldLeaveGroup) {
assertEquals(1, rebalanceListener.revokedCount);
assertEquals(singleton(t1p), rebalanceListener.revoked);
}
}
private ConsumerCoordinator buildCoordinator(final GroupRebalanceConfig rebalanceConfig,
final Metrics metrics,
final List<ConsumerPartitionAssignor> assignors,
final boolean autoCommitEnabled,
final SubscriptionState subscriptionState) {
return new ConsumerCoordinator(
rebalanceConfig,
new LogContext(),
consumerClient,
assignors,
metadata,
subscriptionState,
metrics,
consumerId + groupId,
time,
autoCommitEnabled,
autoCommitIntervalMs,
null,
false,
Optional.empty());
}
private Collection<TopicPartition> getRevoked(final List<TopicPartition> owned,
final List<TopicPartition> assigned) {
switch (protocol) {
case EAGER:
return Set.copyOf(owned);
case COOPERATIVE:
final List<TopicPartition> revoked = new ArrayList<>(owned);
revoked.removeAll(assigned);
return Set.copyOf(revoked);
default:
throw new IllegalStateException("This should not happen");
}
}
private Collection<TopicPartition> getLost(final List<TopicPartition> owned) {
switch (protocol) {
case EAGER:
return emptySet();
case COOPERATIVE:
return Set.copyOf(owned);
default:
throw new IllegalStateException("This should not happen");
}
}
private Collection<TopicPartition> getAdded(final List<TopicPartition> owned,
final List<TopicPartition> assigned) {
switch (protocol) {
case EAGER:
return Set.copyOf(assigned);
case COOPERATIVE:
final List<TopicPartition> added = new ArrayList<>(assigned);
added.removeAll(owned);
return Set.copyOf(added);
default:
throw new IllegalStateException("This should not happen");
}
}
private FindCoordinatorResponse groupCoordinatorResponse(Node node, Errors error) {
return FindCoordinatorResponse.prepareResponse(error, groupId, node);
}
private HeartbeatResponse heartbeatResponse(Errors error) {
return new HeartbeatResponse(new HeartbeatResponseData().setErrorCode(error.code()));
}
private JoinGroupResponse joinGroupLeaderResponse(
int generationId,
String memberId,
Map<String, List<String>> subscriptions,
Errors error
) {
return joinGroupLeaderResponse(generationId, memberId, subscriptions, false, error, Optional.empty());
}
private JoinGroupResponse joinGroupLeaderResponse(
int generationId,
String memberId,
Map<String, List<String>> subscriptions,
boolean skipAssignment,
Errors error,
Optional<String> rackId
) {
List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
for (Map.Entry<String, List<String>> subscriptionEntry : subscriptions.entrySet()) {
ConsumerPartitionAssignor.Subscription subscription = new ConsumerPartitionAssignor.Subscription(subscriptionEntry.getValue(),
null, Collections.emptyList(), DEFAULT_GENERATION, rackId);
ByteBuffer buf = ConsumerProtocol.serializeSubscription(subscription);
metadata.add(new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId(subscriptionEntry.getKey())
.setMetadata(buf.array()));
}
return new JoinGroupResponse(
new JoinGroupResponseData()
.setErrorCode(error.code())
.setGenerationId(generationId)
.setProtocolName(partitionAssignor.name())
.setLeader(memberId)
.setSkipAssignment(skipAssignment)
.setMemberId(memberId)
.setMembers(metadata),
ApiKeys.JOIN_GROUP.latestVersion()
);
}
private JoinGroupResponse joinGroupFollowerResponse(int generationId, String memberId, String leaderId, Errors error) {
return new JoinGroupResponse(
new JoinGroupResponseData()
.setErrorCode(error.code())
.setGenerationId(generationId)
.setProtocolName(partitionAssignor.name())
.setLeader(leaderId)
.setMemberId(memberId)
.setMembers(Collections.emptyList()),
ApiKeys.JOIN_GROUP.latestVersion()
);
}
private SyncGroupResponse syncGroupResponse(List<TopicPartition> partitions, Errors error) {
ByteBuffer buf = ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(partitions));
return new SyncGroupResponse(
new SyncGroupResponseData()
.setErrorCode(error.code())
.setAssignment(Utils.toArray(buf))
);
}
private OffsetCommitResponse offsetCommitResponse(Map<TopicPartition, Errors> responseData) {
return new OffsetCommitResponse(responseData);
}
private OffsetFetchResponse offsetFetchResponse(
Errors errors,
List<OffsetFetchResponseData.OffsetFetchResponseTopics> topics
) {
return new OffsetFetchResponse(
new OffsetFetchResponseData()
.setGroups(List.of(
new OffsetFetchResponseData.OffsetFetchResponseGroup()
.setGroupId(groupId)
.setErrorCode(errors.code())
.setTopics(topics)
)),
ApiKeys.OFFSET_FETCH.latestVersion()
);
}
private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partitionLevelError, String metadata, long offset) {
return offsetFetchResponse(tp, partitionLevelError, metadata, offset, Optional.empty());
}
private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partitionLevelError, String metadata, long offset, Optional<Integer> epoch) {
return offsetFetchResponse(Errors.NONE, List.of(
new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setName(tp.topic())
.setPartitions(List.of(
new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(tp.partition())
.setCommittedOffset(offset)
.setCommittedLeaderEpoch(epoch.orElse(-1))
.setMetadata(metadata)
.setErrorCode(partitionLevelError.code())
))
));
}
private OffsetCommitCallback callback(final AtomicBoolean success) {
return (offsets, exception) -> {
if (exception == null)
success.set(true);
};
}
private void joinAsFollowerAndReceiveAssignment(ConsumerCoordinator coordinator,
List<TopicPartition> assignment) {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(assignment, Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
}
private void prepareOffsetCommitRequest(Map<TopicPartition, Long> expectedOffsets, Errors error) {
prepareOffsetCommitRequest(expectedOffsets, error, false);
}
private void prepareOffsetCommitRequestDisconnect(Map<TopicPartition, Long> expectedOffsets) {
prepareOffsetCommitRequest(expectedOffsets, Errors.NONE, true);
}
private void prepareOffsetCommitRequest(final Map<TopicPartition, Long> expectedOffsets,
Errors error,
boolean disconnected) {
Map<TopicPartition, Errors> errors = partitionErrors(expectedOffsets.keySet(), error);
client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected);
}
private void prepareJoinAndSyncResponse(String consumerId, int generation, List<String> subscription, List<TopicPartition> assignment) {
partitionAssignor.prepare(singletonMap(consumerId, assignment));
client.prepareResponse(
joinGroupLeaderResponse(
generation, consumerId, singletonMap(consumerId, subscription), Errors.NONE));
client.prepareResponse(body -> {
SyncGroupRequest sync = (SyncGroupRequest) body;
return sync.data().memberId().equals(consumerId) &&
sync.data().generationId() == generation &&
sync.groupAssignments().containsKey(consumerId);
}, syncGroupResponse(assignment, Errors.NONE));
}
private Map<TopicPartition, Errors> partitionErrors(Collection<TopicPartition> partitions, Errors error) {
final Map<TopicPartition, Errors> errors = new HashMap<>();
for (TopicPartition partition : partitions) {
errors.put(partition, error);
}
return errors;
}
private void respondToOffsetCommitRequest(final Map<TopicPartition, Long> expectedOffsets, Errors error) {
Map<TopicPartition, Errors> errors = partitionErrors(expectedOffsets.keySet(), error);
client.respond(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors));
}
private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map<TopicPartition, Long> expectedOffsets) {
return body -> {
OffsetCommitRequest req = (OffsetCommitRequest) body;
Map<TopicPartition, Long> offsets = req.offsets();
if (offsets.size() != expectedOffsets.size())
return false;
for (Map.Entry<TopicPartition, Long> expectedOffset : expectedOffsets.entrySet()) {
if (!offsets.containsKey(expectedOffset.getKey())) {
return false;
} else {
Long actualOffset = offsets.get(expectedOffset.getKey());
if (!actualOffset.equals(expectedOffset.getValue())) {
return false;
}
}
}
return true;
};
}
private OffsetCommitCallback callback(final Map<TopicPartition, OffsetAndMetadata> expectedOffsets,
final AtomicBoolean success) {
return (offsets, exception) -> {
if (expectedOffsets.equals(offsets) && exception == null)
success.set(true);
};
}
private void createRackAwareCoordinator(String rackId, MockPartitionAssignor assignor) {
metrics.close();
coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT);
metrics = new Metrics(time);
rebalanceConfig = buildRebalanceConfig(rebalanceConfig.groupInstanceId, rackId);
coordinator = new ConsumerCoordinator(rebalanceConfig, new LogContext(), consumerClient,
Collections.singletonList(assignor), metadata, subscriptions,
metrics, consumerId + groupId, time, false, autoCommitIntervalMs, null, false, Optional.empty());
}
private static MetadataResponse rackAwareMetadata(int numNodes,
List<String> racks,
Map<String, List<List<Integer>>> partitionReplicas) {
final List<Node> nodes = new ArrayList<>(numNodes);
for (int i = 0; i < numNodes; i++)
nodes.add(new Node(i, "localhost", 1969 + i, racks.get(i % racks.size())));
List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
for (Map.Entry<String, List<List<Integer>>> topicPartitionCountEntry : partitionReplicas.entrySet()) {
String topic = topicPartitionCountEntry.getKey();
int numPartitions = topicPartitionCountEntry.getValue().size();
List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>(numPartitions);
for (int i = 0; i < numPartitions; i++) {
TopicPartition tp = new TopicPartition(topic, i);
List<Integer> replicaIds = topicPartitionCountEntry.getValue().get(i);
partitionMetadata.add(new PartitionMetadata(
Errors.NONE, tp, Optional.of(replicaIds.get(0)), Optional.empty(),
replicaIds, replicaIds, Collections.emptyList()));
}
topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, topic, Uuid.ZERO_UUID,
Topic.isInternal(topic), partitionMetadata, MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED));
}
return RequestTestUtils.metadataResponse(nodes, "kafka-cluster", 0, topicMetadata, ApiKeys.METADATA.latestVersion());
}
private static
|
ConsumerCoordinatorTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AbstractChannelWriterOutputView.java
|
{
"start": 1189,
"end": 2018
}
|
class ____ extends AbstractPagedOutputView {
public AbstractChannelWriterOutputView(int segmentSize, int headerLength) {
super(segmentSize, headerLength);
}
/** Get the underlying channel. */
public abstract FileIOChannel getChannel();
/**
* Closes this OutputView, closing the underlying writer
*
* @return the number of bytes in last memory segment.
*/
public abstract int close() throws IOException;
/** Gets the number of blocks used by this view. */
public abstract int getBlockCount();
/** Get output bytes. */
public abstract long getNumBytes() throws IOException;
/** Get output compressed bytes, return num bytes if there is no compression. */
public abstract long getNumCompressedBytes() throws IOException;
}
|
AbstractChannelWriterOutputView
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/SqlDataTypesTests.java
|
{
"start": 2832,
"end": 10222
}
|
class ____ extends ESTestCase {
public void testMetadataType() {
assertEquals(Integer.valueOf(91), metaSqlDataType(DATE));
assertEquals(Integer.valueOf(92), metaSqlDataType(TIME));
assertEquals(Integer.valueOf(9), metaSqlDataType(DATETIME));
DataType t = randomDataTypeNoDateTime();
assertEquals(sqlType(t).getVendorTypeNumber(), metaSqlDataType(t));
}
public void testMetaDateTypeSub() {
assertEquals(Integer.valueOf(1), metaSqlDateTimeSub(DATE));
assertEquals(Integer.valueOf(2), metaSqlDateTimeSub(TIME));
assertEquals(Integer.valueOf(3), metaSqlDateTimeSub(DATETIME));
assertEquals(Integer.valueOf(0), metaSqlDateTimeSub(randomDataTypeNoDateTime()));
}
public void testMetaMinimumScale() {
assertNull(metaSqlMinimumScale(DATE));
assertEquals(Short.valueOf((short) 9), metaSqlMinimumScale(TIME));
assertEquals(Short.valueOf((short) 9), metaSqlMinimumScale(DATETIME));
assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(LONG));
assertEquals(Short.valueOf((short) defaultPrecision(FLOAT)), metaSqlMaximumScale(FLOAT));
assertNull(metaSqlMinimumScale(KEYWORD));
}
public void testMetaMaximumScale() {
assertNull(metaSqlMinimumScale(DATE));
assertEquals(Short.valueOf((short) 9), metaSqlMinimumScale(TIME));
assertEquals(Short.valueOf((short) 9), metaSqlMaximumScale(DATETIME));
assertEquals(Short.valueOf((short) 0), metaSqlMaximumScale(LONG));
assertEquals(Short.valueOf((short) defaultPrecision(FLOAT)), metaSqlMaximumScale(FLOAT));
assertNull(metaSqlMaximumScale(KEYWORD));
}
public void testMetaRadix() {
assertNull(metaSqlRadix(DATETIME));
assertNull(metaSqlRadix(KEYWORD));
assertEquals(Integer.valueOf(10), metaSqlRadix(LONG));
assertEquals(Integer.valueOf(2), metaSqlRadix(FLOAT));
}
// type checks
public void testIsInterval() {
for (DataType dataType : asList(
INTERVAL_YEAR,
INTERVAL_MONTH,
INTERVAL_DAY,
INTERVAL_HOUR,
INTERVAL_MINUTE,
INTERVAL_SECOND,
INTERVAL_YEAR_TO_MONTH,
INTERVAL_DAY_TO_HOUR,
INTERVAL_DAY_TO_MINUTE,
INTERVAL_DAY_TO_SECOND,
INTERVAL_HOUR_TO_MINUTE,
INTERVAL_HOUR_TO_SECOND,
INTERVAL_MINUTE_TO_SECOND
)) {
assertTrue(dataType + " is not an interval", isInterval(dataType));
}
}
public void testIntervalCompatibilityYearMonth() {
assertEquals(INTERVAL_YEAR_TO_MONTH, compatibleInterval(INTERVAL_YEAR, INTERVAL_MONTH));
assertEquals(INTERVAL_YEAR_TO_MONTH, compatibleInterval(INTERVAL_YEAR, INTERVAL_YEAR_TO_MONTH));
assertEquals(INTERVAL_YEAR_TO_MONTH, compatibleInterval(INTERVAL_MONTH, INTERVAL_YEAR));
assertEquals(INTERVAL_YEAR_TO_MONTH, compatibleInterval(INTERVAL_MONTH, INTERVAL_YEAR_TO_MONTH));
}
public void testIntervalCompatibilityDayTime() {
assertEquals(INTERVAL_DAY_TO_HOUR, compatibleInterval(INTERVAL_DAY, INTERVAL_HOUR));
assertEquals(INTERVAL_DAY_TO_HOUR, compatibleInterval(INTERVAL_DAY_TO_HOUR, INTERVAL_HOUR));
assertEquals(INTERVAL_DAY_TO_MINUTE, compatibleInterval(INTERVAL_DAY, INTERVAL_MINUTE));
assertEquals(INTERVAL_DAY_TO_MINUTE, compatibleInterval(INTERVAL_DAY_TO_HOUR, INTERVAL_HOUR_TO_MINUTE));
assertEquals(INTERVAL_DAY_TO_MINUTE, compatibleInterval(INTERVAL_MINUTE, INTERVAL_DAY_TO_HOUR));
assertEquals(INTERVAL_DAY_TO_MINUTE, compatibleInterval(INTERVAL_DAY, INTERVAL_DAY_TO_MINUTE));
assertEquals(INTERVAL_DAY_TO_SECOND, compatibleInterval(INTERVAL_DAY, INTERVAL_SECOND));
assertEquals(INTERVAL_DAY_TO_SECOND, compatibleInterval(INTERVAL_MINUTE, INTERVAL_DAY_TO_SECOND));
assertEquals(INTERVAL_HOUR_TO_MINUTE, compatibleInterval(INTERVAL_MINUTE, INTERVAL_HOUR));
assertEquals(INTERVAL_HOUR_TO_SECOND, compatibleInterval(INTERVAL_SECOND, INTERVAL_HOUR));
assertEquals(INTERVAL_HOUR_TO_SECOND, compatibleInterval(INTERVAL_SECOND, INTERVAL_HOUR_TO_MINUTE));
assertEquals(INTERVAL_HOUR_TO_SECOND, compatibleInterval(INTERVAL_SECOND, INTERVAL_HOUR_TO_MINUTE));
assertEquals(INTERVAL_MINUTE_TO_SECOND, compatibleInterval(INTERVAL_SECOND, INTERVAL_MINUTE));
}
public void testIncompatibleInterval() {
assertNull(compatibleInterval(INTERVAL_YEAR, INTERVAL_SECOND));
assertNull(compatibleInterval(INTERVAL_YEAR, INTERVAL_DAY_TO_HOUR));
assertNull(compatibleInterval(INTERVAL_HOUR, INTERVAL_MONTH));
assertNull(compatibleInterval(INTERVAL_MINUTE_TO_SECOND, INTERVAL_MONTH));
}
public void testIntervalCompabitilityWithDateTimes() {
for (DataType intervalType : asList(
INTERVAL_YEAR,
INTERVAL_MONTH,
INTERVAL_DAY,
INTERVAL_HOUR,
INTERVAL_MINUTE,
INTERVAL_SECOND,
INTERVAL_YEAR_TO_MONTH,
INTERVAL_DAY_TO_HOUR,
INTERVAL_DAY_TO_MINUTE,
INTERVAL_DAY_TO_SECOND,
INTERVAL_HOUR_TO_MINUTE,
INTERVAL_HOUR_TO_SECOND,
INTERVAL_MINUTE_TO_SECOND
)) {
for (DataType dateTimeType : asList(DATE, DATETIME)) {
assertTrue(areCompatible(intervalType, dateTimeType));
assertTrue(areCompatible(dateTimeType, intervalType));
}
}
}
public void testEsToDataType() {
List<String> types = new ArrayList<>(
Arrays.asList(
"null",
"boolean",
"bool",
"byte",
"tinyint",
"short",
"smallint",
"integer",
"long",
"bigint",
"double",
"real",
"half_float",
"scaled_float",
"float",
"decimal",
"numeric",
"keyword",
"text",
"varchar",
"date",
"datetime",
"timestamp",
"binary",
"varbinary",
"ip",
"interval_year",
"interval_month",
"interval_year_to_month",
"interval_day",
"interval_hour",
"interval_minute",
"interval_second",
"interval_day_to_hour",
"interval_day_to_minute",
"interval_day_to_second",
"interval_hour_to_minute",
"interval_hour_to_second",
"interval_minute_to_second"
)
);
types.addAll(SqlDataTypes.types().stream().filter(DataTypes::isPrimitive).map(DataType::typeName).collect(toList()));
String type = randomFrom(types.toArray(new String[0]));
DataType dataType = SqlDataTypes.fromSqlOrEsType(type);
assertNotNull("cound not find " + type, dataType);
}
private DataType randomDataTypeNoDateTime() {
return randomValueOtherThanMany(SqlDataTypes::isDateOrTimeBased, () -> randomFrom(SqlDataTypes.types()));
}
}
|
SqlDataTypesTests
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStore.java
|
{
"start": 3255,
"end": 3419
}
|
class ____ of type <Bytes,byte[]>, hence we use {@link Serde}s
* to convert from <K,V> to <Bytes,byte[]>
*
* @param <K>
* @param <V>
*/
public
|
is
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSystemIndicesIntegTests.java
|
{
"start": 1452,
"end": 4258
}
|
class ____ extends BaseFrozenSearchableSnapshotsIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class);
}
public void testCannotMountSystemIndex() throws Exception {
executeTest(
TestSystemIndexPlugin.INDEX_NAME,
SearchableSnapshotsSystemIndicesIntegTests.class.getSimpleName(),
new OriginSettingClient(client(), ClientHelper.SEARCHABLE_SNAPSHOTS_ORIGIN)
);
}
public void testCannotMountSnapshotBlobCacheIndex() throws Exception {
executeTest(SearchableSnapshots.SNAPSHOT_BLOB_CACHE_INDEX, "searchable_snapshots", client());
}
private void executeTest(final String indexName, final String featureName, final Client client) throws Exception {
createAndPopulateIndex(indexName, Settings.builder());
final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
createRepository(repositoryName, "fs");
final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final int numPrimaries = getNumShards(indexName).numPrimaries;
final SnapshotInfo snapshotInfo = createSnapshot(
repositoryName,
snapshotName,
Collections.singletonList("-*"),
Collections.singletonList(featureName)
);
// NOTE: The below assertion assumes that the only index in the feature is the named one. If that's not the case, this will fail.
assertThat(snapshotInfo.successfulShards(), equalTo(numPrimaries));
assertThat(snapshotInfo.failedShards(), equalTo(0));
if (randomBoolean()) {
assertAcked(client.admin().indices().prepareClose(indexName));
} else {
assertAcked(client.admin().indices().prepareDelete(indexName));
}
final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
indexName,
repositoryName,
snapshotName,
indexName,
Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()).build(),
Strings.EMPTY_ARRAY,
true,
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
final ElasticsearchException exception = expectThrows(
ElasticsearchException.class,
() -> client.execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet()
);
assertThat(exception.getMessage(), containsString("system index [" + indexName + "] cannot be mounted as searchable snapshots"));
}
public static
|
SearchableSnapshotsSystemIndicesIntegTests
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/introspect/TypeResolutionContext.java
|
{
"start": 349,
"end": 443
}
|
interface ____ {
public JavaType resolveType(Type t);
public static
|
TypeResolutionContext
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/basic/RepeatedMappingUserTypeTests.java
|
{
"start": 2764,
"end": 4821
}
|
class ____ implements UserType<SortedSet<Integer>>,
AttributeConverter<SortedSet<Integer>, String> {
@Override
public int getSqlType() {
return SqlTypes.VARCHAR;
}
@Override
public Class<SortedSet<Integer>> returnedClass() {
//noinspection unchecked
return (Class) Set.class;
}
@Override
public boolean equals(SortedSet<Integer> x, SortedSet<Integer> y) {
return Objects.equals( x, y );
}
@Override
public int hashCode(SortedSet<Integer> x) {
return Objects.hashCode( x );
}
@Override
public SortedSet<Integer> nullSafeGet(
ResultSet rs,
int position,
WrapperOptions options) throws SQLException {
return convertToEntityAttribute( rs.getString( position ) );
}
@Override
public void nullSafeSet(
PreparedStatement st,
SortedSet<Integer> values,
int index,
WrapperOptions options) throws SQLException {
if ( values == null || values.isEmpty() ) {
st.setNull( index, SqlTypes.VARCHAR );
return;
}
String databaseValue = convertToDatabaseColumn( values );
st.setString( index, databaseValue );
}
@Override
public SortedSet<Integer> deepCopy(SortedSet<Integer> value) {
return new TreeSet<>( value );
}
@Override
public boolean isMutable() {
return true;
}
@Override
public Serializable disassemble(SortedSet<Integer> value) {
return (Serializable) value;
}
@Override
public SortedSet<Integer> assemble(Serializable cached, Object owner) {
return (SortedSet<Integer>) cached;
}
@Override
public String convertToDatabaseColumn(SortedSet<Integer> values) {
return values.stream().map( Object::toString ).collect( Collectors.joining( "|", "|", "|" ) );
}
@Override
public SortedSet<Integer> convertToEntityAttribute(String databaseValue) {
return Arrays.stream( databaseValue.split( "\\|" ) )
.map( value -> value.trim() )
.filter( value -> !value.isEmpty() )
.map( value -> Integer.valueOf( value ) )
.collect( Collectors.toCollection( TreeSet::new ) );
}
}
}
|
CodeJavaType
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/internal/TypeComparators.java
|
{
"start": 1212,
"end": 2299
}
|
class ____ extends TypeHolder<Comparator<?>> {
private static final double DOUBLE_COMPARATOR_PRECISION = 1e-15;
private static final DoubleComparator DEFAULT_DOUBLE_COMPARATOR = new DoubleComparator(DOUBLE_COMPARATOR_PRECISION);
private static final float FLOAT_COMPARATOR_PRECISION = 1e-6f;
private static final FloatComparator DEFAULT_FLOAT_COMPARATOR = new FloatComparator(FLOAT_COMPARATOR_PRECISION);
private static final Comparator<Path> DEFAULT_PATH_COMPARATOR = PathNaturalOrderComparator.INSTANCE;
public static TypeComparators defaultTypeComparators() {
TypeComparators comparatorByType = new TypeComparators();
comparatorByType.registerComparator(Double.class, DEFAULT_DOUBLE_COMPARATOR);
comparatorByType.registerComparator(Float.class, DEFAULT_FLOAT_COMPARATOR);
comparatorByType.registerComparator(Path.class, DEFAULT_PATH_COMPARATOR);
return comparatorByType;
}
/**
* This method returns the most relevant comparator for the given class. The most relevant comparator is the
* comparator which is registered for the
|
TypeComparators
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableFromObservable.java
|
{
"start": 797,
"end": 1165
}
|
class ____<T> extends Flowable<T> {
private final ObservableSource<T> upstream;
public FlowableFromObservable(ObservableSource<T> upstream) {
this.upstream = upstream;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
upstream.subscribe(new SubscriberObserver<>(s));
}
static final
|
FlowableFromObservable
|
java
|
grpc__grpc-java
|
core/src/main/java/io/grpc/internal/ProxyDetectorImpl.java
|
{
"start": 1543,
"end": 8970
}
|
class ____ implements ProxyDetector {
// To validate this code: set up a local squid proxy instance, and
// try to communicate with grpc-test.sandbox.googleapis.com:443.
// The endpoint runs an instance of TestServiceGrpc, see
// AbstractInteropTest for an example how to run a
// TestService.EmptyCall RPC.
//
// The instructions below assume Squid 3.5.23 and a recent
// version of Debian.
//
// Set the contents of /etc/squid/squid.conf to be:
// WARNING: THESE CONFIGS HAVE NOT BEEN REVIEWED FOR SECURITY, DO
// NOT USE OUTSIDE OF TESTING. COMMENT OUT THIS WARNING TO
// UNBREAK THE CONFIG FILE.
// acl SSL_ports port 443
// acl Safe_ports port 80
// acl Safe_ports port 21
// acl Safe_ports port 443
// acl Safe_ports port 70
// acl Safe_ports port 210
// acl Safe_ports port 1025-65535
// acl Safe_ports port 280
// acl Safe_ports port 488
// acl Safe_ports port 591
// acl Safe_ports port 777
// acl CONNECT method CONNECT
// http_access deny !Safe_ports
// http_access deny CONNECT !SSL_ports
// http_access allow localhost manager
// http_access deny manager
// http_access allow localhost
// http_access deny all
// http_port 3128
// coredump_dir /var/spool/squid
// refresh_pattern ^ftp: 1440 20% 10080
// refresh_pattern ^gopher: 1440 0% 1440
// refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
// refresh_pattern . 0 20% 4320
//
// Restart squid:
// $ sudo /etc/init.d/squid restart
//
// To test with passwords:
//
// Run this command and follow the instructions to set up a user/pass:
// $ sudo htpasswd -c /etc/squid/passwd myuser1
//
// Make the file readable to squid:
// $ sudo chmod 644 /etc/squid/passwd
//
// Validate the username and password, you should see OK printed:
// $ /usr/lib/squid3/basic_ncsa_auth /etc/squid/passwd
// myuser1 <your password here>
//
// Add these additional lines to the beginning of squid.conf (the ordering matters):
// auth_param basic program /usr/lib/squid3/basic_ncsa_auth /etc/squid/passwd
// auth_param basic children 5
// auth_param basic realm Squid proxy-caching web server
// auth_param basic credentialsttl 2 hours
// acl ncsa_users proxy_auth REQUIRED
// http_access allow ncsa_users
//
// Restart squid:
// $ sudo /etc/init.d/squid restart
//
// In both cases, start the JVM with -Dhttps.proxyHost=127.0.0.1 -Dhttps.proxyPort=3128 to
// configure the proxy. For passwords, use java.net.Authenticator.setDefault().
//
// Testing with curl, no password:
// $ curl -U myuser1:pass1 -x http://localhost:3128 -L grpc.io
// Testing with curl, with password:
// $ curl -U myuser1:pass1 -x http://localhost:3128 -L grpc.io
//
// It may be helpful to monitor the squid access logs:
// $ sudo tail -f /var/log/squid/access.log
private static final Logger log = Logger.getLogger(ProxyDetectorImpl.class.getName());
private static final AuthenticationProvider DEFAULT_AUTHENTICATOR = new AuthenticationProvider() {
@Override
public PasswordAuthentication requestPasswordAuthentication(
String host, InetAddress addr, int port, String protocol, String prompt, String scheme) {
URL url = null;
try {
url = new URL(protocol, host, port, "");
} catch (MalformedURLException e) {
// let url be null
log.log(
Level.WARNING,
"failed to create URL for Authenticator: {0} {1}", new Object[] {protocol, host});
}
return Authenticator.requestPasswordAuthentication(
host, addr, port, protocol, prompt, scheme, url, Authenticator.RequestorType.PROXY);
}
};
private static final Supplier<ProxySelector> DEFAULT_PROXY_SELECTOR =
new Supplier<ProxySelector>() {
@Override
public ProxySelector get() {
return ProxySelector.getDefault();
}
};
// Do not hard code a ProxySelector because the global default ProxySelector can change
private final Supplier<ProxySelector> proxySelector;
private final AuthenticationProvider authenticationProvider;
// We want an HTTPS proxy, which operates on the entire data stream (See IETF rfc2817).
static final String PROXY_SCHEME = "https";
/**
* A proxy selector that uses the global {@link ProxySelector#getDefault()} and
* {@link ProxyDetectorImpl.AuthenticationProvider} to detect proxy parameters.
*/
public ProxyDetectorImpl() {
this(DEFAULT_PROXY_SELECTOR, DEFAULT_AUTHENTICATOR);
}
@VisibleForTesting
ProxyDetectorImpl(
Supplier<ProxySelector> proxySelector,
AuthenticationProvider authenticationProvider) {
this.proxySelector = checkNotNull(proxySelector);
this.authenticationProvider = checkNotNull(authenticationProvider);
}
@Nullable
@Override
public ProxiedSocketAddress proxyFor(SocketAddress targetServerAddress) throws IOException {
if (!(targetServerAddress instanceof InetSocketAddress)) {
return null;
}
return detectProxy((InetSocketAddress) targetServerAddress);
}
private ProxiedSocketAddress detectProxy(InetSocketAddress targetAddr) throws IOException {
URI uri;
String host = targetAddr.getHostString();
try {
uri =
new URI(
PROXY_SCHEME,
null, /* userInfo */
host,
targetAddr.getPort(),
null, /* path */
null, /* query */
null /* fragment */);
} catch (final URISyntaxException e) {
log.log(
Level.WARNING,
"Failed to construct URI for proxy lookup, proceeding without proxy",
e);
return null;
}
ProxySelector proxySelector = this.proxySelector.get();
if (proxySelector == null) {
log.log(Level.FINE, "proxy selector is null, so continuing without proxy lookup");
return null;
}
List<Proxy> proxies = proxySelector.select(uri);
if (proxies.size() > 1) {
log.warning("More than 1 proxy detected, gRPC will select the first one");
}
Proxy proxy = proxies.get(0);
if (proxy.type() == Proxy.Type.DIRECT) {
return null;
}
InetSocketAddress proxyAddr = (InetSocketAddress) proxy.address();
// The prompt string should be the realm as returned by the server.
// We don't have it because we are avoiding the full handshake.
String promptString = "";
PasswordAuthentication auth =
authenticationProvider.requestPasswordAuthentication(
proxyAddr.getHostString(),
proxyAddr.getAddress(),
proxyAddr.getPort(),
PROXY_SCHEME,
promptString,
null);
final InetSocketAddress resolvedProxyAddr;
if (proxyAddr.isUnresolved()) {
InetAddress resolvedAddress = InetAddress.getByName(proxyAddr.getHostName());
resolvedProxyAddr = new InetSocketAddress(resolvedAddress, proxyAddr.getPort());
} else {
resolvedProxyAddr = proxyAddr;
}
HttpConnectProxiedSocketAddress.Builder builder =
HttpConnectProxiedSocketAddress.newBuilder()
.setTargetAddress(targetAddr)
.setProxyAddress(resolvedProxyAddr);
if (auth == null) {
return builder.build();
}
return builder
.setUsername(auth.getUserName())
.setPassword(auth.getPassword() == null ? null : new String(auth.getPassword()))
.build();
}
/**
* This
|
ProxyDetectorImpl
|
java
|
apache__kafka
|
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsTopologyTest.java
|
{
"start": 1721,
"end": 15878
}
|
class ____ {
private static final String SUBTOPOLOGY_ID_1 = "subtopology-1";
private static final String SUBTOPOLOGY_ID_2 = "subtopology-2";
private static final String SOURCE_TOPIC_1 = "source-topic-1";
private static final String SOURCE_TOPIC_2 = "source-topic-2";
private static final String SOURCE_TOPIC_3 = "source-topic-3";
private static final String REPARTITION_TOPIC_1 = "repartition-topic-1";
private static final String REPARTITION_TOPIC_2 = "repartition-topic-2";
private static final String REPARTITION_TOPIC_3 = "repartition-topic-3";
private static final String CHANGELOG_TOPIC_1 = "changelog-1";
private static final String CHANGELOG_TOPIC_2 = "changelog-2";
private static final String CHANGELOG_TOPIC_3 = "changelog-3";
@Test
public void subtopologiesMapShouldNotBeNull() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsTopology(1, null));
assertEquals("Subtopologies cannot be null.", exception.getMessage());
}
@Test
public void topologyEpochShouldNotBeNegative() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1())
);
final Exception exception = assertThrows(IllegalArgumentException.class, () -> new StreamsTopology(-1, subtopologies));
assertEquals("Topology epoch must be non-negative.", exception.getMessage());
}
@Test
public void subtopologiesMapShouldBeImmutable() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1())
);
assertThrows(
UnsupportedOperationException.class,
() -> new StreamsTopology(1, subtopologies).subtopologies().put("subtopology-2", mkSubtopology2())
);
}
@Test
public void requiredTopicsShouldBeCorrect() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()),
mkEntry(SUBTOPOLOGY_ID_2, mkSubtopology2())
);
StreamsTopology topology = new StreamsTopology(1, subtopologies);
Set<String> expectedTopics = Set.of(
SOURCE_TOPIC_1, SOURCE_TOPIC_2, SOURCE_TOPIC_3,
REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, REPARTITION_TOPIC_3,
CHANGELOG_TOPIC_1, CHANGELOG_TOPIC_2, CHANGELOG_TOPIC_3
);
assertEquals(expectedTopics, topology.requiredTopics());
}
@Test
public void fromRecordShouldCreateCorrectTopology() {
StreamsGroupTopologyValue record = new StreamsGroupTopologyValue()
.setEpoch(1)
.setSubtopologies(Arrays.asList(mkSubtopology1(), mkSubtopology2()));
StreamsTopology topology = StreamsTopology.fromRecord(record);
assertEquals(1, topology.topologyEpoch());
assertEquals(2, topology.subtopologies().size());
assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_1));
assertEquals(mkSubtopology1(), topology.subtopologies().get(SUBTOPOLOGY_ID_1));
assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_2));
assertEquals(mkSubtopology2(), topology.subtopologies().get(SUBTOPOLOGY_ID_2));
}
@Test
public void fromHeartbeatRequestShouldCreateCorrectTopology() {
StreamsGroupHeartbeatRequestData.Topology requestTopology = new StreamsGroupHeartbeatRequestData.Topology()
.setEpoch(1)
.setSubtopologies(List.of(mkRequestSubtopology1(), mkRequestSubtopology2()));
StreamsTopology topology = StreamsTopology.fromHeartbeatRequest(requestTopology);
assertEquals(1, topology.topologyEpoch());
assertEquals(2, topology.subtopologies().size());
assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_1));
assertEquals(mkSubtopology1(), topology.subtopologies().get(SUBTOPOLOGY_ID_1));
assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_2));
assertEquals(mkSubtopology2(), topology.subtopologies().get(SUBTOPOLOGY_ID_2));
}
@Test
public void asStreamsGroupDescribeTopologyShouldReturnCorrectStructure() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()),
mkEntry(SUBTOPOLOGY_ID_2, mkSubtopology2())
);
StreamsTopology topology = new StreamsTopology(1, subtopologies);
StreamsGroupDescribeResponseData.Topology describeTopology = topology.asStreamsGroupDescribeTopology();
assertEquals(1, describeTopology.epoch());
assertEquals(2, describeTopology.subtopologies().size());
// Verify subtopologies are correctly converted and sorted
List<StreamsGroupDescribeResponseData.Subtopology> sortedSubtopologies =
describeTopology.subtopologies().stream()
.sorted(Comparator.comparing(StreamsGroupDescribeResponseData.Subtopology::subtopologyId))
.toList();
// Verify first subtopology
StreamsGroupDescribeResponseData.Subtopology sub1 = sortedSubtopologies.get(0);
assertEquals(SUBTOPOLOGY_ID_1, sub1.subtopologyId());
// Source topics are sorted alphabetically
assertEquals(List.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, SOURCE_TOPIC_1, SOURCE_TOPIC_2),
sub1.sourceTopics());
assertEquals(List.of(REPARTITION_TOPIC_3), sub1.repartitionSinkTopics());
assertEquals(2, sub1.repartitionSourceTopics().size());
assertEquals(2, sub1.stateChangelogTopics().size());
// Verify second subtopology
StreamsGroupDescribeResponseData.Subtopology sub2 = sortedSubtopologies.get(1);
assertEquals(SUBTOPOLOGY_ID_2, sub2.subtopologyId());
// Source topics are sorted alphabetically
assertEquals(List.of(REPARTITION_TOPIC_3, SOURCE_TOPIC_3), sub2.sourceTopics());
assertEquals(List.of(), sub2.repartitionSinkTopics());
assertEquals(1, sub2.repartitionSourceTopics().size());
assertEquals(1, sub2.stateChangelogTopics().size());
}
@Test
public void asStreamsGroupDescribeTopicInfoShouldConvertCorrectly() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1())
);
StreamsTopology topology = new StreamsTopology(1, subtopologies);
StreamsGroupDescribeResponseData.Topology describeTopology = topology.asStreamsGroupDescribeTopology();
StreamsGroupDescribeResponseData.Subtopology describedSub = describeTopology.subtopologies().get(0);
// Verify repartition source topics are correctly converted
List<StreamsGroupDescribeResponseData.TopicInfo> repartitionTopics = describedSub.repartitionSourceTopics();
assertEquals(2, repartitionTopics.size());
// Find the first repartition topic (they should be sorted by name)
StreamsGroupDescribeResponseData.TopicInfo firstTopic = repartitionTopics.stream()
.filter(topic -> topic.name().equals(REPARTITION_TOPIC_1))
.findFirst()
.orElseThrow();
assertEquals(REPARTITION_TOPIC_1, firstTopic.name());
// Verify changelog topics are correctly converted
List<StreamsGroupDescribeResponseData.TopicInfo> changelogTopics = describedSub.stateChangelogTopics();
assertEquals(2, changelogTopics.size());
// Find the first changelog topic (they should be sorted by name)
StreamsGroupDescribeResponseData.TopicInfo firstChangelog = changelogTopics.stream()
.filter(topic -> topic.name().equals(CHANGELOG_TOPIC_1))
.findFirst()
.orElseThrow();
assertEquals(CHANGELOG_TOPIC_1, firstChangelog.name());
}
@Test
public void asStreamsGroupDescribeTopologyWithEmptySubtopologies() {
StreamsTopology topology = new StreamsTopology(0, Map.of());
StreamsGroupDescribeResponseData.Topology describeTopology = topology.asStreamsGroupDescribeTopology();
assertEquals(0, describeTopology.epoch());
assertEquals(0, describeTopology.subtopologies().size());
}
@Test
public void sourceTopicMapShouldBeComputedCorrectly() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()),
mkEntry(SUBTOPOLOGY_ID_2, mkSubtopology2())
);
StreamsTopology topology = new StreamsTopology(1, subtopologies);
// Verify sourceTopicMap contains all source topics from both subtopologies
Map<String, Subtopology> sourceTopicMap = topology.sourceTopicMap();
// From subtopology 1: SOURCE_TOPIC_1, SOURCE_TOPIC_2, REPARTITION_TOPIC_1, REPARTITION_TOPIC_2
// From subtopology 2: SOURCE_TOPIC_3, REPARTITION_TOPIC_3
assertEquals(6, sourceTopicMap.size());
// Verify regular source topics
assertTrue(sourceTopicMap.containsKey(SOURCE_TOPIC_1));
assertEquals(mkSubtopology1(), sourceTopicMap.get(SOURCE_TOPIC_1));
assertTrue(sourceTopicMap.containsKey(SOURCE_TOPIC_2));
assertEquals(mkSubtopology1(), sourceTopicMap.get(SOURCE_TOPIC_2));
assertTrue(sourceTopicMap.containsKey(SOURCE_TOPIC_3));
assertEquals(mkSubtopology2(), sourceTopicMap.get(SOURCE_TOPIC_3));
// Verify repartition source topics
assertTrue(sourceTopicMap.containsKey(REPARTITION_TOPIC_1));
assertEquals(mkSubtopology1(), sourceTopicMap.get(REPARTITION_TOPIC_1));
assertTrue(sourceTopicMap.containsKey(REPARTITION_TOPIC_2));
assertEquals(mkSubtopology1(), sourceTopicMap.get(REPARTITION_TOPIC_2));
assertTrue(sourceTopicMap.containsKey(REPARTITION_TOPIC_3));
assertEquals(mkSubtopology2(), sourceTopicMap.get(REPARTITION_TOPIC_3));
}
@Test
public void sourceTopicMapShouldBeImmutable() {
Map<String, Subtopology> subtopologies = mkMap(
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1())
);
StreamsTopology topology = new StreamsTopology(1, subtopologies);
assertThrows(
UnsupportedOperationException.class,
() -> topology.sourceTopicMap().put("test-topic", mkSubtopology1())
);
}
private Subtopology mkSubtopology1() {
return new Subtopology()
.setSubtopologyId(SUBTOPOLOGY_ID_1)
.setSourceTopics(List.of(
SOURCE_TOPIC_1,
SOURCE_TOPIC_2,
REPARTITION_TOPIC_1,
REPARTITION_TOPIC_2
))
.setRepartitionSourceTopics(List.of(
new TopicInfo().setName(REPARTITION_TOPIC_1),
new TopicInfo().setName(REPARTITION_TOPIC_2)
))
.setRepartitionSinkTopics(List.of(
REPARTITION_TOPIC_3
))
.setStateChangelogTopics(List.of(
new TopicInfo().setName(CHANGELOG_TOPIC_1),
new TopicInfo().setName(CHANGELOG_TOPIC_2)
))
.setCopartitionGroups(List.of(
new StreamsGroupTopologyValue.CopartitionGroup()
.setRepartitionSourceTopics(List.of((short) 0))
.setSourceTopics(List.of((short) 0)),
new StreamsGroupTopologyValue.CopartitionGroup()
.setRepartitionSourceTopics(List.of((short) 1))
.setSourceTopics(List.of((short) 1))
));
}
private Subtopology mkSubtopology2() {
return new Subtopology()
.setSubtopologyId(SUBTOPOLOGY_ID_2)
.setSourceTopics(List.of(
SOURCE_TOPIC_3,
REPARTITION_TOPIC_3
))
.setRepartitionSourceTopics(List.of(
new TopicInfo().setName(REPARTITION_TOPIC_3)
))
.setStateChangelogTopics(List.of(
new TopicInfo().setName(CHANGELOG_TOPIC_3)
));
}
private StreamsGroupHeartbeatRequestData.Subtopology mkRequestSubtopology1() {
return new StreamsGroupHeartbeatRequestData.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_ID_1)
.setSourceTopics(List.of(
SOURCE_TOPIC_1,
SOURCE_TOPIC_2,
REPARTITION_TOPIC_1,
REPARTITION_TOPIC_2
))
.setRepartitionSourceTopics(List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(REPARTITION_TOPIC_1),
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(REPARTITION_TOPIC_2)
))
.setRepartitionSinkTopics(List.of(
REPARTITION_TOPIC_3
))
.setStateChangelogTopics(List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(CHANGELOG_TOPIC_1),
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(CHANGELOG_TOPIC_2)
))
.setCopartitionGroups(List.of(
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setRepartitionSourceTopics(List.of((short) 0))
.setSourceTopics(List.of((short) 0)),
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setRepartitionSourceTopics(List.of((short) 1))
.setSourceTopics(List.of((short) 1))
));
}
private StreamsGroupHeartbeatRequestData.Subtopology mkRequestSubtopology2() {
return new StreamsGroupHeartbeatRequestData.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_ID_2)
.setSourceTopics(List.of(
SOURCE_TOPIC_3,
REPARTITION_TOPIC_3
))
.setRepartitionSourceTopics(List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(REPARTITION_TOPIC_3)
))
.setStateChangelogTopics(List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo().setName(CHANGELOG_TOPIC_3)
));
}
}
|
StreamsTopologyTest
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/collection_in_constructor/Clerk.java
|
{
"start": 748,
"end": 1616
}
|
class ____ {
private Integer id;
private String name;
public Clerk() {
super();
}
public Clerk(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public int hashCode() {
return Objects.hash(id, name);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof Clerk)) {
return false;
}
Clerk other = (Clerk) obj;
return Objects.equals(id, other.id) && Objects.equals(name, other.name);
}
@Override
public String toString() {
return "Clerk [id=" + id + ", name=" + name + "]";
}
}
|
Clerk
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
|
{
"start": 3969,
"end": 4626
}
|
class ____ {
public static final FieldType FIELD_TYPE;
static {
final FieldType ft = new FieldType();
ft.setTokenized(true);
ft.setStored(false);
ft.setStoreTermVectors(false);
ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
ft.setOmitNorms(true);
FIELD_TYPE = freezeAndDeduplicateFieldType(ft);
}
public static final boolean DEFAULT_PRESERVE_SEPARATORS = true;
public static final boolean DEFAULT_POSITION_INCREMENTS = true;
public static final int DEFAULT_MAX_INPUT_LENGTH = 50;
}
public static
|
Defaults
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/bug/Bug_for_xuershan.java
|
{
"start": 967,
"end": 1842
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
private MockDriver driver;
protected void setUp() throws Exception {
driver = new MockDriver() {
public ResultSet createResultSet(MockPreparedStatement stmt) {
return null;
}
};
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xx");
dataSource.setDriver(driver);
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_bug_for_xuershan() throws Exception {
String sql = "select 1";
Connection conn = dataSource.getConnection();
PreparedStatement stmt = conn.prepareStatement(sql);
stmt.execute();
assertNull(stmt.getResultSet());
stmt.close();
conn.close();
}
}
|
Bug_for_xuershan
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/hql/Human.java
|
{
"start": 395,
"end": 2371
}
|
class ____ extends Mammal {
private Name name;
private String nickName;
private Collection friends;
private Collection pets;
private Map family;
private double heightInches;
private BigInteger bigIntegerValue;
private BigDecimal bigDecimalValue;
private int intValue;
private float floatValue;
private Set nickNames;
private Map addresses;
public Collection getFriends() {
return friends;
}
public void setFriends(Collection friends) {
this.friends = friends;
}
public Collection getPets() {
return pets;
}
public void setPets(Collection pets) {
this.pets = pets;
}
public Name getName() {
return name;
}
public void setName(Name name) {
this.name = name;
}
public String getNickName() {
return nickName;
}
public void setNickName(String nickName) {
this.nickName = nickName;
}
public double getHeightInches() {
return heightInches;
}
public void setHeightInches(double height) {
this.heightInches = height;
}
public Map getFamily() {
return family;
}
public void setFamily(Map family) {
this.family = family;
}
public Set getNickNames() {
return nickNames;
}
public void setNickNames(Set nickNames) {
this.nickNames = nickNames;
}
public Map getAddresses() {
return addresses;
}
public void setAddresses(Map addresses) {
this.addresses = addresses;
}
public BigDecimal getBigDecimalValue() {
return bigDecimalValue;
}
public void setBigDecimalValue(BigDecimal bigDecimalValue) {
this.bigDecimalValue = bigDecimalValue;
}
public BigInteger getBigIntegerValue() {
return bigIntegerValue;
}
public void setBigIntegerValue(BigInteger bigIntegerValue) {
this.bigIntegerValue = bigIntegerValue;
}
public float getFloatValue() {
return floatValue;
}
public void setFloatValue(float floatValue) {
this.floatValue = floatValue;
}
public int getIntValue() {
return intValue;
}
public void setIntValue(int intValue) {
this.intValue = intValue;
}
}
|
Human
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/restriction/RestrictionTest.java
|
{
"start": 1960,
"end": 13607
}
|
class ____ {
@Test
void test(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
scope.inTransaction( session -> {
session.persist(new Book("9781932394153", "Hibernate in Action", 400));
session.persist(new Book("9781617290459", "Java Persistence with Hibernate", 1000));
});
var bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
@SuppressWarnings( "unchecked" )
var title = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("title");
@SuppressWarnings( "unchecked" )
var isbn = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("isbn");
@SuppressWarnings( "unchecked" )
var pages = (SingularAttribute<? super Book, Integer>) bookType.findSingularAttribute("pages");
Book book = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( equal( isbn, "9781932394153" ) )
.createQuery( session )
.getSingleResult() );
assertEquals( "Hibernate in Action", book.title );
List<Book> books = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( like( title, "%Hibernate%" ) )
.sort( desc( title ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, books.size() );
assertEquals( "Java Persistence with Hibernate", books.get(0).title );
assertEquals( "Hibernate in Action", books.get(1).title );
List<Book> booksByIsbn = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( in( isbn, List.of("9781932394153", "9781617290459") ) )
.sort( asc( title ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, booksByIsbn.size() );
assertEquals( "Hibernate in Action", booksByIsbn.get(0).title );
assertEquals( "Java Persistence with Hibernate", booksByIsbn.get(1).title );
List<Book> booksByPages = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( greaterThan( pages, 500 ) )
.createQuery( session )
.getResultList() );
assertEquals( 1, booksByPages.size() );
List<Book> booksByPageRange = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( between( pages, 150, 400 ) )
.createQuery( session )
.getResultList() );
assertEquals( 1, booksByPageRange.size() );
Book bookByTitle = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( equalIgnoringCase( title, "hibernate in action" ) )
.createQuery( session )
.getSingleResultOrNull() );
assertEquals( "9781932394153", bookByTitle.isbn );
Book bookByTitleUnsafe = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( restrict( Book.class, "title",
singleCaseInsensitiveValue("hibernate in action") ) )
.createQuery( session )
.getSingleResultOrNull() );
assertEquals( "9781932394153", bookByTitleUnsafe.isbn );
List<Book> allBooks = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( unrestricted() )
.createQuery( session )
.getResultList() );
assertEquals( 2, allBooks.size() );
List<Book> noBooks = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( unrestricted().negated() )
.createQuery( session )
.getResultList() );
assertEquals( 0, noBooks.size() );
List<Book> books1 = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( endsWith(title, "Hibernate") )
.createQuery( session )
.getResultList() );
assertEquals( 1, books1.size() );
List<Book> books2 = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( like(title, "*Hibernat?", false, '?', '*') )
.createQuery( session )
.getResultList() );
assertEquals( 1, books2.size() );
List<Book> books3 = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( contains(title, "Hibernate") )
.createQuery( session )
.getResultList() );
assertEquals( 2, books3.size() );
List<Book> booksByTitleAndIsbn = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( all( contains(title, "Hibernate"),
equal( isbn, "9781932394153" ) ) )
.createQuery( session )
.getResultList() );
assertEquals( 1, booksByTitleAndIsbn.size() );
List<Book> booksByTitleOrIsbn = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( any( contains(title, "Hibernate"),
equal( isbn, "9781932394153" ) ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, booksByTitleOrIsbn.size() );
List<Book> booksByIsbn1 = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( in( isbn, "9781932394153", "9781617290459", "XYZ" ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, booksByIsbn1.size() );
List<Book> booksByIsbn2 = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( in( isbn, List.of("9781617290459", "XYZ", "ABC") ) )
.createQuery( session )
.getResultList() );
assertEquals( 1, booksByIsbn2.size() );
}
@Test
void testPath(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
scope.inTransaction( session -> {
Publisher pub = new Publisher();
pub.name = "Manning";
session.persist( pub );
session.persist( new Book( "9781932394153", "Hibernate in Action", 400, pub ) );
session.persist( new Book( "9781617290459", "Java Persistence with Hibernate", 1000, pub ) );
} );
var bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
var pubType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Publisher.class);
@SuppressWarnings( "unchecked" )
var title = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("title");
@SuppressWarnings( "unchecked" )
var isbn = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("isbn");
@SuppressWarnings( "unchecked" )
var pages = (SingularAttribute<? super Book, Integer>) bookType.findSingularAttribute("pages");
@SuppressWarnings( "unchecked" )
var publisher = (SingularAttribute<? super Book, Publisher>) bookType.findSingularAttribute("publisher");
@SuppressWarnings( "unchecked" )
var name = (SingularAttribute<? super Publisher, String>) pubType.findSingularAttribute("name");
@SuppressWarnings( "unchecked" )
var version = (SingularAttribute<? super Publisher, Integer>) pubType.findSingularAttribute("version");
scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).equalTo( session.find(Book.class, "9781932394153") ) )
.createQuery( session )
.getSingleResult() );
List<Book> booksInIsbn = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).to(isbn).in( List.of("9781932394153", "9781617290459") ) )
.sort( desc( isbn ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, booksInIsbn.size() );
List<Book> booksWithPub = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).to(publisher).to(name).equalTo("Manning") )
.sort( desc( title ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, booksWithPub.size() );
List<Book> noBookWithPub = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).to(publisher).to(name).notEqualTo("Manning") )
.sort( desc( title ) )
.createQuery( session )
.getResultList() );
assertEquals( 0, noBookWithPub.size() );
List<Book> books = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).to(title).restrict( containing("hibernate", false) ) )
.sort( desc( title ) )
.createQuery( session )
.getResultList() );
assertEquals( 2, books.size() );
List<Book> booksWithPubVersion = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class).to(publisher).to(version).restrict( greaterThan(5) ) )
.createQuery( session )
.getResultList() );
assertEquals( 0, booksWithPubVersion.size() );
List<Book> unsafeTest = scope.fromSession( session ->
SelectionSpecification.create( Book.class, "from Book" )
.restrict( from(Book.class)
.to("publisher", Publisher.class)
.to("name", String.class).equalTo("Manning") )
.createQuery( session )
.getResultList() );
assertEquals( 2, unsafeTest.size() );
}
@Test
void testCriteria(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
scope.inTransaction( session -> {
session.persist(new Book("9781932394153", "Hibernate in Action", 400));
session.persist(new Book("9781617290459", "Java Persistence with Hibernate", 1000));
});
var bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
@SuppressWarnings( "unchecked" )
var title = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("title");
@SuppressWarnings( "unchecked" )
var isbn = (SingularAttribute<? super Book, String>) bookType.findSingularAttribute("isbn");
@SuppressWarnings( "unchecked" )
var pages = (SingularAttribute<? super Book, Integer>) bookType.findSingularAttribute("pages");
scope.inSession( session -> {
var query = session.getCriteriaBuilder().createQuery(String.class);
var root = query.from( Book.class );
like( title, "%Hibernate%" ).apply( query, root );
query.select( root.get( title ) );
List<String> titles = session.createQuery( query ).getResultList();
assertEquals( 2, titles.size() );
} );
scope.inSession( session -> {
var query = session.getCriteriaBuilder().createQuery(String.class);
var root = query.from( Book.class );
equal( isbn, "9781932394153" ).apply( query, root );
query.select( root.get( title ) );
List<String> titles = session.createQuery( query ).getResultList();
assertEquals( 1, titles.size() );
} );
scope.inSession( session -> {
var query = session.getCriteriaBuilder().createQuery("select title from Book", String.class);
var root = query.getRoot(0, Book.class);
equal( isbn, "9781932394153" ).apply( query, root );
List<String> titles = session.createQuery( query ).getResultList();
assertEquals( 1, titles.size() );
} );
var builder = scope.getSessionFactory().getCriteriaBuilder();
var query = builder.createQuery("from Book where pages > 200", Book.class);
var root = query.getRoot(0, Book.class);
like( title, "Hibernate%" ).apply( query, root );
query.orderBy(builder.asc(root.get(title)), builder.desc(root.get(isbn)));
scope.inSession( session -> {
List<Book> matchingBooks = session.createSelectionQuery(query).getResultList();
assertEquals( 1, matchingBooks.size() );
});
}
@Entity(name="Book")
static
|
RestrictionTest
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/interceptor/FailingPrivateInterceptedMethodTest.java
|
{
"start": 1863,
"end": 2169
}
|
class ____ {
@AroundInvoke
public Object mySuperCoolAroundInvoke(InvocationContext ctx) throws Exception {
return "private" + ctx.proceed();
}
}
@Target({ TYPE, METHOD })
@Retention(RUNTIME)
@Documented
@InterceptorBinding
public @
|
SimpleInterceptor
|
java
|
quarkusio__quarkus
|
integration-tests/test-extension/extension/runtime/src/main/java/io/quarkus/extest/runtime/beans/PublicKeyProducer.java
|
{
"start": 282,
"end": 704
}
|
class ____ {
private static final Logger log = Logger.getLogger("PublicKeyProducer");
private DSAPublicKey publicKey;
public PublicKeyProducer() {
}
@Produces
public DSAPublicKey getPublicKey() {
return publicKey;
}
public void setPublicKey(DSAPublicKey publicKey) {
log.debugf("setPublicKey, key=%s", publicKey);
this.publicKey = publicKey;
}
}
|
PublicKeyProducer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java
|
{
"start": 812,
"end": 1609
}
|
class ____ extends ActionResponse implements ToXContentObject {
private final Set<String> found;
public DeletePrivilegesResponse(StreamInput in) throws IOException {
this.found = in.readCollectionAsImmutableSet(StreamInput::readString);
}
public DeletePrivilegesResponse(Collection<String> found) {
this.found = Set.copyOf(found);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject().field("found", found).endObject();
return builder;
}
public Set<String> found() {
return this.found;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(found);
}
}
|
DeletePrivilegesResponse
|
java
|
google__auto
|
common/src/test/java/com/google/auto/common/BasicAnnotationProcessorTest.java
|
{
"start": 33757,
"end": 34339
}
|
class ____ {}",
"}");
Compilation compilation =
javac().withProcessors(new MissingAnnotationProcessor()).compile(source);
assertThat(compilation).succeeded();
assertThat(compilation).generatedSourceFile("test.ValidInRound2XYZ");
}
@Test
public void properlySkipsMissingAnnotations_passesValidAnnotationsToProcess() {
JavaFileObject source =
JavaFileObjects.forSourceLines(
"test.ClassA",
"package test;",
"",
"@" + AnAnnotation.class.getCanonicalName(),
"public
|
ValidInRound1
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/config/DeprecatedBeanWarnerTests.java
|
{
"start": 1583,
"end": 1892
}
|
class ____ extends DeprecatedBeanWarner {
@Override
protected void logDeprecatedBean(String beanName, Class<?> beanType, BeanDefinition beanDefinition) {
DeprecatedBeanWarnerTests.this.beanName = beanName;
DeprecatedBeanWarnerTests.this.beanDefinition = beanDefinition;
}
}
}
|
MyDeprecatedBeanWarner
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-api/src/test/java/org/apache/dubbo/remoting/buffer/ChannelBufferFactoryTest.java
|
{
"start": 1033,
"end": 2183
}
|
class ____ {
@Test
void test() {
ChannelBufferFactory directChannelBufferFactory = DirectChannelBufferFactory.getInstance();
ChannelBufferFactory heapChannelBufferFactory = HeapChannelBufferFactory.getInstance();
ChannelBuffer directBuffer1 = directChannelBufferFactory.getBuffer(16);
ChannelBuffer directBuffer2 = directChannelBufferFactory.getBuffer(ByteBuffer.allocate(16));
ChannelBuffer directBuffer3 = directChannelBufferFactory.getBuffer(new byte[] {1}, 0, 1);
Assertions.assertTrue(directBuffer1.isDirect());
Assertions.assertTrue(directBuffer2.isDirect());
Assertions.assertTrue(directBuffer3.isDirect());
ChannelBuffer heapBuffer1 = heapChannelBufferFactory.getBuffer(16);
ChannelBuffer heapBuffer2 = heapChannelBufferFactory.getBuffer(ByteBuffer.allocate(16));
ChannelBuffer heapBuffer3 = heapChannelBufferFactory.getBuffer(new byte[] {1}, 0, 1);
Assertions.assertTrue(heapBuffer1.hasArray());
Assertions.assertTrue(heapBuffer2.hasArray());
Assertions.assertTrue(heapBuffer3.hasArray());
}
}
|
ChannelBufferFactoryTest
|
java
|
spring-projects__spring-boot
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
|
{
"start": 20758,
"end": 21668
}
|
class ____ extends ZipInflaterInputStream {
private final Cleanable cleanup;
private volatile boolean closed;
JarEntryInflaterInputStream(JarEntryInputStream inputStream, NestedJarFileResources resources) {
this(inputStream, resources, resources.getOrCreateInflater());
}
private JarEntryInflaterInputStream(JarEntryInputStream inputStream, NestedJarFileResources resources,
Inflater inflater) {
super(inputStream, inflater, inputStream.getUncompressedSize());
this.cleanup = NestedJarFile.this.cleaner.register(this, resources.createInflatorCleanupAction(inflater));
}
@Override
public void close() throws IOException {
if (this.closed) {
return;
}
this.closed = true;
super.close();
NestedJarFile.this.resources.removeInputStream(this);
this.cleanup.clean();
}
}
/**
* {@link InputStream} for raw zip data.
*/
private
|
JarEntryInflaterInputStream
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/server/ServerWebExchange.java
|
{
"start": 1567,
"end": 9282
}
|
interface ____ {
/**
* Name of {@link #getAttributes() attribute} whose value can be used to
* correlate log messages for this exchange. Use {@link #getLogPrefix()} to
* obtain a consistently formatted prefix based on this attribute.
* @since 5.1
* @see #getLogPrefix()
*/
String LOG_ID_ATTRIBUTE = ServerWebExchange.class.getName() + ".LOG_ID";
/**
* Return the current HTTP request.
*/
ServerHttpRequest getRequest();
/**
* Return the current HTTP response.
*/
ServerHttpResponse getResponse();
/**
* Return a mutable map of request attributes for the current exchange.
*/
Map<String, Object> getAttributes();
/**
* Return the request attribute value if present.
* @param name the attribute name
* @param <T> the attribute type
* @return the attribute value
*/
@SuppressWarnings("unchecked")
default <T> @Nullable T getAttribute(String name) {
return (T) getAttributes().get(name);
}
/**
* Return the request attribute value or if not present raise an
* {@link IllegalArgumentException}.
* @param name the attribute name
* @param <T> the attribute type
* @return the attribute value
*/
@SuppressWarnings("unchecked")
default <T> T getRequiredAttribute(String name) {
T value = getAttribute(name);
Assert.notNull(value, () -> "Required attribute '" + name + "' is missing");
return value;
}
/**
* Return the request attribute value, or a default, fallback value.
* @param name the attribute name
* @param defaultValue a default value to return instead
* @param <T> the attribute type
* @return the attribute value
*/
@SuppressWarnings("unchecked")
default <T> T getAttributeOrDefault(String name, T defaultValue) {
return (T) getAttributes().getOrDefault(name, defaultValue);
}
/**
* Return the web session for the current request.
* <p>Always guaranteed to return either an instance matching the session id
* requested by the client, or a new session either because the client did not
* specify a session id or because the underlying session expired.
* <p>Use of this method does not automatically create a session. See
* {@link WebSession} for more details.
*/
Mono<WebSession> getSession();
/**
* Return the authenticated user for the request, if any.
*/
<T extends Principal> Mono<T> getPrincipal();
/**
* Return the form data from the body of the request if the Content-Type is
* {@code "application/x-www-form-urlencoded"} or an empty map otherwise.
* <p><strong>Note:</strong> calling this method causes the request body to
* be read and parsed in full and the resulting {@code MultiValueMap} is
* cached so that this method is safe to call more than once.
*/
Mono<MultiValueMap<String, String>> getFormData();
/**
* Return the parts of a multipart request if the Content-Type is
* {@code "multipart/form-data"} or an empty map otherwise.
* <p><strong>Note:</strong> calling this method causes the request body to
* be read and parsed in full and the resulting {@code MultiValueMap} is
* cached so that this method is safe to call more than once.
* <p><strong>Note:</strong>the {@linkplain Part#content() contents} of each
* part is not cached, and can only be read once.
*/
Mono<MultiValueMap<String, Part>> getMultipartData();
/**
* Cleans up any storage used for multipart handling.
* @since 6.0.10
* @see Part#delete()
*/
default Mono<Void> cleanupMultipart() {
return getMultipartData()
.onErrorComplete() // ignore errors reading multipart data
.flatMapIterable(Map::values)
.flatMapIterable(Function.identity())
.flatMap(part -> part.delete().onErrorComplete())
.then();
}
/**
* Return the {@link LocaleContext} using the configured
* {@link org.springframework.web.server.i18n.LocaleContextResolver}.
*/
LocaleContext getLocaleContext();
/**
* Return the {@link ApplicationContext} associated with the web application,
* if it was initialized with one via
* {@link org.springframework.web.server.adapter.WebHttpHandlerBuilder#applicationContext(ApplicationContext)}.
* @since 5.0.3
* @see org.springframework.web.server.adapter.WebHttpHandlerBuilder#applicationContext(ApplicationContext)
*/
@Nullable ApplicationContext getApplicationContext();
/**
* Returns {@code true} if the one of the {@code checkNotModified} methods
* in this contract were used and they returned true.
*/
boolean isNotModified();
/**
* An overloaded variant of {@link #checkNotModified(String, Instant)} with
* a last-modified timestamp only.
* @param lastModified the last-modified time
* @return whether the request qualifies as not modified
*/
boolean checkNotModified(Instant lastModified);
/**
* An overloaded variant of {@link #checkNotModified(String, Instant)} with
* an {@code ETag} (entity tag) value only.
* @param etag the entity tag for the underlying resource.
* @return true if the request does not require further processing.
*/
boolean checkNotModified(String etag);
/**
* Check whether the requested resource has been modified given the supplied
* {@code ETag} (entity tag) and last-modified timestamp as determined by
* the application. Also transparently prepares the response, setting HTTP
* status, and adding "ETag" and "Last-Modified" headers when applicable.
* This method works with conditional GET/HEAD requests as well as with
* conditional POST/PUT/DELETE requests.
* <p><strong>Note:</strong> The HTTP specification recommends setting both
* ETag and Last-Modified values, but you can also use
* {@code #checkNotModified(String)} or
* {@link #checkNotModified(Instant)}.
* @param etag the entity tag that the application determined for the
* underlying resource. This parameter will be padded with quotes (")
* if necessary.
* @param lastModified the last-modified timestamp that the application
* determined for the underlying resource
* @return true if the request does not require further processing.
*/
boolean checkNotModified(@Nullable String etag, Instant lastModified);
/**
* Transform the given url according to the registered transformation function(s).
* By default, this method returns the given {@code url}, though additional
* transformation functions can be registered with {@link #addUrlTransformer}
* @param url the URL to transform
* @return the transformed URL
*/
String transformUrl(String url);
/**
* Register an additional URL transformation function for use with {@link #transformUrl}.
* The given function can be used to insert an id for authentication, a nonce for CSRF
* protection, etc.
* <p>Note that the given function is applied after any previously registered functions.
* @param transformer a URL transformation function to add
*/
void addUrlTransformer(Function<String, String> transformer);
/**
* Return a log message prefix to use to correlate messages for this exchange.
* The prefix is based on the value of the attribute {@link #LOG_ID_ATTRIBUTE}
* along with some extra formatting so that the prefix can be conveniently
* prepended with no further formatting no separators required.
* @return the log message prefix or an empty String if the
* {@link #LOG_ID_ATTRIBUTE} is not set.
* @since 5.1
*/
String getLogPrefix();
/**
* Return a builder to mutate properties of this exchange by wrapping it
* with {@link ServerWebExchangeDecorator} and returning either mutated
* values or delegating back to this instance.
*/
default Builder mutate() {
return new DefaultServerWebExchangeBuilder(this);
}
/**
* Builder for mutating an existing {@link ServerWebExchange}.
* Removes the need
*/
|
ServerWebExchange
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/cache/TestingWeighers.java
|
{
"start": 1837,
"end": 2025
}
|
class ____ implements Weigher<Object, Integer> {
@Override
public int weigh(Object key, Integer value) {
return value;
}
}
private TestingWeighers() {}
}
|
IntValueWeigher
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/expressions/converter/OverConvertRule.java
|
{
"start": 2989,
"end": 11925
}
|
class ____ implements CallExpressionConvertRule {
@Override
public Optional<RexNode> convert(CallExpression call, ConvertContext context) {
List<Expression> children = call.getChildren();
if (call.getFunctionDefinition() == BuiltInFunctionDefinitions.OVER) {
FlinkTypeFactory typeFactory = context.getTypeFactory();
Expression agg = children.get(0);
FunctionDefinition def = ((CallExpression) agg).getFunctionDefinition();
boolean isDistinct = BuiltInFunctionDefinitions.DISTINCT == def;
SqlAggFunction aggFunc = agg.accept(new SqlAggFunctionVisitor(context.getRelBuilder()));
RelDataType aggResultType =
typeFactory.createFieldTypeFromLogicalType(
fromDataTypeToLogicalType(
((ResolvedExpression) agg).getOutputDataType()));
// assemble exprs by agg children
List<RexNode> aggExprs =
agg.getChildren().stream()
.map(
child -> {
if (isDistinct) {
return context.toRexNode(child.getChildren().get(0));
} else {
return context.toRexNode(child);
}
})
.collect(Collectors.toList());
// assemble order by key
Expression orderKeyExpr = children.get(1);
Set<SqlKind> kinds = new HashSet<>();
RexNode collationRexNode =
createCollation(
context.toRexNode(orderKeyExpr),
RelFieldCollation.Direction.ASCENDING,
null,
kinds);
ImmutableList<RexFieldCollation> orderKey =
ImmutableList.of(new RexFieldCollation(collationRexNode, kinds));
// assemble partition by keys
List<RexNode> partitionKeys =
children.subList(4, children.size()).stream()
.map(context::toRexNode)
.collect(Collectors.toList());
// assemble bounds
Expression preceding = children.get(2);
boolean isRows = isRows((ValueLiteralExpression) preceding);
Expression following = children.get(3);
RexWindowBound lowerBound = createBound(context, preceding, SqlKind.PRECEDING);
RexWindowBound upperBound = createBound(context, following, SqlKind.FOLLOWING);
// build RexOver
return Optional.of(
context.getRelBuilder()
.getRexBuilder()
.makeOver(
aggResultType,
aggFunc,
aggExprs,
partitionKeys,
orderKey,
lowerBound,
upperBound,
isRows,
true,
false,
isDistinct,
false));
}
return Optional.empty();
}
private static boolean isRows(ValueLiteralExpression preceding) {
if (preceding.isNull()) {
return true;
}
return preceding
.getValueAs(OverWindowRange.class)
.map(r -> r == OverWindowRange.CURRENT_ROW || r == OverWindowRange.UNBOUNDED_ROW)
.orElseGet(
() ->
fromDataTypeToLogicalType(preceding.getOutputDataType())
.is(LogicalTypeRoot.BIGINT));
}
private RexNode createCollation(
RexNode node,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection,
Set<SqlKind> kinds) {
switch (node.getKind()) {
case DESCENDING:
kinds.add(node.getKind());
return createCollation(
((RexCall) node).getOperands().get(0),
RelFieldCollation.Direction.DESCENDING,
nullDirection,
kinds);
case NULLS_FIRST:
kinds.add(node.getKind());
return createCollation(
((RexCall) node).getOperands().get(0),
direction,
RelFieldCollation.NullDirection.FIRST,
kinds);
case NULLS_LAST:
kinds.add(node.getKind());
return createCollation(
((RexCall) node).getOperands().get(0),
direction,
RelFieldCollation.NullDirection.LAST,
kinds);
default:
if (nullDirection == null) {
// Set the null direction if not specified.
// Consistent with HIVE/SPARK/MYSQL
if (FlinkPlannerImpl.defaultNullCollation()
.last(direction.equals(RelFieldCollation.Direction.DESCENDING))) {
kinds.add(SqlKind.NULLS_LAST);
} else {
kinds.add(SqlKind.NULLS_FIRST);
}
}
return node;
}
}
private RexWindowBound createBound(ConvertContext context, Expression bound, SqlKind sqlKind) {
if (bound instanceof ValueLiteralExpression) {
final ValueLiteralExpression literal = (ValueLiteralExpression) bound;
if (literal.isNull()) {
if (sqlKind == SqlKind.PRECEDING) {
return RexWindowBounds.UNBOUNDED_PRECEDING;
} else {
return RexWindowBounds.CURRENT_ROW;
}
} else {
return literal.getValueAs(OverWindowRange.class)
.map(r -> createSymbolBound(r, sqlKind))
.orElseGet(
() ->
createLiteralBound(
context, (ValueLiteralExpression) bound, sqlKind));
}
} else {
throw new TableException("Unexpected expression: " + bound);
}
}
private static RexWindowBound createLiteralBound(
ConvertContext context, ValueLiteralExpression bound, SqlKind sqlKind) {
RelDataType returnType =
context.getTypeFactory()
.createFieldTypeFromLogicalType(new DecimalType(true, 19, 0));
SqlOperator sqlOperator =
new SqlPostfixOperator(
sqlKind.name(), sqlKind, 2, new OrdinalReturnTypeInference(0), null, null);
SqlNode[] operands = new SqlNode[] {SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO)};
SqlNode node = new SqlBasicCall(sqlOperator, operands, SqlParserPos.ZERO);
RexNode literalRexNode =
bound.getValueAs(BigDecimal.class)
.map(v -> context.getRelBuilder().literal(v))
.orElse(context.getRelBuilder().literal(extractValue(bound, Object.class)));
List<RexNode> expressions = new ArrayList<>();
expressions.add(literalRexNode);
RexNode rexNode =
context.getRelBuilder()
.getRexBuilder()
.makeCall(returnType, sqlOperator, expressions);
return RexWindowBounds.create(node, rexNode);
}
private static RexWindowBound createSymbolBound(OverWindowRange bound, SqlKind sqlKind) {
switch (bound) {
case CURRENT_ROW:
case CURRENT_RANGE:
SqlNode currentRow = SqlWindow.createCurrentRow(SqlParserPos.ZERO);
return RexWindowBounds.create(currentRow, null);
case UNBOUNDED_ROW:
case UNBOUNDED_RANGE:
SqlNode unbounded =
sqlKind.equals(SqlKind.PRECEDING)
? SqlWindow.createUnboundedPreceding(SqlParserPos.ZERO)
: SqlWindow.createUnboundedFollowing(SqlParserPos.ZERO);
return RexWindowBounds.create(unbounded, null);
default:
throw new IllegalArgumentException("Unexpected expression: " + bound);
}
}
}
|
OverConvertRule
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
|
{
"start": 30997,
"end": 31283
}
|
class ____ implements EventHandler {
public TaskAttemptEvent lastTaskAttemptEvent;
@Override
public void handle(Event event) {
if (event instanceof TaskAttemptEvent) {
lastTaskAttemptEvent = (TaskAttemptEvent)event;
}
}
};
}
|
MockTaskAttemptEventHandler
|
java
|
spring-projects__spring-boot
|
module/spring-boot-batch-jdbc/src/test/java/org/springframework/boot/batch/jdbc/autoconfigure/BatchJdbcPropertiesTests.java
|
{
"start": 1216,
"end": 1405
}
|
class ____ extends JdbcDefaultBatchConfiguration {
@Override
public boolean getValidateTransactionState() {
return super.getValidateTransactionState();
}
}
}
|
TestBatchConfiguration
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedDoublesIndexFieldData.java
|
{
"start": 12357,
"end": 13530
}
|
class ____ extends LeafDoubleFieldData {
final LeafReader reader;
final String field;
protected final ToScriptFieldFactory<SortedNumericDoubleValues> toScriptFieldFactory;
SortedNumericDoubleFieldData(
LeafReader reader,
String field,
ToScriptFieldFactory<SortedNumericDoubleValues> toScriptFieldFactory
) {
this.reader = reader;
this.field = field;
this.toScriptFieldFactory = toScriptFieldFactory;
}
@Override
public SortedNumericDoubleValues getDoubleValues() {
try {
SortedNumericDocValues raw = DocValues.getSortedNumeric(reader, field);
return FieldData.sortableLongBitsToDoubles(SortedNumericLongValues.wrap(raw));
} catch (IOException e) {
throw new IllegalStateException("Cannot load doc values", e);
}
}
@Override
public DocValuesScriptFieldFactory getScriptFieldFactory(String name) {
return toScriptFieldFactory.getScriptFieldFactory(getDoubleValues(), name);
}
}
}
|
SortedNumericDoubleFieldData
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/boot/internal/FunctionContributorImpl.java
|
{
"start": 395,
"end": 857
}
|
class ____ implements FunctionContributor {
@Override
public void contributeFunctions(FunctionContributions functionContributions) {
final EnversService enversService = functionContributions.getServiceRegistry().getService( EnversService.class );
if ( !enversService.isEnabled() ) {
return;
}
functionContributions.getFunctionRegistry().register( OrderByFragmentFunction.FUNCTION_NAME, OrderByFragmentFunction.INSTANCE );
}
}
|
FunctionContributorImpl
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractLinearCounting.java
|
{
"start": 881,
"end": 1063
}
|
class ____ without having read the paper is considered adventurous.
*
* The algorithm just keep a record of all distinct values provided encoded as an integer.
*/
public abstract
|
does
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/CompactHashMap.java
|
{
"start": 28881,
"end": 31905
}
|
class ____ extends AbstractMapEntry<K, V> {
@ParametricNullness private final K key;
private int lastKnownIndex;
MapEntry(int index) {
this.key = key(index);
this.lastKnownIndex = index;
}
@Override
@ParametricNullness
public K getKey() {
return key;
}
private void updateLastKnownIndex() {
if (lastKnownIndex == -1
|| lastKnownIndex >= size()
|| !Objects.equals(key, key(lastKnownIndex))) {
lastKnownIndex = indexOf(key);
}
}
@Override
@ParametricNullness
public V getValue() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
/*
* The cast is safe because the entry is present in the map. Or, if it has been removed by a
* concurrent modification, behavior is undefined.
*/
return uncheckedCastNullableTToT(delegate.get(key));
}
updateLastKnownIndex();
/*
* If the entry has been removed from the map, we return null, even though that might not be a
* valid value. That's the best we can do, short of holding a reference to the most recently
* seen value. And while we *could* do that, we aren't required to: Map.Entry explicitly says
* that behavior is undefined when the backing map is modified through another API. (It even
* permits us to throw IllegalStateException. Maybe we should have done that, but we probably
* shouldn't change now for fear of breaking people.)
*/
return (lastKnownIndex == -1) ? unsafeNull() : value(lastKnownIndex);
}
@Override
@ParametricNullness
public V setValue(@ParametricNullness V value) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return uncheckedCastNullableTToT(delegate.put(key, value)); // See discussion in getValue().
}
updateLastKnownIndex();
if (lastKnownIndex == -1) {
put(key, value);
return unsafeNull(); // See discussion in getValue().
} else {
V old = value(lastKnownIndex);
CompactHashMap.this.setValue(lastKnownIndex, value);
return old;
}
}
}
@Override
public int size() {
Map<K, V> delegate = delegateOrNull();
return (delegate != null) ? delegate.size() : size;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public boolean containsValue(@Nullable Object value) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.containsValue(value);
}
for (int i = 0; i < size; i++) {
if (Objects.equals(value, value(i))) {
return true;
}
}
return false;
}
@LazyInit private transient @Nullable Collection<V> valuesView;
@Override
public Collection<V> values() {
return (valuesView == null) ? valuesView = createValues() : valuesView;
}
Collection<V> createValues() {
return new ValuesView();
}
@WeakOuter
private final
|
MapEntry
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/scripting/xmltags/OgnlCacheTest.java
|
{
"start": 1212,
"end": 2032
}
|
class ____ {
@SuppressWarnings("unused")
private int id;
}
int run = 1000;
Map<String, Object> context = new HashMap<>();
List<Future<Object>> futures = new ArrayList<>();
context.put("data", new DataClass());
ExecutorService executor = Executors.newCachedThreadPool();
IntStream.range(0, run).forEach(i -> futures.add(executor.submit(() -> OgnlCache.getValue("data.id", context))));
for (int i = 0; i < run; i++) {
assertNotNull(futures.get(i).get());
}
executor.shutdown();
}
@Test
void issue2609() throws Exception {
Map<String, Object> context = new HashMap<>();
context.put("d1", Date.valueOf("2022-01-01"));
context.put("d2", Date.valueOf("2022-01-02"));
assertEquals(-1, OgnlCache.getValue("d1.compareTo(d2)", context));
}
}
|
DataClass
|
java
|
spring-projects__spring-boot
|
build-plugin/spring-boot-maven-plugin/src/intTest/java/org/springframework/boot/maven/EclipseM2eIntegrationTests.java
|
{
"start": 1026,
"end": 1800
}
|
class ____ {
@Test // gh-21992
void pluginPomIncludesOptionalShadeDependency() throws Exception {
String version = new Versions().get("project.version");
File repository = new File("build/test-maven-repository");
File pluginDirectory = new File(repository, "org/springframework/boot/spring-boot-maven-plugin/" + version);
File[] pomFiles = pluginDirectory.listFiles(this::isPomFile);
Arrays.sort(pomFiles, Comparator.comparing(File::getName));
File pomFile = pomFiles[pomFiles.length - 1];
String pomContent = new String(FileCopyUtils.copyToByteArray(pomFile), StandardCharsets.UTF_8);
assertThat(pomContent).contains("maven-shade-plugin");
}
private boolean isPomFile(File file) {
return file.getName().endsWith(".pom");
}
}
|
EclipseM2eIntegrationTests
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/filter/Bucket4jFilterFunctions.java
|
{
"start": 6885,
"end": 7018
}
|
class ____ extends SimpleFilterSupplier {
public FilterSupplier() {
super(Bucket4jFilterFunctions.class);
}
}
}
|
FilterSupplier
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/array/AbstractArrayIntersectsFunction.java
|
{
"start": 706,
"end": 1599
}
|
class ____ extends AbstractSqmSelfRenderingFunctionDescriptor {
protected final boolean nullable;
public AbstractArrayIntersectsFunction(boolean nullable, TypeConfiguration typeConfiguration) {
super(
"array_intersects" + ( nullable ? "_nullable" : "" ),
StandardArgumentsValidators.composite(
StandardArgumentsValidators.exactly( 2 ),
ArraysOfSameTypeArgumentValidator.INSTANCE
),
StandardFunctionReturnTypeResolvers.invariant( typeConfiguration.standardBasicTypeForJavaType( Boolean.class ) ),
StandardFunctionArgumentTypeResolvers.byArgument(
StandardFunctionArgumentTypeResolvers.argumentsOrImplied( 1 ),
StandardFunctionArgumentTypeResolvers.argumentsOrImplied( 0 )
)
);
this.nullable = nullable;
}
@Override
public String getArgumentListSignature() {
return "(ARRAY array0, OBJECT array1)";
}
}
|
AbstractArrayIntersectsFunction
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/PemCertOrderWithTooManyValueTest.java
|
{
"start": 823,
"end": 1816
}
|
class ____ {
private static final String configuration = """
quarkus.tls.key-store.pem.foo.cert=target/certs/test-format.crt
quarkus.tls.key-store.pem.foo.key=target/certs/test-formats.key
quarkus.tls.key-store.pem.bar.cert=target/certs/test-format.crt
quarkus.tls.key-store.pem.bar.key=target/certs/test-formats.key
quarkus.tls.key-store.pem.order=bar,foo,baz
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"))
.assertException(t -> assertThat(t.getCause().getMessage()).contains("`order`", "keyCerts`"));
@Test
void test() throws KeyStoreException, CertificateParsingException {
fail("Should not be called as the extension should fail before.");
}
}
|
PemCertOrderWithTooManyValueTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
|
{
"start": 1794,
"end": 8777
}
|
class ____ {
private static final Logger log =
LoggerFactory.getLogger(AbstractLauncher.class);
public static final String CLASSPATH = "CLASSPATH";
public static final String ENV_DOCKER_CONTAINER_MOUNTS =
"YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
/**
* Env vars; set up at final launch stage
*/
protected final Map<String, String> envVars = new HashMap<>();
protected final ContainerLaunchContext containerLaunchContext =
Records.newRecord(ContainerLaunchContext.class);
protected final List<String> commands = new ArrayList<>(20);
protected final Map<String, LocalResource> localResources = new HashMap<>();
protected final Map<String, String> mountPaths = new HashMap<>();
private final Map<String, ByteBuffer> serviceData = new HashMap<>();
protected boolean yarnDockerMode = false;
protected String dockerImage;
protected String dockerNetwork;
protected String dockerHostname;
protected boolean runPrivilegedContainer = false;
private ServiceContext context;
public AbstractLauncher(ServiceContext context) {
this.context = context;
}
public void setYarnDockerMode(boolean yarnDockerMode){
this.yarnDockerMode = yarnDockerMode;
}
/**
* Get the env vars to work on
* @return env vars
*/
public Map<String, String> getEnv() {
return envVars;
}
/**
* Get the launch commands.
* @return the live list of commands
*/
public List<String> getCommands() {
return commands;
}
public void addLocalResource(String subPath, LocalResource resource) {
localResources.put(subPath, resource);
}
public void addLocalResource(String subPath, LocalResource resource, String mountPath) {
localResources.put(subPath, resource);
mountPaths.put(subPath, mountPath);
}
public void addCommand(String cmd) {
commands.add(cmd);
}
/**
* Complete the launch context (copy in env vars, etc).
* @return the container to launch
*/
public ContainerLaunchContext completeContainerLaunch() throws IOException {
String cmdStr = ServiceUtils.join(commands, " ", false);
log.debug("Completed setting up container command {}", cmdStr);
containerLaunchContext.setCommands(commands);
//env variables
if (log.isDebugEnabled()) {
log.debug("Environment variables");
for (Map.Entry<String, String> envPair : envVars.entrySet()) {
log.debug(" \"{}\"=\"{}\"", envPair.getKey(), envPair.getValue());
}
}
containerLaunchContext.setEnvironment(envVars);
//service data
if (log.isDebugEnabled()) {
log.debug("Service Data size");
for (Map.Entry<String, ByteBuffer> entry : serviceData.entrySet()) {
log.debug("\"{}\"=> {} bytes of data", entry.getKey(),
entry.getValue().array().length);
}
}
containerLaunchContext.setServiceData(serviceData);
// resources
dumpLocalResources();
containerLaunchContext.setLocalResources(localResources);
//tokens
if (context.tokens != null) {
containerLaunchContext.setTokens(context.tokens.duplicate());
}
if(yarnDockerMode){
Map<String, String> env = containerLaunchContext.getEnvironment();
env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage);
if (ServiceUtils.isSet(dockerNetwork)) {
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
dockerNetwork);
}
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME",
dockerHostname);
if (runPrivilegedContainer) {
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
"true");
}
if (!mountPaths.isEmpty()) {
StringBuilder sb = new StringBuilder();
if (env.get(ENV_DOCKER_CONTAINER_MOUNTS) != null) {
// user specified mounts in the spec
sb.append(env.get(ENV_DOCKER_CONTAINER_MOUNTS));
}
for (Entry<String, String> mount : mountPaths.entrySet()) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(mount.getKey()).append(":")
.append(mount.getValue()).append(":ro");
}
env.put(ENV_DOCKER_CONTAINER_MOUNTS, sb.toString());
}
log.info("yarn docker env var has been set {}",
containerLaunchContext.getEnvironment().toString());
}
return containerLaunchContext;
}
public void setRetryContext(int maxRetries, int retryInterval,
long failuresValidityInterval) {
ContainerRetryContext retryContext = ContainerRetryContext
.newInstance(ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, null,
maxRetries, retryInterval, failuresValidityInterval);
containerLaunchContext.setContainerRetryContext(retryContext);
}
/**
* Dump local resources at debug level
*/
private void dumpLocalResources() {
if (log.isDebugEnabled()) {
log.debug("{} resources: ", localResources.size());
for (Map.Entry<String, LocalResource> entry : localResources.entrySet()) {
String key = entry.getKey();
LocalResource val = entry.getValue();
log.debug("{} = {}", key, ServiceUtils.stringify(val.getResource()));
}
}
}
/**
* This is critical for an insecure cluster -it passes
* down the username to YARN, and so gives the code running
* in containers the rights it needs to work with
* data.
* @throws IOException problems working with current user
*/
protected void propagateUsernameInInsecureCluster() throws IOException {
//insecure cluster: propagate user name via env variable
String userName = UserGroupInformation.getCurrentUser().getUserName();
envVars.put(YarnServiceConstants.HADOOP_USER_NAME, userName);
}
/**
* Utility method to set up the classpath
* @param classpath classpath to use
*/
public void setClasspath(ClasspathConstructor classpath) {
setEnv(CLASSPATH, classpath.buildClasspath());
}
/**
* Set an environment variable in the launch context
* @param var variable name
* @param value value (must be non null)
*/
public void setEnv(String var, String value) {
Preconditions.checkArgument(var != null, "null variable name");
Preconditions.checkArgument(value != null, "null value");
envVars.put(var, value);
}
public void putEnv(Map<String, String> map) {
envVars.putAll(map);
}
public void setDockerImage(String dockerImage) {
this.dockerImage = dockerImage;
}
public void setDockerNetwork(String dockerNetwork) {
this.dockerNetwork = dockerNetwork;
}
public void setDockerHostname(String dockerHostname) {
this.dockerHostname = dockerHostname;
}
public void setRunPrivilegedContainer(boolean runPrivilegedContainer) {
this.runPrivilegedContainer = runPrivilegedContainer;
}
@VisibleForTesting
public String getDockerImage() {
return dockerImage;
}
}
|
AbstractLauncher
|
java
|
quarkusio__quarkus
|
extensions/reactive-routes/deployment/src/test/java/io/quarkus/vertx/web/base/RouteBaseTest.java
|
{
"start": 1394,
"end": 1618
}
|
class ____ {
@Route(path = "hello") // -> "/hello"
void hello(RoutingContext context) {
context.response().end("Hello world!");
}
}
@RouteBase(path = "simple")
static
|
SimpleBean
|
java
|
quarkusio__quarkus
|
integration-tests/hibernate-validator/src/test/java/io/quarkus/hibernate/validator/runtime/ArcProxyBeanMetaDataClassNormalizerTest.java
|
{
"start": 624,
"end": 965
}
|
class ____ beanClass isn't a Subclass.")
void normalize_simpleClass() {
Class<Original> expected = Original.class;
assertEquals(expected, new ArcProxyBeanMetaDataClassNormalizer().normalize(Original.class));
}
@Test
@DisplayName("Normalize should return 'superclass' if beanClass is the only Subclass in it's
|
if
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/endpoint/WebClientReactiveRefreshTokenTokenResponseClientTests.java
|
{
"start": 3202,
"end": 22632
}
|
class ____ {
private WebClientReactiveRefreshTokenTokenResponseClient tokenResponseClient = new WebClientReactiveRefreshTokenTokenResponseClient();
private ClientRegistration.Builder clientRegistrationBuilder;
private OAuth2AccessToken accessToken;
private OAuth2RefreshToken refreshToken;
private MockWebServer server;
@BeforeEach
public void setup() throws Exception {
this.server = new MockWebServer();
this.server.start();
String tokenUri = this.server.url("/oauth2/token").toString();
this.clientRegistrationBuilder = TestClientRegistrations.clientRegistration().tokenUri(tokenUri);
this.accessToken = TestOAuth2AccessTokens.scopes("read", "write");
this.refreshToken = TestOAuth2RefreshTokens.refreshToken();
}
@AfterEach
public void cleanup() throws Exception {
this.server.shutdown();
}
@Test
public void setWebClientWhenClientIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.setWebClient(null));
}
@Test
public void getTokenResponseWhenRequestIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.getTokenResponse(null).block());
}
@Test
public void getTokenResponseWhenSuccessResponseThenReturnAccessTokenResponse() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response.json"));
Instant expiresAtBefore = Instant.now().plusSeconds(3600);
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
OAuth2AccessTokenResponse accessTokenResponse = this.tokenResponseClient
.getTokenResponse(refreshTokenGrantRequest)
.block();
Instant expiresAtAfter = Instant.now().plusSeconds(3600);
RecordedRequest recordedRequest = this.server.takeRequest();
assertThat(recordedRequest.getMethod()).isEqualTo(HttpMethod.POST.toString());
assertThat(recordedRequest.getHeader(HttpHeaders.ACCEPT)).isEqualTo(MediaType.APPLICATION_JSON_VALUE);
assertThat(recordedRequest.getHeader(HttpHeaders.CONTENT_TYPE))
.isEqualTo(MediaType.APPLICATION_FORM_URLENCODED_VALUE);
assertThat(recordedRequest.getHeader(HttpHeaders.AUTHORIZATION)).startsWith("Basic ");
String formParameters = recordedRequest.getBody().readUtf8();
assertThat(formParameters).contains("grant_type=refresh_token");
assertThat(formParameters).contains("refresh_token=refresh-token");
assertThat(accessTokenResponse.getAccessToken().getTokenValue()).isEqualTo("access-token-1234");
assertThat(accessTokenResponse.getAccessToken().getTokenType()).isEqualTo(OAuth2AccessToken.TokenType.BEARER);
assertThat(accessTokenResponse.getAccessToken().getExpiresAt()).isBetween(expiresAtBefore, expiresAtAfter);
assertThat(accessTokenResponse.getAccessToken().getScopes())
.containsExactly(this.accessToken.getScopes().toArray(new String[0]));
assertThat(accessTokenResponse.getRefreshToken().getTokenValue()).isEqualTo(this.refreshToken.getTokenValue());
}
@Test
public void getTokenResponseWhenClientAuthenticationPostThenFormParametersAreSent() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response.json"));
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.clientAuthenticationMethod(ClientAuthenticationMethod.CLIENT_SECRET_POST)
.build();
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(clientRegistration,
this.accessToken, this.refreshToken);
this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block();
RecordedRequest recordedRequest = this.server.takeRequest();
assertThat(recordedRequest.getHeader(HttpHeaders.AUTHORIZATION)).isNull();
String formParameters = recordedRequest.getBody().readUtf8();
assertThat(formParameters).contains("client_id=client-id");
assertThat(formParameters).contains("client_secret=client-secret");
}
@Test
public void getTokenResponseWhenAuthenticationClientSecretJwtThenFormParametersAreSent() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response.json"));
// @formatter:off
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.clientAuthenticationMethod(ClientAuthenticationMethod.CLIENT_SECRET_JWT)
.clientSecret(TestKeys.DEFAULT_ENCODED_SECRET_KEY)
.build();
// @formatter:on
// Configure Jwt client authentication converter
SecretKeySpec secretKey = new SecretKeySpec(
clientRegistration.getClientSecret().getBytes(StandardCharsets.UTF_8), "HmacSHA256");
JWK jwk = TestJwks.jwk(secretKey).build();
Function<ClientRegistration, JWK> jwkResolver = (registration) -> jwk;
configureJwtClientAuthenticationConverter(jwkResolver);
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(clientRegistration,
this.accessToken, this.refreshToken);
this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block();
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getHeader(HttpHeaders.AUTHORIZATION)).isNull();
assertThat(actualRequest.getBody().readUtf8()).contains("grant_type=refresh_token",
"client_assertion_type=urn%3Aietf%3Aparams%3Aoauth%3Aclient-assertion-type%3Ajwt-bearer",
"client_assertion=");
}
@Test
public void getTokenResponseWhenAuthenticationPrivateKeyJwtThenFormParametersAreSent() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response.json"));
// @formatter:off
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.clientAuthenticationMethod(ClientAuthenticationMethod.PRIVATE_KEY_JWT)
.build();
// @formatter:on
// Configure Jwt client authentication converter
JWK jwk = TestJwks.DEFAULT_RSA_JWK;
Function<ClientRegistration, JWK> jwkResolver = (registration) -> jwk;
configureJwtClientAuthenticationConverter(jwkResolver);
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(clientRegistration,
this.accessToken, this.refreshToken);
this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block();
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getHeader(HttpHeaders.AUTHORIZATION)).isNull();
assertThat(actualRequest.getBody().readUtf8()).contains("grant_type=refresh_token",
"client_assertion_type=urn%3Aietf%3Aparams%3Aoauth%3Aclient-assertion-type%3Ajwt-bearer",
"client_assertion=");
}
private void configureJwtClientAuthenticationConverter(Function<ClientRegistration, JWK> jwkResolver) {
NimbusJwtClientAuthenticationParametersConverter<OAuth2RefreshTokenGrantRequest> jwtClientAuthenticationConverter = new NimbusJwtClientAuthenticationParametersConverter<>(
jwkResolver);
this.tokenResponseClient.addParametersConverter(jwtClientAuthenticationConverter);
}
@Test
public void getTokenResponseWhenSuccessResponseAndNotBearerTokenTypeThenThrowOAuth2AuthorizationException() {
this.server.enqueue(MockResponses.json("invalid-token-type-response.json"));
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
assertThatExceptionOfType(OAuth2AuthorizationException.class)
.isThrownBy(() -> this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block())
.withMessageContaining("[invalid_token_response]")
.withMessageContaining("An error occurred parsing the Access Token response")
.withCauseInstanceOf(Throwable.class);
}
@Test
public void getTokenResponseWhenSuccessResponseIncludesScopeThenAccessTokenHasResponseScope() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response-read.json"));
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken,
Collections.singleton("read"));
OAuth2AccessTokenResponse accessTokenResponse = this.tokenResponseClient
.getTokenResponse(refreshTokenGrantRequest)
.block();
RecordedRequest recordedRequest = this.server.takeRequest();
String formParameters = recordedRequest.getBody().readUtf8();
assertThat(formParameters).contains("scope=read");
assertThat(accessTokenResponse.getAccessToken().getScopes()).containsExactly("read");
}
@Test
public void getTokenResponseWhenErrorResponseThenThrowOAuth2AuthorizationException() {
this.server.enqueue(MockResponses.json("unauthorized-client-response.json").setResponseCode(400));
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
assertThatExceptionOfType(OAuth2AuthorizationException.class)
.isThrownBy(() -> this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block())
.satisfies((ex) -> assertThat(ex.getError().getErrorCode()).isEqualTo("unauthorized_client"))
.withMessageContaining("[unauthorized_client]");
}
@Test
public void getTokenResponseWhenServerErrorResponseThenThrowOAuth2AuthorizationException() {
this.server.enqueue(new MockResponse().setResponseCode(500));
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
assertThatExceptionOfType(OAuth2AuthorizationException.class)
.isThrownBy(() -> this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block())
.satisfies((ex) -> assertThat(ex.getError().getErrorCode()).isEqualTo("invalid_token_response"))
.withMessageContaining("[invalid_token_response]")
.withMessageContaining("Empty OAuth 2.0 Access Token Response");
}
// gh-10130
@Test
public void setHeadersConverterWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.setHeadersConverter(null))
.withMessage("headersConverter cannot be null");
}
// gh-10130
@Test
public void addHeadersConverterWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.addHeadersConverter(null))
.withMessage("headersConverter cannot be null");
}
// gh-10130
@Test
public void getTokenResponseWhenHeadersConverterAddedThenCalled() throws Exception {
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
Converter<OAuth2RefreshTokenGrantRequest, HttpHeaders> addedHeadersConverter = mock();
HttpHeaders headers = new HttpHeaders();
headers.put("custom-header-name", Collections.singletonList("custom-header-value"));
given(addedHeadersConverter.convert(request)).willReturn(headers);
this.tokenResponseClient.addHeadersConverter(addedHeadersConverter);
this.server.enqueue(MockResponses.json("access-token-response.json"));
this.tokenResponseClient.getTokenResponse(request).block();
verify(addedHeadersConverter).convert(request);
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getHeader(HttpHeaders.AUTHORIZATION))
.isEqualTo("Basic Y2xpZW50LWlkOmNsaWVudC1zZWNyZXQ=");
assertThat(actualRequest.getHeader("custom-header-name")).isEqualTo("custom-header-value");
}
// gh-10130
@Test
public void getTokenResponseWhenHeadersConverterSetThenCalled() throws Exception {
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
ClientRegistration clientRegistration = request.getClientRegistration();
Converter<OAuth2RefreshTokenGrantRequest, HttpHeaders> headersConverter1 = mock();
HttpHeaders headers = new HttpHeaders();
headers.setBasicAuth(clientRegistration.getClientId(), clientRegistration.getClientSecret());
given(headersConverter1.convert(request)).willReturn(headers);
this.tokenResponseClient.setHeadersConverter(headersConverter1);
this.server.enqueue(MockResponses.json("access-token-response.json"));
this.tokenResponseClient.getTokenResponse(request).block();
verify(headersConverter1).convert(request);
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getHeader(HttpHeaders.AUTHORIZATION))
.isEqualTo("Basic Y2xpZW50LWlkOmNsaWVudC1zZWNyZXQ=");
}
@Test
public void setParametersConverterWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.setParametersConverter(null))
.withMessage("parametersConverter cannot be null");
}
@Test
public void addParametersConverterWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.tokenResponseClient.addParametersConverter(null))
.withMessage("parametersConverter cannot be null");
}
@Test
public void getTokenResponseWhenParametersConverterAddedThenCalled() throws Exception {
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
Converter<OAuth2RefreshTokenGrantRequest, MultiValueMap<String, String>> addedParametersConverter = mock();
MultiValueMap<String, String> parameters = new LinkedMultiValueMap<>();
parameters.add("custom-parameter-name", "custom-parameter-value");
given(addedParametersConverter.convert(request)).willReturn(parameters);
this.tokenResponseClient.addParametersConverter(addedParametersConverter);
this.server.enqueue(MockResponses.json("access-token-response.json"));
this.tokenResponseClient.getTokenResponse(request).block();
verify(addedParametersConverter).convert(request);
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getBody().readUtf8()).contains("grant_type=refresh_token",
"custom-parameter-name=custom-parameter-value");
}
@Test
public void getTokenResponseWhenParametersConverterSetThenCalled() throws Exception {
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
Converter<OAuth2RefreshTokenGrantRequest, MultiValueMap<String, String>> parametersConverter = mock();
MultiValueMap<String, String> parameters = new LinkedMultiValueMap<>();
parameters.add("custom-parameter-name", "custom-parameter-value");
given(parametersConverter.convert(request)).willReturn(parameters);
this.tokenResponseClient.setParametersConverter(parametersConverter);
this.server.enqueue(MockResponses.json("access-token-response.json"));
this.tokenResponseClient.getTokenResponse(request).block();
verify(parametersConverter).convert(request);
RecordedRequest actualRequest = this.server.takeRequest();
assertThat(actualRequest.getBody().readUtf8()).contains("custom-parameter-name=custom-parameter-value");
}
@Test
public void getTokenResponseWhenParametersConverterSetThenAbleToOverrideDefaultParameters() throws Exception {
this.clientRegistrationBuilder.clientAuthenticationMethod(ClientAuthenticationMethod.CLIENT_SECRET_POST);
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
MultiValueMap<String, String> parameters = new LinkedMultiValueMap<>();
parameters.set(OAuth2ParameterNames.GRANT_TYPE, "custom");
parameters.set(OAuth2ParameterNames.REFRESH_TOKEN, "custom-token");
parameters.set(OAuth2ParameterNames.SCOPE, "one two");
this.tokenResponseClient.setParametersConverter((grantRequest) -> parameters);
this.server.enqueue(MockResponses.json("access-token-response.json"));
this.tokenResponseClient.getTokenResponse(request).block();
String formParameters = this.server.takeRequest().getBody().readUtf8();
// @formatter:off
assertThat(formParameters).contains(
param(OAuth2ParameterNames.GRANT_TYPE, "custom"),
param(OAuth2ParameterNames.CLIENT_ID, "client-id"),
param(OAuth2ParameterNames.REFRESH_TOKEN, "custom-token"),
param(OAuth2ParameterNames.SCOPE, "one two")
);
// @formatter:on
}
@Test
public void getTokenResponseWhenParametersCustomizerSetThenCalled() throws Exception {
this.server.enqueue(MockResponses.json("access-token-response.json"));
OAuth2RefreshTokenGrantRequest request = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
Consumer<MultiValueMap<String, String>> parametersCustomizer = mock();
this.tokenResponseClient.setParametersCustomizer(parametersCustomizer);
this.tokenResponseClient.getTokenResponse(request).block();
verify(parametersCustomizer).accept(any());
}
// gh-10260
@Test
public void getTokenResponseWhenSuccessCustomResponseThenReturnAccessTokenResponse() {
WebClientReactiveRefreshTokenTokenResponseClient customClient = new WebClientReactiveRefreshTokenTokenResponseClient();
BodyExtractor<Mono<OAuth2AccessTokenResponse>, ReactiveHttpInputMessage> extractor = mock();
OAuth2AccessTokenResponse response = TestOAuth2AccessTokenResponses.accessTokenResponse().build();
given(extractor.extract(any(), any())).willReturn(Mono.just(response));
customClient.setBodyExtractor(extractor);
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(
this.clientRegistrationBuilder.build(), this.accessToken, this.refreshToken);
this.server.enqueue(MockResponses.json("access-token-response.json"));
OAuth2AccessTokenResponse accessTokenResponse = customClient.getTokenResponse(refreshTokenGrantRequest).block();
assertThat(accessTokenResponse.getAccessToken()).isNotNull();
}
// gh-13144
@Test
public void getTokenResponseWhenCustomClientAuthenticationMethodThenIllegalArgument() {
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.clientAuthenticationMethod(new ClientAuthenticationMethod("basic"))
.build();
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(clientRegistration,
this.accessToken, this.refreshToken);
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block());
}
// gh-13144
@Test
public void getTokenResponseWhenUnsupportedClientAuthenticationMethodThenIllegalArgument() {
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.clientAuthenticationMethod(ClientAuthenticationMethod.CLIENT_SECRET_JWT)
.build();
OAuth2RefreshTokenGrantRequest refreshTokenGrantRequest = new OAuth2RefreshTokenGrantRequest(clientRegistration,
this.accessToken, this.refreshToken);
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> this.tokenResponseClient.getTokenResponse(refreshTokenGrantRequest).block());
}
private static String param(String parameterName, String parameterValue) {
return "%s=%s".formatted(parameterName, URLEncoder.encode(parameterValue, StandardCharsets.UTF_8));
}
}
|
WebClientReactiveRefreshTokenTokenResponseClientTests
|
java
|
netty__netty
|
transport/src/main/java/io/netty/channel/CoalescingBufferQueue.java
|
{
"start": 1578,
"end": 3618
}
|
class ____ extends AbstractCoalescingBufferQueue {
private final Channel channel;
public CoalescingBufferQueue(Channel channel) {
this(channel, 4);
}
public CoalescingBufferQueue(Channel channel, int initSize) {
this(channel, initSize, false);
}
public CoalescingBufferQueue(Channel channel, int initSize, boolean updateWritability) {
super(updateWritability ? channel : null, initSize);
this.channel = ObjectUtil.checkNotNull(channel, "channel");
}
/**
* Remove a {@link ByteBuf} from the queue with the specified number of bytes. Any added buffer who's bytes are
* fully consumed during removal will have it's promise completed when the passed aggregate {@link ChannelPromise}
* completes.
*
* @param bytes the maximum number of readable bytes in the returned {@link ByteBuf}, if {@code bytes} is greater
* than {@link #readableBytes} then a buffer of length {@link #readableBytes} is returned.
* @param aggregatePromise used to aggregate the promises and listeners for the constituent buffers.
* @return a {@link ByteBuf} composed of the enqueued buffers.
*/
public ByteBuf remove(int bytes, ChannelPromise aggregatePromise) {
return remove(channel.alloc(), bytes, aggregatePromise);
}
/**
* Release all buffers in the queue and complete all listeners and promises.
*/
public void releaseAndFailAll(Throwable cause) {
releaseAndFailAll(channel, cause);
}
@Override
protected ByteBuf compose(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf next) {
if (cumulation instanceof CompositeByteBuf) {
CompositeByteBuf composite = (CompositeByteBuf) cumulation;
composite.addComponent(true, next);
return composite;
}
return composeIntoComposite(alloc, cumulation, next);
}
@Override
protected ByteBuf removeEmptyValue() {
return Unpooled.EMPTY_BUFFER;
}
}
|
CoalescingBufferQueue
|
java
|
apache__dubbo
|
dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/destination/DestinationRuleSpec.java
|
{
"start": 901,
"end": 1751
}
|
class ____ {
private String host;
private List<Subset> subsets;
private TrafficPolicy trafficPolicy;
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public List<Subset> getSubsets() {
return subsets;
}
public void setSubsets(List<Subset> subsets) {
this.subsets = subsets;
}
public TrafficPolicy getTrafficPolicy() {
return trafficPolicy;
}
public void setTrafficPolicy(TrafficPolicy trafficPolicy) {
this.trafficPolicy = trafficPolicy;
}
@Override
public String toString() {
return "DestinationRuleSpec{" + "host='"
+ host + '\'' + ", subsets="
+ subsets + ", trafficPolicy="
+ trafficPolicy + '}';
}
}
|
DestinationRuleSpec
|
java
|
google__error-prone
|
test_helpers/src/main/java/com/google/errorprone/BugCheckerRefactoringTestHelper.java
|
{
"start": 4715,
"end": 4865
}
|
interface ____ {
Fix choose(List<Fix> fixes);
}
/** Predefined FixChoosers for selecting a fix by its position in the list */
public
|
FixChooser
|
java
|
resilience4j__resilience4j
|
resilience4j-circularbuffer/src/jcstress/java/io/github/resilience4j/circularbuffer/concurrent/ConcurrentEvictingQueueDoubleWriteTest.java
|
{
"start": 1003,
"end": 1461
}
|
class ____ {
ConcurrentEvictingQueue<Integer> queue;
public ConcurrentEvictingQueueDoubleWriteTest() {
queue = new ConcurrentEvictingQueue<>(3);
}
@Actor
public void firstActor() {
queue.offer(1);
}
@Actor
public void secondActor() {
queue.offer(2);
}
@Arbiter
public void arbiter(StringResult1 result) {
result.r1 = queue.toString();
}
}
|
ConcurrentEvictingQueueDoubleWriteTest
|
java
|
spring-projects__spring-framework
|
spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/DummyFactory.java
|
{
"start": 1615,
"end": 4759
}
|
class ____
implements FactoryBean<Object>, BeanNameAware, BeanFactoryAware, InitializingBean, DisposableBean {
public static final String SINGLETON_NAME = "Factory singleton";
private static boolean prototypeCreated;
/**
* Clear static state.
*/
public static void reset() {
prototypeCreated = false;
}
/**
* Default is for factories to return a singleton instance.
*/
private boolean singleton = true;
private String beanName;
private AutowireCapableBeanFactory beanFactory;
private boolean postProcessed;
private boolean initialized;
private TestBean testBean;
private TestBean otherTestBean;
public DummyFactory() {
this.testBean = new TestBean();
this.testBean.setName(SINGLETON_NAME);
this.testBean.setAge(25);
}
/**
* Return if the bean managed by this factory is a singleton.
* @see FactoryBean#isSingleton()
*/
@Override
public boolean isSingleton() {
return this.singleton;
}
/**
* Set if the bean managed by this factory is a singleton.
*/
public void setSingleton(boolean singleton) {
this.singleton = singleton;
}
@Override
public void setBeanName(String beanName) {
this.beanName = beanName;
}
public String getBeanName() {
return beanName;
}
@Override
@SuppressWarnings("deprecation")
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = (AutowireCapableBeanFactory) beanFactory;
this.beanFactory.applyBeanPostProcessorsBeforeInitialization(this.testBean, this.beanName);
}
public BeanFactory getBeanFactory() {
return beanFactory;
}
public void setPostProcessed(boolean postProcessed) {
this.postProcessed = postProcessed;
}
public boolean isPostProcessed() {
return postProcessed;
}
public void setOtherTestBean(TestBean otherTestBean) {
this.otherTestBean = otherTestBean;
this.testBean.setSpouse(otherTestBean);
}
public TestBean getOtherTestBean() {
return otherTestBean;
}
@Override
public void afterPropertiesSet() {
if (initialized) {
throw new RuntimeException("Cannot call afterPropertiesSet twice on the one bean");
}
this.initialized = true;
}
/**
* Was this initialized by invocation of the
* afterPropertiesSet() method from the InitializingBean interface?
*/
public boolean wasInitialized() {
return initialized;
}
public static boolean wasPrototypeCreated() {
return prototypeCreated;
}
/**
* Return the managed object, supporting both singleton
* and prototype mode.
* @see FactoryBean#getObject()
*/
@Override
@SuppressWarnings("deprecation")
public Object getObject() throws BeansException {
if (isSingleton()) {
return this.testBean;
}
else {
TestBean prototype = new TestBean("prototype created at " + System.currentTimeMillis(), 11);
if (this.beanFactory != null) {
this.beanFactory.applyBeanPostProcessorsBeforeInitialization(prototype, this.beanName);
}
prototypeCreated = true;
return prototype;
}
}
@Override
public Class<?> getObjectType() {
return TestBean.class;
}
@Override
public void destroy() {
if (this.testBean != null) {
this.testBean.setName(null);
}
}
}
|
DummyFactory
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LuceneEndpointBuilderFactory.java
|
{
"start": 1574,
"end": 1993
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedLuceneEndpointBuilder advanced() {
return (AdvancedLuceneEndpointBuilder) this;
}
/**
* An Analyzer builds TokenStreams, which analyze text. It thus
* represents a policy for extracting index terms from text. The value
* for analyzer can be any
|
LuceneEndpointBuilder
|
java
|
elastic__elasticsearch
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/arrays/TDigestArrays.java
|
{
"start": 993,
"end": 1062
}
|
interface ____ BigArrays-like classes used within TDigest.
*/
public
|
for
|
java
|
apache__spark
|
common/unsafe/src/main/java/org/apache/spark/unsafe/UTF8StringBuilder.java
|
{
"start": 957,
"end": 1086
}
|
class ____ write {@link UTF8String}s to an internal buffer and build the concatenated
* {@link UTF8String} at the end.
*/
public
|
to
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/shuffle/ShuffleDescriptorRetriever.java
|
{
"start": 1139,
"end": 1439
}
|
interface ____ {
/**
* Get shuffle descriptor by JobID and ResultPartitionId.
*
* @return shuffle descriptor or empty if not exist.
*/
Optional<ShuffleDescriptor> getShuffleDescriptor(
JobID jobID, ResultPartitionID resultPartitionID);
}
|
ShuffleDescriptorRetriever
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cacheable/api/JpaCacheApiUsageTest.java
|
{
"start": 1041,
"end": 1718
}
|
class ____ {
@Test
public void testEviction(EntityManagerFactoryScope scope) {
// first create an Order
scope.inTransaction(
entityManager ->
entityManager.persist( new Order( 1, 500 ) )
);
final EntityManagerFactory entityManagerFactory = scope.getEntityManagerFactory();
assertTrue( entityManagerFactory.getCache().contains( Order.class, 1 ) );
scope.inTransaction(
entityManager -> {
assertTrue( entityManagerFactory.getCache().contains( Order.class, 1 ) );
entityManager.createQuery( "delete Order" ).executeUpdate();
}
);
assertFalse( entityManagerFactory.getCache().contains( Order.class, 1 ) );
}
}
|
JpaCacheApiUsageTest
|
java
|
google__dagger
|
javatests/artifacts/dagger/build-tests/src/test/java/buildtests/InjectClassNonDaggerMethodTest.java
|
{
"start": 3734,
"end": 4363
}
|
interface ____ {",
" Foo foo();",
"}");
GradleModule.create(projectDir, "library1")
.addBuildFile(
"plugins {",
" id 'java'",
" id 'java-library'",
"}",
"dependencies {",
" implementation \"com.google.dagger:dagger:$dagger_version\"",
" annotationProcessor \"com.google.dagger:dagger-compiler:$dagger_version\"",
"}")
.addSrcFile(
"Foo.java",
"package library1;",
"",
"import javax.inject.Inject;",
"",
"
|
MyComponent
|
java
|
apache__camel
|
components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/AS2ServerConnection.java
|
{
"start": 15016,
"end": 15922
}
|
class
____.info("Listening on port {}", serversocket.getLocalPort());
while (!Thread.interrupted()) {
try {
// Set up incoming HTTP connection
final Socket inSocket = serversocket.accept();
// Start worker thread, using the service's HttpService
final Thread t = new RequestHandlerThread(this.service.httpService, inSocket);
t.setDaemon(true);
t.start();
} catch (final InterruptedIOException | SocketException ex) {
// If interrupted or server socket closed
break;
} catch (final IOException e) {
LOG.error("I/O error initialising connection thread: {}", e.getMessage());
break;
}
}
}
}
|
LOG
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/bean/issues/PrivateClasses.java
|
{
"start": 940,
"end": 1193
}
|
class ____ {
public static final String EXPECTED_OUTPUT = "Hello Camel";
public static final String METHOD_NAME = "sayHello";
private PrivateClasses() {
// Utility class; can't be instantiated
}
/**
* Public
|
PrivateClasses
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit3TestNotRunTest.java
|
{
"start": 4269,
"end": 4649
}
|
class ____ extends TestCase {
public void tesBasic() {}
public void tesMoreSpaces() {}
public void tesMultiline() {}
}
""")
.addOutputLines(
"out/PositiveCases.java",
"""
import junit.framework.TestCase;
import org.junit.Test;
public
|
PositiveCases
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java
|
{
"start": 3956,
"end": 10585
}
|
class ____ implements AuthenticateCallbackHandler {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerUnsecuredValidatorCallbackHandler.class);
private static final String OPTION_PREFIX = "unsecuredValidator";
private static final String PRINCIPAL_CLAIM_NAME_OPTION = OPTION_PREFIX + "PrincipalClaimName";
private static final String SCOPE_CLAIM_NAME_OPTION = OPTION_PREFIX + "ScopeClaimName";
private static final String REQUIRED_SCOPE_OPTION = OPTION_PREFIX + "RequiredScope";
private static final String ALLOWABLE_CLOCK_SKEW_MILLIS_OPTION = OPTION_PREFIX + "AllowableClockSkewMs";
private Time time = Time.SYSTEM;
private Map<String, String> moduleOptions = null;
private boolean configured = false;
/**
* For testing
*
* @param time
* the mandatory time to set
*/
void time(Time time) {
this.time = Objects.requireNonNull(time);
}
/**
* Return true if this instance has been configured, otherwise false
*
* @return true if this instance has been configured, otherwise false
*/
public boolean configured() {
return configured;
}
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
if (!OAuthBearerLoginModule.OAUTHBEARER_MECHANISM.equals(saslMechanism))
throw new IllegalArgumentException(String.format("Unexpected SASL mechanism: %s", saslMechanism));
if (Objects.requireNonNull(jaasConfigEntries).size() != 1 || jaasConfigEntries.get(0) == null)
throw new IllegalArgumentException(
String.format("Must supply exactly 1 non-null JAAS mechanism configuration (size was %d)",
jaasConfigEntries.size()));
this.moduleOptions = Collections
.unmodifiableMap((Map<String, String>) jaasConfigEntries.get(0).getOptions());
configured = true;
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
if (!configured())
throw new IllegalStateException("Callback handler not configured");
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerValidatorCallback) {
OAuthBearerValidatorCallback validationCallback = (OAuthBearerValidatorCallback) callback;
try {
handleCallback(validationCallback);
} catch (OAuthBearerIllegalTokenException e) {
OAuthBearerValidationResult failureReason = e.reason();
String failureScope = failureReason.failureScope();
validationCallback.error(failureScope != null ? "insufficient_scope" : "invalid_token",
failureScope, failureReason.failureOpenIdConfig());
}
} else if (callback instanceof OAuthBearerExtensionsValidatorCallback) {
OAuthBearerExtensionsValidatorCallback extensionsCallback = (OAuthBearerExtensionsValidatorCallback) callback;
extensionsCallback.inputExtensions().map().forEach((extensionName, v) -> extensionsCallback.valid(extensionName));
} else
throw new UnsupportedCallbackException(callback);
}
}
@Override
public void close() {
// empty
}
private void handleCallback(OAuthBearerValidatorCallback callback) {
String tokenValue = callback.tokenValue();
if (tokenValue == null)
throw new IllegalArgumentException("Callback missing required token value");
String principalClaimName = principalClaimName();
String scopeClaimName = scopeClaimName();
List<String> requiredScope = requiredScope();
int allowableClockSkewMs = allowableClockSkewMs();
OAuthBearerUnsecuredJws unsecuredJwt = new OAuthBearerUnsecuredJws(tokenValue, principalClaimName,
scopeClaimName);
long now = time.milliseconds();
OAuthBearerValidationUtils
.validateClaimForExistenceAndType(unsecuredJwt, true, principalClaimName, String.class)
.throwExceptionIfFailed();
OAuthBearerValidationUtils.validateIssuedAt(unsecuredJwt, false, now, allowableClockSkewMs)
.throwExceptionIfFailed();
OAuthBearerValidationUtils.validateExpirationTime(unsecuredJwt, now, allowableClockSkewMs)
.throwExceptionIfFailed();
OAuthBearerValidationUtils.validateTimeConsistency(unsecuredJwt).throwExceptionIfFailed();
OAuthBearerValidationUtils.validateScope(unsecuredJwt, requiredScope).throwExceptionIfFailed();
log.info("Successfully validated token with principal {}: {}", unsecuredJwt.principalName(),
unsecuredJwt.claims());
callback.token(unsecuredJwt);
}
private String principalClaimName() {
String principalClaimNameValue = option(PRINCIPAL_CLAIM_NAME_OPTION);
return Utils.isBlank(principalClaimNameValue) ? "sub" : principalClaimNameValue.trim();
}
private String scopeClaimName() {
String scopeClaimNameValue = option(SCOPE_CLAIM_NAME_OPTION);
return Utils.isBlank(scopeClaimNameValue) ? "scope" : scopeClaimNameValue.trim();
}
private List<String> requiredScope() {
String requiredSpaceDelimitedScope = option(REQUIRED_SCOPE_OPTION);
return Utils.isBlank(requiredSpaceDelimitedScope) ? Collections.emptyList() : OAuthBearerScopeUtils.parseScope(requiredSpaceDelimitedScope.trim());
}
private int allowableClockSkewMs() {
String allowableClockSkewMsValue = option(ALLOWABLE_CLOCK_SKEW_MILLIS_OPTION);
int allowableClockSkewMs;
try {
allowableClockSkewMs = Utils.isBlank(allowableClockSkewMsValue) ? 0 : Integer.parseInt(allowableClockSkewMsValue.trim());
} catch (NumberFormatException e) {
throw new OAuthBearerConfigException(e.getMessage(), e);
}
if (allowableClockSkewMs < 0) {
throw new OAuthBearerConfigException(
String.format("Allowable clock skew millis must not be negative: %s", allowableClockSkewMsValue));
}
return allowableClockSkewMs;
}
private String option(String key) {
if (!configured)
throw new IllegalStateException("Callback handler not configured");
return moduleOptions.get(Objects.requireNonNull(key));
}
}
|
OAuthBearerUnsecuredValidatorCallbackHandler
|
java
|
elastic__elasticsearch
|
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java
|
{
"start": 909,
"end": 1672
}
|
class ____ extends AbstractTokenizerFactory {
private final Pattern pattern;
private final int group;
PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
if (sPattern == null) {
throw new IllegalArgumentException("pattern is missing for [" + name + "] tokenizer of type 'pattern'");
}
this.pattern = Regex.compile(sPattern, settings.get("flags"));
this.group = settings.getAsInt("group", -1);
}
@Override
public Tokenizer create() {
return new PatternTokenizer(pattern, group);
}
}
|
PatternTokenizerFactory
|
java
|
quarkusio__quarkus
|
independent-projects/bootstrap/runner/src/main/java/io/quarkus/bootstrap/runner/JarResource.java
|
{
"start": 802,
"end": 2076
}
|
class ____ implements ClassLoadingResource {
private volatile ProtectionDomain protectionDomain;
private final ManifestInfo manifestInfo;
final Path jarPath;
final AtomicReference<CompletableFuture<JarFileReference>> jarFileReference = new AtomicReference<>();
public JarResource(ManifestInfo manifestInfo, Path jarPath) {
this.manifestInfo = manifestInfo;
this.jarPath = jarPath;
}
@Override
public void init() {
final URL url;
try {
String path = jarPath.toAbsolutePath().toString();
if (!path.startsWith("/")) {
path = '/' + path;
}
URI uri = new URI("file", null, path, null);
url = new URL((URL) null, uri.toString(), new JarUrlStreamHandler(uri));
} catch (URISyntaxException | MalformedURLException e) {
throw new RuntimeException("Unable to create protection domain for " + jarPath, e);
}
this.protectionDomain = new ProtectionDomain(new CodeSource(url, (Certificate[]) null), null);
}
@Override
public byte[] getResourceData(String resource) {
return JarFileReference.withJarFile(this, resource, JarResourceDataProvider.INSTANCE);
}
private static
|
JarResource
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/plugins/internal/RestExtension.java
|
{
"start": 705,
"end": 1757
}
|
interface ____ {
/**
* Returns a filter that determines which cat actions are exposed in /_cat.
*
* The filter should return {@code true} if an action should be included,
* or {@code false} otherwise.
*/
Predicate<AbstractCatAction> getCatActionsFilter();
/**
* Returns a filter that determines which rest actions are exposed.
*
* The filter should return {@code false} if an action should be included,
* or {@code false} if the paths
* @return
*/
Predicate<RestHandler> getActionsFilter();
/**
* Returns a rest extension which allows all rest endpoints through.
*/
static RestExtension allowAll() {
return new RestExtension() {
@Override
public Predicate<AbstractCatAction> getCatActionsFilter() {
return Predicates.always();
}
@Override
public Predicate<RestHandler> getActionsFilter() {
return Predicates.always();
}
};
}
}
|
RestExtension
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/logging/log4j2/SpringEnvironmentPropertySourceTests.java
|
{
"start": 1117,
"end": 2739
}
|
class ____ {
private MockEnvironment environment;
private SpringEnvironmentPropertySource propertySource;
@BeforeEach
void setup() {
this.environment = new MockEnvironment();
this.environment.setProperty("spring", "boot");
this.propertySource = new SpringEnvironmentPropertySource();
this.propertySource.setEnvironment(this.environment);
}
@Test
void getPriorityIsOrderedCorrectly() {
int priority = this.propertySource.getPriority();
assertThat(priority).isEqualTo(-100);
assertThat(priority).isLessThan(new SystemPropertiesPropertySource().getPriority());
assertThat(priority).isLessThan(new PropertiesPropertySource(new Properties()).getPriority());
}
@Test
void getPropertyWhenInEnvironmentReturnsValue() {
assertThat(this.propertySource.getProperty("spring")).isEqualTo("boot");
}
@Test
void getPropertyWhenEnvironmentIsNullReturnsNull() {
this.propertySource.setEnvironment(null);
assertThat(this.propertySource.getProperty("spring")).isNull();
}
@Test
void getPropertyWhenNotInEnvironmentReturnsNull() {
assertThat(this.propertySource.getProperty("nope")).isNull();
}
@Test
void containsPropertyWhenInEnvironmentReturnsTrue() {
assertThat(this.propertySource.containsProperty("spring")).isTrue();
}
@Test
void containsPropertyWhenEnvironmentIsNullReturnsFalse() {
this.propertySource.setEnvironment(null);
assertThat(this.propertySource.containsProperty("spring")).isFalse();
}
@Test
void containsPropertyWhenNotInEnvironmentReturnsFalse() {
assertThat(this.propertySource.containsProperty("nope")).isFalse();
}
}
|
SpringEnvironmentPropertySourceTests
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/RequestMappingHandlerMappingTests.java
|
{
"start": 26516,
"end": 26553
}
|
class ____ {
}
@Controller
static
|
Foo
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/authentication/logout/ForwardLogoutSuccessHandler.java
|
{
"start": 1238,
"end": 1934
}
|
class ____ implements LogoutSuccessHandler {
private final String targetUrl;
/**
* Construct a new {@link ForwardLogoutSuccessHandler} with the given target URL.
* @param targetUrl the target URL
*/
public ForwardLogoutSuccessHandler(String targetUrl) {
Assert.isTrue(UrlUtils.isValidRedirectUrl(targetUrl), () -> "'" + targetUrl + "' is not a valid target URL");
this.targetUrl = targetUrl;
}
@Override
public void onLogoutSuccess(HttpServletRequest request, HttpServletResponse response,
@Nullable Authentication authentication) throws IOException, ServletException {
request.getRequestDispatcher(this.targetUrl).forward(request, response);
}
}
|
ForwardLogoutSuccessHandler
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/context/request/RequestContextHolder.java
|
{
"start": 5325,
"end": 5754
}
|
class ____ {
public static @Nullable RequestAttributes getFacesRequestAttributes() {
try {
FacesContext facesContext = FacesContext.getCurrentInstance();
return (facesContext != null ? new FacesRequestAttributes(facesContext) : null);
}
catch (NoClassDefFoundError err) {
// typically for com/sun/faces/util/Util if only the JSF API jar is present
return null;
}
}
}
}
|
FacesRequestAttributesFactory
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/apidiff/CompilationBuilderHelpers.java
|
{
"start": 1813,
"end": 1909
}
|
class ____ {
/** A collection of sources to compile. */
public static
|
CompilationBuilderHelpers
|
java
|
FasterXML__jackson-core
|
src/main/java/tools/jackson/core/exc/InputCoercionException.java
|
{
"start": 487,
"end": 2413
}
|
class ____ extends StreamReadException
{
private static final long serialVersionUID = 3L;
/**
* Input token that represents input value that failed to coerce.
*/
protected final JsonToken _inputType;
/**
* Target type that input value failed to coerce to.
*/
protected final Class<?> _targetType;
/**
* Constructor that uses current parsing location as location, and
* sets processor (accessible via {@link #processor()}) to
* specified parser.
*
* @param p Parser in use at the point where failure occurred
* @param msg Exception mesage to use
* @param inputType Shape of input that failed to coerce
* @param targetType Target type of failed coercion
*/
public InputCoercionException(JsonParser p, String msg,
JsonToken inputType, Class<?> targetType) {
super(p, msg);
_inputType = inputType;
_targetType = targetType;
}
/**
* Fluent method that may be used to assign originating {@link JsonParser},
* to be accessed using {@link #processor()}.
*<p>
* NOTE: `this` instance is modified and no new instance is constructed.
*/
@Override
public InputCoercionException withParser(JsonParser p) {
_processor = p;
return this;
}
/**
* Accessor for getting information about input type (in form of token, giving "shape"
* of input) for which coercion failed.
*
* @return "Shape" of input for which coercion failed, as {@link JsonToken}
*/
public JsonToken getInputType() {
return _inputType;
}
/**
* Accessor for getting information about target type (in form of Java {@link java.lang.Class})
* for which coercion failed.
*
* @return Target type of failed conversion
*/
public Class<?> getTargetType() {
return _targetType;
}
}
|
InputCoercionException
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TableFactoryHarness.java
|
{
"start": 12476,
"end": 12710
}
|
interface ____ are default-implemented for convenience, but can be overridden when
* necessary. By default, a {@link ScanRuntimeProvider} is used which doesn't produce anything.
*
* <p>Sources derived from this base
|
methods
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java
|
{
"start": 13848,
"end": 14258
}
|
class ____<T extends ImmutableList<String>> {
final T t = null;
}
""")
.doTest();
}
@Test
public void immutableTypeArgumentInstantiation() {
compilationHelper
.addSourceLines(
"Holder.java",
"""
import com.google.errorprone.annotations.Immutable;
@Immutable(containerOf = "T")
public
|
Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-quartz/src/main/java/org/springframework/boot/quartz/actuate/endpoint/QuartzEndpoint.java
|
{
"start": 14271,
"end": 14722
}
|
class ____ {
private final String className;
QuartzJobSummaryDescriptor(JobDetail job) {
this.className = job.getJobClass().getName();
}
private static QuartzJobSummaryDescriptor of(JobDetail job) {
return new QuartzJobSummaryDescriptor(job);
}
public String getClassName() {
return this.className;
}
}
/**
* Description of a triggered on-demand {@link Job Quartz Job}.
*/
public static final
|
QuartzJobSummaryDescriptor
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceResponseHandlerTests.java
|
{
"start": 1013,
"end": 5612
}
|
class ____ extends ESTestCase {
public void testCheckForFailureStatusCode() {
var statusLine = mock(StatusLine.class);
var httpResponse = mock(HttpResponse.class);
when(httpResponse.getStatusLine()).thenReturn(statusLine);
var mockRequest = RequestTests.mockRequest("id");
var httpResult = new HttpResult(httpResponse, new byte[] {});
var handler = new HuggingFaceResponseHandler("", (request, result) -> null);
// 200 ok
when(statusLine.getStatusCode()).thenReturn(200);
handler.checkForFailureStatusCode(mockRequest, httpResult);
// 503
when(statusLine.getStatusCode()).thenReturn(503);
var retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertTrue(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Received a rate limit status code for request from inference entity id [id] status [503]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.BAD_REQUEST));
// 502
when(statusLine.getStatusCode()).thenReturn(502);
retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertTrue(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Received a rate limit status code for request from inference entity id [id] status [502]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.BAD_REQUEST));
// 429
when(statusLine.getStatusCode()).thenReturn(429);
retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertTrue(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Received a rate limit status code for request from inference entity id [id] status [429]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.TOO_MANY_REQUESTS));
// 413
when(statusLine.getStatusCode()).thenReturn(413);
retryException = expectThrows(ContentTooLargeException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertTrue(retryException.shouldRetry());
assertThat(retryException.getCause().getMessage(), containsString("Received a content too large status code"));
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.REQUEST_ENTITY_TOO_LARGE));
// 401
when(statusLine.getStatusCode()).thenReturn(401);
retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertFalse(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Received an authentication error status code for request from inference entity id [id] status [401]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.UNAUTHORIZED));
// 300
when(statusLine.getStatusCode()).thenReturn(300);
retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertFalse(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Unhandled redirection for request from inference entity id [id] status [300]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.MULTIPLE_CHOICES));
// 402
when(statusLine.getStatusCode()).thenReturn(402);
retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult));
assertFalse(retryException.shouldRetry());
assertThat(
retryException.getCause().getMessage(),
containsString("Received an unsuccessful status code for request from inference entity id [id] status [402]")
);
assertThat(((ElasticsearchStatusException) retryException.getCause()).status(), is(RestStatus.PAYMENT_REQUIRED));
}
}
|
HuggingFaceResponseHandlerTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.