language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangeType.java | {
"start": 10491,
"end": 10871
} | class ____ extends AbstractChangePoint {
public static final String NAME = "dip";
public Dip(double pValue, int changePoint) {
super(pValue, changePoint);
}
public Dip(StreamInput in) throws IOException {
super(in);
}
@Override
public String getName() {
return NAME;
}
}
}
| Dip |
java | spring-projects__spring-boot | module/spring-boot-health/src/test/java/org/springframework/boot/health/contributor/MapCompositeTests.java | {
"start": 1337,
"end": 5273
} | class ____<T, C, E> {
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenMapIsNullThrowsException() {
assertThatIllegalArgumentException().isThrownBy(() -> createWithData(null, Function.identity()))
.withMessage("'map' must not be null");
}
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenValueAdapterIsNullThrowsException() {
assertThatIllegalArgumentException().isThrownBy(() -> createWithData(Collections.emptyMap(), null))
.withMessage("'valueAdapter' must not be null");
}
@Test
void createWhenMapContainsNullValueThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> createWithData(Collections.singletonMap("test", null), Function.identity()))
.withMessage("'map' must not contain null values");
}
@Test
void createWhenMapContainsNullKeyThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> createWithData(Collections.singletonMap(null, "test"), Function.identity()))
.withMessage("'map' must not contain null keys");
}
@Test
void createWhenMapContainsKeyWithSlashThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> createWithData(Collections.singletonMap("test/key", "test"), Function.identity()))
.withMessage("'map' keys must not contain a '/'");
}
@Test
void streamReturnsAdaptedEntries() {
T composite = create();
List<E> streamed = stream(composite).toList();
assertThat(streamed).hasSize(2);
E one = streamed.get(0);
E two = streamed.get(1);
assertThat(getName(one)).isEqualTo("one");
assertThat(getData(getContributor(one))).isEqualTo("eno");
assertThat(getName(two)).isEqualTo("two");
assertThat(getData(getContributor(two))).isEqualTo("owt");
}
@Test
void getContributorReturnsAdaptedEntry() {
T composite = create();
assertThat(getContributorData(composite, "one")).isEqualTo("eno");
assertThat(getContributorData(composite, "two")).isEqualTo("owt");
}
@Test
void getContributorCallsAdaptersOnlyOnce() {
Map<String, String> map = new LinkedHashMap<>();
map.put("one", "one");
map.put("two", "two");
int callCount = map.size();
AtomicInteger counter = new AtomicInteger(0);
T composite = createWithData(map, (name) -> count(name, counter));
assertThat(getContributorData(composite, "one")).isEqualTo("eno");
assertThat(counter.get()).isEqualTo(callCount);
assertThat(getContributorData(composite, "two")).isEqualTo("owt");
assertThat(counter.get()).isEqualTo(callCount);
}
@Test
void getContributorWhenNotInMapReturnsNull() {
T composite = create();
assertThat(getContributor(composite, "missing")).isNull();
}
private String count(CharSequence charSequence, AtomicInteger counter) {
counter.incrementAndGet();
return reverse(charSequence);
}
private String reverse(CharSequence charSequence) {
return new StringBuilder(charSequence).reverse().toString();
}
private T create() {
Map<String, String> map = new LinkedHashMap<>();
map.put("one", "one");
map.put("two", "two");
return createWithData(map, this::reverse);
}
private T createWithData(Map<String, String> map, Function<String, String> dataAdapter) {
return create(map, (dataAdapter != null) ? (key) -> createContributor(dataAdapter.apply(key)) : null);
}
private @Nullable String getContributorData(T composite, String name) {
C contributor = getContributor(composite, name);
assertThat(contributor).isNotNull();
return getData(contributor);
}
protected abstract T create(Map<String, String> map, @Nullable Function<String, C> valueAdapter);
protected abstract Stream<E> stream(T composite);
protected abstract @Nullable C getContributor(T composite, String name);
protected abstract C createContributor(String data);
protected abstract @Nullable String getData(C contributor);
protected abstract String getName(E entry);
protected abstract C getContributor(E entry);
}
| MapCompositeTests |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/assignment/ExistingInstanceSetterWrapperForCollectionsAndMaps.java | {
"start": 1495,
"end": 2886
} | class ____
extends SetterWrapperForCollectionsAndMapsWithNullCheck {
private final boolean includeElseBranch;
private final boolean mapNullToDefault;
private final Type targetType;
public ExistingInstanceSetterWrapperForCollectionsAndMaps(Assignment decoratedAssignment,
List<Type> thrownTypesToExclude,
Type targetType,
NullValueCheckStrategyGem nvcs,
NullValuePropertyMappingStrategyGem nvpms,
TypeFactory typeFactory,
boolean fieldAssignment) {
super(
decoratedAssignment,
thrownTypesToExclude,
targetType,
typeFactory,
fieldAssignment
);
this.mapNullToDefault = SET_TO_DEFAULT == nvpms;
this.targetType = targetType;
this.includeElseBranch = ALWAYS != nvcs && IGNORE != nvpms;
}
@Override
public Set<Type> getImportTypes() {
Set<Type> imported = new HashSet<>( super.getImportTypes() );
if ( isMapNullToDefault() && ( targetType.getImplementationType() != null ) ) {
imported.add( targetType.getImplementationType() );
}
return imported;
}
public boolean isIncludeElseBranch() {
return includeElseBranch;
}
public boolean isMapNullToDefault() {
return mapNullToDefault;
}
}
| ExistingInstanceSetterWrapperForCollectionsAndMaps |
java | mockito__mockito | mockito-extensions/mockito-errorprone/src/main/java/org/mockito/errorprone/bugpatterns/AbstractMockitoAnyForPrimitiveType.java | {
"start": 1174,
"end": 5666
} | class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
protected abstract Matcher<? super MethodInvocationTree> matcher();
protected abstract String formatMessage(
String expectedTypeAsString, Type matcherType, String replacementName);
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!matcher().matches(tree, state)) {
return NO_MATCH;
}
MethodSymbol method = ASTHelpers.getSymbol(tree);
Type matcherType = method.getReturnType();
// It is expected that the call to anyX() is itself the argument to another call which is
// the one being mocked, e.g. something like this:
// when(mock.call(..., anyInt(), ...))...
TreePath path = state.getPath();
Tree parentTree = path.getParentPath().getLeaf();
if (!(parentTree instanceof MethodInvocationTree)) {
// Ignore calls that are not arguments to another method call.
// TODO: Report this as a problem because it makes little sense.
// TODO: Support casting.
return NO_MATCH;
}
MethodInvocationTree parentCall = (MethodInvocationTree) parentTree;
MethodSymbol parentMethod = ASTHelpers.getSymbol(parentCall);
// Find the index of the argument in the parent call.
int argumentIndex = -1;
List<? extends ExpressionTree> parentArguments = parentCall.getArguments();
for (int i = 0; i < parentArguments.size(); i++) {
ExpressionTree argumentTree = parentArguments.get(i);
if (argumentTree == tree) {
argumentIndex = i;
break;
}
}
if (argumentIndex == -1) {
throw new IllegalStateException(
"Cannot find argument "
+ state.getSourceForNode(tree)
+ " in argument list from "
+ state.getSourceForNode(parentTree));
}
Type parameterType = getParameterType(parentMethod, argumentIndex);
TypeKind parameterTypeKind = parameterType.getKind();
if (parameterTypeKind.isPrimitive() && parameterTypeKind != matcherType.getKind()) {
String expectedTypeAsString = parameterType.toString();
String replacementName =
"any"
+ Character.toUpperCase(expectedTypeAsString.charAt(0))
+ expectedTypeAsString.substring(1);
String message = formatMessage(expectedTypeAsString, matcherType, replacementName);
SuggestedFix.Builder fixBuilder = SuggestedFix.builder();
ExpressionTree methodSelect = tree.getMethodSelect();
String replacement;
if (methodSelect instanceof MemberSelectTree) {
MemberSelectTree qualifier = (MemberSelectTree) methodSelect;
replacement =
state.getSourceForNode(qualifier.getExpression()) + "." + replacementName;
} else {
replacement = replacementName;
String staticImport = method.owner + "." + replacementName;
fixBuilder.addStaticImport(staticImport);
}
SuggestedFix fix = fixBuilder.replace(tree, replacement + "()").build();
return buildDescription(tree).setMessage(message).addFix(fix).build();
}
return NO_MATCH;
}
/**
* Get the type of the parameter for a supplied argument.
*
* @param method the method symbol that is being called.
* @param argumentIndex the index of the argument, can be greater than the number of parameters
* for a var arg method.
* @return the type of the associated parameter.
*/
private Type getParameterType(MethodSymbol method, int argumentIndex) {
List<VarSymbol> parameters = method.getParameters();
Type parameterType;
int parameterCount = parameters.size();
if (argumentIndex >= parameterCount && method.isVarArgs()) {
VarSymbol varArgParameter = parameters.get(parameterCount - 1);
parameterType = ((ArrayType) varArgParameter.asType()).getComponentType();
} else {
parameterType = parameters.get(argumentIndex).asType();
}
return parameterType;
}
}
| AbstractMockitoAnyForPrimitiveType |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java | {
"start": 7531,
"end": 17631
} | class ____ implements KafkaStreams.StateListener {
int numChanges = 0;
KafkaStreams.State oldState;
KafkaStreams.State newState;
public Map<KafkaStreams.State, Long> mapStates = new HashMap<>();
@Override
public void onChange(final KafkaStreams.State newState,
final KafkaStreams.State oldState) {
final long prevCount = mapStates.containsKey(newState) ? mapStates.get(newState) : 0;
numChanges++;
this.oldState = oldState;
this.newState = newState;
mapStates.put(newState, prevCount + 1);
}
}
@BeforeEach
public void before(final TestInfo testInfo) throws Exception {
time = new MockTime();
supplier = new MockClientSupplier();
supplier.setCluster(Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 9999))));
adminClient = (MockAdminClient) supplier.getAdmin(null);
streamsStateListener = new StateListenerStub();
props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID + safeUniqueTestName(testInfo));
props.put(StreamsConfig.CLIENT_ID_CONFIG, CLIENT_ID);
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:2018");
props.put(StreamsConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, NUM_THREADS);
}
@AfterEach
public void tearDown() {
if (clientMetricsMockedStatic != null)
clientMetricsMockedStatic.close();
if (streamThreadMockedStatic != null)
streamThreadMockedStatic.close();
if (globalStreamThreadMockedConstruction != null)
globalStreamThreadMockedConstruction.close();
if (metricsMockedConstruction != null)
metricsMockedConstruction.close();
if (streamsConfigUtils != null)
streamsConfigUtils.close();
if (adminClient != null)
adminClient.close();
reset(streamThreadOne, streamThreadTwo);
}
@SuppressWarnings("unchecked")
private void prepareStreams() {
// setup metrics
metricsMockedConstruction = mockConstruction(Metrics.class, (mock, context) -> {
assertEquals(4, context.arguments().size());
final List<MetricsReporter> reporters = (List<MetricsReporter>) context.arguments().get(1);
for (final MetricsReporter reporter : reporters) {
reporter.init(Collections.emptyList());
}
doAnswer(invocation -> {
for (final MetricsReporter reporter : reporters) {
reporter.close();
}
return null;
}).when(mock).close();
});
clientMetricsMockedStatic = mockStatic(ClientMetrics.class);
clientMetricsMockedStatic.when(ClientMetrics::version).thenReturn("1.56");
clientMetricsMockedStatic.when(ClientMetrics::commitId).thenReturn("1a2b3c4d5e");
ClientMetrics.addVersionMetric(any(StreamsMetricsImpl.class));
ClientMetrics.addCommitIdMetric(any(StreamsMetricsImpl.class));
ClientMetrics.addApplicationIdMetric(any(StreamsMetricsImpl.class), eq(APPLICATION_ID));
ClientMetrics.addTopologyDescriptionMetric(any(StreamsMetricsImpl.class), any());
ClientMetrics.addStateMetric(any(StreamsMetricsImpl.class), any());
ClientMetrics.addNumAliveStreamThreadMetric(any(StreamsMetricsImpl.class), any());
// setup stream threads
streamThreadMockedStatic = mockStatic(StreamThread.class);
streamThreadMockedStatic.when(() -> StreamThread.create(
any(TopologyMetadata.class),
any(StreamsConfig.class),
any(KafkaClientSupplier.class),
any(Admin.class),
any(UUID.class),
any(String.class),
any(StreamsMetricsImpl.class),
any(Time.class),
any(StreamsMetadataState.class),
anyLong(),
any(StateDirectory.class),
any(StateRestoreListener.class),
any(StandbyUpdateListener.class),
anyInt(),
any(Runnable.class),
any()
)).thenReturn(streamThreadOne).thenReturn(streamThreadTwo);
streamsConfigUtils = mockStatic(StreamsConfigUtils.class);
streamsConfigUtils.when(() -> StreamsConfigUtils.processingMode(any(StreamsConfig.class))).thenReturn(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE);
streamsConfigUtils.when(() -> StreamsConfigUtils.eosEnabled(any(StreamsConfig.class))).thenReturn(false);
streamsConfigUtils.when(() -> StreamsConfigUtils.totalCacheSize(any(StreamsConfig.class))).thenReturn(10 * 1024 * 1024L);
// setup global threads
final AtomicReference<GlobalStreamThread.State> globalThreadState = new AtomicReference<>(GlobalStreamThread.State.CREATED);
globalStreamThreadMockedConstruction = mockConstruction(GlobalStreamThread.class,
(mock, context) -> {
when(mock.state()).thenAnswer(invocation -> globalThreadState.get());
doNothing().when(mock).setStateListener(threadStateListenerCapture.capture());
doAnswer(invocation -> {
globalThreadState.set(GlobalStreamThread.State.RUNNING);
threadStateListenerCapture.getValue().onChange(mock,
GlobalStreamThread.State.RUNNING,
GlobalStreamThread.State.CREATED);
return null;
}).when(mock).start();
doAnswer(invocation -> {
supplier.restoreConsumer.close();
for (final MockProducer<byte[], byte[]> producer : supplier.producers) {
producer.close();
}
globalThreadState.set(GlobalStreamThread.State.DEAD);
threadStateListenerCapture.getValue().onChange(mock,
GlobalStreamThread.State.PENDING_SHUTDOWN,
GlobalStreamThread.State.RUNNING);
threadStateListenerCapture.getValue().onChange(mock,
GlobalStreamThread.State.DEAD,
GlobalStreamThread.State.PENDING_SHUTDOWN);
return null;
}).when(mock).shutdown();
when(mock.stillRunning()).thenReturn(globalThreadState.get() == GlobalStreamThread.State.RUNNING);
});
}
private AtomicReference<StreamThread.State> prepareStreamThread(final StreamThread thread, final int threadId) {
when(thread.getId()).thenReturn((long) threadId);
final AtomicReference<StreamThread.State> state = new AtomicReference<>(StreamThread.State.CREATED);
when(thread.state()).thenAnswer(invocation -> state.get());
doNothing().when(thread).setStateListener(threadStateListenerCapture.capture());
when(thread.getName()).thenReturn("processId-StreamThread-" + threadId);
return state;
}
private void prepareConsumer(final StreamThread thread, final AtomicReference<StreamThread.State> state) {
doAnswer(invocation -> {
supplier.consumer.close(
org.apache.kafka.clients.consumer.CloseOptions.groupMembershipOperation(org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP)
);
supplier.restoreConsumer.close(
org.apache.kafka.clients.consumer.CloseOptions.groupMembershipOperation(org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP)
);
for (final MockProducer<byte[], byte[]> producer : supplier.producers) {
producer.close();
}
state.set(StreamThread.State.DEAD);
threadStateListenerCapture.getValue().onChange(thread, StreamThread.State.PENDING_SHUTDOWN, StreamThread.State.RUNNING);
threadStateListenerCapture.getValue().onChange(thread, StreamThread.State.DEAD, StreamThread.State.PENDING_SHUTDOWN);
return null;
}).when(thread).shutdown(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP);
}
private void prepareThreadLock(final StreamThread thread) {
when(thread.getStateLock()).thenReturn(new Object());
}
private void prepareThreadState(final StreamThread thread, final AtomicReference<StreamThread.State> state) {
doAnswer(invocation -> {
state.set(StreamThread.State.STARTING);
threadStateListenerCapture.getValue().onChange(thread,
StreamThread.State.STARTING,
StreamThread.State.CREATED);
threadStateListenerCapture.getValue().onChange(thread,
StreamThread.State.PARTITIONS_REVOKED,
StreamThread.State.STARTING);
threadStateListenerCapture.getValue().onChange(thread,
StreamThread.State.PARTITIONS_ASSIGNED,
StreamThread.State.PARTITIONS_REVOKED);
threadStateListenerCapture.getValue().onChange(thread,
StreamThread.State.RUNNING,
StreamThread.State.PARTITIONS_ASSIGNED);
return null;
}).when(thread).start();
}
private final CountDownLatch terminableThreadBlockingLatch = new CountDownLatch(1);
private void prepareTerminableThread(final StreamThread thread) throws InterruptedException {
doAnswer(invocation -> {
terminableThreadBlockingLatch.await();
return null;
}).when(thread).join();
}
private | StateListenerStub |
java | google__gson | gson/src/main/java/com/google/gson/JsonElement.java | {
"start": 2647,
"end": 3102
} | class ____ these
* methods for this:
*
* <ul>
* <li>{@link Gson#fromJson(JsonElement, Class) Gson.fromJson(JsonElement, ...)}, for example:
* <pre>
* JsonObject jsonObject = ...;
* MyClass myObj = gson.fromJson(jsonObject, MyClass.class);
* </pre>
* <li>{@link Gson#toJsonTree(Object)}, for example:
* <pre>
* MyClass myObj = ...;
* JsonElement json = gson.toJsonTree(myObj);
* </pre>
* </ul>
*
* The {@link TypeAdapter} | offers |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertHasSameSizeAs_with_Array_Test.java | {
"start": 1469,
"end": 5814
} | class ____ extends IterablesBaseTest {
@Test
void should_pass_if_size_of_actual_is_equal_to_expected_size() {
iterables.assertHasSameSizeAs(someInfo(), newArrayList("Yoda", "Luke"), array("Solo", "Leia"));
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> iterables.assertHasSameSizeAs(someInfo(), null,
newArrayList("Solo", "Leia")))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_other_is_null() {
assertThatNullPointerException().isThrownBy(() -> {
Iterable<?> other = null;
iterables.assertHasSameSizeAs(someInfo(), newArrayList("Yoda", "Luke"), other);
}).withMessage("The Iterable to compare actual size with should not be null");
}
@Test
void should_fail_if_actual_size_is_not_equal_to_other_size() {
AssertionInfo info = someInfo();
Collection<String> actual = newArrayList("Yoda");
String[] other = array("Solo", "Luke", "Leia");
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> iterables.assertHasSameSizeAs(info, actual, other))
.withMessage(shouldHaveSameSizeAs(actual, other, actual.size(),
other.length).create(null,
info.representation())
.formatted());
}
@Test
void should_pass_if_actual_has_same_size_as_other_whatever_custom_comparison_strategy_is() {
iterablesWithCaseInsensitiveComparisonStrategy.assertHasSameSizeAs(someInfo(), newArrayList("Luke", "Yoda"),
array("Solo", "Leia"));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> iterablesWithCaseInsensitiveComparisonStrategy.assertHasSameSizeAs(someInfo(),
null,
array("Solo",
"Leia")))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_other_is_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> {
Iterable<?> other = null;
iterables.assertHasSameSizeAs(someInfo(), newArrayList("Yoda", "Luke"), other);
}).withMessage("The Iterable to compare actual size with should not be null");
}
@Test
void should_fail_if_actual_size_is_not_equal_to_other_size_whatever_custom_comparison_strategy_is() {
AssertionInfo info = someInfo();
Collection<String> actual = newArrayList("Yoda");
String[] other = array("Solo", "Luke", "Leia");
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> iterablesWithCaseInsensitiveComparisonStrategy.assertHasSameSizeAs(info,
actual,
other))
.withMessage(shouldHaveSameSizeAs(actual, other, actual.size(),
other.length)
.create(null,
info.representation()));
}
}
| Iterables_assertHasSameSizeAs_with_Array_Test |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/test/java/org/apache/flink/table/factories/PrintSinkFactoryTest.java | {
"start": 1597,
"end": 2495
} | class ____ {
private static final ResolvedSchema SCHEMA =
ResolvedSchema.of(
Column.physical("f0", DataTypes.STRING()),
Column.physical("f1", DataTypes.BIGINT()),
Column.physical("f2", DataTypes.BIGINT()));
@Test
void testPrint() {
Map<String, String> properties = new HashMap<>();
properties.put("connector", "print");
properties.put(PrintConnectorOptions.PRINT_IDENTIFIER.key(), "my_print");
properties.put(PrintConnectorOptions.STANDARD_ERROR.key(), "true");
List<String> partitionKeys = Arrays.asList("f0", "f1");
DynamicTableSink sink = createTableSink(SCHEMA, partitionKeys, properties);
assertThat(sink.asSummaryString()).isEqualTo("Print to System.err");
assertThat(sink).isInstanceOf(SupportsPartitioning.class);
}
}
| PrintSinkFactoryTest |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/web/servlet/response/SecurityMockMvcResultMatchers.java | {
"start": 3180,
"end": 11285
} | class ____ extends AuthenticationMatcher<AuthenticatedMatcher> {
private @Nullable SecurityContext expectedContext;
private @Nullable Authentication expectedAuthentication;
private @Nullable Object expectedAuthenticationPrincipal;
private @Nullable String expectedAuthenticationName;
private @Nullable Collection<? extends GrantedAuthority> expectedGrantedAuthorities;
private @Nullable Collection<String> expectedAuthorities;
private Predicate<GrantedAuthority> ignoreAuthorities = (authority) -> false;
private @Nullable Consumer<Authentication> assertAuthentication;
AuthenticatedMatcher() {
}
@NullUnmarked
@Override
public void match(MvcResult result) {
SecurityContext context = load(result);
Authentication auth = context.getAuthentication();
AssertionErrors.assertTrue("Authentication should not be null", auth != null);
if (this.assertAuthentication != null) {
this.assertAuthentication.accept(auth);
}
if (this.expectedContext != null) {
AssertionErrors.assertEquals(this.expectedContext + " does not equal " + context, this.expectedContext,
context);
}
if (this.expectedAuthentication != null) {
AssertionErrors.assertEquals(
this.expectedAuthentication + " does not equal " + context.getAuthentication(),
this.expectedAuthentication, context.getAuthentication());
}
if (this.expectedAuthenticationPrincipal != null) {
AssertionErrors.assertTrue("Authentication cannot be null", context.getAuthentication() != null);
AssertionErrors.assertEquals(
this.expectedAuthenticationPrincipal + " does not equal "
+ context.getAuthentication().getPrincipal(),
this.expectedAuthenticationPrincipal, context.getAuthentication().getPrincipal());
}
if (this.expectedAuthenticationName != null) {
AssertionErrors.assertTrue("Authentication cannot be null", auth != null);
String name = auth.getName();
AssertionErrors.assertEquals(this.expectedAuthenticationName + " does not equal " + name,
this.expectedAuthenticationName, name);
}
if (this.expectedGrantedAuthorities != null) {
AssertionErrors.assertTrue("Authentication cannot be null", auth != null);
Collection<? extends GrantedAuthority> authorities = new ArrayList<>(auth.getAuthorities());
authorities.removeIf(this.ignoreAuthorities);
AssertionErrors.assertTrue(
authorities + " does not contain the same authorities as " + this.expectedGrantedAuthorities,
authorities.containsAll(this.expectedGrantedAuthorities));
AssertionErrors.assertTrue(
this.expectedGrantedAuthorities + " does not contain the same authorities as " + authorities,
this.expectedGrantedAuthorities.containsAll(authorities));
}
if (this.expectedAuthorities != null) {
AssertionErrors.assertTrue("Authentication cannot be null", auth != null);
List<String> authorities = auth.getAuthorities()
.stream()
.filter(Predicate.not(this.ignoreAuthorities))
.map(GrantedAuthority::getAuthority)
.toList();
AssertionErrors.assertTrue(
authorities + " does not contain the same authorities as " + this.expectedAuthorities,
this.expectedAuthorities.containsAll(authorities));
AssertionErrors.assertTrue(
this.expectedAuthorities + " does not contain the same authorities as " + authorities,
authorities.containsAll(this.expectedAuthorities));
}
}
/**
* Allows for any validating the authentication with arbitrary assertions
* @param assertAuthentication the Consumer which validates the authentication
* @return the AuthenticatedMatcher to perform additional assertions
*/
public AuthenticatedMatcher withAuthentication(Consumer<Authentication> assertAuthentication) {
this.assertAuthentication = assertAuthentication;
return this;
}
/**
* Specifies the expected username
* @param expected the expected username
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withUsername(String expected) {
return withAuthenticationName(expected);
}
/**
* Specifies the expected {@link SecurityContext}
* @param expected the expected {@link SecurityContext}
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withSecurityContext(SecurityContext expected) {
this.expectedContext = expected;
return this;
}
/**
* Specifies the expected {@link Authentication}
* @param expected the expected {@link Authentication}
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withAuthentication(Authentication expected) {
this.expectedAuthentication = expected;
return this;
}
/**
* Specifies the expected principal
* @param expected the expected principal
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withAuthenticationPrincipal(Object expected) {
this.expectedAuthenticationPrincipal = expected;
return this;
}
/**
* Specifies the expected {@link Authentication#getName()}
* @param expected the expected {@link Authentication#getName()}
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withAuthenticationName(String expected) {
this.expectedAuthenticationName = expected;
return this;
}
/**
* Specifies the {@link GrantedAuthority#getAuthority()}
* @param authorities the authorityNames
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withAuthorities(String... authorities) {
Assert.notNull(authorities, "authorities cannot be null");
this.expectedAuthorities = Arrays.asList(authorities);
return this;
}
/**
* Specifies the {@link Authentication#getAuthorities()}
* @param expected the {@link Authentication#getAuthorities()}
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withAuthorities(Collection<? extends GrantedAuthority> expected) {
this.expectedGrantedAuthorities = expected;
return this;
}
/**
* Specifies the expected roles.
* <p>
* Since a set of authorities can contain more than just roles, this method
* differs from {@link #withAuthorities} in that it only verifies the authorities
* prefixed by {@code ROLE_}. Other authorities are ignored.
* <p>
* If you want to validate more than just roles, please use
* {@link #withAuthorities}.
* @param roles the roles. Each value is automatically prefixed with "ROLE_"
* @return the {@link AuthenticatedMatcher} for further customization
*/
public AuthenticatedMatcher withRoles(String... roles) {
return withRoles("ROLE_", roles);
}
/**
* Specifies the expected roles.
* <p>
* Since a set of authorities can contain more than just roles, this method
* differs from {@link #withAuthorities} in that it only verifies the authorities
* prefixed by {@code ROLE_}. Other authorities are ignored.
* <p>
* If you want to validate more than just roles, please use
* {@link #withAuthorities}.
* @param rolePrefix the role prefix
* @param roles the roles. Each value is automatically prefixed with the
* {@code rolePrefix}
* @return the {@link AuthenticatedMatcher} for further customization
* @since 7.0
*/
public AuthenticatedMatcher withRoles(String rolePrefix, String[] roles) {
List<GrantedAuthority> withPrefix = new ArrayList<>();
for (String role : roles) {
withPrefix.add(new SimpleGrantedAuthority(rolePrefix + role));
}
this.ignoreAuthorities = (authority) -> (authority.getAuthority() != null
&& !authority.getAuthority().startsWith(rolePrefix));
return withAuthorities(withPrefix);
}
}
/**
* A {@link MockMvc} {@link ResultMatcher} that verifies no {@link Authentication} is
* associated with the {@link MvcResult}.
*
* @author Rob Winch
* @since 4.0
*/
private static final | AuthenticatedMatcher |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java | {
"start": 68693,
"end": 70478
} | class ____ extends EncryptionFaultInjector {
private volatile int exceptionCount = 0;
MyInjector(int numFailures) {
exceptionCount = numFailures;
}
@Override
public synchronized void reencryptUpdaterProcessCheckpoint()
throws IOException {
if (exceptionCount > 0) {
--exceptionCount;
throw new IOException("Injected process checkpoint failure");
}
}
}
final MyInjector injector = new MyInjector(1);
EncryptionFaultInjector.instance = injector;
/* Setup test dir:
* /zones/zone/[0-9]
*/
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone = new Path(zoneParent, "zone");
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
for (int i = 0; i < 10; ++i) {
DFSTestUtil
.createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
0xFEED);
}
// re-encrypt the zone
rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
assertEquals(0, injector.exceptionCount);
// test listReencryptionStatus should still work
RemoteIterator<ZoneReencryptionStatus> it =
dfsAdmin.listReencryptionStatus();
assertTrue(it.hasNext());
ZoneReencryptionStatus zs = it.next();
assertEquals(zone.toString(), zs.getZoneName());
assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
verifyZoneCompletionTime(zs);
assertEquals(10, zs.getFilesReencrypted());
assertEquals(1, zs.getNumReencryptionFailures());
}
@Test
public void testReencryptionUpdaterFaultRecover() throws Exception {
| MyInjector |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java | {
"start": 3391,
"end": 60693
} | class ____ implements RequestManager, MemberStateListener, Closeable {
    // Clock abstraction used for retry backoff and request timing.
    private final Time time;
    private final Logger log;
    private final LogContext logContext;
    // The share group this consumer belongs to.
    private final String groupId;
    private final ShareConsumerMetadata metadata;
    private final SubscriptionState subscriptions;
    private final ShareFetchConfig shareFetchConfig;
    // Buffer into which completed fetches are placed for consumption.
    protected final ShareFetchBuffer shareFetchBuffer;
    // Handler used to deliver acknowledgement result events back to the application.
    private final ShareAcknowledgementEventHandler acknowledgeEventHandler;
    // Share session state, keyed by node ID.
    private final Map<Integer, ShareSessionHandler> sessionHandlers;
    // Nodes with a request currently in flight; no further request is sent to a node in this set.
    private final Set<Integer> nodesWithPendingRequests;
    private final ShareFetchMetricsManager metricsManager;
    private final IdempotentCloser idempotentCloser = new IdempotentCloser();
    // Member ID within the share group; null until group membership is known (poll() is a no-op until then).
    private Uuid memberId;
    // True when more records should be fetched on the next poll.
    private boolean fetchMoreRecords = false;
    // In record_limit mode, the single node currently chosen to fetch records from (-1 = none chosen).
    private final AtomicInteger fetchRecordsNodeId = new AtomicInteger(-1);
    // Acknowledgements waiting to be piggy-backed onto the next ShareFetch, keyed by node ID.
    private final Map<Integer, Map<TopicIdPartition, Acknowledgements>> fetchAcknowledgementsToSend;
    // Acknowledgements sent in a ShareFetch but not yet responded to, keyed by node ID.
    private final Map<Integer, Map<TopicIdPartition, Acknowledgements>> fetchAcknowledgementsInFlight;
    // Per-node async/sync/close acknowledge request states.
    private final Map<Integer, Tuple<AcknowledgeRequestState>> acknowledgeRequestStates;
    private final long retryBackoffMs;
    private final long retryBackoffMaxMs;
    // Set once close begins; poll() completes closeFuture when outstanding work has drained.
    private boolean closing = false;
    private final CompletableFuture<Void> closeFuture;
    private boolean isAcknowledgementCommitCallbackRegistered = false;
    // Cache of topic names by (topic ID, partition), used to resolve responses back to partitions.
    private final Map<IdAndPartition, String> topicNamesMap = new HashMap<>();
    private static final String INVALID_RESPONSE = "Acknowledgement not successful due to invalid response from broker";
    /**
     * Constructs the request manager. Collaborators are injected; the mutable bookkeeping
     * collections (session handlers, pending-request set, acknowledgement maps) start empty.
     */
    ShareConsumeRequestManager(final Time time,
                               final LogContext logContext,
                               final String groupId,
                               final ShareConsumerMetadata metadata,
                               final SubscriptionState subscriptions,
                               final ShareFetchConfig shareFetchConfig,
                               final ShareFetchBuffer shareFetchBuffer,
                               final ShareAcknowledgementEventHandler acknowledgeEventHandler,
                               final ShareFetchMetricsManager metricsManager,
                               final long retryBackoffMs,
                               final long retryBackoffMaxMs) {
        this.time = time;
        this.log = logContext.logger(ShareConsumeRequestManager.class);
        this.logContext = logContext;
        this.groupId = groupId;
        this.metadata = metadata;
        this.subscriptions = subscriptions;
        this.shareFetchConfig = shareFetchConfig;
        this.shareFetchBuffer = shareFetchBuffer;
        this.acknowledgeEventHandler = acknowledgeEventHandler;
        this.metricsManager = metricsManager;
        this.retryBackoffMs = retryBackoffMs;
        this.retryBackoffMaxMs = retryBackoffMaxMs;
        this.sessionHandlers = new HashMap<>();
        this.nodesWithPendingRequests = new HashSet<>();
        this.acknowledgeRequestStates = new HashMap<>();
        this.fetchAcknowledgementsToSend = new HashMap<>();
        this.fetchAcknowledgementsInFlight = new HashMap<>();
        this.closeFuture = new CompletableFuture<>();
    }
    /**
     * Prepares the requests to send on this poll. Pending acknowledgements are processed first;
     * if none are due and more records are wanted, a ShareFetch request is built per eligible
     * leader node, piggy-backing any waiting acknowledgements onto the fetch.
     */
    @Override
    public PollResult poll(long currentTimeMs) {
        if (memberId == null) {
            // No group membership yet - nothing can be sent; unblock close() if it is waiting.
            if (closing && !closeFuture.isDone()) {
                closeFuture.complete(null);
            }
            return PollResult.EMPTY;
        }
        // Send any pending acknowledgements before fetching more records.
        PollResult pollResult = processAcknowledgements(currentTimeMs);
        if (pollResult != null) {
            return pollResult;
        }
        if (!fetchMoreRecords) {
            return PollResult.EMPTY;
        }
        Map<Node, ShareSessionHandler> handlerMap = new HashMap<>();
        Map<String, Uuid> topicIds = metadata.topicIds();
        for (TopicPartition partition : partitionsToFetch()) {
            Optional<Node> leaderOpt = metadata.currentLeader(partition).leader;
            if (leaderOpt.isEmpty()) {
                log.debug("Requesting metadata update for partition {} since current leader node is missing", partition);
                metadata.requestUpdate(false);
                continue;
            }
            Uuid topicId = topicIds.get(partition.topic());
            if (topicId == null) {
                log.debug("Requesting metadata update for partition {} since topic ID is missing", partition);
                metadata.requestUpdate(false);
                continue;
            }
            Node node = leaderOpt.get();
            if (nodesWithPendingRequests.contains(node.id())) {
                log.trace("Skipping fetch for partition {} because previous fetch request to {} has not been processed", partition, node.id());
            } else {
                // If there is a leader and no in-flight requests, issue a new fetch.
                ShareSessionHandler handler = handlerMap.computeIfAbsent(node,
                        k -> sessionHandlers.computeIfAbsent(node.id(), n -> new ShareSessionHandler(logContext, n, memberId)));
                TopicIdPartition tip = new TopicIdPartition(topicId, partition);
                Acknowledgements acknowledgementsToSend = null;
                boolean canSendAcknowledgements = true;
                Map<TopicIdPartition, Acknowledgements> nodeAcksFromFetchMap = fetchAcknowledgementsToSend.get(node.id());
                if (nodeAcksFromFetchMap != null) {
                    acknowledgementsToSend = nodeAcksFromFetchMap.remove(tip);
                    if (acknowledgementsToSend != null) {
                        // Check if the share session epoch is valid for sending acknowledgements.
                        if (!maybeAddAcknowledgements(handler, node, tip, acknowledgementsToSend)) {
                            canSendAcknowledgements = false;
                        }
                    }
                }
                if (canSendAcknowledgements) {
                    handler.addPartitionToFetch(tip, acknowledgementsToSend);
                } else {
                    // Acknowledgements were rejected (initial session epoch) - fetch without piggy-backed acks.
                    handler.addPartitionToFetch(tip, null);
                }
                topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic());
                // If we have not chosen a node for fetching records yet,
                // choose now, and rotate the assigned partitions so the next poll starts on a different partition.
                // This is only applicable for record_limit mode.
                if (isShareAcquireModeRecordLimit() && fetchRecordsNodeId.compareAndSet(-1, node.id())) {
                    subscriptions.movePartitionToEnd(partition);
                }
                log.debug("Added fetch request for partition {} to node {}", tip, node.id());
            }
        }
        // Iterate over the session handlers to see if there are acknowledgements to be sent for partitions
        // which are no longer part of the current subscription.
        // We fail acknowledgements for records fetched from a previous leader.
        Cluster cluster = metadata.fetch();
        sessionHandlers.forEach((nodeId, sessionHandler) -> {
            Node node = cluster.nodeById(nodeId);
            if (node != null) {
                if (nodesWithPendingRequests.contains(node.id())) {
                    log.trace("Skipping fetch because previous fetch request to {} has not been processed", nodeId);
                } else {
                    Map<TopicIdPartition, Acknowledgements> nodeAcksFromFetchMap = fetchAcknowledgementsToSend.get(nodeId);
                    if (nodeAcksFromFetchMap != null) {
                        nodeAcksFromFetchMap.forEach((tip, acks) -> {
                            if (!isLeaderKnownToHaveChanged(nodeId, tip)) {
                                // Check if the share session epoch is valid for sending acknowledgements.
                                if (!maybeAddAcknowledgements(sessionHandler, node, tip, acks)) {
                                    return;
                                }
                                sessionHandler.addPartitionToAcknowledgeOnly(tip, acks);
                                handlerMap.put(node, sessionHandler);
                                topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic());
                                log.debug("Added fetch request for previously subscribed partition {} to node {}", tip, nodeId);
                            } else {
                                log.debug("Leader for the partition is down or has changed, failing acknowledgements for partition {}", tip);
                                acks.complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
                                maybeSendShareAcknowledgementEvent(Map.of(tip, acks), true, Optional.empty());
                            }
                        });
                        nodeAcksFromFetchMap.clear();
                    }
                }
            }
        });
        // Iterate over the share session handlers and build a list of UnsentRequests.
        List<UnsentRequest> requests = handlerMap.entrySet().stream().map(entry -> {
            Node target = entry.getKey();
            ShareSessionHandler handler = entry.getValue();
            log.trace("Building ShareFetch request to send to node {}", target.id());
            ShareFetchRequest.Builder requestBuilder = handler.newShareFetchBuilder(groupId, shareFetchConfig);
            // For record_limit mode, we only send a full ShareFetch to a single node at a time.
            // We prepare to build ShareFetch requests for all nodes with session handlers to permit
            // piggy-backing of acknowledgements, and also to adjust the topic-partitions
            // in the share session.
            if (isShareAcquireModeRecordLimit() && target.id() != fetchRecordsNodeId.get()) {
                ShareFetchRequestData data = requestBuilder.data();
                // If there's nothing to send, just skip building the record.
                if (data.topics().isEmpty() && data.forgottenTopicsData().isEmpty()) {
                    return null;
                } else {
                    // There is something to send, but we don't want to fetch any records.
                    requestBuilder.data().setMaxRecords(0);
                }
            }
            nodesWithPendingRequests.add(target.id());
            BiConsumer<ClientResponse, Throwable> responseHandler = (clientResponse, error) -> {
                if (error != null) {
                    handleShareFetchFailure(target, requestBuilder.data(), error);
                } else {
                    handleShareFetchSuccess(target, requestBuilder.data(), clientResponse);
                }
            };
            return new UnsentRequest(requestBuilder, Optional.of(target)).whenComplete(responseHandler);
        }).filter(Objects::nonNull).collect(Collectors.toList());
        return new PollResult(requests);
    }
private boolean isShareAcquireModeRecordLimit() {
return shareFetchConfig.shareAcquireMode == ShareAcquireMode.RECORD_LIMIT;
}
    /**
     * Add acknowledgements for a topic-partition to the node's in-flight acknowledgements.
     *
     * @param handler The share session handler for the node.
     * @param node The node the acknowledgements would be sent to.
     * @param tip The topic-partition being acknowledged.
     * @param acknowledgements The acknowledgements to add.
     *
     * @return True if we can add acknowledgements to the share session.
     * If we cannot add acknowledgements, they are completed with {@link Errors#INVALID_SHARE_SESSION_EPOCH} exception.
     */
    private boolean maybeAddAcknowledgements(ShareSessionHandler handler,
                                             Node node,
                                             TopicIdPartition tip,
                                             Acknowledgements acknowledgements) {
        if (handler.isNewSession()) {
            // Failing the acknowledgements as we cannot have piggybacked acknowledgements in the initial ShareFetchRequest.
            log.debug("Cannot send acknowledgements on initial epoch for ShareSession for partition {}", tip);
            acknowledgements.complete(Errors.INVALID_SHARE_SESSION_EPOCH.exception());
            maybeSendShareAcknowledgementEvent(Map.of(tip, acknowledgements), true, Optional.empty());
            return false;
        } else {
            // Established session - record the acknowledgements as in-flight for this node.
            metricsManager.recordAcknowledgementSent(acknowledgements.size());
            fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acknowledgements);
            return true;
        }
    }
    /**
     * Signals that more records should be fetched on the next poll, and enqueues the supplied
     * acknowledgements to be piggy-backed onto the next ShareFetch request.
     *
     * @param acknowledgementsMap The acknowledgements to send with the next fetch.
     */
    public void fetch(Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap) {
        if (!fetchMoreRecords) {
            log.debug("Fetch more data");
            fetchMoreRecords = true;
        }
        // Store the acknowledgements and send them in the next ShareFetch.
        processAcknowledgementsMap(acknowledgementsMap);
    }
private void processAcknowledgementsMap(Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap) {
acknowledgementsMap.forEach((tip, nodeAcks) -> {
int nodeId = nodeAcks.nodeId();
Map<TopicIdPartition, Acknowledgements> currentNodeAcknowledgementsMap = fetchAcknowledgementsToSend.get(nodeId);
if (currentNodeAcknowledgementsMap != null) {
Acknowledgements currentAcknowledgementsForNode = currentNodeAcknowledgementsMap.get(tip);
if (currentAcknowledgementsForNode != null) {
currentAcknowledgementsForNode.merge(nodeAcks.acknowledgements());
} else {
currentNodeAcknowledgementsMap.put(tip, nodeAcks.acknowledgements());
}
} else {
Map<TopicIdPartition, Acknowledgements> nodeAcknowledgementsMap = new HashMap<>();
nodeAcknowledgementsMap.put(tip, nodeAcks.acknowledgements());
fetchAcknowledgementsToSend.put(nodeId, nodeAcknowledgementsMap);
}
});
}
    /**
     * Process acknowledgeRequestStates and prepares a list of acknowledgements to be sent in the poll().
     * Per node, async (commitAsync) acknowledgements are processed first; sync (commitSync) and
     * close requests are only processed once the async request for that node has been sent.
     *
     * @param currentTimeMs the current time in ms.
     *
     * @return the PollResult containing zero or more acknowledgements, or null when there is
     *         nothing to send and no acknowledgement state left to drain.
     */
    private PollResult processAcknowledgements(long currentTimeMs) {
        List<UnsentRequest> unsentRequests = new ArrayList<>();
        AtomicBoolean isAsyncSent = new AtomicBoolean();
        for (Map.Entry<Integer, Tuple<AcknowledgeRequestState>> requestStates : acknowledgeRequestStates.entrySet()) {
            int nodeId = requestStates.getKey();
            if (!isNodeFree(nodeId)) {
                log.trace("Skipping acknowledge request because previous request to {} has not been processed, so acks are not sent", nodeId);
            } else {
                isAsyncSent.set(false);
                // First, the acknowledgements from commitAsync are sent.
                maybeBuildRequest(requestStates.getValue().getAsyncRequest(), currentTimeMs, true, isAsyncSent).ifPresent(unsentRequests::add);
                // Check to ensure we start processing commitSync/close only if there are no commitAsync requests left to process.
                if (isAsyncSent.get()) {
                    if (!isNodeFree(nodeId)) {
                        // Building the async request above marked the node busy - sync/close must wait.
                        log.trace("Skipping acknowledge request because previous request to {} has not been processed, so acks are not sent", nodeId);
                        continue;
                    }
                    // We try to process the close request only if we have processed the async and the sync requests for the node.
                    if (requestStates.getValue().getSyncRequestQueue() == null) {
                        AcknowledgeRequestState closeRequestState = requestStates.getValue().getCloseRequest();
                        maybeBuildRequest(closeRequestState, currentTimeMs, false, isAsyncSent).ifPresent(unsentRequests::add);
                    } else {
                        // Processing the acknowledgements from commitSync
                        for (AcknowledgeRequestState acknowledgeRequestState : requestStates.getValue().getSyncRequestQueue()) {
                            if (!isNodeFree(nodeId)) {
                                log.trace("Skipping acknowledge request because previous request to {} has not been processed, so acks are not sent", nodeId);
                                break;
                            }
                            maybeBuildRequest(acknowledgeRequestState, currentTimeMs, false, isAsyncSent).ifPresent(unsentRequests::add);
                        }
                    }
                }
            }
        }
        PollResult pollResult = null;
        if (!unsentRequests.isEmpty()) {
            pollResult = new PollResult(unsentRequests);
        } else if (checkAndRemoveCompletedAcknowledgements()) {
            // Return empty result until all the acknowledgement request states are processed
            pollResult = PollResult.EMPTY;
        } else if (closing) {
            // All acknowledgement work has drained; allow close() to complete.
            if (!closeFuture.isDone()) {
                closeFuture.complete(null);
            }
            pollResult = PollResult.EMPTY;
        }
        return pollResult;
    }
private boolean isNodeFree(int nodeId) {
return !nodesWithPendingRequests.contains(nodeId);
}
    /**
     * Records whether the application has registered an acknowledgement commit callback, which
     * determines whether acknowledgement results are delivered as events (see
     * {@code maybeSendShareAcknowledgementEvent}).
     */
    public void setAcknowledgementCommitCallbackRegistered(boolean isAcknowledgementCommitCallbackRegistered) {
        this.isAcknowledgementCommitCallbackRegistered = isAcknowledgementCommitCallbackRegistered;
    }
    /**
     * Sends a {@link ShareAcknowledgementEvent} carrying the completed acknowledgements, but only
     * if the application registered a commit callback or renew-acknowledgement checking was requested.
     *
     * @param acknowledgementsMap The completed acknowledgements, keyed by topic-partition.
     * @param checkForRenewAcknowledgements Whether renew acknowledgements should be checked; also forces the event to be sent.
     * @param acquisitionLockTimeoutMs The acquisition lock timeout from the broker response, if present.
     */
    private void maybeSendShareAcknowledgementEvent(Map<TopicIdPartition, Acknowledgements> acknowledgementsMap,
                                                    boolean checkForRenewAcknowledgements,
                                                    Optional<Integer> acquisitionLockTimeoutMs) {
        if (isAcknowledgementCommitCallbackRegistered || checkForRenewAcknowledgements) {
            ShareAcknowledgementEvent event = new ShareAcknowledgementEvent(acknowledgementsMap, checkForRenewAcknowledgements, acquisitionLockTimeoutMs);
            acknowledgeEventHandler.add(event);
        }
    }
    /**
     * Builds the acknowledge request for the given request state if one is due: skips empty or
     * already-processed states, times out expired states, and honours the retry backoff.
     *
     * @param acknowledgeRequestState Contains the acknowledgements to be sent.
     * @param currentTimeMs The current time in ms.
     * @param onCommitAsync Boolean to denote if the acknowledgements came from a commitAsync or not.
     * @param isAsyncSent Boolean to indicate if the async request has been sent.
     *
     * @return Returns the request if it was built.
     */
    private Optional<UnsentRequest> maybeBuildRequest(AcknowledgeRequestState acknowledgeRequestState,
                                                      long currentTimeMs,
                                                      boolean onCommitAsync,
                                                      AtomicBoolean isAsyncSent) {
        boolean asyncSent = true;
        try {
            // Nothing to do for absent, empty (non-close) or already-processed close states.
            if (acknowledgeRequestState == null ||
                    (!acknowledgeRequestState.isCloseRequest() && acknowledgeRequestState.isEmpty()) ||
                    (acknowledgeRequestState.isCloseRequest() && acknowledgeRequestState.isProcessed)) {
                return Optional.empty();
            }
            if (acknowledgeRequestState.maybeExpire()) {
                // Fill in TimeoutException
                for (TopicIdPartition tip : acknowledgeRequestState.incompleteAcknowledgements.keySet()) {
                    metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getIncompleteAcknowledgementsCount(tip));
                    acknowledgeRequestState.handleAcknowledgeTimedOut(tip);
                }
                acknowledgeRequestState.incompleteAcknowledgements.clear();
                // Reset timer for any future processing on the same request state.
                acknowledgeRequestState.maybeResetTimerAndRequestState();
                return Optional.empty();
            }
            if (!acknowledgeRequestState.canSendRequest(currentTimeMs)) {
                // We wait for the backoff before we can send this request.
                asyncSent = false;
                return Optional.empty();
            }
            UnsentRequest request = acknowledgeRequestState.buildRequest();
            if (request == null) {
                asyncSent = false;
                return Optional.empty();
            }
            acknowledgeRequestState.onSendAttempt(currentTimeMs);
            return Optional.of(request);
        } finally {
            // Only a commitAsync caller cares whether its request was sent.
            if (onCommitAsync) {
                isAsyncSent.set(asyncSent);
            }
        }
    }
    /**
     * Prunes the empty acknowledgementRequestStates in {@link #acknowledgeRequestStates}.
     * An entry is removed only when its async, sync and close states are all finished.
     *
     * @return Returns true if there are still any acknowledgements left to be processed.
     */
    private boolean checkAndRemoveCompletedAcknowledgements() {
        boolean areAnyAcksLeft = false;
        Iterator<Map.Entry<Integer, Tuple<AcknowledgeRequestState>>> iterator = acknowledgeRequestStates.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<Integer, Tuple<AcknowledgeRequestState>> acknowledgeRequestStatePair = iterator.next();
            boolean areAsyncAcksLeft = true, areSyncAcksLeft = true;
            // Clear out finished async/sync/close states so they are not reprocessed.
            if (!isRequestStateInProgress(acknowledgeRequestStatePair.getValue().getAsyncRequest())) {
                acknowledgeRequestStatePair.getValue().setAsyncRequest(null);
                areAsyncAcksLeft = false;
            }
            if (!areRequestStatesInProgress(acknowledgeRequestStatePair.getValue().getSyncRequestQueue())) {
                acknowledgeRequestStatePair.getValue().nullifySyncRequestQueue();
                areSyncAcksLeft = false;
            }
            if (!isRequestStateInProgress(acknowledgeRequestStatePair.getValue().getCloseRequest())) {
                acknowledgeRequestStatePair.getValue().setCloseRequest(null);
            }
            if (areAsyncAcksLeft || areSyncAcksLeft) {
                areAnyAcksLeft = true;
            } else if (acknowledgeRequestStatePair.getValue().getCloseRequest() == null) {
                // Fully drained for this node - drop the entry.
                iterator.remove();
            }
        }
        // Any entry surviving the loop (e.g. a close request still pending) counts as work remaining.
        if (!acknowledgeRequestStates.isEmpty()) areAnyAcksLeft = true;
        return areAnyAcksLeft;
    }
private boolean isRequestStateInProgress(AcknowledgeRequestState acknowledgeRequestState) {
if (acknowledgeRequestState == null) {
return false;
} else if (acknowledgeRequestState.isCloseRequest()) {
return !acknowledgeRequestState.isProcessed;
} else {
return !(acknowledgeRequestState.isEmpty());
}
}
private boolean areRequestStatesInProgress(Queue<AcknowledgeRequestState> acknowledgeRequestStates) {
if (acknowledgeRequestStates == null) return false;
for (AcknowledgeRequestState acknowledgeRequestState : acknowledgeRequestStates) {
if (isRequestStateInProgress(acknowledgeRequestState)) {
return true;
}
}
return false;
}
    /**
     * Enqueue an AcknowledgeRequestState to be picked up on the next poll
     *
     * @param acknowledgementsMap The acknowledgements to commit
     * @param deadlineMs Time until which the request will be retried if it fails with
     *                   an expected retriable error.
     *
     * @return The future which completes when the acknowledgements finished
     */
    public CompletableFuture<Map<TopicIdPartition, Acknowledgements>> commitSync(
            final Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap,
            final long deadlineMs) {
        final AtomicInteger resultCount = new AtomicInteger();
        final CompletableFuture<Map<TopicIdPartition, Acknowledgements>> future = new CompletableFuture<>();
        final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.of(future));
        final Cluster cluster = metadata.fetch();
        sessionHandlers.forEach((nodeId, sessionHandler) -> {
            Node node = cluster.nodeById(nodeId);
            if (node != null) {
                acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null));
                // Add the incoming commitSync() request to the queue.
                Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
                for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
                    NodeAcknowledgements nodeAcknowledgements = acknowledgementsMap.get(tip);
                    if ((nodeAcknowledgements != null) && (nodeAcknowledgements.nodeId() == node.id())) {
                        if (!isLeaderKnownToHaveChanged(node.id(), tip)) {
                            acknowledgementsMapForNode.put(tip, nodeAcknowledgements.acknowledgements());
                            metricsManager.recordAcknowledgementSent(nodeAcknowledgements.acknowledgements().size());
                            log.debug("Added sync acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
                            resultCount.incrementAndGet();
                        } else {
                            // Leader moved - fail these acknowledgements rather than sending them to a stale node.
                            nodeAcknowledgements.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
                            maybeSendShareAcknowledgementEvent(Map.of(tip, nodeAcknowledgements.acknowledgements()), true, Optional.empty());
                        }
                    }
                }
                if (!acknowledgementsMapForNode.isEmpty()) {
                    acknowledgeRequestStates.get(nodeId).addSyncRequest(new AcknowledgeRequestState(logContext,
                            ShareConsumeRequestManager.class.getSimpleName() + ":1",
                            deadlineMs,
                            retryBackoffMs,
                            retryBackoffMaxMs,
                            sessionHandler,
                            nodeId,
                            acknowledgementsMapForNode,
                            resultHandler,
                            AcknowledgeRequestType.COMMIT_SYNC
                    ));
                }
            }
        });
        resultHandler.completeIfEmpty();
        return future;
    }
    /**
     * Enqueue an AcknowledgeRequestState to be picked up on the next poll.
     *
     * @param acknowledgementsMap The acknowledgements to commit
     * @param deadlineMs Time until which the request will be retried if it fails with
     *                   an expected retriable error.
     */
    public void commitAsync(
            final Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap,
            final long deadlineMs) {
        final Cluster cluster = metadata.fetch();
        final ResultHandler resultHandler = new ResultHandler(Optional.empty());
        sessionHandlers.forEach((nodeId, sessionHandler) -> {
            Node node = cluster.nodeById(nodeId);
            if (node != null) {
                Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
                acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null));
                for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
                    NodeAcknowledgements nodeAcknowledgements = acknowledgementsMap.get(tip);
                    if ((nodeAcknowledgements != null) && (nodeAcknowledgements.nodeId() == node.id())) {
                        if (!isLeaderKnownToHaveChanged(node.id(), tip)) {
                            Acknowledgements acknowledgements = nodeAcknowledgements.acknowledgements();
                            acknowledgementsMapForNode.put(tip, acknowledgements);
                            metricsManager.recordAcknowledgementSent(acknowledgements.size());
                            log.debug("Added async acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
                            AcknowledgeRequestState asyncRequestState = acknowledgeRequestStates.get(nodeId).getAsyncRequest();
                            if (asyncRequestState == null) {
                                // First async request for this node - create the request state.
                                acknowledgeRequestStates.get(nodeId).setAsyncRequest(new AcknowledgeRequestState(logContext,
                                        ShareConsumeRequestManager.class.getSimpleName() + ":2",
                                        deadlineMs,
                                        retryBackoffMs,
                                        retryBackoffMaxMs,
                                        sessionHandler,
                                        nodeId,
                                        acknowledgementsMapForNode,
                                        resultHandler,
                                        AcknowledgeRequestType.COMMIT_ASYNC
                                ));
                            } else {
                                // An async request state already exists - merge into its pending acknowledgements.
                                Acknowledgements prevAcks = asyncRequestState.acknowledgementsToSend.putIfAbsent(tip, acknowledgements);
                                if (prevAcks != null) {
                                    asyncRequestState.acknowledgementsToSend.get(tip).merge(acknowledgements);
                                }
                            }
                        } else {
                            // Leader moved - fail these acknowledgements rather than sending them to a stale node.
                            nodeAcknowledgements.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
                            maybeSendShareAcknowledgementEvent(Map.of(tip, nodeAcknowledgements.acknowledgements()), true, Optional.empty());
                        }
                    }
                }
            }
        });
        resultHandler.completeIfEmpty();
    }
    /**
     * Enqueue the final AcknowledgeRequestState used to commit the final acknowledgements and
     * close the share sessions.
     *
     * @param acknowledgementsMap The acknowledgements to commit
     * @param deadlineMs Time until which the request will be retried if it fails with
     *                   an expected retriable error.
     *
     * @return The future which completes when the acknowledgements finished
     */
    public CompletableFuture<Void> acknowledgeOnClose(final Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap,
                                                      final long deadlineMs) {
        final Cluster cluster = metadata.fetch();
        final AtomicInteger resultCount = new AtomicInteger();
        final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.empty());
        closing = true;
        // Group the supplied acknowledgements by node, failing any whose leader has moved.
        Map<Integer, Map<TopicIdPartition, Acknowledgements>> acknowledgementsMapAllNodes = new HashMap<>();
        acknowledgementsMap.forEach((tip, nodeAcks) -> {
            if (!isLeaderKnownToHaveChanged(nodeAcks.nodeId(), tip)) {
                Map<TopicIdPartition, Acknowledgements> acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeAcks.nodeId(), k -> new HashMap<>());
                Acknowledgements prevAcks = acksMap.putIfAbsent(tip, nodeAcks.acknowledgements());
                if (prevAcks != null) {
                    acksMap.get(tip).merge(nodeAcks.acknowledgements());
                }
            } else {
                nodeAcks.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
                maybeSendShareAcknowledgementEvent(Map.of(tip, nodeAcks.acknowledgements()), true, Optional.empty());
            }
        });
        sessionHandlers.forEach((nodeId, sessionHandler) -> {
            Node node = cluster.nodeById(nodeId);
            if (node != null) {
                //Add any waiting piggyback acknowledgements for the node.
                Map<TopicIdPartition, Acknowledgements> fetchAcks = fetchAcknowledgementsToSend.remove(nodeId);
                if (fetchAcks != null) {
                    fetchAcks.forEach((tip, acks) -> {
                        if (!isLeaderKnownToHaveChanged(nodeId, tip)) {
                            Map<TopicIdPartition, Acknowledgements> acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeId, k -> new HashMap<>());
                            Acknowledgements prevAcks = acksMap.putIfAbsent(tip, acks);
                            if (prevAcks != null) {
                                acksMap.get(tip).merge(acks);
                            }
                        } else {
                            acks.complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
                            maybeSendShareAcknowledgementEvent(Map.of(tip, acks), true, Optional.empty());
                        }
                    });
                }
                Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = acknowledgementsMapAllNodes.get(nodeId);
                if (acknowledgementsMapForNode != null) {
                    acknowledgementsMapForNode.forEach((tip, acknowledgements) -> {
                        metricsManager.recordAcknowledgementSent(acknowledgements.size());
                        log.debug("Added closing acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
                        resultCount.incrementAndGet();
                    });
                } else {
                    // A close request is still sent for this node even with no acknowledgements to carry.
                    acknowledgementsMapForNode = new HashMap<>();
                }
                acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null));
                // Ensure there is no close() request already present as they are blocking calls
                // and only one request can be active at a time.
                if (acknowledgeRequestStates.get(nodeId).getCloseRequest() != null && isRequestStateInProgress(acknowledgeRequestStates.get(nodeId).getCloseRequest())) {
                    log.error("Attempt to call close() when there is an existing close request for node {}-{}", node.id(), acknowledgeRequestStates.get(nodeId).getSyncRequestQueue());
                    closeFuture.completeExceptionally(
                            new IllegalStateException("Attempt to call close() when there is an existing close request for node : " + node.id()));
                } else {
                    // There can only be one close() happening at a time. So per node, there will be one acknowledge request state.
                    acknowledgeRequestStates.get(nodeId).setCloseRequest(
                            new AcknowledgeRequestState(logContext,
                                    ShareConsumeRequestManager.class.getSimpleName() + ":3",
                                    deadlineMs,
                                    retryBackoffMs,
                                    retryBackoffMaxMs,
                                    sessionHandler,
                                    nodeId,
                                    acknowledgementsMapForNode,
                                    resultHandler,
                                    AcknowledgeRequestType.CLOSE
                            ));
                }
            }
        });
        resultHandler.completeIfEmpty();
        return closeFuture;
    }
/**
* The method checks whether the leader for a topicIdPartition has changed.
* @param nodeId The previous leader for the partition.
* @param topicIdPartition The TopicIdPartition to check.
* @return Returns true if leader information is available and leader has changed.
* If the leader information is not available or if the leader has not changed, it returns false.
*/
private boolean isLeaderKnownToHaveChanged(int nodeId, TopicIdPartition topicIdPartition) {
Optional<Node> leaderNode = metadata.currentLeader(topicIdPartition.topicPartition()).leader;
if (leaderNode.isPresent()) {
if (leaderNode.get().id() != nodeId) {
log.debug("Node {} is no longer the leader for partition {}, failing acknowledgements", nodeId, topicIdPartition);
return true;
}
} else {
log.debug("No leader found for partition {}", topicIdPartition);
metadata.requestUpdate(false);
return false;
}
return false;
}
    /**
     * Handles a ShareFetch response: completes in-flight acknowledgements with per-partition
     * results, turns fetched partition data into {@link ShareCompletedFetch} entries for the
     * fetch buffer, and propagates any updated leader information into metadata. The node's
     * pending-request marker is always cleared in the finally block.
     */
    private void handleShareFetchSuccess(Node fetchTarget,
                                         ShareFetchRequestData requestData,
                                         ClientResponse resp) {
        try {
            log.debug("Completed ShareFetch request from node {} successfully", fetchTarget.id());
            final ShareFetchResponse response = (ShareFetchResponse) resp.responseBody();
            final ShareSessionHandler handler = sessionHandler(fetchTarget.id());
            if (handler == null) {
                log.error("Unable to find ShareSessionHandler for node {}. Ignoring ShareFetch response.",
                        fetchTarget.id());
                return;
            }
            final short requestVersion = resp.requestHeader().apiVersion();
            if (!handler.handleResponse(response, requestVersion)) {
                // Top-level error: the session handler rejected the response.
                if (response.error() == Errors.UNKNOWN_TOPIC_ID) {
                    metadata.requestUpdate(false);
                }
                // Complete any in-flight acknowledgements with the error code from the response.
                Map<TopicIdPartition, Acknowledgements> nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.remove(fetchTarget.id());
                if (nodeAcknowledgementsInFlight != null) {
                    nodeAcknowledgementsInFlight.forEach((tip, acks) -> {
                        acks.complete(Errors.forCode(response.error().code()).exception());
                        metricsManager.recordFailedAcknowledgements(acks.size());
                    });
                    maybeSendShareAcknowledgementEvent(nodeAcknowledgementsInFlight, requestData.isRenewAck(), Optional.empty());
                }
                return;
            }
            final Map<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
            final Optional<Integer> responseAcquisitionLockTimeoutMs = response.data().acquisitionLockTimeoutMs() > 0
                    ? Optional.of(response.data().acquisitionLockTimeoutMs()) : Optional.empty();
            // Resolve each (topic ID, partition) in the response back to a TopicIdPartition.
            response.data().responses().forEach(topicResponse ->
                    topicResponse.partitions().forEach(partition -> {
                        TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partition.partitionIndex());
                        if (tip != null) {
                            responseData.put(tip, partition);
                        }
                    })
            );
            final Set<TopicPartition> partitions = responseData.keySet().stream().map(TopicIdPartition::topicPartition).collect(Collectors.toSet());
            final ShareFetchMetricsAggregator shareFetchMetricsAggregator = new ShareFetchMetricsAggregator(metricsManager, partitions);
            List<ShareCompletedFetch> completedFetches = new ArrayList<>(responseData.size());
            Map<TopicPartition, Metadata.LeaderIdAndEpoch> partitionsWithUpdatedLeaderInfo = new HashMap<>();
            for (Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> entry : responseData.entrySet()) {
                TopicIdPartition tip = entry.getKey();
                ShareFetchResponseData.PartitionData partitionData = entry.getValue();
                log.debug("ShareFetch for partition {} returned fetch data {}", tip, partitionData);
                // Complete the in-flight acknowledgements for this partition with its acknowledge error code.
                Map<TopicIdPartition, Acknowledgements> nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id());
                if (nodeAcknowledgementsInFlight != null) {
                    Acknowledgements acks = nodeAcknowledgementsInFlight.remove(tip);
                    if (acks != null) {
                        if (partitionData.acknowledgeErrorCode() != Errors.NONE.code()) {
                            metricsManager.recordFailedAcknowledgements(acks.size());
                        }
                        acks.complete(Errors.forCode(partitionData.acknowledgeErrorCode())
                                .exception(partitionData.acknowledgeErrorMessage()));
                        Map<TopicIdPartition, Acknowledgements> acksMap = Map.of(tip, acks);
                        maybeSendShareAcknowledgementEvent(acksMap, requestData.isRenewAck(), responseAcquisitionLockTimeoutMs);
                    }
                }
                Errors partitionError = Errors.forCode(partitionData.errorCode());
                if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH) {
                    log.debug("For {}, received error {}, with leaderIdAndEpoch {} in ShareFetch", tip, partitionError, partitionData.currentLeader());
                    // Capture the new leader if the response carries it (-1 means unknown).
                    if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) {
                        partitionsWithUpdatedLeaderInfo.put(tip.topicPartition(), new Metadata.LeaderIdAndEpoch(
                                Optional.of(partitionData.currentLeader().leaderId()), Optional.of(partitionData.currentLeader().leaderEpoch())));
                    }
                }
                completedFetches.add(
                        new ShareCompletedFetch(
                                logContext,
                                BufferSupplier.create(),
                                fetchTarget.id(),
                                tip,
                                partitionData,
                                responseAcquisitionLockTimeoutMs,
                                shareFetchMetricsAggregator,
                                requestVersion)
                );
                if (!partitionData.acquiredRecords().isEmpty()) {
                    // Records were acquired - stop fetching until the application asks for more.
                    fetchMoreRecords = false;
                }
            }
            if (!completedFetches.isEmpty()) {
                shareFetchBuffer.add(completedFetches);
            }
            // Handle any acknowledgements which were not received in the response for this node.
            if (fetchAcknowledgementsInFlight.get(fetchTarget.id()) != null) {
                fetchAcknowledgementsInFlight.remove(fetchTarget.id()).forEach((partition, acknowledgements) -> {
                    acknowledgements.complete(new InvalidRecordStateException(INVALID_RESPONSE));
                    maybeSendShareAcknowledgementEvent(Map.of(partition, acknowledgements), true, Optional.empty());
                });
            }
            if (!partitionsWithUpdatedLeaderInfo.isEmpty()) {
                List<Node> leaderNodes = response.data().nodeEndpoints().stream()
                        .map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack()))
                        .filter(e -> !e.equals(Node.noNode()))
                        .collect(Collectors.toList());
                metadata.updatePartitionLeadership(partitionsWithUpdatedLeaderInfo, leaderNodes);
            }
            metricsManager.recordLatency(resp.destination(), resp.requestLatencyMs());
        } finally {
            log.debug("Removing pending request for node {} - success", fetchTarget.id());
            if (isShareAcquireModeRecordLimit()) {
                // Release the fetching-node slot if this node held it.
                fetchRecordsNodeId.compareAndSet(fetchTarget.id(), -1);
            }
            nodesWithPendingRequests.remove(fetchTarget.id());
        }
    }
private void handleShareFetchFailure(Node fetchTarget,
ShareFetchRequestData requestData,
Throwable error) {
try {
log.debug("Completed ShareFetch request from node {} unsuccessfully {}", fetchTarget.id(), Errors.forException(error));
final ShareSessionHandler handler = sessionHandler(fetchTarget.id());
if (handler != null) {
handler.handleError(error);
}
requestData.topics().forEach(topic -> topic.partitions().forEach(partition -> {
TopicIdPartition tip = lookupTopicId(topic.topicId(), partition.partitionIndex());
if (tip == null) {
return;
}
Map<TopicIdPartition, Acknowledgements> nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id());
if (nodeAcknowledgementsInFlight != null) {
Acknowledgements acks = nodeAcknowledgementsInFlight.remove(tip);
if (acks != null) {
metricsManager.recordFailedAcknowledgements(acks.size());
if (error instanceof KafkaException) {
acks.complete((KafkaException) error);
} else {
acks.complete(Errors.UNKNOWN_SERVER_ERROR.exception());
}
Map<TopicIdPartition, Acknowledgements> acksMap = Map.of(tip, acks);
maybeSendShareAcknowledgementEvent(acksMap, requestData.isRenewAck(), Optional.empty());
}
}
}));
} finally {
log.debug("Removing pending request for node {} - failed", fetchTarget.id());
if (isShareAcquireModeRecordLimit()) {
fetchRecordsNodeId.compareAndSet(fetchTarget.id(), -1);
}
nodesWithPendingRequests.remove(fetchTarget.id());
}
}
private void handleShareAcknowledgeSuccess(Node fetchTarget,
ShareAcknowledgeRequestData requestData,
AcknowledgeRequestState acknowledgeRequestState,
ClientResponse resp,
long responseCompletionTimeMs) {
try {
log.debug("Completed ShareAcknowledge request from node {} successfully", fetchTarget.id());
ShareAcknowledgeResponse response = (ShareAcknowledgeResponse) resp.responseBody();
final Optional<Integer> responseAcquisitionLockTimeoutMs = response.data().acquisitionLockTimeoutMs() > 0
? Optional.of(response.data().acquisitionLockTimeoutMs()) : Optional.empty();
Map<TopicPartition, Metadata.LeaderIdAndEpoch> partitionsWithUpdatedLeaderInfo = new HashMap<>();
if (acknowledgeRequestState.isCloseRequest()) {
response.data().responses().forEach(topicResponse -> topicResponse.partitions().forEach(partitionData -> {
TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partitionData.partitionIndex());
if (tip == null) {
return;
}
if (partitionData.errorCode() != Errors.NONE.code()) {
metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip));
}
acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forCode(partitionData.errorCode()), requestData.isRenewAck(), responseAcquisitionLockTimeoutMs);
}));
acknowledgeRequestState.onSuccessfulAttempt(responseCompletionTimeMs);
acknowledgeRequestState.processingComplete();
} else {
if (!acknowledgeRequestState.sessionHandler.handleResponse(response, resp.requestHeader().apiVersion())) {
// Received a response-level error code.
acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs);
if (response.error().exception() instanceof RetriableException) {
// We retry the request until the timer expires, unless we are closing.
acknowledgeRequestState.moveAllToIncompleteAcks();
} else {
acknowledgeRequestState.processPendingInFlightAcknowledgements(response.error().exception());
acknowledgeRequestState.processingComplete();
}
} else {
AtomicBoolean shouldRetry = new AtomicBoolean(false);
// Check all partition-level error codes.
response.data().responses().forEach(topicResponse -> topicResponse.partitions().forEach(partitionData -> {
Errors partitionError = Errors.forCode(partitionData.errorCode());
TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partitionData.partitionIndex());
if (tip == null) {
return;
}
handlePartitionError(partitionData, partitionsWithUpdatedLeaderInfo, acknowledgeRequestState,
partitionError, tip, shouldRetry, requestData.isRenewAck(), responseAcquisitionLockTimeoutMs);
}));
processRetryLogic(acknowledgeRequestState, shouldRetry, responseCompletionTimeMs);
}
}
if (!partitionsWithUpdatedLeaderInfo.isEmpty()) {
List<Node> leaderNodes = response.data().nodeEndpoints().stream()
.map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack()))
.filter(e -> !e.equals(Node.noNode()))
.collect(Collectors.toList());
metadata.updatePartitionLeadership(partitionsWithUpdatedLeaderInfo, leaderNodes);
}
if (acknowledgeRequestState.isProcessed) {
metricsManager.recordLatency(resp.destination(), resp.requestLatencyMs());
}
} finally {
log.debug("Removing pending request for node {} - success", fetchTarget.id());
nodesWithPendingRequests.remove(fetchTarget.id());
if (acknowledgeRequestState.isCloseRequest()) {
log.debug("Removing node from ShareSession {}", fetchTarget.id());
sessionHandlers.remove(fetchTarget.id());
}
}
}
private void handleShareAcknowledgeFailure(Node fetchTarget,
ShareAcknowledgeRequestData requestData,
AcknowledgeRequestState acknowledgeRequestState,
Throwable error,
long responseCompletionTimeMs) {
try {
log.debug("Completed ShareAcknowledge request from node {} unsuccessfully {}", fetchTarget.id(), Errors.forException(error));
acknowledgeRequestState.sessionHandler().handleError(error);
acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs);
requestData.topics().forEach(topic -> topic.partitions().forEach(partition -> {
TopicIdPartition tip = lookupTopicId(topic.topicId(), partition.partitionIndex());
if (tip == null) {
return;
}
metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip));
acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forException(error), requestData.isRenewAck(), Optional.empty());
}));
acknowledgeRequestState.processingComplete();
} finally {
log.debug("Removing pending request for node {} - failed", fetchTarget.id());
nodesWithPendingRequests.remove(fetchTarget.id());
if (acknowledgeRequestState.isCloseRequest()) {
log.debug("Removing node from ShareSession {}", fetchTarget.id());
sessionHandlers.remove(fetchTarget.id());
}
}
}
private void handlePartitionError(ShareAcknowledgeResponseData.PartitionData partitionData,
Map<TopicPartition, Metadata.LeaderIdAndEpoch> partitionsWithUpdatedLeaderInfo,
AcknowledgeRequestState acknowledgeRequestState,
Errors partitionError,
TopicIdPartition tip,
AtomicBoolean shouldRetry,
boolean isRenewAck,
Optional<Integer> acquisitionLockTimeoutMs) {
if (partitionError.exception() != null) {
boolean retry = false;
if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH || partitionError == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
// If the leader has changed, there's no point in retrying the operation because the acquisition locks
// will have been released.
// If the topic or partition has been deleted, we do not retry the failed acknowledgements.
// Instead, these records will be re-delivered once they get timed out on the broker.
updateLeaderInfoMap(partitionData, partitionsWithUpdatedLeaderInfo, partitionError, tip.topicPartition());
} else if (partitionError.exception() instanceof RetriableException) {
retry = true;
}
if (retry) {
if (acknowledgeRequestState.moveToIncompleteAcks(tip)) {
shouldRetry.set(true);
}
} else {
metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip));
acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError, isRenewAck, Optional.empty());
}
} else {
acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError, isRenewAck, acquisitionLockTimeoutMs);
}
}
private void processRetryLogic(AcknowledgeRequestState acknowledgeRequestState,
AtomicBoolean shouldRetry,
long responseCompletionTimeMs) {
if (shouldRetry.get()) {
acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs);
// Check for any acknowledgements that did not receive a response.
// These acknowledgements are failed with InvalidRecordStateException.
acknowledgeRequestState.processPendingInFlightAcknowledgements(new InvalidRecordStateException(INVALID_RESPONSE));
} else {
acknowledgeRequestState.onSuccessfulAttempt(responseCompletionTimeMs);
acknowledgeRequestState.processingComplete();
}
}
private void updateLeaderInfoMap(ShareAcknowledgeResponseData.PartitionData partitionData,
Map<TopicPartition, Metadata.LeaderIdAndEpoch> partitionsWithUpdatedLeaderInfo,
Errors partitionError,
TopicPartition tp) {
log.debug("For {}, received error {}, with leaderIdAndEpoch {} in ShareAcknowledge", tp, partitionError, partitionData.currentLeader());
if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) {
partitionsWithUpdatedLeaderInfo.put(tp,
new Metadata.LeaderIdAndEpoch(
Optional.of(partitionData.currentLeader().leaderId()),
Optional.of(partitionData.currentLeader().leaderEpoch())
));
}
}
private TopicIdPartition lookupTopicId(Uuid topicId, int partitionIndex) {
String topicName = metadata.topicNames().get(topicId);
if (topicName == null) {
topicName = topicNamesMap.remove(new IdAndPartition(topicId, partitionIndex));
}
if (topicName == null) {
log.error("Topic name not found in metadata for topicId {} and partitionIndex {}", topicId, partitionIndex);
return null;
}
return new TopicIdPartition(topicId, partitionIndex, topicName);
}
private List<TopicPartition> partitionsToFetch() {
return subscriptions.fetchablePartitions(tp -> true);
}
public ShareSessionHandler sessionHandler(int node) {
return sessionHandlers.get(node);
}
boolean hasCompletedFetches() {
return !shareFetchBuffer.isEmpty();
}
protected void closeInternal() {
Utils.closeQuietly(shareFetchBuffer, "shareFetchBuffer");
Utils.closeQuietly(metricsManager, "shareFetchMetricsManager");
}
public void close() {
idempotentCloser.close(this::closeInternal);
}
@Override
public void onMemberEpochUpdated(Optional<Integer> memberEpochOpt, String memberId) {
this.memberId = Uuid.fromString(memberId);
}
/**
* Represents a request to acknowledge delivery that can be retried or aborted.
*/
public | ShareConsumeRequestManager |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/type/SQLServerCastingXmlArrayJdbcType.java | {
"start": 470,
"end": 977
} | class ____ extends XmlArrayJdbcType {
public SQLServerCastingXmlArrayJdbcType(JdbcType elementJdbcType) {
super( elementJdbcType );
}
@Override
public void appendWriteExpression(
String writeExpression,
@Nullable Size size,
SqlAppender appender,
Dialect dialect) {
appender.append( "cast(" );
appender.append( writeExpression );
appender.append( " as xml)" );
}
@Override
public boolean isWriteExpressionTyped(Dialect dialect) {
return true;
}
}
| SQLServerCastingXmlArrayJdbcType |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java | {
"start": 1308,
"end": 5470
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(RightEvaluator.class);
private final Source source;
private final BytesRef out;
private final UnicodeUtil.UTF8CodePoint cp;
private final EvalOperator.ExpressionEvaluator str;
private final EvalOperator.ExpressionEvaluator length;
private final DriverContext driverContext;
private Warnings warnings;
public RightEvaluator(Source source, BytesRef out, UnicodeUtil.UTF8CodePoint cp,
EvalOperator.ExpressionEvaluator str, EvalOperator.ExpressionEvaluator length,
DriverContext driverContext) {
this.source = source;
this.out = out;
this.cp = cp;
this.str = str;
this.length = length;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
try (IntBlock lengthBlock = (IntBlock) length.eval(page)) {
BytesRefVector strVector = strBlock.asVector();
if (strVector == null) {
return eval(page.getPositionCount(), strBlock, lengthBlock);
}
IntVector lengthVector = lengthBlock.asVector();
if (lengthVector == null) {
return eval(page.getPositionCount(), strBlock, lengthBlock);
}
return eval(page.getPositionCount(), strVector, lengthVector).asBlock();
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += str.baseRamBytesUsed();
baseRamBytesUsed += length.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BytesRefBlock eval(int positionCount, BytesRefBlock strBlock, IntBlock lengthBlock) {
try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
switch (strBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (lengthBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
BytesRef str = strBlock.getBytesRef(strBlock.getFirstValueIndex(p), strScratch);
int length = lengthBlock.getInt(lengthBlock.getFirstValueIndex(p));
result.appendBytesRef(Right.process(this.out, this.cp, str, length));
}
return result.build();
}
}
public BytesRefVector eval(int positionCount, BytesRefVector strVector, IntVector lengthVector) {
try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
BytesRef str = strVector.getBytesRef(p, strScratch);
int length = lengthVector.getInt(p);
result.appendBytesRef(Right.process(this.out, this.cp, str, length));
}
return result.build();
}
}
@Override
public String toString() {
return "RightEvaluator[" + "str=" + str + ", length=" + length + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(str, length);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | RightEvaluator |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/stacktrace/PointingStackTraceToActualInvocationInOrderTest.java | {
"start": 731,
"end": 3625
} | class ____ extends TestBase {
@Mock private IMethods mock;
@Mock private IMethods mockTwo;
private InOrder inOrder;
@Before
public void setup() {
inOrder = inOrder(mock, mockTwo);
first();
second();
third();
fourth();
}
private void first() {
mock.simpleMethod(1);
}
private void second() {
mockTwo.simpleMethod(2);
}
private void third() {
mock.simpleMethod(3);
}
private void fourth() {
mockTwo.simpleMethod(4);
}
@Test
public void shouldPointStackTraceToPreviousVerified() {
inOrder.verify(mock, atLeastOnce()).simpleMethod(anyInt());
inOrder.verify(mockTwo).simpleMethod(anyInt());
try {
inOrder.verify(mock).simpleMethod(999);
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("fourth(");
}
}
@Test
public void shouldPointToThirdMethod() {
inOrder.verify(mock, atLeastOnce()).simpleMethod(anyInt());
try {
inOrder.verify(mockTwo).simpleMethod(999);
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("third(");
}
}
@Test
public void shouldPointToSecondMethod() {
inOrder.verify(mock).simpleMethod(anyInt());
inOrder.verify(mockTwo).simpleMethod(anyInt());
try {
inOrder.verify(mockTwo, times(3)).simpleMethod(999);
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("second(");
}
}
@Test
public void shouldPointToFirstMethodBecauseOfTooManyActualInvocations() {
try {
inOrder.verify(mock, times(0)).simpleMethod(anyInt());
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("first(");
}
}
@Test
public void shouldPointToSecondMethodBecauseOfTooManyActualInvocations() {
inOrder.verify(mock).simpleMethod(anyInt());
try {
inOrder.verify(mockTwo, times(0)).simpleMethod(anyInt());
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("second(");
}
}
@Test
public void shouldPointToFourthMethodBecauseOfTooFewActualInvocations() {
inOrder.verify(mock).simpleMethod(anyInt());
inOrder.verify(mockTwo).simpleMethod(anyInt());
inOrder.verify(mock).simpleMethod(anyInt());
try {
inOrder.verify(mockTwo, times(3)).simpleMethod(anyInt());
fail();
} catch (VerificationInOrderFailure e) {
assertThat(e).hasMessageContaining("fourth(");
}
}
}
| PointingStackTraceToActualInvocationInOrderTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java | {
"start": 1227,
"end": 1869
} | class ____ extends TestableSourceConnector {
public static MetricName metricsName = null;
public static final String VALUE = "started";
@Override
public void start(Map<String, String> props) {
super.start(props);
PluginMetrics pluginMetrics = context.pluginMetrics();
metricsName = pluginMetrics.metricName("start", "description", new LinkedHashMap<>());
pluginMetrics.addMetric(metricsName, (Gauge<Object>) (config, now) -> VALUE);
}
@Override
public Class<? extends Task> taskClass() {
return MonitorableSourceTask.class;
}
public static | MonitorableSourceConnector |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingCasesInEnumSwitchTest.java | {
"start": 3469,
"end": 3981
} | enum ____ {
ONE,
TWO,
THREE
}
void m(Case c) {
switch (c) {
case ONE, TWO -> System.err.println("found it!");
case null, default -> {}
}
}
}
""")
.doTest();
}
@Test
public void nonExhaustive_withDefaultForSkew() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Case |
java | apache__camel | test-infra/camel-test-infra-couchbase/src/test/java/org/apache/camel/test/infra/couchbase/services/CouchbaseService.java | {
"start": 1006,
"end": 1103
} | interface ____ extends CouchbaseInfraService, TestService, ContainerTestService {
}
| CouchbaseService |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java | {
"start": 3672,
"end": 5157
} | class ____ extends SortedBinaryDocValues implements ValueFetcherDocValues {
private final LeafReaderContext leafReaderContext;
private final ValueFetcher valueFetcher;
private final SourceProvider sourceProvider;
private final SortedSet<BytesRef> values;
private Iterator<BytesRef> iterator;
public SourceValueFetcherSortedBinaryDocValues(
LeafReaderContext leafReaderContext,
ValueFetcher valueFetcher,
SourceProvider sourceProvider
) {
this.leafReaderContext = leafReaderContext;
this.valueFetcher = valueFetcher;
this.sourceProvider = sourceProvider;
values = new TreeSet<>();
}
@Override
public boolean advanceExact(int doc) throws IOException {
values.clear();
Source source = sourceProvider.getSource(leafReaderContext, doc);
for (Object object : valueFetcher.fetchValues(source, doc, Collections.emptyList())) {
values.add(new BytesRef(object.toString()));
}
iterator = values.iterator();
return values.isEmpty() == false;
}
@Override
public int docValueCount() {
return values.size();
}
@Override
public BytesRef nextValue() {
assert iterator.hasNext();
return iterator.next();
}
}
}
| SourceValueFetcherSortedBinaryDocValues |
java | spring-projects__spring-framework | spring-orm/src/main/java/org/springframework/orm/jpa/persistenceunit/PersistenceUnitManager.java | {
"start": 1447,
"end": 2369
} | interface ____ {
/**
* Obtain the default PersistenceUnitInfo from this manager.
* @return the PersistenceUnitInfo (never {@code null})
* @throws IllegalStateException if there is no default PersistenceUnitInfo defined
* or it has already been obtained
*/
PersistenceUnitInfo obtainDefaultPersistenceUnitInfo() throws IllegalStateException;
/**
* Obtain the specified PersistenceUnitInfo from this manager.
* @param persistenceUnitName the name of the desired persistence unit
* @return the PersistenceUnitInfo (never {@code null})
* @throws IllegalArgumentException if no PersistenceUnitInfo with the given
* name is defined
* @throws IllegalStateException if the PersistenceUnitInfo with the given
* name has already been obtained
*/
PersistenceUnitInfo obtainPersistenceUnitInfo(String persistenceUnitName)
throws IllegalArgumentException, IllegalStateException;
}
| PersistenceUnitManager |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/proxy/TestProxy.java | {
"start": 607,
"end": 1144
} | class ____ implements InvocationHandler {
private int id;
private String name;
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
return null;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| VO |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/layout/Log4j1SyslogLayout.java | {
"start": 2272,
"end": 2760
} | class ____ extends AbstractStringLayout {
/**
* Builds a SyslogLayout.
* <p>The main arguments are</p>
* <ul>
* <li>facility: The Facility is used to try to classify the message.</li>
* <li>includeNewLine: If true a newline will be appended to the result.</li>
* <li>escapeNL: Pattern to use for replacing newlines.</li>
* <li>charset: The character set.</li>
* </ul>
* @param <B> the builder type
*/
public static | Log4j1SyslogLayout |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java | {
"start": 1264,
"end": 2822
} | class ____ extends SourceValueFetcherIndexFieldData.Builder<SortedNumericDoubleValues> {
public Builder(
String fieldName,
ValuesSourceType valuesSourceType,
ValueFetcher valueFetcher,
SourceProvider sourceProvider,
ToScriptFieldFactory<SortedNumericDoubleValues> toScriptFieldFactory
) {
super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory);
}
@Override
public SourceValueFetcherSortedDoubleIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) {
return new SourceValueFetcherSortedDoubleIndexFieldData(
fieldName,
valuesSourceType,
valueFetcher,
sourceProvider,
toScriptFieldFactory
);
}
}
protected SourceValueFetcherSortedDoubleIndexFieldData(
String fieldName,
ValuesSourceType valuesSourceType,
ValueFetcher valueFetcher,
SourceProvider sourceProvider,
ToScriptFieldFactory<SortedNumericDoubleValues> toScriptFieldFactory
) {
super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory);
}
@Override
public SourceValueFetcherLeafFieldData<SortedNumericDoubleValues> loadDirect(LeafReaderContext context) {
return new SourceValueFetcherSortedDoubleLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider);
}
private static | Builder |
java | apache__spark | common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationFactory.java | {
"start": 3462,
"end": 11523
} | class ____ {
public final String collationName;
public final String provider;
private final Collator collator;
public final Comparator<UTF8String> comparator;
/**
* Version of the collation. This is the version of the ICU library Collator.
* For UTF8 Binary the version is set to "1.0". For ICU collations and UTF8_LCASE
* (because it uses ICU mappings) the version is set to the version of the ICU library.
* When using ICU Collator this version is exposed through collator.getVersion().
* Whenever the collation is updated, the version should be updated as well or kept
* for backwards compatibility.
*/
public final String version;
/**
* Returns the sort key of the input UTF8String. Two UTF8String values are equal iff their
* sort keys are equal (compared as byte arrays).
* The sort key is defined as follows for collations without the RTRIM modifier:
* - UTF8_BINARY: It is the bytes of the string.
* - UTF8_LCASE: It is byte array we get by replacing all invalid UTF8 sequences with the
* Unicode replacement character and then converting all characters of the replaced string
* with their lowercase equivalents (the Greek capital and Greek small sigma both map to
* the Greek final sigma).
* - ICU collations: It is the byte array returned by the ICU library for the collated string.
* For strings with the RTRIM modifier, we right-trim the string and return the collation key
* of the resulting right-trimmed string.
*/
public final Function<UTF8String, byte[]> sortKeyFunction;
/**
* Potentially faster way than using comparator to compare two UTF8Strings for equality.
* Falls back to binary comparison if the collation is binary.
*/
public final BiFunction<UTF8String, UTF8String, Boolean> equalsFunction;
/**
* Support for Binary Equality implies that it is possible to check equality on
* byte by byte level. This allows for the usage of binaryEquals call on UTF8Strings
* which is more performant than calls to external ICU library.
*/
public final boolean supportsBinaryEquality;
/**
* Support for Binary Ordering implies that it is possible to check equality and ordering on
* byte by byte level. This allows for the usage of binaryEquals and binaryCompare calls on
* UTF8Strings which is more performant than calls to external ICU library. Support for
* Binary Ordering implies support for Binary Equality.
*/
public final boolean supportsBinaryOrdering;
/**
* Support for Lowercase Equality implies that it is possible to check equality on byte by
* byte level, but only after calling "UTF8String.lowerCaseCodePoints" on both arguments.
* This allows custom collation support for UTF8_LCASE collation in various Spark
* expressions, as this particular collation is not supported by the external ICU library.
*/
public final boolean supportsLowercaseEquality;
/**
* Support for Space Trimming implies that that based on specifier (for now only right trim)
* leading, trailing or both spaces are removed from the input string before comparison.
*/
public final boolean supportsSpaceTrimming;
/**
* Is Utf8 binary type as indicator if collation base type is UTF8 binary. Note currently only
* collations Utf8_Binary and Utf8_Binary_RTRIM are considered as Utf8 binary type.
*/
public final boolean isUtf8BinaryType;
/**
* Is Utf8 lcase type as indicator if collation base type is UTF8 lcase. Note currently only
* collations Utf8_Lcase and Utf8_Lcase_RTRIM are considered as Utf8 Lcase type.
*/
public final boolean isUtf8LcaseType;
public Collation(
String collationName,
String provider,
Collator collator,
Comparator<UTF8String> comparator,
String version,
Function<UTF8String, byte[]> sortKeyFunction,
BiFunction<UTF8String, UTF8String, Boolean> equalsFunction,
boolean isUtf8BinaryType,
boolean isUtf8LcaseType,
boolean supportsSpaceTrimming) {
this.collationName = collationName;
this.provider = provider;
this.collator = collator;
this.comparator = comparator;
this.version = version;
this.sortKeyFunction = sortKeyFunction;
this.isUtf8BinaryType = isUtf8BinaryType;
this.isUtf8LcaseType = isUtf8LcaseType;
this.equalsFunction = equalsFunction;
this.supportsSpaceTrimming = supportsSpaceTrimming;
this.supportsBinaryEquality = !supportsSpaceTrimming && isUtf8BinaryType;
this.supportsBinaryOrdering = !supportsSpaceTrimming && isUtf8BinaryType;
this.supportsLowercaseEquality = !supportsSpaceTrimming && isUtf8LcaseType;
// No Collation can simultaneously support binary equality and lowercase equality
assert(!supportsBinaryEquality || !supportsLowercaseEquality);
// Null is a special provider for indeterminate collation.
assert(SUPPORTED_PROVIDERS.contains(provider) || provider.equals(PROVIDER_NULL));
}
public Collator getCollator() {
return collator;
}
/**
* Collation ID is defined as 32-bit integer. We specify binary layouts for different classes of
* collations. Classes of collations are differentiated by most significant 3 bits (bit 31, 30
* and 29), bit 31 being most significant and bit 0 being least significant.
* ---
* General collation ID binary layout:
* bit 31: 1 for INDETERMINATE (requires all other bits to be 1 as well), 0 otherwise.
* bit 30: 0 for predefined, 1 for user-defined.
* Following bits are specified for predefined collations:
* bit 29: 0 for UTF8_BINARY, 1 for ICU collations.
* bit 28-24: Reserved.
* bit 23-22: Reserved for version.
* bit 21-19 Zeros, reserved for future trimmings.
* bit 18 0 = none, 1 = right trim.
* bit 17-0: Depend on collation family.
* ---
* INDETERMINATE collation ID binary layout:
* bit 31-0: 1
* INDETERMINATE collation ID is equal to -1.
* ---
* User-defined collation ID binary layout:
* bit 31: 0
* bit 30: 1
* bit 29-0: Undefined, reserved for future use.
* ---
* UTF8_BINARY collation ID binary layout:
* bit 31-24: Zeroes.
* bit 23-22: Zeroes, reserved for version.
* bit 21-19 Zeros, reserved for future trimmings.
* bit 18 0 = none, 1 = right trim.
* bit 17-3: Zeroes.
* bit 2: 0, reserved for accent sensitivity.
* bit 1: 0, reserved for uppercase and case-insensitive.
* bit 0: 0 = case-sensitive, 1 = lowercase.
* ---
* ICU collation ID binary layout:
* bit 31-30: Zeroes.
* bit 29: 1
* bit 28-24: Zeroes.
* bit 23-22: Zeroes, reserved for version.
* bit 21-18: Reserved for space trimming.
* 0000 = none, 0001 = right trim. Bits 21-19 remain reserved and fixed to 0.
* bit 17: 0 = case-sensitive, 1 = case-insensitive.
* bit 16: 0 = accent-sensitive, 1 = accent-insensitive.
* bit 15-14: Zeroes, reserved for punctuation sensitivity.
* bit 13-12: Zeroes, reserved for first letter preference.
* bit 11-0: Locale ID as specified in `ICULocaleToId` mapping.
* ---
* Some illustrative examples of collation name to ID mapping:
* - UTF8_BINARY -> 0
* - UTF8_BINARY_RTRIM -> 0x00040000
* - UTF8_LCASE -> 1
* - UTF8_LCASE_RTRIM -> 0x00040001
* - UNICODE -> 0x20000000
* - UNICODE_AI -> 0x20010000
* - UNICODE_CI -> 0x20020000
* - UNICODE_RTRIM -> 0x20040000
* - UNICODE_CI_AI -> 0x20030000
* - UNICODE_CI_RTRIM -> 0x20060000
* - UNICODE_AI_RTRIM -> 0x20050000
* - UNICODE_CI_AI_RTRIM-> 0x20070000
* - af -> 0x20000001
* - af_CI_AI -> 0x20030001
*/
private abstract static | Collation |
java | apache__camel | components/camel-azure/camel-azure-servicebus/src/test/java/org/apache/camel/component/azure/servicebus/ServiceBusHeaderFilterStrategyTest.java | {
"start": 1309,
"end": 1952
} | class ____ {
private final ServiceBusHeaderFilterStrategy headerFilterStrategy = new ServiceBusHeaderFilterStrategy();
@ParameterizedTest
@ArgumentsSource(HeaderArgumentsProvider.class)
void testApplyFilterToCamelHeadersPassesKnownTypes(String headerName, Object headerValue) {
assertThat(headerFilterStrategy.applyFilterToCamelHeaders(headerName, headerValue, null)).isFalse();
}
@Test
void testApplyFilterToCamelHeadersFiltersUnknownType() {
assertThat(headerFilterStrategy.applyFilterToCamelHeaders("objectHeader", new Object(), null)).isTrue();
}
static | ServiceBusHeaderFilterStrategyTest |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/ExtensibleEnumRegistry.java | {
"start": 1202,
"end": 1313
} | enum ____.
* It's used internally by Maven and can also be used by plugins and extensions to access
* custom | type |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java | {
"start": 719,
"end": 2713
} | class ____ implements LifecycleAction {
public static final String NAME = "TEST_ACTION";
private final List<Step> steps;
private static final ObjectParser<MockAction, Void> PARSER = new ObjectParser<>(NAME, MockAction::new);
private final boolean safe;
public static MockAction parse(XContentParser parser) {
return PARSER.apply(parser, null);
}
public MockAction() {
this(List.of());
}
public MockAction(List<Step> steps) {
this(steps, true);
}
public MockAction(List<Step> steps, boolean safe) {
this.steps = steps;
this.safe = safe;
}
public MockAction(StreamInput in) throws IOException {
this.steps = in.readCollectionAsList(MockStep::new);
this.safe = in.readBoolean();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.endObject();
return builder;
}
@Override
public String getWriteableName() {
return NAME;
}
public List<Step> getSteps() {
return steps;
}
@Override
public boolean isSafeAction() {
return safe;
}
@Override
public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
return new ArrayList<>(steps);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(steps.stream().map(MockStep::new).toList());
out.writeBoolean(safe);
}
@Override
public int hashCode() {
return Objects.hash(steps, safe);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
MockAction other = (MockAction) obj;
return Objects.equals(steps, other.steps) && Objects.equals(safe, other.safe);
}
}
| MockAction |
java | apache__kafka | streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/JsonTimestampExtractor.java | {
"start": 1170,
"end": 1927
} | class ____ implements TimestampExtractor {
@Override
public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
if (record.value() instanceof PageViewTypedDemo.PageView) {
return ((PageViewTypedDemo.PageView) record.value()).timestamp;
}
if (record.value() instanceof PageViewTypedDemo.UserProfile) {
return ((PageViewTypedDemo.UserProfile) record.value()).timestamp;
}
if (record.value() instanceof JsonNode) {
return ((JsonNode) record.value()).get("timestamp").longValue();
}
throw new IllegalArgumentException("JsonTimestampExtractor cannot recognize the record value " + record.value());
}
}
| JsonTimestampExtractor |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conversion/nativetypes/ByteTarget.java | {
"start": 210,
"end": 1840
} | class ____ {
private byte b;
private Byte bb;
private short s;
private Short ss;
private int i;
private Integer ii;
private long l;
private Long ll;
private float f;
private Float ff;
private double d;
private Double dd;
public byte getB() {
return b;
}
public void setB(byte b) {
this.b = b;
}
public Byte getBb() {
return bb;
}
public void setBb(Byte bb) {
this.bb = bb;
}
public short getS() {
return s;
}
public void setS(short s) {
this.s = s;
}
public Short getSs() {
return ss;
}
public void setSs(Short ss) {
this.ss = ss;
}
public int getI() {
return i;
}
public void setI(int i) {
this.i = i;
}
public Integer getIi() {
return ii;
}
public void setIi(Integer ii) {
this.ii = ii;
}
public long getL() {
return l;
}
public void setL(long l) {
this.l = l;
}
public Long getLl() {
return ll;
}
public void setLl(Long ll) {
this.ll = ll;
}
public float getF() {
return f;
}
public void setF(float f) {
this.f = f;
}
public Float getFf() {
return ff;
}
public void setFf(Float ff) {
this.ff = ff;
}
public double getD() {
return d;
}
public void setD(double d) {
this.d = d;
}
public Double getDd() {
return dd;
}
public void setDd(Double dd) {
this.dd = dd;
}
}
| ByteTarget |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/TestNullKeyMap.java | {
"start": 160,
"end": 570
} | class ____ extends TestCase {
public void test_0 () throws Exception {
HashMap map = new HashMap();
map.put(null, 123);
String text = JSON.toJSONString(map);
Assert.assertEquals("{null:123}", text);
HashMap map2 = JSON.parseObject(text, HashMap.class);
Assert.assertEquals(map.get(null), map2.get(null));
}
}
| TestNullKeyMap |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_hasOnlyElementsOfTypes_Test.java | {
"start": 968,
"end": 1432
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
private final Class<?>[] types = { CharSequence.class };
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.hasOnlyElementsOfTypes(types);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasOnlyElementsOfTypes(getInfo(assertions), internalArray(), types);
}
}
| AtomicReferenceArrayAssert_hasOnlyElementsOfTypes_Test |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/conversion/ArrayObjectArrayConverter.java | {
"start": 5033,
"end": 8419
} | interface ____<E> extends Serializable {
E[] convert(GenericArrayData internal);
}
// --------------------------------------------------------------------------------------------
// Shared code
// --------------------------------------------------------------------------------------------
void allocateWriter(int length) {
if (reuseWriter.getNumElements() != length) {
reuseWriter = new BinaryArrayWriter(reuseArray, length, elementSize);
} else {
reuseWriter.reset();
}
}
void writeElement(int pos, E element) {
if (element == null) {
writerNullSetter.setNull(reuseWriter, pos);
} else {
writerValueSetter.setValue(reuseWriter, pos, elementConverter.toInternal(element));
}
}
BinaryArrayData completeWriter() {
reuseWriter.complete();
return reuseArray;
}
// --------------------------------------------------------------------------------------------
// Factory method
// --------------------------------------------------------------------------------------------
public static ArrayObjectArrayConverter<?> create(DataType dataType) {
return createForElement(dataType.getChildren().get(0));
}
public static <E> ArrayObjectArrayConverter<E> createForElement(DataType elementDataType) {
final LogicalType elementType = elementDataType.getLogicalType();
return new ArrayObjectArrayConverter<>(
(Class<E>) primitiveToWrapper(elementDataType.getConversionClass()),
BinaryArrayData.calculateFixLengthPartSize(elementType),
BinaryArrayWriter.createNullSetter(elementType),
BinaryWriter.createValueSetter(elementType),
createGenericToJavaArrayConverter(elementType),
ArrayData.createElementGetter(elementType),
(DataStructureConverter<Object, E>)
DataStructureConverters.getConverter(elementDataType));
}
@SuppressWarnings("unchecked")
private static <E> GenericToJavaArrayConverter<E> createGenericToJavaArrayConverter(
LogicalType elementType) {
switch (elementType.getTypeRoot()) {
case BOOLEAN:
return internal -> (E[]) ArrayUtils.toObject(internal.toBooleanArray());
case TINYINT:
return internal -> (E[]) ArrayUtils.toObject(internal.toByteArray());
case SMALLINT:
return internal -> (E[]) ArrayUtils.toObject(internal.toShortArray());
case INTEGER:
return internal -> (E[]) ArrayUtils.toObject(internal.toIntArray());
case BIGINT:
return internal -> (E[]) ArrayUtils.toObject(internal.toLongArray());
case FLOAT:
return internal -> (E[]) ArrayUtils.toObject(internal.toFloatArray());
case DOUBLE:
return internal -> (E[]) ArrayUtils.toObject(internal.toDoubleArray());
case DISTINCT_TYPE:
return createGenericToJavaArrayConverter(
((DistinctType) elementType).getSourceType());
default:
return internal -> {
throw new IllegalStateException();
};
}
}
}
| GenericToJavaArrayConverter |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java | {
"start": 571,
"end": 1201
} | class ____ implements QueryRewriteInterceptor {
@Override
public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) {
if (queryBuilder instanceof SparseVectorQueryBuilder sparseVectorQueryBuilder) {
return new InterceptedInferenceSparseVectorQueryBuilder(sparseVectorQueryBuilder);
} else {
throw new IllegalStateException("Unexpected query builder type: " + queryBuilder.getClass());
}
}
@Override
public String getQueryName() {
return SparseVectorQueryBuilder.NAME;
}
}
| SemanticSparseVectorQueryRewriteInterceptor |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletableSubscribeOn.java | {
"start": 874,
"end": 1478
} | class ____ extends Completable {
final CompletableSource source;
final Scheduler scheduler;
public CompletableSubscribeOn(CompletableSource source, Scheduler scheduler) {
this.source = source;
this.scheduler = scheduler;
}
@Override
protected void subscribeActual(final CompletableObserver observer) {
final SubscribeOnObserver parent = new SubscribeOnObserver(observer, source);
observer.onSubscribe(parent);
Disposable f = scheduler.scheduleDirect(parent);
parent.task.replace(f);
}
static final | CompletableSubscribeOn |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaUpdateAssociationSetNullValueTest.java | {
"start": 2675,
"end": 2943
} | class ____ {
@Id
private Long id;
private String name;
public Child() {
}
public Child(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
}
}
| Child |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TestParametersNotInitializedTest.java | {
"start": 2537,
"end": 3065
} | class ____ extends BlockJUnit4ClassRunner {
public MyRunner(Class<?> testClass) throws InitializationError {
super(testClass);
}
}
""")
.expectUnchanged()
.addInputLines(
"Test.java",
"""
import com.google.testing.junit.testparameterinjector.TestParameter;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(MyRunner.class)
public | MyRunner |
java | apache__camel | components/camel-mina/src/test/java/org/apache/camel/component/mina/MinaUdpNoCamelTest.java | {
"start": 3121,
"end": 4432
} | class ____ extends IoHandlerAdapter {
private final String host;
private final int port;
private final NioDatagramAcceptor acceptor;
private int numMessagesReceived;
private UDPServer(String host, int port) {
this.host = host;
this.port = port;
acceptor = new NioDatagramAcceptor();
DatagramSessionConfig sessionConfig = acceptor.getSessionConfig();
sessionConfig.setReuseAddress(true);
acceptor.getFilterChain().addLast("codec", new ProtocolCodecFilter(codecFactory));
acceptor.getFilterChain().addLast("logger", new LoggingFilter());
acceptor.setHandler(this);
}
public void listen() throws IOException {
acceptor.bind(new InetSocketAddress(host, port));
}
public void close() {
acceptor.unbind();
}
@Override
public void messageReceived(IoSession session, Object message) {
LOGGER.debug("UDPServer Received body: {}", message);
numMessagesReceived++;
}
@Override
public void exceptionCaught(IoSession session, Throwable cause) {
LOGGER.error("Ooops! Something went wrong :|", cause);
}
}
private final | UDPServer |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java | {
"start": 6509,
"end": 8531
} | class ____ extends SearchExtBuilder {
public static TermVectorsFetchBuilder fromXContent(XContentParser parser) throws IOException {
String field;
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_STRING) {
field = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token);
}
if (field == null) {
throw new ParsingException(parser.getTokenLocation(), "no fields specified for " + TermVectorsFetchSubPhase.NAME);
}
return new TermVectorsFetchBuilder(field);
}
private final String field;
private TermVectorsFetchBuilder(String field) {
this.field = field;
}
private TermVectorsFetchBuilder(StreamInput in) throws IOException {
this.field = in.readString();
}
private String getField() {
return field;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TermVectorsFetchBuilder that = (TermVectorsFetchBuilder) o;
return Objects.equals(field, that.field);
}
@Override
public int hashCode() {
return Objects.hash(field);
}
@Override
public String getWriteableName() {
return TermVectorsFetchSubPhase.NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(field);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.field(TermVectorsFetchSubPhase.NAME, field);
}
}
}
| TermVectorsFetchBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/index/IndexesOrderTest.java | {
"start": 2579,
"end": 2879
} | class ____ {
@Id
private Long id;
@Column(name = "name")
private String name;
@Column(name = "is_active")
private Boolean isActive;
@ManyToOne
@JoinColumn(name = "entityb_id")
private EntityB entityB;
}
@Entity(name = "EntityB")
@Table(name = "ENTITY_B")
public static | EntityA |
java | apache__camel | components/camel-jsch/src/generated/java/org/apache/camel/component/scp/ScpComponentConfigurer.java | {
"start": 730,
"end": 3454
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ScpComponent target = (ScpComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": target.setHealthCheckConsumerEnabled(property(camelContext, boolean.class, value)); return true;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": target.setHealthCheckProducerEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "verboselogging":
case "verboseLogging": target.setVerboseLogging(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return boolean.class;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "verboselogging":
case "verboseLogging": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ScpComponent target = (ScpComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return target.isHealthCheckConsumerEnabled();
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return target.isHealthCheckProducerEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "verboselogging":
case "verboseLogging": return target.isVerboseLogging();
default: return null;
}
}
}
| ScpComponentConfigurer |
java | apache__camel | components/camel-box/camel-box-component/src/test/java/org/apache/camel/component/box/BoxEventsManagerIT.java | {
"start": 1755,
"end": 2013
} | class ____ won't be generated again if the generator MOJO finds it under
* src/test/java.
*/
@EnabledIf(value = "org.apache.camel.component.box.AbstractBoxITSupport#hasCredentials",
disabledReason = "Box credentials were not provided")
public | source |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/InPredicateTest.java | {
"start": 943,
"end": 1758
} | class ____ {
@Test
public void testInPredicate(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
CriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Event> cr = cb.createQuery( Event.class );
Root<Event> root = cr.from( Event.class );
List<String> names = getNames();
cr.select( root ).where( root.get( "name" ).in( names ) );
// This should trigger the error from HHH-15895 as QuerySqmImpl
// tries to handle the Criteria parameters
session.createQuery( cr );
} );
}
private List<String> getNames() {
int maxNames = 100000;
List<String> names = new ArrayList<>( maxNames );
for ( int i = 0; i < maxNames; i++ ) {
names.add( "abc" + i );
}
return names;
}
@Entity(name = "Event")
@Table(name = "EVENT_TABLE")
public static | InPredicateTest |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/utils/ClassUtils.java | {
"start": 7449,
"end": 8166
} | class ____ classes and interfaces.
* @return The canonical name of the underlying class.
*/
public static String getCanonicalName(Class cls) {
Objects.requireNonNull(cls, "cls");
return cls.getCanonicalName();
}
/**
* Gets and returns the canonical name of the underlying class.
*
* @param obj Object instance.
* @return The canonical name of the underlying class.
*/
public static String getCanonicalName(Object obj) {
Objects.requireNonNull(obj, "obj");
return obj.getClass().getCanonicalName();
}
/**
* Gets and returns the simple name of the underlying class.
*
* @param cls Instances of the | represent |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/LambdaUtil.java | {
"start": 1093,
"end": 2722
} | class ____ {
private LambdaUtil() {
throw new AssertionError();
}
/**
* This method supplies all elements from the input to the consumer. Exceptions that happen on
* elements are suppressed until all elements are processed. If exceptions happened for one or
* more of the inputs, they are reported in a combining suppressed exception.
*
* @param inputs iterator for all inputs to the throwingConsumer.
* @param throwingConsumer this consumer will be called for all elements delivered by the input
* iterator.
* @param <T> the type of input.
* @throws Exception collected exceptions that happened during the invocation of the consumer on
* the input elements.
*/
public static <T> void applyToAllWhileSuppressingExceptions(
Iterable<T> inputs, ThrowingConsumer<T, ? extends Exception> throwingConsumer)
throws Exception {
if (inputs != null && throwingConsumer != null) {
Exception exception = null;
for (T input : inputs) {
if (input != null) {
try {
throwingConsumer.accept(input);
} catch (Exception ex) {
exception = ExceptionUtils.firstOrSuppressed(ex, exception);
}
}
}
if (exception != null) {
throw exception;
}
}
}
/**
* Runs the given runnable with the given ClassLoader as the thread's {@link
* Thread#setContextClassLoader(ClassLoader) context | LambdaUtil |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_haveAtLeast_Test.java | {
"start": 1105,
"end": 1603
} | class ____ extends IterableAssertBaseTest {
private static Condition<Object> condition;
@BeforeAll
static void beforeOnce() {
condition = new TestCondition<>();
}
@Override
protected ConcreteIterableAssert<Object> invoke_api_method() {
return assertions.haveAtLeast(2, condition);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertHaveAtLeast(getInfo(assertions), getActual(assertions), 2, condition);
}
}
| IterableAssert_haveAtLeast_Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/path/JSONPath_field_access_filter_in_decimal.java | {
"start": 4299,
"end": 4926
} | class ____ {
private BigDecimal id;
private String name;
public Entity(Integer id, String name){
if (id == null) {
this.id = null;
} else {
this.id = BigDecimal.valueOf(id);
}
this.name = name;
}
public BigDecimal getId() {
return id;
}
public void setId(BigDecimal id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| Entity |
java | netty__netty | testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java | {
"start": 977,
"end": 1578
} | class ____ extends DatagramUnicastInetTest {
@BeforeAll
public static void assumeIpv6Supported() {
try {
Channel channel = SelectorProvider.provider().openDatagramChannel(StandardProtocolFamily.INET6);
channel.close();
} catch (UnsupportedOperationException e) {
throw new TestAbortedException("IPv6 not supported", e);
} catch (IOException ignore) {
// Ignore
}
}
@Override
protected SocketProtocolFamily socketProtocolFamily() {
return SocketProtocolFamily.INET6;
}
}
| DatagramUnicastIPv6Test |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/UrlEncodeEvaluator.java | {
"start": 5125,
"end": 5894
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory val;
private final Function<DriverContext, BreakingBytesRefBuilder> scratch;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val,
Function<DriverContext, BreakingBytesRefBuilder> scratch) {
this.source = source;
this.val = val;
this.scratch = scratch;
}
@Override
public UrlEncodeEvaluator get(DriverContext context) {
return new UrlEncodeEvaluator(source, val.get(context), scratch.apply(context), context);
}
@Override
public String toString() {
return "UrlEncodeEvaluator[" + "val=" + val + "]";
}
}
}
| Factory |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java | {
"start": 828,
"end": 3008
} | class ____ implements ExecutableCondition {
public static final String TYPE = "script";
private static final Result MET = new Result(null, TYPE, true);
private static final Result UNMET = new Result(null, TYPE, false);
private final Script script;
private final WatcherConditionScript.Factory scriptFactory;
public ScriptCondition(Script script) {
this.script = script;
this.scriptFactory = null;
}
ScriptCondition(Script script, ScriptService scriptService) {
this.script = script;
this.scriptFactory = scriptService.compile(script, WatcherConditionScript.CONTEXT);
}
public Script getScript() {
return script;
}
public static ScriptCondition parse(ScriptService scriptService, String watchId, XContentParser parser) throws IOException {
try {
Script script = Script.parse(parser);
return new ScriptCondition(script, scriptService);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException(
"could not parse [{}] condition for watch [{}]. failed to parse script",
pe,
TYPE,
watchId
);
}
}
@Override
public Result execute(WatchExecutionContext ctx) {
return doExecute(ctx);
}
public Result doExecute(WatchExecutionContext ctx) {
WatcherConditionScript conditionScript = scriptFactory.newInstance(script.getParams(), ctx);
return conditionScript.execute() ? MET : UNMET;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return script.toXContent(builder, params);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ScriptCondition condition = (ScriptCondition) o;
return script.equals(condition.script);
}
@Override
public int hashCode() {
return script.hashCode();
}
@Override
public String type() {
return TYPE;
}
}
| ScriptCondition |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injectionpoint/DummyInjectionPoint.java | {
"start": 317,
"end": 1283
} | class ____ implements InjectionPoint {
private final Type type;
private final Set<Annotation> qualifiers;
DummyInjectionPoint(Type type, Annotation... qualifiers) {
this.type = type;
this.qualifiers = Set.of(qualifiers);
}
@Override
public Type getType() {
return type;
}
@Override
public Set<Annotation> getQualifiers() {
return qualifiers;
}
@Override
public Bean<?> getBean() {
throw new UnsupportedOperationException();
}
@Override
public Member getMember() {
throw new UnsupportedOperationException();
}
@Override
public Annotated getAnnotated() {
throw new UnsupportedOperationException();
}
@Override
public boolean isDelegate() {
throw new UnsupportedOperationException();
}
@Override
public boolean isTransient() {
throw new UnsupportedOperationException();
}
}
| DummyInjectionPoint |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/bean/scope/CustomScopeWithoutContextTest.java | {
"start": 609,
"end": 1431
} | class ____ {
@RegisterExtension
ArcTestContainer container = new ArcTestContainer(MyScope.class, MyNormalScope.class, MyBean.class, MyNormalBean.class);
@Test
public void pseudoScope() {
BeanManager bm = Arc.container().beanManager();
Bean<MyBean> bean = (Bean<MyBean>) bm.resolve(bm.getBeans(MyBean.class));
assertEquals(MyScope.class, bean.getScope());
}
@Test
public void normalScope() {
BeanManager bm = Arc.container().beanManager();
Bean<MyNormalBean> bean = (Bean<MyNormalBean>) bm.resolve(bm.getBeans(MyNormalBean.class));
assertEquals(MyNormalScope.class, bean.getScope());
}
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@Scope
@ | CustomScopeWithoutContextTest |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/parser/ParserConfig.java | {
"start": 59018,
"end": 59138
} | interface ____ {
Class<?> handler(String typeName, Class<?> expectClass, int features);
}
}
| AutoTypeCheckHandler |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/initializer/subpkg/MySuperclass.java | {
"start": 100,
"end": 780
} | class ____ {
public boolean publicInitializerCalled;
public boolean protectedInitializerCalled;
public boolean packagePrivateInitializerCalled;
public boolean privateInitializerCalled;
@Inject
public void publicInject(MyDependency ignored) {
publicInitializerCalled = true;
}
@Inject
protected void protectedInject(MyDependency ignored) {
protectedInitializerCalled = true;
}
@Inject
void packagePrivateInject(MyDependency ignored) {
packagePrivateInitializerCalled = true;
}
@Inject
private void privateInject(MyDependency ignored) {
privateInitializerCalled = true;
}
}
| MySuperclass |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/model/PropertiesTest.java | {
"start": 5094,
"end": 10746
} | class ____ {
@Test
void testWriteOperationBehavior() {
// Create a Model with initial properties
Model model = new Model();
// Set initial properties using setProperties to establish the backend
Properties initialProps = new Properties();
initialProps.setProperty("initial.key", "initial.value");
model.setProperties(initialProps);
// Get the WrapperProperties instance
Properties wrapperProps = model.getProperties();
// First read - should initialize cache
assertEquals("initial.value", wrapperProps.getProperty("initial.key"));
// Simulate external change by directly calling setProperties (another WrapperProperties instance)
Properties externalProps = new Properties();
externalProps.setProperty("initial.key", "externally.modified");
externalProps.setProperty("external.key", "external.value");
model.setProperties(externalProps);
// Read again - should return fresh value (no caching in current implementation)
assertEquals("externally.modified", wrapperProps.getProperty("initial.key"));
// Now perform a write operation
wrapperProps.setProperty("new.key", "new.value");
// Read the initial key again - should return the current value
assertEquals("externally.modified", wrapperProps.getProperty("initial.key"));
// Read the external key that was set before the write operation
assertEquals("external.value", wrapperProps.getProperty("external.key"));
// Read the new key that was just set
assertEquals("new.value", wrapperProps.getProperty("new.key"));
}
@Test
void testMultipleWrapperPropertiesShareSameBackend() {
// Create a Model with initial properties
Model model = new Model();
Properties initialProps = new Properties();
initialProps.setProperty("shared.key", "initial.value");
model.setProperties(initialProps);
// Get two WrapperProperties instances from the same Model
Properties wrapper1 = model.getProperties();
Properties wrapper2 = model.getProperties();
// Both wrappers should read the same initial value
assertEquals("initial.value", wrapper1.getProperty("shared.key"));
assertEquals("initial.value", wrapper2.getProperty("shared.key"));
// Write through wrapper1
wrapper1.setProperty("from.wrapper1", "value1");
// wrapper2 should see the changes immediately (no caching)
assertEquals("value1", wrapper2.getProperty("from.wrapper1"));
assertEquals("initial.value", wrapper2.getProperty("shared.key"));
// Now wrapper2 performs a write operation
wrapper2.setProperty("from.wrapper2", "value2");
// Both wrappers should see all changes immediately
assertEquals("value1", wrapper1.getProperty("from.wrapper1"));
assertEquals("value2", wrapper1.getProperty("from.wrapper2"));
assertEquals("value1", wrapper2.getProperty("from.wrapper1"));
assertEquals("value2", wrapper2.getProperty("from.wrapper2"));
// Add another property through wrapper1
wrapper1.setProperty("another.key", "another.value");
assertEquals("another.value", wrapper1.getProperty("another.key"));
assertEquals("another.value", wrapper2.getProperty("another.key"));
}
@Test
void testVariousWriteOperations() {
// Create a Model with initial properties
Model model = new Model();
Properties initialProps = new Properties();
initialProps.setProperty("key1", "value1");
model.setProperties(initialProps);
Properties wrapper = model.getProperties();
// Initial read
assertEquals("value1", wrapper.getProperty("key1"));
// Test put() method
wrapper.put("key2", "value2");
assertEquals("value2", wrapper.getProperty("key2"));
// Simulate external change
Properties externalProps1 = new Properties();
externalProps1.setProperty("key1", "modified_after_put");
externalProps1.setProperty("key2", "value2");
externalProps1.setProperty("external.key", "external.value");
model.setProperties(externalProps1);
assertEquals("modified_after_put", wrapper.getProperty("key1"));
// Test remove() method
wrapper.remove("key2");
assertEquals(null, wrapper.getProperty("key2"));
// Simulate external change
Properties externalProps2 = new Properties();
externalProps2.setProperty("key1", "modified_after_remove");
externalProps2.setProperty("external.key", "external.value");
model.setProperties(externalProps2);
assertEquals("modified_after_remove", wrapper.getProperty("key1"));
// Test putAll() method
Properties newProps = new Properties();
newProps.setProperty("putall.key1", "putall.value1");
newProps.setProperty("putall.key2", "putall.value2");
wrapper.putAll(newProps);
assertEquals("putall.value1", wrapper.getProperty("putall.key1"));
assertEquals("putall.value2", wrapper.getProperty("putall.key2"));
}
}
}
| WrapperPropertiesBehaviorTests |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/NettyRoutingFilterCompatibleTests.java | {
"start": 1541,
"end": 1855
} | class ____ exception with YAML or Properties parsing.
*
* {@link NettyRoutingFilter#getHttpClient(Route, ServerWebExchange)}
* {@link NettyRoutingFilter#getResponseTimeout(Route)}
*
* @author echooymxq
**/
@SpringBootTest(webEnvironment = RANDOM_PORT)
@DirtiesContext
@ActiveProfiles("netty-routing-filter")
| cast |
java | quarkusio__quarkus | extensions/reactive-mssql-client/runtime/src/main/java/io/quarkus/reactive/mssql/client/runtime/MsSQLServiceBindingConverter.java | {
"start": 436,
"end": 790
} | class ____ implements ServiceBindingConverter {
@Override
public Optional<ServiceBindingConfigSource> convert(List<ServiceBinding> serviceBindings) {
return ServiceBinding.singleMatchingByType("sqlserver", serviceBindings)
.map(new DatasourceServiceBindingConfigSourceFactory.Reactive());
}
}
| MsSQLServiceBindingConverter |
java | quarkusio__quarkus | integration-tests/liquibase-mongodb/src/test/java/io/quarkus/it/liquibase/mongodb/UserResourceTest.java | {
"start": 938,
"end": 1751
} | class ____ {
@Inject
@Named("users")
MongoClient mongoClient;
@Test
public void testTheEndpoint() {
// assert that a fruit exist as one has been created in the changelog
List<User> list = get("/users").as(new TypeRef<>() {
});
Assertions.assertEquals(1, list.size());
}
@Test
public void validateTheIdx() {
// check that the index that the changelog created exist
ListIndexesIterable<Document> indexes = mongoClient.getDatabase("users").getCollection("Users").listIndexes();
Set<String> names = StreamSupport.stream(indexes.spliterator(), false)
.map(doc -> doc.getString("name"))
.collect(Collectors.toSet());
Assertions.assertTrue(names.contains("emailIdx"));
}
}
| UserResourceTest |
java | quarkusio__quarkus | extensions/mailer/deployment/src/test/java/io/quarkus/mailer/InvalidEmailTest.java | {
"start": 406,
"end": 2542
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addClasses(Sender.class));
@Inject
MockMailbox mockMailbox;
@Inject
Sender sender;
@Test
public void testInvalidTo() {
List<String> to = List.of("clement@test.io", "inv alid@quarkus.io", "max@test.io");
List<String> cc = List.of();
List<String> bcc = List.of();
Assertions.assertThatThrownBy(() -> sender.send(to, cc, bcc).await().atMost(Duration.ofSeconds(5)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Unable to send an email, an email address is invalid")
.hasMessageNotContaining("@");
Assertions.assertThat(mockMailbox.getTotalMessagesSent()).isEqualTo(0);
}
@Test
public void testInvalidCC() {
List<String> cc = List.of("clement@test.io", "inv alid@quarkus.io", "max@test.io");
List<String> to = List.of();
List<String> bcc = List.of();
Assertions.assertThatThrownBy(() -> sender.send(to, cc, bcc).await().atMost(Duration.ofSeconds(5)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Unable to send an email, an email address is invalid")
.hasMessageNotContaining("@");
Assertions.assertThat(mockMailbox.getTotalMessagesSent()).isEqualTo(0);
}
@Test
public void testInvalidBCC() {
List<String> bcc = List.of("clement@test.io", "inv alid@quarkus.io", "max@test.io");
List<String> to = List.of();
List<String> cc = List.of();
Assertions.assertThatThrownBy(() -> sender.send(to, cc, bcc).await().atMost(Duration.ofSeconds(5)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Unable to send an email, an email address is invalid")
.hasMessageNotContaining("@");
Assertions.assertThat(mockMailbox.getTotalMessagesSent()).isEqualTo(0);
}
@Singleton
static | InvalidEmailTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BufferSorter.java | {
"start": 1547,
"end": 1832
} | class ____ the point at which sort should
* happen based on the memory consumed so far by the buffer and the data
* structures maintained by an implementation of this interface. That is why
* a method is provided to get the memory consumed so far by the datastructures
* in the | decides |
java | apache__dubbo | dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/ConfigFactory.java | {
"start": 1426,
"end": 7397
} | class ____ {
private static Map<String, Method> CONFIG_METHODS;
private final FrameworkModel frameworkModel;
private volatile Map<String, OpenAPIConfig> configMap;
public ConfigFactory(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
}
private static Environment getEnvironment(FrameworkModel frameworkModel) {
return frameworkModel.defaultApplication().modelEnvironment();
}
public OpenAPIConfig getConfig(String group) {
return getConfigMap().get(group);
}
public OpenAPIConfig getGlobalConfig() {
return getConfigMap().get(Constants.GLOBAL_GROUP);
}
private Map<String, OpenAPIConfig> getConfigMap() {
if (configMap == null) {
synchronized (this) {
if (configMap == null) {
configMap = readConfigMap();
}
}
}
return configMap;
}
private Map<String, OpenAPIConfig> readConfigMap() {
Map<String, OpenAPIConfig> map = new HashMap<>();
Environment environment = getEnvironment(frameworkModel);
Configuration configuration = environment.getConfiguration();
List<Map<String, String>> configMaps = environment.getConfigurationMaps();
Set<String> allKeys = new HashSet<>();
for (Map<String, String> configMap : configMaps) {
for (String key : configMap.keySet()) {
if (key.startsWith(H2_SETTINGS_OPENAPI_PREFIX)) {
allKeys.add(key);
}
}
}
int len = H2_SETTINGS_OPENAPI_PREFIX.length();
Map<Pair<String, String>, TreeMap<Integer, String>> valuesMap = new HashMap<>();
for (String fullKey : allKeys) {
if (fullKey.length() > len) {
char c = fullKey.charAt(len);
String group, key;
if (c == '.') {
group = StringUtils.EMPTY_STRING;
key = fullKey.substring(len + 1);
} else if (c == 's') {
int end = fullKey.indexOf('.', len + 1);
group = fullKey.substring(len + 1, end);
key = fullKey.substring(end + 1);
} else {
continue;
}
int brkStart = key.lastIndexOf('[');
if (brkStart > 0) {
try {
String value = configuration.getString(fullKey);
if (StringUtils.isEmpty(value)) {
continue;
}
int index = Integer.parseInt(key.substring(brkStart + 1, key.length() - 1));
valuesMap
.computeIfAbsent(Pair.of(group, key.substring(0, brkStart)), k -> new TreeMap<>())
.put(index, value);
} catch (NumberFormatException ignored) {
}
continue;
}
applyConfigValue(map, group, key, configuration.getString(fullKey));
}
}
for (Map.Entry<Pair<String, String>, TreeMap<Integer, String>> entry : valuesMap.entrySet()) {
Pair<String, String> pair = entry.getKey();
String value = StringUtils.join(entry.getValue().values(), ",");
applyConfigValue(map, pair.getKey(), pair.getValue(), value);
}
map.computeIfAbsent(Constants.GLOBAL_GROUP, k -> new OpenAPIConfig());
return map;
}
private static void applyConfigValue(Map<String, OpenAPIConfig> map, String group, String key, String value) {
if (value == null || value.isEmpty()) {
return;
}
OpenAPIConfig config = map.computeIfAbsent(group, k -> new OpenAPIConfig());
int index = key.indexOf("settings.");
if (index == 0) {
Map<String, String> settings = config.getSettings();
if (settings == null) {
config.setSettings(settings = new HashMap<>());
}
settings.put(key.substring(9), value);
return;
}
Map<String, Method> configMethods = CONFIG_METHODS;
if (configMethods == null) {
configMethods = new HashMap<>();
for (Method method : OpenAPIConfig.class.getMethods()) {
String name = toConfigName(method);
if (name != null) {
configMethods.put(name, method);
}
}
CONFIG_METHODS = configMethods;
}
Method method = configMethods.get(key);
if (method == null) {
return;
}
Class<?> valueType = method.getParameterTypes()[0];
try {
if (valueType == String.class) {
method.invoke(config, value);
} else if (valueType == Boolean.class) {
method.invoke(config, StringUtils.toBoolean(value, false));
} else if (valueType.isArray()) {
method.invoke(config, new Object[] {StringUtils.tokenize(value)});
}
} catch (Throwable ignored) {
}
}
private static String toConfigName(Method method) {
if (method.getParameterCount() != 1) {
return null;
}
String name = method.getName();
if (!name.startsWith("set")) {
return null;
}
int len = name.length();
StringBuilder sb = new StringBuilder(len);
for (int i = 3; i < len; i++) {
char c = name.charAt(i);
if (Character.isUpperCase(c)) {
if (i > 3) {
sb.append('-');
}
sb.append(Character.toLowerCase(c));
} else {
sb.append(c);
}
}
return sb.toString();
}
}
| ConfigFactory |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/search/VectorAlgorithm.java | {
"start": 142,
"end": 310
} | enum ____ {
/**
* Brute force algorithm.
*/
FLAT,
/**
* Hierarchical Navigable Small World Graph algorithm.
*/
HNSW
}
| VectorAlgorithm |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/plugins/ModuleSupport.java | {
"start": 6752,
"end": 7463
} | class ____ services files.
record ScanResult(Set<String> classFiles, Set<String> serviceFiles) {}
@SuppressForbidden(reason = "need access to the jar file")
static ScanResult scan(JarFile jarFile) {
Map<Boolean, Set<String>> map = jarFile.versionedStream()
.filter(e -> e.isDirectory() == false)
.map(JarEntry::getName)
.filter(e -> (e.endsWith(".class") ^ e.startsWith(SERVICES_PREFIX)))
.collect(Collectors.partitioningBy(e -> e.startsWith(SERVICES_PREFIX), Collectors.toSet()));
return new ScanResult(map.get(Boolean.FALSE), map.get(Boolean.TRUE));
}
// Returns an optional containing the package name from a given binary | and |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanPipelineVariablesTest.java | {
"start": 3062,
"end": 3410
} | class ____ {
public void doNotUseMe(String body) {
fail("Should not invoce me");
}
public void withAnnotations(@Variables Map<String, Object> variables, @Body String body) {
assertEquals("Hello World from James", body);
assertEquals("James", variables.get("from"));
}
}
}
| BazBean |
java | alibaba__nacos | client-basic/src/main/java/com/alibaba/nacos/client/auth/ram/identify/StsConfig.java | {
"start": 1733,
"end": 4781
} | class ____ {
private static final StsConfig INSTANCE = new StsConfig();
}
private StsConfig() {
String ramRoleName = NacosClientProperties.PROTOTYPE.getProperty(IdentifyConstants.RAM_ROLE_NAME_PROPERTY);
if (!StringUtils.isBlank(ramRoleName)) {
setRamRoleName(ramRoleName);
}
String timeToRefreshInMillisecond = NacosClientProperties.PROTOTYPE.getProperty(IdentifyConstants.REFRESH_TIME_PROPERTY);
if (!StringUtils.isBlank(timeToRefreshInMillisecond)) {
setTimeToRefreshInMillisecond(Integer.parseInt(timeToRefreshInMillisecond));
}
String securityCredentials = NacosClientProperties.PROTOTYPE.getProperty(IdentifyConstants.SECURITY_PROPERTY);
if (!StringUtils.isBlank(securityCredentials)) {
setSecurityCredentials(securityCredentials);
}
String securityCredentialsUrl = NacosClientProperties.PROTOTYPE.getProperty(IdentifyConstants.SECURITY_URL_PROPERTY);
if (!StringUtils.isBlank(securityCredentialsUrl)) {
setSecurityCredentialsUrl(securityCredentialsUrl);
}
String cacheSecurityCredentials = NacosClientProperties.PROTOTYPE.getProperty(IdentifyConstants.SECURITY_CACHE_PROPERTY);
if (!StringUtils.isBlank(cacheSecurityCredentials)) {
setCacheSecurityCredentials(Boolean.parseBoolean(cacheSecurityCredentials));
}
}
public static StsConfig getInstance() {
return Singleton.INSTANCE;
}
public String getRamRoleName() {
return ramRoleName;
}
public void setRamRoleName(String ramRoleName) {
this.ramRoleName = ramRoleName;
}
public int getTimeToRefreshInMillisecond() {
return timeToRefreshInMillisecond;
}
public void setTimeToRefreshInMillisecond(int timeToRefreshInMillisecond) {
this.timeToRefreshInMillisecond = timeToRefreshInMillisecond;
}
public String getSecurityCredentialsUrl() {
if (securityCredentialsUrl == null && ramRoleName != null) {
return RAM_SECURITY_CREDENTIALS_URL + ramRoleName;
}
return securityCredentialsUrl;
}
public void setSecurityCredentialsUrl(String securityCredentialsUrl) {
this.securityCredentialsUrl = securityCredentialsUrl;
}
public String getSecurityCredentials() {
return securityCredentials;
}
public void setSecurityCredentials(String securityCredentials) {
this.securityCredentials = securityCredentials;
}
public boolean isStsOn() {
return StringUtils.isNotEmpty(getSecurityCredentials()) || StringUtils.isNotEmpty(getSecurityCredentialsUrl());
}
public boolean isCacheSecurityCredentials() {
return cacheSecurityCredentials;
}
public void setCacheSecurityCredentials(boolean cacheSecurityCredentials) {
this.cacheSecurityCredentials = cacheSecurityCredentials;
}
}
| Singleton |
java | google__dagger | javatests/dagger/hilt/android/processor/internal/androidentrypoint/AndroidEntryPointProcessorTest.java | {
"start": 9635,
"end": 10598
} | class ____<T> extends Hilt_BaseActivity {}");
HiltCompilerTests.hiltCompiler(testActivity)
.compile(
subject -> {
subject.compilationDidFail();
if (HiltCompilerTests.backend(subject) == Backend.JAVAC) {
subject.hasErrorCount(2);
} else {
subject.hasErrorCount(1);
}
subject.hasErrorContaining(
"@AndroidEntryPoint-annotated classes cannot have type parameters.");
});
}
@Test
public void checkAndroidEntryPointOnApplicationRecommendsHiltAndroidApp() {
Source testActivity =
HiltCompilerTests.javaSource(
"test.MyApplication",
"package test;",
"",
"import android.app.Application;",
"import dagger.hilt.android.AndroidEntryPoint;",
"",
"@AndroidEntryPoint(Application.class)",
"public | BaseActivity |
java | apache__logging-log4j2 | log4j-mongodb/src/test/java/org/apache/logging/log4j/mongodb/AbstractMongoDbCappedIT.java | {
"start": 1221,
"end": 1910
} | class ____ {
protected void test(final LoggerContext ctx, final MongoClient mongoClient) {
final Logger logger = ctx.getLogger(AbstractMongoDbCappedIT.class);
logger.info("Hello log");
final MongoDatabase database = mongoClient.getDatabase(MongoDbTestConstants.DATABASE_NAME);
assertNotNull(database);
final MongoCollection<Document> collection =
database.getCollection(getClass().getSimpleName());
assertNotNull(collection);
final Document first = collection.find().first();
assertNotNull(first);
assertEquals("Hello log", first.getString("message"), first.toJson());
}
}
| AbstractMongoDbCappedIT |
java | google__dagger | javatests/dagger/internal/codegen/ConflictingEntryPointsTest.java | {
"start": 2018,
"end": 2134
} | interface ____ extends Base1, Base2 {",
"",
" @Component.Builder",
" | TestComponent |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/beans/factory/xml/XmlBeanFactoryTests.java | {
"start": 77506,
"end": 77897
} | class ____ {
public final Object array;
public ConstructorArrayTestBean(int[] array) {
this.array = array;
}
public ConstructorArrayTestBean(float[] array) {
this.array = array;
}
public ConstructorArrayTestBean(short[] array) {
this.array = array;
}
public ConstructorArrayTestBean(String[] array) {
this.array = array;
}
}
static | ConstructorArrayTestBean |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java | {
"start": 920,
"end": 994
} | class ____ encode {@code field -> memory size} mappings
*/
public final | to |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/loader/ast/spi/SingleUniqueKeyEntityLoader.java | {
"start": 348,
"end": 695
} | interface ____<T> extends SingleEntityLoader<T> {
/**
* Load by unique key value
*/
@Override
T load(Object ukValue, LockOptions lockOptions, Boolean readOnly, SharedSessionContractImplementor session);
/**
* Resolve the matching id
*/
Object resolveId(Object key, SharedSessionContractImplementor session);
}
| SingleUniqueKeyEntityLoader |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/TableEnvironment.java | {
"start": 30547,
"end": 30818
} | class ____ for the format of the path.
* @param functionDescriptor The descriptor of the function to create.
*/
void createTemporaryFunction(String path, FunctionDescriptor functionDescriptor);
/**
* Registers a {@link UserDefinedFunction} | description |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jpa/event/internal/CallbackRegistryImpl.java | {
"start": 4092,
"end": 5788
} | class ____ {
private static final Callback[] NO_CALLBACKS = new Callback[0];
private final Map<Class<?>, Callback[]> preCreates = new HashMap<>();
private final Map<Class<?>, Callback[]> postCreates = new HashMap<>();
private final Map<Class<?>, Callback[]> preRemoves = new HashMap<>();
private final Map<Class<?>, Callback[]> postRemoves = new HashMap<>();
private final Map<Class<?>, Callback[]> preUpdates = new HashMap<>();
private final Map<Class<?>, Callback[]> postUpdates = new HashMap<>();
private final Map<Class<?>, Callback[]> postLoads = new HashMap<>();
@AllowReflection
public void registerCallbacks(Class<?> entityClass, Callback[] callbacks) {
if ( callbacks != null ) {
for ( Callback callback : callbacks ) {
addCallback( entityClass, callback );
}
}
}
public void addCallback(Class<?> entityClass, Callback callback) {
final var callbackMap = getCallbackMap( callback.getCallbackType() );
final Callback[] existingCallbacks = callbackMap.getOrDefault( entityClass, NO_CALLBACKS );
callbackMap.put( entityClass, ArrayHelper.add( existingCallbacks, callback ) );
}
private Map<Class<?>, Callback[]> getCallbackMap(CallbackType callbackType) {
return switch ( callbackType ) {
case PRE_PERSIST -> preCreates;
case POST_PERSIST -> postCreates;
case PRE_REMOVE -> preRemoves;
case POST_REMOVE -> postRemoves;
case PRE_UPDATE -> preUpdates;
case POST_UPDATE -> postUpdates;
case POST_LOAD -> postLoads;
};
}
protected CallbackRegistry build() {
return new CallbackRegistryImpl( preCreates, postCreates, preRemoves, postRemoves, preUpdates, postUpdates, postLoads );
}
}
}
| Builder |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/StreamToString.java | {
"start": 1372,
"end": 1942
} | class ____ extends AbstractToString {
private static final TypePredicate STREAM = isDescendantOf("java.util.stream.Stream");
@Inject
StreamToString(ErrorProneFlags flags) {
super(flags);
}
@Override
protected TypePredicate typePredicate() {
return STREAM;
}
@Override
protected Optional<Fix> implicitToStringFix(ExpressionTree tree, VisitorState state) {
return Optional.empty();
}
@Override
protected Optional<Fix> toStringFix(Tree parent, ExpressionTree tree, VisitorState state) {
return Optional.empty();
}
}
| StreamToString |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/resource/OAuth2ResourceServerConfigurer.java | {
"start": 24501,
"end": 25452
} | class ____ {
private Consumer<OAuth2ProtectedResourceMetadata.Builder> protectedResourceMetadataCustomizer;
private ProtectedResourceMetadataConfigurer() {
}
/**
* Sets the {@code Consumer} providing access to the
* {@link OAuth2ProtectedResourceMetadata.Builder} allowing the ability to
* customize the claims of the Resource Server's configuration.
* @param protectedResourceMetadataCustomizer the {@code Consumer} providing
* access to the {@link OAuth2ProtectedResourceMetadata.Builder}
* @return the {@link ProtectedResourceMetadataConfigurer} for further
* configuration
*/
public ProtectedResourceMetadataConfigurer protectedResourceMetadataCustomizer(
Consumer<OAuth2ProtectedResourceMetadata.Builder> protectedResourceMetadataCustomizer) {
this.protectedResourceMetadataCustomizer = protectedResourceMetadataCustomizer;
return this;
}
}
private static final | ProtectedResourceMetadataConfigurer |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/interpolation/ObjectBasedValueSource.java | {
"start": 2026,
"end": 3461
} | class ____ extends AbstractValueSource {
private final Object root;
/**
* Construct a new value source, using the supplied object as the root from
* which to start, and using expressions split at the dot ('.') to navigate
* the object graph beneath this root.
* @param root the root of the graph.
*/
public ObjectBasedValueSource(Object root) {
super(true);
this.root = root;
}
/**
* <p>Split the expression into parts, tokenized on the dot ('.') character. Then,
* starting at the root object contained in this value source, apply each part
* to the object graph below this root, using either 'getXXX()' or 'isXXX()'
* accessor types to resolve the value for each successive expression part.
* Finally, return the result of the last expression part's resolution.</p>
*
* <p><b>NOTE:</b> The object-graph navigation actually takes place via the
* {@link ReflectionValueExtractor} class.</p>
*/
@Override
public Object getValue(String expression) {
if (expression == null || expression.trim().isEmpty()) {
return null;
}
try {
return ReflectionValueExtractor.evaluate(expression, root, false);
} catch (Exception e) {
addFeedback("Failed to extract \'" + expression + "\' from: " + root, e);
}
return null;
}
}
| ObjectBasedValueSource |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/cfg/CacheProviderTest.java | {
"start": 3260,
"end": 3983
} | class ____ implements CacheProvider {
private static final long serialVersionUID = 1L;
final CustomTestSerializerCache _cache = new CustomTestSerializerCache();
@Override
public LookupCache<JavaType, ValueDeserializer<Object>> forDeserializerCache(DeserializationConfig config) {
return new SimpleLookupCache<>(16, 64);
}
@Override
public LookupCache<TypeKey, ValueSerializer<Object>> forSerializerCache(SerializationConfig config) {
return _cache;
}
@Override
public LookupCache<Object, JavaType> forTypeFactory() {
return new SimpleLookupCache<>(16, 64);
}
}
static | CustomSerCacheProvider |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/emops/Race.java | {
"start": 505,
"end": 880
} | class ____ {
@Id @GeneratedValue public Integer id;
@OrderColumn( name="index_" ) @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER)
@org.hibernate.annotations.Cascade( { org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN })
public List<Competitor> competitors = new ArrayList<Competitor>();
public String name;
}
| Race |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade/multilevel/MultiLevelCascadeCollectionEmbeddableTest.java | {
"start": 9550,
"end": 9924
} | class ____ {
@EmbeddedId
private SubSubEntityId id;
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn(name = "ID_NUM", referencedColumnName = "ID_NUM", insertable = false, updatable = false)
@JoinColumn(name = "IND_NUM", referencedColumnName = "IND_NUM", insertable = false, updatable = false)
private SubEntity subEntity;
}
@Embeddable
public static | SubSubEntity |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/task/TaskSchedulingAutoConfigurationTests.java | {
"start": 14597,
"end": 14874
} | class ____ {
private final List<String> threadNames;
LazyTestBean(List<String> threadNames) {
this.threadNames = threadNames;
}
@Scheduled(fixedRate = 2000)
void accumulate() {
this.threadNames.add(Thread.currentThread().getName());
}
}
static | LazyTestBean |
java | quarkusio__quarkus | extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcProvider.java | {
"start": 32047,
"end": 35004
} | class ____ implements Validator {
private final Map<String, Set<String>> customClaims;
private CustomClaimsValidator(Map<String, Set<String>> customClaims) {
this.customClaims = customClaims;
}
@Override
public String validate(JwtContext jwtContext) throws MalformedClaimException {
var claims = jwtContext.getJwtClaims();
for (var requiredClaim : customClaims.entrySet()) {
String validationFailureMessage = validate(requiredClaim.getKey(), requiredClaim.getValue(), claims);
if (validationFailureMessage != null) {
if (ACR.equals(requiredClaim.getKey())) {
throwAuthenticationFailedException(validationFailureMessage, requiredClaim.getValue());
}
return validationFailureMessage;
}
}
return null;
}
private static String validate(String requiredClaimName, Set<String> requiredClaimValues, JwtClaims claims)
throws MalformedClaimException {
if (!claims.hasClaim(requiredClaimName)) {
return "claim " + requiredClaimName + " is missing";
}
if (claims.isClaimValueString(requiredClaimName)) {
if (requiredClaimValues.size() == 1) {
String actualClaimValue = claims.getStringClaimValue(requiredClaimName);
String requiredClaimValue = requiredClaimValues.iterator().next();
if (!requiredClaimValue.equals(actualClaimValue)) {
return "claim " + requiredClaimName + " does not match expected value of " + requiredClaimValues;
}
} else {
throw new MalformedClaimException("expected claim " + requiredClaimName + " must be a list of strings");
}
} else {
if (claims.isClaimValueStringList(requiredClaimName)) {
List<String> actualClaimValues = claims.getStringListClaimValue(requiredClaimName);
for (String requiredClaimValue : requiredClaimValues) {
if (!actualClaimValues.contains(requiredClaimValue)) {
return "claim " + requiredClaimName + " does not match expected value of " + requiredClaimValues;
}
}
} else {
throw new MalformedClaimException(
"expected claim " + requiredClaimName + " must be a list of strings or a string");
}
}
return null;
}
}
private static Map<String, Object> tokenMap(String token, boolean idToken) {
return Map.of(idToken ? OidcConstants.ID_TOKEN_VALUE : OidcConstants.ACCESS_TOKEN_VALUE, token);
}
private static final | CustomClaimsValidator |
java | elastic__elasticsearch | x-pack/plugin/otel-data/src/javaRestTest/java/org/elasticsearch/xpack/oteldata/otlp/OTLPMetricsIndexingRestIT.java | {
"start": 3216,
"end": 24366
} | class ____ extends ESRestTestCase {
private static final String USER = "test_admin";
private static final String PASS = "x-pack-test-password";
private static final Resource TEST_RESOURCE = Resource.create(Attributes.of(stringKey("service.name"), "elasticsearch"));
private static final InstrumentationScopeInfo TEST_SCOPE = InstrumentationScopeInfo.create("io.opentelemetry.example.metrics");
private OtlpHttpMetricExporter exporter;
private SdkMeterProvider meterProvider;
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.user(USER, PASS, "superuser", false)
.setting("xpack.security.enabled", "true")
.setting("xpack.security.autoconfiguration.enabled", "false")
.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.ml.enabled", "false")
.setting("xpack.watcher.enabled", "false")
.build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
protected Settings restClientSettings() {
String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()));
return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", token).build();
}
@Before
public void beforeTest() throws Exception {
exporter = OtlpHttpMetricExporter.builder()
.setEndpoint(getClusterHosts().getFirst().toURI() + "/_otlp/v1/metrics")
.addHeader("Authorization", "ApiKey " + createApiKey())
.build();
meterProvider = SdkMeterProvider.builder()
.registerMetricReader(
PeriodicMetricReader.builder(exporter)
.setExecutor(Executors.newScheduledThreadPool(0))
.setInterval(Duration.ofNanos(Long.MAX_VALUE))
.build()
)
.build();
assertBusy(() -> assertOK(client().performRequest(new Request("GET", "_index_template/metrics-otel@template"))));
}
private static String createApiKey() throws IOException {
// Create API key with create_doc privilege for metrics-* index
Request createApiKeyRequest = new Request("POST", "/_security/api_key");
createApiKeyRequest.setJsonEntity("""
{
"name": "otel-metrics-test-key",
"role_descriptors": {
"metrics_writer": {
"index": [
{
"names": ["metrics-*"],
"privileges": ["create_doc", "auto_configure"]
}
]
}
}
}
""");
ObjectPath createApiKeyResponse = ObjectPath.createFromResponse(client().performRequest(createApiKeyRequest));
return createApiKeyResponse.evaluate("encoded");
}
@Override
public void tearDown() throws Exception {
meterProvider.close();
super.tearDown();
}
public void testIngestMetricViaMeterProvider() throws Exception {
Meter sampleMeter = meterProvider.get("io.opentelemetry.example.metrics");
long totalMemory = 42;
sampleMeter.gaugeBuilder("jvm.memory.total")
.setDescription("Reports JVM memory usage.")
.setUnit("By")
.buildWithCallback(result -> result.record(totalMemory, Attributes.empty()));
var result = meterProvider.shutdown();
assertThat(result.isSuccess(), is(true));
refreshMetricsIndices();
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(evaluate(source, "@timestamp"), isA(String.class));
assertThat(evaluate(source, "start_timestamp"), isA(String.class));
assertThat(evaluate(source, "_metric_names_hash"), isA(String.class));
assertThat(ObjectPath.<Number>evaluate(source, "metrics.jvm\\.memory\\.total").longValue(), equalTo(totalMemory));
assertThat(evaluate(source, "unit"), equalTo("By"));
assertThat(evaluate(source, "scope.name"), equalTo("io.opentelemetry.example.metrics"));
}
public void testIngestMetricDataViaMetricExporter() throws Exception {
long now = Clock.getDefault().now();
long totalMemory = 42;
MetricData jvmMemoryMetricData = createLongGauge(TEST_RESOURCE, Attributes.empty(), "jvm.memory.total", totalMemory, "By", now);
export(List.of(jvmMemoryMetricData));
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(Instant.parse(evaluate(source, "@timestamp")), equalTo(Instant.ofEpochMilli(TimeUnit.NANOSECONDS.toMillis(now))));
assertThat(Instant.parse(evaluate(source, "start_timestamp")), equalTo(Instant.ofEpochMilli(TimeUnit.NANOSECONDS.toMillis(now))));
assertThat(evaluate(source, "_metric_names_hash"), isA(String.class));
assertThat(ObjectPath.<Number>evaluate(source, "metrics.jvm\\.memory\\.total").longValue(), equalTo(totalMemory));
assertThat(evaluate(source, "unit"), equalTo("By"));
assertThat(evaluate(source, "resource.attributes.service\\.name"), equalTo("elasticsearch"));
assertThat(evaluate(source, "scope.name"), equalTo("io.opentelemetry.example.metrics"));
}
public void testGroupingSameGroup() throws Exception {
long now = Clock.getDefault().now();
MetricData metric1 = createDoubleGauge(TEST_RESOURCE, Attributes.empty(), "metric1", 42, "By", now);
// uses an equal but not the same resource to test grouping across resourceMetrics
MetricData metric2 = createDoubleGauge(TEST_RESOURCE.toBuilder().build(), Attributes.empty(), "metric2", 42, "By", now);
export(List.of(metric1, metric2));
ObjectPath path = ObjectPath.createFromResponse(
client().performRequest(new Request("GET", "metrics-generic.otel-default/_search"))
);
assertThat(path.toString(), path.evaluate("hits.total.value"), equalTo(1));
assertThat(path.evaluate("hits.hits.0._source.metrics"), equalTo(Map.of("metric1", 42.0, "metric2", 42.0)));
assertThat(path.evaluate("hits.hits.0._source.resource"), equalTo(Map.of("attributes", Map.of("service.name", "elasticsearch"))));
}
public void testGroupingDifferentGroup() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createDoubleGauge(TEST_RESOURCE, Attributes.empty(), "metric1", 42, "By", now),
createDoubleGauge(TEST_RESOURCE, Attributes.empty(), "metric1", 42, "By", now + TimeUnit.MILLISECONDS.toNanos(1)),
createDoubleGauge(TEST_RESOURCE, Attributes.empty(), "metric1", 42, "", now),
createDoubleGauge(TEST_RESOURCE, Attributes.of(stringKey("foo"), "bar"), "metric1", 42, "By", now)
)
);
ObjectPath path = search("metrics-generic.otel-default");
assertThat(path.toString(), path.evaluate("hits.total.value"), equalTo(4));
}
public void testGauge() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createDoubleGauge(TEST_RESOURCE, Attributes.empty(), "double_gauge", 42.0, "By", now),
createLongGauge(TEST_RESOURCE, Attributes.empty(), "long_gauge", 42, "By", now)
)
);
Map<String, Object> metrics = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(metrics, "double_gauge.type"), equalTo("double"));
assertThat(evaluate(metrics, "double_gauge.meta.unit"), equalTo("By"));
assertThat(evaluate(metrics, "double_gauge.time_series_metric"), equalTo("gauge"));
assertThat(evaluate(metrics, "long_gauge.type"), equalTo("long"));
assertThat(evaluate(metrics, "long_gauge.meta.unit"), equalTo("By"));
assertThat(evaluate(metrics, "long_gauge.time_series_metric"), equalTo("gauge"));
}
public void testCounterTemporality() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createCounter(TEST_RESOURCE, Attributes.empty(), "cumulative_counter", 42, "By", now, CUMULATIVE, MONOTONIC),
createCounter(TEST_RESOURCE, Attributes.empty(), "delta_counter", 42, "By", now, DELTA, MONOTONIC)
)
);
Map<String, Object> metrics = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(metrics, "cumulative_counter.type"), equalTo("long"));
assertThat(evaluate(metrics, "cumulative_counter.time_series_metric"), equalTo("counter"));
assertThat(evaluate(metrics, "delta_counter.type"), equalTo("long"));
assertThat(evaluate(metrics, "delta_counter.time_series_metric"), equalTo("gauge"));
}
public void testCounterMonotonicity() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createCounter(TEST_RESOURCE, Attributes.empty(), "up_down_counter", 42, "By", now, CUMULATIVE, NON_MONOTONIC),
createCounter(TEST_RESOURCE, Attributes.empty(), "up_down_counter_delta", 42, "By", now, DELTA, NON_MONOTONIC)
)
);
Map<String, Object> metrics = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(metrics, "up_down_counter.type"), equalTo("long"));
assertThat(evaluate(metrics, "up_down_counter.time_series_metric"), equalTo("gauge"));
assertThat(evaluate(metrics, "up_down_counter_delta.type"), equalTo("long"));
assertThat(evaluate(metrics, "up_down_counter_delta.time_series_metric"), equalTo("gauge"));
}
public void testExponentialHistograms() throws Exception {
long now = Clock.getDefault().now();
export(List.of(createExponentialHistogram(now, "exponential_histogram", DELTA, Attributes.empty())));
Map<String, Object> mappings = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(mappings, "exponential_histogram.type"), equalTo("histogram"));
// Get document and check values/counts array
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(evaluate(source, "metrics.exponential_histogram.counts"), equalTo(List.of(2, 1, 10, 1, 2)));
assertThat(evaluate(source, "metrics.exponential_histogram.values"), equalTo(List.of(-3.0, -1.5, 0.0, 1.5, 3.0)));
}
public void testExponentialHistogramsAsAggregateMetricDouble() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createExponentialHistogram(
now,
"exponential_histogram_summary",
DELTA,
Attributes.of(
AttributeKey.stringArrayKey("elasticsearch.mapping.hints"),
List.of("aggregate_metric_double", "_doc_count")
)
)
)
);
Map<String, Object> mappings = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(mappings, "exponential_histogram_summary.type"), equalTo("aggregate_metric_double"));
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(evaluate(source, "_doc_count"), equalTo(16));
assertThat(evaluate(source, "metrics.exponential_histogram_summary.value_count"), equalTo(16));
assertThat(evaluate(source, "metrics.exponential_histogram_summary.sum"), equalTo(10.0));
}
public void testHistogram() throws Exception {
long now = Clock.getDefault().now();
export(List.of(createHistogram(now, "histogram", DELTA, Attributes.empty())));
Map<String, Object> metrics = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(metrics, "histogram.type"), equalTo("histogram"));
// Get document and check values/counts array
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(evaluate(source, "metrics.histogram.counts"), equalTo(List.of(1, 2, 3, 4, 5, 6)));
List<Double> values = evaluate(source, "metrics.histogram.values");
assertThat(values, equalTo(List.of(1.0, 3.0, 5.0, 7.0, 9.0, 10.0)));
}
public void testHistogramAsAggregateMetricDouble() throws Exception {
long now = Clock.getDefault().now();
export(
List.of(
createHistogram(
now,
"histogram_summary",
DELTA,
Attributes.of(
AttributeKey.stringArrayKey("elasticsearch.mapping.hints"),
List.of("aggregate_metric_double", "_doc_count")
)
)
)
);
Map<String, Object> metrics = evaluate(getMapping("metrics-generic.otel-default"), "properties.metrics.properties");
assertThat(evaluate(metrics, "histogram_summary.type"), equalTo("aggregate_metric_double"));
ObjectPath search = search("metrics-generic.otel-default");
assertThat(search.toString(), search.evaluate("hits.total.value"), equalTo(1));
var source = search.evaluate("hits.hits.0._source");
assertThat(evaluate(source, "_doc_count"), equalTo(21));
assertThat(evaluate(source, "metrics.histogram_summary.value_count"), equalTo(21));
assertThat(evaluate(source, "metrics.histogram_summary.sum"), equalTo(10.0));
}
public void testTsidForBulkIsSame() throws Exception {
// This test is to ensure that the _tsid is the same when indexing via a bulk request or OTLP.
long now = Clock.getDefault().now();
export(
List.of(
createDoubleGauge(
TEST_RESOURCE,
Attributes.builder()
.put("string", "foo")
.put("string_array", "foo", "bar", "baz")
.put("boolean", true)
.put("long", 42L)
.put("double", 42.0)
.put("host.ip", "127.0.0.1")
.build(),
"metric",
42,
"By",
now
)
)
);
BufferedMurmur3Hasher hasher = new BufferedMurmur3Hasher(0);
hasher.addString("metric");
String metricNamesHash = Long.toHexString(hasher.digestHash().hashCode());
// Index the same metric via a bulk request
Request bulkRequest = new Request("POST", "metrics-generic.otel-default/_bulk?refresh");
bulkRequest.setJsonEntity(
"{\"create\":{}}\n"
+ """
{
"@timestamp": $time,
"start_timestamp": $time,
"data_stream": {
"type": "metrics",
"dataset": "generic.otel",
"namespace": "default"
},
"_metric_names_hash": "$metric_names_hash",
"metrics": {
"metric": 42.0
},
"attributes": {
"string": "foo",
"string_array": ["foo", "bar", "baz"],
"boolean": true,
"long": 42,
"double": 42.0,
"host.ip": "127.0.0.1"
},
"resource": {
"attributes": {
"service.name": "elasticsearch"
}
},
"scope": {
"name": "io.opentelemetry.example.metrics"
},
"unit": "By"
}
""".replace("\n", "")
.replace("$time", Long.toString(TimeUnit.NANOSECONDS.toMillis(now) + 1))
.replace("$metric_names_hash", metricNamesHash)
+ "\n"
);
assertThat(ObjectPath.createFromResponse(client().performRequest(bulkRequest)).evaluate("errors"), equalTo(false));
ObjectPath searchResponse = ObjectPath.createFromResponse(
client().performRequest(new Request("GET", "metrics-generic.otel-default/_search?docvalue_fields=_tsid"))
);
assertThat(searchResponse.evaluate("hits.total.value"), equalTo(2));
assertThat(searchResponse.evaluate("hits.hits.0.fields._tsid"), equalTo(searchResponse.evaluate("hits.hits.1.fields._tsid")));
}
private static Map<String, Object> getMapping(String target) throws IOException {
Map<String, Object> mappings = ObjectPath.createFromResponse(client().performRequest(new Request("GET", target + "/_mapping")))
.evaluate("");
assertThat(mappings, aMapWithSize(1));
Map<String, Object> mapping = evaluate(mappings.values().iterator().next(), "mappings");
assertThat(mapping, not(anEmptyMap()));
return mapping;
}
private void export(List<MetricData> metrics) throws Exception {
CompletableResultCode result = exporter.export(metrics).join(10, TimeUnit.SECONDS);
Throwable failure = result.getFailureThrowable();
if (failure instanceof Exception e) {
throw e;
} else if (failure != null) {
throw new RuntimeException("Failed to export metrics", failure);
}
assertThat(result.isSuccess(), is(true));
refreshMetricsIndices();
}
private ObjectPath search(String target) throws IOException {
return ObjectPath.createFromResponse(client().performRequest(new Request("GET", target + "/_search")));
}
private static void refreshMetricsIndices() throws IOException {
assertOK(client().performRequest(new Request("GET", "metrics-*/_refresh")));
}
private static MetricData createDoubleGauge(
Resource resource,
Attributes attributes,
String name,
double value,
String unit,
long timeEpochNanos
) {
return ImmutableMetricData.createDoubleGauge(
resource,
TEST_SCOPE,
name,
"Your description could be here.",
unit,
ImmutableGaugeData.create(List.of(ImmutableDoublePointData.create(timeEpochNanos, timeEpochNanos, attributes, value)))
);
}
private static MetricData createLongGauge(
Resource resource,
Attributes attributes,
String name,
long value,
String unit,
long timeEpochNanos
) {
return ImmutableMetricData.createLongGauge(
resource,
TEST_SCOPE,
name,
"Your description could be here.",
unit,
ImmutableGaugeData.create(List.of(ImmutableLongPointData.create(timeEpochNanos, timeEpochNanos, attributes, value)))
);
}
private static MetricData createCounter(
Resource resource,
Attributes attributes,
String name,
long value,
String unit,
long timeEpochNanos,
AggregationTemporality temporality,
Monotonicity monotonicity
) {
return ImmutableMetricData.createLongSum(
resource,
TEST_SCOPE,
name,
"Your description could be here.",
unit,
ImmutableSumData.create(
monotonicity.isMonotonic(),
temporality,
List.of(ImmutableLongPointData.create(timeEpochNanos, timeEpochNanos, attributes, value))
)
);
}
// this is just to enhance readability of the createCounter calls (avoid boolean parameter)
| OTLPMetricsIndexingRestIT |
java | apache__rocketmq | client/src/test/java/org/apache/rocketmq/client/impl/consumer/ConsumeMessagePopConcurrentlyServiceTest.java | {
"start": 2438,
"end": 10068
} | class ____ {
@Mock
private DefaultMQPushConsumerImpl defaultMQPushConsumerImpl;
@Mock
private MessageListenerConcurrently messageListener;
@Mock
private DefaultMQPushConsumer defaultMQPushConsumer;
private ConsumeMessagePopConcurrentlyService popService;
private final String defaultGroup = "defaultGroup";
private final String defaultBroker = "defaultBroker";
private final String defaultTopic = "defaultTopic";
@Before
public void init() throws Exception {
when(defaultMQPushConsumer.getConsumerGroup()).thenReturn(defaultGroup);
when(defaultMQPushConsumer.getConsumeThreadMin()).thenReturn(1);
when(defaultMQPushConsumer.getConsumeThreadMax()).thenReturn(3);
when(defaultMQPushConsumer.getConsumeMessageBatchMaxSize()).thenReturn(32);
when(defaultMQPushConsumerImpl.getDefaultMQPushConsumer()).thenReturn(defaultMQPushConsumer);
ConsumerStatsManager consumerStatsManager = mock(ConsumerStatsManager.class);
when(defaultMQPushConsumerImpl.getConsumerStatsManager()).thenReturn(consumerStatsManager);
popService = new ConsumeMessagePopConcurrentlyService(defaultMQPushConsumerImpl, messageListener);
}
@Test
public void testUpdateCorePoolSize() {
popService.updateCorePoolSize(2);
popService.incCorePoolSize();
popService.decCorePoolSize();
assertEquals(2, popService.getCorePoolSize());
}
@Test
public void testConsumeMessageDirectly() {
when(messageListener.consumeMessage(any(), any(ConsumeConcurrentlyContext.class))).thenReturn(ConsumeConcurrentlyStatus.CONSUME_SUCCESS);
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_SUCCESS, actual.getConsumeResult());
}
@Test
public void testConsumeMessageDirectlyWithCrLater() {
when(messageListener.consumeMessage(any(), any(ConsumeConcurrentlyContext.class))).thenReturn(ConsumeConcurrentlyStatus.RECONSUME_LATER);
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_LATER, actual.getConsumeResult());
}
@Test
public void testConsumeMessageDirectlyWithCrReturnNull() {
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_RETURN_NULL, actual.getConsumeResult());
}
@Test
public void testConsumeMessageDirectlyWithCrThrowException() {
when(messageListener.consumeMessage(any(), any(ConsumeConcurrentlyContext.class))).thenThrow(new RuntimeException("exception"));
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_THROW_EXCEPTION, actual.getConsumeResult());
}
@Test
public void testShutdown() throws IllegalAccessException {
popService.shutdown(3000L);
Field scheduledExecutorServiceField = FieldUtils.getDeclaredField(popService.getClass(), "scheduledExecutorService", true);
Field consumeExecutorField = FieldUtils.getDeclaredField(popService.getClass(), "consumeExecutor", true);
ScheduledExecutorService scheduledExecutorService = (ScheduledExecutorService) scheduledExecutorServiceField.get(popService);
ThreadPoolExecutor consumeExecutor = (ThreadPoolExecutor) consumeExecutorField.get(popService);
assertTrue(scheduledExecutorService.isShutdown());
assertTrue(scheduledExecutorService.isTerminated());
assertTrue(consumeExecutor.isShutdown());
assertTrue(consumeExecutor.isTerminated());
}
@Test
public void testSubmitConsumeRequest() {
assertThrows(UnsupportedOperationException.class, () -> {
List<MessageExt> msgs = mock(List.class);
ProcessQueue processQueue = mock(ProcessQueue.class);
MessageQueue messageQueue = mock(MessageQueue.class);
popService.submitConsumeRequest(msgs, processQueue, messageQueue, false);
});
}
@Test
public void testSubmitPopConsumeRequest() throws IllegalAccessException {
List<MessageExt> msgs = Collections.singletonList(createMessageExt());
PopProcessQueue processQueue = mock(PopProcessQueue.class);
MessageQueue messageQueue = mock(MessageQueue.class);
ThreadPoolExecutor consumeExecutor = mock(ThreadPoolExecutor.class);
FieldUtils.writeDeclaredField(popService, "consumeExecutor", consumeExecutor, true);
popService.submitPopConsumeRequest(msgs, processQueue, messageQueue);
verify(consumeExecutor, times(1)).submit(any(Runnable.class));
}
@Test
public void testSubmitPopConsumeRequestWithMultiMsg() throws IllegalAccessException {
List<MessageExt> msgs = Arrays.asList(createMessageExt(), createMessageExt());
PopProcessQueue processQueue = mock(PopProcessQueue.class);
MessageQueue messageQueue = mock(MessageQueue.class);
ThreadPoolExecutor consumeExecutor = mock(ThreadPoolExecutor.class);
FieldUtils.writeDeclaredField(popService, "consumeExecutor", consumeExecutor, true);
when(defaultMQPushConsumer.getConsumeMessageBatchMaxSize()).thenReturn(1);
popService.submitPopConsumeRequest(msgs, processQueue, messageQueue);
verify(consumeExecutor, times(2)).submit(any(Runnable.class));
}
@Test
public void testProcessConsumeResult() {
ConsumeConcurrentlyContext context = mock(ConsumeConcurrentlyContext.class);
ConsumeMessagePopConcurrentlyService.ConsumeRequest consumeRequest = mock(ConsumeMessagePopConcurrentlyService.ConsumeRequest.class);
when(consumeRequest.getMsgs()).thenReturn(Arrays.asList(createMessageExt(), createMessageExt()));
MessageQueue messageQueue = mock(MessageQueue.class);
when(messageQueue.getTopic()).thenReturn(defaultTopic);
when(consumeRequest.getMessageQueue()).thenReturn(messageQueue);
PopProcessQueue processQueue = mock(PopProcessQueue.class);
when(processQueue.ack()).thenReturn(0);
when(consumeRequest.getPopProcessQueue()).thenReturn(processQueue);
when(defaultMQPushConsumerImpl.getPopDelayLevel()).thenReturn(new int[]{1, 10});
popService.processConsumeResult(ConsumeConcurrentlyStatus.CONSUME_SUCCESS, context, consumeRequest);
verify(defaultMQPushConsumerImpl, times(1)).ackAsync(any(MessageExt.class), any());
}
private MessageExt createMessageExt() {
MessageExt result = new MessageExt();
result.setBody("body".getBytes(StandardCharsets.UTF_8));
result.setTopic(defaultTopic);
result.setBrokerName(defaultBroker);
result.putUserProperty("key", "value");
result.getProperties().put(MessageConst.PROPERTY_PRODUCER_GROUP, defaultGroup);
result.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, "TX1");
long curTime = System.currentTimeMillis();
result.setBornTimestamp(curTime - 1000);
result.getProperties().put(MessageConst.PROPERTY_POP_CK, curTime + " " + curTime + " " + curTime + " " + curTime);
result.setKeys("keys");
SocketAddress bornHost = new InetSocketAddress("127.0.0.1", 12911);
SocketAddress storeHost = new InetSocketAddress("127.0.0.1", 10911);
result.setBornHost(bornHost);
result.setStoreHost(storeHost);
return result;
}
}
| ConsumeMessagePopConcurrentlyServiceTest |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/RegularCancellationToken.java | {
"start": 430,
"end": 704
} | class ____ implements CancellationToken {
private final AtomicBoolean cancelled = new AtomicBoolean();
@Override
public boolean isCancellationRequested() {
return cancelled.get();
}
@Override
public void cancel() {
cancelled.set(true);
}
}
| RegularCancellationToken |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/TenantUpdateTest.java | {
"start": 971,
"end": 2404
} | class ____ extends TestCase {
private String sql = "UPDATE T_USER SET FNAME = ? WHERE FID = ?";
private String expect_sql = "UPDATE T_USER" +
"\nSET FNAME = ?, tenant = 123" +
"\nWHERE FID = ?";
private WallConfig config = new WallConfig();
private WallConfig config_callback = new WallConfig();
protected void setUp() throws Exception {
config.setTenantTablePattern("*");
config.setTenantColumn("tenant");
config_callback.setTenantCallBack(new TenantTestCallBack());
}
public void testMySql() throws Exception {
WallProvider.setTenantValue(123);
MySqlWallProvider provider = new MySqlWallProvider(config);
WallCheckResult checkResult = provider.check(sql);
assertEquals(0, checkResult.getViolations().size());
String resultSql = SQLUtils.toSQLString(checkResult.getStatementList(), JdbcConstants.MYSQL);
assertEquals(expect_sql, resultSql);
}
public void testMySql2() throws Exception {
WallProvider.setTenantValue(123);
MySqlWallProvider provider = new MySqlWallProvider(config_callback);
WallCheckResult checkResult = provider.check(sql);
assertEquals(0, checkResult.getViolations().size());
String resultSql = SQLUtils.toSQLString(checkResult.getStatementList(), JdbcConstants.MYSQL);
assertEquals(expect_sql, resultSql);
}
}
| TenantUpdateTest |
java | quarkusio__quarkus | extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/MessageBundleProcessor.java | {
"start": 90201,
"end": 90448
} | class ____ for a localized file; org.acme.Foo_en -> org.acme.Foo
className = additionalClassNameSanitizer.apply(className);
return GeneratedClassGizmoAdaptor.isApplicationClass(className);
}
}
private | generated |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/jackson2/RememberMeAuthenticationTokenMixin.java | {
"start": 1206,
"end": 1608
} | class ____ need to register it with
* {@link com.fasterxml.jackson.databind.ObjectMapper} and 2 more mixin classes.
*
* <ol>
* <li>{@link SimpleGrantedAuthorityMixin}</li>
* <li>{@link UserMixin}</li>
* <li>{@link UnmodifiableSetMixin}</li>
* </ol>
*
* <pre>
* ObjectMapper mapper = new ObjectMapper();
* mapper.registerModule(new CoreJackson2Module());
* </pre>
*
* <i>Note: This | you |
java | alibaba__nacos | istio/src/main/java/com/alibaba/nacos/istio/api/ApiGenerator.java | {
"start": 894,
"end": 1304
} | interface ____<T> {
/**
* Generate data based on resource snapshot.
*
* @param pushRequest Push Request
* @return data
*/
List<T> generate(PushRequest pushRequest);
/**
* Delta generate data based on resource snapshot.
*
* @param pushRequest Push Request
* @return data
*/
List<Resource> deltaGenerate(PushRequest pushRequest);
}
| ApiGenerator |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java | {
"start": 13776,
"end": 14138
} | interface ____ {
ExpressionEvaluator.Factory build(
Source source,
ExpressionEvaluator.Factory unitsEvaluator,
ExpressionEvaluator.Factory startTimestampEvaluator,
ExpressionEvaluator.Factory endTimestampEvaluator,
ZoneId zoneId
);
}
@FunctionalInterface
public | DateDiffFactory |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/Base64DataFormat.java | {
"start": 1457,
"end": 3497
} | class ____ extends DataFormatDefinition {
@XmlAttribute
@Metadata(defaultValue = "76", javaType = "java.lang.Integer")
private String lineLength;
@XmlAttribute
@Metadata(label = "advanced")
private String lineSeparator;
@XmlAttribute
@Metadata(label = "advanced", javaType = "java.lang.Boolean")
private String urlSafe;
public Base64DataFormat() {
super("base64");
}
protected Base64DataFormat(Base64DataFormat source) {
super(source);
this.lineLength = source.lineLength;
this.lineSeparator = source.lineSeparator;
this.urlSafe = source.urlSafe;
}
private Base64DataFormat(Builder builder) {
this();
this.lineLength = builder.lineLength;
this.lineSeparator = builder.lineSeparator;
this.urlSafe = builder.urlSafe;
}
@Override
public Base64DataFormat copyDefinition() {
return new Base64DataFormat(this);
}
public String getLineLength() {
return lineLength;
}
/**
* To specific a maximum line length for the encoded data.
* <p/>
* By default 76 is used.
*/
public void setLineLength(String lineLength) {
this.lineLength = lineLength;
}
public String getLineSeparator() {
return lineSeparator;
}
/**
* The line separators to use.
* <p/>
* Uses new line characters (CRLF) by default.
*/
public void setLineSeparator(String lineSeparator) {
this.lineSeparator = lineSeparator;
}
public String getUrlSafe() {
return urlSafe;
}
/**
* Instead of emitting '+' and '/' we emit '-' and '_' respectively. urlSafe is only applied to encode operations.
* Decoding seamlessly handles both modes. Is by default false.
*/
public void setUrlSafe(String urlSafe) {
this.urlSafe = urlSafe;
}
/**
* {@code Builder} is a specific builder for {@link Base64DataFormat}.
*/
@XmlTransient
public static | Base64DataFormat |
java | apache__camel | catalog/camel-catalog-maven/src/test/java/org/apache/camel/catalog/maven/MavenVersionManagerManualTest.java | {
"start": 1541,
"end": 5587
} | class ____ {
private static final String COMPONENTS_CATALOG = "org/apache/camel/catalog/components.properties";
@Test
public void testLoadVersion() throws Exception {
MavenVersionManager manager = new MavenVersionManager();
String current = manager.getLoadedVersion();
assertNull(current);
boolean loaded = manager.loadVersion("2.17.2");
assertTrue(loaded);
assertEquals("2.17.2", manager.getLoadedVersion());
InputStream is = manager.getResourceAsStream(COMPONENTS_CATALOG);
assertNotNull(is);
String text = CatalogHelper.loadText(is);
// should not contain Camel 2.18 components
assertFalse(text.contains("servicenow"));
// but 2.17 components such
assertTrue(text.contains("nats"));
}
@Test
public void testEndpointOptions217() {
CamelCatalog catalog = new DefaultCamelCatalog(false);
catalog.setVersionManager(new MavenVersionManager());
catalog.loadVersion("2.17.1");
assertEquals("2.17.1", catalog.getLoadedVersion());
String json = catalog.componentJSonSchema("ahc");
assertNotNull(json);
// should have loaded the 2.17.1 version
assertTrue(json.contains("\"version\": \"2.17.1\""));
// should not contain Camel 2.18 option
assertFalse(json.contains("connectionClose"));
}
@Test
public void testEndpointOptions218OrNewer() {
CamelCatalog catalog = new DefaultCamelCatalog(false);
catalog.setVersionManager(new MavenVersionManager());
catalog.loadVersion("2.18.3");
String json = catalog.componentJSonSchema("ahc");
assertNotNull(json);
// should contain the Camel 2.18 option
assertTrue(json.contains("connectionClose"));
}
@Test
public void testRuntimeProviderLoadVersion() {
CamelCatalog catalog = new DefaultCamelCatalog(false);
catalog.setVersionManager(new MavenVersionManager());
catalog.setRuntimeProvider(new DefaultRuntimeProvider());
String version = "2.18.2";
boolean loaded = catalog.loadVersion(version);
assertTrue(loaded);
loaded = catalog.loadRuntimeProviderVersion(catalog.getRuntimeProvider().getProviderGroupId(),
catalog.getRuntimeProvider().getProviderArtifactId(), version);
assertTrue(loaded);
assertEquals(version, catalog.getLoadedVersion());
assertEquals(version, catalog.getRuntimeProviderLoadedVersion());
List<String> names = catalog.findComponentNames();
assertTrue(names.contains("file"));
assertTrue(names.contains("ftp"));
assertTrue(names.contains("jms"));
}
@Test
public void testRuntimeProviderLoadVersionWithCaching() {
CamelCatalog catalog = new DefaultCamelCatalog(true);
catalog.setVersionManager(new MavenVersionManager());
catalog.setRuntimeProvider(new DefaultRuntimeProvider());
String version = "2.18.2";
boolean loaded = catalog.loadVersion(version);
assertTrue(loaded);
loaded = catalog.loadRuntimeProviderVersion(catalog.getRuntimeProvider().getProviderGroupId(),
catalog.getRuntimeProvider().getProviderArtifactId(), version);
assertTrue(loaded);
assertEquals(version, catalog.getLoadedVersion());
assertEquals(version, catalog.getRuntimeProviderLoadedVersion());
List<String> names = catalog.findComponentNames();
assertTrue(names.contains("file"));
assertTrue(names.contains("ftp"));
assertTrue(names.contains("jms"));
}
@Test
public void testLoadUnknownVersion() {
MavenVersionManager manager = new MavenVersionManager();
String current = manager.getLoadedVersion();
assertNull(current);
// version 2.99 does not exist and cannot be loaded
boolean loaded = manager.loadVersion("2.99");
assertFalse(loaded);
}
}
| MavenVersionManagerManualTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java | {
"start": 6715,
"end": 67277
} | class ____ extends ESTestCase {
private static final Object EXPLICIT_NULL = new Object();
private static final IndexingPressure NOOP_INDEXING_PRESSURE = new NoopIndexingPressure();
private final boolean useLegacyFormat;
private ThreadPool threadPool;
public ShardBulkInferenceActionFilterTests(boolean useLegacyFormat) {
this.useLegacyFormat = useLegacyFormat;
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return List.of(new Object[] { true }, new Object[] { false });
}
@Before
public void setupThreadPool() {
threadPool = new TestThreadPool(getTestName());
}
@After
public void tearDownThreadPool() throws Exception {
terminate(threadPool);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testFilterNoop() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
ShardBulkInferenceActionFilter filter = createFilter(threadPool, Map.of(), NOOP_INDEXING_PRESSURE, useLegacyFormat, inferenceStats);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
assertNull(((BulkShardRequest) request).getInferenceFieldMap());
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
BulkShardRequest request = new BulkShardRequest(
new ShardId("test", "test", 0),
WriteRequest.RefreshPolicy.NONE,
new BulkItemRequest[0]
);
request.setInferenceFieldMap(
Map.of("foo", new InferenceFieldMetadata("foo", "bar", "baz", generateRandomStringArray(5, 10, false, false), null))
);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testLicenseInvalidForInference() throws InterruptedException {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
StaticModel model = StaticModel.createRandomInstance();
var licenseState = MockLicenseState.createMock();
when(licenseState.isAllowed(InferencePlugin.INFERENCE_API_FEATURE)).thenReturn(false);
ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(model.getInferenceEntityId(), model),
NOOP_INDEXING_PRESSURE,
useLegacyFormat,
licenseState,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
assertThat(bulkShardRequest.items().length, equalTo(1));
BulkItemResponse.Failure failure = bulkShardRequest.items()[0].getPrimaryResponse().getFailure();
assertNotNull(failure);
assertThat(failure.getCause(), instanceOf(ElasticsearchSecurityException.class));
assertThat(
failure.getMessage(),
containsString(org.elasticsearch.core.Strings.format("current license is non-compliant for [%s]", XPackField.INFERENCE))
);
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"obj.field1",
new InferenceFieldMetadata("obj.field1", model.getInferenceEntityId(), new String[] { "obj.field1" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[1];
items[0] = new BulkItemRequest(0, new IndexRequest("test").source("obj.field1", "Test"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testLicenseInvalidForEis() throws InterruptedException {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
StaticModel model = new StaticModel(
randomAlphanumericOfLength(5),
TaskType.TEXT_EMBEDDING,
ElasticInferenceService.NAME,
new TestModel.TestServiceSettings("foo", 128, SimilarityMeasure.COSINE, DenseVectorFieldMapper.ElementType.BYTE),
new TestModel.TestTaskSettings(randomInt(3)),
new TestModel.TestSecretSettings(randomAlphaOfLength(4))
);
var licenseState = MockLicenseState.createMock();
when(licenseState.isAllowed(InferencePlugin.EIS_INFERENCE_FEATURE)).thenReturn(false);
ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(model.getInferenceEntityId(), model),
NOOP_INDEXING_PRESSURE,
useLegacyFormat,
licenseState,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
assertThat(bulkShardRequest.items().length, equalTo(1));
BulkItemResponse.Failure failure = bulkShardRequest.items()[0].getPrimaryResponse().getFailure();
assertNotNull(failure);
assertThat(failure.getCause(), instanceOf(ElasticsearchSecurityException.class));
assertThat(
failure.getMessage(),
containsString(org.elasticsearch.core.Strings.format("current license is non-compliant for [%s]", XPackField.INFERENCE))
);
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"obj.field1",
new InferenceFieldMetadata("obj.field1", model.getInferenceEntityId(), new String[] { "obj.field1" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[1];
items[0] = new BulkItemRequest(0, new IndexRequest("test").source("obj.field1", "Test"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testInferenceNotFound() throws Exception {
    // Only one model is registered; the field map below also references the unknown
    // inference id "inference_0", so every bulk item must fail resolution with NOT_FOUND.
    final InferenceStats stats = InferenceStatsTests.mockInferenceStats();
    StaticModel knownModel = StaticModel.createRandomInstance();
    ShardBulkInferenceActionFilter filter = createFilter(
        threadPool,
        Map.of(knownModel.getInferenceEntityId(), knownModel),
        NOOP_INDEXING_PRESSURE,
        useLegacyFormat,
        stats
    );

    CountDownLatch downstreamInvoked = new CountDownLatch(1);
    ActionFilterChain chain = (task, action, request, listener) -> {
        try {
            BulkShardRequest shardRequest = (BulkShardRequest) request;
            // The filter consumes the inference field map before continuing the chain.
            assertNull(shardRequest.getInferenceFieldMap());
            for (BulkItemRequest item : shardRequest.items()) {
                BulkItemResponse primary = item.getPrimaryResponse();
                assertNotNull(primary);
                assertTrue(primary.isFailed());
                assertThat(primary.getFailure().getStatus(), equalTo(RestStatus.NOT_FOUND));
            }
        } finally {
            downstreamInvoked.countDown();
        }
    };

    ActionListener responseListener = mock(ActionListener.class);
    Task task = mock(Task.class);
    Map<String, InferenceFieldMetadata> fieldMap = Map.of(
        "field1",
        new InferenceFieldMetadata("field1", knownModel.getInferenceEntityId(), new String[] { "field1" }, null),
        "field2",
        new InferenceFieldMetadata("field2", "inference_0", new String[] { "field2" }, null),
        "field3",
        new InferenceFieldMetadata("field3", "inference_0", new String[] { "field3" }, null)
    );
    BulkItemRequest[] items = new BulkItemRequest[10];
    for (int i = 0; i < items.length; i++) {
        items[i] = randomBulkItemRequest(useLegacyFormat, Map.of(), fieldMap)[0];
    }
    BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
    request.setInferenceFieldMap(fieldMap);
    filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, responseListener, chain);
    awaitLatch(downstreamInvoked, 10, TimeUnit.SECONDS);
}
// Mixed success/failure bulk: items whose input maps to a pre-seeded ChunkedInferenceError must be
// marked failed with BAD_REQUEST while the successful item is rewritten in place with its inference
// results, and the request-count metric must record 1 success and 2 failures with the expected
// task-type/service/source attributes.
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testItemFailures() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
StaticModel model = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(model.getInferenceEntityId(), model),
NOOP_INDEXING_PRESSURE,
useLegacyFormat,
inferenceStats
);
// Pre-seed the static model: "I am a failure" yields an inference error, "I am a success" an embedding.
model.putResult("I am a failure", new ChunkedInferenceError(new IllegalArgumentException("boom")));
model.putResult("I am a success", randomChunkedInferenceEmbedding(model, List.of("I am a success")));
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
// The filter consumes the inference field map before continuing the chain.
assertNull(bulkShardRequest.getInferenceFieldMap());
assertThat(bulkShardRequest.items().length, equalTo(3));
// item 0 is a failure
assertNotNull(bulkShardRequest.items()[0].getPrimaryResponse());
assertTrue(bulkShardRequest.items()[0].getPrimaryResponse().isFailed());
BulkItemResponse.Failure failure = bulkShardRequest.items()[0].getPrimaryResponse().getFailure();
assertThat(failure.getCause().getMessage(), containsString("Exception when running inference"));
assertThat(failure.getCause().getCause().getMessage(), containsString("boom"));
assertThat(failure.getStatus(), is(RestStatus.BAD_REQUEST));
// item 1 is a success
assertNull(bulkShardRequest.items()[1].getPrimaryResponse());
IndexRequest actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[1].request());
// Legacy format nests the raw text under "<field>.text"; the new format keeps the field flat
// and stores results under the inference metadata field.
assertThat(
XContentMapValues.extractValue(useLegacyFormat ? "field1.text" : "field1", actualRequest.sourceAsMap()),
equalTo("I am a success")
);
if (useLegacyFormat == false) {
assertNotNull(
XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME + ".field1", actualRequest.sourceAsMap())
);
}
// item 2 is a failure
assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse());
assertTrue(bulkShardRequest.items()[2].getPrimaryResponse().isFailed());
failure = bulkShardRequest.items()[2].getPrimaryResponse().getFailure();
assertThat(failure.getCause().getMessage(), containsString("Exception when running inference"));
assertThat(failure.getCause().getCause().getMessage(), containsString("boom"));
assertThat(failure.getStatus(), is(RestStatus.BAD_REQUEST));
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"field1",
new InferenceFieldMetadata("field1", model.getInferenceEntityId(), new String[] { "field1" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[3];
items[0] = new BulkItemRequest(0, new IndexRequest("index").source("field1", "I am a failure"));
items[1] = new BulkItemRequest(1, new IndexRequest("index").source("field1", "I am a success"));
items[2] = new BulkItemRequest(2, new IndexRequest("index").source("field1", "I am a failure"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
// Classify each recorded metric by attributes: a present status_code marks a success, an absent one
// plus an error.type marks a failure. Expect exactly 1 success and 2 failures across at most 3 calls.
AtomicInteger success = new AtomicInteger(0);
AtomicInteger failed = new AtomicInteger(0);
verify(inferenceStats.requestCount(), atMost(3)).incrementBy(anyLong(), assertArg(attributes -> {
var statusCode = attributes.get("status_code");
if (statusCode == null) {
failed.incrementAndGet();
assertThat(attributes.get("error.type"), is("IllegalArgumentException"));
} else {
success.incrementAndGet();
assertThat(statusCode, is(200));
}
assertThat(attributes.get("task_type"), is(model.getTaskType().toString()));
assertThat(attributes.get("service"), is(model.getConfigurations().getService()));
assertThat(attributes.get("inference_source"), is("semantic_text_bulk"));
}));
assertThat(success.get(), equalTo(1));
assertThat(failed.get(), equalTo(2));
}
// Explicit null semantic-text values must be preserved as-is (no inference call, no inference
// metadata written), both on index and on update requests, while non-null values in the same bulk
// still go through the normal success/failure paths.
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testExplicitNull() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
StaticModel model = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
// Pre-seed the static model: "I am a failure" yields an inference error, "I am a success" an embedding.
model.putResult("I am a failure", new ChunkedInferenceError(new IllegalArgumentException("boom")));
model.putResult("I am a success", randomChunkedInferenceEmbedding(model, List.of("I am a success")));
ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(model.getInferenceEntityId(), model),
NOOP_INDEXING_PRESSURE,
useLegacyFormat,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
assertNull(bulkShardRequest.getInferenceFieldMap());
assertThat(bulkShardRequest.items().length, equalTo(5));
// item 0: index with an explicit null — the null survives and no inference metadata is added
assertNull(bulkShardRequest.items()[0].getPrimaryResponse());
IndexRequest actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[0].request());
assertThat(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL), is(EXPLICIT_NULL));
assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL));
// item 1 is a success
assertNull(bulkShardRequest.items()[1].getPrimaryResponse());
actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[1].request());
assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", "I am a success", 1);
// item 2 is a failure
assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse());
assertTrue(bulkShardRequest.items()[2].getPrimaryResponse().isFailed());
var failure = bulkShardRequest.items()[2].getPrimaryResponse().getFailure();
assertThat(failure.getCause().getCause().getMessage(), containsString("boom"));
// item 3: update with an explicit null — inference results are recorded as explicitly null
assertNull(bulkShardRequest.items()[3].getPrimaryResponse());
actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[3].request());
assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", EXPLICIT_NULL, null);
// item 4: update that never touches the inference field — left completely untouched
assertNull(bulkShardRequest.items()[4].getPrimaryResponse());
actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[4].request());
assertNull(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL));
assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL));
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"obj.field1",
new InferenceFieldMetadata("obj.field1", model.getInferenceEntityId(), new String[] { "obj.field1" }, null)
);
// Map.of rejects null values, so build the null-valued source with a mutable HashMap.
Map<String, Object> sourceWithNull = new HashMap<>();
sourceWithNull.put("field1", null);
BulkItemRequest[] items = new BulkItemRequest[5];
items[0] = new BulkItemRequest(0, new IndexRequest("index").source(Map.of("obj", sourceWithNull)));
items[1] = new BulkItemRequest(1, new IndexRequest("index").source("obj.field1", "I am a success"));
items[2] = new BulkItemRequest(2, new IndexRequest("index").source("obj.field1", "I am a failure"));
items[3] = new BulkItemRequest(3, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("obj", sourceWithNull))));
items[4] = new BulkItemRequest(4, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("field2", "value"))));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testHandleEmptyInput() throws Exception {
    // Empty and whitespace-only semantic-text values must not trigger inference calls; the filter
    // rewrites each item in place with zero-chunk inference results instead of failing it.
    final InferenceStats stats = InferenceStatsTests.mockInferenceStats();
    StaticModel model = StaticModel.createRandomInstance();
    ShardBulkInferenceActionFilter filter = createFilter(
        threadPool,
        Map.of(model.getInferenceEntityId(), model),
        NOOP_INDEXING_PRESSURE,
        useLegacyFormat,
        stats
    );

    CountDownLatch downstreamInvoked = new CountDownLatch(1);
    ActionFilterChain chain = (task, action, request, listener) -> {
        try {
            BulkShardRequest shardRequest = (BulkShardRequest) request;
            assertNull(shardRequest.getInferenceFieldMap());
            assertThat(shardRequest.items().length, equalTo(3));
            // Item order: create with "", create with " ", update with "   " — each succeeds with 0 chunks.
            String[] expectedValues = { "", " ", "   " };
            for (int i = 0; i < expectedValues.length; i++) {
                assertNull(shardRequest.items()[i].getPrimaryResponse());
                IndexRequest rewritten = getIndexRequestOrNull(shardRequest.items()[i].request());
                assertInferenceResults(useLegacyFormat, rewritten, "semantic_text_field", expectedValues[i], 0);
            }
        } finally {
            downstreamInvoked.countDown();
        }
    };

    ActionListener responseListener = mock(ActionListener.class);
    Task task = mock(Task.class);
    Map<String, InferenceFieldMetadata> fieldMap = Map.of(
        "semantic_text_field",
        new InferenceFieldMetadata("semantic_text_field", model.getInferenceEntityId(), new String[] { "semantic_text_field" }, null)
    );
    BulkItemRequest[] items = new BulkItemRequest[3];
    items[0] = new BulkItemRequest(0, new IndexRequest("index").source(Map.of("semantic_text_field", "")));
    items[1] = new BulkItemRequest(1, new IndexRequest("index").source(Map.of("semantic_text_field", " ")));
    items[2] = new BulkItemRequest(2, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("semantic_text_field", "   "))));
    BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
    request.setInferenceFieldMap(fieldMap);
    filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, responseListener, chain);
    awaitLatch(downstreamInvoked, 10, TimeUnit.SECONDS);
}
// Randomized round-trip: for at least 100 random bulk items over random models/fields, the filter
// must rewrite each original request into exactly the pre-computed expected request (same source,
// compared for XContent equivalence rather than byte equality).
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testManyRandomDocs() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
Map<String, StaticModel> inferenceModelMap = new HashMap<>();
int numModels = randomIntBetween(1, 3);
for (int i = 0; i < numModels; i++) {
StaticModel model = StaticModel.createRandomInstance();
inferenceModelMap.put(model.getInferenceEntityId(), model);
}
// Each random field is wired to one of the random models above.
int numInferenceFields = randomIntBetween(1, 3);
Map<String, InferenceFieldMetadata> inferenceFieldMap = new HashMap<>();
for (int i = 0; i < numInferenceFields; i++) {
String field = randomAlphaOfLengthBetween(5, 10);
String inferenceId = randomFrom(inferenceModelMap.keySet());
inferenceFieldMap.put(field, new InferenceFieldMetadata(field, inferenceId, new String[] { field }, null));
}
// randomBulkItemRequest returns a pair: [0] = the request as submitted, [1] = the expected rewrite.
int numRequests = atLeast(100);
BulkItemRequest[] originalRequests = new BulkItemRequest[numRequests];
BulkItemRequest[] modifiedRequests = new BulkItemRequest[numRequests];
for (int id = 0; id < numRequests; id++) {
BulkItemRequest[] res = randomBulkItemRequest(useLegacyFormat, inferenceModelMap, inferenceFieldMap);
originalRequests[id] = res[0];
modifiedRequests[id] = res[1];
}
ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
inferenceModelMap,
NOOP_INDEXING_PRESSURE,
useLegacyFormat,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
assertThat(request, instanceOf(BulkShardRequest.class));
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
assertNull(bulkShardRequest.getInferenceFieldMap());
BulkItemRequest[] items = bulkShardRequest.items();
assertThat(items.length, equalTo(originalRequests.length));
for (int id = 0; id < items.length; id++) {
IndexRequest actualRequest = getIndexRequestOrNull(items[id].request());
IndexRequest expectedRequest = getIndexRequestOrNull(modifiedRequests[id].request());
try {
assertToXContentEquivalent(expectedRequest.source(), actualRequest.source(), expectedRequest.getContentType());
} catch (Exception exc) {
// Wrap checked exceptions: the ActionFilterChain lambda cannot throw them directly.
throw new IllegalStateException(exc);
}
}
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
BulkShardRequest original = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, originalRequests);
original.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, original, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
}
// Coordinating indexing-pressure accounting: each document that needs inference increments pressure
// once with its original source size (ops=1) and once more with the size added by the inference
// results (ops=0). Docs with no inference fields, or with results already present (doc_5), add no
// extra increments. The pressure handle must stay open through the downstream chain and only be
// closed after the listener completes.
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testIndexingPressure() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(Settings.EMPTY);
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
final StaticModel denseModel = StaticModel.createRandomInstance(TaskType.TEXT_EMBEDDING);
final ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(sparseModel.getInferenceEntityId(), sparseModel, denseModel.getInferenceEntityId(), denseModel),
indexingPressure,
useLegacyFormat,
inferenceStats
);
XContentBuilder doc0Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "a test value");
XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "dense_field", "another test value");
XContentBuilder doc2Source = IndexSource.getXContentBuilder(
XContentType.JSON,
"sparse_field",
"a test value",
"dense_field",
"another test value"
);
XContentBuilder doc3Source = IndexSource.getXContentBuilder(
XContentType.JSON,
"dense_field",
List.of("value one", " ", "value two")
);
XContentBuilder doc4Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", " ");
// doc_5 already carries pre-computed semantic-text inference results, so the filter has no work
// to do for it and it must not contribute any pressure increments.
XContentBuilder doc5Source = XContentFactory.contentBuilder(XContentType.JSON);
{
doc5Source.startObject();
if (useLegacyFormat == false) {
doc5Source.field("sparse_field", "a test value");
}
addSemanticTextInferenceResults(
useLegacyFormat,
doc5Source,
List.of(randomSemanticText(useLegacyFormat, "sparse_field", sparseModel, null, List.of("a test value"), XContentType.JSON))
);
doc5Source.endObject();
}
XContentBuilder doc0UpdateSource = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "an updated value");
XContentBuilder doc1UpdateSource = IndexSource.getXContentBuilder(XContentType.JSON, "dense_field", null);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
try {
BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
assertNull(bulkShardRequest.getInferenceFieldMap());
assertThat(bulkShardRequest.items().length, equalTo(10));
for (BulkItemRequest item : bulkShardRequest.items()) {
assertNull(item.getPrimaryResponse());
}
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
// One (ops=1, source-size) increment per inference-bearing document.
verify(coordinatingIndexingPressure).increment(1, length(doc0Source));
verify(coordinatingIndexingPressure).increment(1, length(doc1Source));
verify(coordinatingIndexingPressure).increment(1, length(doc2Source));
verify(coordinatingIndexingPressure).increment(1, length(doc3Source));
verify(coordinatingIndexingPressure).increment(1, length(doc4Source));
verify(coordinatingIndexingPressure).increment(1, length(doc0UpdateSource));
// In the legacy format an explicit-null update clears the field without growing the source,
// so the doc_1 update increment only happens in the new format.
if (useLegacyFormat == false) {
verify(coordinatingIndexingPressure).increment(1, length(doc1UpdateSource));
}
// One (ops=0, results-size) increment per document whose source grew with inference results.
verify(coordinatingIndexingPressure, times(useLegacyFormat ? 6 : 7)).increment(eq(0), longThat(l -> l > 0));
// Verify that the only times that increment is called are the times verified above
verify(coordinatingIndexingPressure, times(useLegacyFormat ? 12 : 14)).increment(anyInt(), anyLong());
// Verify that the coordinating indexing pressure is maintained through downstream action filters
verify(coordinatingIndexingPressure, never()).close();
// Call the listener once the request is successfully processed, like is done in the production code path
listener.onResponse(null);
} finally {
chainExecuted.countDown();
}
};
ActionListener actionListener = mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"sparse_field",
new InferenceFieldMetadata("sparse_field", sparseModel.getInferenceEntityId(), new String[] { "sparse_field" }, null),
"dense_field",
new InferenceFieldMetadata("dense_field", denseModel.getInferenceEntityId(), new String[] { "dense_field" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[10];
items[0] = new BulkItemRequest(0, new IndexRequest("index").id("doc_0").source(doc0Source));
items[1] = new BulkItemRequest(1, new IndexRequest("index").id("doc_1").source(doc1Source));
items[2] = new BulkItemRequest(2, new IndexRequest("index").id("doc_2").source(doc2Source));
items[3] = new BulkItemRequest(3, new IndexRequest("index").id("doc_3").source(doc3Source));
items[4] = new BulkItemRequest(4, new IndexRequest("index").id("doc_4").source(doc4Source));
items[5] = new BulkItemRequest(5, new IndexRequest("index").id("doc_5").source(doc5Source));
items[6] = new BulkItemRequest(6, new IndexRequest("index").id("doc_6").source("non_inference_field", "yet another test value"));
items[7] = new BulkItemRequest(7, new UpdateRequest().doc(new IndexRequest("index").id("doc_0").source(doc0UpdateSource)));
items[8] = new BulkItemRequest(8, new UpdateRequest().doc(new IndexRequest("index").id("doc_1").source(doc1UpdateSource)));
items[9] = new BulkItemRequest(
9,
new UpdateRequest().doc(new IndexRequest("index").id("doc_3").source("non_inference_field", "yet another updated value"))
);
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
// After the listener has been called the pressure handle must be released exactly once.
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
verify(coordinatingIndexingPressure).close();
}
// With a 1-byte coordinating-pressure limit, the very first increment (doc_1's source size) trips
// the breaker: doc_1 must fail with TOO_MANY_REQUESTS (cause: EsRejectedExecutionException), its
// source must be left unmodified, and items without inference fields must pass through untouched.
@SuppressWarnings("unchecked")
public void testIndexingPressureTripsOnInferenceRequestGeneration() throws Exception {
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(
Settings.builder().put(MAX_COORDINATING_BYTES.getKey(), "1b").build()
);
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
final ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(sparseModel.getInferenceEntityId(), sparseModel),
indexingPressure,
useLegacyFormat,
inferenceStats
);
XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
try {
assertNull(request.getInferenceFieldMap());
assertThat(request.items().length, equalTo(3));
// doc_0 and doc_2 have no inference fields, so they are unaffected by the breaker trip.
assertNull(request.items()[0].getPrimaryResponse());
assertNull(request.items()[2].getPrimaryResponse());
BulkItemRequest doc1Request = request.items()[1];
BulkItemResponse doc1Response = doc1Request.getPrimaryResponse();
assertNotNull(doc1Response);
assertTrue(doc1Response.isFailed());
BulkItemResponse.Failure doc1Failure = doc1Response.getFailure();
assertThat(
doc1Failure.getCause().getMessage(),
containsString(
"Unable to insert inference results into document [doc_1]"
+ " due to memory pressure. Please retry the bulk request with fewer documents or smaller document sizes."
)
);
assertThat(doc1Failure.getCause().getCause(), instanceOf(EsRejectedExecutionException.class));
assertThat(doc1Failure.getStatus(), is(RestStatus.TOO_MANY_REQUESTS));
// The failed item's source must be byte-identical to what was submitted.
IndexRequest doc1IndexRequest = getIndexRequestOrNull(doc1Request.request());
assertThat(doc1IndexRequest, notNullValue());
assertThat(doc1IndexRequest.source(), equalBytes(BytesReference.bytes(doc1Source)));
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
// Exactly one increment attempt (the tripping one); nothing else was accounted.
verify(coordinatingIndexingPressure).increment(1, length(doc1Source));
verify(coordinatingIndexingPressure, times(1)).increment(anyInt(), anyLong());
// Verify that the coordinating indexing pressure is maintained through downstream action filters
verify(coordinatingIndexingPressure, never()).close();
// Call the listener once the request is successfully processed, like is done in the production code path
listener.onResponse(null);
} finally {
chainExecuted.countDown();
}
};
ActionListener<BulkShardResponse> actionListener = (ActionListener<BulkShardResponse>) mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"sparse_field",
new InferenceFieldMetadata("sparse_field", sparseModel.getInferenceEntityId(), new String[] { "sparse_field" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[3];
items[0] = new BulkItemRequest(0, new IndexRequest("index").id("doc_0").source("non_inference_field", "foo"));
items[1] = new BulkItemRequest(1, new IndexRequest("index").id("doc_1").source(doc1Source));
items[2] = new BulkItemRequest(2, new IndexRequest("index").id("doc_2").source("non_inference_field", "baz"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
// The pressure handle is released exactly once after the listener completes.
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
verify(coordinatingIndexingPressure).close();
}
// The coordinating-pressure limit is sized to admit doc_1's source bytes plus one byte — enough for
// the first (source-size) increment but not for the second (inference-results) increment. The trip
// therefore happens while handling the inference RESPONSE: doc_1 fails with TOO_MANY_REQUESTS and
// its source is left unmodified, after exactly two increment attempts.
@SuppressWarnings("unchecked")
public void testIndexingPressureTripsOnInferenceResponseHandling() throws Exception {
final XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(
Settings.builder().put(MAX_COORDINATING_BYTES.getKey(), (length(doc1Source) + 1) + "b").build()
);
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
// Seed a successful embedding so the failure can only come from the pressure accounting.
sparseModel.putResult("bar", randomChunkedInferenceEmbedding(sparseModel, List.of("bar")))
;
final ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(sparseModel.getInferenceEntityId(), sparseModel),
indexingPressure,
useLegacyFormat,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
try {
assertNull(request.getInferenceFieldMap());
assertThat(request.items().length, equalTo(3));
// doc_0 and doc_2 have no inference fields, so they are unaffected by the breaker trip.
assertNull(request.items()[0].getPrimaryResponse());
assertNull(request.items()[2].getPrimaryResponse());
BulkItemRequest doc1Request = request.items()[1];
BulkItemResponse doc1Response = doc1Request.getPrimaryResponse();
assertNotNull(doc1Response);
assertTrue(doc1Response.isFailed());
BulkItemResponse.Failure doc1Failure = doc1Response.getFailure();
assertThat(
doc1Failure.getCause().getMessage(),
containsString(
"Unable to insert inference results into document [doc_1]"
+ " due to memory pressure. Please retry the bulk request with fewer documents or smaller document sizes."
)
);
assertThat(doc1Failure.getCause().getCause(), instanceOf(EsRejectedExecutionException.class));
assertThat(doc1Failure.getStatus(), is(RestStatus.TOO_MANY_REQUESTS));
// The failed item's source must be byte-identical to what was submitted.
IndexRequest doc1IndexRequest = getIndexRequestOrNull(doc1Request.request());
assertThat(doc1IndexRequest, notNullValue());
assertThat(doc1IndexRequest.source(), equalBytes(BytesReference.bytes(doc1Source)))
;
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
// First increment (source bytes) succeeded; second (results bytes, ops=0) tripped. Total: 2.
verify(coordinatingIndexingPressure).increment(1, length(doc1Source));
verify(coordinatingIndexingPressure).increment(eq(0), longThat(l -> l > 0));
verify(coordinatingIndexingPressure, times(2)).increment(anyInt(), anyLong());
// Verify that the coordinating indexing pressure is maintained through downstream action filters
verify(coordinatingIndexingPressure, never()).close();
// Call the listener once the request is successfully processed, like is done in the production code path
listener.onResponse(null);
} finally {
chainExecuted.countDown();
}
};
ActionListener<BulkShardResponse> actionListener = (ActionListener<BulkShardResponse>) mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"sparse_field",
new InferenceFieldMetadata("sparse_field", sparseModel.getInferenceEntityId(), new String[] { "sparse_field" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[3];
items[0] = new BulkItemRequest(0, new IndexRequest("index").id("doc_0").source("non_inference_field", "foo"));
items[1] = new BulkItemRequest(1, new IndexRequest("index").id("doc_1").source(doc1Source));
items[2] = new BulkItemRequest(2, new IndexRequest("index").id("doc_2").source("non_inference_field", "baz"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
// The pressure handle is released exactly once after the listener completes.
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
verify(coordinatingIndexingPressure).close();
}
// Partial failure under memory pressure: the byte budget admits doc_1's source + results and doc_2's
// source, but only half of doc_2's estimated results. doc_1 must succeed while doc_2 fails with
// TOO_MANY_REQUESTS, its source left unmodified. Expected increments: two (ops=1, source) and two
// (ops=0, results — the second of which trips), four in total.
@SuppressWarnings("unchecked")
public void testIndexingPressurePartialFailure() throws Exception {
// Use different length strings so that doc 1 and doc 2 sources are different sizes
final XContentBuilder doc1Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bar");
final XContentBuilder doc2Source = IndexSource.getXContentBuilder(XContentType.JSON, "sparse_field", "bazzz");
final StaticModel sparseModel = StaticModel.createRandomInstance(TaskType.SPARSE_EMBEDDING);
final ChunkedInferenceEmbedding barEmbedding = randomChunkedInferenceEmbedding(sparseModel, List.of("bar"));
final ChunkedInferenceEmbedding bazzzEmbedding = randomChunkedInferenceEmbedding(sparseModel, List.of("bazzz"));
sparseModel.putResult("bar", barEmbedding);
sparseModel.putResult("bazzz", bazzzEmbedding);
// Estimates the serialized size of the semantic-text field a given inference result will add,
// used below to size the coordinating byte budget precisely.
CheckedBiFunction<List<String>, ChunkedInference, Long, IOException> estimateInferenceResultsBytes = (inputs, inference) -> {
SemanticTextField semanticTextField = semanticTextFieldFromChunkedInferenceResults(
useLegacyFormat,
"sparse_field",
sparseModel,
null,
inputs,
inference,
XContentType.JSON
);
XContentBuilder builder = XContentFactory.jsonBuilder();
semanticTextField.toXContent(builder, EMPTY_PARAMS);
return length(builder);
};
// Budget = doc1 source + doc2 source + all of doc1's results + half of doc2's results.
final InstrumentedIndexingPressure indexingPressure = new InstrumentedIndexingPressure(
Settings.builder()
.put(
MAX_COORDINATING_BYTES.getKey(),
(length(doc1Source) + length(doc2Source) + estimateInferenceResultsBytes.apply(List.of("bar"), barEmbedding)
+ (estimateInferenceResultsBytes.apply(List.of("bazzz"), bazzzEmbedding) / 2)) + "b"
)
.build()
);
final InferenceStats inferenceStats = InferenceStatsTests.mockInferenceStats();
final ShardBulkInferenceActionFilter filter = createFilter(
threadPool,
Map.of(sparseModel.getInferenceEntityId(), sparseModel),
indexingPressure,
useLegacyFormat,
inferenceStats
);
CountDownLatch chainExecuted = new CountDownLatch(1);
ActionFilterChain<BulkShardRequest, BulkShardResponse> actionFilterChain = (task, action, request, listener) -> {
try {
assertNull(request.getInferenceFieldMap());
assertThat(request.items().length, equalTo(4));
// doc_0/doc_3 have no inference fields; doc_1 fits the budget — none of them fail.
assertNull(request.items()[0].getPrimaryResponse());
assertNull(request.items()[1].getPrimaryResponse());
assertNull(request.items()[3].getPrimaryResponse());
BulkItemRequest doc2Request = request.items()[2];
BulkItemResponse doc2Response = doc2Request.getPrimaryResponse();
assertNotNull(doc2Response);
assertTrue(doc2Response.isFailed());
BulkItemResponse.Failure doc2Failure = doc2Response.getFailure();
assertThat(
doc2Failure.getCause().getMessage(),
containsString(
"Unable to insert inference results into document [doc_2]"
+ " due to memory pressure. Please retry the bulk request with fewer documents or smaller document sizes."
)
);
assertThat(doc2Failure.getCause().getCause(), instanceOf(EsRejectedExecutionException.class));
assertThat(doc2Failure.getStatus(), is(RestStatus.TOO_MANY_REQUESTS));
// The failed item's source must be byte-identical to what was submitted.
IndexRequest doc2IndexRequest = getIndexRequestOrNull(doc2Request.request());
assertThat(doc2IndexRequest, notNullValue());
assertThat(doc2IndexRequest.source(), equalBytes(BytesReference.bytes(doc2Source)));
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
// Two source increments + two results increments (doc_2's tripped) = four calls total.
verify(coordinatingIndexingPressure).increment(1, length(doc1Source));
verify(coordinatingIndexingPressure).increment(1, length(doc2Source));
verify(coordinatingIndexingPressure, times(2)).increment(eq(0), longThat(l -> l > 0));
verify(coordinatingIndexingPressure, times(4)).increment(anyInt(), anyLong());
// Verify that the coordinating indexing pressure is maintained through downstream action filters
verify(coordinatingIndexingPressure, never()).close();
// Call the listener once the request is successfully processed, like is done in the production code path
listener.onResponse(null);
} finally {
chainExecuted.countDown();
}
};
ActionListener<BulkShardResponse> actionListener = (ActionListener<BulkShardResponse>) mock(ActionListener.class);
Task task = mock(Task.class);
Map<String, InferenceFieldMetadata> inferenceFieldMap = Map.of(
"sparse_field",
new InferenceFieldMetadata("sparse_field", sparseModel.getInferenceEntityId(), new String[] { "sparse_field" }, null)
);
BulkItemRequest[] items = new BulkItemRequest[4];
items[0] = new BulkItemRequest(0, new IndexRequest("index").id("doc_0").source("non_inference_field", "foo"));
items[1] = new BulkItemRequest(1, new IndexRequest("index").id("doc_1").source(doc1Source));
items[2] = new BulkItemRequest(2, new IndexRequest("index").id("doc_2").source(doc2Source));
items[3] = new BulkItemRequest(3, new IndexRequest("index").id("doc_3").source("non_inference_field", "baz"));
BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items);
request.setInferenceFieldMap(inferenceFieldMap);
filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
// The pressure handle is released exactly once after the listener completes.
IndexingPressure.Coordinating coordinatingIndexingPressure = indexingPressure.getCoordinating();
assertThat(coordinatingIndexingPressure, notNullValue());
verify(coordinatingIndexingPressure).close();
}
private static ShardBulkInferenceActionFilter createFilter(
ThreadPool threadPool,
Map<String, StaticModel> modelMap,
IndexingPressure indexingPressure,
boolean useLegacyFormat,
InferenceStats inferenceStats
) {
MockLicenseState licenseState = MockLicenseState.createMock();
when(licenseState.isAllowed(InferencePlugin.INFERENCE_API_FEATURE)).thenReturn(true);
return createFilter(threadPool, modelMap, indexingPressure, useLegacyFormat, licenseState, inferenceStats);
}
    /**
     * Builds a {@link ShardBulkInferenceActionFilter} backed entirely by mocks that
     * resolve models from the supplied {@code modelMap}.
     *
     * Unknown inference ids fail with a {@link ResourceNotFoundException}. Chunked
     * inference answers with the model's precomputed results and randomly completes
     * either inline or on the generic thread pool, exercising both sync and async paths.
     */
    @SuppressWarnings("unchecked")
    private static ShardBulkInferenceActionFilter createFilter(
        ThreadPool threadPool,
        Map<String, StaticModel> modelMap,
        IndexingPressure indexingPressure,
        boolean useLegacyFormat,
        MockLicenseState licenseState,
        InferenceStats inferenceStats
    ) {
        ModelRegistry modelRegistry = mock(ModelRegistry.class);
        // Resolve a model (with secrets) from the static map, or report "not found".
        Answer<?> unparsedModelAnswer = invocationOnMock -> {
            String id = (String) invocationOnMock.getArguments()[0];
            ActionListener<UnparsedModel> listener = (ActionListener<UnparsedModel>) invocationOnMock.getArguments()[1];
            var model = modelMap.get(id);
            if (model != null) {
                listener.onResponse(
                    new UnparsedModel(
                        model.getInferenceEntityId(),
                        model.getTaskType(),
                        model.getServiceSettings().model(),
                        XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.toString(model.getTaskSettings()), false),
                        XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.toString(model.getSecretSettings()), false)
                    )
                );
            } else {
                listener.onFailure(new ResourceNotFoundException("model id [{}] not found", id));
            }
            return null;
        };
        doAnswer(unparsedModelAnswer).when(modelRegistry).getModelWithSecrets(any(), any());
        // Minimal service settings come from the same map; unknown ids throw.
        Answer<MinimalServiceSettings> minimalServiceSettingsAnswer = invocationOnMock -> {
            String inferenceId = (String) invocationOnMock.getArguments()[0];
            var model = modelMap.get(inferenceId);
            if (model == null) {
                throw new ResourceNotFoundException("model id [{}] not found", inferenceId);
            }
            return new MinimalServiceSettings(model);
        };
        doAnswer(minimalServiceSettingsAnswer).when(modelRegistry).getMinimalServiceSettings(any());
        InferenceService inferenceService = mock(InferenceService.class);
        // Answer chunkedInfer with the StaticModel's stored results; completion is
        // randomly inline or dispatched to the generic pool to cover both paths.
        Answer<?> chunkedInferAnswer = invocationOnMock -> {
            StaticModel model = (StaticModel) invocationOnMock.getArguments()[0];
            List<ChunkInferenceInput> inputs = (List<ChunkInferenceInput>) invocationOnMock.getArguments()[2];
            ActionListener<List<ChunkedInference>> listener = (ActionListener<List<ChunkedInference>>) invocationOnMock.getArguments()[6];
            Runnable runnable = () -> {
                List<ChunkedInference> results = new ArrayList<>();
                for (ChunkInferenceInput input : inputs) {
                    results.add(model.getResults(input.inputText()));
                }
                listener.onResponse(results);
            };
            if (randomBoolean()) {
                try {
                    threadPool.generic().execute(runnable);
                } catch (Exception exc) {
                    listener.onFailure(exc);
                }
            } else {
                runnable.run();
            }
            return null;
        };
        doAnswer(chunkedInferAnswer).when(inferenceService).chunkedInfer(any(), any(), any(), any(), any(), any(), any());
        // Persisted-config parsing simply looks the model up by id.
        Answer<Model> modelAnswer = invocationOnMock -> {
            String inferenceId = (String) invocationOnMock.getArguments()[0];
            return modelMap.get(inferenceId);
        };
        doAnswer(modelAnswer).when(inferenceService).parsePersistedConfigWithSecrets(any(), any(), any(), any());
        InferenceServiceRegistry inferenceServiceRegistry = mock(InferenceServiceRegistry.class);
        when(inferenceServiceRegistry.getService(any())).thenReturn(Optional.of(inferenceService));
        return new ShardBulkInferenceActionFilter(
            createClusterService(useLegacyFormat),
            inferenceServiceRegistry,
            modelRegistry,
            licenseState,
            indexingPressure,
            inferenceStats
        );
    }
    /**
     * Creates a mocked {@link ClusterService} whose single (stubbed) index reports the
     * requested semantic-text format and whose settings carry a randomized inference
     * batch size, so batching splits differ across test runs.
     */
    private static ClusterService createClusterService(boolean useLegacyFormat) {
        IndexMetadata indexMetadata = mock(IndexMetadata.class);
        var indexSettings = Settings.builder()
            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
            .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), useLegacyFormat)
            .build();
        when(indexMetadata.getSettings()).thenReturn(indexSettings);
        // Any index name resolves to the same stubbed metadata.
        ProjectMetadata project = spy(ProjectMetadata.builder(Metadata.DEFAULT_PROJECT_ID).build());
        when(project.index(anyString())).thenReturn(indexMetadata);
        Metadata metadata = mock(Metadata.class);
        when(metadata.getProject()).thenReturn(project);
        ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(metadata).build();
        ClusterService clusterService = mock(ClusterService.class);
        when(clusterService.state()).thenReturn(clusterState);
        // Random batch size between 1 byte and 1 KB.
        long batchSizeInBytes = randomLongBetween(1, ByteSizeValue.ofKb(1).getBytes());
        Settings settings = Settings.builder().put(INDICES_INFERENCE_BATCH_SIZE.getKey(), ByteSizeValue.ofBytes(batchSizeInBytes)).build();
        when(clusterService.getSettings()).thenReturn(settings);
        when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, Set.of(INDICES_INFERENCE_BATCH_SIZE)));
        return clusterService;
    }
    /**
     * Builds a pair of bulk items sharing one request id: element 0 is the raw input
     * document and element 1 the document as it should look after inference results
     * have been inserted (layout depends on {@code useLegacyFormat}).
     *
     * Fields whose inference id is missing from {@code modelMap} are left without
     * results — the corresponding doc is expected to fail with "resource not found".
     */
    private static BulkItemRequest[] randomBulkItemRequest(
        boolean useLegacyFormat,
        Map<String, StaticModel> modelMap,
        Map<String, InferenceFieldMetadata> fieldInferenceMap
    ) throws IOException {
        Map<String, Object> docMap = new LinkedHashMap<>();
        Map<String, Object> expectedDocMap = new LinkedHashMap<>();
        // force JSON to avoid double/float conversions
        XContentType requestContentType = XContentType.JSON;
        Map<String, Object> inferenceMetadataFields = new HashMap<>();
        for (var entry : fieldInferenceMap.values()) {
            String field = entry.getName();
            var model = modelMap.get(entry.getInferenceId());
            Object inputObject = randomSemanticTextInput();
            String inputText = inputObject.toString();
            docMap.put(field, inputObject);
            expectedDocMap.put(field, useLegacyFormat ? inputText : inputObject);
            if (model == null) {
                // ignore results, the doc should fail with a resource not found exception
                continue;
            }
            SemanticTextField semanticTextField;
            // The model is not field aware and that is why we are skipping the embedding generation process for existing values.
            // This prevents a situation where embeddings in the expected docMap do not match those in the model, which could happen if
            // embeddings were overwritten.
            if (model.hasResult(inputText)) {
                var results = model.getResults(inputText);
                semanticTextField = semanticTextFieldFromChunkedInferenceResults(
                    useLegacyFormat,
                    field,
                    model,
                    null,
                    List.of(inputText),
                    results,
                    requestContentType
                );
            } else {
                // First time we see this input: generate random results and cache them
                // on the model so later lookups for the same text are consistent.
                Map<String, List<String>> inputTextMap = Map.of(field, List.of(inputText));
                semanticTextField = randomSemanticText(useLegacyFormat, field, model, null, List.of(inputText), requestContentType);
                model.putResult(inputText, toChunkedResult(useLegacyFormat, inputTextMap, semanticTextField));
            }
            if (useLegacyFormat) {
                expectedDocMap.put(field, semanticTextField);
            } else {
                inferenceMetadataFields.put(field, semanticTextField);
            }
        }
        if (useLegacyFormat == false) {
            expectedDocMap.put(InferenceMetadataFieldsMapper.NAME, inferenceMetadataFields);
        }
        int requestId = randomIntBetween(0, Integer.MAX_VALUE);
        return new BulkItemRequest[] {
            new BulkItemRequest(requestId, new IndexRequest("index").source(docMap, requestContentType)),
            new BulkItemRequest(requestId, new IndexRequest("index").source(expectedDocMap, requestContentType)) };
    }
private static long length(XContentBuilder builder) {
return BytesReference.bytes(builder).length();
}
    /**
     * Asserts that {@code request} carries the expected original value and chunk count
     * for {@code fieldName}, handling both the legacy semantic-text layout and the
     * inference-metadata-fields layout.
     *
     * @param expectedChunkCount expected number of chunks, or {@code null} when no
     *        chunks are expected (legacy: chunks field absent; new format: empty list)
     */
    @SuppressWarnings({ "unchecked" })
    private static void assertInferenceResults(
        boolean useLegacyFormat,
        IndexRequest request,
        String fieldName,
        Object expectedOriginalValue,
        Integer expectedChunkCount
    ) {
        final Map<String, Object> requestMap = request.sourceAsMap();
        if (useLegacyFormat) {
            // Legacy layout: original text and chunks live under sub-fields of the field itself.
            assertThat(
                XContentMapValues.extractValue(getOriginalTextFieldName(fieldName), requestMap, EXPLICIT_NULL),
                equalTo(expectedOriginalValue)
            );
            List<Object> chunks = (List<Object>) XContentMapValues.extractValue(getChunksFieldName(fieldName), requestMap);
            if (expectedChunkCount == null) {
                assertNull(chunks);
            } else {
                assertNotNull(chunks);
                assertThat(chunks.size(), equalTo(expectedChunkCount));
            }
        } else {
            assertThat(XContentMapValues.extractValue(fieldName, requestMap, EXPLICIT_NULL), equalTo(expectedOriginalValue));
            Map<String, Object> inferenceMetadataFields = (Map<String, Object>) XContentMapValues.extractValue(
                InferenceMetadataFieldsMapper.NAME,
                requestMap,
                EXPLICIT_NULL
            );
            assertNotNull(inferenceMetadataFields);
            // When using the inference metadata fields format, chunks are mapped by source field. We handle clearing inference results for
            // a field by emitting an empty chunk list for it. This is done to prevent the clear operation from clearing inference results
            // for other source fields.
            List<Object> chunks = (List<Object>) XContentMapValues.extractValue(
                getChunksFieldName(fieldName) + "." + fieldName,
                inferenceMetadataFields,
                EXPLICIT_NULL
            );
            // When using the new format, the chunks field should always exist
            int expectedSize = expectedChunkCount == null ? 0 : expectedChunkCount;
            assertNotNull(chunks);
            assertThat(chunks.size(), equalTo(expectedSize));
        }
    }
private static | ShardBulkInferenceActionFilterTests |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-common/deployment/src/main/java/io/quarkus/resteasy/reactive/common/deployment/SerializersUtil.java | {
"start": 1056,
"end": 5262
/**
 * Build-time helper that registers all discovered message body writers and readers
 * (filtered by runtime type) with the given {@code Serialisers} instance, applying
 * any per-class priority/built-in overrides, and records each provider class for
 * reflection.
 */
class ____ {
    public static void setupSerializers(ResteasyReactiveCommonRecorder recorder,
            BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
            List<MessageBodyReaderBuildItem> messageBodyReaderBuildItems,
            List<MessageBodyWriterBuildItem> messageBodyWriterBuildItems,
            List<MessageBodyReaderOverrideBuildItem> messageBodyReaderOverrideBuildItems,
            List<MessageBodyWriterOverrideBuildItem> messageBodyWriterOverrideBuildItems,
            BeanContainerBuildItem beanContainerBuildItem,
            ApplicationResultBuildItem applicationResultBuildItem,
            Serialisers serialisers, RuntimeType runtimeType) {
        // Index writer overrides by provider class name for quick lookup below.
        Map<String, MessageBodyReaderWriterOverrideData> writerOverrides = new HashMap<>();
        for (MessageBodyWriterOverrideBuildItem writerOverride : messageBodyWriterOverrideBuildItems) {
            writerOverrides.put(writerOverride.getClassName(), writerOverride.getOverrideData());
        }
        // Register each writer applicable to this runtime type; override data, when
        // present, wins over the writer's own built-in flag and priority.
        for (MessageBodyWriterBuildItem additionalWriter : RuntimeTypeItem.filter(messageBodyWriterBuildItems,
                runtimeType)) {
            ResourceWriter writer = new ResourceWriter();
            String writerClassName = additionalWriter.getClassName();
            MessageBodyReaderWriterOverrideData overrideData = writerOverrides.get(writerClassName);
            if (overrideData != null) {
                writer.setBuiltin(overrideData.isBuiltIn());
            } else {
                writer.setBuiltin(additionalWriter.isBuiltin());
            }
            writer.setFactory(FactoryUtils.factory(writerClassName,
                    applicationResultBuildItem.getResult().getSingletonClasses(), recorder,
                    beanContainerBuildItem));
            writer.setConstraint(additionalWriter.getRuntimeType());
            if (!additionalWriter.getMediaTypeStrings().isEmpty()) {
                writer.setMediaTypeStrings(additionalWriter.getMediaTypeStrings());
            }
            if (overrideData != null) {
                writer.setPriority(overrideData.getPriority());
            } else {
                writer.setPriority(additionalWriter.getPriority());
            }
            recorder.registerWriter(serialisers, additionalWriter.getHandledClassName(), writer);
            reflectiveClass.produce(
                    ReflectiveClassBuildItem.builder(writerClassName).build());
        }
        // Same procedure for readers: index overrides, then register each reader.
        Map<String, MessageBodyReaderWriterOverrideData> readerOverrides = new HashMap<>();
        for (MessageBodyReaderOverrideBuildItem readerOverride : messageBodyReaderOverrideBuildItems) {
            readerOverrides.put(readerOverride.getClassName(), readerOverride.getOverrideData());
        }
        for (MessageBodyReaderBuildItem additionalReader : RuntimeTypeItem.filter(messageBodyReaderBuildItems,
                runtimeType)) {
            ResourceReader reader = new ResourceReader();
            String readerClassName = additionalReader.getClassName();
            MessageBodyReaderWriterOverrideData overrideData = readerOverrides.get(readerClassName);
            if (overrideData != null) {
                reader.setBuiltin(overrideData.isBuiltIn());
            } else {
                reader.setBuiltin(additionalReader.isBuiltin());
            }
            reader.setFactory(FactoryUtils.factory(readerClassName,
                    applicationResultBuildItem.getResult().getSingletonClasses(), recorder,
                    beanContainerBuildItem));
            reader.setConstraint(additionalReader.getRuntimeType());
            if (!additionalReader.getMediaTypeStrings().isEmpty()) {
                reader.setMediaTypeStrings(additionalReader.getMediaTypeStrings());
            }
            if (overrideData != null) {
                reader.setPriority(overrideData.getPriority());
            } else {
                reader.setPriority(additionalReader.getPriority());
            }
            recorder.registerReader(serialisers, additionalReader.getHandledClassName(), reader);
            reflectiveClass.produce(
                    ReflectiveClassBuildItem.builder(readerClassName).build());
        }
    }
}
| SerializersUtil |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableTakeUntilTest.java | {
"start": 1108,
"end": 6214
} | class ____ extends RxJavaTest {
@Test
public void takeUntil() {
Disposable sSource = mock(Disposable.class);
Disposable sOther = mock(Disposable.class);
TestObservable source = new TestObservable(sSource);
TestObservable other = new TestObservable(sOther);
Observer<String> result = TestHelper.mockObserver();
Observable<String> stringObservable = Observable.unsafeCreate(source)
.takeUntil(Observable.unsafeCreate(other));
stringObservable.subscribe(result);
source.sendOnNext("one");
source.sendOnNext("two");
other.sendOnNext("three");
source.sendOnNext("four");
source.sendOnCompleted();
other.sendOnCompleted();
verify(result, times(1)).onNext("one");
verify(result, times(1)).onNext("two");
verify(result, times(0)).onNext("three");
verify(result, times(0)).onNext("four");
verify(sSource, times(1)).dispose();
verify(sOther, times(1)).dispose();
}
@Test
public void takeUntilSourceCompleted() {
Disposable sSource = mock(Disposable.class);
Disposable sOther = mock(Disposable.class);
TestObservable source = new TestObservable(sSource);
TestObservable other = new TestObservable(sOther);
Observer<String> result = TestHelper.mockObserver();
Observable<String> stringObservable = Observable.unsafeCreate(source).takeUntil(Observable.unsafeCreate(other));
stringObservable.subscribe(result);
source.sendOnNext("one");
source.sendOnNext("two");
source.sendOnCompleted();
verify(result, times(1)).onNext("one");
verify(result, times(1)).onNext("two");
verify(sSource, never()).dispose(); // no longer disposing itself on terminal events
verify(sOther, times(1)).dispose();
}
    /**
     * An error from the main source is forwarded downstream, later emissions are
     * dropped, and only the 'other' subscription is cancelled (the operator no
     * longer disposes itself on terminal events).
     */
    @Test
    public void takeUntilSourceError() {
        Disposable sSource = mock(Disposable.class);
        Disposable sOther = mock(Disposable.class);
        TestObservable source = new TestObservable(sSource);
        TestObservable other = new TestObservable(sOther);
        Throwable error = new Throwable();
        Observer<String> result = TestHelper.mockObserver();
        Observable<String> stringObservable = Observable.unsafeCreate(source).takeUntil(Observable.unsafeCreate(other));
        stringObservable.subscribe(result);
        source.sendOnNext("one");
        source.sendOnNext("two");
        source.sendOnError(error);
        // Emission after onError must be ignored.
        source.sendOnNext("three");
        verify(result, times(1)).onNext("one");
        verify(result, times(1)).onNext("two");
        verify(result, times(0)).onNext("three");
        verify(result, times(1)).onError(error);
        verify(sSource, never()).dispose(); // no longer disposing itself on terminal events
        verify(sOther, times(1)).dispose();
    }
    /**
     * An error from the 'other' observable terminates the sequence with that error
     * (no onComplete) and cancels the main source subscription.
     */
    @Test
    public void takeUntilOtherError() {
        Disposable sSource = mock(Disposable.class);
        Disposable sOther = mock(Disposable.class);
        TestObservable source = new TestObservable(sSource);
        TestObservable other = new TestObservable(sOther);
        Throwable error = new Throwable();
        Observer<String> result = TestHelper.mockObserver();
        Observable<String> stringObservable = Observable.unsafeCreate(source).takeUntil(Observable.unsafeCreate(other));
        stringObservable.subscribe(result);
        source.sendOnNext("one");
        source.sendOnNext("two");
        other.sendOnError(error);
        // Emission after the 'other' error must be ignored.
        source.sendOnNext("three");
        verify(result, times(1)).onNext("one");
        verify(result, times(1)).onNext("two");
        verify(result, times(0)).onNext("three");
        verify(result, times(1)).onError(error);
        verify(result, times(0)).onComplete();
        verify(sSource, times(1)).dispose();
        verify(sOther, never()).dispose(); // no longer disposing itself on termination
    }
    /**
     * If the 'other' observable completes, the operator unsubscribes from the main
     * source and emits onComplete downstream; later source emissions are dropped.
     */
    @Test
    public void takeUntilOtherCompleted() {
        Disposable sSource = mock(Disposable.class);
        Disposable sOther = mock(Disposable.class);
        TestObservable source = new TestObservable(sSource);
        TestObservable other = new TestObservable(sOther);
        Observer<String> result = TestHelper.mockObserver();
        Observable<String> stringObservable = Observable.unsafeCreate(source).takeUntil(Observable.unsafeCreate(other));
        stringObservable.subscribe(result);
        source.sendOnNext("one");
        source.sendOnNext("two");
        other.sendOnCompleted();
        // Emission after the 'other' completed must be ignored.
        source.sendOnNext("three");
        verify(result, times(1)).onNext("one");
        verify(result, times(1)).onNext("two");
        verify(result, times(0)).onNext("three");
        verify(result, times(1)).onComplete();
        verify(sSource, times(1)).dispose();
        verify(sOther, never()).dispose(); // no longer disposing itself on terminal events
    }
private static | ObservableTakeUntilTest |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpReconnectAttemptUnknownHostIT.java | {
"start": 1006,
"end": 1713
} | class ____ extends FtpServerTestSupport {
private String getFtpUrl() {
return "ftp://admin@doesnotexisthost:{{ftp.server.port}}/reconnect?password=admin";
}
@Test
public void testFromFileToFtp() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(0);
// let it run a little
Thread.sleep(3000);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getFtpUrl()).to("mock:result");
}
};
}
}
| FtpReconnectAttemptUnknownHostIT |
java | apache__flink | flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/ProtobufSQLITCase.java | {
"start": 1765,
"end": 16324
/**
 * Batch SQL integration tests for the protobuf format: round-tripping protobuf
 * records through the test source/sink connectors, default-value handling for
 * proto2 and proto3, parse-error handling, null-literal mapping on write, and
 * the unsupported bulk filesystem sink case.
 */
class ____ extends BatchTestBase {
    // Builds the canonical MapTest message used as source input by several tests.
    private MapTest getProtoTestObject() {
        MapTest.InnerMessageTest innerMessageTest =
                MapTest.InnerMessageTest.newBuilder().setA(1).setB(2).build();
        MapTest mapTest =
                MapTest.newBuilder()
                        .setA(1)
                        .putMap1("a", "b")
                        .putMap1("c", "d")
                        .putMap2("f", innerMessageTest)
                        .build();
        return mapTest;
    }
    // Reading a well-formed message yields the expected scalar, map and nested-row values.
    @Test
    public void testSource() {
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(getProtoTestObject().toByteArray());
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest'"
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        Row row = result.collect().next();
        assertEquals(1, (int) row.getField(0));
        Map<String, String> map1 = (Map<String, String>) row.getField(1);
        assertEquals("b", map1.get("a"));
        assertEquals("d", map1.get("c"));
        Map<String, Row> map2 = (Map<String, Row>) row.getField(2);
        Row innerRow = map2.get("f");
        assertEquals(1, innerRow.getField(0));
        assertEquals(2L, innerRow.getField(1));
    }
    // Without ignore-parse-errors, an unparsable payload must fail the job.
    @Test
    public void testSourceNotIgnoreParseError() throws InterruptedException {
        TestProtobufTestStore.sourcePbInputs.clear();
        // pass an incompatible bytes
        TestProtobufTestStore.sourcePbInputs.add(new byte[] {127, 127, 127, 127, 127});
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest'"
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        try {
            result.await();
        } catch (Exception ex) {
            return;
        }
        fail("executeSql should raise exception");
    }
    // With ignore-parse-errors, an unparsable payload is silently skipped.
    @Test
    public void testSourceIgnoreParseError() throws InterruptedException, ExecutionException {
        TestProtobufTestStore.sourcePbInputs.clear();
        // pass an incompatible bytes
        TestProtobufTestStore.sourcePbInputs.add(new byte[] {127, 127, 127, 127, 127});
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest',"
                        + "  'protobuf.ignore-parse-errors' = 'true'"
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        CloseableIterator<Row> iterator = result.collect();
        assertFalse(iterator.hasNext());
    }
    // proto2 + read-default-values=true: an unset field reads as its default (0).
    @Test
    public void testSourceWithDefaultValueOfPb2WhenTrue() {
        MapTest mapTest = MapTest.newBuilder().build();
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(mapTest.toByteArray());
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest', "
                        + "  'protobuf.read-default-values' = 'true' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        Row row = result.collect().next();
        assertEquals(0, (int) row.getField(0));
    }
    // proto2 + read-default-values=false: an unset field reads as null.
    @Test
    public void testSourceWithDefaultValueOfPb2WhenFalse() {
        MapTest mapTest = MapTest.newBuilder().build();
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(mapTest.toByteArray());
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest', "
                        + "  'protobuf.read-default-values' = 'false' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        Row row = result.collect().next();
        assertNull(row.getField(0));
    }
    // proto3 always materializes defaults, regardless of the read-default-values flag.
    @Test
    public void testSourceWithDefaultValueOfPb3WhenTrue() {
        Pb3Test pb3Test = Pb3Test.newBuilder().build();
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(pb3Test.toByteArray());
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int,"
                        + "  b bigint,"
                        + "  c string,"
                        + "  d float"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.Pb3Test', "
                        + "  'protobuf.read-default-values' = 'true' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        Row row = result.collect().next();
        assertEquals(0, (int) row.getField(0));
    }
    // proto3 with read-default-values=false still yields the default (0), not null.
    @Test
    public void testSourceWithDefaultValueOfPb3WhenFalse() {
        Pb3Test pb3Test = Pb3Test.newBuilder().build();
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(pb3Test.toByteArray());
        env().setParallelism(1);
        String sql =
                "create table bigdata_source ( "
                        + "  a int,"
                        + "  b bigint,"
                        + "  c string,"
                        + "  d float"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.Pb3Test', "
                        + "  'protobuf.read-default-values' = 'false' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult result = tEnv().executeSql("select * from bigdata_source");
        Row row = result.collect().next();
        assertEquals(0, (int) row.getField(0));
    }
    // Writing through the sink produces bytes that parse back to the inserted values.
    @Test
    public void testSink() throws Exception {
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(getProtoTestObject().toByteArray());
        TestProtobufTestStore.sinkResults.clear();
        env().setParallelism(1);
        String sql =
                "create table bigdata_sink ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest'"
                        + ")";
        tEnv().executeSql(sql);
        TableResult tableResult =
                tEnv().executeSql(
                                "insert into bigdata_sink select 1, map['a', 'b', 'c', 'd'], map['f', row(1,cast(2 as bigint))] ");
        tableResult.await();
        byte[] bytes = TestProtobufTestStore.sinkResults.get(0);
        MapTest mapTest = MapTest.parseFrom(bytes);
        assertEquals(1, mapTest.getA());
        assertEquals("b", mapTest.getMap1Map().get("a"));
        assertEquals("d", mapTest.getMap1Map().get("c"));
        MapTest.InnerMessageTest innerMessageTest = mapTest.getMap2Map().get("f");
        assertEquals(1, innerMessageTest.getA());
        assertEquals(2L, innerMessageTest.getB());
    }
    // Null map values are written as the configured string literal / default message.
    @Test
    public void testSinkWithNullLiteral() throws Exception {
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(getProtoTestObject().toByteArray());
        TestProtobufTestStore.sinkResults.clear();
        env().setParallelism(1);
        String sql =
                "create table bigdata_sink ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest', "
                        + "  'protobuf.write-null-string-literal' = 'NULL' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult tableResult =
                tEnv().executeSql(
                                "insert into bigdata_sink select 1, map['a', null], map['b', cast(null as row<a int, b bigint>)]");
        tableResult.await();
        byte[] bytes = TestProtobufTestStore.sinkResults.get(0);
        MapTest mapTest = MapTest.parseFrom(bytes);
        assertEquals(1, mapTest.getA());
        assertEquals("NULL", mapTest.getMap1Map().get("a"));
        MapTest.InnerMessageTest innerMessageTest = mapTest.getMap2Map().get("b");
        assertEquals(MapTest.InnerMessageTest.getDefaultInstance(), innerMessageTest);
    }
    // The null literal option supports escaped quotes in its value.
    @Test
    public void testSinkWithNullLiteralWithEscape() throws Exception {
        TestProtobufTestStore.sourcePbInputs.clear();
        TestProtobufTestStore.sourcePbInputs.add(getProtoTestObject().toByteArray());
        TestProtobufTestStore.sinkResults.clear();
        env().setParallelism(1);
        String sql =
                "create table bigdata_sink ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'protobuf-test-connector', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest', "
                        + "  'protobuf.write-null-string-literal' = '\\\"NULL\\\"' "
                        + ")";
        tEnv().executeSql(sql);
        TableResult tableResult =
                tEnv().executeSql(
                                "insert into bigdata_sink select 1, map['a', null], map['b', cast(null as row<a int, b bigint>)]");
        tableResult.await();
        byte[] bytes = TestProtobufTestStore.sinkResults.get(0);
        MapTest mapTest = MapTest.parseFrom(bytes);
        assertEquals(1, mapTest.getA());
        assertEquals("\"NULL\"", mapTest.getMap1Map().get("a"));
        MapTest.InnerMessageTest innerMessageTest = mapTest.getMap2Map().get("b");
        assertEquals(MapTest.InnerMessageTest.getDefaultInstance(), innerMessageTest);
    }
    // The protobuf format is rejected by the bulk filesystem connector with a clear error.
    @Test
    public void testUnsupportedBulkFilesystemSink() {
        env().setParallelism(1);
        String sql =
                "create table bigdata_sink ( "
                        + "  a int, "
                        + "  map1 map<string,string>,"
                        + "  map2 map<string, row<a int, b bigint>>"
                        + ") with ("
                        + "  'connector' = 'filesystem', "
                        + "  'path' = '/tmp/unused', "
                        + "  'format' = 'protobuf', "
                        + "  'protobuf.message-class-name' = 'org.apache.flink.formats.protobuf.testproto.MapTest'"
                        + ")";
        tEnv().executeSql(sql);
        assertThatThrownBy(
                        () -> {
                            TableResult tableResult =
                                    tEnv().executeSql(
                                                    "insert into bigdata_sink select 1, map['a', 'b', 'c', 'd'], map['f', row(1,cast(2 as bigint))] ");
                            tableResult.await();
                        })
                .satisfies(
                        anyCauseMatches(
                                ValidationException.class,
                                "The 'protobuf' format is not supported for the 'filesystem' connector."));
    }
}
| ProtobufSQLITCase |
java | netty__netty | codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java | {
"start": 1572,
"end": 16578
} | class ____ {
private static final int V2_HEADER_BYTES_LENGTH = 16;
private static final int IPv4_ADDRESS_BYTES_LENGTH = 12;
private static final int IPv6_ADDRESS_BYTES_LENGTH = 36;
@Test
public void testIPV4EncodeProxyV1() {
EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
HAProxyMessage message = new HAProxyMessage(
HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
"192.168.0.1", "192.168.0.11", 56324, 443);
assertTrue(ch.writeOutbound(message));
ByteBuf byteBuf = ch.readOutbound();
assertEquals("PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n",
byteBuf.toString(CharsetUtil.US_ASCII));
byteBuf.release();
assertFalse(ch.finish());
}
    /** Encodes a TCP6 message with protocol v1 and checks the ASCII line, addresses verbatim. */
    @Test
    public void testIPV6EncodeProxyV1() {
        EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
        HAProxyMessage message = new HAProxyMessage(
                HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6,
                "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443);
        assertTrue(ch.writeOutbound(message));
        ByteBuf byteBuf = ch.readOutbound();
        // v1 emits the addresses exactly as given, CRLF-terminated.
        assertEquals("PROXY TCP6 2001:0db8:85a3:0000:0000:8a2e:0370:7334 1050:0:0:0:5:600:300c:326b 56324 443\r\n",
                byteBuf.toString(CharsetUtil.US_ASCII));
        byteBuf.release();
        assertFalse(ch.finish());
    }
/**
 * Encodes a v2 (binary) PROXY header for TCP4 and verifies every field of the
 * binary layout: signature, version/command byte, family/transport byte,
 * address-block length, both addresses and both ports.
 */
@Test
public void testIPv4EncodeProxyV2() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
            "192.168.0.1", "192.168.0.11", 56324, 443);
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // 12-byte binary signature that prefixes every v2 message
    byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12);
    assertArrayEquals(BINARY_PREFIX, headerBytes);
    // byte 12: high nibble = protocol version (0x2), low nibble = command (0x1 = PROXY)
    byte commandByte = byteBuf.getByte(12);
    assertEquals(0x02, (commandByte & 0xf0) >> 4);
    assertEquals(0x01, commandByte & 0x0f);
    // byte 13: high nibble = address family (0x1 = AF_INET), low nibble = transport (0x1 = STREAM)
    byte transportByte = byteBuf.getByte(13);
    assertEquals(0x01, (transportByte & 0xf0) >> 4);
    assertEquals(0x01, transportByte & 0x0f);
    // bytes 14-15: length of the address block that follows; use the shared
    // constant instead of a magic 12, mirroring the IPv6 variant of this test
    int sourceAddrLength = byteBuf.getUnsignedShort(14);
    assertEquals(IPv4_ADDRESS_BYTES_LENGTH, sourceAddrLength);
    // source address: 192.168.0.1
    byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 4);
    assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x01 }, sourceAddr);
    // destination address: 192.168.0.11
    byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 20, 4);
    assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x0b }, destAddr);
    // source port
    int sourcePort = byteBuf.getUnsignedShort(24);
    assertEquals(56324, sourcePort);
    // destination port
    int destPort = byteBuf.getUnsignedShort(26);
    assertEquals(443, destPort);
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * Encodes a v2 (binary) PROXY header for TCP6 and verifies every field of the
 * binary layout: signature, version/command byte, family/transport byte,
 * address-block length, both 16-byte addresses and both ports.
 */
@Test
public void testIPv6EncodeProxyV2() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6,
            "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443);
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // 12-byte binary signature that prefixes every v2 message
    byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12);
    assertArrayEquals(BINARY_PREFIX, headerBytes);
    // byte 12: high nibble = protocol version (0x2), low nibble = command (0x1 = PROXY)
    byte commandByte = byteBuf.getByte(12);
    assertEquals(0x02, (commandByte & 0xf0) >> 4);
    assertEquals(0x01, commandByte & 0x0f);
    // byte 13: high nibble = address family (0x2 = AF_INET6), low nibble = transport (0x1 = STREAM)
    byte transportByte = byteBuf.getByte(13);
    assertEquals(0x02, (transportByte & 0xf0) >> 4);
    assertEquals(0x01, transportByte & 0x0f);
    // bytes 14-15: length of the address block that follows (36 for TCP6)
    int sourceAddrLength = byteBuf.getUnsignedShort(14);
    assertEquals(IPv6_ADDRESS_BYTES_LENGTH, sourceAddrLength);
    // source address: 2001:0db8:85a3::8a2e:0370:7334, 16 bytes big-endian
    byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 16);
    assertArrayEquals(new byte[] {
        (byte) 0x20, (byte) 0x01, 0x0d, (byte) 0xb8,
        (byte) 0x85, (byte) 0xa3, 0x00, 0x00, 0x00, 0x00, (byte) 0x8a, 0x2e,
        0x03, 0x70, 0x73, 0x34
    }, sourceAddr);
    // destination address: 1050::5:600:300c:326b, 16 bytes big-endian
    byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 32, 16);
    assertArrayEquals(new byte[] {
        (byte) 0x10, (byte) 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x05, 0x06, 0x00, 0x30, 0x0c, 0x32, 0x6b
    }, destAddr);
    // source port
    int sourcePort = byteBuf.getUnsignedShort(48);
    assertEquals(56324, sourcePort);
    // destination port
    int destPort = byteBuf.getUnsignedShort(50);
    assertEquals(443, destPort);
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * Encodes a v2 (binary) PROXY header for UNIX_STREAM and verifies the layout:
 * each socket path occupies a fixed 108-byte NUL-padded slot, source first,
 * destination second.
 */
@Test
public void testUnixEncodeProxyV2() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM,
            "/var/run/src.sock", "/var/run/dst.sock", 0, 0);
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // 12-byte binary signature that prefixes every v2 message
    byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12);
    assertArrayEquals(BINARY_PREFIX, headerBytes);
    // byte 12: high nibble = protocol version (0x2), low nibble = command (0x1 = PROXY)
    byte commandByte = byteBuf.getByte(12);
    assertEquals(0x02, (commandByte & 0xf0) >> 4);
    assertEquals(0x01, commandByte & 0x0f);
    // byte 13: high nibble = address family (0x3 = AF_UNIX), low nibble = transport (0x1 = STREAM)
    byte transportByte = byteBuf.getByte(13);
    assertEquals(0x03, (transportByte & 0xf0) >> 4);
    assertEquals(0x01, transportByte & 0x0f);
    // bytes 14-15: total length of both fixed-size path slots
    int addrLength = byteBuf.getUnsignedShort(14);
    assertEquals(TOTAL_UNIX_ADDRESS_BYTES_LENGTH, addrLength);
    // source path starts at 16; find its NUL terminator inside the 108-byte slot
    int srcAddrEnd = byteBuf.forEachByte(16, 108, ByteProcessor.FIND_NUL);
    assertEquals("/var/run/src.sock",
        byteBuf.slice(16, srcAddrEnd - 16).toString(CharsetUtil.US_ASCII));
    // destination path starts right after the source slot (16 + 108 = 124)
    int dstAddrEnd = byteBuf.forEachByte(124, 108, ByteProcessor.FIND_NUL);
    assertEquals("/var/run/dst.sock",
        byteBuf.slice(124, dstAddrEnd - 124).toString(CharsetUtil.US_ASCII));
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * Encodes a v2 message carrying two TLVs (ALPN and AUTHORITY) and verifies
 * that each TLV is written as type byte + 2-byte length + value, in order,
 * after the fixed IPv4 address block.
 */
@Test
public void testTLVEncodeProxy() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    List<HAProxyTLV> tlvs = new ArrayList<HAProxyTLV>();
    ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII);
    HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy());
    tlvs.add(alpnTlv);
    ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII);
    HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy());
    tlvs.add(authorityTlv);
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
            "192.168.0.1", "192.168.0.11", 56324, 443, tlvs);
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // length field at offset 14 covers everything after the 16-byte header
    assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH);
    // skip header + IPv4 address block to land on the TLV section
    ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH);
    // ALPN TLV: type byte, 2-byte length, then the value bytes
    assertEquals(alpnTlv.typeByteValue(), tlv.readByte());
    short bufLength = tlv.readShort();
    assertEquals(helloWorld.array().length, bufLength);
    assertEquals(helloWorld, tlv.readSlice(bufLength));
    // AUTHORITY TLV follows immediately, same wire format
    assertEquals(authorityTlv.typeByteValue(), tlv.readByte());
    bufLength = tlv.readShort();
    assertEquals(arbitrary.array().length, bufLength);
    assertEquals(arbitrary, tlv.readSlice(bufLength));
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * Encodes a v2 message carrying an SSL TLV that itself nests two sub-TLVs
 * (ALPN and AUTHORITY), and verifies the nested wire format: SSL TLV type,
 * length, client/verify fields, then each sub-TLV in order.
 */
@Test
public void testSslTLVEncodeProxy() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    List<HAProxyTLV> tlvs = new ArrayList<HAProxyTLV>();
    ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII);
    HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy());
    tlvs.add(alpnTlv);
    ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII);
    HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy());
    tlvs.add(authorityTlv);
    ByteBuf sslContent = Unpooled.copiedBuffer("some ssl content", CharsetUtil.US_ASCII);
    HAProxySSLTLV haProxySSLTLV = new HAProxySSLTLV(1, (byte) 0x01, tlvs, sslContent.copy());
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
            "192.168.0.1", "192.168.0.11", 56324, 443,
            Collections.<HAProxyTLV>singletonList(haProxySSLTLV));
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // length field at offset 14 covers everything after the 16-byte header
    assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH);
    // skipBytes returns the same buffer, so tlv and byteBuf alias one another
    ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH);
    // SSL TLV type byte
    assertEquals(haProxySSLTLV.typeByteValue(), tlv.readByte());
    // SSL TLV length must match everything remaining
    int bufLength = tlv.readUnsignedShort();
    assertEquals(bufLength, tlv.readableBytes());
    // client byte and 4-byte verify field of the SSL TLV
    assertEquals(0x01, byteBuf.readByte());
    assertEquals(1, byteBuf.readInt());
    // nested ALPN TLV: type byte, 2-byte length, value bytes
    assertEquals(alpnTlv.typeByteValue(), tlv.readByte());
    bufLength = tlv.readShort();
    assertEquals(helloWorld.array().length, bufLength);
    assertEquals(helloWorld, tlv.readSlice(bufLength));
    // nested AUTHORITY TLV follows, same wire format
    assertEquals(authorityTlv.typeByteValue(), tlv.readByte());
    bufLength = tlv.readShort();
    assertEquals(arbitrary.array().length, bufLength);
    assertEquals(arbitrary, tlv.readSlice(bufLength));
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * Encodes a v2 LOCAL command with UNKNOWN protocol (the health-check case):
 * signature, version/command byte with LOCAL (0x0), zeroed family/transport
 * byte and an empty (zero-length) address block — nothing else.
 */
@Test
public void testEncodeLocalProxyV2() {
    EmbeddedChannel ch = new EmbeddedChannel(INSTANCE);
    HAProxyMessage message = new HAProxyMessage(
            HAProxyProtocolVersion.V2, HAProxyCommand.LOCAL, HAProxyProxiedProtocol.UNKNOWN,
            null, null, 0, 0);
    assertTrue(ch.writeOutbound(message));
    ByteBuf byteBuf = ch.readOutbound();
    // 12-byte binary signature that prefixes every v2 message
    byte[] headerBytes = new byte[12];
    byteBuf.readBytes(headerBytes);
    assertArrayEquals(BINARY_PREFIX, headerBytes);
    // byte 12: high nibble = protocol version (0x2), low nibble = command (0x0 = LOCAL)
    byte commandByte = byteBuf.readByte();
    assertEquals(0x02, (commandByte & 0xf0) >> 4);
    assertEquals(0x00, commandByte & 0x0f);
    // byte 13: UNKNOWN protocol encodes family and transport both as zero
    byte transportByte = byteBuf.readByte();
    assertEquals(0x00, transportByte);
    // zero-length address block, and no further payload
    int sourceAddrLength = byteBuf.readUnsignedShort();
    assertEquals(0, sourceAddrLength);
    assertFalse(byteBuf.isReadable());
    byteBuf.release();
    assertFalse(ch.finish());
}
/**
 * An IPv4 source address with an out-of-range final octet must be rejected
 * when the message is constructed.
 */
@Test
public void testInvalidIpV4Address() {
    final String badSourceAddress = "192.168.0.1234";
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                    HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
                    badSourceAddress, "192.168.0.11", 56324, 443);
        }
    });
}
/**
 * An IPv6 source address whose last group has five hex digits must be
 * rejected when the message is constructed.
 */
@Test
public void testInvalidIpV6Address() {
    final String invalidIpv6Address = "2001:0db8:85a3:0000:0000:8a2e:0370:73345";
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6,
                invalidIpv6Address, "1050:0:0:0:5:600:300c:326b", 56324, 443);
        }
    });
}
/**
 * A UNIX socket path one byte longer than the allowed slot
 * (UNIX_ADDRESS_BYTES_LENGTH) must be rejected at construction time.
 */
@Test
public void testInvalidUnixAddress() {
    // a string of NUL bytes exactly one byte over the limit
    final String invalidUnixAddress = new String(new byte[UNIX_ADDRESS_BYTES_LENGTH + 1]);
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM,
                invalidUnixAddress, "/var/run/dst.sock", 0, 0);
        }
    });
}
/**
 * Null source/destination paths are not permitted for UNIX_STREAM (unlike
 * the LOCAL/UNKNOWN case) and must fail with a NullPointerException.
 */
@Test
public void testNullUnixAddress() {
    assertThrows(NullPointerException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM,
                null, null, 0, 0);
        }
    });
}
/**
 * A destination path of 109 characters exceeds the fixed 108-byte UNIX
 * address slot and must be rejected at construction time.
 */
@Test
public void testLongUnixAddress() {
    // 109 'a' characters — one more than fits in the address slot
    final String longUnixAddress = new String(new char[109]).replace("\0", "a");
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM,
                "source", longUnixAddress, 0, 0);
        }
    });
}
/**
 * UNIX sockets carry no port information, so non-zero source/destination
 * ports must be rejected for UNIX_STREAM messages.
 */
@Test
public void testInvalidUnixPort() {
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            new HAProxyMessage(
                HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM,
                "/var/run/src.sock", "/var/run/dst.sock", 80, 443);
        }
    });
}
}
| HaProxyMessageEncoderTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java | {
"start": 67872,
"end": 121391
} | class ____ testing full text functions that uses parameterized tests
testBasicFullTextFunction(testCase);
testFullTextFunctionWithFunctionsPushedToLucene(testCase);
testFullTextFunctionConjunctionWhereOperands(testCase);
testFullTextFunctionMultipleWhereClauses(testCase);
testFullTextFunctionMultipleFullTextFunctions(testCase);
testFullTextFunctionWithNonPushableConjunction(testCase);
testFullTextFunctionWithPushableConjunction(testCase);
testFullTextFunctionWithNonPushableDisjunction(testCase);
testFullTextFunctionWithPushableDisjunction(testCase);
testFullTextFunctionWithPushableDisjunction(testCase);
testMultipleFullTextFunctionFilterPushdown(testCase);
testFullTextFunctionsDisjunctionPushdown(testCase);
testFullTextFunctionsDisjunctionWithFiltersPushdown(testCase);
testFullTextFunctionWithStatsWherePushable(testCase);
testFullTextFunctionWithStatsPushableAndNonPushableCondition(testCase);
testFullTextFunctionStatsWithNonPushableCondition(testCase);
testFullTextFunctionWithStatsBy(testCase);
}
/**
 * A lone full text function in WHERE is pushed down to Lucene: the physical
 * plan bottoms out in an EsQueryExec whose query equals the function's
 * expected QueryBuilder.
 */
private void testBasicFullTextFunction(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | where %s
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    // unwrap the expected plan shape: Limit -> Exchange -> Project -> FieldExtract -> EsQuery
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var esQuery = as(field.child(), EsQueryExec.class);
    assertThat(as(esQuery.limit(), Literal.class).value(), is(1000));
    var expected = testCase.queryBuilder();
    assertEquals(expected.toString(), esQuery.query().toString());
}
/**
 * A full text function ANDed with another Lucene-pushable function
 * (cidr_match) results in a single bool query with both as MUST clauses.
 */
private void testFullTextFunctionWithFunctionsPushedToLucene(FullTextFunctionTestCase testCase) {
    String queryText = String.format(Locale.ROOT, """
        from test
        | where %s and cidr_match(ip, "127.0.0.1/32")
        """, testCase.esqlQuery());
    var analyzer = makeAnalyzer("mapping-all-types.json");
    var plan = plannerOptimizer.plan(queryText, IS_SV_STATS, analyzer);
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var query = as(field.child(), EsQueryExec.class);
    assertThat(as(query.limit(), Literal.class).value(), is(1000));
    // source offset: line 2, column 8 ("| where ") + function text + " and " (5) = +13
    Source filterSource = new Source(2, testCase.esqlQuery().length() + 13, "cidr_match(ip, \"127.0.0.1/32\")");
    var terms = wrapWithSingleQuery(queryText, unscore(termsQuery("ip", "127.0.0.1/32")), "ip", filterSource);
    var queryBuilder = testCase.queryBuilder();
    var expected = boolQuery().must(queryBuilder).must(terms);
    assertEquals(expected.toString(), query.query().toString());
}
/**
 * A full text function ANDed with a pushable comparison collapses to a bool
 * query with the function and the (single-value wrapped) range as MUST
 * clauses.
 */
private void testFullTextFunctionConjunctionWhereOperands(FullTextFunctionTestCase testCase) {
    String queryText = String.format(Locale.ROOT, """
        from test
        | where %s and integer > 10010
        """, testCase.esqlQuery());
    var analyzer = makeAnalyzer("mapping-all-types.json");
    var plan = plannerOptimizer.plan(queryText, IS_SV_STATS, analyzer);
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var query = as(field.child(), EsQueryExec.class);
    assertThat(as(query.limit(), Literal.class).value(), is(1000));
    // source text must match the actual predicate in the query ("integer > 10010",
    // not "integer > 10000"); offset = "| where " (8) + function text + " and " (5)
    Source filterSource = new Source(2, testCase.esqlQuery().length() + 13, "integer > 10010");
    var range = wrapWithSingleQuery(queryText, unscore(rangeQuery("integer").gt(10010)), "integer", filterSource);
    var queryBuilder = testCase.queryBuilder();
    var expected = boolQuery().must(queryBuilder).must(range);
    assertEquals(expected.toString(), query.query().toString());
}
/**
 * Two full text functions joined by AND are both pushed down and combined
 * into a single Lucene bool query with two MUST clauses.
 */
private void testFullTextFunctionMultipleFullTextFunctions(FullTextFunctionTestCase testCase) {
    FullTextFunctionTestCase second = randomFullTextFunctionTestCase();
    String queryText = String.format(Locale.ROOT, """
        from test
        | where %s and %s
        """, testCase.esqlQuery(), second.esqlQuery());
    var analyzer = makeAnalyzer("mapping-all-types.json");
    var plan = plannerOptimizer.plan(queryText, IS_SV_STATS, analyzer);
    // unwrap the expected plan shape down to the Lucene query
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var query = as(field.child(), EsQueryExec.class);
    assertThat(as(query.limit(), Literal.class).value(), is(1000));
    var leftQuery = testCase.queryBuilder();
    var rightQuery = second.queryBuilder();
    var expected = boolQuery().must(leftQuery).must(rightQuery);
    assertEquals(expected.toString(), query.query().toString());
}
/**
 * Two consecutive WHERE clauses (a full text function, then a pushable
 * comparison) are merged into one bool query with both as MUST clauses.
 */
private void testFullTextFunctionMultipleWhereClauses(FullTextFunctionTestCase testCase) {
    String queryText = String.format(Locale.ROOT, """
        from test
        | where %s
        | where integer > 10010
        """, testCase.esqlQuery());
    var analyzer = makeAnalyzer("mapping-all-types.json");
    var plan = plannerOptimizer.plan(queryText, IS_SV_STATS, analyzer);
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var query = as(field.child(), EsQueryExec.class);
    assertThat(as(query.limit(), Literal.class).value(), is(1000));
    // source text must match the actual predicate on line 3 ("integer > 10010",
    // not "integer > 10000"); column 8 is just past "| where "
    Source filterSource = new Source(3, 8, "integer > 10010");
    var range = wrapWithSingleQuery(queryText, unscore(rangeQuery("integer").gt(10010)), "integer", filterSource);
    var queryBuilder = testCase.queryBuilder();
    var expected = boolQuery().must(queryBuilder).must(range);
    assertEquals(expected.toString(), query.query().toString());
}
/**
 * A full text function ANDed with a non-pushable condition (length(text)):
 * the function is pushed to Lucene while the non-pushable half stays as a
 * FilterExec above the EsQueryExec.
 */
private void testFullTextFunctionWithNonPushableConjunction(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | where %s and length(text) > 10
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    // the non-pushable comparison remains as a local filter on the data node
    var filterLimit = as(fieldExtract.child(), LimitExec.class);
    var filter = as(filterLimit.child(), FilterExec.class);
    assertThat(filter.condition(), instanceOf(GreaterThan.class));
    var fieldFilterExtract = as(filter.child(), FieldExtractExec.class);
    var esQuery = as(fieldFilterExtract.child(), EsQueryExec.class);
    // only the full text function reaches Lucene
    assertEquals(testCase.queryBuilder().toString(), esQuery.query().toString());
}
/**
 * With _score metadata requested, a full text function ANDed with a pushable
 * comparison is still pushed down whole as a bool query with two MUST
 * clauses.
 */
private void testFullTextFunctionWithPushableConjunction(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test metadata _score
        | where %s and integer > 10000
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    var esQuery = as(fieldExtract.child(), EsQueryExec.class);
    // source offset: "| where " (8) + function text + " and " (5) = +13
    Source source = new Source(2, testCase.esqlQuery().length() + 13, "integer > 10000");
    BoolQueryBuilder expected = new BoolQueryBuilder().must(testCase.queryBuilder())
        .must(wrapWithSingleQuery(query, unscore(rangeQuery("integer").gt(10000)), "integer", source));
    assertEquals(expected.toString(), esQuery.query().toString());
}
/**
 * A full text function ORed with a non-pushable condition cannot be pushed
 * down at all: the whole disjunction stays in a FilterExec and the
 * EsQueryExec carries no query for it.
 */
private void testFullTextFunctionWithNonPushableDisjunction(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | where %s or length(text) > 10
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var filterLimit = as(field.child(), LimitExec.class);
    var filter = as(filterLimit.child(), FilterExec.class);
    // the disjunction survives intact: full text function OR comparison
    Or or = as(filter.condition(), Or.class);
    assertThat(or.left(), instanceOf(testCase.fullTextFunction()));
    assertThat(or.right(), instanceOf(GreaterThan.class));
    var fieldExtract = as(filter.child(), FieldExtractExec.class);
    assertThat(fieldExtract.child(), instanceOf(EsQueryExec.class));
}
/**
 * A full text function ORed with a pushable comparison is pushed down as a
 * bool query with two SHOULD clauses.
 */
private void testFullTextFunctionWithPushableDisjunction(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | where %s or integer > 10000
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    var esQuery = as(fieldExtract.child(), EsQueryExec.class);
    // source offset: "| where " (8) + function text + " or " (4) = +12
    Source source = new Source(2, testCase.esqlQuery().length() + 12, "integer > 10000");
    BoolQueryBuilder expected = new BoolQueryBuilder().should(testCase.queryBuilder())
        .should(wrapWithSingleQuery(query, unscore(rangeQuery("integer").gt(10000)), "integer", source));
    assertEquals(expected.toString(), esQuery.query().toString());
}
/**
 * Picks one of the four full text function test cases at random, so helper
 * tests exercise arbitrary pairings of MATCH/KQL/QSTR/match-operator.
 * Only the selected case is constructed, keeping random-seed usage stable.
 */
private FullTextFunctionTestCase randomFullTextFunctionTestCase() {
    return switch (randomIntBetween(0, 3)) {
        case 0 -> new MatchFunctionTestCase();
        case 1 -> new MatchOperatorTestCase();
        case 2 -> new KqlFunctionTestCase();
        case 3 -> new QueryStringFunctionTestCase();
        default -> throw new IllegalStateException("Unexpected value");
    };
}
/**
 * Full text functions spread across several WHERE clauses — interleaved with
 * SORT, a pushable comparison and an EVAL — are all collected into one bool
 * query with four MUST clauses, in source order.
 */
private void testMultipleFullTextFunctionFilterPushdown(FullTextFunctionTestCase testCase) {
    FullTextFunctionTestCase second = randomFullTextFunctionTestCase();
    FullTextFunctionTestCase third = new MatchFunctionTestCase();
    String query = String.format(Locale.ROOT, """
        from test
        | where %s and %s
        | sort integer
        | where integer > 10000
        | eval description = concat("integer: ", to_str(integer), ", text: ", text, " ", keyword)
        | where %s
        """, testCase.esqlQuery(), second.esqlQuery(), third.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    // EVAL stays above the TopN produced by the SORT
    var eval = as(plan, EvalExec.class);
    var topNExec = as(eval.child(), TopNExec.class);
    var exchange = as(topNExec.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    var actualLuceneQuery = as(fieldExtract.child(), EsQueryExec.class).query();
    // line 4, column 8 (just past "| where ")
    Source filterSource = new Source(4, 8, "integer > 10000");
    var expectedLuceneQuery = new BoolQueryBuilder().must(testCase.queryBuilder())
        .must(second.queryBuilder())
        .must(wrapWithSingleQuery(query, unscore(rangeQuery("integer").gt(10000)), "integer", filterSource))
        .must(third.queryBuilder());
    assertEquals(expectedLuceneQuery.toString(), actualLuceneQuery.toString());
}
/**
 * (A or B) and C — all full text functions — pushes down as
 * bool(must(bool(should A, should B)), must C).
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionsDisjunctionPushdown(FullTextFunctionTestCase testCase) {
    FullTextFunctionTestCase second = randomFullTextFunctionTestCase();
    FullTextFunctionTestCase third = randomFullTextFunctionTestCase();
    String query = String.format(Locale.ROOT, """
        from test
        | where (%s or %s) and %s
        | sort integer
        """, testCase.esqlQuery(), second.esqlQuery(), third.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var topNExec = as(plan, TopNExec.class);
    var exchange = as(topNExec.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    var actualLuceneQuery = as(fieldExtract.child(), EsQueryExec.class).query();
    var expectedLuceneQuery = new BoolQueryBuilder().must(
        new BoolQueryBuilder().should(testCase.queryBuilder()).should(second.queryBuilder())
    ).must(third.queryBuilder());
    assertEquals(expectedLuceneQuery.toString(), actualLuceneQuery.toString());
}
/**
 * (A or B) and length(keyword) > 5: the disjunction of full text functions is
 * pushed to Lucene while the non-pushable length() stays as a local filter.
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionsDisjunctionWithFiltersPushdown(FullTextFunctionTestCase testCase) {
    FullTextFunctionTestCase second = randomFullTextFunctionTestCase();
    String query = String.format(Locale.ROOT, """
        from test
        | where (%s or %s) and length(keyword) > 5
        | sort integer
        """, testCase.esqlQuery(), second.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var topNExec = as(plan, TopNExec.class);
    var exchange = as(topNExec.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var fieldExtract = as(project.child(), FieldExtractExec.class);
    // the non-pushable length() comparison stays as a FilterExec on the data node
    var secondTopNExec = as(fieldExtract.child(), TopNExec.class);
    var secondFieldExtract = as(secondTopNExec.child(), FieldExtractExec.class);
    var filterExec = as(secondFieldExtract.child(), FilterExec.class);
    var thirdFilterExtract = as(filterExec.child(), FieldExtractExec.class);
    var actualLuceneQuery = as(thirdFilterExtract.child(), EsQueryExec.class).query();
    var expectedLuceneQuery = new BoolQueryBuilder().should(testCase.queryBuilder()).should(second.queryBuilder());
    assertEquals(expectedLuceneQuery.toString(), actualLuceneQuery.toString());
}
/**
 * A STATS aggregation whose filter is a full text function pushes the
 * function down into an EsStatsQueryExec.
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionWithStatsWherePushable(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | stats c = count(*) where %s
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var agg = as(limit.child(), AggregateExec.class);
    var exchange = as(agg.child(), ExchangeExec.class);
    var stats = as(exchange.child(), EsStatsQueryExec.class);
    QueryBuilder expected = testCase.queryBuilder();
    assertThat(stats.query().toString(), equalTo(expected.toString()));
}
/**
 * A non-pushable WHERE before STATS stays as a local FilterExec while the
 * full text function in the aggregation filter is still pushed to Lucene.
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionWithStatsPushableAndNonPushableCondition(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | where length(keyword) > 10
        | stats c = count(*) where %s
        """, testCase.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var agg = as(limit.child(), AggregateExec.class);
    var exchange = as(agg.child(), ExchangeExec.class);
    var aggExec = as(exchange.child(), AggregateExec.class);
    // the non-pushable length() condition remains local
    var filter = as(aggExec.child(), FilterExec.class);
    assertTrue(filter.condition() instanceof GreaterThan);
    var fieldExtract = as(filter.child(), FieldExtractExec.class);
    var esQuery = as(fieldExtract.child(), EsQueryExec.class);
    QueryBuilder expected = testCase.queryBuilder();
    assertThat(esQuery.query().toString(), equalTo(expected.toString()));
}
/**
 * Two filtered COUNTs, each guarded by a different full text function: the
 * functions stay inside the aggregation filters and nothing reaches the
 * Lucene query (every EsQueryExec query is null).
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionStatsWithNonPushableCondition(FullTextFunctionTestCase testCase) {
    FullTextFunctionTestCase second = randomFullTextFunctionTestCase();
    String query = String.format(Locale.ROOT, """
        from test
        | stats c = count(*) where %s, d = count(*) where %s
        """, testCase.esqlQuery(), second.esqlQuery());
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var agg = as(limit.child(), AggregateExec.class);
    var aggregates = agg.aggregates();
    assertThat(aggregates.size(), is(2));
    for (NamedExpression aggregate : aggregates) {
        var alias = as(aggregate, Alias.class);
        var count = as(alias.child(), Count.class);
        // as() doubles as the type assertion; the variable itself is unused
        var fullTextFunction = as(count.filter(), FullTextFunction.class);
    }
    var exchange = as(agg.child(), ExchangeExec.class);
    var aggExec = as(exchange.child(), AggregateExec.class);
    aggExec.forEachDown(EsQueryExec.class, esQueryExec -> { assertNull(esQueryExec.query()); });
}
/**
 * STATS ... BY with a full text function filter: the function stays in the
 * Count filter, the grouping survives, and no query is pushed to Lucene.
 * Made private like its sibling helpers: a public parameterized method named
 * test* would be picked up by the randomized test runner and fail.
 */
private void testFullTextFunctionWithStatsBy(FullTextFunctionTestCase testCase) {
    String query = String.format(Locale.ROOT, """
        from test
        | stats count(*) where %s by keyword
        """, testCase.esqlQuery());
    var analyzer = makeAnalyzer("mapping-default.json");
    // NOTE(review): this local optimizer shadows the field but plan() below is
    // called with a different analyzer — the mapping-default one looks unused;
    // confirm intent before simplifying.
    var plannerOptimizer = new TestPlannerOptimizer(config, analyzer);
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var agg = as(limit.child(), AggregateExec.class);
    var grouping = as(agg.groupings().get(0), FieldAttribute.class);
    assertEquals("keyword", grouping.name());
    var aggregateAlias = as(agg.aggregates().get(0), Alias.class);
    assertEquals("count(*) where " + testCase.esqlQuery(), aggregateAlias.name());
    var count = as(aggregateAlias.child(), Count.class);
    var countFilter = as(count.filter(), testCase.fullTextFunction());
    var aggregateFieldAttr = as(agg.aggregates().get(1), FieldAttribute.class);
    assertEquals("keyword", aggregateFieldAttr.name());
    var exchange = as(agg.child(), ExchangeExec.class);
    var aggExec = as(exchange.child(), AggregateExec.class);
    aggExec.forEachDown(EsQueryExec.class, esQueryExec -> { assertNull(esQueryExec.query()); });
}
/**
 * knn() ANDed with a pushable comparison: the comparison is pushed both as a
 * prefilter inside the KNN query and as a MUST clause alongside it.
 */
public void testKnnPrefilters() {
    String query = """
        from test
        | where knn(dense_vector, [0, 1, 2]) and integer > 10
        """;
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var queryExec = as(field.child(), EsQueryExec.class);
    QueryBuilder expectedFilterQueryBuilder = wrapWithSingleQuery(
        query,
        unscore(rangeQuery("integer").gt(10)),
        "integer",
        new Source(2, 41, "integer > 10")
    );
    // k = 1000 mirrors the default query limit; remaining options unset
    KnnVectorQueryBuilder expectedKnnQueryBuilder = new KnnVectorQueryBuilder(
        "dense_vector",
        new float[] { 0, 1, 2 },
        1000,
        null,
        null,
        null,
        null
    ).addFilterQuery(expectedFilterQueryBuilder);
    var expectedQuery = boolQuery().must(expectedKnnQueryBuilder).must(expectedFilterQueryBuilder);
    assertEquals(expectedQuery.toString(), queryExec.query().toString());
}
/**
 * knn() followed by two separate WHERE clauses: both pushable filters become
 * a combined bool prefilter on the KNN query and individual MUST clauses in
 * the top-level query.
 */
public void testKnnPrefiltersWithMultipleFilters() {
    String query = """
        from test
        | where knn(dense_vector, [0, 1, 2])
        | where integer > 10
        | where keyword == "test"
        """;
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var queryExec = as(field.child(), EsQueryExec.class);
    var integerFilter = wrapWithSingleQuery(query, unscore(rangeQuery("integer").gt(10)), "integer", new Source(3, 8, "integer > 10"));
    var keywordFilter = wrapWithSingleQuery(
        query,
        unscore(termQuery("keyword", "test")),
        "keyword",
        new Source(4, 8, "keyword == \"test\"")
    );
    // the KNN prefilter is a single bool combining both pushable conditions
    QueryBuilder expectedFilterQueryBuilder = boolQuery().must(integerFilter).must(keywordFilter);
    KnnVectorQueryBuilder expectedKnnQueryBuilder = new KnnVectorQueryBuilder(
        "dense_vector",
        new float[] { 0, 1, 2 },
        1000,
        null,
        null,
        null,
        null
    ).addFilterQuery(expectedFilterQueryBuilder);
    var expectedQuery = boolQuery().must(expectedKnnQueryBuilder).must(integerFilter).must(keywordFilter);
    assertEquals(expectedQuery.toString(), queryExec.query().toString());
}
/**
 * A conjunction with knn() pushes the other operand both into the KNN
 * prefilter and into the surrounding bool query.
 * NOTE(review): query and assertions duplicate testKnnPrefilters — consider
 * consolidating the two tests.
 */
public void testPushDownConjunctionsToKnnPrefilter() {
    String query = """
        from test
        | where knn(dense_vector, [0, 1, 2]) and integer > 10
        """;
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var queryExec = as(field.child(), EsQueryExec.class);
    // The filter condition should be pushed down to both the KNN query and the main query
    QueryBuilder expectedFilterQueryBuilder = wrapWithSingleQuery(
        query,
        unscore(rangeQuery("integer").gt(10)),
        "integer",
        new Source(2, 41, "integer > 10")
    );
    KnnVectorQueryBuilder expectedKnnQueryBuilder = new KnnVectorQueryBuilder(
        "dense_vector",
        new float[] { 0, 1, 2 },
        1000,
        null,
        null,
        null,
        null
    ).addFilterQuery(expectedFilterQueryBuilder);
    var expectedQuery = boolQuery().must(expectedKnnQueryBuilder).must(expectedFilterQueryBuilder);
    assertEquals(expectedQuery.toString(), queryExec.query().toString());
}
/**
 * A negated pushable condition (NOT integer > 10) is still pushable: it is
 * pushed as a must_not bool both into the KNN prefilter and into the
 * surrounding query.
 */
public void testPushDownNegatedConjunctionsToKnnPrefilter() {
    String query = """
        from test
        | where knn(dense_vector, [0, 1, 2]) and NOT integer > 10
        """;
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var queryExec = as(field.child(), EsQueryExec.class);
    // The filter condition should be pushed down to both the KNN query and the main query
    QueryBuilder expectedFilterQueryBuilder = wrapWithSingleQuery(
        query,
        unscore(boolQuery().mustNot(unscore(rangeQuery("integer").gt(10)))),
        "integer",
        new Source(2, 41, "NOT integer > 10")
    );
    KnnVectorQueryBuilder expectedKnnQueryBuilder = new KnnVectorQueryBuilder(
        "dense_vector",
        new float[] { 0, 1, 2 },
        1000,
        null,
        null,
        null,
        null
    ).addFilterQuery(expectedFilterQueryBuilder);
    var expectedQuery = boolQuery().must(expectedKnnQueryBuilder).must(expectedFilterQueryBuilder);
    assertEquals(expectedQuery.toString(), queryExec.query().toString());
}
/**
 * knn() ORed with a comparison: the comparison must NOT become a KNN
 * prefilter (that would wrongly narrow the vector search); instead both
 * operands become SHOULD clauses.
 */
public void testNotPushDownDisjunctionsToKnnPrefilter() {
    String query = """
        from test
        | where knn(dense_vector, [0, 1, 2]) or integer > 10
        """;
    var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
    var limit = as(plan, LimitExec.class);
    var exchange = as(limit.child(), ExchangeExec.class);
    var project = as(exchange.child(), ProjectExec.class);
    var field = as(project.child(), FieldExtractExec.class);
    var queryExec = as(field.child(), EsQueryExec.class);
    // The disjunction should not be pushed down to the KNN query
    KnnVectorQueryBuilder knnQueryBuilder = new KnnVectorQueryBuilder(
        "dense_vector",
        new float[] { 0, 1, 2 },
        1000,
        null,
        null,
        null,
        null
    );
    QueryBuilder rangeQueryBuilder = wrapWithSingleQuery(
        query,
        unscore(rangeQuery("integer").gt(10)),
        "integer",
        new Source(2, 40, "integer > 10")
    );
    var expectedQuery = boolQuery().should(knnQueryBuilder).should(rangeQueryBuilder);
    assertEquals(expectedQuery.toString(), queryExec.query().toString());
}
    /**
     * When knn() is conjoined with a condition that cannot be translated to Lucene
     * ({@code length(text) > 10}), the knn stays in a local FilterExec carrying both conjuncts as
     * filter expressions, and only the pushable {@code integer > 10} reaches the ES query.
     */
    public void testNotPushDownKnnWithNonPushablePrefilters() {
        String query = """
            from test
            | where ((knn(dense_vector, [0, 1, 2]) AND integer > 10) and ((keyword == "test") or length(text) > 10))
            """;
        var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
        var limit = as(plan, LimitExec.class);
        var exchange = as(limit.child(), ExchangeExec.class);
        var project = as(exchange.child(), ProjectExec.class);
        var field = as(project.child(), FieldExtractExec.class);
        var secondLimit = as(field.child(), LimitExec.class);
        var filter = as(secondLimit.child(), FilterExec.class);
        var and = as(filter.condition(), And.class);
        var knn = as(and.left(), Knn.class);
        // Both conjuncts are recorded as knn filter expressions, evaluated locally.
        assertEquals("(keyword == \"test\") or length(text) > 10", knn.filterExpressions().get(0).toString());
        assertEquals("integer > 10", knn.filterExpressions().get(1).toString());
        var fieldExtract = as(filter.child(), FieldExtractExec.class);
        var queryExec = as(fieldExtract.child(), EsQueryExec.class);
        // The query should only contain the pushable condition
        QueryBuilder integerGtQuery = wrapWithSingleQuery(
            query,
            unscore(rangeQuery("integer").gt(10)),
            "integer",
            new Source(2, 43, "integer > 10")
        );
        assertEquals(integerGtQuery.toString(), queryExec.query().toString());
    }
    /**
     * Mixed negations with two knn() calls: each knn receives as a prefilter the conditions that
     * are guaranteed true whenever that knn clause contributes a match, while the top-level query
     * keeps the full boolean structure.
     */
    public void testPushDownComplexNegationsToKnnPrefilter() {
        String query = """
            from test
            | where ((knn(dense_vector, [0, 1, 2]) or NOT integer > 10)
            and NOT ((keyword == "test") or knn(dense_vector, [4, 5, 6])))
            """;
        var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
        var limit = as(plan, LimitExec.class);
        var exchange = as(limit.child(), ExchangeExec.class);
        var project = as(exchange.child(), ProjectExec.class);
        var fieldExtract = as(project.child(), FieldExtractExec.class);
        var queryExec = as(fieldExtract.child(), EsQueryExec.class);
        // Same negated keyword condition, built twice: once with the Source of the inner
        // comparison and once with the Source of the whole NOT(...) expression (prefilter).
        QueryBuilder notKeywordQuery = wrapWithSingleQuery(
            query,
            unscore(boolQuery().mustNot(unscore(termQuery("keyword", "test")))),
            "keyword",
            new Source(3, 12, "keyword == \"test\"")
        );
        QueryBuilder notKeywordFilter = wrapWithSingleQuery(
            query,
            unscore(boolQuery().mustNot(unscore(termQuery("keyword", "test")))),
            "keyword",
            new Source(3, 6, "NOT ((keyword == \"test\") or knn(dense_vector, [4, 5, 6]))")
        );
        QueryBuilder notIntegerGt10 = wrapWithSingleQuery(
            query,
            unscore(boolQuery().mustNot(unscore(rangeQuery("integer").gt(10)))),
            "integer",
            new Source(2, 42, "NOT integer > 10")
        );
        KnnVectorQueryBuilder firstKnn = new KnnVectorQueryBuilder("dense_vector", new float[] { 0, 1, 2 }, 1000, null, null, null, null);
        KnnVectorQueryBuilder secondKnn = new KnnVectorQueryBuilder("dense_vector", new float[] { 4, 5, 6 }, 1000, null, null, null, null);
        // Cross-wiring: each knn is prefiltered by the condition from the *other* conjunct.
        firstKnn.addFilterQuery(notKeywordFilter);
        secondKnn.addFilterQuery(notIntegerGt10);
        // Build the main boolean query structure
        BoolQueryBuilder expectedQuery = boolQuery().must(notKeywordQuery) // NOT (keyword == "test")
            .must(unscore(boolQuery().mustNot(secondKnn)))
            .must(boolQuery().should(firstKnn).should(notIntegerGt10));
        assertEquals(expectedQuery.toString(), queryExec.query().toString());
    }
    /**
     * Two knn() calls in two OR groups joined by AND: each knn gets the pushable condition from
     * the opposite AND-side as a prefilter, while the top-level query ANDs both OR groups.
     */
    public void testMultipleKnnQueriesInPrefilters() {
        String query = """
            from test
            | where ((knn(dense_vector, [0, 1, 2]) or integer > 10) and ((keyword == "test") or knn(dense_vector, [4, 5, 6])))
            """;
        var plan = plannerOptimizer.plan(query, IS_SV_STATS, makeAnalyzer("mapping-all-types.json"));
        var limit = as(plan, LimitExec.class);
        var exchange = as(limit.child(), ExchangeExec.class);
        var project = as(exchange.child(), ProjectExec.class);
        var field = as(project.child(), FieldExtractExec.class);
        var queryExec = as(field.child(), EsQueryExec.class);
        // First KNN query (left side of first OR).
        KnnVectorQueryBuilder firstKnnQuery = new KnnVectorQueryBuilder(
            "dense_vector",
            new float[] { 0, 1, 2 },
            1000,
            null,
            null,
            null,
            null
        );
        // Integer range query (right side of first OR)
        QueryBuilder integerRangeQuery = wrapWithSingleQuery(
            query,
            unscore(rangeQuery("integer").gt(10)),
            "integer",
            new Source(2, 42, "integer > 10")
        );
        // Second KNN query (right side of second OR)
        KnnVectorQueryBuilder secondKnnQuery = new KnnVectorQueryBuilder(
            "dense_vector",
            new float[] { 4, 5, 6 },
            1000,
            null,
            null,
            null,
            null
        );
        // Keyword term query (left side of second OR)
        QueryBuilder keywordQuery = wrapWithSingleQuery(
            query,
            unscore(termQuery("keyword", "test")),
            "keyword",
            new Source(2, 62, "keyword == \"test\"")
        );
        // First OR (knn1 OR integer > 10)
        var firstOr = boolQuery().should(firstKnnQuery).should(integerRangeQuery);
        // Second OR (keyword == "test" OR knn2)
        var secondOr = boolQuery().should(keywordQuery).should(secondKnnQuery);
        // Prefilters are added after the OR groups are built; each knn is filtered by the
        // pushable condition of the other AND-side.
        firstKnnQuery.addFilterQuery(keywordQuery);
        secondKnnQuery.addFilterQuery(integerRangeQuery);
        // Top-level AND combining both ORs
        var expectedQuery = boolQuery().must(firstOr).must(secondOr);
        assertEquals(expectedQuery.toString(), queryExec.query().toString());
    }
/**
* LimitExec[1000[INTEGER]]
* \_ExchangeExec[[!alias_integer, boolean{f}#415, byte{f}#416, constant_keyword-foo{f}#417, date{f}#418, date_nanos{f}#419,
* double{f}#420, float{f}#421, half_float{f}#422, integer{f}#424, ip{f}#425, keyword{f}#426, long{f}#427, scaled_float{f}#423,
* !semantic_text, short{f}#429, text{f}#430, unsigned_long{f}#428, version{f}#431, wildcard{f}#432], false]
* \_ProjectExec[[!alias_integer, boolean{f}#415, byte{f}#416, constant_keyword-foo{f}#417, date{f}#418, date_nanos{f}#419,
* double{f}#420, float{f}#421, half_float{f}#422, integer{f}#424, ip{f}#425, keyword{f}#426, long{f}#427, scaled_float{f}#423,
* !semantic_text, short{f}#429, text{f}#430, unsigned_long{f}#428, version{f}#431, wildcard{f}#432]]
* \_FieldExtractExec[!alias_integer, boolean{f}#415, byte{f}#416, consta..]
* \_EsQueryExec[test], indexMode[standard], query[][_doc{f}#434], limit[1000], sort[] estimatedRowSize[412]
*/
public void testConstantKeywordWithMatchingFilter() {
String queryText = """
from test
| where `constant_keyword-foo` == "foo"
""";
var analyzer = makeAnalyzer("mapping-all-types.json");
var plan = plannerOptimizer.plan(queryText, CONSTANT_K_STATS, analyzer);
var limit = as(plan, LimitExec.class);
var exchange = as(limit.child(), ExchangeExec.class);
var project = as(exchange.child(), ProjectExec.class);
var field = as(project.child(), FieldExtractExec.class);
var query = as(field.child(), EsQueryExec.class);
assertThat(as(query.limit(), Literal.class).value(), is(1000));
assertNull(query.query());
}
/**
* LimitExec[1000[INTEGER]]
* \_ExchangeExec[[!alias_integer, boolean{f}#4, byte{f}#5, constant_keyword-foo{f}#6, date{f}#7, date_nanos{f}#8, double{f}#9,
* float{f}#10, half_float{f}#11, integer{f}#13, ip{f}#14, keyword{f}#15, long{f}#16, scaled_float{f}#12, !semantic_text,
* short{f}#18, text{f}#19, unsigned_long{f}#17, version{f}#20, wildcard{f}#21], false]
* \_LocalSourceExec[[!alias_integer, boolean{f}#4, byte{f}#5, constant_keyword-foo{f}#6, date{f}#7, date_nanos{f}#8, double{f}#9,
* float{f}#10, half_float{f}#11, integer{f}#13, ip{f}#14, keyword{f}#15, long{f}#16, scaled_float{f}#12, !semantic_text,
* short{f}#18, text{f}#19, unsigned_long{f}#17, version{f}#20, wildcard{f}#21], EMPTY]
*/
public void testConstantKeywordWithNonMatchingFilter() {
String queryText = """
from test
| where `constant_keyword-foo` == "non-matching"
""";
var analyzer = makeAnalyzer("mapping-all-types.json");
var plan = plannerOptimizer.plan(queryText, CONSTANT_K_STATS, analyzer);
var limit = as(plan, LimitExec.class);
var exchange = as(limit.child(), ExchangeExec.class);
var source = as(exchange.child(), LocalSourceExec.class);
}
/**
* LimitExec[1000[INTEGER]]
* \_ExchangeExec[[!alias_integer, boolean{f}#6, byte{f}#7, constant_keyword-foo{r}#25, date{f}#9, date_nanos{f}#10, double{f}#1...
* \_ProjectExec[[!alias_integer, boolean{f}#6, byte{f}#7, constant_keyword-foo{r}#25, date{f}#9, date_nanos{f}#10, double{f}#1...
* \_FieldExtractExec[!alias_integer, boolean{f}#6, byte{f}#7, date{f}#9,
* \_LimitExec[1000[INTEGER]]
* \_FilterExec[constant_keyword-foo{r}#25 == [66 6f 6f][KEYWORD]]
* \_MvExpandExec[constant_keyword-foo{f}#8,constant_keyword-foo{r}#25]
* \_FieldExtractExec[constant_keyword-foo{f}#8]
* \_EsQueryExec[test], indexMode[standard], query[][_doc{f}#26], limit[], sort[] estimatedRowSize[412]
*/
public void testConstantKeywordExpandFilter() {
String queryText = """
from test
| mv_expand `constant_keyword-foo`
| where `constant_keyword-foo` == "foo"
""";
var analyzer = makeAnalyzer("mapping-all-types.json");
var plan = plannerOptimizer.plan(queryText, CONSTANT_K_STATS, analyzer);
var limit = as(plan, LimitExec.class);
var exchange = as(limit.child(), ExchangeExec.class);
var project = as(exchange.child(), ProjectExec.class);
var fieldExtract = as(project.child(), FieldExtractExec.class);
var limit2 = as(fieldExtract.child(), LimitExec.class);
var filter = as(limit2.child(), FilterExec.class);
var expand = as(filter.child(), MvExpandExec.class);
var field = as(expand.child(), FieldExtractExec.class); // MV_EXPAND is not optimized yet (it doesn't accept literals)
as(field.child(), EsQueryExec.class);
}
/**
* DissectExec[constant_keyword-foo{f}#8,Parser[pattern=%{bar}, appendSeparator=, ...
* \_LimitExec[1000[INTEGER]]
* \_ExchangeExec[[!alias_integer, boolean{f}#6, byte{f}#7, constant_keyword-foo{f}#8, date{f}#9, date_nanos{f}#10, double{f}#11...
* \_ProjectExec[[!alias_integer, boolean{f}#6, byte{f}#7, constant_keyword-foo{f}#8, date{f}#9, date_nanos{f}#10, double{f}#11...
* \_FieldExtractExec[!alias_integer, boolean{f}#6, byte{f}#7, constant_k..]
* \_EsQueryExec[test], indexMode[standard], query[][_doc{f}#25], limit[1000], sort[] estimatedRowSize[462]
*/
public void testConstantKeywordDissectFilter() {
String queryText = """
from test
| dissect `constant_keyword-foo` "%{bar}"
| where `constant_keyword-foo` == "foo"
""";
var analyzer = makeAnalyzer("mapping-all-types.json");
var plan = plannerOptimizer.plan(queryText, CONSTANT_K_STATS, analyzer);
var dissect = as(plan, DissectExec.class);
var limit = as(dissect.child(), LimitExec.class);
var exchange = as(limit.child(), ExchangeExec.class);
var project = as(exchange.child(), ProjectExec.class);
var field = as(project.child(), FieldExtractExec.class);
var query = as(field.child(), EsQueryExec.class);
assertNull(query.query());
}
public void testMatchFunctionWithStatsWherePushable() {
String query = """
from test
| stats c = count(*) where match(last_name, "Smith")
""";
var plan = plannerOptimizer.plan(query);
var limit = as(plan, LimitExec.class);
var agg = as(limit.child(), AggregateExec.class);
var exchange = as(agg.child(), ExchangeExec.class);
var stats = as(exchange.child(), EsStatsQueryExec.class);
QueryBuilder expected = new MatchQueryBuilder("last_name", "Smith").lenient(true);
assertThat(stats.query().toString(), equalTo(expected.toString()));
}
public void testMatchFunctionWithStatsPushableAndNonPushableCondition() {
String query = """
from test
| where length(first_name) > 10
| stats c = count(*) where match(last_name, "Smith")
""";
var plan = plannerOptimizer.plan(query);
var limit = as(plan, LimitExec.class);
var agg = as(limit.child(), AggregateExec.class);
var exchange = as(agg.child(), ExchangeExec.class);
var aggExec = as(exchange.child(), AggregateExec.class);
var filter = as(aggExec.child(), FilterExec.class);
assertTrue(filter.condition() instanceof GreaterThan);
var fieldExtract = as(filter.child(), FieldExtractExec.class);
var esQuery = as(fieldExtract.child(), EsQueryExec.class);
QueryBuilder expected = new MatchQueryBuilder("last_name", "Smith").lenient(true);
assertThat(esQuery.query().toString(), equalTo(expected.toString()));
}
public void testMatchFunctionStatisWithNonPushableCondition() {
String query = """
from test
| stats c = count(*) where match(last_name, "Smith"), d = count(*) where match(first_name, "Anna")
""";
var plan = plannerOptimizer.plan(query);
var limit = as(plan, LimitExec.class);
var agg = as(limit.child(), AggregateExec.class);
var aggregates = agg.aggregates();
assertThat(aggregates.size(), is(2));
for (NamedExpression aggregate : aggregates) {
var alias = as(aggregate, Alias.class);
var count = as(alias.child(), Count.class);
var match = as(count.filter(), Match.class);
}
var exchange = as(agg.child(), ExchangeExec.class);
var aggExec = as(exchange.child(), AggregateExec.class);
var fieldExtract = as(aggExec.child(), FieldExtractExec.class);
var esQuery = as(fieldExtract.child(), EsQueryExec.class);
assertNull(esQuery.query());
}
    /**
     * Union-typed date fields: a mixed date/date_nanos field is auto-cast and its range filter is
     * pushed to Lucene, while a field that also mixes in long is not auto-cast, so its explicit
     * {@code ::date_nanos} cast filter stays as a local FilterExec.
     */
    public void testToDateNanosPushDown() {
        IndexResolution indexWithUnionTypedFields = indexWithDateDateNanosUnionType();
        plannerOptimizerDateDateNanosUnionTypes = new TestPlannerOptimizer(EsqlTestUtils.TEST_CFG, makeAnalyzer(indexWithUnionTypedFields));
        var stats = EsqlTestUtils.statsForExistingField("date_and_date_nanos", "date_and_date_nanos_and_long");
        String query = """
            from index*
            | where date_and_date_nanos < "2025-01-01" and date_and_date_nanos_and_long::date_nanos >= "2024-01-01\"""";
        var plan = plannerOptimizerDateDateNanosUnionTypes.plan(query, stats);
        // date_and_date_nanos should be pushed down to EsQueryExec, date_and_date_nanos_and_long should not be pushed down
        var project = as(plan, ProjectExec.class);
        List<? extends NamedExpression> projections = project.projections();
        assertEquals(2, projections.size());
        FieldAttribute fa = as(projections.get(0), FieldAttribute.class);
        assertEquals(DATE_NANOS, fa.dataType());
        assertEquals("date_and_date_nanos", fa.fieldName().string());
        assertTrue(isMultiTypeEsField(fa)); // mixed date and date_nanos are auto-casted
        UnsupportedAttribute ua = as(projections.get(1), UnsupportedAttribute.class); // mixed date, date_nanos and long are not auto-casted
        assertEquals("date_and_date_nanos_and_long", ua.fieldName().string());
        var limit = as(project.child(), LimitExec.class);
        var exchange = as(limit.child(), ExchangeExec.class);
        project = as(exchange.child(), ProjectExec.class);
        var fieldExtract = as(project.child(), FieldExtractExec.class);
        limit = as(fieldExtract.child(), LimitExec.class);
        // date_and_date_nanos_and_long::date_nanos >= "2024-01-01" is not pushed down
        var filter = as(limit.child(), FilterExec.class);
        GreaterThanOrEqual gt = as(filter.condition(), GreaterThanOrEqual.class);
        fa = as(gt.left(), FieldAttribute.class);
        assertTrue(isMultiTypeEsField(fa));
        assertEquals("date_and_date_nanos_and_long", fa.fieldName().string());
        fieldExtract = as(filter.child(), FieldExtractExec.class); // extract date_and_date_nanos_and_long
        var esQuery = as(fieldExtract.child(), EsQueryExec.class);
        var source = ((SingleValueQuery.Builder) esQuery.query()).source();
        var expected = wrapWithSingleQuery(
            query,
            unscore(
                rangeQuery("date_and_date_nanos").lt("2025-01-01T00:00:00.000Z").timeZone("Z").format("strict_date_optional_time_nanos")
            ),
            "date_and_date_nanos",
            source
        ); // date_and_date_nanos is pushed down
        assertThat(expected.toString(), is(esQuery.query().toString()));
    }
    /**
     * Emulates a buggy rule that builds a TopN over an attribute its child does not produce and
     * checks that the local optimizer's verifier reports the missing reference.
     */
    public void testVerifierOnMissingReferences() throws Exception {
        PhysicalPlan plan = plannerOptimizer.plan("""
            from test
            | stats a = min(salary) by emp_no
            """);
        var limit = as(plan, LimitExec.class);
        var aggregate = as(limit.child(), AggregateExec.class);
        var min = as(Alias.unwrap(aggregate.aggregates().get(0)), Min.class);
        var salary = as(min.field(), NamedExpression.class);
        assertThat(salary.name(), is("salary"));
        // emulate a rule that adds a missing attribute
        FieldAttribute missingAttr = getFieldAttribute("missing attr");
        List<Order> orders = List.of(new Order(plan.source(), missingAttr, Order.OrderDirection.ASC, Order.NullsPosition.FIRST));
        TopNExec topNExec = new TopNExec(plan.source(), plan, orders, new Literal(Source.EMPTY, limit, INTEGER), randomEstimatedRowSize());
        // We want to verify that the localOptimize detects the missing attribute.
        // However, it also throws an error in one of the rules before we get to the verifier.
        // So we use an implementation of LocalPhysicalPlanOptimizer that does not have any rules.
        LocalPhysicalPlanOptimizer optimizerWithNoRules = getCustomRulesLocalPhysicalPlanOptimizer(List.of());
        Exception e = expectThrows(IllegalStateException.class, () -> optimizerWithNoRules.localOptimize(topNExec));
        assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [missing attr"));
    }
private LocalPhysicalPlanOptimizer getCustomRulesLocalPhysicalPlanOptimizer(List<RuleExecutor.Batch<PhysicalPlan>> batches) {
var flags = new EsqlFlags(true);
LocalPhysicalOptimizerContext context = new LocalPhysicalOptimizerContext(
TEST_PLANNER_SETTINGS,
flags,
config,
FoldContext.small(),
SearchStats.EMPTY
);
LocalPhysicalPlanOptimizer localPhysicalPlanOptimizer = new LocalPhysicalPlanOptimizer(context) {
@Override
protected List<Batch<PhysicalPlan>> batches() {
return batches;
}
};
return localPhysicalPlanOptimizer;
}
    /**
     * Injects a one-shot rule that adds an extra output attribute and checks that the optimizer's
     * output verifier rejects the changed plan output.
     */
    public void testVerifierOnAdditionalAttributeAdded() throws Exception {
        PhysicalPlan plan = plannerOptimizer.plan("""
            from test
            | stats a = min(salary) by emp_no
            """);
        var limit = as(plan, LimitExec.class);
        var aggregate = as(limit.child(), AggregateExec.class);
        var min = as(Alias.unwrap(aggregate.aggregates().get(0)), Min.class);
        var salary = as(min.field(), NamedExpression.class);
        assertThat(salary.name(), is("salary"));
        Holder<Integer> appliedCount = new Holder<>(0);
        // use a custom rule that adds another output attribute
        var customRuleBatch = new RuleExecutor.Batch<>(
            "CustomRuleBatch",
            RuleExecutor.Limiter.ONCE,
            new PhysicalOptimizerRules.ParameterizedOptimizerRule<PhysicalPlan, LocalPhysicalOptimizerContext>() {
                @Override
                public PhysicalPlan rule(PhysicalPlan plan, LocalPhysicalOptimizerContext context) {
                    // This rule adds a missing attribute to the plan output
                    // We only want to apply it once, so we use a static counter
                    if (appliedCount.get() == 0) {
                        appliedCount.set(appliedCount.get() + 1);
                        Literal additionalLiteral = new Literal(Source.EMPTY, "additional literal", INTEGER);
                        return new EvalExec(
                            plan.source(),
                            plan,
                            List.of(new Alias(Source.EMPTY, "additionalAttribute", additionalLiteral))
                        );
                    }
                    return plan;
                }
            }
        );
        LocalPhysicalPlanOptimizer customRulesLocalPhysicalPlanOptimizer = getCustomRulesLocalPhysicalPlanOptimizer(
            List.of(customRuleBatch)
        );
        // The verifier must flag the extra attribute introduced by the rogue rule.
        Exception e = expectThrows(VerificationException.class, () -> customRulesLocalPhysicalPlanOptimizer.localOptimize(plan));
        assertThat(e.getMessage(), containsString("Output has changed from"));
        assertThat(e.getMessage(), containsString("additionalAttribute"));
    }
    /**
     * Injects a one-shot rule that changes the data type of an output attribute (via an
     * output()-overriding LimitExec) and checks that the output verifier rejects it.
     */
    public void testVerifierOnAttributeDatatypeChanged() throws Exception {
        PhysicalPlan plan = plannerOptimizer.plan("""
            from test
            | stats a = min(salary) by emp_no
            """);
        var limit = as(plan, LimitExec.class);
        var aggregate = as(limit.child(), AggregateExec.class);
        var min = as(Alias.unwrap(aggregate.aggregates().get(0)), Min.class);
        var salary = as(min.field(), NamedExpression.class);
        assertThat(salary.name(), is("salary"));
        Holder<Integer> appliedCount = new Holder<>(0);
        // use a custom rule that changes the datatype of an output attribute
        var customRuleBatch = new RuleExecutor.Batch<>(
            "CustomRuleBatch",
            RuleExecutor.Limiter.ONCE,
            new PhysicalOptimizerRules.ParameterizedOptimizerRule<PhysicalPlan, LocalPhysicalOptimizerContext>() {
                @Override
                public PhysicalPlan rule(PhysicalPlan plan, LocalPhysicalOptimizerContext context) {
                    // We only want to apply it once, so we use a static counter
                    if (appliedCount.get() == 0) {
                        appliedCount.set(appliedCount.get() + 1);
                        LimitExec limit = as(plan, LimitExec.class);
                        // Subclass whose output() rewrites the first attribute's type to DATETIME.
                        LimitExec newLimit = new LimitExec(
                            plan.source(),
                            limit.child(),
                            new Literal(Source.EMPTY, 1000, INTEGER),
                            randomEstimatedRowSize()
                        ) {
                            @Override
                            public List<Attribute> output() {
                                List<Attribute> oldOutput = super.output();
                                List<Attribute> newOutput = new ArrayList<>(oldOutput);
                                newOutput.set(0, oldOutput.get(0).withDataType(DataType.DATETIME));
                                return newOutput;
                            }
                        };
                        return newLimit;
                    }
                    return plan;
                }
            }
        );
        LocalPhysicalPlanOptimizer customRulesLocalPhysicalPlanOptimizer = getCustomRulesLocalPhysicalPlanOptimizer(
            List.of(customRuleBatch)
        );
        Exception e = expectThrows(VerificationException.class, () -> customRulesLocalPhysicalPlanOptimizer.localOptimize(plan));
        assertThat(e.getMessage(), containsString("Output has changed from"));
    }
    /**
     * Time-series rate aggregation grouped by two dimensions: verifies the two-phase plan shape
     * (partial time-series agg on the data node, final agg above the exchange) and which fields
     * are extracted at each stage.
     */
    public void testTranslateMetricsGroupedByTwoDimension() {
        var query = "TS k8s | STATS sum(rate(network.total_bytes_in)) BY cluster, pod";
        var plan = plannerOptimizerTimeSeries.plan(query);
        var project = as(plan, ProjectExec.class);
        var unpack = as(project.child(), EvalExec.class);
        var limit = as(unpack.child(), LimitExec.class);
        var secondAgg = as(limit.child(), AggregateExec.class);
        var pack = as(secondAgg.child(), EvalExec.class);
        var finalAgg = as(pack.child(), TimeSeriesAggregateExec.class);
        var sink = as(finalAgg.child(), ExchangeExec.class);
        ProjectExec projectExec = as(sink.child(), ProjectExec.class);
        EvalExec evalExec = as(projectExec.child(), EvalExec.class);
        // Dimensions are read after the partial aggregation, not from the source.
        FieldExtractExec readDimensions = as(evalExec.child(), FieldExtractExec.class);
        assertThat(Expressions.names(readDimensions.attributesToExtract()), containsInAnyOrder("cluster", "pod"));
        TimeSeriesAggregateExec partialAgg = as(readDimensions.child(), TimeSeriesAggregateExec.class);
        assertThat(partialAgg.aggregates(), hasSize(2));
        assertThat(Alias.unwrap(partialAgg.aggregates().get(0)), instanceOf(Rate.class));
        assertThat(Alias.unwrap(partialAgg.aggregates().get(1)), instanceOf(FirstDocId.class));
        // The metric inputs are extracted below the partial aggregation.
        FieldExtractExec readMetrics = as(partialAgg.child(), FieldExtractExec.class);
        assertThat(
            Expressions.names(readMetrics.attributesToExtract()),
            containsInAnyOrder("_tsid", "@timestamp", "network.total_bytes_in")
        );
        as(readMetrics.child(), EsQueryExec.class);
    }
private boolean isMultiTypeEsField(Expression e) {
return e instanceof FieldAttribute fa && fa.field() instanceof MultiTypeEsField;
}
private Stat queryStatsFor(PhysicalPlan plan) {
var limit = as(plan, LimitExec.class);
var agg = as(limit.child(), AggregateExec.class);
var exg = as(agg.child(), ExchangeExec.class);
var statSource = as(exg.child(), EsStatsQueryExec.class);
return statSource.stat();
}
/**
* Base | for |
java | netty__netty | testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketStartTlsTest.java | {
"start": 9554,
"end": 11449
} | class ____ extends SimpleChannelInboundHandler<String> {
private final SslHandler sslHandler;
private final boolean autoRead;
private Future<Channel> handshakeFuture;
final AtomicReference<Throwable> exception = new AtomicReference<Throwable>();
StartTlsClientHandler(SSLEngine engine, boolean autoRead) {
engine.setUseClientMode(true);
sslHandler = new SslHandler(engine);
this.autoRead = autoRead;
}
@Override
public void channelActive(ChannelHandlerContext ctx)
throws Exception {
if (!autoRead) {
ctx.read();
}
ctx.writeAndFlush("StartTlsRequest\n");
}
@Override
public void channelRead0(ChannelHandlerContext ctx, String msg) throws Exception {
if ("StartTlsResponse".equals(msg)) {
ctx.pipeline().addAfter("logger", "ssl", sslHandler);
handshakeFuture = sslHandler.handshakeFuture();
ctx.writeAndFlush("EncryptedRequest\n");
return;
}
assertEquals("EncryptedResponse", msg);
assertNotNull(handshakeFuture);
assertTrue(handshakeFuture.isSuccess());
ctx.close();
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
if (!autoRead) {
ctx.read();
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx,
Throwable cause) throws Exception {
if (logger.isWarnEnabled()) {
logger.warn("Unexpected exception from the client side", cause);
}
exception.compareAndSet(null, cause);
ctx.close();
}
}
private static | StartTlsClientHandler |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java | {
"start": 6791,
"end": 8445
} | enum ____ implements Writeable {
STARTED,
INDEXING,
ABORTING,
STOPPING,
STOPPED,
FAILED,
WAITING;
public static State fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public static State fromStream(StreamInput in) throws IOException {
return in.readEnum(State.class);
}
public static State fromComponents(TransformTaskState taskState, IndexerState indexerState) {
if (taskState == null || taskState == TransformTaskState.STOPPED) {
return STOPPED;
} else if (taskState == TransformTaskState.FAILED) {
return FAILED;
} else {
// If we get here then the task state must be started, and that means we should have an indexer state
assert (taskState == TransformTaskState.STARTED);
assert (indexerState != null);
return switch (indexerState) {
case STARTED -> STARTED;
case INDEXING -> INDEXING;
case STOPPING -> STOPPING;
case STOPPED -> STOPPING;
case ABORTING -> ABORTING;
};
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(this);
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
public String value() {
return name().toLowerCase(Locale.ROOT);
}
}
}
| State |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/validatorfactory/MyMultipleHibernateValidatorFactoryCustomizer.java | {
"start": 430,
"end": 1261
} | class ____ implements HibernateValidatorFactoryCustomizer {
@Override
public void customize(BaseHibernateValidatorConfiguration<?> configuration) {
ConstraintMapping constraintMapping = configuration.createConstraintMapping();
constraintMapping
.constraintDefinition(Email.class)
.includeExistingValidators(false)
.validatedBy(MyEmailValidator.class);
configuration.addMapping(constraintMapping);
constraintMapping = configuration.createConstraintMapping();
constraintMapping
.constraintDefinition(Min.class)
.includeExistingValidators(false)
.validatedBy(MyNumValidator.class);
configuration.addMapping(constraintMapping);
}
}
| MyMultipleHibernateValidatorFactoryCustomizer |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/RestoreStreamTaskTest.java | {
"start": 11805,
"end": 12542
} | class ____<IN, OUT> extends AbstractStreamOperator<OUT>
implements OneInputStreamOperator<IN, OUT> {
@Override
public void initializeState(StateInitializationContext context) throws Exception {
assertThat(context.getRestoredCheckpointId().isPresent())
.as("Restored context id should be set iff is restored")
.isEqualTo(context.isRestored());
if (context.isRestored()) {
RESTORED_OPERATORS.put(
getOperatorID(), context.getRestoredCheckpointId().getAsLong());
}
}
}
/** Operator that counts processed messages and keeps result on state. */
private static | RestoreWatchOperator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.